code2llm 0.5.9__tar.gz → 0.5.10__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. {code2llm-0.5.9 → code2llm-0.5.10}/PKG-INFO +1 -1
  2. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/__init__.py +1 -1
  3. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/cli.py +27 -15
  4. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/exporters/readme_exporter.py +123 -26
  5. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/nlp/__init__.py +1 -1
  6. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm.egg-info/PKG-INFO +1 -1
  7. {code2llm-0.5.9 → code2llm-0.5.10}/pyproject.toml +1 -1
  8. {code2llm-0.5.9 → code2llm-0.5.10}/tests/test_prompt_txt.py +42 -20
  9. {code2llm-0.5.9 → code2llm-0.5.10}/LICENSE +0 -0
  10. {code2llm-0.5.9 → code2llm-0.5.10}/README.md +0 -0
  11. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/__main__.py +0 -0
  12. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/analysis/__init__.py +0 -0
  13. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/analysis/call_graph.py +0 -0
  14. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/analysis/cfg.py +0 -0
  15. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/analysis/coupling.py +0 -0
  16. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/analysis/data_analysis.py +0 -0
  17. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/analysis/dfg.py +0 -0
  18. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/analysis/pipeline_detector.py +0 -0
  19. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/analysis/side_effects.py +0 -0
  20. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/analysis/smells.py +0 -0
  21. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/analysis/type_inference.py +0 -0
  22. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/core/__init__.py +0 -0
  23. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/core/analyzer.py +0 -0
  24. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/core/config.py +0 -0
  25. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/core/core/__init__.py +0 -0
  26. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/core/core/file_analyzer.py +0 -0
  27. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/core/core/file_cache.py +0 -0
  28. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/core/core/file_filter.py +0 -0
  29. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/core/core/refactoring.py +0 -0
  30. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/core/models.py +0 -0
  31. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/core/streaming/__init__.py +0 -0
  32. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/core/streaming/cache.py +0 -0
  33. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/core/streaming/incremental.py +0 -0
  34. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/core/streaming/prioritizer.py +0 -0
  35. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/core/streaming/scanner.py +0 -0
  36. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/core/streaming/strategies.py +0 -0
  37. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/core/streaming_analyzer.py +0 -0
  38. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/exporters/__init__.py +0 -0
  39. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/exporters/base.py +0 -0
  40. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/exporters/context_exporter.py +0 -0
  41. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/exporters/evolution_exporter.py +0 -0
  42. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/exporters/flow_constants.py +0 -0
  43. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/exporters/flow_exporter.py +0 -0
  44. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/exporters/flow_renderer.py +0 -0
  45. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/exporters/json_exporter.py +0 -0
  46. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/exporters/llm_exporter.py +0 -0
  47. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/exporters/map_exporter.py +0 -0
  48. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/exporters/mermaid_exporter.py +0 -0
  49. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/exporters/toon/__init__.py +0 -0
  50. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/exporters/toon/helpers.py +0 -0
  51. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/exporters/toon/metrics.py +0 -0
  52. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/exporters/toon/module_detail.py +0 -0
  53. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/exporters/toon/renderer.py +0 -0
  54. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/exporters/toon.py +0 -0
  55. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/exporters/yaml_exporter.py +0 -0
  56. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/generators/__init__.py +0 -0
  57. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/generators/llm_flow.py +0 -0
  58. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/generators/llm_task.py +0 -0
  59. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/generators/mermaid.py +0 -0
  60. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/nlp/config.py +0 -0
  61. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/nlp/entity_resolution.py +0 -0
  62. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/nlp/intent_matching.py +0 -0
  63. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/nlp/normalization.py +0 -0
  64. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/nlp/pipeline.py +0 -0
  65. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/patterns/__init__.py +0 -0
  66. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/patterns/detector.py +0 -0
  67. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/refactor/__init__.py +0 -0
  68. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm/refactor/prompt_engine.py +0 -0
  69. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm.egg-info/SOURCES.txt +0 -0
  70. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm.egg-info/dependency_links.txt +0 -0
  71. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm.egg-info/entry_points.txt +0 -0
  72. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm.egg-info/requires.txt +0 -0
  73. {code2llm-0.5.9 → code2llm-0.5.10}/code2llm.egg-info/top_level.txt +0 -0
  74. {code2llm-0.5.9 → code2llm-0.5.10}/setup.cfg +0 -0
  75. {code2llm-0.5.9 → code2llm-0.5.10}/setup.py +0 -0
  76. {code2llm-0.5.9 → code2llm-0.5.10}/tests/test_advanced_analysis.py +0 -0
  77. {code2llm-0.5.9 → code2llm-0.5.10}/tests/test_analyzer.py +0 -0
  78. {code2llm-0.5.9 → code2llm-0.5.10}/tests/test_deep_analysis.py +0 -0
  79. {code2llm-0.5.9 → code2llm-0.5.10}/tests/test_edge_cases.py +0 -0
  80. {code2llm-0.5.9 → code2llm-0.5.10}/tests/test_flow_exporter.py +0 -0
  81. {code2llm-0.5.9 → code2llm-0.5.10}/tests/test_format_quality.py +0 -0
  82. {code2llm-0.5.9 → code2llm-0.5.10}/tests/test_nlp_pipeline.py +0 -0
  83. {code2llm-0.5.9 → code2llm-0.5.10}/tests/test_pipeline_detector.py +0 -0
  84. {code2llm-0.5.9 → code2llm-0.5.10}/tests/test_prompt_engine.py +0 -0
  85. {code2llm-0.5.9 → code2llm-0.5.10}/tests/test_refactoring_engine.py +0 -0
  86. {code2llm-0.5.9 → code2llm-0.5.10}/tests/test_toon_v2.py +0 -0
{code2llm-0.5.9 → code2llm-0.5.10}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: code2llm
-Version: 0.5.9
+Version: 0.5.10
 Summary: High-performance Python code flow analysis with optimized TOON format - CFG, DFG, call graphs, and intelligent code queries
 Home-page: https://github.com/wronai/stts
 Author: STTS Project
{code2llm-0.5.9 → code2llm-0.5.10}/code2llm/__init__.py
@@ -8,7 +8,7 @@ Includes NLP Processing Pipeline for query normalization, intent matching,
 and entity resolution with multilingual support.
 """
 
-__version__ = "0.5.9"
+__version__ = "0.5.10"
 __author__ = "STTS Project"
 
 # Core analysis components
{code2llm-0.5.9 → code2llm-0.5.10}/code2llm/cli.py
@@ -445,36 +445,48 @@ def _export_code2logic(args, source_path: Path, output_dir: Path, formats: list[
         print(f" - CODE2LOGIC (project logic): {target}")
 
 
-def _export_prompt_txt(args, output_dir: Path, formats: list[str]) -> None:
+def _export_prompt_txt(args, output_dir: Path, formats: list[str], source_path: Optional[Path] = None) -> None:
     """Generate prompt.txt useful to send to an LLM."""
     # Keep it conservative: generate when code2logic is requested.
     if 'code2logic' not in formats and 'all' not in formats:
         return
 
     prompt_path = output_dir / 'prompt.txt'
+
+    # Determine absolute paths for display
+    project_path = str(source_path.resolve()) if source_path else str(Path.cwd().resolve())
+    output_abs_path = str(output_dir.resolve())
 
     files = [
-        'analysis.toon',
-        'context.md',
-        'evolution.toon',
-        'project.toon',
-        'README.md',
+        ('analysis.toon', 'Health diagnostics - complexity metrics, god modules, coupling issues, refactoring priorities'),
+        ('context.md', 'LLM narrative - architecture summary, key entry points, process flows, public API surface'),
+        ('evolution.toon', 'Refactoring queue - ranked actions by impact/effort, risks, metrics targets, history'),
+        ('project.toon', 'Project logic - compact module view from code2logic, file sizes, dependencies overview'),
+        ('README.md', 'Documentation - complete guide to all generated files, usage examples, interpretation'),
     ]
-    existing = [f for f in files if (output_dir / f).exists()]
-    missing = [f for f in files if (output_dir / f).exists() is False]
+
+    existing = [(name, desc) for name, desc in files if (output_dir / name).exists()]
+    missing = [name for name, desc in files if (output_dir / name).exists() is False]
 
     lines: list[str] = []
     lines.append("You are an AI assistant helping me understand and improve a codebase.")
     lines.append("Use the attached/generated files as the authoritative context.")
     lines.append("")
-    lines.append("Files:")
-    for f in existing:
-        lines.append(f"- {f}")
+    lines.append(f"we are in project path: {project_path}")
+    lines.append("")
+    lines.append("Files for analysis:")
+
+    for name, desc in existing:
+        file_path = f"{output_abs_path}/{name}"
+        lines.append(f"- {file_path} ({desc})")
+
     if missing:
         lines.append("")
-        lines.append("Missing (not generated in this run):")
-        for f in missing:
-            lines.append(f"- {f}")
+        lines.append("Missing files (not generated in this run):")
+        for name in missing:
+            file_path = f"{output_abs_path}/{name}"
+            lines.append(f"- {file_path}")
+
     lines.append("")
     lines.append("Task:")
     lines.append("- Summarize the architecture and main flows.")
@@ -508,7 +520,7 @@ def _run_exports(args, result, output_dir: Path, source_path: Optional[Path] = N
 
     if source_path is not None:
         _export_code2logic(args, source_path, output_dir, formats)
-        _export_prompt_txt(args, output_dir, formats)
+        _export_prompt_txt(args, output_dir, formats, source_path)
 
     if args.refactor:
         _export_refactor_prompts(args, result, output_dir)
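
For reference, a minimal sketch of exercising the updated helper directly. The import path and the assumption that only `args.verbose` is read are taken from the test module further down, not from a documented API:

```python
# Minimal sketch, assuming _export_prompt_txt is importable from code2llm.cli
# and that the helper only reads args.verbose (as the tests below suggest).
from pathlib import Path
from types import SimpleNamespace

from code2llm.cli import _export_prompt_txt  # assumed import path

out = Path("code2llm_out")
out.mkdir(exist_ok=True)
(out / "analysis.toon").write_text("placeholder")  # stand-in for a real export

args = SimpleNamespace(verbose=False)
_export_prompt_txt(args, out, ["code2logic"], Path(".").resolve())

# In 0.5.10 the prompt opens with "we are in project path: ..." and lists the
# existing files under "Files for analysis:" with absolute paths and descriptions.
print((out / "prompt.txt").read_text())
```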
{code2llm-0.5.9 → code2llm-0.5.10}/code2llm/exporters/readme_exporter.py
@@ -91,6 +91,14 @@ class READMEExporter(BaseExporter):
 
         current_date = datetime.now().strftime('%Y-%m-%d')
 
+        # Check which files actually exist
+        existing_files = self._get_existing_files(output_dir)
+
+        # Build dynamic file sections
+        core_files_section = self._build_core_files_section(existing_files, insights)
+        llm_files_section = self._build_llm_files_section(existing_files)
+        viz_files_section = self._build_viz_files_section(existing_files)
+
         content = f"""# code2llm - Generated Analysis Files
 
 This directory contains the complete analysis of your Python project generated by `code2llm`. Each file serves a specific purpose for understanding, refactoring, and documenting your codebase.
@@ -99,32 +107,9 @@ This directory contains the complete analysis of your Python project generated b
 
 When you run `code2llm ./ -f all`, the following files are created:
 
-### 🎯 Core Analysis Files
-
-| File | Format | Purpose | Key Insights |
-|------|--------|---------|--------------|
-| `analysis.toon` | **TOON** | **🔥 Health diagnostics** - Complexity, god modules, coupling | {insights['critical_functions']} critical functions, {insights['god_modules']} god modules |
-| `evolution.toon` | **TOON** | **📋 Refactoring queue** - Prioritized improvements | {insights['refactoring_actions']} refactoring actions needed |
-| `flow.toon` | **TOON** | **🔄 Data flow analysis** - Pipelines, contracts, types | Data dependencies and side effects |
-| `map.toon` | **TOON** | **🗺️ Structural map** - Modules, imports, signatures | Project architecture overview |
-
-### 🤖 LLM-Ready Documentation
-
-| File | Format | Purpose | Use Case |
-|------|--------|---------|----------|
-| `context.md` | **Markdown** | **📖 LLM narrative** - Architecture summary | Paste into ChatGPT/Claude for code analysis |
-| `analysis.yaml` | **YAML** | **📊 Structured data** - Machine-readable | For scripts and automated processing |
-| `analysis.json` | **JSON** | **🔧 API format** - Programmatic access | For integration with other tools |
-
-### 📊 Visualizations
-
-| File | Format | Purpose | Description |
-|------|--------|---------|-------------|
-| `flow.mmd` | **Mermaid** | **🔄 Control flow diagram** | Function call paths with complexity styling |
-| `calls.mmd` | **Mermaid** | **📞 Call graph** | Function dependencies (edges only) |
-| `compact_flow.mmd` | **Mermaid** | **📦 Module overview** | Aggregated module-level view |
-| `*.png` | **PNG** | **🖼️ Visual diagrams** | Rendered versions of Mermaid files |
-
+{core_files_section}
+{llm_files_section}
+{viz_files_section}
 ## 🚀 Quick Start Commands
 
 ### Basic Analysis
@@ -232,6 +217,42 @@ cat map.toon | head -50
 grep "SIGNATURES" map.toon
 ```
 
+### `project.toon` - Project Logic (Code2Logic)
+**Purpose**: Compact module view generated by code2logic integration
+**Key sections**:
+- **Modules list**: All project modules with file sizes
+- **Imports**: Dependency information
+- **Classes/Functions**: Summary counts
+
+**When to use**: When you need a lightweight project overview combined with code2llm analysis
+
+**Example usage**:
+```bash
+# View compact project structure
+cat project.toon | head -30
+
+# Find largest files
+grep -E "^ .*[0-9]{{3,}}$" project.toon | sort -t',' -k2 -n -r | head -10
+```
+
+### `prompt.txt` - Ready-to-Send LLM Prompt
+**Purpose**: Pre-formatted prompt listing all generated files for LLM conversation
+**Contents**:
+- **Files section**: Lists all existing generated files with descriptions
+- **Missing section**: Shows which files weren't generated (if any)
+- **Task section**: Instructions for LLM analysis
+- **Requirements section**: Guidelines for suggested changes
+
+**Example usage**:
+```bash
+# View the prompt
+cat prompt.txt
+
+# Copy to clipboard and paste into ChatGPT/Claude
+cat prompt.txt | pbcopy          # macOS
+cat prompt.txt | xclip -sel clip # Linux
+```
+
 ### `context.md` - LLM Narrative
 **Purpose**: Ready-to-paste context for AI assistants
 **Key sections**:
@@ -402,3 +423,79 @@ For more information about code2llm, visit: https://github.com/tom-sapletta/code
 """
 
         return content
+
+    def _get_existing_files(self, output_dir: Path) -> Dict[str, bool]:
+        """Check which files exist in the output directory."""
+        files_to_check = {
+            'analysis.toon': 'Health diagnostics',
+            'evolution.toon': 'Refactoring queue',
+            'flow.toon': 'Data flow analysis',
+            'map.toon': 'Structural map',
+            'project.toon': 'Project logic',
+            'prompt.txt': 'LLM prompt',
+            'context.md': 'LLM narrative',
+            'analysis.yaml': 'YAML data',
+            'analysis.json': 'JSON data',
+            'flow.mmd': 'Flow diagram',
+            'calls.mmd': 'Call graph',
+            'compact_flow.mmd': 'Module view',
+        }
+        return {name: (output_dir / name).exists() for name in files_to_check}
+
+    def _build_core_files_section(self, existing: Dict[str, bool], insights: Dict[str, Any]) -> str:
+        """Build the Core Analysis Files section dynamically."""
+        lines = ["### 🎯 Core Analysis Files", ""]
+        lines.append("| File | Format | Purpose | Key Insights |")
+        lines.append("|------|--------|---------|--------------|")
+
+        if existing.get('analysis.toon'):
+            lines.append(f"| `analysis.toon` | **TOON** | **🔥 Health diagnostics** - Complexity, god modules, coupling | {insights['critical_functions']} critical functions, {insights['god_modules']} god modules |")
+        if existing.get('evolution.toon'):
+            lines.append(f"| `evolution.toon` | **TOON** | **📋 Refactoring queue** - Prioritized improvements | {insights['refactoring_actions']} refactoring actions needed |")
+        if existing.get('flow.toon'):
+            lines.append("| `flow.toon` | **TOON** | **🔄 Data flow analysis** - Pipelines, contracts, types | Data dependencies and side effects |")
+        if existing.get('map.toon'):
+            lines.append("| `map.toon` | **TOON** | **🗺️ Structural map** - Modules, imports, signatures | Project architecture overview |")
+        if existing.get('project.toon'):
+            lines.append("| `project.toon` | **TOON** | **🧠 Project logic** - Compact module view from code2logic | Generated via code2logic integration |")
+
+        lines.append("")
+        return "\n".join(lines)
+
+    def _build_llm_files_section(self, existing: Dict[str, bool]) -> str:
+        """Build the LLM-Ready Documentation section dynamically."""
+        lines = ["### 🤖 LLM-Ready Documentation", ""]
+        lines.append("| File | Format | Purpose | Use Case |")
+        lines.append("|------|--------|---------|----------|")
+
+        if existing.get('prompt.txt'):
+            lines.append("| `prompt.txt` | **Text** | **📝 Ready-to-send prompt** - Lists all files with instructions | Attach to LLM conversation as context guide |")
+        if existing.get('context.md'):
+            lines.append("| `context.md` | **Markdown** | **📖 LLM narrative** - Architecture summary | Paste into ChatGPT/Claude for code analysis |")
+        if existing.get('analysis.yaml'):
+            lines.append("| `analysis.yaml` | **YAML** | **📊 Structured data** - Machine-readable | For scripts and automated processing |")
+        if existing.get('analysis.json'):
+            lines.append("| `analysis.json` | **JSON** | **🔧 API format** - Programmatic access | For integration with other tools |")
+
+        lines.append("")
+        return "\n".join(lines)
+
+    def _build_viz_files_section(self, existing: Dict[str, bool]) -> str:
+        """Build the Visualizations section dynamically."""
+        has_mermaid = existing.get('flow.mmd') or existing.get('calls.mmd') or existing.get('compact_flow.mmd')
+        if not has_mermaid:
+            return ""
+
+        lines = ["### 📊 Visualizations", ""]
+        lines.append("| File | Format | Purpose | Description |")
+        lines.append("|------|--------|---------|-------------|")
+
+        if existing.get('flow.mmd'):
+            lines.append("| `flow.mmd` | **Mermaid** | **🔄 Control flow diagram** | Function call paths with complexity styling |")
+        if existing.get('calls.mmd'):
+            lines.append("| `calls.mmd` | **Mermaid** | **📞 Call graph** | Function dependencies (edges only) |")
+        if existing.get('compact_flow.mmd'):
+            lines.append("| `compact_flow.mmd` | **Mermaid** | **📦 Module overview** | Aggregated module-level view |")
+
+        lines.append("")
+        return "\n".join(lines)
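
The net effect of the new helpers is that README sections are emitted only for files that were actually written. A standalone sketch of the same existence check follows; the file names are copied from the diff, while `existing_outputs` is an illustrative name, not part of the package:

```python
# Standalone sketch mirroring _get_existing_files and the gating in
# _build_viz_files_section; not the exporter's public API.
from pathlib import Path
from typing import Dict

VIZ_FILES = ("flow.mmd", "calls.mmd", "compact_flow.mmd")

def existing_outputs(output_dir: Path) -> Dict[str, bool]:
    names = [
        "analysis.toon", "evolution.toon", "flow.toon", "map.toon",
        "project.toon", "prompt.txt", "context.md", "analysis.yaml",
        "analysis.json", *VIZ_FILES,
    ]
    return {name: (output_dir / name).exists() for name in names}

existing = existing_outputs(Path("code2llm_out"))
# The Visualizations table is dropped entirely when no Mermaid output exists.
show_viz_section = any(existing[name] for name in VIZ_FILES)
```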
{code2llm-0.5.9 → code2llm-0.5.10}/code2llm/nlp/__init__.py
@@ -4,7 +4,7 @@ Provides query normalization, intent matching, and entity resolution
 with multilingual support and fuzzy matching.
 """
 
-__version__ = "0.5.9"
+__version__ = "0.5.10"
 
 from .pipeline import NLPPipeline
 from .normalization import QueryNormalizer
{code2llm-0.5.9 → code2llm-0.5.10}/code2llm.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: code2llm
-Version: 0.5.9
+Version: 0.5.10
 Summary: High-performance Python code flow analysis with optimized TOON format - CFG, DFG, call graphs, and intelligent code queries
 Home-page: https://github.com/wronai/stts
 Author: STTS Project
{code2llm-0.5.9 → code2llm-0.5.10}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "code2llm"
-version = "0.5.9"
+version = "0.5.10"
 description = "High-performance Python code flow analysis with optimized TOON format - CFG, DFG, call graphs, and intelligent code queries"
 readme = "README.md"
 requires-python = ">=3.8"
{code2llm-0.5.9 → code2llm-0.5.10}/tests/test_prompt_txt.py
@@ -27,8 +27,9 @@ class TestPromptTxtGeneration:
     def test_prompt_txt_not_generated_without_code2logic_format(self, temp_output_dir, mock_args):
         """Test that prompt.txt is NOT generated when code2logic is not in formats."""
         formats = ['toon', 'evolution']
+        source_path = Path('/home/user/myproject')
 
-        _export_prompt_txt(mock_args, temp_output_dir, formats)
+        _export_prompt_txt(mock_args, temp_output_dir, formats, source_path)
 
         prompt_file = temp_output_dir / 'prompt.txt'
         assert not prompt_file.exists(), "prompt.txt should not be generated without code2logic format"
@@ -36,12 +37,13 @@ class TestPromptTxtGeneration:
     def test_prompt_txt_generated_with_code2logic_format(self, temp_output_dir, mock_args):
         """Test that prompt.txt IS generated when code2logic is in formats."""
         formats = ['toon', 'evolution', 'code2logic']
+        source_path = Path('/home/user/myproject')
 
         # Create some existing files
         (temp_output_dir / 'analysis.toon').write_text('test')
         (temp_output_dir / 'context.md').write_text('test')
 
-        _export_prompt_txt(mock_args, temp_output_dir, formats)
+        _export_prompt_txt(mock_args, temp_output_dir, formats, source_path)
 
         prompt_file = temp_output_dir / 'prompt.txt'
         assert prompt_file.exists(), "prompt.txt should be generated with code2logic format"
@@ -49,93 +51,113 @@ class TestPromptTxtGeneration:
     def test_prompt_txt_generated_with_all_format(self, temp_output_dir, mock_args):
         """Test that prompt.txt IS generated when 'all' is in formats."""
         formats = ['all']
+        source_path = Path('/home/user/myproject')
 
-        _export_prompt_txt(mock_args, temp_output_dir, formats)
+        _export_prompt_txt(mock_args, temp_output_dir, formats, source_path)
 
         prompt_file = temp_output_dir / 'prompt.txt'
         assert prompt_file.exists(), "prompt.txt should be generated with 'all' format"
 
     def test_prompt_txt_lists_existing_files(self, temp_output_dir, mock_args):
-        """Test that prompt.txt correctly lists existing files."""
+        """Test that prompt.txt correctly lists existing files with paths and descriptions."""
         formats = ['code2logic']
+        source_path = Path('/home/user/myproject')
 
         # Create some files that should be detected
        expected_files = ['analysis.toon', 'context.md']
         for f in expected_files:
             (temp_output_dir / f).write_text('test content')
 
-        _export_prompt_txt(mock_args, temp_output_dir, formats)
+        _export_prompt_txt(mock_args, temp_output_dir, formats, source_path)
 
         prompt_file = temp_output_dir / 'prompt.txt'
         content = prompt_file.read_text()
 
-        # Check that existing files are listed
+        # Check that project path is shown
+        assert "we are in project path:" in content
+        assert "/home/user/myproject" in content
+
+        # Check that existing files are listed with paths and descriptions
+        assert "Files for analysis:" in content
         for f in expected_files:
-            assert f"- {f}" in content, f"Existing file {f} should be listed in prompt.txt"
+            assert f in content, f"Existing file {f} should be listed in prompt.txt"
+            # Check for path format with description
+            assert "- " in content, "File should be listed with bullet point"
 
         # Check that missing files are marked
-        assert "Missing" in content or "project.toon" in content, "Missing files should be indicated"
+        assert "Missing files" in content or "project.toon" in content, "Missing files should be indicated"
 
     def test_prompt_txt_shows_missing_files(self, temp_output_dir, mock_args):
         """Test that prompt.txt shows missing files section when files don't exist."""
         formats = ['code2logic']
+        source_path = Path('/home/user/myproject')
 
         # Don't create any files - all should be missing
-        _export_prompt_txt(mock_args, temp_output_dir, formats)
+        _export_prompt_txt(mock_args, temp_output_dir, formats, source_path)
 
         prompt_file = temp_output_dir / 'prompt.txt'
         content = prompt_file.read_text()
 
-        assert "Missing" in content, "Missing section should be present when files don't exist"
+        assert "Missing files" in content, "Missing section should be present when files don't exist"
         assert "analysis.toon" in content, "Missing files should be listed"
 
     def test_prompt_txt_contains_task_instructions(self, temp_output_dir, mock_args):
         """Test that prompt.txt contains task instructions for LLM."""
         formats = ['code2logic']
+        source_path = Path('/home/user/myproject')
 
-        _export_prompt_txt(mock_args, temp_output_dir, formats)
+        _export_prompt_txt(mock_args, temp_output_dir, formats, source_path)
 
         prompt_file = temp_output_dir / 'prompt.txt'
         content = prompt_file.read_text()
 
         # Check for key sections
-        assert "Files:" in content, "Files section should be present"
+        assert "You are an AI assistant" in content, "Main instruction should be present"
         assert "Task:" in content, "Task section should be present"
         assert "Constraints:" in content, "Constraints section should be present"
+        assert "we are in project path:" in content, "Project path should be present"
 
     def test_prompt_txt_content_structure(self, temp_output_dir, mock_args):
         """Test the overall structure of generated prompt.txt."""
         formats = ['code2logic']
+        source_path = Path('/home/user/myproject')
 
         # Create all expected files
         all_files = ['analysis.toon', 'context.md', 'evolution.toon', 'project.toon', 'README.md']
         for f in all_files:
             (temp_output_dir / f).write_text('test')
 
-        _export_prompt_txt(mock_args, temp_output_dir, formats)
+        _export_prompt_txt(mock_args, temp_output_dir, formats, source_path)
 
         prompt_file = temp_output_dir / 'prompt.txt'
         content = prompt_file.read_text()
         lines = content.split('\n')
 
         # Check structure
-        assert any("AI assistant" in line or "helping me" in line for line in lines), \
-            "Prompt should mention AI assistant"
-        assert any("authoritative context" in line for line in lines), \
-            "Prompt should mention authoritative context"
+        assert any("You are an AI assistant" in line for line in lines), \
+            "Prompt should start with AI assistant instruction"
+        assert any("we are in project path:" in line for line in lines), \
+            "Project path should be present"
+        assert any("Files for analysis:" in line for line in lines), \
+            "Files section should be present"
 
         # All files should be listed without missing section
-        assert "Missing" not in content, "No missing section when all files exist"
+        assert "Missing files" not in content, "No missing section when all files exist"
         for f in all_files:
-            assert f"- {f}" in content, f"All files should be listed: {f}"
+            assert f in content, f"All files should be listed: {f}"
+
+        # Check for file paths with descriptions
+        assert "- " in content, "Files should be listed with bullet points"
+        assert "Health diagnostics" in content, "Descriptions should be present"
 
     def test_prompt_txt_no_verbose_output(self, temp_output_dir):
         """Test that no print occurs when verbose is False."""
         args = MagicMock()
         args.verbose = False
         formats = ['code2logic']
+        source_path = Path('/home/user/myproject')
 
         # Should not raise or print anything
-        _export_prompt_txt(args, temp_output_dir, formats)
+        _export_prompt_txt(args, temp_output_dir, formats, source_path)
 
         assert (temp_output_dir / 'prompt.txt').exists()
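
To exercise the updated assertions locally, this test module can be run on its own (assumes pytest is installed in the environment):

```python
# Convenience sketch: equivalent to running `pytest -q tests/test_prompt_txt.py`.
import sys
import pytest

sys.exit(pytest.main(["-q", "tests/test_prompt_txt.py"]))
```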