codescope-cli 1.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27) hide show
  1. codescope_cli-1.0.0/PKG-INFO +7 -0
  2. codescope_cli-1.0.0/codebase_analyzer/analyzer/__init__.py +0 -0
  3. codescope_cli-1.0.0/codebase_analyzer/analyzer/complexity.py +42 -0
  4. codescope_cli-1.0.0/codebase_analyzer/analyzer/duplicates.py +37 -0
  5. codescope_cli-1.0.0/codebase_analyzer/analyzer/empty_finder.py +30 -0
  6. codescope_cli-1.0.0/codebase_analyzer/analyzer/file_stats.py +31 -0
  7. codescope_cli-1.0.0/codebase_analyzer/analyzer/language_detector.py +39 -0
  8. codescope_cli-1.0.0/codebase_analyzer/analyzer/line_counter.py +22 -0
  9. codescope_cli-1.0.0/codebase_analyzer/analyzer/pretty_output.py +47 -0
  10. codescope_cli-1.0.0/codebase_analyzer/analyzer/scanner.py +34 -0
  11. codescope_cli-1.0.0/codebase_analyzer/cli/__init__.py +0 -0
  12. codescope_cli-1.0.0/codebase_analyzer/cli/args.py +38 -0
  13. codescope_cli-1.0.0/codebase_analyzer/cli/commands.py +53 -0
  14. codescope_cli-1.0.0/codebase_analyzer/constants.py +14 -0
  15. codescope_cli-1.0.0/codebase_analyzer/main.py +10 -0
  16. codescope_cli-1.0.0/codebase_analyzer/reports/__init__.py +0 -0
  17. codescope_cli-1.0.0/codebase_analyzer/reports/dependency_graph.py +57 -0
  18. codescope_cli-1.0.0/codebase_analyzer/reports/html_report.py +169 -0
  19. codescope_cli-1.0.0/codebase_analyzer/reports/reports.py +28 -0
  20. codescope_cli-1.0.0/codebase_analyzer/utils.py +0 -0
  21. codescope_cli-1.0.0/codescope_cli.egg-info/PKG-INFO +7 -0
  22. codescope_cli-1.0.0/codescope_cli.egg-info/SOURCES.txt +25 -0
  23. codescope_cli-1.0.0/codescope_cli.egg-info/dependency_links.txt +1 -0
  24. codescope_cli-1.0.0/codescope_cli.egg-info/entry_points.txt +2 -0
  25. codescope_cli-1.0.0/codescope_cli.egg-info/top_level.txt +2 -0
  26. codescope_cli-1.0.0/pyproject.toml +19 -0
  27. codescope_cli-1.0.0/setup.cfg +4 -0
@@ -0,0 +1,7 @@
1
+ Metadata-Version: 2.4
2
+ Name: codescope-cli
3
+ Version: 1.0.0
4
+ Summary: CLI tool to analyze, inspect and visualize codebases
5
+ Author: Aditya Seswani
6
+ Requires-Python: >=3.8
7
+ Description-Content-Type: text/markdown
@@ -0,0 +1,42 @@
1
+ import os
2
+
3
+
4
def compute_complexity(files, folders):
    """
    Compute simple structural complexity metrics for the project.

    Parameters:
    - files: list of file paths to inspect
    - folders: list of folder paths (currently unused; kept for
      interface symmetry with the other analyzer functions)

    Returns a dict with average/maximum lines per file, the deepest
    path nesting level, and the directory containing the most files.
    """

    line_counts = []
    folder_counts = {}

    for file in files:
        try:
            # errors="ignore" keeps odd encodings from raising
            # UnicodeDecodeError (consistent with line_counter.py).
            with open(file, encoding="utf-8", errors="ignore") as f:
                lines = len(f.readlines())
            line_counts.append(lines)
        except OSError:
            # Unreadable file (permissions, broken symlink, ...): skip it.
            continue

        # count files per directory
        folder = os.path.dirname(file)
        folder_counts[folder] = folder_counts.get(folder, 0) + 1

    total_files = len(line_counts)

    # Integer average keeps the report free of noisy decimals.
    avg_lines = sum(line_counts) // total_files if total_files else 0
    max_lines = max(line_counts) if line_counts else 0

    # deepest nesting, measured as path-separator count
    max_depth = max(file.count(os.sep) for file in files) if files else 0

    # largest directory (by number of readable files it contains)
    largest_dir = max(folder_counts.items(), key=lambda x: x[1])[0] if folder_counts else ""
    largest_dir_count = max(folder_counts.values()) if folder_counts else 0

    return {
        "avg_lines_per_file": avg_lines,
        "max_file_lines": max_lines,
        "max_depth": max_depth,
        "largest_directory": largest_dir,
        "largest_directory_files": largest_dir_count,
    }
@@ -0,0 +1,37 @@
1
+ import hashlib
2
+
3
+
4
def file_hash(path):
    """
    Compute an MD5 digest of the file's contents.

    MD5 is fine here: the digest only buckets files for duplicate
    detection, nothing security-sensitive.
    Returns the hex digest, or None if the file cannot be read.
    """
    h = hashlib.md5()

    try:
        with open(path, "rb") as f:
            # Stream in 8 KiB chunks so large files stay cheap.
            while chunk := f.read(8192):
                h.update(chunk)
    except OSError:
        # Unreadable file: callers treat None as "skip this file".
        return None

    return h.hexdigest()


def find_duplicates(files):
    """
    Group files that have identical content.

    Returns a list of lists: one inner list per group of two or more
    files whose contents hash identically.
    """

    hashes = {}

    for f in files:
        h = file_hash(f)
        if not h:
            continue

        hashes.setdefault(h, []).append(f)

    return [group for group in hashes.values() if len(group) > 1]
@@ -0,0 +1,30 @@
1
+ from codebase_analyzer.constants import SMALL_FILE_LINE_LIMIT
2
+
3
def find_empty_and_small_files(files, min_lines=SMALL_FILE_LINE_LIMIT):
    """
    Find empty files and very small files.

    Parameters:
    - files: list of file paths
    - min_lines: files with fewer lines than this (but at least one)
      are reported as "small"

    Returns (empty_files, small_files), where small_files holds
    (path, line_count) tuples.
    """

    empty_files = []
    small_files = []

    for file_path in files:
        try:
            with open(file_path, "r", encoding="utf-8", errors="ignore") as file:
                line_count = len(file.readlines())
        except OSError:
            # Skip files that cannot be read (permissions, races, ...).
            continue

        if line_count == 0:
            empty_files.append(file_path)
        elif line_count < min_lines:
            small_files.append((file_path, line_count))

    return empty_files, small_files
@@ -0,0 +1,31 @@
1
+
2
+ import os
3
+
4
+
5
def get_file_and_folder_count(files, folders):
    """
    Returns total number of files and folders as a (files, folders) tuple.
    """
    return len(files), len(folders)
12
+
13
+
14
def get_largest_files(files, top_n=5):
    """
    Returns the top N largest files by size, as (path, size) tuples
    ordered from largest to smallest.
    """
    file_sizes = []

    for file_path in files:
        try:
            size = os.path.getsize(file_path)
        except OSError:
            # Skip files that cannot be accessed (deleted, permissions).
            continue
        file_sizes.append((file_path, size))

    # Sort files by size (largest first)
    file_sizes.sort(key=lambda x: x[1], reverse=True)

    return file_sizes[:top_n]
@@ -0,0 +1,39 @@
1
+ import os
2
+
3
+
4
# Maps a lowercase file extension to the language it represents.
EXTENSION_LANGUAGE_MAP = {
    ".py": "Python",
    ".js": "JavaScript",
    ".java": "Java",
    ".c": "C",
    ".cpp": "C++",
    ".html": "HTML",
    ".css": "CSS",
    ".md": "Markdown",
    ".txt": "Text"
}


def detect_languages(files):
    """
    Detects programming languages based on file extensions.
    Returns a dictionary mapping language name to file count;
    unrecognized extensions are grouped under "Other".
    """

    language_count = {}

    for file_path in files:
        # Extension comparison is case-insensitive (".PY" counts as Python).
        ext = os.path.splitext(file_path)[1].lower()
        language = EXTENSION_LANGUAGE_MAP.get(ext, "Other")
        language_count[language] = language_count.get(language, 0) + 1

    return language_count
@@ -0,0 +1,22 @@
1
def count_lines(files):
    """
    Counts total, empty, and non-empty lines across the given files.

    Returns (total_lines, empty_lines, code_lines), where code_lines
    counts every line that is not blank or whitespace-only.
    """

    total_lines = 0
    empty_lines = 0

    for file_path in files:
        try:
            with open(file_path, "r", encoding="utf-8", errors="ignore") as file:
                for line in file:
                    total_lines += 1
                    if line.strip() == "":
                        empty_lines += 1
        except OSError:
            # Skip files that cannot be read
            continue

    code_lines = total_lines - empty_lines

    return total_lines, empty_lines, code_lines
@@ -0,0 +1,47 @@
1
def print_summary(data, verbose=False):
    """
    Print a console summary of the analysis results.

    With verbose=True, also lists every duplicate file and dead file.
    """
    language_totals = data["languages"]
    overall = sum(language_totals.values())

    # Top three languages by file count (largest first).
    ranked = sorted(language_totals.items(), key=lambda item: item[1], reverse=True)[:3]

    if overall:
        lang_str = " ".join(
            f"{name}({int(count / overall * 100)}%)" for name, count in ranked
        )
    else:
        lang_str = ""

    print("\n════════ Codebase Analyzer ════════\n")

    print(
        f"Files: {data['total_files']} | "
        f"Folders: {data['total_folders']} | "
        f"Lines: {data['total_lines']}"
    )

    print(f"Languages: {lang_str}")

    complexity = data.get("complexity", {})
    if complexity:
        print(
            f"Complexity: Avg {complexity['avg_lines_per_file']} lines/file | "
            f"Depth {complexity['max_depth']}"
        )

    duplicates = data.get("duplicates", [])
    print(f"Duplicates: {len(duplicates)} group(s)")

    dead = data.get("dead_files", [])
    print(f"Dead files: {len(dead)}")

    if not verbose:
        return

    # Detailed listings only appear in verbose mode.
    print("\n--- Duplicate Details ---")
    for group in duplicates:
        for path in group:
            print(" ", path)
        print()

    print("\n--- Dead Files ---")
    for path in dead:
        print(" ", path)
@@ -0,0 +1,34 @@
1
+
2
+ import os
3
+
4
# Directories that should never be descended into while scanning.
EXCLUDED_FOLDERS = [
    ".git",
    "__pycache__",
    "venv",
    "env",
    "node_modules"
]


def scan_project(project_path):
    """
    Walk a project folder and return (files, directories), skipping
    everything inside EXCLUDED_FOLDERS.
    """

    files = []
    directories = []

    for root, dirs, filenames in os.walk(project_path):
        # Prune excluded folders in place so os.walk never enters them.
        dirs[:] = [d for d in dirs if d not in EXCLUDED_FOLDERS]

        directories.extend(os.path.join(root, d) for d in dirs)
        files.extend(os.path.join(root, name) for name in filenames)

    return files, directories
File without changes
@@ -0,0 +1,38 @@
1
+ import argparse
2
+
3
+
4
def get_cli_args():
    """
    Parse and return the command-line arguments for the analyzer.
    """
    parser = argparse.ArgumentParser(description="Codebase Analyzer Tool")

    parser.add_argument("path", help="Path of the project to analyze")
    parser.add_argument(
        "--report",
        default=None,
        help="Generate analysis report to a text file",
    )
    parser.add_argument(
        "--html",
        type=str,
        help="Generate HTML report at specified path",
    )
    parser.add_argument(
        "--graph",
        type=str,
        help="Generate dependency graph (.dot file)",
    )
    parser.add_argument(
        "--verbose",
        action="store_true",
        help="Show detailed output",
    )

    return parser.parse_args()
@@ -0,0 +1,53 @@
1
+ from codebase_analyzer.analyzer.scanner import scan_project
2
+ from codebase_analyzer.analyzer.file_stats import get_file_and_folder_count, get_largest_files
3
+ from codebase_analyzer.analyzer.line_counter import count_lines
4
+ from codebase_analyzer.analyzer.language_detector import detect_languages
5
+ from codebase_analyzer.analyzer.empty_finder import find_empty_and_small_files
6
+ from codebase_analyzer.reports.reports import write_report
7
+ from codebase_analyzer.analyzer.pretty_output import print_summary
8
+ from codebase_analyzer.analyzer.complexity import compute_complexity
9
+ from codebase_analyzer.analyzer.duplicates import find_duplicates
10
def run_analysis(project_path, report_path=None, html_path=None, graph_path=None, verbose=False):
    """
    Run every analyzer over *project_path*, print a console summary,
    and optionally emit text/HTML/graph reports.

    Parameters:
    - project_path: root folder to analyze
    - report_path: if given, write a plain-text report here
    - html_path: if given, write an HTML dashboard here
    - graph_path: if given, write a dependency graph (.dot + rendered) here
    - verbose: pass-through to print_summary for detailed output
    """
    files, folders = scan_project(project_path)
    total_files, total_folders = get_file_and_folder_count(files, folders)
    total_lines, empty_lines, code_lines = count_lines(files)
    languages = detect_languages(files)
    empty_files, small_files = find_empty_and_small_files(files)
    largest_files = get_largest_files(files)
    complexity = compute_complexity(files, folders)
    duplicates = find_duplicates(files)

    data = {
        "total_files": total_files,
        "total_folders": total_folders,
        "total_lines": total_lines,
        "empty_lines": empty_lines,
        "code_lines": code_lines,
        "languages": languages,
        "largest_files": largest_files,
        "empty_files": empty_files,
        "small_files": small_files,
        "duplicates": duplicates,  # was listed twice; once is enough
        "complexity": complexity,  # already computed above; no need to recompute
    }

    print_summary(data, verbose)

    if report_path:
        write_report(report_path, data)
        print("\nText report generated at:", report_path)

    # The reports below are produced only when explicitly requested, so a
    # plain `codescope <path>` run no longer writes files or pops open a
    # browser as a side effect.
    if html_path:
        from codebase_analyzer.reports.html_report import write_html_report

        write_html_report(html_path, data)
        print("\nHTML report generated at:", html_path)

    if graph_path:
        from codebase_analyzer.reports.dependency_graph import write_dependency_graph

        write_dependency_graph(graph_path, files)
        print("\nDependency graph generated at:", graph_path)
@@ -0,0 +1,14 @@
1
# Folders to skip while scanning
# NOTE(review): scanner.py keeps its own copy of this list — keep the
# two in sync (or have scanner.py import this one).
EXCLUDED_FOLDERS = [
    ".git",
    "__pycache__",
    "venv",
    "env",
    "node_modules"
]

# Small file threshold: files with fewer lines than this (but at least
# one) are reported as "small" by empty_finder.find_empty_and_small_files.
SMALL_FILE_LINE_LIMIT = 5

# Largest files to display
TOP_LARGEST_FILES = 5
@@ -0,0 +1,10 @@
1
+ from codebase_analyzer.cli.args import get_cli_args
2
+ from codebase_analyzer.cli.commands import run_analysis
3
+
4
def main():
    """Console entry point: parse CLI arguments and run the analysis."""
    cli = get_cli_args()
    run_analysis(
        cli.path,
        report_path=cli.report,
        html_path=cli.html,
        graph_path=cli.graph,
        verbose=cli.verbose,
    )


if __name__ == "__main__":
    main()
@@ -0,0 +1,57 @@
1
+ import os
2
+ import ast
3
+ import subprocess
4
def extract_imports(file_path):
    """Extract top-level imported module names from a Python file using AST."""
    imports = set()
    try:
        with open(file_path, "r", encoding="utf-8") as f:
            tree = ast.parse(f.read())
        for node in ast.walk(tree):
            if isinstance(node, ast.Import):
                for alias in node.names:
                    imports.add(alias.name.split(".")[0])
            elif isinstance(node, ast.ImportFrom):
                if node.module:
                    imports.add(node.module.split(".")[0])
    except Exception:
        # Unreadable or syntactically invalid file: report no imports.
        pass
    return imports


def write_dependency_graph(output_path, files):
    """
    Write a Graphviz .dot dependency graph for the project's Python files
    and, when the `dot` executable is available, render it as a PNG at
    *output_path*.

    The .dot file is always produced; PNG rendering is best-effort.
    """
    # Derive the .dot path robustly: the old `.replace(".png", ".dot")`
    # silently made dot_path equal to output_path whenever the extension
    # was not ".png", so the render step would clobber the .dot file.
    dot_path = os.path.splitext(output_path)[0] + ".dot"

    py_files = [f for f in files if f.endswith(".py")]

    edges = []
    for file in py_files:
        module_name = os.path.splitext(os.path.basename(file))[0]
        for imp in extract_imports(file):
            edges.append((module_name, imp))

    with open(dot_path, "w", encoding="utf-8") as f:
        f.write("digraph CodebaseDependencies {\n")
        f.write("rankdir=LR;\n")
        f.write('node [shape=box, style="filled,rounded", fontname="Arial"];\n\n')

        project_modules = {
            os.path.splitext(os.path.basename(f))[0]
            for f in py_files
        }
        external_modules = {dst for _, dst in edges if dst not in project_modules}

        # FIRST: define project nodes (blue)
        for module in project_modules:
            f.write(f'"{module}" [fillcolor="#4F81BD", fontcolor="white"];\n')

        # SECOND: define external nodes (gray)
        for module in external_modules:
            f.write(f'"{module}" [fillcolor="#D3D3D3", fontcolor="black"];\n')

        f.write("\n")

        # LAST: draw edges
        for src, dst in edges:
            f.write(f'"{src}" -> "{dst}";\n')

        f.write("}\n")

    try:
        # check=False tolerates a non-zero exit from dot, but a missing
        # executable used to crash the whole run with FileNotFoundError.
        subprocess.run(["dot", "-Tpng", dot_path, "-o", output_path], check=False)
    except FileNotFoundError:
        print("Graphviz 'dot' not found; skipped PNG render. DOT file at:", dot_path)
@@ -0,0 +1,169 @@
1
+ import json
2
+ import webbrowser
3
+ import os
4
def write_html_report(path, data):
    """
    Render the analysis *data* dict to a self-contained HTML dashboard
    at *path* and open it in the default web browser.

    Expects the keys produced by cli.commands.run_analysis:
    total_files, total_folders, total_lines, code_lines, languages,
    and optionally complexity, duplicates, dead_files, largest_files.
    """
    languages = data["languages"]
    labels = list(languages.keys())
    values = list(languages.values())
    # Optional sections fall back to empty so the template never KeyErrors.
    complexity = data.get("complexity", {})
    duplicates = data.get("duplicates", [])
    dead_files = data.get("dead_files", [])
    largest_files = data.get("largest_files", [])
    # NOTE: inside this f-string, literal CSS/JS braces are doubled ({{ }});
    # single-brace expressions are Python interpolations.
    html_content = f"""
<!DOCTYPE html>
<html>
<head>
<title>Codebase Analysis Report</title>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<style>
body {{
font-family: Arial, sans-serif;
margin: 30px;
background: #0f172a;
color: #e2e8f0;
}}
h1 {{
text-align: center;
color: #38bdf8;
}}
h2 {{
margin-bottom: 10px;
color: #60a5fa;
}}
.grid {{
display: grid;
grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
gap: 15px;
margin-bottom: 25px;
}}
.card {{
background: #1e293b;
padding: 20px;
border-radius: 12px;
box-shadow: 0 4px 12px rgba(0,0,0,0.4);
transition: transform 0.2s ease;
}}
.card:hover {{
transform: translateY(-4px);
}}
table {{
width: 100%;
border-collapse: collapse;
margin-top: 10px;
}}
th {{
text-align: left;
padding: 10px;
font-size: 14px;
color: #93c5fd;
border-bottom: 1px solid #334155;
}}
td {{
padding: 8px;
border-bottom: 1px solid #334155;
font-size: 13px;
color: #cbd5e1;
}}
tr:hover {{
background: #273449;
}}
ul {{
padding-left: 20px;
}}
li {{
margin-bottom: 6px;
}}
.chart-container {{
width: 420px;
height: 420px;
margin: auto;
}}
</style>
</head>
<body>
<h1>Codebase Analysis Dashboard</h1>
<!-- SUMMARY CARDS -->
<div class="grid">
<div class="card"><b>Files</b><br>{data['total_files']}</div>
<div class="card"><b>Folders</b><br>{data['total_folders']}</div>
<div class="card"><b>Total Lines</b><br>{data['total_lines']}</div>
<div class="card"><b>Code Lines</b><br>{data['code_lines']}</div>
</div>
<!-- LANGUAGE CHART -->
<div class="card">
<h2>Language Distribution</h2>
<div class="chart-container">
<canvas id="langChart"></canvas>
</div>
</div>
<!-- COMPLEXITY -->
<div class="card">
<h2>Complexity Metrics</h2>
<ul>
<li>Average lines per file: {complexity.get('avg_lines_per_file',0)}</li>
<li>Largest file: {complexity.get('max_file_lines',0)} lines</li>
<li>Max depth: {complexity.get('max_depth',0)}</li>
<li>Largest directory files: {complexity.get('largest_directory_files',0)}</li>
</ul>
</div>
<!-- LARGEST FILES -->
<div class="card">
<h2>Largest Files</h2>
<table>
<tr><th>File</th><th>Size</th></tr>
{''.join(f"<tr><td>{f}</td><td>{s}</td></tr>" for f,s in largest_files)}
</table>
</div>
<!-- DUPLICATES -->
<div class="card">
<h2>Duplicate Files ({len(duplicates)} groups)</h2>
<table>
<tr><th>Group</th><th>Files</th></tr>
{''.join(f"<tr><td>{i+1}</td><td>{'<br>'.join(g)}</td></tr>" for i,g in enumerate(duplicates))}
</table>
</div>
<!-- DEAD FILES -->
<div class="card">
<h2>Dead Files ({len(dead_files)})</h2>
<table>
{''.join(f"<tr><td>{f}</td></tr>" for f in dead_files)}
</table>
</div>
<script>
const ctx = document.getElementById('langChart');
new Chart(ctx, {{
type: 'pie',
data: {{
labels: {json.dumps(labels)},
datasets: [{{
data: {json.dumps(values)},
backgroundColor: [
"#38bdf8",
"#6366f1",
"#f59e0b",
"#ef4444",
"#10b981",
"#a855f7",
"#f472b6"
],
borderColor: "#0f172a"
}}]
}},
options: {{
maintainAspectRatio: false,
plugins: {{
legend: {{
labels: {{
color: "#e2e8f0"
}}
}}
}}
}}
}});
</script>
</body>
</html>
"""
    with open(path, "w", encoding="utf-8") as f:
        f.write(html_content)
    # NOTE(review): auto-opening a browser on every call is a side effect
    # callers cannot disable — consider making it opt-in.
    webbrowser.open("file://" + os.path.abspath(path))
@@ -0,0 +1,28 @@
1
def write_report(report_path, data):
    """
    Write a plain-text summary of the analysis *data* to *report_path*.
    """
    with open(report_path, "w", encoding="utf-8") as out:
        emit = out.write

        emit("CODEBASE ANALYSIS REPORT\n")
        emit("=" * 30 + "\n\n")

        emit(f"Total files: {data['total_files']}\n")
        emit(f"Total folders: {data['total_folders']}\n\n")

        emit("Line Statistics:\n")
        emit(f"  Total lines: {data['total_lines']}\n")
        emit(f"  Empty lines: {data['empty_lines']}\n")
        emit(f"  Code lines: {data['code_lines']}\n\n")

        emit("Languages Used:\n")
        for language, count in data["languages"].items():
            emit(f"  {language}: {count}\n")

        emit("\nLargest Files:\n")
        for file_path, size in data["largest_files"]:
            emit(f"  {file_path} - {size} bytes\n")

        emit("\nEmpty Files:\n")
        for file_path in data["empty_files"]:
            emit(f"  {file_path}\n")

        emit("\nSmall Files:\n")
        for file_path, line_count in data["small_files"]:
            emit(f"  {file_path} ({line_count} lines)\n")
File without changes
@@ -0,0 +1,7 @@
1
+ Metadata-Version: 2.4
2
+ Name: codescope-cli
3
+ Version: 1.0.0
4
+ Summary: CLI tool to analyze, inspect and visualize codebases
5
+ Author: Aditya Seswani
6
+ Requires-Python: >=3.8
7
+ Description-Content-Type: text/markdown
@@ -0,0 +1,25 @@
1
+ pyproject.toml
2
+ codebase_analyzer/constants.py
3
+ codebase_analyzer/main.py
4
+ codebase_analyzer/utils.py
5
+ codebase_analyzer/analyzer/__init__.py
6
+ codebase_analyzer/analyzer/complexity.py
7
+ codebase_analyzer/analyzer/duplicates.py
8
+ codebase_analyzer/analyzer/empty_finder.py
9
+ codebase_analyzer/analyzer/file_stats.py
10
+ codebase_analyzer/analyzer/language_detector.py
11
+ codebase_analyzer/analyzer/line_counter.py
12
+ codebase_analyzer/analyzer/pretty_output.py
13
+ codebase_analyzer/analyzer/scanner.py
14
+ codebase_analyzer/cli/__init__.py
15
+ codebase_analyzer/cli/args.py
16
+ codebase_analyzer/cli/commands.py
17
+ codebase_analyzer/reports/__init__.py
18
+ codebase_analyzer/reports/dependency_graph.py
19
+ codebase_analyzer/reports/html_report.py
20
+ codebase_analyzer/reports/reports.py
21
+ codescope_cli.egg-info/PKG-INFO
22
+ codescope_cli.egg-info/SOURCES.txt
23
+ codescope_cli.egg-info/dependency_links.txt
24
+ codescope_cli.egg-info/entry_points.txt
25
+ codescope_cli.egg-info/top_level.txt
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ codescope = codebase_analyzer.main:main
@@ -0,0 +1,2 @@
1
+ codebase_analyzer
2
+ dist
@@ -0,0 +1,19 @@
1
+ [build-system]
2
+ requires = ["setuptools>=61.0"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "codescope-cli"
7
+ version = "1.0.0"
8
+ description = "CLI tool to analyze, inspect and visualize codebases"
9
+ readme = "README.md"
10
+ requires-python = ">=3.8"
11
+ authors = [
12
+ { name = "Aditya Seswani" }
13
+ ]
14
+
15
+ [project.scripts]
16
+ codescope = "codebase_analyzer.main:main"
17
+
18
+ [tool.setuptools.packages.find]
19
+ where = ["."]
+ include = ["codebase_analyzer*"]
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+