gitputra 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gitputra-0.1.0/PKG-INFO +18 -0
- gitputra-0.1.0/gitputra/cli.py +132 -0
- gitputra-0.1.0/gitputra/corelogic.py +409 -0
- gitputra-0.1.0/gitputra/fonts/NotoSans-Regular.ttf +0 -0
- gitputra-0.1.0/gitputra/fonts/NotoSansBengali-Regular.ttf +0 -0
- gitputra-0.1.0/gitputra/fonts/NotoSansDevanagari-Regular.ttf +0 -0
- gitputra-0.1.0/gitputra.egg-info/PKG-INFO +18 -0
- gitputra-0.1.0/gitputra.egg-info/SOURCES.txt +12 -0
- gitputra-0.1.0/gitputra.egg-info/dependency_links.txt +1 -0
- gitputra-0.1.0/gitputra.egg-info/entry_points.txt +2 -0
- gitputra-0.1.0/gitputra.egg-info/requires.txt +10 -0
- gitputra-0.1.0/gitputra.egg-info/top_level.txt +1 -0
- gitputra-0.1.0/pyproject.toml +37 -0
- gitputra-0.1.0/setup.cfg +4 -0
gitputra-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: gitputra
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: AI-powered GitHub repo analyzer CLI — analyze, chat, and visualize any codebase.
|
|
5
|
+
Author-email: Adityava Gangopadhyay <adityava49cse@gmail.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Requires-Python: >=3.10
|
|
8
|
+
Description-Content-Type: text/markdown
|
|
9
|
+
Requires-Dist: click>=8.1
|
|
10
|
+
Requires-Dist: python-dotenv>=1.0
|
|
11
|
+
Requires-Dist: gitpython>=3.1
|
|
12
|
+
Requires-Dist: chromadb>=0.5
|
|
13
|
+
Requires-Dist: matplotlib>=3.8
|
|
14
|
+
Requires-Dist: networkx>=3.2
|
|
15
|
+
Requires-Dist: reportlab>=4.0
|
|
16
|
+
Requires-Dist: google-generativeai>=0.5
|
|
17
|
+
Requires-Dist: openai>=1.0
|
|
18
|
+
Requires-Dist: anthropic>=0.25
|
|
@@ -0,0 +1,132 @@
|
|
|
1
|
+
"""
|
|
2
|
+
cli.py — Gitputra CLI entry point.
|
|
3
|
+
Usage:
|
|
4
|
+
gitputra analyze <url> --ai gemini --key sk-xxx
|
|
5
|
+
gitputra chat --ai gemini --key sk-xxx
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import click
|
|
9
|
+
from dotenv import load_dotenv
|
|
10
|
+
from . import corelogic as core
|
|
11
|
+
|
|
12
|
+
load_dotenv()
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@click.group()
@click.version_option("0.1.0", prog_name="gitputra")
def main() -> None:
    """🔍 Gitputra — AI-powered GitHub repo analyzer."""
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
# ─────────────────────────────────────────────
|
|
23
|
+
# ANALYZE
|
|
24
|
+
# ─────────────────────────────────────────────
|
|
25
|
+
@main.command()
@click.argument("url")
@click.option(
    "--ai", "ai_choice",
    type=click.Choice(["gemini", "openai", "claude"], case_sensitive=False),
    default="gemini", show_default=True,
    help="AI provider to use."
)
@click.option(
    "--key",
    envvar="API_KEY", required=True,
    help="API key (or set API_KEY in .env)."
)
@click.option(
    "--lang", default="English", show_default=True,
    help="Output language for the report."
)
@click.option(
    "--no-pdf", is_flag=True, default=False,
    help="Skip PDF generation."
)
@click.option(
    "--no-diagram", is_flag=True, default=False,
    help="Skip diagram generation."
)
def analyze(url, ai_choice, key, lang, no_pdf, no_diagram):
    """Clone a GitHub repo and generate an AI analysis report.

    \b
    Example:
        gitputra analyze https://github.com/user/repo --ai gemini --key AIza...
    """
    core.init(ai_choice.lower(), key, lang)

    # Clone (or reuse) the repo, then collect its supported source files.
    source_files = core.load_files(core.clone_repo(url))
    if not source_files:
        click.secho("❌ No supported source files found.", fg="red")
        raise SystemExit(1)

    # Persist embeddings first so `gitputra chat` works after this run.
    core.store_chunks(source_files)
    report = core.analyze_repo(source_files)
    click.echo("\n" + report + "\n")

    if not no_diagram:
        core.generate_diagram(source_files)
        core.generate_mermaid(source_files)
    if not no_pdf:
        core.generate_pdf(report)

    click.secho("✅ Done! Outputs saved in ./output/", fg="green")
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
# ─────────────────────────────────────────────
|
|
82
|
+
# CHAT
|
|
83
|
+
# ─────────────────────────────────────────────
|
|
84
|
+
@main.command()
@click.option(
    "--ai", "ai_choice",
    type=click.Choice(["gemini", "openai", "claude"], case_sensitive=False),
    default="gemini", show_default=True,
)
@click.option(
    "--key",
    envvar="API_KEY", required=True,
    help="API key (or set API_KEY in .env)."
)
@click.option("--lang", default="English", show_default=True)
def chat(ai_choice, key, lang):
    """Start an interactive RAG chat about the last analyzed repo.

    \b
    Example:
        gitputra chat --ai gemini --key AIza...
    Type 'exit' or Ctrl+C to quit.
    """
    core.init(ai_choice.lower(), key, lang)

    click.secho("\n💬 Chat mode — ask anything about the repo. Type 'exit' to quit.\n", fg="cyan")

    # REPL: loop until the user interrupts or types an exit word.
    while True:
        try:
            question = click.prompt(">>", prompt_suffix=" ")
        except (KeyboardInterrupt, EOFError):
            click.echo("\nBye!")
            return

        if question.strip().lower() in ("exit", "quit", "q"):
            click.echo("Bye!")
            return

        click.echo(f"\n{core.query_rag(question)}\n")
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
# ─────────────────────────────────────────────
|
|
124
|
+
# CLEAR DB
|
|
125
|
+
# ─────────────────────────────────────────────
|
|
126
|
+
@main.command("clear-db")
def clear_db():
    """Wipe the local ChromaDB collection."""
    # Imported lazily so the other commands don't pay chromadb's startup cost.
    import chromadb

    client = chromadb.PersistentClient(path="./chroma_db")
    try:
        client.delete_collection("repo")
    except Exception:
        # delete_collection raises when the collection was never created;
        # "nothing to clear" should not crash the CLI with a traceback.
        click.secho("ℹ️ No ChromaDB collection to clear.", fg="yellow")
        return
    click.secho("🗑️ ChromaDB collection cleared.", fg="yellow")
|
|
@@ -0,0 +1,409 @@
|
|
|
1
|
+
"""
|
|
2
|
+
corelogic.py — All core logic for gitputra.
|
|
3
|
+
AI setup, repo cloning, file loading, chunking,
|
|
4
|
+
embedding, ChromaDB, analysis, diagrams, PDF.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import os
|
|
8
|
+
import uuid
|
|
9
|
+
import time
|
|
10
|
+
import importlib
|
|
11
|
+
|
|
12
|
+
from git import Repo
|
|
13
|
+
import chromadb
|
|
14
|
+
import matplotlib
|
|
15
|
+
matplotlib.use("Agg")
|
|
16
|
+
import matplotlib.pyplot as plt
|
|
17
|
+
import networkx as nx
|
|
18
|
+
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image
|
|
19
|
+
from reportlab.lib.styles import ParagraphStyle
|
|
20
|
+
from reportlab.lib import colors
|
|
21
|
+
from reportlab.pdfbase import pdfmetrics
|
|
22
|
+
from reportlab.pdfbase.ttfonts import TTFont
|
|
23
|
+
|
|
24
|
+
# ─────────────────────────────────────────────
# GLOBALS
# ─────────────────────────────────────────────
# Mutable module-level state, populated by init(). The module acts as a
# single-provider singleton: call init() before any other function.
_ai_config = {}        # active provider entry from AI_OPTIONS (set in init)
_ai_lib = None         # imported provider module (set in init)
_claude_client = None  # Anthropic client instance when provider is "claude"
_language = "English"  # output language used in every generated prompt
_ai_disabled = False   # set by safe_generate() after a quota/429 error

# Persistent on-disk vector store shared by store_chunks() / query_rag().
CHROMA_CLIENT = chromadb.PersistentClient(path="./chroma_db")
COLLECTION = CHROMA_CLIENT.get_or_create_collection("repo")

# File extensions considered "source code" by load_files().
SUPPORTED_EXT = (".py", ".js", ".ts", ".java", ".cpp", ".c", ".go", ".rs")

# Per-provider configuration: importable module name plus model identifiers.
# "embed_model" is None for Claude — embed_text() falls back to a zero vector.
AI_OPTIONS = {
    "gemini": {
        "name": "Gemini",
        "lib": "google.generativeai",
        "text_model": "models/gemini-2.5-flash",
        "embed_model": "models/text-embedding-004",
    },
    "openai": {
        "name": "OpenAI",
        "lib": "openai",
        "text_model": "gpt-4",
        "embed_model": "text-embedding-3-large",
    },
    "claude": {
        "name": "Claude",
        "lib": "anthropic",
        "text_model": "claude-opus-4-5",
        "embed_model": None,
    },
}
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
# ─────────────────────────────────────────────
|
|
61
|
+
# INIT
|
|
62
|
+
# ─────────────────────────────────────────────
|
|
63
|
+
def init(ai_choice: str, api_key: str, language: str = "English"):
    """Bootstrap AI client and global settings. Call before anything else.

    Args:
        ai_choice: Provider key — one of the keys of AI_OPTIONS
            ("gemini", "openai", "claude").
        api_key: API key for the chosen provider.
        language: Output language for all generated text.

    Raises:
        ValueError: If *ai_choice* is not a supported provider.
    """
    global _ai_config, _ai_lib, _claude_client, _language, _ai_disabled

    if ai_choice not in AI_OPTIONS:
        # Fail with a clear message instead of an opaque KeyError.
        raise ValueError(
            f"Unsupported AI provider {ai_choice!r}; expected one of {sorted(AI_OPTIONS)}"
        )

    _language = language
    _ai_config = AI_OPTIONS[ai_choice]
    _ai_lib = importlib.import_module(_ai_config["lib"])
    # A fresh init() starts a fresh session: clear any quota lock-out left
    # behind by safe_generate() in an earlier run of the same process.
    _ai_disabled = False

    if ai_choice == "gemini":
        _ai_lib.configure(api_key=api_key)

    elif ai_choice == "openai":
        _ai_lib.api_key = api_key

    elif ai_choice == "claude":
        _claude_client = _ai_lib.Anthropic(api_key=api_key)
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
# ─────────────────────────────────────────────
|
|
82
|
+
# AI — GENERATE
|
|
83
|
+
# ─────────────────────────────────────────────
|
|
84
|
+
def safe_generate(prompt: str, retries: int = 2) -> str | None:
    """Generate text with the configured provider, retrying transient failures.

    Args:
        prompt: The full prompt to send.
        retries: Total number of attempts before giving up.

    Returns:
        The generated text, or None when all attempts failed or the AI has
        been disabled for this session (after a quota error).
    """
    global _ai_disabled

    if _ai_disabled:
        return None

    for attempt in range(retries):
        try:
            name = _ai_config["name"]

            if name == "Gemini":
                model = _ai_lib.GenerativeModel(_ai_config["text_model"])
                res = model.generate_content(prompt)
                return res.text if res and hasattr(res, "text") else None

            elif name == "OpenAI":
                # openai>=1.0 (pinned in pyproject) removed ChatCompletion;
                # use the 1.x module-level chat.completions surface instead.
                res = _ai_lib.chat.completions.create(
                    model=_ai_config["text_model"],
                    messages=[{"role": "user", "content": prompt}],
                )
                return res.choices[0].message.content

            elif name == "Claude":
                res = _claude_client.messages.create(
                    model=_ai_config["text_model"],
                    max_tokens=2048,
                    messages=[{"role": "user", "content": prompt}],
                )
                return res.content[0].text

        except Exception as e:
            # Quota exhaustion is permanent for the session — stop retrying
            # and disable every future call instead of hammering the API.
            if "429" in str(e) or "quota" in str(e).lower():
                print("⚠️ Quota exceeded → AI disabled for this session")
                _ai_disabled = True
                return None
            if attempt < retries - 1:
                # Back off only when another attempt is coming; the original
                # slept pointlessly after the final failure.
                time.sleep(2)

    return None
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
# ─────────────────────────────────────────────
|
|
125
|
+
# AI — EMBED
|
|
126
|
+
# ─────────────────────────────────────────────
|
|
127
|
+
def embed_text(text: str) -> list[float]:
    """Embed *text* with the configured provider.

    Falls back to a 768-dimension zero vector when the provider has no
    embedding API (Claude) or when the call fails, so callers never branch.
    """
    try:
        name = _ai_config["name"]

        if name == "Gemini":
            return _ai_lib.embed_content(
                model=_ai_config["embed_model"],
                content=text,
            )["embedding"]

        elif name == "OpenAI":
            # openai>=1.0 (pinned in pyproject) removed Embedding.create;
            # the 1.x surface returns an object, not a dict.
            res = _ai_lib.embeddings.create(
                model=_ai_config["embed_model"],
                input=text,
            )
            return res.data[0].embedding

        # Claude has no embedding API — use zero vector as fallback
        return [0.0] * 768

    except Exception:
        return [0.0] * 768
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
# ─────────────────────────────────────────────
|
|
151
|
+
# REPO — CLONE & LOAD
|
|
152
|
+
# ─────────────────────────────────────────────
|
|
153
|
+
def clone_repo(url: str) -> str:
    """Clone *url* under ./repos (reusing an existing checkout) and return its path."""
    os.makedirs("repos", exist_ok=True)
    # Derive the checkout directory from the last URL segment, minus ".git".
    repo_name = url.rstrip("/").split("/")[-1].replace(".git", "")
    path = os.path.join("repos", repo_name)

    if os.path.exists(path):
        print(f"✅ Repo already exists at {path}")
    else:
        print(f"📥 Cloning {url} ...")
        Repo.clone_from(url, path)

    return path
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
def load_files(repo_path: str) -> list[tuple[str, str]]:
    """Read every supported source file under *repo_path*.

    Returns:
        A list of (filename, content) pairs. Empty or unreadable files are
        skipped; hidden directories (e.g. .git) are never descended into.
    """
    files_data: list[tuple[str, str]] = []

    for root, dirs, files in os.walk(repo_path):
        # Prune hidden dirs (like .git) in place so os.walk never enters
        # them. The old check split `root` on os.sep and skipped any path
        # with a dot-prefixed component — which wrongly discarded EVERY
        # file when repo_path itself looked like "./repos/x".
        dirs[:] = [d for d in dirs if not d.startswith(".")]
        for file in files:
            if not file.endswith(SUPPORTED_EXT):
                continue
            try:
                with open(os.path.join(root, file), "r", errors="ignore") as f:
                    content = f.read().strip()
                if content:
                    files_data.append((file, content))
            except OSError:
                # Best-effort: silently skip files that cannot be opened.
                pass

    print(f"📂 Loaded {len(files_data)} source files")
    return files_data
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
# ─────────────────────────────────────────────
|
|
189
|
+
# CHUNKING
|
|
190
|
+
# ─────────────────────────────────────────────
|
|
191
|
+
def chunk_by_file(files: list[tuple[str, str]], max_chars: int = 3000) -> list[str]:
    """Greedily pack files into chunks of roughly *max_chars* characters.

    Each file is prefixed with a "# FILE: <name>" header. A single file
    larger than *max_chars* still becomes its own (oversized) chunk.
    """
    chunks: list[str] = []
    buffer = ""

    for fname, content in files:
        piece = f"\n# FILE: {fname}\n{content}\n"
        if len(buffer) + len(piece) <= max_chars:
            buffer += piece
        else:
            # Flush the current chunk (if any) and start a new one.
            if buffer:
                chunks.append(buffer)
            buffer = piece

    if buffer:
        chunks.append(buffer)

    return chunks
|
|
207
|
+
|
|
208
|
+
|
|
209
|
+
# ─────────────────────────────────────────────
|
|
210
|
+
# CHROMADB — STORE & QUERY
|
|
211
|
+
# ─────────────────────────────────────────────
|
|
212
|
+
def store_chunks(files: list[tuple[str, str]]):
    """Embed each file and persist it in the shared Chroma collection."""
    print("💾 Storing embeddings...")
    for fname, content in files:
        # One document per file, keyed by a random UUID, tagged with its name.
        COLLECTION.add(
            documents=[content],
            embeddings=[embed_text(content)],
            ids=[str(uuid.uuid4())],
            metadatas=[{"file": fname}],
        )
|
|
222
|
+
|
|
223
|
+
|
|
224
|
+
def query_rag(question: str) -> str:
    """Answer *question* using the 5 most similar stored chunks as context."""
    hits = COLLECTION.query(query_embeddings=[embed_text(question)], n_results=5)
    context = "\n".join(hits["documents"][0])

    prompt = f"""Answer in {_language}.

Context from codebase:
{context}

Question:
{question}
"""
    return safe_generate(prompt) or "⚠️ AI unavailable."
|
|
238
|
+
|
|
239
|
+
|
|
240
|
+
# ─────────────────────────────────────────────
|
|
241
|
+
# ANALYSIS
|
|
242
|
+
# ─────────────────────────────────────────────
|
|
243
|
+
def analyze_repo(files: list[tuple[str, str]]) -> str:
    """Summarize the repo chunk-by-chunk, then synthesize one structured report."""
    print("\n🔍 Analyzing repo...\n")
    chunks = chunk_by_file(files)

    # Map phase: one summary per chunk; failed generations are dropped.
    summaries = []
    for i, chunk in enumerate(chunks, 1):
        print(f" Chunk {i}/{len(chunks)}")
        summary = safe_generate(f"""Summarize this code chunk in {_language}.
Focus on logic, structure, and key parts.

Code:
{chunk}
""")
        if summary:
            summaries.append(summary)

    if not summaries:
        return "⚠️ AI unavailable — no analysis generated."

    # Reduce phase: fold all chunk summaries into a single report.
    report = safe_generate(f"""Respond ONLY in {_language}.

Using the summaries below, write ONE structured report with these sections:

### Summary
### Architecture
### Tech Stack
### Issues
### Suggested Improvements

No repetition. Be concise.

Summaries:
{chr(10).join(summaries)}
""")
    return report or "⚠️ Final synthesis failed."
|
|
277
|
+
|
|
278
|
+
|
|
279
|
+
# ─────────────────────────────────────────────
|
|
280
|
+
# DIAGRAMS
|
|
281
|
+
# ─────────────────────────────────────────────
|
|
282
|
+
def _clean(name: str) -> str:
|
|
283
|
+
return (
|
|
284
|
+
name.replace(".py", "_py")
|
|
285
|
+
.replace(".", "_")
|
|
286
|
+
.replace("-", "_")
|
|
287
|
+
.replace("/", "_")
|
|
288
|
+
)
|
|
289
|
+
|
|
290
|
+
|
|
291
|
+
def generate_diagram(files: list[tuple[str, str]]):
    """Render a file → import-line dependency graph to output/diagram.png."""
    graph = nx.DiGraph()
    import_prefixes = ("import ", "from ", "#include")

    # Each import-like line becomes an edge from the file to the raw line.
    for fname, content in files:
        for raw in content.splitlines():
            stripped = raw.strip()
            if stripped.startswith(import_prefixes):
                graph.add_edge(fname, stripped)

    os.makedirs("output", exist_ok=True)
    plt.figure(figsize=(12, 9))
    nx.draw(graph, with_labels=True, node_size=800, font_size=7, arrows=True)
    plt.tight_layout()
    plt.savefig("output/diagram.png", dpi=150)
    plt.close()
    print("📊 Saved output/diagram.png")
|
|
307
|
+
|
|
308
|
+
|
|
309
|
+
def generate_mermaid(files: list[tuple[str, str]]):
    """Write a Mermaid "graph TD" of file → imported-module edges to output/mermaid.txt."""

    def _module_of(stripped: str):
        # Extract the imported module from one stripped line, or None.
        if stripped.startswith("import "):
            tokens = stripped.replace(",", " ").split()
            return _clean(tokens[1]) if len(tokens) >= 2 else None
        if stripped.startswith("from "):
            tokens = stripped.split()
            return _clean(tokens[1]) if len(tokens) >= 2 else None
        if stripped.startswith("#include"):
            return _clean(stripped.replace("#include", "").strip())
        return None

    lines = ["graph TD"]
    seen = set()

    for fname, content in files:
        file_node = _clean(fname)
        for raw in content.splitlines():
            module = _module_of(raw.strip())
            if module:
                edge = f"{file_node} --> {module}"
                if edge not in seen:
                    seen.add(edge)
                    lines.append(f" {edge}")

    os.makedirs("output", exist_ok=True)
    with open("output/mermaid.txt", "w", encoding="utf-8") as f:
        f.write("\n".join(lines))
    print("🗺️ Saved output/mermaid.txt")
|
|
342
|
+
|
|
343
|
+
|
|
344
|
+
# ─────────────────────────────────────────────
|
|
345
|
+
# PDF
|
|
346
|
+
# ─────────────────────────────────────────────
|
|
347
|
+
_BASE_DIR = os.path.dirname(os.path.abspath(__file__))
|
|
348
|
+
|
|
349
|
+
def _register_fonts():
    """Register the bundled Noto fonts with reportlab, skipping missing files."""
    font_dir = os.path.join(_BASE_DIR, "fonts")
    for alias, filename in (
        ("NotoLatin", "NotoSans-Regular.ttf"),
        ("NotoBengali", "NotoSansBengali-Regular.ttf"),
        ("NotoDevanagari", "NotoSansDevanagari-Regular.ttf"),
    ):
        font_path = os.path.join(font_dir, filename)
        if os.path.exists(font_path):
            pdfmetrics.registerFont(TTFont(alias, font_path))
|
|
360
|
+
|
|
361
|
+
|
|
362
|
+
def _get_font(language: str) -> str:
    """Pick the registered font name for *language*, falling back to Helvetica."""
    lang = language.lower()
    if lang in ("bengali", "bangla"):
        candidate = "NotoBengali"
    elif lang == "hindi":
        candidate = "NotoDevanagari"
    else:
        candidate = "NotoLatin"

    # The bundled font may not have been registered (missing .ttf file);
    # getFont raises KeyError in that case, so fall back to a built-in.
    try:
        pdfmetrics.getFont(candidate)
    except KeyError:
        return "Helvetica"
    return candidate
|
|
376
|
+
|
|
377
|
+
|
|
378
|
+
def generate_pdf(text: str):
    """Render the analysis *text* ("###"-delimited sections) plus the diagram
    image into output/report.pdf."""
    os.makedirs("output", exist_ok=True)
    _register_fonts()
    font = _get_font(_language)

    title_style = ParagraphStyle("title", fontName=font, fontSize=20, textColor=colors.darkblue)
    head_style = ParagraphStyle("head", fontName=font, fontSize=14, textColor=colors.blue)
    body_style = ParagraphStyle("body", fontName=font, fontSize=11)

    story = [
        Paragraph(f"Repo Analysis Report ({_language})", title_style),
        Spacer(1, 12),
    ]

    # Each "###"-delimited section: first non-empty line is the heading,
    # the rest (joined with <br/>) becomes the body paragraph.
    for section in text.split("###"):
        section_lines = section.strip().splitlines()
        if not section_lines or not section_lines[0].strip():
            continue
        story.append(Paragraph(section_lines[0].strip(), head_style))
        body = "<br/>".join(l for l in section_lines[1:] if l.strip())
        if body:
            story.append(Paragraph(body, body_style))
        story.append(Spacer(1, 10))

    diagram_path = "output/diagram.png"
    if os.path.exists(diagram_path):
        story.append(Image(diagram_path, width=420, height=300))

    SimpleDocTemplate("output/report.pdf").build(story)
    print("📄 Saved output/report.pdf")
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: gitputra
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: AI-powered GitHub repo analyzer CLI — analyze, chat, and visualize any codebase.
|
|
5
|
+
Author-email: Adityava Gangopadhyay <adityava49cse@gmail.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Requires-Python: >=3.10
|
|
8
|
+
Description-Content-Type: text/markdown
|
|
9
|
+
Requires-Dist: click>=8.1
|
|
10
|
+
Requires-Dist: python-dotenv>=1.0
|
|
11
|
+
Requires-Dist: gitpython>=3.1
|
|
12
|
+
Requires-Dist: chromadb>=0.5
|
|
13
|
+
Requires-Dist: matplotlib>=3.8
|
|
14
|
+
Requires-Dist: networkx>=3.2
|
|
15
|
+
Requires-Dist: reportlab>=4.0
|
|
16
|
+
Requires-Dist: google-generativeai>=0.5
|
|
17
|
+
Requires-Dist: openai>=1.0
|
|
18
|
+
Requires-Dist: anthropic>=0.25
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
pyproject.toml
|
|
2
|
+
gitputra/cli.py
|
|
3
|
+
gitputra/corelogic.py
|
|
4
|
+
gitputra.egg-info/PKG-INFO
|
|
5
|
+
gitputra.egg-info/SOURCES.txt
|
|
6
|
+
gitputra.egg-info/dependency_links.txt
|
|
7
|
+
gitputra.egg-info/entry_points.txt
|
|
8
|
+
gitputra.egg-info/requires.txt
|
|
9
|
+
gitputra.egg-info/top_level.txt
|
|
10
|
+
gitputra/fonts/NotoSans-Regular.ttf
|
|
11
|
+
gitputra/fonts/NotoSansBengali-Regular.ttf
|
|
12
|
+
gitputra/fonts/NotoSansDevanagari-Regular.ttf
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
gitputra
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=61"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "gitputra"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "AI-powered GitHub repo analyzer CLI — analyze, chat, and visualize any codebase."
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
license = { text = "MIT" }
|
|
11
|
+
requires-python = ">=3.10"
|
|
12
|
+
authors = [
|
|
13
|
+
{ name = "Adityava Gangopadhyay", email = "adityava49cse@gmail.com" }
|
|
14
|
+
]
|
|
15
|
+
|
|
16
|
+
dependencies = [
|
|
17
|
+
"click>=8.1",
|
|
18
|
+
"python-dotenv>=1.0",
|
|
19
|
+
"gitpython>=3.1",
|
|
20
|
+
"chromadb>=0.5",
|
|
21
|
+
"matplotlib>=3.8",
|
|
22
|
+
"networkx>=3.2",
|
|
23
|
+
"reportlab>=4.0",
|
|
24
|
+
"google-generativeai>=0.5",
|
|
25
|
+
"openai>=1.0",
|
|
26
|
+
"anthropic>=0.25",
|
|
27
|
+
]
|
|
28
|
+
|
|
29
|
+
[project.scripts]
|
|
30
|
+
gitputra = "gitputra.cli:main"
|
|
31
|
+
|
|
32
|
+
[tool.setuptools.packages.find]
|
|
33
|
+
where = ["."]
|
|
34
|
+
include = ["gitputra*"]
|
|
35
|
+
|
|
36
|
+
[tool.setuptools.package-data]
|
|
37
|
+
gitputra = ["fonts/*.ttf"]
|
gitputra-0.1.0/setup.cfg
ADDED