fancygit 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fancygit-1.0.0.dist-info/METADATA +169 -0
- fancygit-1.0.0.dist-info/RECORD +29 -0
- fancygit-1.0.0.dist-info/WHEEL +5 -0
- fancygit-1.0.0.dist-info/entry_points.txt +2 -0
- fancygit-1.0.0.dist-info/top_level.txt +2 -0
- src/__init__.py +1 -0
- src/colors.py +260 -0
- src/git_error.py +55 -0
- src/git_error_parser.py +43 -0
- src/git_insights.py +304 -0
- src/git_runner.py +20 -0
- src/loading_animation.py +167 -0
- src/merge_conflict.py +27 -0
- src/mermaid_export.py +430 -0
- src/ollama_client.py +142 -0
- src/output_colorizer.py +358 -0
- src/repo_state.py +29 -0
- src/utils.py +0 -0
- tests/README.md +186 -0
- tests/__init__.py +0 -0
- tests/conftest.py +61 -0
- tests/test_conflict_parser_integration.py +65 -0
- tests/test_fancygit_advanced.py +504 -0
- tests/test_fancygit_commands.py +507 -0
- tests/test_fancygit_integration.py +158 -0
- tests/test_fancygit_workflows.py +441 -0
- tests/test_git_error.py +74 -0
- tests/test_git_error_parser.py +129 -0
- tests/test_git_runner.py +118 -0
src/mermaid_export.py
ADDED
|
@@ -0,0 +1,430 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import os
|
|
3
|
+
import re
|
|
4
|
+
import textwrap
|
|
5
|
+
from datetime import datetime
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class MermaidExporter:
|
|
9
|
+
def __init__(self, runner):
    """Create an exporter.

    Args:
        runner: object exposing ``run_git_command(args)`` and returning a
            ``(returncode, stdout, stderr)`` tuple; all git access goes
            through it (see ``_git``).
    """
    self.runner = runner
|
|
11
|
+
|
|
12
|
+
def _git(self, args):
    """Run a git command through the injected runner.

    Args:
        args: list of git CLI arguments, e.g. ``['log', '-n40']``.

    Returns:
        Whatever the runner returns; callers unpack it as
        ``(returncode, stdout, stderr)``.
    """
    return self.runner.run_git_command(args)
|
|
14
|
+
|
|
15
|
+
def _sanitize_node_id(self, value: str) -> str:
|
|
16
|
+
safe = []
|
|
17
|
+
for ch in value:
|
|
18
|
+
if ch.isalnum() or ch == '_':
|
|
19
|
+
safe.append(ch)
|
|
20
|
+
else:
|
|
21
|
+
safe.append('_')
|
|
22
|
+
out = ''.join(safe)
|
|
23
|
+
if not out:
|
|
24
|
+
out = 'node'
|
|
25
|
+
if out[0].isdigit():
|
|
26
|
+
out = f'n_{out}'
|
|
27
|
+
return out
|
|
28
|
+
|
|
29
|
+
def _escape_label(self, value: str) -> str:
    """Make *value* safe inside a double-quoted Mermaid label by
    replacing double quotes with single quotes."""
    return value.replace('"', "'")
|
|
31
|
+
|
|
32
|
+
def _parse_gitignore(self, repo_root: str) -> set[str]:
|
|
33
|
+
"""Parse .gitignore file and return a set of patterns to ignore"""
|
|
34
|
+
gitignore_path = os.path.join(repo_root, '.gitignore')
|
|
35
|
+
ignore_patterns = set()
|
|
36
|
+
|
|
37
|
+
if os.path.exists(gitignore_path):
|
|
38
|
+
with open(gitignore_path, 'r') as f:
|
|
39
|
+
for line in f:
|
|
40
|
+
line = line.strip()
|
|
41
|
+
# Skip empty lines and comments
|
|
42
|
+
if not line or line.startswith('#'):
|
|
43
|
+
continue
|
|
44
|
+
# Remove trailing slashes for directory matching
|
|
45
|
+
if line.endswith('/'):
|
|
46
|
+
line = line[:-1]
|
|
47
|
+
ignore_patterns.add(line)
|
|
48
|
+
|
|
49
|
+
# Always add these essential directories for safety
|
|
50
|
+
ignore_patterns.update({'.git', '__pycache__', '.venv', 'venv', 'node_modules', '.fancygit'})
|
|
51
|
+
return ignore_patterns
|
|
52
|
+
|
|
53
|
+
def _discover_python_files(self, repo_root: str) -> list[str]:
|
|
54
|
+
out: list[str] = []
|
|
55
|
+
ignore_patterns = self._parse_gitignore(repo_root)
|
|
56
|
+
|
|
57
|
+
for root, dirs, files in os.walk(repo_root):
|
|
58
|
+
dirs[:] = [
|
|
59
|
+
d for d in dirs
|
|
60
|
+
if d not in ignore_patterns
|
|
61
|
+
]
|
|
62
|
+
|
|
63
|
+
for fn in files:
|
|
64
|
+
if fn.endswith('.py'):
|
|
65
|
+
out.append(os.path.join(root, fn))
|
|
66
|
+
return out
|
|
67
|
+
|
|
68
|
+
def _module_name_from_path(self, repo_root: str, path: str) -> str:
|
|
69
|
+
rel = os.path.relpath(path, repo_root)
|
|
70
|
+
rel = rel.replace(os.sep, '/')
|
|
71
|
+
if rel.endswith('.py'):
|
|
72
|
+
rel = rel[:-3]
|
|
73
|
+
if rel.endswith('/__init__'):
|
|
74
|
+
rel = rel[: -len('/__init__')]
|
|
75
|
+
return rel.replace('/', '.')
|
|
76
|
+
|
|
77
|
+
def _parse_imports_from_source(self, source: str, current_module: str) -> set[str]:
    """Extract imported module names from Python source using line regexes.

    This is a lightweight scanner, not an AST parse: only lines that start
    with ``import x`` or ``from x import ...`` after stripping are
    recognized, so parenthesized/multiline import forms may be missed.

    Args:
        source: text of a Python file.
        current_module: dotted module name of that file; used to resolve
            relative ``from .x import ...`` imports.

    Returns:
        Set of dotted module names the file appears to import.
    """
    imports: set[str] = set()
    for raw in source.splitlines():
        line = raw.strip()
        if not line or line.startswith('#'):
            continue
        # "import a.b" — captures the first dotted name only, so
        # "import a, b" records just "a".
        m1 = re.match(r'^import\s+([a-zA-Z0-9_\.]+)', line)
        if m1:
            imports.add(m1.group(1))
            continue
        # "from a.b import x" / relative "from ..pkg import x"
        m2 = re.match(r'^from\s+([a-zA-Z0-9_\.]+)\s+import\s+', line)
        if m2:
            base = m2.group(1)
            if base.startswith('.'):
                # Resolve relative imports: each leading dot strips one
                # trailing component from current_module, then the rest
                # of the dotted path (if any) is appended.
                dots = len(base) - len(base.lstrip('.'))
                base_rest = base.lstrip('.')
                parts = current_module.split('.')
                if dots <= len(parts):
                    prefix = parts[: len(parts) - dots]
                    resolved = '.'.join([p for p in prefix if p] + ([base_rest] if base_rest else []))
                    if resolved:
                        imports.add(resolved)
            else:
                imports.add(base)
    return imports
|
|
102
|
+
|
|
103
|
+
def python_import_dependency_flowchart(self, repo_root: str) -> str:
    """Build a Mermaid ``flowchart LR`` of intra-repo Python import edges.

    Only edges whose target resolves to a module discovered inside
    *repo_root* are kept; external/stdlib imports are dropped.
    """
    paths = self._discover_python_files(repo_root)
    module_by_path = {p: self._module_name_from_path(repo_root, p) for p in paths}
    known_modules = set(module_by_path.values())

    edges: set[tuple[str, str]] = set()
    for path, module in module_by_path.items():
        try:
            with open(path, 'r', encoding='utf-8') as fh:
                text = fh.read()
        except Exception:
            # Unreadable file: skip it rather than abort the whole diagram.
            continue
        for target in self._parse_imports_from_source(text, module):
            if target in known_modules:
                edges.add((module, target))

    node = self._sanitize_node_id
    out = ['flowchart LR']
    # Declare every referenced module as a labelled node first.
    for mod in sorted({m for edge in edges for m in edge}):
        out.append(f' {node(mod)}["{self._escape_label(mod)}"]')
    # Then emit the importer --> imported edges in a stable order.
    for src_mod, dst_mod in sorted(edges):
        out.append(f' {node(src_mod)} --> {node(dst_mod)}')
    return '\n'.join(out) + '\n'
|
|
141
|
+
|
|
142
|
+
def repo_file_structure_flowchart(self, repo_root: str, max_nodes: int = 250) -> str:
    """Render the repository's directory tree as a Mermaid ``flowchart TB``.

    Directories named in .gitignore are pruned, at most 30 files are shown
    per directory (with a "+N more" node), and the whole diagram is capped
    at roughly *max_nodes* nodes, appending a truncation marker when hit.

    Args:
        repo_root: directory to walk.
        max_nodes: soft cap on emitted nodes.

    Returns:
        Mermaid flowchart source, newline-terminated.
    """
    lines = ['flowchart TB']
    ignore_patterns = self._parse_gitignore(repo_root)

    def skip_dir(name: str) -> bool:
        return name in ignore_patterns

    root_id = 'repo_root'
    root_label = self._escape_label(os.path.basename(os.path.abspath(repo_root)) or 'repo')
    lines.append(f' {root_id}["{root_label}"]')

    node_count = 1
    # Maps absolute directory path -> its Mermaid node id, so children can
    # attach to the node created when the directory was first listed.
    ids: dict[str, str] = {repo_root: root_id}

    for current_root, dirs, files in os.walk(repo_root):
        # Prune in place so os.walk skips ignored directories entirely.
        dirs[:] = [d for d in dirs if not skip_dir(d)]
        rel_root = os.path.relpath(current_root, repo_root)
        parent_path = repo_root if rel_root == '.' else current_root

        parent_id = ids.get(parent_path)
        if parent_id is None:
            # Fallback: this directory was never registered (e.g. its node
            # was skipped by the cap) — attach it directly under the root.
            parent_id = self._sanitize_node_id(f'path_{rel_root}')
            ids[parent_path] = parent_id
            label = self._escape_label(rel_root)
            lines.append(f' {parent_id}["{label}"]')
            lines.append(f' {root_id} --> {parent_id}')
            node_count += 1

        for d in dirs:
            if node_count >= max_nodes:
                break
            p = os.path.join(current_root, d)
            rel = os.path.relpath(p, repo_root)
            nid = self._sanitize_node_id(f'dir_{rel}')
            ids[p] = nid
            lines.append(f' {nid}["{self._escape_label(d)}/"]')
            lines.append(f' {parent_id} --> {nid}')
            node_count += 1

        if node_count >= max_nodes:
            break

        # Hide compiled/OS-noise files before applying the per-dir cap.
        show_files = [f for f in sorted(files) if not f.endswith('.pyc') and f != '.DS_Store']
        for f in show_files[:30]:
            if node_count >= max_nodes:
                break
            p = os.path.join(current_root, f)
            rel = os.path.relpath(p, repo_root)
            nid = self._sanitize_node_id(f'file_{rel}')
            lines.append(f' {nid}["{self._escape_label(f)}"]')
            lines.append(f' {parent_id} --> {nid}')
            node_count += 1

        if len(show_files) > 30 and node_count < max_nodes:
            more_id = self._sanitize_node_id(f'file_{rel_root}_more')
            lines.append(f' {more_id}["... +{len(show_files) - 30} more"]')
            lines.append(f' {parent_id} --> {more_id}')
            node_count += 1

    if node_count >= max_nodes:
        lines.append(f' cutoff["(truncated at ~{max_nodes} nodes)"]')
        lines.append(f' {root_id} --> cutoff')

    return '\n'.join(lines) + '\n'
|
|
206
|
+
|
|
207
|
+
def repo_status_flowchart(self, repo_state: dict) -> str:
    """Render the working-tree status in *repo_state* as a Mermaid
    ``flowchart TB``.

    Expected keys (all optional): branch, ahead, behind, clean, and the
    file-list buckets staged/modified/untracked/conflicts. Each bucket
    shows at most 30 files plus a "+N more" overflow node.
    """
    branch = repo_state.get('branch') or '(detached)'
    ahead = int(repo_state.get('ahead') or 0)
    behind = int(repo_state.get('behind') or 0)
    clean = bool(repo_state.get('clean'))

    # (node id, display title, file list) per working-tree bucket.
    buckets = [
        ('staged', 'Staged', repo_state.get('staged') or []),
        ('modified', 'Modified', repo_state.get('modified') or []),
        ('untracked', 'Untracked', repo_state.get('untracked') or []),
        ('conflicts', 'Conflicts', repo_state.get('conflicts') or []),
    ]

    out = [
        'flowchart TB',
        f' repo["Repo"] --> branch["Branch: {self._escape_label(branch)}"]',
        f' branch --> sync["Sync: ahead {ahead} / behind {behind}"]',
        f' repo --> clean["Clean: {str(clean).lower()}"]',
        ' repo --> buckets{Working Tree}',
    ]
    for group_id, title, entries in buckets:
        out.append(f' buckets --> {group_id}["{title} ({len(entries)})"]')

    # Per-file nodes, capped at 30 per bucket with a "+N more" marker.
    for group_id, _title, entries in buckets:
        for idx, entry in enumerate(entries[:30]):
            out.append(f' {group_id} --> {group_id}_{idx}["{self._escape_label(entry)}"]')
        if len(entries) > 30:
            out.append(f' {group_id} --> {group_id}_more["... +{len(entries) - 30} more"]')

    out.extend([
        ' classDef bad fill:#7f1d1d,stroke:#fecaca,color:#fff;',
        ' classDef warn fill:#78350f,stroke:#fed7aa,color:#fff;',
        ' class conflicts bad;',
    ])
    # Highlight dirty buckets only when there is something in them.
    if buckets[1][2] or buckets[2][2]:
        out.append(' class modified,untracked warn;')

    return '\n'.join(out) + '\n'
|
|
253
|
+
|
|
254
|
+
def commit_graph_gitgraph(self, max_commits: int = 40) -> str:
    """Render recent history (newest first) as a Mermaid ``flowchart TD``.

    A vertical flowchart is used instead of gitGraph so long commit lists
    read like ``git log`` output.

    Args:
        max_commits: maximum number of commits requested from ``git log``.

    Returns:
        Mermaid flowchart source with one node per commit, each linked to
        the next (older) commit.

    Raises:
        RuntimeError: if ``git log`` exits non-zero.
    """
    args = [
        'log',
        f'-n{max_commits}',
        '--date=short',
        # %x1f (unit separator) is a field delimiter that cannot appear in
        # ordinary subjects, unlike spaces or tabs.
        '--pretty=format:%H%x1f%h%x1f%d%x1f%ad%x1f%s',
    ]
    returncode, stdout, stderr = self._git(args)
    if returncode != 0:
        msg = (stderr or stdout or '').strip() or 'Unable to read git log'
        raise RuntimeError(msg)

    # Parse one (short-hash, escaped label) pair per log line; skip any
    # malformed line defensively.
    commits: list[tuple[str, str]] = []
    for raw in stdout.splitlines():
        parts = raw.split('\x1f')
        if len(parts) != 5:
            continue
        _full, short, decos, date, subject = parts
        label = f'{short} {date} {subject}'.strip()
        if decos.strip():
            label = f'{label} {decos.strip()}'
        commits.append((short, self._escape_label(label)))

    lines = ['flowchart TD']

    # Sanitize each node id once (previously ids were re-derived in the
    # edge loop, and the node loop carried an unused enumerate index).
    node_ids = [self._sanitize_node_id(f'commit_{short}') for short, _label in commits]

    # One node per commit.
    for node_id, (_short, label) in zip(node_ids, commits):
        lines.append(f' {node_id}["{label}"]')

    # Chain each commit vertically to the next (older) one.
    for cur_id, next_id in zip(node_ids, node_ids[1:]):
        lines.append(f' {cur_id} --> {next_id}')

    return '\n'.join(lines) + '\n'
|
|
295
|
+
|
|
296
|
+
def build_html(self, diagrams: dict, title: str = 'FancyGit Mermaid') -> str:
    """Render a standalone HTML page that displays all Mermaid diagrams.

    Args:
        diagrams: mapping of card title -> Mermaid source text.
        title: page title used for the tab and the heading.

    Returns:
        A complete HTML document as a string. Diagrams are rendered
        client-side by mermaid.js fetched from the jsdelivr CDN, so the
        page needs network access when opened.
    """
    # Diagram sources are embedded as a JSON object and rendered in-browser.
    payload = json.dumps(diagrams)
    now = datetime.now().isoformat(timespec='seconds')

    # NOTE: literal CSS/JS braces are doubled ({{ }}) to survive the f-string.
    return textwrap.dedent(
        f"""\
<!doctype html>
<html lang=\"en\">
<head>
<meta charset=\"utf-8\" />
<meta name=\"viewport\" content=\"width=device-width,initial-scale=1\" />
<title>{self._escape_label(title)}</title>
<style>
body {{ font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Arial, sans-serif; margin: 24px; }}
h1 {{ margin: 0 0 6px 0; font-size: 18px; }}
.meta {{ color: #6b7280; font-size: 12px; margin-bottom: 16px; }}
.grid {{ display: grid; grid-template-columns: 1fr; gap: 20px; }}
.card {{ border: 1px solid #e5e7eb; border-radius: 10px; padding: 16px; background: #fff; }}
.card h2 {{ margin: 0 0 12px 0; font-size: 14px; color: #111827; }}
svg .cluster > text {{ display: none; }}
svg g.cluster-label text {{ display: none; }}
pre {{ overflow: auto; padding: 12px; border-radius: 8px; background: #0b1020; color: #d1d5db; font-size: 12px; }}
.toolbar {{ display:flex; gap: 10px; align-items:center; margin-bottom: 12px; }}
button {{ padding: 6px 10px; font-size: 12px; border-radius: 8px; border: 1px solid #d1d5db; background:#f9fafb; cursor:pointer; }}
button:hover {{ background:#f3f4f6; }}
</style>
</head>
<body>
<h1>{self._escape_label(title)}</h1>
<div class=\"meta\">Generated: {now}</div>

<div id=\"root\" class=\"grid\"></div>

<script>
const diagrams = {payload};
const root = document.getElementById('root');

function mkCard(name, code) {{
const card = document.createElement('div');
card.className = 'card';
const h2 = document.createElement('h2');
h2.textContent = name;

const toolbar = document.createElement('div');
toolbar.className = 'toolbar';
const copyBtn = document.createElement('button');
copyBtn.textContent = 'Copy Mermaid';
copyBtn.onclick = async () => {{
await navigator.clipboard.writeText(code);
copyBtn.textContent = 'Copied';
setTimeout(() => copyBtn.textContent = 'Copy Mermaid', 900);
}};

const toggleBtn = document.createElement('button');
toggleBtn.textContent = 'Show source';

const mermaidDiv = document.createElement('div');
mermaidDiv.className = 'mermaid';
mermaidDiv.textContent = code;

const pre = document.createElement('pre');
pre.style.display = 'none';
pre.textContent = code;

toggleBtn.onclick = () => {{
const show = pre.style.display === 'none';
pre.style.display = show ? 'block' : 'none';
toggleBtn.textContent = show ? 'Hide source' : 'Show source';
}};

toolbar.appendChild(copyBtn);
toolbar.appendChild(toggleBtn);

card.appendChild(h2);
card.appendChild(toolbar);
card.appendChild(mermaidDiv);
card.appendChild(pre);
return card;
}}

Object.entries(diagrams).forEach(([name, code]) => root.appendChild(mkCard(name, code)));
</script>

<script type=\"module\">
import mermaid from 'https://cdn.jsdelivr.net/npm/mermaid@10/dist/mermaid.esm.min.mjs';
mermaid.initialize({{ startOnLoad: true, theme: 'default' }});
</script>
</body>
</html>
"""
    )
|
|
387
|
+
|
|
388
|
+
def export_all(self, output_dir: str, repo_state: dict, max_commits: int = 40) -> dict:
    """Generate every diagram plus the HTML viewer and write them to disk.

    Args:
        output_dir: directory to write into (created if missing).
        repo_state: state dict consumed by ``repo_status_flowchart``.
        max_commits: history depth for the commit graph.

    Returns:
        Mapping of artifact keys (``status_mmd``, ``graph_mmd``,
        ``deps_mmd``, ``tree_mmd``, ``html``) to the paths written.
    """
    os.makedirs(output_dir, exist_ok=True)

    # File-system diagrams are always built from the current directory.
    repo_root = os.getcwd()

    diagrams = {
        'Repo Status': self.repo_status_flowchart(repo_state),
        'Commit Graph': self.commit_graph_gitgraph(max_commits=max_commits),
        'Python Import Dependencies': self.python_import_dependency_flowchart(repo_root),
        'Repo File Structure': self.repo_file_structure_flowchart(repo_root),
    }

    # artifact key -> (file name, content); the HTML bundles every diagram.
    outputs = {
        'status_mmd': ('repo_status.mmd', diagrams['Repo Status']),
        'graph_mmd': ('commit_graph.mmd', diagrams['Commit Graph']),
        'deps_mmd': ('python_deps.mmd', diagrams['Python Import Dependencies']),
        'tree_mmd': ('repo_tree.mmd', diagrams['Repo File Structure']),
        'html': ('repo_visualization.html', self.build_html(diagrams)),
    }

    written: dict = {}
    for key, (filename, content) in outputs.items():
        path = os.path.join(output_dir, filename)
        with open(path, 'w', encoding='utf-8') as fh:
            fh.write(content)
        written[key] = path
    return written
|
src/ollama_client.py
ADDED
|
@@ -0,0 +1,142 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
import requests
|
|
3
|
+
import json
|
|
4
|
+
from typing import List, Dict, Optional
|
|
5
|
+
import time
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class OllamaClient:
|
|
9
|
+
"""Client for communicating with local Ollama model for error analysis"""
|
|
10
|
+
|
|
11
|
+
def __init__(self, base_url: str = "http://localhost:11434", model: str = "llama3.2"):
    """Configure the client; no network traffic happens here.

    Args:
        base_url: root URL of the local Ollama HTTP server.
        model: default model name used for generation requests.
    """
    self.base_url = base_url
    self.model = model
    # Per-request timeout in seconds for /api/generate calls.
    self.timeout = 30
    # Extra attempts after the first failed analysis call.
    self.max_retries = 2
|
|
16
|
+
|
|
17
|
+
def test_connection(self) -> bool:
    """Return True when the Ollama server answers /api/tags with HTTP 200."""
    try:
        resp = requests.get(f"{self.base_url}/api/tags", timeout=5)
    except requests.exceptions.RequestException:
        # Server down, DNS failure, timeout, etc. — treat as "not running".
        return False
    return resp.status_code == 200
|
|
24
|
+
|
|
25
|
+
def get_available_models(self) -> List[str]:
    """Return the model names reported by /api/tags, or [] on any failure."""
    try:
        resp = requests.get(f"{self.base_url}/api/tags", timeout=5)
        if resp.status_code == 200:
            payload = resp.json()
            return [entry['name'] for entry in payload.get('models', [])]
    except requests.exceptions.RequestException:
        pass  # fall through to the empty result below
    return []
|
|
35
|
+
|
|
36
|
+
def analyze_error_messages(self, messages: List[Dict]) -> str:
    """
    Analyze error/warning messages using the Ollama model.

    Args:
        messages: List of dictionaries containing error/warning information

    Returns:
        str: Analysis text from the AI model, or a human-readable failure
        message after exhausting retries.
    """
    if not messages:
        return "No messages to analyze."

    prompt = self._build_analysis_prompt(messages)
    total_attempts = self.max_retries + 1

    for attempt in range(total_attempts):
        is_last_attempt = attempt == self.max_retries
        try:
            answer = self._call_ollama(prompt)
        except Exception as exc:
            if is_last_attempt:
                return f"Failed to get AI analysis after {total_attempts} attempts: {str(exc)}"
            time.sleep(1)  # brief pause before the next retry
            continue
        if answer:
            return answer
        # Empty answer: loop again without sleeping, same as a fresh attempt.

    return "Unable to get AI analysis."
|
|
63
|
+
|
|
64
|
+
def _build_analysis_prompt(self, messages: List[Dict]) -> str:
|
|
65
|
+
"""Build the analysis prompt for the AI model"""
|
|
66
|
+
prompt = """You are a Git expert assistant. Analyze the following Git error/warning messages and provide:
|
|
67
|
+
1. A clear summary of what went wrong
|
|
68
|
+
2. Step-by-step instructions on how to resolve the issue
|
|
69
|
+
3. Any preventive measures to avoid this in the future
|
|
70
|
+
|
|
71
|
+
Keep your response concise, practical, and focused on solutions.
|
|
72
|
+
|
|
73
|
+
Messages to analyze:
|
|
74
|
+
"""
|
|
75
|
+
|
|
76
|
+
for i, msg in enumerate(messages, 1):
|
|
77
|
+
severity = msg.get('severity', 'unknown').upper()
|
|
78
|
+
message_text = msg.get('message', str(msg))
|
|
79
|
+
error_type = msg.get('type', 'Unknown')
|
|
80
|
+
file_info = f" (file: {msg.get('file', 'N/A')})" if msg.get('file') else ""
|
|
81
|
+
|
|
82
|
+
prompt += f"\n{i}. [{severity}] {error_type}{file_info}\n {message_text}\n"
|
|
83
|
+
|
|
84
|
+
prompt += "\n\nAnalysis:"
|
|
85
|
+
return prompt
|
|
86
|
+
|
|
87
|
+
def _call_ollama(self, prompt: str) -> Optional[str]:
    """POST *prompt* to Ollama's /api/generate and return the response text.

    Args:
        prompt: full prompt string for the model.

    Returns:
        The stripped 'response' field of the JSON body (may be '').

    Raises:
        Exception: with a human-readable message on timeout, connection
            failure, non-200 status, or an unparsable response body.
    """
    payload = {
        "model": self.model,
        "prompt": prompt,
        "stream": False,  # request a single JSON document, not a token stream
        "options": {
            "temperature": 0.3,
            "top_p": 0.9,
            "max_tokens": 500
        }
    }

    try:
        response = requests.post(
            f"{self.base_url}/api/generate",
            json=payload,
            timeout=self.timeout
        )
    except requests.exceptions.Timeout:
        raise Exception("Request to Ollama timed out")
    except requests.exceptions.ConnectionError:
        raise Exception("Cannot connect to Ollama. Make sure it's running.")
    except Exception as e:
        raise Exception(f"Error calling Ollama: {str(e)}")

    # Raised outside the try block above so the status message is not
    # re-wrapped into "Error calling Ollama: ..." by the generic handler
    # (the previous version double-wrapped it).
    if response.status_code != 200:
        raise Exception(f"Ollama API returned status {response.status_code}")

    try:
        result = response.json()
        return result.get('response', '').strip()
    except json.JSONDecodeError:
        raise Exception("Invalid response from Ollama")
    except Exception as e:
        raise Exception(f"Error calling Ollama: {str(e)}")
|
|
121
|
+
|
|
122
|
+
def set_model(self, model: str) -> bool:
    """Switch to *model* if Ollama reports it as available; return success."""
    if model not in self.get_available_models():
        return False
    self.model = model
    return True
|
|
129
|
+
|
|
130
|
+
def get_model_info(self) -> Dict:
    """Return /api/show metadata for the current model, or {} on failure."""
    try:
        resp = requests.post(
            f"{self.base_url}/api/show",
            json={"name": self.model},
            timeout=5
        )
        return resp.json() if resp.status_code == 200 else {}
    except requests.exceptions.RequestException:
        # Unreachable server or bad response body: degrade to empty info.
        return {}
|