cognify_code-0.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ai_code_assistant/__init__.py +14 -0
- ai_code_assistant/agent/__init__.py +63 -0
- ai_code_assistant/agent/code_agent.py +461 -0
- ai_code_assistant/agent/code_generator.py +388 -0
- ai_code_assistant/agent/code_reviewer.py +365 -0
- ai_code_assistant/agent/diff_engine.py +308 -0
- ai_code_assistant/agent/file_manager.py +300 -0
- ai_code_assistant/agent/intent_classifier.py +284 -0
- ai_code_assistant/chat/__init__.py +11 -0
- ai_code_assistant/chat/agent_session.py +156 -0
- ai_code_assistant/chat/session.py +165 -0
- ai_code_assistant/cli.py +1571 -0
- ai_code_assistant/config.py +149 -0
- ai_code_assistant/editor/__init__.py +8 -0
- ai_code_assistant/editor/diff_handler.py +270 -0
- ai_code_assistant/editor/file_editor.py +350 -0
- ai_code_assistant/editor/prompts.py +146 -0
- ai_code_assistant/generator/__init__.py +7 -0
- ai_code_assistant/generator/code_gen.py +265 -0
- ai_code_assistant/generator/prompts.py +114 -0
- ai_code_assistant/git/__init__.py +6 -0
- ai_code_assistant/git/commit_generator.py +130 -0
- ai_code_assistant/git/manager.py +203 -0
- ai_code_assistant/llm.py +111 -0
- ai_code_assistant/providers/__init__.py +23 -0
- ai_code_assistant/providers/base.py +124 -0
- ai_code_assistant/providers/cerebras.py +97 -0
- ai_code_assistant/providers/factory.py +148 -0
- ai_code_assistant/providers/google.py +103 -0
- ai_code_assistant/providers/groq.py +111 -0
- ai_code_assistant/providers/ollama.py +86 -0
- ai_code_assistant/providers/openai.py +114 -0
- ai_code_assistant/providers/openrouter.py +130 -0
- ai_code_assistant/py.typed +0 -0
- ai_code_assistant/refactor/__init__.py +20 -0
- ai_code_assistant/refactor/analyzer.py +189 -0
- ai_code_assistant/refactor/change_plan.py +172 -0
- ai_code_assistant/refactor/multi_file_editor.py +346 -0
- ai_code_assistant/refactor/prompts.py +175 -0
- ai_code_assistant/retrieval/__init__.py +19 -0
- ai_code_assistant/retrieval/chunker.py +215 -0
- ai_code_assistant/retrieval/indexer.py +236 -0
- ai_code_assistant/retrieval/search.py +239 -0
- ai_code_assistant/reviewer/__init__.py +7 -0
- ai_code_assistant/reviewer/analyzer.py +278 -0
- ai_code_assistant/reviewer/prompts.py +113 -0
- ai_code_assistant/utils/__init__.py +18 -0
- ai_code_assistant/utils/file_handler.py +155 -0
- ai_code_assistant/utils/formatters.py +259 -0
- cognify_code-0.2.0.dist-info/METADATA +383 -0
- cognify_code-0.2.0.dist-info/RECORD +55 -0
- cognify_code-0.2.0.dist-info/WHEEL +5 -0
- cognify_code-0.2.0.dist-info/entry_points.txt +3 -0
- cognify_code-0.2.0.dist-info/licenses/LICENSE +22 -0
- cognify_code-0.2.0.dist-info/top_level.txt +1 -0

ai_code_assistant/utils/formatters.py
@@ -0,0 +1,259 @@
"""Output formatters for AI Code Assistant."""

import json
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any

from rich.console import Console
from rich.markdown import Markdown
from rich.panel import Panel
from rich.syntax import Syntax
from rich.table import Table

from ai_code_assistant.reviewer.analyzer import ReviewResult, ReviewIssue
from ai_code_assistant.generator.code_gen import GenerationResult


class BaseFormatter(ABC):
    """Base class for output formatters."""

    @abstractmethod
    def format_review(self, result: ReviewResult) -> str:
        """Format a review result."""
        pass

    @abstractmethod
    def format_generation(self, result: GenerationResult) -> str:
        """Format a generation result."""
        pass

    def save(self, content: str, output_path: Path) -> None:
        """Save formatted content to file."""
        output_path.parent.mkdir(parents=True, exist_ok=True)
        output_path.write_text(content)


class ConsoleFormatter(BaseFormatter):
    """Formatter for rich console output."""

    def __init__(self, use_colors: bool = True):
        self.console = Console(force_terminal=use_colors, color_system="auto" if use_colors else None)

    def format_review(self, result: ReviewResult) -> str:
        """Format and display review result to console."""
        if result.error:
            self.console.print(Panel(f"[red]Error:[/red] {result.error}", title="Review Failed"))
            return result.error

        # Header
        self.console.print(Panel(
            f"[bold]{result.filename}[/bold]\nLanguage: {result.language} | Quality: {result.overall_quality}",
            title="Code Review",
        ))

        # Summary
        if result.summary:
            self.console.print(f"\n[bold]Summary:[/bold] {result.summary}\n")

        # Issues by severity
        severity_colors = {"critical": "red", "warning": "yellow", "suggestion": "blue"}

        for severity in ["critical", "warning", "suggestion"]:
            issues = [i for i in result.issues if i.severity == severity]
            if not issues:
                continue

            color = severity_colors.get(severity, "white")
            self.console.print(f"\n[bold {color}]{severity.upper()} ({len(issues)})[/bold {color}]")

            for issue in issues:
                self._print_issue(issue, color)

        # Metrics
        if result.metrics:
            self._print_metrics(result.metrics)

        return ""  # Console output is printed directly

    def _print_issue(self, issue: ReviewIssue, color: str) -> None:
        """Print a single issue."""
        lines = f"L{issue.line_start}"
        if issue.line_end != issue.line_start:
            lines += f"-{issue.line_end}"

        confidence = f" ({issue.confidence:.0%})" if issue.confidence else ""

        self.console.print(f" [{color}]●[/{color}] [{color}]{issue.title}[/{color}] [{lines}]{confidence}")
        self.console.print(f" {issue.description}")

        if issue.suggestion:
            self.console.print(f" [green]→ {issue.suggestion}[/green]")

    def _print_metrics(self, metrics: dict) -> None:
        """Print metrics table."""
        table = Table(title="Metrics", show_header=False)
        table.add_column("Metric")
        table.add_column("Value")

        for key, value in metrics.items():
            table.add_row(key.replace("_", " ").title(), str(value))

        self.console.print(table)

    def format_generation(self, result: GenerationResult) -> str:
        """Format and display generation result to console."""
        if result.error:
            self.console.print(Panel(f"[red]Error:[/red] {result.error}", title="Generation Failed"))
            return result.error

        self.console.print(Panel(
            f"Mode: {result.mode} | Language: {result.language}",
            title="Generated Code",
        ))

        if result.description:
            self.console.print(f"\n[bold]Description:[/bold] {result.description}\n")

        syntax = Syntax(result.code, result.language, theme="monokai", line_numbers=True)
        self.console.print(syntax)

        return result.code


class MarkdownFormatter(BaseFormatter):
    """Formatter for markdown output."""

    def format_review(self, result: ReviewResult) -> str:
        """Format review result as markdown."""
        lines = [f"# Code Review: {result.filename}\n"]

        if result.error:
            lines.append(f"**Error:** {result.error}\n")
            return "\n".join(lines)

        lines.append(f"**Language:** {result.language} ")
        lines.append(f"**Overall Quality:** {result.overall_quality}\n")

        if result.summary:
            lines.append(f"## Summary\n\n{result.summary}\n")

        # Issues
        lines.append("## Issues\n")

        for severity in ["critical", "warning", "suggestion"]:
            issues = [i for i in result.issues if i.severity == severity]
            if not issues:
                continue

            emoji = {"critical": "🔴", "warning": "🟡", "suggestion": "🔵"}.get(severity, "⚪")
            lines.append(f"### {emoji} {severity.title()} ({len(issues)})\n")

            for issue in issues:
                lines.append(self._format_issue_md(issue))

        # Metrics
        if result.metrics:
            lines.append("## Metrics\n")
            lines.append("| Metric | Value |")
            lines.append("|--------|-------|")
            for key, value in result.metrics.items():
                lines.append(f"| {key.replace('_', ' ').title()} | {value} |")
            lines.append("")

        return "\n".join(lines)

    def _format_issue_md(self, issue: ReviewIssue) -> str:
        """Format a single issue as markdown."""
        lines = issue.line_start
        if issue.line_end != issue.line_start:
            lines = f"{issue.line_start}-{issue.line_end}"

        md = f"#### {issue.title} (Line {lines})\n\n"
        md += f"{issue.description}\n\n"

        if issue.suggestion:
            md += f"**Suggestion:** {issue.suggestion}\n\n"

        if issue.code_snippet:
            md += f"```\n{issue.code_snippet}\n```\n\n"

        if issue.fixed_code:
            md += f"**Fixed code:**\n```\n{issue.fixed_code}\n```\n\n"

        if issue.confidence:
            md += f"*Confidence: {issue.confidence:.0%}*\n\n"

        return md

    def format_generation(self, result: GenerationResult) -> str:
        """Format generation result as markdown."""
        lines = [f"# Generated Code\n"]

        if result.error:
            lines.append(f"**Error:** {result.error}\n")
            return "\n".join(lines)

        lines.append(f"**Mode:** {result.mode} ")
        lines.append(f"**Language:** {result.language}\n")

        if result.description:
            lines.append(f"## Description\n\n{result.description}\n")

        lines.append(f"## Code\n\n```{result.language}\n{result.code}\n```\n")

        return "\n".join(lines)


class JsonFormatter(BaseFormatter):
    """Formatter for JSON output."""

    def format_review(self, result: ReviewResult) -> str:
        """Format review result as JSON."""
        data = {
            "filename": result.filename,
            "language": result.language,
            "summary": result.summary,
            "overall_quality": result.overall_quality,
            "error": result.error,
            "issues": [
                {
                    "line_start": i.line_start,
                    "line_end": i.line_end,
                    "category": i.category,
                    "severity": i.severity,
                    "title": i.title,
                    "description": i.description,
                    "suggestion": i.suggestion,
                    "confidence": i.confidence,
                }
                for i in result.issues
            ],
            "metrics": result.metrics,
        }
        return json.dumps(data, indent=2)

    def format_generation(self, result: GenerationResult) -> str:
        """Format generation result as JSON."""
        data = {
            "mode": result.mode,
            "language": result.language,
            "description": result.description,
            "code": result.code,
            "error": result.error,
            "success": result.success,
        }
        return json.dumps(data, indent=2)


def get_formatter(format_type: str, use_colors: bool = True) -> BaseFormatter:
    """Get formatter by type."""
    formatters = {
        "console": lambda: ConsoleFormatter(use_colors),
        "markdown": MarkdownFormatter,
        "json": JsonFormatter,
    }

    factory = formatters.get(format_type.lower(), formatters["console"])
    return factory()

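A brief usage sketch for the formatters module above: select a formatter by name, render a result, and optionally save it. Here `make_review_result()` is a hypothetical placeholder for whatever produces a `ReviewResult` (presumably the reviewer module); it is not part of the package API.

```python
# Usage sketch for ai_code_assistant.utils.formatters.
# make_review_result() is a hypothetical placeholder, not a real API.
from pathlib import Path

from ai_code_assistant.utils.formatters import get_formatter


def make_review_result():
    """Placeholder: in practice the reviewer module produces a ReviewResult."""
    raise NotImplementedError


result = make_review_result()

console_fmt = get_formatter("console")          # rich output, printed directly
console_fmt.format_review(result)

md_fmt = get_formatter("markdown")
report = md_fmt.format_review(result)           # returns a markdown string
md_fmt.save(report, Path("reports/review.md"))  # BaseFormatter.save creates parent dirs

json_report = get_formatter("json").format_review(result)  # machine-readable output
```
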
cognify_code-0.2.0.dist-info/METADATA
@@ -0,0 +1,383 @@
Metadata-Version: 2.4
Name: cognify-code
Version: 0.2.0
Summary: Your local AI-powered code assistant. Review, generate, search, and refactor code with an intelligent AI agent—all running locally with complete privacy.
Author-email: Ashok Kumar <akkssy@users.noreply.github.com>
Maintainer-email: Ashok Kumar <akkssy@users.noreply.github.com>
License: MIT
Project-URL: Homepage, https://akkssy.github.io/cognify-ai-website/
Project-URL: Documentation, https://github.com/akkssy/cognify-ai#readme
Project-URL: Repository, https://github.com/akkssy/cognify-ai
Project-URL: Issues, https://github.com/akkssy/cognify-ai/issues
Project-URL: Changelog, https://github.com/akkssy/cognify-ai/releases
Keywords: ai,code-review,code-generation,code-assistant,ai-agent,langchain,ollama,groq,gemini,cli,developer-tools,refactoring,semantic-search,local-llm,privacy
Classifier: Development Status :: 4 - Beta
Classifier: Environment :: Console
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Topic :: Software Development
Classifier: Topic :: Software Development :: Code Generators
Classifier: Topic :: Software Development :: Quality Assurance
Classifier: Topic :: Software Development :: Testing
Classifier: Topic :: Text Processing :: Linguistic
Classifier: Typing :: Typed
Requires-Python: >=3.9
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: langchain>=0.1.0
Requires-Dist: langchain-core>=0.1.0
Requires-Dist: langchain-ollama>=0.1.0
Requires-Dist: langchain-openai>=0.1.0
Requires-Dist: click>=8.1.0
Requires-Dist: rich>=13.0.0
Requires-Dist: pyyaml>=6.0
Requires-Dist: pydantic>=2.0.0
Requires-Dist: pydantic-settings>=2.0.0
Requires-Dist: chromadb>=0.4.0
Requires-Dist: sentence-transformers>=2.0.0
Requires-Dist: watchdog>=4.0.0
Provides-Extra: google
Requires-Dist: langchain-google-genai>=1.0.0; extra == "google"
Provides-Extra: groq
Requires-Dist: langchain-groq>=0.1.0; extra == "groq"
Provides-Extra: all-providers
Requires-Dist: langchain-google-genai>=1.0.0; extra == "all-providers"
Requires-Dist: langchain-groq>=0.1.0; extra == "all-providers"
Provides-Extra: dev
Requires-Dist: pytest>=7.0.0; extra == "dev"
Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
Requires-Dist: pytest-mock>=3.10.0; extra == "dev"
Requires-Dist: black>=23.0.0; extra == "dev"
Requires-Dist: ruff>=0.1.0; extra == "dev"
Requires-Dist: mypy>=1.0.0; extra == "dev"
Requires-Dist: build>=1.0.0; extra == "dev"
Requires-Dist: twine>=4.0.0; extra == "dev"
Dynamic: license-file

# 🧠 Cognify AI

<p align="center">
  <strong>Code Cognition — Your AI-Powered Code Assistant</strong>
</p>

<p align="center">
  <a href="#features">Features</a> •
  <a href="#installation">Installation</a> •
  <a href="#providers">Providers</a> •
  <a href="#usage">Usage</a> •
  <a href="#documentation">Docs</a> •
  <a href="#contributing">Contributing</a>
</p>

<p align="center">
  <img src="https://img.shields.io/badge/python-3.9+-blue.svg" alt="Python 3.9+">
  <img src="https://img.shields.io/badge/license-MIT-green.svg" alt="MIT License">
  <img src="https://img.shields.io/badge/tests-144%20passed-brightgreen.svg" alt="Tests">
  <img src="https://img.shields.io/badge/providers-6%20supported-purple.svg" alt="6 Providers">
</p>

---

A powerful CLI tool that brings AI-powered code cognition to your terminal. Review code, generate functions, search your codebase semantically, and refactor projects—with support for **multiple LLM providers** including local (Ollama) and cloud options with free tiers.

## ✨ Features

| Feature | Description |
|---------|-------------|
| 🔍 **Code Review** | Analyze code for bugs, security issues, and style problems |
| ⚡ **Code Generation** | Generate functions, classes, and tests from natural language |
| 🔎 **Semantic Search** | Search your codebase using natural language queries |
| 📝 **AI File Editing** | Edit files with natural language instructions |
| 🔄 **Multi-File Refactor** | Refactor across multiple files at once |
| ✏️ **Symbol Renaming** | Rename functions, classes, variables across your project |
| 💬 **Interactive Chat** | Chat with AI about your code |
| 📊 **Codebase Indexing** | Create searchable semantic index with RAG |
| 🌐 **Multi-Provider** | Support for 6 LLM providers (local & cloud) |

## 🤖 Supported Providers

| Provider | Free Tier | API Key | Best For |
|----------|-----------|---------|----------|
| **Ollama** | ✅ Unlimited | ❌ None | Privacy, offline use |
| **Google AI** | ✅ Generous | ✅ Required | 1M+ token context |
| **Groq** | ✅ 1000 req/day | ✅ Required | Fastest inference |
| **Cerebras** | ✅ Available | ✅ Required | Fast inference |
| **OpenRouter** | ✅ Free models | ✅ Required | Model variety |
| **OpenAI** | ❌ Paid only | ✅ Required | GPT-4 quality |
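
These providers correspond to the LangChain integration packages declared in the dependency list above (`langchain-ollama`, `langchain-openai`, `langchain-groq`, `langchain-google-genai`). The sketch below shows one way such a name-to-client factory can be wired up; it is illustrative only and not the actual `providers/factory.py`. Each client reads its API key from the environment variables shown under Quick Start below.

```python
# Illustrative sketch only: map a configured provider name to a LangChain chat
# model. API keys are read from the environment by each client.
# The real providers/factory.py may be organised differently.
def build_chat_model(provider: str, model: str, temperature: float = 0.1):
    if provider == "ollama":
        from langchain_ollama import ChatOllama
        return ChatOllama(model=model, temperature=temperature)
    if provider == "openai":
        from langchain_openai import ChatOpenAI
        return ChatOpenAI(model=model, temperature=temperature)
    if provider == "groq":  # requires the "groq" extra
        from langchain_groq import ChatGroq
        return ChatGroq(model=model, temperature=temperature)
    if provider == "google":  # requires the "google" extra
        from langchain_google_genai import ChatGoogleGenerativeAI
        return ChatGoogleGenerativeAI(model=model, temperature=temperature)
    raise ValueError(f"Unsupported provider: {provider}")
```

OpenRouter and Cerebras expose OpenAI-compatible endpoints, so they would typically reuse `ChatOpenAI` with a custom `base_url`.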

## 🚀 Quick Start

### Option 1: Local with Ollama (No API Key)

```bash
# Install Ollama from https://ollama.ai
ollama pull deepseek-coder:6.7b
ollama serve
```

### Option 2: Cloud with Free Tier

```bash
# Google AI Studio (1M token context, free)
export GOOGLE_API_KEY="your-key"  # Get from https://aistudio.google.com/apikey

# OR Groq (fastest inference, free tier)
export GROQ_API_KEY="your-key"  # Get from https://console.groq.com/keys

# OR OpenRouter (free models available)
export OPENROUTER_API_KEY="your-key"  # Get from https://openrouter.ai/keys
```

### Installation

```bash
# Clone the repository
git clone https://github.com/akkssy/cognify-ai.git
cd cognify-ai

# Create virtual environment
python -m venv .venv
source .venv/bin/activate  # On Windows: .venv\Scripts\activate

# Install the package
pip install -e .
```

### Verify Installation

```bash
# Check status and available providers
ai-assist status
ai-assist providers
```

## 🌐 Provider Management

### List Available Providers
```bash
ai-assist providers
```
Shows all providers with their models, free tier status, and API key requirements.

### Switch Providers
```bash
# Switch to Groq (fast cloud inference)
ai-assist use-provider groq --test

# Switch to Google with specific model
ai-assist use-provider google --model gemini-1.5-pro --test

# Use OpenRouter with free DeepSeek R1
ai-assist use-provider openrouter --model deepseek/deepseek-r1:free --test
```

### Test Provider Connection
```bash
ai-assist test-provider
ai-assist test-provider --provider groq --prompt "Hello world"
```

## 📖 Usage

### Code Review
```bash
ai-assist review path/to/file.py
ai-assist review src/ --format json
```

### Code Generation
```bash
ai-assist generate "binary search function" --language python
ai-assist generate "REST API client class" --mode class
ai-assist generate "unit tests for calculator" --mode test
```

### Semantic Search
```bash
# First, index your codebase
ai-assist index .

# Then search
ai-assist search "error handling"
ai-assist search "database connection" -k 10
```
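
Under the hood, indexing relies on ChromaDB and Sentence Transformers (both core dependencies). The sketch below illustrates that index-then-query pattern in isolation; the collection name, storage path, and chunking here are assumptions rather than the package's actual `retrieval/` implementation.

```python
# Minimal index/search sketch with chromadb + sentence-transformers.
# Collection name, path, and chunk contents are assumptions, not cognify-code internals.
import chromadb
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("all-MiniLM-L6-v2")  # default embedding model in config.yaml
client = chromadb.PersistentClient(path=".cognify_index")
collection = client.get_or_create_collection("code_chunks")

chunks = {
    "utils/formatters.py:1": '"""Output formatters for AI Code Assistant."""',
    "reviewer/analyzer.py:1": "class ReviewResult: ...",
}
collection.add(
    ids=list(chunks),
    documents=list(chunks.values()),
    embeddings=model.encode(list(chunks.values())).tolist(),
)

hits = collection.query(
    query_embeddings=model.encode(["error handling"]).tolist(),
    n_results=5,
)
print(hits["ids"][0])  # ids of the closest code chunks
```
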

### File Editing
```bash
ai-assist edit config.py "add logging to all functions" --preview
ai-assist edit utils.py "add type hints" --backup
```

### Multi-File Refactoring
```bash
ai-assist refactor "add docstrings to all functions" -p "src/**/*.py" --dry-run
ai-assist refactor "convert print to logging" --pattern "**/*.py" --confirm
```

### Symbol Renaming
```bash
ai-assist rename old_function new_function --type function --dry-run
ai-assist rename MyClass BetterClass --type class -p "src/**/*.py"
```

### Interactive Chat
```bash
ai-assist chat
```

### All Commands
```bash
ai-assist --help
```

## ⚙️ Configuration

Configuration is managed via `config.yaml`:

```yaml
llm:
  provider: "ollama"  # ollama, google, groq, cerebras, openrouter, openai
  model: "deepseek-coder:6.7b"
  base_url: "http://localhost:11434"  # For Ollama
  temperature: 0.1
  max_tokens: 4096
  timeout: 120

review:
  severity_levels: [critical, warning, suggestion]
  categories: [bugs, security, performance, style]

generation:
  include_type_hints: true
  include_docstrings: true

retrieval:
  embedding_model: "all-MiniLM-L6-v2"
  chunk_size: 50

editor:
  create_backup: true
  show_diff: true

refactor:
  max_files: 20
  require_confirmation: true
```

Or use environment variables:
```bash
export AI_ASSISTANT_LLM_PROVIDER="groq"
export AI_ASSISTANT_LLM_MODEL="llama-3.3-70b-versatile"
export GROQ_API_KEY="your-key"
```
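
The `AI_ASSISTANT_LLM_*` variables override the matching keys under `llm:` in `config.yaml`. A minimal pydantic-settings model illustrating that prefix mapping follows; the field names mirror the YAML keys above, but the real `config.py` may be structured differently.

```python
# Sketch of env-prefix overrides with pydantic-settings; not the actual config.py.
from pydantic_settings import BaseSettings, SettingsConfigDict


class LLMSettings(BaseSettings):
    model_config = SettingsConfigDict(env_prefix="AI_ASSISTANT_LLM_")

    provider: str = "ollama"
    model: str = "deepseek-coder:6.7b"
    base_url: str = "http://localhost:11434"
    temperature: float = 0.1
    max_tokens: int = 4096
    timeout: int = 120


settings = LLMSettings()  # AI_ASSISTANT_LLM_PROVIDER=groq would set settings.provider
print(settings.provider, settings.model)
```
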

## 📁 Project Structure

```
cognify-ai/
├── src/ai_code_assistant/
│   ├── cli.py               # Command-line interface
│   ├── config.py            # Configuration management
│   ├── llm.py               # LLM integration
│   ├── providers/           # Multi-provider support
│   │   ├── base.py          # Provider base class
│   │   ├── factory.py       # Provider factory
│   │   ├── ollama.py        # Ollama (local)
│   │   ├── google.py        # Google AI Studio
│   │   ├── groq.py          # Groq
│   │   ├── cerebras.py      # Cerebras
│   │   ├── openrouter.py    # OpenRouter
│   │   └── openai.py        # OpenAI
│   ├── reviewer/            # Code review module
│   ├── generator/           # Code generation module
│   ├── retrieval/           # Semantic search & indexing (RAG)
│   ├── editor/              # AI file editing
│   ├── refactor/            # Multi-file refactoring
│   ├── chat/                # Interactive chat
│   └── utils/               # Utilities & formatters
├── tests/                   # 144 unit tests
├── docs/                    # Documentation
├── config.yaml              # Configuration
└── pyproject.toml           # Dependencies
```

## 🧪 Testing

```bash
# Run all tests
PYTHONPATH=src pytest tests/ -v

# Run with coverage
PYTHONPATH=src pytest tests/ --cov=ai_code_assistant
```

## 🛠️ Tech Stack

| Component | Technology |
|-----------|------------|
| LLM Framework | LangChain |
| Local LLM | Ollama |
| Cloud LLMs | Google, Groq, OpenRouter, OpenAI |
| Vector Database | ChromaDB |
| Embeddings | Sentence Transformers |
| CLI | Click + Rich |
| Config | Pydantic |
| Testing | Pytest |

## 🐛 Troubleshooting

**"Connection refused" error (Ollama)**
```bash
ollama serve  # Make sure Ollama is running
```

**API Key errors**
```bash
ai-assist providers  # Check which API keys are set
export GROQ_API_KEY="your-key"  # Set the appropriate key
```

**Test provider connection**
```bash
ai-assist test-provider --provider groq
```

**Import errors**
```bash
pip install -e ".[dev]"
```

## 🤝 Contributing

Contributions are welcome! Please feel free to submit a Pull Request.

1. Fork the repository
2. Create your feature branch (`git checkout -b feature/amazing-feature`)
3. Commit your changes (`git commit -m 'Add amazing feature'`)
4. Push to the branch (`git push origin feature/amazing-feature`)
5. Open a Pull Request

## 📄 License

This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.

## 🙏 Acknowledgments

- [Ollama](https://ollama.ai) - Local LLM runtime
- [LangChain](https://langchain.com) - LLM framework
- [Google AI Studio](https://aistudio.google.com) - Gemini models
- [Groq](https://groq.com) - Fast inference
- [OpenRouter](https://openrouter.ai) - Multi-provider access
- [ChromaDB](https://www.trychroma.com) - Vector database

---

<p align="center">
  Made with ❤️ for developers who want flexible AI-powered coding assistance
</p>

cognify_code-0.2.0.dist-info/RECORD
@@ -0,0 +1,55 @@
ai_code_assistant/__init__.py,sha256=NvpsibXIyExY8hIl3wO2y5PEh8Btxvs8QP2uwFDVBmc,409
ai_code_assistant/cli.py,sha256=2wGSe-Lj74Uhfqm2cz25tirRVoeP7nVZCjAuK8NdQzc,59003
ai_code_assistant/config.py,sha256=6sAufexwzfCu2JNWvt9KevS9k_gMcjj1TAnwuaO1ZFw,4727
ai_code_assistant/llm.py,sha256=DfcWJf6zEAUsPSEZLdEmb9o6BQNf1Ja88nswjpy6cOw,4209
ai_code_assistant/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
ai_code_assistant/agent/__init__.py,sha256=BcVe4Ebopv_J01ApnRl05oN5yOet5mEefBrQmdPsUj0,1284
ai_code_assistant/agent/code_agent.py,sha256=0_T3bFzPosMKKczYx49UGIWJtMBEuKy4ey-gT-qdfYQ,16155
ai_code_assistant/agent/code_generator.py,sha256=rAaziRU-mJ5NooERjR_Cd6_hwO0kuULw3Sp8Ca9kR48,13138
ai_code_assistant/agent/code_reviewer.py,sha256=YiM7lRJhoN-vBnQb29jF-5nmE9ppL-OJffvx4ocTHEU,12066
ai_code_assistant/agent/diff_engine.py,sha256=A5jszowc5VmWbdidpIW_QhswG_Hats3FYuemP8VoYv4,11018
ai_code_assistant/agent/file_manager.py,sha256=Inyfo-UXT4joms1ADIMA_TKtIHEjEkBz4I4U2iK5_jI,10742
ai_code_assistant/agent/intent_classifier.py,sha256=MuIcyWQntocrTlCb4CD54mhc3JfSsWGfulsPYGUoz6E,10667
ai_code_assistant/chat/__init__.py,sha256=KntIXcjbPgznax1E0fvdrA3XtKF-hCz5Fr1tcRbdl7U,279
ai_code_assistant/chat/agent_session.py,sha256=VcutqSq8GKwldJhhAs_4_kXOuUvtnjRXZML1Mamm9SM,5495
ai_code_assistant/chat/session.py,sha256=5JRd1DuLjxbtckmsMeHzNjoEZnJS9lx9NoX6z03F0xE,5500
ai_code_assistant/editor/__init__.py,sha256=892BfTIo6kLdfZdhnvl4OFe0QSnxE4EyfkBoyLdA5rc,340
ai_code_assistant/editor/diff_handler.py,sha256=LeI-00GuH7ASIetsUzT3Y_pDq4K1wmycuu4UFu5ZkGg,8759
ai_code_assistant/editor/file_editor.py,sha256=csD8MW0jrfXAek5blWNuot_QWlhkgTTmtQtf8rbIdhY,11143
ai_code_assistant/editor/prompts.py,sha256=wryxwb4dNaeSbl5mHkDuw2uTAZxkdrdyZ89Gfogafow,4356
ai_code_assistant/generator/__init__.py,sha256=CfCO58CBye-BlZHjOfLLShovp2TVXg_GHKJuXe6ihu0,273
ai_code_assistant/generator/code_gen.py,sha256=vJ4xeiG4_1hOZ8TW7YZ-gTAVKoiHwgUzzdcWMvhgeRo,8609
ai_code_assistant/generator/prompts.py,sha256=uoEDpcRzpTd-4TLHNW_EbSHJiADMlu9SoGWZvvo1Adk,3384
ai_code_assistant/git/__init__.py,sha256=YgqmzneAnZyRrbazMqGoFSPIk5Yf5OTm2LXPbkQmecU,232
ai_code_assistant/git/commit_generator.py,sha256=CzDH5ZPqEaXyPznBg8FgTz8wbV4adALUQD__kl8au6o,4135
ai_code_assistant/git/manager.py,sha256=BYeYSz3yPpeJJESy2Zmu4MKEvJ5YAtw3HAmU6uba3nM,6815
ai_code_assistant/providers/__init__.py,sha256=T8eLHOcjWvqNxLsD8uLmU2H1mJbGbZgUrUcrrVRcqPs,832
ai_code_assistant/providers/base.py,sha256=6NJxzidnf5e_0hrbq5PxL4qsk9lGGU1Uzk4WDiWsZso,3969
ai_code_assistant/providers/cerebras.py,sha256=PfjfFtkFOip5OquyOnxlSQowpy8uPWNRLA6y4m-iYio,3098
ai_code_assistant/providers/factory.py,sha256=U2zH3HFDGhed2nPRpTyDqG4JcFNHvTvxw25NER2NEi0,4579
ai_code_assistant/providers/google.py,sha256=nEHsAUiBhV9TjtJEwxkMWydtnWiouVtl_2MrcU8GQNI,3344
ai_code_assistant/providers/groq.py,sha256=ipsrosI4wBKDyZlVyxV74MuZDc9zSuJ0NIeRt9wDCGc,3518
ai_code_assistant/providers/ollama.py,sha256=p0pNEV5d5G329DCnkliNNwcOmNJ1aKqf3W7rHLynju8,2601
ai_code_assistant/providers/openai.py,sha256=iBbabqOO4Z4qAlf9rVKRhXaVa72RBjqkBvEkF5xZn3c,3602
ai_code_assistant/providers/openrouter.py,sha256=ih7Li17tNqbJpQppnutKYl6FRiy-0gazdStoouSTO6Y,4454
ai_code_assistant/refactor/__init__.py,sha256=udGFNgX3U3fY7UZFbf3bqCthuVmPiDQzdEmpme9TZkM,460
ai_code_assistant/refactor/analyzer.py,sha256=Gn6pp24Pg_1wn8HtNPeyuHKDCZmHPZvEOySxxyUyOKo,6100
ai_code_assistant/refactor/change_plan.py,sha256=MYkpgidYdvETxLYBTv5brWH3c8SVOJY9NGJNAOpjpu0,5349
ai_code_assistant/refactor/multi_file_editor.py,sha256=cWYVTR50-3vLmoSJZyvDoVxnY_2Voh1pzwkre6RsYXo,11800
ai_code_assistant/refactor/prompts.py,sha256=LwwbM2CYmePP4O64e7We4I8pwD5f0RXHtn7JF7ynC2o,4880
ai_code_assistant/retrieval/__init__.py,sha256=_hyfVZCeVH6HCppyxhVJBQxWx91eE4j-ZzmYDE1h-vA,428
ai_code_assistant/retrieval/chunker.py,sha256=1a4oW4pFPW-lE9X5F-7eAoY3UziF9SkgJXifwL3EVXI,7244
ai_code_assistant/retrieval/indexer.py,sha256=iEW8P6yTvGfcoRj0F9aNnbt3Vhayjj5jmHK5IQeGm8g,8167
ai_code_assistant/retrieval/search.py,sha256=mp_4oeZqRqMh3O3UbjNXpWA15wcsxFTS2UxklesCsgo,8088
ai_code_assistant/reviewer/__init__.py,sha256=mwEsH5mpEh7KNBFpYSGQL7nYlxqIoBkaZAqPBCyUElw,277
ai_code_assistant/reviewer/analyzer.py,sha256=rHffh5EXGJndy4wU8c_5uFH_jKCcWiFr3w0PNzFqThM,8892
ai_code_assistant/reviewer/prompts.py,sha256=9RrHEBttS5ngxY2BNsUvqGC6-cTxco-kDPbZm848yp4,3473
ai_code_assistant/utils/__init__.py,sha256=3HO-1Bj4VvUtM7W1C3MKR4DzQ9Xc875QKSHHkHwuqVs,368
ai_code_assistant/utils/file_handler.py,sha256=jPxvtI5dJxkpPjELgRJ11WXamtyKKmZANQ1fcfMVtiU,5239
ai_code_assistant/utils/formatters.py,sha256=5El9ew9HS6JLBucBUxxcw4fO5nLpOucgNJrJj2NC3zw,8945
cognify_code-0.2.0.dist-info/licenses/LICENSE,sha256=5yu_kWq2bK-XKhWo79Eykdg4Qf3O8V2Ys7cpOO7GyyE,1063
cognify_code-0.2.0.dist-info/METADATA,sha256=x_c1ZCjoUGq_p3pX7SeU2NBkvsIbKry_SPfoKQiZeII,11862
cognify_code-0.2.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
cognify_code-0.2.0.dist-info/entry_points.txt,sha256=MrBnnWPHZVozqqKyTlnJO63YN2kE5yPWKlr2nnRFRks,94
cognify_code-0.2.0.dist-info/top_level.txt,sha256=dD_r1x-oX0s1uspYY72kig4jfIsjh3oDKwOBCMYXqpo,18
cognify_code-0.2.0.dist-info/RECORD,,
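
Each RECORD row has the form `path,sha256=<digest>,size`, where the digest is the file's SHA-256 hash encoded as URL-safe base64 with the trailing padding stripped. A small sketch for recomputing one of these values:

```python
# Recompute a RECORD-style hash for an installed file and compare it
# to the value listed above.
import base64
import hashlib
from pathlib import Path


def record_hash(path: Path) -> str:
    digest = hashlib.sha256(path.read_bytes()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")


# Example: the empty py.typed marker is recorded as
# sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU (the empty-file digest).
print(record_hash(Path("ai_code_assistant/py.typed")))
```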