cognify-code 0.2.3__py3-none-any.whl → 0.2.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ai_code_assistant/agent/code_agent.py +696 -2
- ai_code_assistant/chat/agent_session.py +91 -1
- ai_code_assistant/cli.py +73 -34
- ai_code_assistant/context/__init__.py +12 -0
- ai_code_assistant/context/analyzer.py +363 -0
- ai_code_assistant/context/selector.py +309 -0
- {cognify_code-0.2.3.dist-info → cognify_code-0.2.5.dist-info}/METADATA +1 -1
- {cognify_code-0.2.3.dist-info → cognify_code-0.2.5.dist-info}/RECORD +12 -9
- {cognify_code-0.2.3.dist-info → cognify_code-0.2.5.dist-info}/WHEEL +0 -0
- {cognify_code-0.2.3.dist-info → cognify_code-0.2.5.dist-info}/entry_points.txt +0 -0
- {cognify_code-0.2.3.dist-info → cognify_code-0.2.5.dist-info}/licenses/LICENSE +0 -0
- {cognify_code-0.2.3.dist-info → cognify_code-0.2.5.dist-info}/top_level.txt +0 -0
ai_code_assistant/chat/agent_session.py CHANGED
@@ -3,7 +3,7 @@
 from dataclasses import dataclass, field
 from datetime import datetime
 from pathlib import Path
-from typing import Dict, List, Literal, Optional
+from typing import Dict, Iterator, List, Literal, Optional, Tuple

 from ai_code_assistant.config import Config
 from ai_code_assistant.llm import LLMManager
@@ -67,6 +67,96 @@ class AgentChatSession:

         return assistant_msg

+
+    def send_message_stream(self, user_input: str) -> Iterator[Tuple[str, Optional[AgentMessage]]]:
+        """
+        Process user message with streaming output.
+
+        Yields tuples of (chunk, final_message).
+        During streaming, final_message is None.
+        The last yield will have the complete AgentMessage.
+        """
+        # Add user message to history
+        user_msg = AgentMessage(role="user", content=user_input)
+        self.history.append(user_msg)
+
+        # Check for confirmation/rejection of pending changes
+        if self._awaiting_confirmation:
+            msg = self._handle_confirmation(user_input)
+            yield (msg.content, msg)
+            return
+
+        # Process through agent with streaming
+        full_content = ""
+        final_response = None
+
+        for chunk, response in self.agent.process_stream(user_input):
+            full_content += chunk
+            yield (chunk, None)
+            if response is not None:
+                final_response = response
+
+        # Create assistant message
+        assistant_msg = AgentMessage(
+            role="assistant",
+            content=full_content,
+            response=final_response,
+            pending_action=final_response.requires_confirmation if final_response else False,
+        )
+        self.history.append(assistant_msg)
+
+        # Track if we're awaiting confirmation
+        if final_response:
+            self._awaiting_confirmation = final_response.requires_confirmation
+
+        yield ("", assistant_msg)
+
+
+
+    def send_message_stream(self, user_input: str) -> Iterator[Tuple[str, Optional[AgentMessage]]]:
+        """
+        Process user message with streaming output.
+
+        Yields tuples of (chunk, final_message).
+        During streaming, final_message is None.
+        The last yield will have the complete AgentMessage.
+        """
+        # Add user message to history
+        user_msg = AgentMessage(role="user", content=user_input)
+        self.history.append(user_msg)
+
+        # Check for confirmation/rejection of pending changes
+        if self._awaiting_confirmation:
+            msg = self._handle_confirmation(user_input)
+            yield (msg.content, msg)
+            return
+
+        # Process through agent with streaming
+        full_content = ""
+        final_response = None
+
+        for chunk, response in self.agent.process_stream(user_input):
+            full_content += chunk
+            yield (chunk, None)
+            if response is not None:
+                final_response = response
+
+        # Create assistant message
+        assistant_msg = AgentMessage(
+            role="assistant",
+            content=full_content,
+            response=final_response,
+            pending_action=final_response.requires_confirmation if final_response else False,
+        )
+        self.history.append(assistant_msg)
+
+        # Track if we're awaiting confirmation
+        if final_response:
+            self._awaiting_confirmation = final_response.requires_confirmation
+
+        yield ("", assistant_msg)
+
+
     def _handle_confirmation(self, user_input: str) -> AgentMessage:
         """Handle user confirmation or rejection of pending changes."""
         lower_input = user_input.lower().strip()
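The new generator yields (chunk, final_message) pairs: text chunks while the agent is working, and the assembled AgentMessage only on the last yield. A minimal consumer sketch, mirroring the loop the CLI's agent command uses below; the session object is assumed to be an already-constructed AgentChatSession (its setup is outside this diff):

    # Sketch: driving AgentChatSession.send_message_stream.
    # `session` is assumed to be an AgentChatSession built elsewhere; the loop
    # mirrors the ai_code_assistant/cli.py code shown further down in this diff.
    def run_turn(session, user_input: str):
        final_msg = None
        for chunk, msg in session.send_message_stream(user_input):
            if chunk:                      # incremental text while streaming
                print(chunk, end="", flush=True)
            if msg is not None:            # only the last yield carries the AgentMessage
                final_msg = msg
        print()
        if final_msg and final_msg.pending_action:
            # The next send_message_stream call is routed through _handle_confirmation.
            print("Apply these changes? (yes/no)")
        return final_msg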
ai_code_assistant/cli.py CHANGED

@@ -17,6 +17,7 @@ from ai_code_assistant.generator import CodeGenerator
 from ai_code_assistant.chat import ChatSession
 from ai_code_assistant.editor import FileEditor
 from ai_code_assistant.utils import FileHandler, get_formatter
+from ai_code_assistant.context import ContextSelector, ContextConfig

 console = Console()

@@ -89,9 +90,14 @@ def main(ctx, config: Optional[Path], verbose: bool):
               type=click.Choice(["console", "markdown", "json"]), help="Output format")
 @click.option("--output", "-o", type=click.Path(path_type=Path), help="Output file path")
 @click.option("--recursive", "-r", is_flag=True, help="Recursively review directories")
+@click.option("--context", multiple=True, type=click.Path(exists=True, path_type=Path),
+              help="Additional context files to include")
+@click.option("--auto-context", is_flag=True, help="Automatically include related files as context")
+@click.option("--max-context-tokens", type=int, default=8000, help="Max tokens for context")
 @click.pass_context
 def review(ctx, files: Tuple[Path, ...], review_type: str, output_format: str,
-           output: Optional[Path], recursive: bool):
+           output: Optional[Path], recursive: bool, context: Tuple[Path, ...],
+           auto_context: bool, max_context_tokens: int):
     """Review code files for issues and improvements."""
     if not files:
         console.print("[red]Error:[/red] No files specified")
@@ -157,10 +163,15 @@ def review(ctx, files: Tuple[Path, ...], review_type: str, output_format: str,
 @click.option("--source", "-s", type=click.Path(exists=True, path_type=Path),
               help="Source file (for test mode)")
 @click.option("--stream/--no-stream", default=True, help="Stream output in real-time")
+@click.option("--context", multiple=True, type=click.Path(exists=True, path_type=Path),
+              help="Context files to include for better generation")
+@click.option("--auto-context", is_flag=True, help="Automatically find relevant context files")
+@click.option("--max-context-tokens", type=int, default=8000, help="Max tokens for context")
 @click.pass_context
 def generate(ctx, description: str, mode: str, language: str, name: Optional[str],
              params: Optional[str], output: Optional[Path], output_format: str,
-             source: Optional[Path], stream: bool):
+             source: Optional[Path], stream: bool, context: Tuple[Path, ...],
+             auto_context: bool, max_context_tokens: int):
     """Generate code from natural language description."""
     from rich.live import Live
     from rich.markdown import Markdown
@@ -528,10 +539,15 @@ def search(ctx, query: str, top_k: int, file_filter: Optional[str],
               type=click.Choice(["console", "json"]), help="Output format")
 @click.option("--start-line", "-s", type=int, help="Start line for targeted edit")
 @click.option("--end-line", "-e", type=int, help="End line for targeted edit")
+@click.option("--context", multiple=True, type=click.Path(exists=True, path_type=Path),
+              help="Additional context files to include")
+@click.option("--auto-context", is_flag=True, help="Automatically include related files as context")
+@click.option("--max-context-tokens", type=int, default=8000, help="Max tokens for context")
 @click.pass_context
 def edit(ctx, file: Path, instruction: str, mode: str, preview: bool,
          no_backup: bool, output_format: str, start_line: Optional[int],
-         end_line: Optional[int]):
+         end_line: Optional[int], context: Tuple[Path, ...], auto_context: bool,
+         max_context_tokens: int):
     """Edit a file using AI based on natural language instructions.

     Examples:
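The same --context / --auto-context / --max-context-tokens trio is added to review, generate, and edit. How the flags feed the new context module is not visible in this excerpt; a hypothetical wiring sketch, in which the ContextConfig field and ContextSelector constructor/method names are assumptions (only the class names, the flag names, and the 8000-token default come from the diff):

    # Hypothetical wiring inside one of the commands -- field/method names are guesses.
    from pathlib import Path
    from ai_code_assistant.context import ContextSelector, ContextConfig

    def build_context(files, context, auto_context, max_context_tokens):
        if not context and not auto_context:
            return None
        cfg = ContextConfig(max_tokens=max_context_tokens)    # assumed field name
        selector = ContextSelector(Path(".").resolve(), cfg)  # assumed constructor shape
        return selector.select(                               # assumed method name
            targets=list(files),
            extra=list(context),
            auto=auto_context,
        )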
@@ -1491,21 +1507,20 @@ def agent(ctx, path: Path):
                 console.print("[green]No pending changes.[/green]")
                 continue

-            # Process through agent
-            with console.status("[bold green]Thinking..."):
-                response = session.send_message(user_input)
-
-            # Display response
+            # Process through agent with streaming
             console.print(f"\n[bold green]Agent[/bold green]")

-
-
-
-
-
+            final_msg = None
+            for chunk, msg in session.send_message_stream(user_input):
+                if chunk:
+                    console.print(chunk, end="")
+                if msg is not None:
+                    final_msg = msg
+
+            console.print()  # Newline after streaming

             # Show confirmation prompt if needed
-            if
+            if final_msg and final_msg.pending_action:
                 console.print("\n[yellow]Apply these changes? (yes/no)[/yellow]")

         except KeyboardInterrupt:
@@ -1565,13 +1580,15 @@ def _show_agent_help():
 @click.argument("file", type=click.Path(exists=True, path_type=Path))
 @click.option("--path", "-p", type=click.Path(exists=True, path_type=Path),
               default=".", help="Project root path")
+@click.option("--stream/--no-stream", default=True, help="Stream output in real-time")
 @click.pass_context
-def agent_review(ctx, file: Path, path: Path):
+def agent_review(ctx, file: Path, path: Path, stream: bool):
     """Quick code review using the agent.

     Examples:
         ai-assist agent-review src/main.py
         ai-assist agent-review utils.py --path ./my-project
+        ai-assist agent-review main.py --no-stream
     """
     from ai_code_assistant.agent import CodeAgent

@@ -1580,10 +1597,15 @@ def agent_review(ctx, file: Path, path: Path):

     console.print(f"\n[bold]Reviewing {file}...[/bold]\n")

-
-    response
-
-
+    if stream:
+        for chunk, response in agent.process_stream(f"review {file}"):
+            if chunk:
+                console.print(chunk, end="")
+        console.print()
+    else:
+        with console.status("[bold green]Analyzing..."):
+            response = agent.process(f"review {file}")
+        console.print(response.message)


 @main.command("agent-generate")
@@ -1593,15 +1615,17 @@ def agent_review(ctx, file: Path, path: Path):
 @click.option("--path", "-p", type=click.Path(exists=True, path_type=Path),
               default=".", help="Project root path")
 @click.option("--apply", "-a", is_flag=True, help="Apply changes without confirmation")
+@click.option("--stream/--no-stream", default=True, help="Stream output in real-time")
 @click.pass_context
 def agent_generate(ctx, description: str, file: Optional[Path], language: Optional[str],
-                   path: Path, apply: bool):
+                   path: Path, apply: bool, stream: bool):
     """Generate code using the agent.

     Examples:
         ai-assist agent-generate "a function to validate email"
         ai-assist agent-generate "REST API for users" -f src/api.py
         ai-assist agent-generate "sorting algorithm" -l python
+        ai-assist agent-generate "hello world" --no-stream
     """
     from ai_code_assistant.agent import CodeAgent

@@ -1617,12 +1641,20 @@ def agent_generate(ctx, description: str, file: Optional[Path], language: Option

     console.print(f"\n[bold]Generating code...[/bold]\n")

-
-
-
-
+    final_response = None
+    if stream:
+        for chunk, response in agent.process_stream(request):
+            if chunk:
+                console.print(chunk, end="")
+            if response is not None:
+                final_response = response
+        console.print()
+    else:
+        with console.status("[bold green]Generating..."):
+            final_response = agent.process(request)
+        console.print(final_response.message)

-    if
+    if final_response and final_response.requires_confirmation:
         if apply:
             success, msg = agent.confirm_changes()
             console.print(f"\n{msg}")
@@ -1638,29 +1670,36 @@ def agent_generate(ctx, description: str, file: Optional[Path], language: Option
 @click.argument("file", type=click.Path(exists=True, path_type=Path))
 @click.option("--path", "-p", type=click.Path(exists=True, path_type=Path),
               default=".", help="Project root path")
+@click.option("--stream/--no-stream", default=True, help="Stream output in real-time")
 @click.pass_context
-def agent_explain(ctx, file: Path, path: Path):
+def agent_explain(ctx, file: Path, path: Path, stream: bool):
     """Explain code using the agent.

     Examples:
         ai-assist agent-explain src/main.py
         ai-assist agent-explain config.py --path ./my-project
+        ai-assist agent-explain main.py --no-stream
     """
     from ai_code_assistant.agent import CodeAgent
-    from rich.markdown import Markdown

     config, llm = get_components(ctx.obj.get("config_path"))
     agent = CodeAgent(llm, path.resolve())

     console.print(f"\n[bold]Explaining {file}...[/bold]\n")

-
-    response
-
-
-    console.print(
-
-
+    if stream:
+        for chunk, response in agent.process_stream(f"explain {file}"):
+            if chunk:
+                console.print(chunk, end="")
+        console.print()
+    else:
+        from rich.markdown import Markdown
+        with console.status("[bold green]Analyzing..."):
+            response = agent.process(f"explain {file}")
+        try:
+            console.print(Markdown(response.message))
+        except Exception:
+            console.print(response.message)


 if __name__ == "__main__":
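All three agent-* commands now drive the same CodeAgent.process_stream generator. A minimal sketch of consuming it outside Click; the llm argument is assumed to be an already-configured LLM manager (its construction is not part of this diff), and the request string is only an example:

    # Sketch: consuming CodeAgent.process_stream directly (outside the CLI).
    # CodeAgent(llm, root), process_stream, requires_confirmation and
    # confirm_changes() all appear in the diff above; `llm` is a given here.
    from pathlib import Path
    from ai_code_assistant.agent import CodeAgent

    def run_agent_request(llm, request: str = "explain src/main.py"):
        agent = CodeAgent(llm, Path(".").resolve())
        final_response = None
        for chunk, response in agent.process_stream(request):
            if chunk:
                print(chunk, end="", flush=True)   # incremental output
            if response is not None:               # last yield carries the full response
                final_response = response
        print()
        if final_response and final_response.requires_confirmation:
            # Relevant when the request generated edits, as in agent-generate.
            success, msg = agent.confirm_changes()
            print(msg)
        return final_response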
ai_code_assistant/context/__init__.py ADDED
@@ -0,0 +1,12 @@
+"""Context-aware codebase understanding module."""
+
+from .analyzer import ContextAnalyzer, FileContext
+from .selector import ContextSelector, ContextConfig, ContextResult
+
+__all__ = [
+    "ContextAnalyzer",
+    "ContextSelector",
+    "ContextConfig",
+    "ContextResult",
+    "FileContext",
+]
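The new package re-exports five names, so callers only need the top-level import that cli.py already uses. A quick sketch of that public surface; anything beyond the import itself is out of scope here, since analyzer.py (+363) and selector.py (+309) are not included in this excerpt:

    # Public surface of the new context package, as re-exported by __init__.py.
    from ai_code_assistant.context import (
        ContextAnalyzer,   # defined in .analyzer (not shown in this excerpt)
        ContextSelector,   # defined in .selector (not shown in this excerpt)
        ContextConfig,
        ContextResult,
        FileContext,
    )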