cognify-code 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. ai_code_assistant/__init__.py +14 -0
  2. ai_code_assistant/agent/__init__.py +63 -0
  3. ai_code_assistant/agent/code_agent.py +461 -0
  4. ai_code_assistant/agent/code_generator.py +388 -0
  5. ai_code_assistant/agent/code_reviewer.py +365 -0
  6. ai_code_assistant/agent/diff_engine.py +308 -0
  7. ai_code_assistant/agent/file_manager.py +300 -0
  8. ai_code_assistant/agent/intent_classifier.py +284 -0
  9. ai_code_assistant/chat/__init__.py +11 -0
  10. ai_code_assistant/chat/agent_session.py +156 -0
  11. ai_code_assistant/chat/session.py +165 -0
  12. ai_code_assistant/cli.py +1571 -0
  13. ai_code_assistant/config.py +149 -0
  14. ai_code_assistant/editor/__init__.py +8 -0
  15. ai_code_assistant/editor/diff_handler.py +270 -0
  16. ai_code_assistant/editor/file_editor.py +350 -0
  17. ai_code_assistant/editor/prompts.py +146 -0
  18. ai_code_assistant/generator/__init__.py +7 -0
  19. ai_code_assistant/generator/code_gen.py +265 -0
  20. ai_code_assistant/generator/prompts.py +114 -0
  21. ai_code_assistant/git/__init__.py +6 -0
  22. ai_code_assistant/git/commit_generator.py +130 -0
  23. ai_code_assistant/git/manager.py +203 -0
  24. ai_code_assistant/llm.py +111 -0
  25. ai_code_assistant/providers/__init__.py +23 -0
  26. ai_code_assistant/providers/base.py +124 -0
  27. ai_code_assistant/providers/cerebras.py +97 -0
  28. ai_code_assistant/providers/factory.py +148 -0
  29. ai_code_assistant/providers/google.py +103 -0
  30. ai_code_assistant/providers/groq.py +111 -0
  31. ai_code_assistant/providers/ollama.py +86 -0
  32. ai_code_assistant/providers/openai.py +114 -0
  33. ai_code_assistant/providers/openrouter.py +130 -0
  34. ai_code_assistant/py.typed +0 -0
  35. ai_code_assistant/refactor/__init__.py +20 -0
  36. ai_code_assistant/refactor/analyzer.py +189 -0
  37. ai_code_assistant/refactor/change_plan.py +172 -0
  38. ai_code_assistant/refactor/multi_file_editor.py +346 -0
  39. ai_code_assistant/refactor/prompts.py +175 -0
  40. ai_code_assistant/retrieval/__init__.py +19 -0
  41. ai_code_assistant/retrieval/chunker.py +215 -0
  42. ai_code_assistant/retrieval/indexer.py +236 -0
  43. ai_code_assistant/retrieval/search.py +239 -0
  44. ai_code_assistant/reviewer/__init__.py +7 -0
  45. ai_code_assistant/reviewer/analyzer.py +278 -0
  46. ai_code_assistant/reviewer/prompts.py +113 -0
  47. ai_code_assistant/utils/__init__.py +18 -0
  48. ai_code_assistant/utils/file_handler.py +155 -0
  49. ai_code_assistant/utils/formatters.py +259 -0
  50. cognify_code-0.2.0.dist-info/METADATA +383 -0
  51. cognify_code-0.2.0.dist-info/RECORD +55 -0
  52. cognify_code-0.2.0.dist-info/WHEEL +5 -0
  53. cognify_code-0.2.0.dist-info/entry_points.txt +3 -0
  54. cognify_code-0.2.0.dist-info/licenses/LICENSE +22 -0
  55. cognify_code-0.2.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,156 @@
1
+ """Agent-enhanced chat session with code generation and review capabilities."""
2
+
3
+ from dataclasses import dataclass, field
4
+ from datetime import datetime
5
+ from pathlib import Path
6
+ from typing import Dict, List, Literal, Optional
7
+
8
+ from ai_code_assistant.config import Config
9
+ from ai_code_assistant.llm import LLMManager
10
+ from ai_code_assistant.agent import CodeAgent, AgentResponse, IntentType
11
+
12
+
13
@dataclass
class AgentMessage:
    """A message in the agent chat session."""
    # Author of the message; mirrors the chat-completion role convention.
    role: Literal["user", "assistant", "system"]
    # Plain-text body shown to the user.
    content: str
    # Creation time; defaults to now at construction.
    timestamp: datetime = field(default_factory=datetime.now)
    # Full structured agent result, present only on assistant turns
    # produced by CodeAgent.process.
    response: Optional[AgentResponse] = None
    # True when this message is waiting on a yes/no confirmation
    # from the user (see AgentChatSession._handle_confirmation).
    pending_action: bool = False
21
+
22
+
23
class AgentChatSession:
    """Chat session enhanced with code agent capabilities.

    Wraps a CodeAgent and records the conversation as AgentMessage
    objects, including a simple yes/no confirmation flow for changes
    the agent proposes before they are applied.
    """

    def __init__(
        self,
        config: Config,
        llm_manager: LLMManager,
        root_path: Optional[Path] = None,
    ):
        self.config = config
        self.llm = llm_manager
        # The agent operates on the given project root (cwd by default).
        self.agent = CodeAgent(llm_manager, root_path or Path.cwd())
        self.history: List[AgentMessage] = []
        # True while the agent has proposed changes that the user has
        # not yet confirmed or rejected.
        self._awaiting_confirmation = False

    @property
    def has_pending_changes(self) -> bool:
        """Check if there are pending changes awaiting confirmation."""
        return self._awaiting_confirmation

    def send_message(self, user_input: str) -> AgentMessage:
        """Process user message through the agent.

        If the previous assistant turn requires confirmation, the input
        is interpreted as a yes/no answer instead of a new request.
        """
        # Record the user turn before anything else.
        user_msg = AgentMessage(role="user", content=user_input)
        self.history.append(user_msg)

        if self._awaiting_confirmation:
            return self._handle_confirmation(user_input)

        response = self.agent.process(user_input)

        assistant_msg = AgentMessage(
            role="assistant",
            content=response.message,
            response=response,
            pending_action=response.requires_confirmation,
        )
        self.history.append(assistant_msg)

        # Track whether the next user input should be treated as a
        # confirmation answer.
        self._awaiting_confirmation = response.requires_confirmation

        return assistant_msg

    def _handle_confirmation(self, user_input: str) -> AgentMessage:
        """Handle user confirmation or rejection of pending changes.

        Bug fix: the resulting assistant message is now appended to
        ``self.history`` so the transcript matches what send_message
        records for every other assistant turn (previously these
        replies were returned but never stored).
        """
        lower_input = user_input.lower().strip()

        if lower_input in ("yes", "y", "confirm", "apply", "ok", "sure", "do it"):
            # confirm_changes returns (success, message); only the
            # message text is surfaced to the user here.
            _success, message = self.agent.confirm_changes()
            self._awaiting_confirmation = False
            msg = AgentMessage(
                role="assistant",
                content=message,
            )
        elif lower_input in ("no", "n", "cancel", "reject", "discard", "nevermind"):
            message = self.agent.reject_changes()
            self._awaiting_confirmation = False
            msg = AgentMessage(
                role="assistant",
                content=message,
            )
        else:
            # Ambiguous reply: keep waiting for an explicit yes/no.
            msg = AgentMessage(
                role="assistant",
                content="Please confirm with 'yes' to apply changes or 'no' to discard them.",
                pending_action=True,
            )

        self.history.append(msg)
        return msg

    def confirm_changes(self) -> str:
        """Programmatically confirm pending changes."""
        if not self._awaiting_confirmation:
            return "No pending changes."

        # Success flag is intentionally ignored; the message carries
        # the outcome either way.
        _success, message = self.agent.confirm_changes()
        self._awaiting_confirmation = False
        return message

    def reject_changes(self) -> str:
        """Programmatically reject pending changes."""
        if not self._awaiting_confirmation:
            return "No pending changes."

        message = self.agent.reject_changes()
        self._awaiting_confirmation = False
        return message

    def get_project_info(self) -> str:
        """Get information about the current project.

        Routed through the agent like any other request; the reply is
        not recorded in the conversation history.
        """
        response = self.agent.process("show project info")
        return response.message

    def review_file(self, file_path: str) -> AgentMessage:
        """Review a specific file."""
        return self.send_message(f"review {file_path}")

    def generate_code(self, description: str, file_path: Optional[str] = None) -> AgentMessage:
        """Generate code based on description, optionally targeting a file."""
        if file_path:
            return self.send_message(f"create {file_path}: {description}")
        return self.send_message(f"generate code: {description}")

    def explain_file(self, file_path: str) -> AgentMessage:
        """Explain a file's code."""
        return self.send_message(f"explain {file_path}")

    def get_history(self) -> List[AgentMessage]:
        """Get a shallow copy of the conversation history."""
        return self.history.copy()

    def clear_history(self) -> None:
        """Clear conversation history and drop any pending confirmation."""
        self.history.clear()
        self._awaiting_confirmation = False

    def format_response(self, msg: AgentMessage) -> str:
        """Format a message for display (uses Rich markup for the prompt)."""
        lines = [msg.content]

        if msg.pending_action:
            lines.append("")
            lines.append("[yellow]Apply these changes? (yes/no)[/yellow]")

        return "\n".join(lines)
@@ -0,0 +1,165 @@
1
+ """Interactive chat session for code discussions."""
2
+
3
+ from dataclasses import dataclass, field
4
+ from datetime import datetime
5
+ from pathlib import Path
6
+ from typing import Dict, List, Literal, Optional
7
+
8
+ from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
9
+
10
+ from ai_code_assistant.config import Config
11
+ from ai_code_assistant.llm import LLMManager
12
+
13
+
14
@dataclass
class Message:
    """A single message in the chat history."""
    # Author of the message; mirrors the chat-completion role convention.
    role: Literal["user", "assistant", "system"]
    # Plain-text body of the message.
    content: str
    # Creation time; defaults to now at construction.
    timestamp: datetime = field(default_factory=datetime.now)
    # Optional code snippet attached to this turn; not currently set by
    # ChatSession itself (context is tracked per-session instead).
    code_context: Optional[str] = None
21
+
22
+
23
# Default system prompt for ChatSession; callers may override it via the
# ``system_prompt`` constructor argument. Any loaded code context is
# appended to this text at message-build time (see _build_messages).
CHAT_SYSTEM_PROMPT = """You are an expert programming assistant with deep knowledge of software development.

You can help with:
- Explaining code and concepts
- Debugging issues
- Suggesting improvements
- Answering programming questions
- Discussing architecture and design patterns

When discussing code:
- Be clear and concise
- Provide code examples when helpful
- Explain your reasoning
- Consider edge cases and best practices

If code context is provided, reference it in your responses when relevant."""
39
+
40
+
41
class ChatSession:
    """Manages an interactive chat session.

    Keeps a rolling Message history, an optional set of code files used
    as context, and builds LangChain message lists for the underlying
    LLM on each turn.
    """

    def __init__(
        self,
        config: Config,
        llm_manager: LLMManager,
        system_prompt: Optional[str] = None,
    ):
        self.config = config
        self.llm = llm_manager
        self.system_prompt = system_prompt or CHAT_SYSTEM_PROMPT
        self.history: List[Message] = []
        # filename -> file contents, injected into the system message.
        self._code_context: Dict[str, str] = {}

    def add_code_context(self, filename: str, code: str) -> None:
        """Add code file to the conversation context."""
        self._code_context[filename] = code

    def load_file_context(self, file_path: Path) -> bool:
        """Load a file into the conversation context.

        Returns True on success, False if the file cannot be read or
        decoded. Fix: read as UTF-8 explicitly (the previous call used
        the platform default encoding) and catch only read/decode
        errors rather than every Exception.
        """
        try:
            code = file_path.read_text(encoding="utf-8")
        except (OSError, UnicodeDecodeError):
            return False
        self.add_code_context(str(file_path), code)
        return True

    def clear_context(self) -> None:
        """Clear all code context."""
        self._code_context.clear()

    def send_message(self, user_input: str, stream: bool = False):
        """Send a message and get a response.

        Returns the complete reply string, or — when ``stream`` is
        True — a generator yielding chunks of the reply.
        """
        # Record the user turn before building the LLM payload.
        self.history.append(Message(role="user", content=user_input))

        messages = self._build_messages(user_input)

        if stream:
            return self._stream_response(messages)
        return self._get_response(messages)

    def _build_messages(self, current_input: str) -> List:
        """Build message list for LLM including context and history."""
        messages: List = []

        # System message, with any loaded code files appended as
        # fenced blocks.
        system_content = self.system_prompt
        if self._code_context:
            context_str = "\n\n".join(
                f"**File: {name}**\n```\n{code}\n```"
                for name, code in self._code_context.items()
            )
            system_content += f"\n\n**Code Context:**\n{context_str}"

        messages.append(SystemMessage(content=system_content))

        # Keep only the most recent turns to bound the context window;
        # the final history entry is the current input, excluded here
        # because it is appended separately below.
        max_history = 20
        recent_history = self.history[-(max_history + 1):-1]

        for msg in recent_history:
            if msg.role == "user":
                messages.append(HumanMessage(content=msg.content))
            elif msg.role == "assistant":
                messages.append(AIMessage(content=msg.content))

        messages.append(HumanMessage(content=current_input))

        return messages

    def _get_response(self, messages: List) -> str:
        """Get complete response from LLM and record it in history."""
        response = self.llm.llm.invoke(messages)
        content = str(response.content)

        self.history.append(Message(role="assistant", content=content))

        return content

    def _stream_response(self, messages: List):
        """Stream response chunks from the LLM.

        The full reply is appended to history only after the stream is
        exhausted.
        """
        full_response: List[str] = []

        for chunk in self.llm.llm.stream(messages):
            chunk_content = str(chunk.content)
            full_response.append(chunk_content)
            yield chunk_content

        self.history.append(Message(role="assistant", content="".join(full_response)))

    def get_history(self) -> List[Message]:
        """Get a shallow copy of the conversation history."""
        return self.history.copy()

    def clear_history(self) -> None:
        """Clear conversation history (code context is kept)."""
        self.history.clear()

    def export_history(self) -> str:
        """Export conversation history as markdown."""
        lines = ["# Chat Session\n"]

        if self._code_context:
            lines.append("## Code Context\n")
            for name in self._code_context:
                lines.append(f"- {name}")
            lines.append("")

        lines.append("## Conversation\n")
        for msg in self.history:
            timestamp = msg.timestamp.strftime("%H:%M:%S")
            role = "**You**" if msg.role == "user" else "**Assistant**"
            lines.append(f"### {role} ({timestamp})\n")
            lines.append(msg.content)
            lines.append("")

        return "\n".join(lines)
165
+