cognify-code 0.2.4__py3-none-any.whl → 0.2.6__py3-none-any.whl
This diff compares the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.
- ai_code_assistant/agent/code_agent.py +31 -334
- ai_code_assistant/cli.py +92 -11
- ai_code_assistant/context/__init__.py +12 -0
- ai_code_assistant/context/analyzer.py +363 -0
- ai_code_assistant/context/selector.py +309 -0
- ai_code_assistant/providers/base.py +94 -4
- {cognify_code-0.2.4.dist-info → cognify_code-0.2.6.dist-info}/METADATA +94 -161
- {cognify_code-0.2.4.dist-info → cognify_code-0.2.6.dist-info}/RECORD +12 -9
- {cognify_code-0.2.4.dist-info → cognify_code-0.2.6.dist-info}/WHEEL +0 -0
- {cognify_code-0.2.4.dist-info → cognify_code-0.2.6.dist-info}/entry_points.txt +0 -0
- {cognify_code-0.2.4.dist-info → cognify_code-0.2.6.dist-info}/licenses/LICENSE +0 -0
- {cognify_code-0.2.4.dist-info → cognify_code-0.2.6.dist-info}/top_level.txt +0 -0
ai_code_assistant/agent/code_agent.py
CHANGED

```diff
@@ -9,6 +9,7 @@ from ai_code_assistant.agent.intent_classifier import IntentClassifier, Intent,
 from ai_code_assistant.agent.code_generator import CodeGenerator, CodeGenerationRequest, GeneratedCode
 from ai_code_assistant.agent.diff_engine import DiffEngine, ChangeSet, FileDiff
 from ai_code_assistant.agent.code_reviewer import CodeReviewer, ReviewResult
+from ai_code_assistant.context import ContextSelector, ContextConfig
 
 
 @dataclass
@@ -30,7 +31,7 @@ class AgentResponse:
 class CodeAgent:
     """Main agent that orchestrates code operations based on user intent."""
 
-    def __init__(self, llm_manager, root_path: Optional[Path] = None):
+    def __init__(self, llm_manager, root_path: Optional[Path] = None, auto_context: bool = True):
         self.llm = llm_manager
         self.file_manager = FileContextManager(root_path)
         self.intent_classifier = IntentClassifier(llm_manager)
@@ -38,6 +39,13 @@ class CodeAgent:
         self.diff_engine = DiffEngine(self.file_manager)
         self.code_reviewer = CodeReviewer(llm_manager, self.file_manager)
 
+        # Context selector for smart context gathering
+        self.auto_context = auto_context
+        self.context_selector = ContextSelector(
+            root_path=root_path,
+            config=ContextConfig(max_tokens=8000, max_files=10)
+        )
+
         # Pending changes awaiting confirmation
         self._pending_changeset: Optional[ChangeSet] = None
 
@@ -84,6 +92,28 @@ class CodeAgent:
         self._pending_changeset = None
         return "Changes discarded."
 
+    def _get_relevant_context(self, query: str, file_path: Optional[Path] = None) -> str:
+        """Get relevant context for a query using the context selector."""
+        if not self.auto_context:
+            return ""
+
+        try:
+            # Get context based on query and optional file path
+            target_file = str(file_path) if file_path else None
+            context_result = self.context_selector.select_for_query(
+                query=query,
+                target_file=target_file
+            )
+
+            if not context_result.files:
+                return ""
+
+            # Use the built-in formatting
+            return context_result.format_for_prompt(include_summary=False)
+        except Exception:
+            # If context gathering fails, continue without it
+            return ""
+
     def _handle_generate(self, message: str, intent: Intent) -> AgentResponse:
         """Handle code generation requests."""
         request = CodeGenerationRequest(
```
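The `_get_relevant_context` helper added above is deliberately fail-soft: context selection is a best-effort enhancement, and any exception degrades to an empty string rather than aborting the request. A self-contained sketch of that pattern follows; `select_context` and `get_relevant_context` here are hypothetical stand-ins for the package's internals, not its real API.

```python
from typing import Optional

def select_context(query: str, target_file: Optional[str]) -> str:
    # Hypothetical stand-in for ContextSelector.select_for_query(...); may raise.
    raise RuntimeError("context index unavailable")

def get_relevant_context(query: str, target_file: Optional[str] = None,
                         auto_context: bool = True) -> str:
    """Best-effort lookup: any failure degrades to an empty string."""
    if not auto_context:
        return ""
    try:
        return select_context(query, target_file)
    except Exception:
        # Context is an enhancement, not a requirement - never block the request.
        return ""

# The request proceeds even though the selector raised:
print(repr(get_relevant_context("add retry logic", "src/http.py")))  # -> ''
```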
````diff
@@ -778,339 +808,6 @@ Provide a helpful, concise response.
         response = AgentResponse(message=full_response, intent=intent)
         yield ("", response)
 
-
-
-    def process_stream(self, message: str, use_llm_classification: bool = True) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
-        """
-        Process a user message with streaming output.
-
-        Yields tuples of (chunk, final_response).
-        During streaming, final_response is None.
-        The last yield will have the complete AgentResponse.
-        """
-        # Classify intent (non-streaming, it's fast)
-        if use_llm_classification:
-            intent = self.intent_classifier.classify_with_llm(message)
-        else:
-            intent = self.intent_classifier.classify(message)
-
-        # Route to appropriate streaming handler
-        streaming_handlers = {
-            IntentType.CODE_GENERATE: self._handle_generate_stream,
-            IntentType.CODE_EDIT: self._handle_edit_stream,
-            IntentType.CODE_REVIEW: self._handle_review_stream,
-            IntentType.CODE_EXPLAIN: self._handle_explain_stream,
-            IntentType.CODE_REFACTOR: self._handle_refactor_stream,
-            IntentType.GENERAL_CHAT: self._handle_general_chat_stream,
-        }
-
-        handler = streaming_handlers.get(intent.type)
-
-        if handler:
-            yield from handler(message, intent)
-        else:
-            # Fall back to non-streaming for other intents
-            response = self.process(message, use_llm_classification)
-            yield (response.message, response)
-
-    def _handle_explain_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
-        """Handle code explanation with streaming."""
-        if not intent.file_paths:
-            response = AgentResponse(
-                message="Please specify which file or code you want me to explain.",
-                intent=intent,
-            )
-            yield (response.message, response)
-            return
-
-        file_path = intent.file_paths[0]
-        content = self.file_manager.read_file(file_path)
-
-        if not content:
-            response = AgentResponse(
-                message=f"Cannot find file: {file_path}",
-                intent=intent,
-            )
-            yield (response.message, response)
-            return
-
-        prompt = f"""Explain the following code in a clear, educational way.
-
-## Code ({file_path})
-```
-{content[:5000]}
-```
-
-## Instructions
-1. Start with a high-level overview
-2. Explain the main components/functions
-3. Describe the flow of execution
-4. Note any important patterns or techniques used
-5. Keep the explanation concise but thorough
-"""
-
-        # Stream the explanation
-        full_response = f"📖 **Explanation of {file_path}**\n\n"
-        yield (f"📖 **Explanation of {file_path}**\n\n", None)
-
-        for chunk in self.llm.stream(prompt):
-            full_response += chunk
-            yield (chunk, None)
-
-        # Final response
-        response = AgentResponse(
-            message=full_response,
-            intent=intent,
-        )
-        yield ("", response)
-
-    def _handle_review_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
-        """Handle code review with streaming."""
-        if not intent.file_paths:
-            context = self.file_manager.get_project_context()
-            py_files = [f.relative_path for f in context.files if f.extension == ".py"][:5]
-
-            if py_files:
-                msg = f"Which file would you like me to review? Found these Python files:\n" + \
-                      "\n".join(f" • {f}" for f in py_files)
-            else:
-                msg = "Please specify which file you want me to review."
-
-            response = AgentResponse(message=msg, intent=intent)
-            yield (msg, response)
-            return
-
-        file_path = intent.file_paths[0]
-        content = self.file_manager.read_file(file_path)
-
-        if not content:
-            response = AgentResponse(
-                message=f"Cannot find file: {file_path}",
-                intent=intent,
-            )
-            yield (response.message, response)
-            return
-
-        yield (f"🔍 **Reviewing {file_path}...**\n\n", None)
-
-        # Use streaming for the review
-        prompt = f"""Review the following code for issues, bugs, and improvements.
-
-## Code ({file_path})
-```
-{content[:5000]}
-```
-
-## Review Format
-Provide a structured review with:
-1. **Summary** - Brief overview
-2. **Issues** - List any bugs, security issues, or problems
-3. **Suggestions** - Improvements and best practices
-4. **Score** - Rate the code quality (1-10)
-
-Be specific and actionable.
-"""
-
-        full_response = f"🔍 **Reviewing {file_path}...**\n\n"
-
-        for chunk in self.llm.stream(prompt):
-            full_response += chunk
-            yield (chunk, None)
-
-        response = AgentResponse(
-            message=full_response,
-            intent=intent,
-        )
-        yield ("", response)
-
-    def _handle_generate_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
-        """Handle code generation with streaming."""
-        yield ("🔨 **Generating code...**\n\n", None)
-
-        request = CodeGenerationRequest(
-            description=message,
-            language=intent.language,
-            file_path=intent.file_paths[0] if intent.file_paths else None,
-        )
-
-        # Generate code (this part streams)
-        full_code = ""
-        prompt = self.code_generator._build_prompt(request)
-
-        for chunk in self.llm.stream(prompt):
-            full_code += chunk
-            yield (chunk, None)
-
-        # Extract and create changeset
-        code = self._extract_code(full_code)
-        file_path = request.file_path or f"generated.{request.language or 'py'}"
-
-        generated = GeneratedCode(
-            code=code,
-            language=request.language or "python",
-            file_path=file_path,
-            description=request.description,
-        )
-
-        changeset = ChangeSet(description=f"Generate: {message[:50]}...")
-        diff = self.diff_engine.create_file_diff(generated.file_path, generated.code)
-        changeset.diffs.append(diff)
-
-        self._pending_changeset = changeset
-
-        response = AgentResponse(
-            message=f"\n\n✅ Code generated for {file_path}",
-            intent=intent,
-            generated_code=generated,
-            changeset=changeset,
-            requires_confirmation=True,
-        )
-        yield ("\n\n✅ Code generated. Apply changes? (yes/no)", response)
-
-    def _handle_edit_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
-        """Handle code editing with streaming."""
-        if not intent.file_paths:
-            response = AgentResponse(
-                message="Please specify which file you want to edit.",
-                intent=intent,
-            )
-            yield (response.message, response)
-            return
-
-        file_path = intent.file_paths[0]
-        original = self.file_manager.read_file(file_path)
-
-        if not original:
-            response = AgentResponse(
-                message=f"Cannot find file: {file_path}",
-                intent=intent,
-            )
-            yield (response.message, response)
-            return
-
-        yield (f"✏️ **Editing {file_path}...**\n\n", None)
-
-        prompt = f"""Edit the following code according to the user's request.
-
-## Original Code ({file_path})
-```
-{original[:5000]}
-```
-
-## User Request
-{message}
-
-## Instructions
-Return the COMPLETE modified file.
-
-```
-"""
-
-        full_response = ""
-        for chunk in self.llm.stream(prompt):
-            full_response += chunk
-            yield (chunk, None)
-
-        new_code = self._extract_code(full_response)
-
-        changeset = ChangeSet(description=f"Edit: {message[:50]}...")
-        diff = self.diff_engine.create_diff(original, new_code, file_path)
-        changeset.diffs.append(diff)
-
-        self._pending_changeset = changeset
-
-        response = AgentResponse(
-            message=f"\n\n✅ Edit complete for {file_path}",
-            intent=intent,
-            changeset=changeset,
-            requires_confirmation=True,
-        )
-        yield ("\n\n✅ Edit complete. Apply changes? (yes/no)", response)
-
-    def _handle_refactor_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
-        """Handle code refactoring with streaming."""
-        if not intent.file_paths:
-            response = AgentResponse(
-                message="Please specify which file you want to refactor.",
-                intent=intent,
-            )
-            yield (response.message, response)
-            return
-
-        file_path = intent.file_paths[0]
-        original = self.file_manager.read_file(file_path)
-
-        if not original:
-            response = AgentResponse(
-                message=f"Cannot find file: {file_path}",
-                intent=intent,
-            )
-            yield (response.message, response)
-            return
-
-        yield (f"🔄 **Refactoring {file_path}...**\n\n", None)
-
-        prompt = f"""Refactor the following code to improve its quality.
-
-## Original Code ({file_path})
-```
-{original[:5000]}
-```
-
-## User Request
-{message}
-
-Return the COMPLETE refactored file.
-
-```
-"""
-
-        full_response = ""
-        for chunk in self.llm.stream(prompt):
-            full_response += chunk
-            yield (chunk, None)
-
-        new_code = self._extract_code(full_response)
-
-        changeset = ChangeSet(description=f"Refactor: {file_path}")
-        diff = self.diff_engine.create_diff(original, new_code, file_path)
-        changeset.diffs.append(diff)
-
-        self._pending_changeset = changeset
-
-        response = AgentResponse(
-            message=f"\n\n✅ Refactoring complete for {file_path}",
-            intent=intent,
-            changeset=changeset,
-            requires_confirmation=True,
-        )
-        yield ("\n\n✅ Refactoring complete. Apply changes? (yes/no)", response)
-
-    def _handle_general_chat_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
-        """Handle general chat with streaming."""
-        context = self.file_manager.get_project_context()
-
-        prompt = f"""You are a helpful coding assistant. Answer the user's question.
-
-Project context:
-- Root: {context.root_path.name}
-- Languages: {', '.join(context.languages)}
-- Files: {context.total_code_files} code files
-
-User: {message}
-
-Provide a helpful, concise response.
-"""
-
-        full_response = ""
-        for chunk in self.llm.stream(prompt):
-            full_response += chunk
-            yield (chunk, None)
-
-        response = AgentResponse(message=full_response, intent=intent)
-        yield ("", response)
-
-
     def _extract_code(self, response: str) -> str:
         """Extract code from LLM response."""
         import re
````
ai_code_assistant/cli.py
CHANGED

```diff
@@ -17,6 +17,7 @@ from ai_code_assistant.generator import CodeGenerator
 from ai_code_assistant.chat import ChatSession
 from ai_code_assistant.editor import FileEditor
 from ai_code_assistant.utils import FileHandler, get_formatter
+from ai_code_assistant.context import ContextSelector, ContextConfig
 
 console = Console()
 
@@ -65,6 +66,59 @@ def get_components(config_path: Optional[Path] = None):
     return config, llm
 
 
+def validate_llm_connection(llm: LLMManager, exit_on_failure: bool = True) -> bool:
+    """
+    Validate LLM connection before operations.
+
+    Args:
+        llm: The LLM manager instance
+        exit_on_failure: If True, exit with error code on failure
+
+    Returns:
+        True if connection is valid, False otherwise
+    """
+    try:
+        info = llm.get_model_info()
+        provider = info.get("provider", "ollama")
+
+        # Quick validation - don't do full connection check for cloud providers with API keys
+        if provider != "ollama" and llm.config.llm.api_key:
+            return True
+
+        # For Ollama, check connection
+        if provider == "ollama":
+            # Try a lightweight check first
+            import socket
+            base_url = info.get("base_url", "http://localhost:11434")
+            host = base_url.replace("http://", "").replace("https://", "").split(":")[0]
+            port = int(base_url.split(":")[-1].split("/")[0]) if ":" in base_url else 11434
+
+            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            sock.settimeout(2)
+            result = sock.connect_ex((host, port))
+            sock.close()
+
+            if result != 0:
+                console.print(f"\n[red]Error: Cannot connect to Ollama at {base_url}[/red]")
+                console.print("\n[yellow]Quick fix:[/yellow]")
+                console.print(" 1. Make sure Ollama is installed: [cyan]https://ollama.ai[/cyan]")
+                console.print(f" 2. Pull the model: [cyan]ollama pull {info.get('model', 'deepseek-coder:6.7b')}[/cyan]")
+                console.print(" 3. Start Ollama: [cyan]ollama serve[/cyan]")
+                console.print("\n[dim]Or use a cloud provider: cognify status --help[/dim]")
+
+                if exit_on_failure:
+                    sys.exit(1)
+                return False
+
+        return True
+
+    except Exception as e:
+        console.print(f"\n[red]Error validating LLM connection: {e}[/red]")
+        if exit_on_failure:
+            sys.exit(1)
+        return False
+
+
 @click.group(invoke_without_command=True)
 @click.version_option(version=__version__, prog_name="cognify")
 @click.option("--config", "-c", type=click.Path(exists=True, path_type=Path), help="Config file path")
```
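The Ollama branch above avoids a full HTTP round-trip by probing the TCP port directly with a two-second timeout. A standalone sketch of the same probe (the function name is ours); it parses the scheme-stripped `host:port` form, which also sidesteps the always-true `":" in base_url` test in the original.

```python
import socket

def ollama_port_open(base_url: str = "http://localhost:11434", timeout: float = 2.0) -> bool:
    """Return True if a TCP connection to the Ollama host:port succeeds."""
    hostport = base_url.replace("http://", "").replace("https://", "")
    host = hostport.split(":")[0]
    port = int(hostport.split(":")[-1].split("/")[0]) if ":" in hostport else 11434
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(timeout)
    try:
        return sock.connect_ex((host, port)) == 0  # 0 means the port accepted the connection
    finally:
        sock.close()

print(ollama_port_open())
```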
```diff
@@ -89,15 +143,21 @@ def main(ctx, config: Optional[Path], verbose: bool):
               type=click.Choice(["console", "markdown", "json"]), help="Output format")
 @click.option("--output", "-o", type=click.Path(path_type=Path), help="Output file path")
 @click.option("--recursive", "-r", is_flag=True, help="Recursively review directories")
+@click.option("--context", multiple=True, type=click.Path(exists=True, path_type=Path),
+              help="Additional context files to include")
+@click.option("--auto-context", is_flag=True, help="Automatically include related files as context")
+@click.option("--max-context-tokens", type=int, default=8000, help="Max tokens for context")
 @click.pass_context
 def review(ctx, files: Tuple[Path, ...], review_type: str, output_format: str,
-           output: Optional[Path], recursive: bool):
+           output: Optional[Path], recursive: bool, context: Tuple[Path, ...],
+           auto_context: bool, max_context_tokens: int):
     """Review code files for issues and improvements."""
     if not files:
         console.print("[red]Error:[/red] No files specified")
         sys.exit(1)
 
     config, llm = get_components(ctx.obj.get("config_path"))
+    validate_llm_connection(llm)
     analyzer = CodeAnalyzer(config, llm)
     file_handler = FileHandler(config)
     formatter = get_formatter(output_format, config.output.use_colors)
```
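The same trio of flags (`--context`, `--auto-context`, `--max-context-tokens`) is added to `generate` and `edit` in the hunks below. A hypothetical smoke test of the `review` variant via click's test runner, assuming the `main` group is importable as shown; the file path is illustrative.

```python
from click.testing import CliRunner

from ai_code_assistant.cli import main  # assumption: the click group defined in this file

runner = CliRunner()
result = runner.invoke(main, [
    "review", "src/app.py",
    "--auto-context",
    "--max-context-tokens", "4000",
])
print(result.exit_code, result.output)
```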
```diff
@@ -157,16 +217,22 @@ def review(ctx, files: Tuple[Path, ...], review_type: str, output_format: str,
 @click.option("--source", "-s", type=click.Path(exists=True, path_type=Path),
               help="Source file (for test mode)")
 @click.option("--stream/--no-stream", default=True, help="Stream output in real-time")
+@click.option("--context", multiple=True, type=click.Path(exists=True, path_type=Path),
+              help="Context files to include for better generation")
+@click.option("--auto-context", is_flag=True, help="Automatically find relevant context files")
+@click.option("--max-context-tokens", type=int, default=8000, help="Max tokens for context")
 @click.pass_context
 def generate(ctx, description: str, mode: str, language: str, name: Optional[str],
              params: Optional[str], output: Optional[Path], output_format: str,
-             source: Optional[Path], stream: bool):
+             source: Optional[Path], stream: bool, context: Tuple[Path, ...],
+             auto_context: bool, max_context_tokens: int):
     """Generate code from natural language description."""
     from rich.live import Live
     from rich.markdown import Markdown
     from rich.panel import Panel
 
     config, llm = get_components(ctx.obj.get("config_path"))
+    validate_llm_connection(llm)
     generator = CodeGenerator(config, llm)
     formatter = get_formatter(output_format, config.output.use_colors)
 
@@ -256,6 +322,7 @@ def generate(ctx, description: str, mode: str, language: str, name: Optional[str
 def chat(ctx, context: Tuple[Path, ...], stream: bool):
     """Start an interactive chat session about code."""
     config, llm = get_components(ctx.obj.get("config_path"))
+    validate_llm_connection(llm)
     session = ChatSession(config, llm)
 
     # Load context files
@@ -528,10 +595,15 @@ def search(ctx, query: str, top_k: int, file_filter: Optional[str],
             type=click.Choice(["console", "json"]), help="Output format")
 @click.option("--start-line", "-s", type=int, help="Start line for targeted edit")
 @click.option("--end-line", "-e", type=int, help="End line for targeted edit")
+@click.option("--context", multiple=True, type=click.Path(exists=True, path_type=Path),
+              help="Additional context files to include")
+@click.option("--auto-context", is_flag=True, help="Automatically include related files as context")
+@click.option("--max-context-tokens", type=int, default=8000, help="Max tokens for context")
 @click.pass_context
 def edit(ctx, file: Path, instruction: str, mode: str, preview: bool,
          no_backup: bool, output_format: str, start_line: Optional[int],
-         end_line: Optional[int]):
+         end_line: Optional[int], context: Tuple[Path, ...], auto_context: bool,
+         max_context_tokens: int):
     """Edit a file using AI based on natural language instructions.
 
     Examples:
@@ -542,6 +614,7 @@ def edit(ctx, file: Path, instruction: str, mode: str, preview: bool,
         ai-assist edit config.py "Update the timeout value" -s 10 -e 20
     """
     config, llm = get_components(ctx.obj.get("config_path"))
+    validate_llm_connection(llm)
     editor = FileEditor(config, llm)
 
     # Determine edit mode
@@ -631,6 +704,7 @@ def refactor(ctx, instruction: str, files: Tuple[Path, ...], pattern: Optional[s
     from ai_code_assistant.utils import FileHandler
 
     config, llm = get_components(ctx.obj.get("config_path"))
+    validate_llm_connection(llm)
     editor = MultiFileEditor(config, llm)
     file_handler = FileHandler(config)
 
@@ -772,6 +846,7 @@ def rename(ctx, old_name: str, new_name: str, symbol_type: str, files: Tuple[Pat
     from ai_code_assistant.utils import FileHandler
 
     config, llm = get_components(ctx.obj.get("config_path"))
+    validate_llm_connection(llm)
     editor = MultiFileEditor(config, llm)
     file_handler = FileHandler(config)
 
@@ -1166,6 +1241,7 @@ def git_commit(ctx, message: Optional[str], stage_all: bool, push_after: bool, n
     from ai_code_assistant.git import GitManager, CommitMessageGenerator
 
     config, llm = get_components(ctx.obj.get("config_path"))
+    validate_llm_connection(llm)
 
     try:
         git_mgr = GitManager()
@@ -1299,6 +1375,7 @@ def git_sync(ctx, message: Optional[str], no_confirm: bool):
     from ai_code_assistant.git import GitManager, CommitMessageGenerator
 
     config, llm = get_components(ctx.obj.get("config_path"))
+    validate_llm_connection(llm)
 
     try:
         git_mgr = GitManager()
@@ -1435,9 +1512,10 @@ def agent(ctx, path: Path):
     from ai_code_assistant.chat import AgentChatSession
     from rich.markdown import Markdown
     from rich.prompt import Prompt
-
+
     config, llm = get_components(ctx.obj.get("config_path"))
-
+    validate_llm_connection(llm)
+
     # Initialize agent session
     session = AgentChatSession(config, llm, path.resolve())
 
@@ -1575,10 +1653,11 @@ def agent_review(ctx, file: Path, path: Path, stream: bool):
         ai-assist agent-review main.py --no-stream
     """
     from ai_code_assistant.agent import CodeAgent
-
+
     config, llm = get_components(ctx.obj.get("config_path"))
+    validate_llm_connection(llm)
     agent = CodeAgent(llm, path.resolve())
-
+
     console.print(f"\n[bold]Reviewing {file}...[/bold]\n")
 
     if stream:
@@ -1612,10 +1691,11 @@ def agent_generate(ctx, description: str, file: Optional[Path], language: Option
         ai-assist agent-generate "hello world" --no-stream
     """
     from ai_code_assistant.agent import CodeAgent
-
+
     config, llm = get_components(ctx.obj.get("config_path"))
+    validate_llm_connection(llm)
     agent = CodeAgent(llm, path.resolve())
-
+
     # Build the request
     request = description
     if file:
@@ -1665,10 +1745,11 @@ def agent_explain(ctx, file: Path, path: Path, stream: bool):
         ai-assist agent-explain main.py --no-stream
     """
     from ai_code_assistant.agent import CodeAgent
-
+
     config, llm = get_components(ctx.obj.get("config_path"))
+    validate_llm_connection(llm)
     agent = CodeAgent(llm, path.resolve())
-
+
     console.print(f"\n[bold]Explaining {file}...[/bold]\n")
 
     if stream:
```
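Every command in this file now pairs `get_components(...)` with an immediate `validate_llm_connection(llm)`. A decorator is one way to factor that repetition out; the sketch below is purely illustrative and uses stand-ins for both helpers rather than the package's real ones.

```python
import functools
from typing import Any, Callable, Tuple

def get_components(config_path: Any = None) -> Tuple[Any, Any]:
    # Stand-in for cli.get_components(); returns (config, llm).
    return object(), object()

def validate_llm_connection(llm: Any, exit_on_failure: bool = True) -> bool:
    # Stand-in for the preflight added in this diff.
    return True

def requires_llm(command: Callable) -> Callable:
    """Fold the repeated get_components + preflight pair into one decorator."""
    @functools.wraps(command)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        config, llm = get_components()
        validate_llm_connection(llm)  # exits early when the backend is unreachable
        return command(config, llm, *args, **kwargs)
    return wrapper

@requires_llm
def review(config: Any, llm: Any, path: str = "src/app.py") -> None:
    print(f"reviewing {path} with a validated backend")

review()
```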
ai_code_assistant/context/__init__.py
ADDED

```diff
@@ -0,0 +1,12 @@
+"""Context-aware codebase understanding module."""
+
+from .analyzer import ContextAnalyzer, FileContext
+from .selector import ContextSelector, ContextConfig, ContextResult
+
+__all__ = [
+    "ContextAnalyzer",
+    "ContextSelector",
+    "ContextConfig",
+    "ContextResult",
+    "FileContext",
+]
```
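Combined with the `code_agent.py` changes above, the new package's public surface can be exercised directly. A minimal sketch, assuming an installed cognify-code 0.2.6 and only the methods this diff itself shows (`select_for_query`, `format_for_prompt`, `files`); the query string is illustrative.

```python
from pathlib import Path

from ai_code_assistant.context import ContextConfig, ContextSelector

# Same configuration CodeAgent uses in its constructor.
selector = ContextSelector(
    root_path=Path("."),
    config=ContextConfig(max_tokens=8000, max_files=10),
)

result = selector.select_for_query(query="where is the retry logic?", target_file=None)
if result.files:
    # Render the selected files in the same prompt-ready form CodeAgent uses.
    print(result.format_for_prompt(include_summary=False))
else:
    print("no relevant context found")
```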