cognify-code 0.2.3 → 0.2.5 (py3-none-any.whl)
This diff compares the contents of two publicly released versions of the package, exactly as they appear in their public registry, and is provided for informational purposes only.
- ai_code_assistant/agent/code_agent.py +696 -2
- ai_code_assistant/chat/agent_session.py +91 -1
- ai_code_assistant/cli.py +73 -34
- ai_code_assistant/context/__init__.py +12 -0
- ai_code_assistant/context/analyzer.py +363 -0
- ai_code_assistant/context/selector.py +309 -0
- {cognify_code-0.2.3.dist-info → cognify_code-0.2.5.dist-info}/METADATA +1 -1
- {cognify_code-0.2.3.dist-info → cognify_code-0.2.5.dist-info}/RECORD +12 -9
- {cognify_code-0.2.3.dist-info → cognify_code-0.2.5.dist-info}/WHEEL +0 -0
- {cognify_code-0.2.3.dist-info → cognify_code-0.2.5.dist-info}/entry_points.txt +0 -0
- {cognify_code-0.2.3.dist-info → cognify_code-0.2.5.dist-info}/licenses/LICENSE +0 -0
- {cognify_code-0.2.3.dist-info → cognify_code-0.2.5.dist-info}/top_level.txt +0 -0
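
The headline change in 0.2.5 is a new `ai_code_assistant.context` package (`analyzer.py`, `selector.py`) that `CodeAgent` now wires in; its diff follows below. Here is a minimal standalone sketch of the selector API, inferred only from the calls visible in this diff (the constructor keywords, `select_for_query`, `.files`, and `format_for_prompt`); anything beyond those names is an assumption:

```python
from pathlib import Path

from ai_code_assistant.context import ContextSelector, ContextConfig

# Budget the automatic context the same way CodeAgent does below.
selector = ContextSelector(
    root_path=Path("."),
    config=ContextConfig(max_tokens=8000, max_files=10),
)

result = selector.select_for_query(query="where is the CLI entry point?", target_file=None)
if result.files:  # an empty selection is a normal outcome
    print(result.format_for_prompt(include_summary=False))
```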
ai_code_assistant/agent/code_agent.py:

```diff
@@ -2,13 +2,14 @@
 
 from dataclasses import dataclass
 from pathlib import Path
-from typing import Callable, List, Optional, Tuple
+from typing import Callable, Iterator, List, Optional, Tuple
 
 from ai_code_assistant.agent.file_manager import FileContextManager
 from ai_code_assistant.agent.intent_classifier import IntentClassifier, Intent, IntentType
 from ai_code_assistant.agent.code_generator import CodeGenerator, CodeGenerationRequest, GeneratedCode
 from ai_code_assistant.agent.diff_engine import DiffEngine, ChangeSet, FileDiff
 from ai_code_assistant.agent.code_reviewer import CodeReviewer, ReviewResult
+from ai_code_assistant.context import ContextSelector, ContextConfig
 
 
 @dataclass
@@ -30,7 +31,7 @@ class AgentResponse:
 class CodeAgent:
     """Main agent that orchestrates code operations based on user intent."""
 
-    def __init__(self, llm_manager, root_path: Optional[Path] = None):
+    def __init__(self, llm_manager, root_path: Optional[Path] = None, auto_context: bool = True):
         self.llm = llm_manager
         self.file_manager = FileContextManager(root_path)
         self.intent_classifier = IntentClassifier(llm_manager)
@@ -38,6 +39,13 @@ class CodeAgent:
         self.diff_engine = DiffEngine(self.file_manager)
         self.code_reviewer = CodeReviewer(llm_manager, self.file_manager)
 
+        # Context selector for smart context gathering
+        self.auto_context = auto_context
+        self.context_selector = ContextSelector(
+            root_path=root_path,
+            config=ContextConfig(max_tokens=8000, max_files=10)
+        )
+
        # Pending changes awaiting confirmation
         self._pending_changeset: Optional[ChangeSet] = None
 
```
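
The new keyword keeps the old call signature working while making context gathering opt-out. A minimal construction sketch, assuming only what the signature above shows (the `llm_manager` value is a placeholder):

```python
from pathlib import Path

from ai_code_assistant.agent.code_agent import CodeAgent

llm_manager = ...  # assumption: an LLM manager built elsewhere in the package

# Default: context gathering on, budgeted at 10 files / 8000 tokens.
agent = CodeAgent(llm_manager, root_path=Path("."))

# Opting out restores the 0.2.3 behavior (no automatic context).
plain_agent = CodeAgent(llm_manager, root_path=Path("."), auto_context=False)
```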
```diff
@@ -84,6 +92,28 @@ class CodeAgent:
         self._pending_changeset = None
         return "Changes discarded."
 
+    def _get_relevant_context(self, query: str, file_path: Optional[Path] = None) -> str:
+        """Get relevant context for a query using the context selector."""
+        if not self.auto_context:
+            return ""
+
+        try:
+            # Get context based on query and optional file path
+            target_file = str(file_path) if file_path else None
+            context_result = self.context_selector.select_for_query(
+                query=query,
+                target_file=target_file
+            )
+
+            if not context_result.files:
+                return ""
+
+            # Use the built-in formatting
+            return context_result.format_for_prompt(include_summary=False)
+        except Exception:
+            # If context gathering fails, continue without it
+            return ""
+
     def _handle_generate(self, message: str, intent: Intent) -> AgentResponse:
         """Handle code generation requests."""
         request = CodeGenerationRequest(
```
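
The helper is deliberately fail-soft: with `auto_context` off, an empty selection, or any selector exception, it returns an empty string, so callers can concatenate it unconditionally. None of the hunks shown here contain a call site, so the following is a purely hypothetical illustration of how a prompt builder might use it:

```python
def build_prompt_with_context(agent, message: str, file_path=None) -> str:
    """Hypothetical call site: prepend auto-selected context when present."""
    context = agent._get_relevant_context(message, file_path)
    return f"{context}\n\n{message}" if context else message
```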
```diff
@@ -447,6 +477,670 @@ Provide a helpful, concise response. If the question is about code, you can sugg
 
         return AgentResponse(message=response, intent=intent)
 
+
+    def process_stream(self, message: str, use_llm_classification: bool = True) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """
+        Process a user message with streaming output.
+
+        Yields tuples of (chunk, final_response).
+        During streaming, final_response is None.
+        The last yield will have the complete AgentResponse.
+        """
+        # Classify intent (non-streaming, it's fast)
+        if use_llm_classification:
+            intent = self.intent_classifier.classify_with_llm(message)
+        else:
+            intent = self.intent_classifier.classify(message)
+
+        # Route to appropriate streaming handler
+        streaming_handlers = {
+            IntentType.CODE_GENERATE: self._handle_generate_stream,
+            IntentType.CODE_EDIT: self._handle_edit_stream,
+            IntentType.CODE_REVIEW: self._handle_review_stream,
+            IntentType.CODE_EXPLAIN: self._handle_explain_stream,
+            IntentType.CODE_REFACTOR: self._handle_refactor_stream,
+            IntentType.GENERAL_CHAT: self._handle_general_chat_stream,
+        }
+
+        handler = streaming_handlers.get(intent.type)
+
+        if handler:
+            yield from handler(message, intent)
+        else:
+            # Fall back to non-streaming for other intents
+            response = self.process(message, use_llm_classification)
+            yield (response.message, response)
+
+    def _handle_explain_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle code explanation with streaming."""
+        if not intent.file_paths:
+            response = AgentResponse(
+                message="Please specify which file or code you want me to explain.",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        file_path = intent.file_paths[0]
+        content = self.file_manager.read_file(file_path)
+
+        if not content:
+            response = AgentResponse(
+                message=f"Cannot find file: {file_path}",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        prompt = f"""Explain the following code in a clear, educational way.
+
+## Code ({file_path})
+```
+{content[:5000]}
+```
+
+## Instructions
+1. Start with a high-level overview
+2. Explain the main components/functions
+3. Describe the flow of execution
+4. Note any important patterns or techniques used
+5. Keep the explanation concise but thorough
+"""
+
+        # Stream the explanation
+        full_response = f"📖 **Explanation of {file_path}**\n\n"
+        yield (f"📖 **Explanation of {file_path}**\n\n", None)
+
+        for chunk in self.llm.stream(prompt):
+            full_response += chunk
+            yield (chunk, None)
+
+        # Final response
+        response = AgentResponse(
+            message=full_response,
+            intent=intent,
+        )
+        yield ("", response)
+
+    def _handle_review_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle code review with streaming."""
+        if not intent.file_paths:
+            context = self.file_manager.get_project_context()
+            py_files = [f.relative_path for f in context.files if f.extension == ".py"][:5]
+
+            if py_files:
+                msg = f"Which file would you like me to review? Found these Python files:\n" + \
+                      "\n".join(f" • {f}" for f in py_files)
+            else:
+                msg = "Please specify which file you want me to review."
+
+            response = AgentResponse(message=msg, intent=intent)
+            yield (msg, response)
+            return
+
+        file_path = intent.file_paths[0]
+        content = self.file_manager.read_file(file_path)
+
+        if not content:
+            response = AgentResponse(
+                message=f"Cannot find file: {file_path}",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        yield (f"🔍 **Reviewing {file_path}...**\n\n", None)
+
+        # Use streaming for the review
+        prompt = f"""Review the following code for issues, bugs, and improvements.
+
+## Code ({file_path})
+```
+{content[:5000]}
+```
+
+## Review Format
+Provide a structured review with:
+1. **Summary** - Brief overview
+2. **Issues** - List any bugs, security issues, or problems
+3. **Suggestions** - Improvements and best practices
+4. **Score** - Rate the code quality (1-10)
+
+Be specific and actionable.
+"""
+
+        full_response = f"🔍 **Reviewing {file_path}...**\n\n"
+
+        for chunk in self.llm.stream(prompt):
+            full_response += chunk
+            yield (chunk, None)
+
+        response = AgentResponse(
+            message=full_response,
+            intent=intent,
+        )
+        yield ("", response)
+
+    def _handle_generate_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle code generation with streaming."""
+        yield ("🔨 **Generating code...**\n\n", None)
+
+        request = CodeGenerationRequest(
+            description=message,
+            language=intent.language,
+            file_path=intent.file_paths[0] if intent.file_paths else None,
+        )
+
+        # Generate code (this part streams)
+        full_code = ""
+        prompt = self.code_generator._build_prompt(request)
+
+        for chunk in self.llm.stream(prompt):
+            full_code += chunk
+            yield (chunk, None)
+
+        # Extract and create changeset
+        code = self._extract_code(full_code)
+        file_path = request.file_path or f"generated.{request.language or 'py'}"
+
+        generated = GeneratedCode(
+            code=code,
+            language=request.language or "python",
+            file_path=file_path,
+            description=request.description,
+        )
+
+        changeset = ChangeSet(description=f"Generate: {message[:50]}...")
+        diff = self.diff_engine.create_file_diff(generated.file_path, generated.code)
+        changeset.diffs.append(diff)
+
+        self._pending_changeset = changeset
+
+        response = AgentResponse(
+            message=f"\n\n✅ Code generated for {file_path}",
+            intent=intent,
+            generated_code=generated,
+            changeset=changeset,
+            requires_confirmation=True,
+        )
+        yield ("\n\n✅ Code generated. Apply changes? (yes/no)", response)
+
+    def _handle_edit_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle code editing with streaming."""
+        if not intent.file_paths:
+            response = AgentResponse(
+                message="Please specify which file you want to edit.",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        file_path = intent.file_paths[0]
+        original = self.file_manager.read_file(file_path)
+
+        if not original:
+            response = AgentResponse(
+                message=f"Cannot find file: {file_path}",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        yield (f"✏️ **Editing {file_path}...**\n\n", None)
+
+        prompt = f"""Edit the following code according to the user's request.
+
+## Original Code ({file_path})
+```
+{original[:5000]}
+```
+
+## User Request
+{message}
+
+## Instructions
+Return the COMPLETE modified file.
+
+```
+"""
+
+        full_response = ""
+        for chunk in self.llm.stream(prompt):
+            full_response += chunk
+            yield (chunk, None)
+
+        new_code = self._extract_code(full_response)
+
+        changeset = ChangeSet(description=f"Edit: {message[:50]}...")
+        diff = self.diff_engine.create_diff(original, new_code, file_path)
+        changeset.diffs.append(diff)
+
+        self._pending_changeset = changeset
+
+        response = AgentResponse(
+            message=f"\n\n✅ Edit complete for {file_path}",
+            intent=intent,
+            changeset=changeset,
+            requires_confirmation=True,
+        )
+        yield ("\n\n✅ Edit complete. Apply changes? (yes/no)", response)
+
+    def _handle_refactor_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle code refactoring with streaming."""
+        if not intent.file_paths:
+            response = AgentResponse(
+                message="Please specify which file you want to refactor.",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        file_path = intent.file_paths[0]
+        original = self.file_manager.read_file(file_path)
+
+        if not original:
+            response = AgentResponse(
+                message=f"Cannot find file: {file_path}",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        yield (f"🔄 **Refactoring {file_path}...**\n\n", None)
+
+        prompt = f"""Refactor the following code to improve its quality.
+
+## Original Code ({file_path})
+```
+{original[:5000]}
+```
+
+## User Request
+{message}
+
+Return the COMPLETE refactored file.
+
+```
+"""
+
+        full_response = ""
+        for chunk in self.llm.stream(prompt):
+            full_response += chunk
+            yield (chunk, None)
+
+        new_code = self._extract_code(full_response)
+
+        changeset = ChangeSet(description=f"Refactor: {file_path}")
+        diff = self.diff_engine.create_diff(original, new_code, file_path)
+        changeset.diffs.append(diff)
+
+        self._pending_changeset = changeset
+
+        response = AgentResponse(
+            message=f"\n\n✅ Refactoring complete for {file_path}",
+            intent=intent,
+            changeset=changeset,
+            requires_confirmation=True,
+        )
+        yield ("\n\n✅ Refactoring complete. Apply changes? (yes/no)", response)
+
+    def _handle_general_chat_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle general chat with streaming."""
+        context = self.file_manager.get_project_context()
+
+        prompt = f"""You are a helpful coding assistant. Answer the user's question.
+
+Project context:
+- Root: {context.root_path.name}
+- Languages: {', '.join(context.languages)}
+- Files: {context.total_code_files} code files
+
+User: {message}
+
+Provide a helpful, concise response.
+"""
+
+        full_response = ""
+        for chunk in self.llm.stream(prompt):
+            full_response += chunk
+            yield (chunk, None)
+
+        response = AgentResponse(message=full_response, intent=intent)
+        yield ("", response)
+
+
+
+    def process_stream(self, message: str, use_llm_classification: bool = True) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """
+        Process a user message with streaming output.
+
+        Yields tuples of (chunk, final_response).
+        During streaming, final_response is None.
+        The last yield will have the complete AgentResponse.
+        """
+        # Classify intent (non-streaming, it's fast)
+        if use_llm_classification:
+            intent = self.intent_classifier.classify_with_llm(message)
+        else:
+            intent = self.intent_classifier.classify(message)
+
+        # Route to appropriate streaming handler
+        streaming_handlers = {
+            IntentType.CODE_GENERATE: self._handle_generate_stream,
+            IntentType.CODE_EDIT: self._handle_edit_stream,
+            IntentType.CODE_REVIEW: self._handle_review_stream,
+            IntentType.CODE_EXPLAIN: self._handle_explain_stream,
+            IntentType.CODE_REFACTOR: self._handle_refactor_stream,
+            IntentType.GENERAL_CHAT: self._handle_general_chat_stream,
+        }
+
+        handler = streaming_handlers.get(intent.type)
+
+        if handler:
+            yield from handler(message, intent)
+        else:
+            # Fall back to non-streaming for other intents
+            response = self.process(message, use_llm_classification)
+            yield (response.message, response)
+
+    def _handle_explain_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle code explanation with streaming."""
+        if not intent.file_paths:
+            response = AgentResponse(
+                message="Please specify which file or code you want me to explain.",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        file_path = intent.file_paths[0]
+        content = self.file_manager.read_file(file_path)
+
+        if not content:
+            response = AgentResponse(
+                message=f"Cannot find file: {file_path}",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        prompt = f"""Explain the following code in a clear, educational way.
+
+## Code ({file_path})
+```
+{content[:5000]}
+```
+
+## Instructions
+1. Start with a high-level overview
+2. Explain the main components/functions
+3. Describe the flow of execution
+4. Note any important patterns or techniques used
+5. Keep the explanation concise but thorough
+"""
+
+        # Stream the explanation
+        full_response = f"📖 **Explanation of {file_path}**\n\n"
+        yield (f"📖 **Explanation of {file_path}**\n\n", None)
+
+        for chunk in self.llm.stream(prompt):
+            full_response += chunk
+            yield (chunk, None)
+
+        # Final response
+        response = AgentResponse(
+            message=full_response,
+            intent=intent,
+        )
+        yield ("", response)
+
+    def _handle_review_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle code review with streaming."""
+        if not intent.file_paths:
+            context = self.file_manager.get_project_context()
+            py_files = [f.relative_path for f in context.files if f.extension == ".py"][:5]
+
+            if py_files:
+                msg = f"Which file would you like me to review? Found these Python files:\n" + \
+                      "\n".join(f" • {f}" for f in py_files)
+            else:
+                msg = "Please specify which file you want me to review."
+
+            response = AgentResponse(message=msg, intent=intent)
+            yield (msg, response)
+            return
+
+        file_path = intent.file_paths[0]
+        content = self.file_manager.read_file(file_path)
+
+        if not content:
+            response = AgentResponse(
+                message=f"Cannot find file: {file_path}",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        yield (f"🔍 **Reviewing {file_path}...**\n\n", None)
+
+        # Use streaming for the review
+        prompt = f"""Review the following code for issues, bugs, and improvements.
+
+## Code ({file_path})
+```
+{content[:5000]}
+```
+
+## Review Format
+Provide a structured review with:
+1. **Summary** - Brief overview
+2. **Issues** - List any bugs, security issues, or problems
+3. **Suggestions** - Improvements and best practices
+4. **Score** - Rate the code quality (1-10)
+
+Be specific and actionable.
+"""
+
+        full_response = f"🔍 **Reviewing {file_path}...**\n\n"
+
+        for chunk in self.llm.stream(prompt):
+            full_response += chunk
+            yield (chunk, None)
+
+        response = AgentResponse(
+            message=full_response,
+            intent=intent,
+        )
+        yield ("", response)
+
+    def _handle_generate_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle code generation with streaming."""
+        yield ("🔨 **Generating code...**\n\n", None)
+
+        request = CodeGenerationRequest(
+            description=message,
+            language=intent.language,
+            file_path=intent.file_paths[0] if intent.file_paths else None,
+        )
+
+        # Generate code (this part streams)
+        full_code = ""
+        prompt = self.code_generator._build_prompt(request)
+
+        for chunk in self.llm.stream(prompt):
+            full_code += chunk
+            yield (chunk, None)
+
+        # Extract and create changeset
+        code = self._extract_code(full_code)
+        file_path = request.file_path or f"generated.{request.language or 'py'}"
+
+        generated = GeneratedCode(
+            code=code,
+            language=request.language or "python",
+            file_path=file_path,
+            description=request.description,
+        )
+
+        changeset = ChangeSet(description=f"Generate: {message[:50]}...")
+        diff = self.diff_engine.create_file_diff(generated.file_path, generated.code)
+        changeset.diffs.append(diff)
+
+        self._pending_changeset = changeset
+
+        response = AgentResponse(
+            message=f"\n\n✅ Code generated for {file_path}",
+            intent=intent,
+            generated_code=generated,
+            changeset=changeset,
+            requires_confirmation=True,
+        )
+        yield ("\n\n✅ Code generated. Apply changes? (yes/no)", response)
+
+    def _handle_edit_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle code editing with streaming."""
+        if not intent.file_paths:
+            response = AgentResponse(
+                message="Please specify which file you want to edit.",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        file_path = intent.file_paths[0]
+        original = self.file_manager.read_file(file_path)
+
+        if not original:
+            response = AgentResponse(
+                message=f"Cannot find file: {file_path}",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        yield (f"✏️ **Editing {file_path}...**\n\n", None)
+
+        prompt = f"""Edit the following code according to the user's request.
+
+## Original Code ({file_path})
+```
+{original[:5000]}
+```
+
+## User Request
+{message}
+
+## Instructions
+Return the COMPLETE modified file.
+
+```
+"""
+
+        full_response = ""
+        for chunk in self.llm.stream(prompt):
+            full_response += chunk
+            yield (chunk, None)
+
+        new_code = self._extract_code(full_response)
+
+        changeset = ChangeSet(description=f"Edit: {message[:50]}...")
+        diff = self.diff_engine.create_diff(original, new_code, file_path)
+        changeset.diffs.append(diff)
+
+        self._pending_changeset = changeset
+
+        response = AgentResponse(
+            message=f"\n\n✅ Edit complete for {file_path}",
+            intent=intent,
+            changeset=changeset,
+            requires_confirmation=True,
+        )
+        yield ("\n\n✅ Edit complete. Apply changes? (yes/no)", response)
+
+    def _handle_refactor_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle code refactoring with streaming."""
+        if not intent.file_paths:
+            response = AgentResponse(
+                message="Please specify which file you want to refactor.",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        file_path = intent.file_paths[0]
+        original = self.file_manager.read_file(file_path)
+
+        if not original:
+            response = AgentResponse(
+                message=f"Cannot find file: {file_path}",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        yield (f"🔄 **Refactoring {file_path}...**\n\n", None)
+
+        prompt = f"""Refactor the following code to improve its quality.
+
+## Original Code ({file_path})
+```
+{original[:5000]}
+```
+
+## User Request
+{message}
+
+Return the COMPLETE refactored file.
+
+```
+"""
+
+        full_response = ""
+        for chunk in self.llm.stream(prompt):
+            full_response += chunk
+            yield (chunk, None)
+
+        new_code = self._extract_code(full_response)
+
+        changeset = ChangeSet(description=f"Refactor: {file_path}")
+        diff = self.diff_engine.create_diff(original, new_code, file_path)
+        changeset.diffs.append(diff)
+
+        self._pending_changeset = changeset
+
+        response = AgentResponse(
+            message=f"\n\n✅ Refactoring complete for {file_path}",
+            intent=intent,
+            changeset=changeset,
+            requires_confirmation=True,
+        )
+        yield ("\n\n✅ Refactoring complete. Apply changes? (yes/no)", response)
+
+    def _handle_general_chat_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle general chat with streaming."""
+        context = self.file_manager.get_project_context()
+
+        prompt = f"""You are a helpful coding assistant. Answer the user's question.
+
+Project context:
+- Root: {context.root_path.name}
+- Languages: {', '.join(context.languages)}
+- Files: {context.total_code_files} code files
+
+User: {message}
+
+Provide a helpful, concise response.
+"""
+
+        full_response = ""
+        for chunk in self.llm.stream(prompt):
+            full_response += chunk
+            yield (chunk, None)
+
+        response = AgentResponse(message=full_response, intent=intent)
+        yield ("", response)
+
+
 
     def _extract_code(self, response: str) -> str:
         """Extract code from LLM response."""
         import re
```
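
Note that this hunk adds `process_stream` and the five streaming handlers twice, back to back (new lines 480–811 and 813–1143); since Python rebinds class attributes as the class body executes, the second set of definitions silently shadows the first, so the duplication is harmless but inflates the file. Per the docstring, the generator yields `(chunk, final_response)` tuples, with the complete `AgentResponse` carried only by the last yield. A minimal consumer sketch, assuming an `agent` constructed as in the earlier example:

```python
final = None
for chunk, response in agent.process_stream("review ai_code_assistant/cli.py"):
    # Intermediate yields carry text chunks; response stays None until the end.
    print(chunk, end="", flush=True)
    if response is not None:
        final = response  # last yield: the complete AgentResponse

if final is not None and final.requires_confirmation:
    # The agent now holds a pending ChangeSet awaiting an explicit yes/no.
    ...
```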