cognify_code-0.2.3-py3-none-any.whl → cognify_code-0.2.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ai_code_assistant/agent/code_agent.py +665 -1
- ai_code_assistant/chat/agent_session.py +91 -1
- ai_code_assistant/cli.py +54 -31
- {cognify_code-0.2.3.dist-info → cognify_code-0.2.4.dist-info}/METADATA +1 -1
- {cognify_code-0.2.3.dist-info → cognify_code-0.2.4.dist-info}/RECORD +9 -9
- {cognify_code-0.2.3.dist-info → cognify_code-0.2.4.dist-info}/WHEEL +0 -0
- {cognify_code-0.2.3.dist-info → cognify_code-0.2.4.dist-info}/entry_points.txt +0 -0
- {cognify_code-0.2.3.dist-info → cognify_code-0.2.4.dist-info}/licenses/LICENSE +0 -0
- {cognify_code-0.2.3.dist-info → cognify_code-0.2.4.dist-info}/top_level.txt +0 -0
ai_code_assistant/agent/code_agent.py
CHANGED
@@ -2,7 +2,7 @@
 
 from dataclasses import dataclass
 from pathlib import Path
-from typing import Callable, List, Optional, Tuple
+from typing import Callable, Iterator, List, Optional, Tuple
 
 from ai_code_assistant.agent.file_manager import FileContextManager
 from ai_code_assistant.agent.intent_classifier import IntentClassifier, Intent, IntentType
@@ -447,6 +447,670 @@ Provide a helpful, concise response. If the question is about code, you can sugg
 
         return AgentResponse(message=response, intent=intent)
 
+
+    def process_stream(self, message: str, use_llm_classification: bool = True) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """
+        Process a user message with streaming output.
+
+        Yields tuples of (chunk, final_response).
+        During streaming, final_response is None.
+        The last yield will have the complete AgentResponse.
+        """
+        # Classify intent (non-streaming, it's fast)
+        if use_llm_classification:
+            intent = self.intent_classifier.classify_with_llm(message)
+        else:
+            intent = self.intent_classifier.classify(message)
+
+        # Route to appropriate streaming handler
+        streaming_handlers = {
+            IntentType.CODE_GENERATE: self._handle_generate_stream,
+            IntentType.CODE_EDIT: self._handle_edit_stream,
+            IntentType.CODE_REVIEW: self._handle_review_stream,
+            IntentType.CODE_EXPLAIN: self._handle_explain_stream,
+            IntentType.CODE_REFACTOR: self._handle_refactor_stream,
+            IntentType.GENERAL_CHAT: self._handle_general_chat_stream,
+        }
+
+        handler = streaming_handlers.get(intent.type)
+
+        if handler:
+            yield from handler(message, intent)
+        else:
+            # Fall back to non-streaming for other intents
+            response = self.process(message, use_llm_classification)
+            yield (response.message, response)
+
+    def _handle_explain_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle code explanation with streaming."""
+        if not intent.file_paths:
+            response = AgentResponse(
+                message="Please specify which file or code you want me to explain.",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        file_path = intent.file_paths[0]
+        content = self.file_manager.read_file(file_path)
+
+        if not content:
+            response = AgentResponse(
+                message=f"Cannot find file: {file_path}",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        prompt = f"""Explain the following code in a clear, educational way.
+
+## Code ({file_path})
+```
+{content[:5000]}
+```
+
+## Instructions
+1. Start with a high-level overview
+2. Explain the main components/functions
+3. Describe the flow of execution
+4. Note any important patterns or techniques used
+5. Keep the explanation concise but thorough
+"""
+
+        # Stream the explanation
+        full_response = f"📖 **Explanation of {file_path}**\n\n"
+        yield (f"📖 **Explanation of {file_path}**\n\n", None)
+
+        for chunk in self.llm.stream(prompt):
+            full_response += chunk
+            yield (chunk, None)
+
+        # Final response
+        response = AgentResponse(
+            message=full_response,
+            intent=intent,
+        )
+        yield ("", response)
+
+    def _handle_review_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle code review with streaming."""
+        if not intent.file_paths:
+            context = self.file_manager.get_project_context()
+            py_files = [f.relative_path for f in context.files if f.extension == ".py"][:5]
+
+            if py_files:
+                msg = f"Which file would you like me to review? Found these Python files:\n" + \
+                      "\n".join(f" • {f}" for f in py_files)
+            else:
+                msg = "Please specify which file you want me to review."
+
+            response = AgentResponse(message=msg, intent=intent)
+            yield (msg, response)
+            return
+
+        file_path = intent.file_paths[0]
+        content = self.file_manager.read_file(file_path)
+
+        if not content:
+            response = AgentResponse(
+                message=f"Cannot find file: {file_path}",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        yield (f"🔍 **Reviewing {file_path}...**\n\n", None)
+
+        # Use streaming for the review
+        prompt = f"""Review the following code for issues, bugs, and improvements.
+
+## Code ({file_path})
+```
+{content[:5000]}
+```
+
+## Review Format
+Provide a structured review with:
+1. **Summary** - Brief overview
+2. **Issues** - List any bugs, security issues, or problems
+3. **Suggestions** - Improvements and best practices
+4. **Score** - Rate the code quality (1-10)
+
+Be specific and actionable.
+"""
+
+        full_response = f"🔍 **Reviewing {file_path}...**\n\n"
+
+        for chunk in self.llm.stream(prompt):
+            full_response += chunk
+            yield (chunk, None)
+
+        response = AgentResponse(
+            message=full_response,
+            intent=intent,
+        )
+        yield ("", response)
+
+    def _handle_generate_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle code generation with streaming."""
+        yield ("🔨 **Generating code...**\n\n", None)
+
+        request = CodeGenerationRequest(
+            description=message,
+            language=intent.language,
+            file_path=intent.file_paths[0] if intent.file_paths else None,
+        )
+
+        # Generate code (this part streams)
+        full_code = ""
+        prompt = self.code_generator._build_prompt(request)
+
+        for chunk in self.llm.stream(prompt):
+            full_code += chunk
+            yield (chunk, None)
+
+        # Extract and create changeset
+        code = self._extract_code(full_code)
+        file_path = request.file_path or f"generated.{request.language or 'py'}"
+
+        generated = GeneratedCode(
+            code=code,
+            language=request.language or "python",
+            file_path=file_path,
+            description=request.description,
+        )
+
+        changeset = ChangeSet(description=f"Generate: {message[:50]}...")
+        diff = self.diff_engine.create_file_diff(generated.file_path, generated.code)
+        changeset.diffs.append(diff)
+
+        self._pending_changeset = changeset
+
+        response = AgentResponse(
+            message=f"\n\n✅ Code generated for {file_path}",
+            intent=intent,
+            generated_code=generated,
+            changeset=changeset,
+            requires_confirmation=True,
+        )
+        yield ("\n\n✅ Code generated. Apply changes? (yes/no)", response)
+
+    def _handle_edit_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle code editing with streaming."""
+        if not intent.file_paths:
+            response = AgentResponse(
+                message="Please specify which file you want to edit.",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        file_path = intent.file_paths[0]
+        original = self.file_manager.read_file(file_path)
+
+        if not original:
+            response = AgentResponse(
+                message=f"Cannot find file: {file_path}",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        yield (f"✏️ **Editing {file_path}...**\n\n", None)
+
+        prompt = f"""Edit the following code according to the user's request.
+
+## Original Code ({file_path})
+```
+{original[:5000]}
+```
+
+## User Request
+{message}
+
+## Instructions
+Return the COMPLETE modified file.
+
+```
+"""
+
+        full_response = ""
+        for chunk in self.llm.stream(prompt):
+            full_response += chunk
+            yield (chunk, None)
+
+        new_code = self._extract_code(full_response)
+
+        changeset = ChangeSet(description=f"Edit: {message[:50]}...")
+        diff = self.diff_engine.create_diff(original, new_code, file_path)
+        changeset.diffs.append(diff)
+
+        self._pending_changeset = changeset
+
+        response = AgentResponse(
+            message=f"\n\n✅ Edit complete for {file_path}",
+            intent=intent,
+            changeset=changeset,
+            requires_confirmation=True,
+        )
+        yield ("\n\n✅ Edit complete. Apply changes? (yes/no)", response)
+
+    def _handle_refactor_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle code refactoring with streaming."""
+        if not intent.file_paths:
+            response = AgentResponse(
+                message="Please specify which file you want to refactor.",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        file_path = intent.file_paths[0]
+        original = self.file_manager.read_file(file_path)
+
+        if not original:
+            response = AgentResponse(
+                message=f"Cannot find file: {file_path}",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        yield (f"🔄 **Refactoring {file_path}...**\n\n", None)
+
+        prompt = f"""Refactor the following code to improve its quality.
+
+## Original Code ({file_path})
+```
+{original[:5000]}
+```
+
+## User Request
+{message}
+
+Return the COMPLETE refactored file.
+
+```
+"""
+
+        full_response = ""
+        for chunk in self.llm.stream(prompt):
+            full_response += chunk
+            yield (chunk, None)
+
+        new_code = self._extract_code(full_response)
+
+        changeset = ChangeSet(description=f"Refactor: {file_path}")
+        diff = self.diff_engine.create_diff(original, new_code, file_path)
+        changeset.diffs.append(diff)
+
+        self._pending_changeset = changeset
+
+        response = AgentResponse(
+            message=f"\n\n✅ Refactoring complete for {file_path}",
+            intent=intent,
+            changeset=changeset,
+            requires_confirmation=True,
+        )
+        yield ("\n\n✅ Refactoring complete. Apply changes? (yes/no)", response)
+
+    def _handle_general_chat_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle general chat with streaming."""
+        context = self.file_manager.get_project_context()
+
+        prompt = f"""You are a helpful coding assistant. Answer the user's question.
+
+Project context:
+- Root: {context.root_path.name}
+- Languages: {', '.join(context.languages)}
+- Files: {context.total_code_files} code files
+
+User: {message}
+
+Provide a helpful, concise response.
+"""
+
+        full_response = ""
+        for chunk in self.llm.stream(prompt):
+            full_response += chunk
+            yield (chunk, None)
+
+        response = AgentResponse(message=full_response, intent=intent)
+        yield ("", response)
+
+
+
+    # [new-file lines 783–1113: verbatim repeat of process_stream and the six _handle_*_stream methods above]
     def _extract_code(self, response: str) -> str:
         """Extract code from LLM response."""
         import re
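All six handlers and `process_stream` share one generator contract: zero or more `(chunk, None)` items while tokens arrive, then a terminal item whose second element is the finished `AgentResponse`. Below is a minimal consumer sketch of that contract; `FakeResponse` is a stand-in for the real `AgentResponse`, which also carries intent and changeset fields.

```python
from typing import Iterator, Optional, Tuple


class FakeResponse:
    """Stand-in for AgentResponse; only the message field is modeled."""

    def __init__(self, message: str) -> None:
        self.message = message


def fake_stream() -> Iterator[Tuple[str, Optional[FakeResponse]]]:
    """Mimic process_stream(): text chunks first, then the final item."""
    full = ""
    for chunk in ("Hello", ", ", "world"):
        full += chunk
        yield (chunk, None)          # still streaming: no final response yet
    yield ("", FakeResponse(full))   # terminal item carries the whole reply


def consume(stream: Iterator[Tuple[str, Optional[FakeResponse]]]) -> FakeResponse:
    final = None
    for chunk, response in stream:
        if chunk:
            print(chunk, end="", flush=True)  # render incrementally
        if response is not None:
            final = response                  # capture the final object
    print()
    assert final is not None, "stream ended without a final response"
    return final


if __name__ == "__main__":
    print("final message:", consume(fake_stream()).message)
```

Note that the terminal chunk is empty for explain/review/chat but is a trailing status line for generate/edit/refactor, so a consumer must both print chunks and capture the final object, exactly as the CLI loop further down does.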
ai_code_assistant/chat/agent_session.py
CHANGED
@@ -3,7 +3,7 @@
 from dataclasses import dataclass, field
 from datetime import datetime
 from pathlib import Path
-from typing import Dict, List, Literal, Optional
+from typing import Dict, Iterator, List, Literal, Optional, Tuple
 
 from ai_code_assistant.config import Config
 from ai_code_assistant.llm import LLMManager
@@ -67,6 +67,96 @@ class AgentChatSession:
 
         return assistant_msg
 
+
+    def send_message_stream(self, user_input: str) -> Iterator[Tuple[str, Optional[AgentMessage]]]:
+        """
+        Process user message with streaming output.
+
+        Yields tuples of (chunk, final_message).
+        During streaming, final_message is None.
+        The last yield will have the complete AgentMessage.
+        """
+        # Add user message to history
+        user_msg = AgentMessage(role="user", content=user_input)
+        self.history.append(user_msg)
+
+        # Check for confirmation/rejection of pending changes
+        if self._awaiting_confirmation:
+            msg = self._handle_confirmation(user_input)
+            yield (msg.content, msg)
+            return
+
+        # Process through agent with streaming
+        full_content = ""
+        final_response = None
+
+        for chunk, response in self.agent.process_stream(user_input):
+            full_content += chunk
+            yield (chunk, None)
+            if response is not None:
+                final_response = response
+
+        # Create assistant message
+        assistant_msg = AgentMessage(
+            role="assistant",
+            content=full_content,
+            response=final_response,
+            pending_action=final_response.requires_confirmation if final_response else False,
+        )
+        self.history.append(assistant_msg)
+
+        # Track if we're awaiting confirmation
+        if final_response:
+            self._awaiting_confirmation = final_response.requires_confirmation
+
+        yield ("", assistant_msg)
+
+
+
+    # [new-file lines 116–159: verbatim repeat of send_message_stream above]
     def _handle_confirmation(self, user_input: str) -> AgentMessage:
         """Handle user confirmation or rejection of pending changes."""
         lower_input = user_input.lower().strip()
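The session layer adds two things on top of `process_stream`: history bookkeeping and the yes/no handshake driven by `_awaiting_confirmation`. The toy model below shows just the handshake; `ConfirmingSession` and its string "changes" are illustrative stand-ins, not the real changeset machinery.

```python
from typing import Iterator, Optional, Tuple


class ConfirmingSession:
    """Toy model of AgentChatSession's confirmation state machine."""

    def __init__(self) -> None:
        self._awaiting_confirmation = False
        self._pending_change: Optional[str] = None

    def send_message_stream(self, user_input: str) -> Iterator[Tuple[str, Optional[str]]]:
        if self._awaiting_confirmation:
            # The next message is read as an answer, not a new request.
            answer = user_input.lower().strip()
            if answer in ("yes", "y"):
                reply = f"Applied: {self._pending_change}"
            else:
                reply = "Changes discarded."
            self._awaiting_confirmation = False
            self._pending_change = None
            yield (reply, reply)  # single item: content and final message
            return

        # Normal path: pretend the agent proposed an edit that needs approval.
        self._pending_change = f"edit for {user_input!r}"
        self._awaiting_confirmation = True
        yield ("Proposed an edit. Apply changes? (yes/no)", None)
        yield ("", "awaiting confirmation")


if __name__ == "__main__":
    session = ConfirmingSession()
    for chunk, _ in session.send_message_stream("add a docstring"):
        print(chunk, end="")
    print()
    for chunk, _ in session.send_message_stream("yes"):
        print(chunk, end="")
    print()
```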
ai_code_assistant/cli.py
CHANGED
@@ -1491,21 +1491,20 @@ def agent(ctx, path: Path):
                 console.print("[green]No pending changes.[/green]")
                 continue
 
-            # Process through agent
-            with console.status("[bold green]Thinking..."):
-                response = session.send_message(user_input)
-
-            # Display response
+            # Process through agent with streaming
             console.print(f"\n[bold green]Agent[/bold green]")
 
-
-
-
-
-
+            final_msg = None
+            for chunk, msg in session.send_message_stream(user_input):
+                if chunk:
+                    console.print(chunk, end="")
+                if msg is not None:
+                    final_msg = msg
+
+            console.print()  # Newline after streaming
 
             # Show confirmation prompt if needed
-            if
+            if final_msg and final_msg.pending_action:
                 console.print("\n[yellow]Apply these changes? (yes/no)[/yellow]")
 
         except KeyboardInterrupt:
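The loop above depends on the stream invariant holding for every intent path: all items before the last have `None` in the second slot, and the last item carries the final message. A self-contained check of that invariant against a stub stream (pytest-style asserts, not the real session):

```python
from typing import Iterator, Optional, Tuple


def check_stream_contract(stream: Iterator[Tuple[str, Optional[object]]]) -> None:
    """Assert the invariant the REPL relies on."""
    items = list(stream)
    assert items, "stream yielded nothing"
    *head, last = items
    assert all(final is None for _, final in head), "final object arrived early"
    assert last[1] is not None, "stream ended without a final object"


def _stub_stream() -> Iterator[Tuple[str, Optional[object]]]:
    yield ("partial ", None)
    yield ("output", None)
    yield ("", object())  # terminal item


if __name__ == "__main__":
    check_stream_contract(_stub_stream())
    print("contract holds")
```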
@@ -1565,13 +1564,15 @@ def _show_agent_help():
 @click.argument("file", type=click.Path(exists=True, path_type=Path))
 @click.option("--path", "-p", type=click.Path(exists=True, path_type=Path),
               default=".", help="Project root path")
+@click.option("--stream/--no-stream", default=True, help="Stream output in real-time")
 @click.pass_context
-def agent_review(ctx, file: Path, path: Path):
+def agent_review(ctx, file: Path, path: Path, stream: bool):
     """Quick code review using the agent.
 
     Examples:
         ai-assist agent-review src/main.py
         ai-assist agent-review utils.py --path ./my-project
+        ai-assist agent-review main.py --no-stream
     """
     from ai_code_assistant.agent import CodeAgent
 
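`--stream/--no-stream` is Click's paired boolean flag: one declaration yields two command-line spellings backed by a single `bool` parameter, with the default applied when neither is given. A standalone sketch:

```python
import click


@click.command()
@click.option("--stream/--no-stream", default=True,
              help="Stream output in real-time")
def demo(stream: bool) -> None:
    """Run with --stream (default) or --no-stream to flip the flag."""
    click.echo(f"streaming enabled: {stream}")


if __name__ == "__main__":
    demo()
```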
@@ -1580,10 +1581,15 @@ def agent_review(ctx, file: Path, path: Path):
 
     console.print(f"\n[bold]Reviewing {file}...[/bold]\n")
 
-
-    response
-
-
+    if stream:
+        for chunk, response in agent.process_stream(f"review {file}"):
+            if chunk:
+                console.print(chunk, end="")
+        console.print()
+    else:
+        with console.status("[bold green]Analyzing..."):
+            response = agent.process(f"review {file}")
+        console.print(response.message)
 
 
 @main.command("agent-generate")
@@ -1593,15 +1599,17 @@ def agent_review(ctx, file: Path, path: Path):
 @click.option("--path", "-p", type=click.Path(exists=True, path_type=Path),
               default=".", help="Project root path")
 @click.option("--apply", "-a", is_flag=True, help="Apply changes without confirmation")
+@click.option("--stream/--no-stream", default=True, help="Stream output in real-time")
 @click.pass_context
 def agent_generate(ctx, description: str, file: Optional[Path], language: Optional[str],
-                   path: Path, apply: bool):
+                   path: Path, apply: bool, stream: bool):
     """Generate code using the agent.
 
     Examples:
         ai-assist agent-generate "a function to validate email"
         ai-assist agent-generate "REST API for users" -f src/api.py
         ai-assist agent-generate "sorting algorithm" -l python
+        ai-assist agent-generate "hello world" --no-stream
     """
     from ai_code_assistant.agent import CodeAgent
 
@@ -1617,12 +1625,20 @@ def agent_generate(ctx, description: str, file: Optional[Path], language: Option
 
     console.print(f"\n[bold]Generating code...[/bold]\n")
 
-
-
-
-
+    final_response = None
+    if stream:
+        for chunk, response in agent.process_stream(request):
+            if chunk:
+                console.print(chunk, end="")
+            if response is not None:
+                final_response = response
+        console.print()
+    else:
+        with console.status("[bold green]Generating..."):
+            final_response = agent.process(request)
+        console.print(final_response.message)
 
-    if
+    if final_response and final_response.requires_confirmation:
         if apply:
             success, msg = agent.confirm_changes()
             console.print(f"\n{msg}")
@@ -1638,29 +1654,36 @@ def agent_generate(ctx, description: str, file: Optional[Path], language: Option
 @click.argument("file", type=click.Path(exists=True, path_type=Path))
 @click.option("--path", "-p", type=click.Path(exists=True, path_type=Path),
               default=".", help="Project root path")
+@click.option("--stream/--no-stream", default=True, help="Stream output in real-time")
 @click.pass_context
-def agent_explain(ctx, file: Path, path: Path):
+def agent_explain(ctx, file: Path, path: Path, stream: bool):
     """Explain code using the agent.
 
     Examples:
         ai-assist agent-explain src/main.py
         ai-assist agent-explain config.py --path ./my-project
+        ai-assist agent-explain main.py --no-stream
     """
     from ai_code_assistant.agent import CodeAgent
-    from rich.markdown import Markdown
 
     config, llm = get_components(ctx.obj.get("config_path"))
     agent = CodeAgent(llm, path.resolve())
 
     console.print(f"\n[bold]Explaining {file}...[/bold]\n")
 
-
-    response
-
-
-    console.print(
-
-
+    if stream:
+        for chunk, response in agent.process_stream(f"explain {file}"):
+            if chunk:
+                console.print(chunk, end="")
+        console.print()
+    else:
+        from rich.markdown import Markdown
+        with console.status("[bold green]Analyzing..."):
+            response = agent.process(f"explain {file}")
+        try:
+            console.print(Markdown(response.message))
+        except Exception:
+            console.print(response.message)
 
 
 if __name__ == "__main__":
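The non-streaming branch of `agent_explain` renders the reply as Markdown and falls back to plain text if rendering raises. The same pattern in isolation, using Rich's real `Console` and `Markdown` classes on made-up sample text:

```python
from rich.console import Console
from rich.markdown import Markdown

console = Console()
reply = "# Overview\n\nThis module does **one** thing well."  # sample text

try:
    # Pretty-print when the text parses as Markdown...
    console.print(Markdown(reply))
except Exception:
    # ...but never let a rendering error eat the agent's answer.
    console.print(reply)
```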
{cognify_code-0.2.3.dist-info → cognify_code-0.2.4.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: cognify-code
-Version: 0.2.3
+Version: 0.2.4
 Summary: Your local AI-powered code assistant. Review, generate, search, and refactor code with an intelligent AI agent—all running locally with complete privacy.
 Author-email: Ashok Kumar <akkssy@users.noreply.github.com>
 Maintainer-email: Ashok Kumar <akkssy@users.noreply.github.com>
{cognify_code-0.2.3.dist-info → cognify_code-0.2.4.dist-info}/RECORD
CHANGED
@@ -1,17 +1,17 @@
 ai_code_assistant/__init__.py,sha256=XnpG4h-2gW3cXseFvqQT_-XyOmVJtikVMrHUnmy8XKI,409
-ai_code_assistant/cli.py,sha256=
+ai_code_assistant/cli.py,sha256=XSpwAQgWXhhLX9c628PwdSfx5czK3p5qS4nNNgESnWw,64153
 ai_code_assistant/config.py,sha256=6sAufexwzfCu2JNWvt9KevS9k_gMcjj1TAnwuaO1ZFw,4727
 ai_code_assistant/llm.py,sha256=DfcWJf6zEAUsPSEZLdEmb9o6BQNf1Ja88nswjpy6cOw,4209
 ai_code_assistant/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ai_code_assistant/agent/__init__.py,sha256=BcVe4Ebopv_J01ApnRl05oN5yOet5mEefBrQmdPsUj0,1284
-ai_code_assistant/agent/code_agent.py,sha256=
+ai_code_assistant/agent/code_agent.py,sha256=y0Osc8wzpIAW1B1NGrlsSw_vIYu-ZlMOae05IhM2XYM,38463
 ai_code_assistant/agent/code_generator.py,sha256=rAaziRU-mJ5NooERjR_Cd6_hwO0kuULw3Sp8Ca9kR48,13138
 ai_code_assistant/agent/code_reviewer.py,sha256=YiM7lRJhoN-vBnQb29jF-5nmE9ppL-OJffvx4ocTHEU,12066
 ai_code_assistant/agent/diff_engine.py,sha256=A5jszowc5VmWbdidpIW_QhswG_Hats3FYuemP8VoYv4,11018
 ai_code_assistant/agent/file_manager.py,sha256=Inyfo-UXT4joms1ADIMA_TKtIHEjEkBz4I4U2iK5_jI,10742
 ai_code_assistant/agent/intent_classifier.py,sha256=MuIcyWQntocrTlCb4CD54mhc3JfSsWGfulsPYGUoz6E,10667
 ai_code_assistant/chat/__init__.py,sha256=KntIXcjbPgznax1E0fvdrA3XtKF-hCz5Fr1tcRbdl7U,279
-ai_code_assistant/chat/agent_session.py,sha256
+ai_code_assistant/chat/agent_session.py,sha256=-sW78d0nifRBNO6PRDiqdd8Sqpkv98kedvZbBQzK3lo,8674
 ai_code_assistant/chat/session.py,sha256=5JRd1DuLjxbtckmsMeHzNjoEZnJS9lx9NoX6z03F0xE,5500
 ai_code_assistant/editor/__init__.py,sha256=892BfTIo6kLdfZdhnvl4OFe0QSnxE4EyfkBoyLdA5rc,340
 ai_code_assistant/editor/diff_handler.py,sha256=LeI-00GuH7ASIetsUzT3Y_pDq4K1wmycuu4UFu5ZkGg,8759
@@ -47,9 +47,9 @@ ai_code_assistant/reviewer/prompts.py,sha256=9RrHEBttS5ngxY2BNsUvqGC6-cTxco-kDPb
 ai_code_assistant/utils/__init__.py,sha256=3HO-1Bj4VvUtM7W1C3MKR4DzQ9Xc875QKSHHkHwuqVs,368
 ai_code_assistant/utils/file_handler.py,sha256=jPxvtI5dJxkpPjELgRJ11WXamtyKKmZANQ1fcfMVtiU,5239
 ai_code_assistant/utils/formatters.py,sha256=5El9ew9HS6JLBucBUxxcw4fO5nLpOucgNJrJj2NC3zw,8945
-cognify_code-0.2.
-cognify_code-0.2.
-cognify_code-0.2.
-cognify_code-0.2.
-cognify_code-0.2.
-cognify_code-0.2.
+cognify_code-0.2.4.dist-info/licenses/LICENSE,sha256=5yu_kWq2bK-XKhWo79Eykdg4Qf3O8V2Ys7cpOO7GyyE,1063
+cognify_code-0.2.4.dist-info/METADATA,sha256=_S3by3PNwTZmPdVbxZDBTLdKk7MZuoRttLOYZiXIYkI,11862
+cognify_code-0.2.4.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+cognify_code-0.2.4.dist-info/entry_points.txt,sha256=MrBnnWPHZVozqqKyTlnJO63YN2kE5yPWKlr2nnRFRks,94
+cognify_code-0.2.4.dist-info/top_level.txt,sha256=dD_r1x-oX0s1uspYY72kig4jfIsjh3oDKwOBCMYXqpo,18
+cognify_code-0.2.4.dist-info/RECORD,,
{cognify_code-0.2.3.dist-info → cognify_code-0.2.4.dist-info}/WHEEL
File without changes
{cognify_code-0.2.3.dist-info → cognify_code-0.2.4.dist-info}/entry_points.txt
File without changes
{cognify_code-0.2.3.dist-info → cognify_code-0.2.4.dist-info}/licenses/LICENSE
File without changes
{cognify_code-0.2.3.dist-info → cognify_code-0.2.4.dist-info}/top_level.txt
File without changes
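For reference, each RECORD line above has the form `path,sha256=<digest>,<size>`, where the digest is the urlsafe-base64 encoding of the file's SHA-256 with trailing `=` padding stripped, as the wheel format specifies. A short sketch that reproduces the format:

```python
import base64
import hashlib
from pathlib import Path


def record_line(path: Path) -> str:
    """Build a wheel RECORD entry for one file."""
    data = path.read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest())
    return f"{path.as_posix()},sha256={digest.rstrip(b'=').decode()},{len(data)}"


if __name__ == "__main__":
    print(record_line(Path(__file__)))
```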