cognify-code 0.2.2__py3-none-any.whl → 0.2.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ai_code_assistant/agent/code_agent.py CHANGED
@@ -2,7 +2,7 @@
 
 from dataclasses import dataclass
 from pathlib import Path
-from typing import Callable, List, Optional, Tuple
+from typing import Callable, Iterator, List, Optional, Tuple
 
 from ai_code_assistant.agent.file_manager import FileContextManager
 from ai_code_assistant.agent.intent_classifier import IntentClassifier, Intent, IntentType
@@ -447,6 +447,670 @@ Provide a helpful, concise response. If the question is about code, you can sugg
 
         return AgentResponse(message=response, intent=intent)
 
+
+    def process_stream(self, message: str, use_llm_classification: bool = True) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """
+        Process a user message with streaming output.
+
+        Yields tuples of (chunk, final_response).
+        During streaming, final_response is None.
+        The last yield will have the complete AgentResponse.
+        """
+        # Classify intent (non-streaming, it's fast)
+        if use_llm_classification:
+            intent = self.intent_classifier.classify_with_llm(message)
+        else:
+            intent = self.intent_classifier.classify(message)
+
+        # Route to appropriate streaming handler
+        streaming_handlers = {
+            IntentType.CODE_GENERATE: self._handle_generate_stream,
+            IntentType.CODE_EDIT: self._handle_edit_stream,
+            IntentType.CODE_REVIEW: self._handle_review_stream,
+            IntentType.CODE_EXPLAIN: self._handle_explain_stream,
+            IntentType.CODE_REFACTOR: self._handle_refactor_stream,
+            IntentType.GENERAL_CHAT: self._handle_general_chat_stream,
+        }
+
+        handler = streaming_handlers.get(intent.type)
+
+        if handler:
+            yield from handler(message, intent)
+        else:
+            # Fall back to non-streaming for other intents
+            response = self.process(message, use_llm_classification)
+            yield (response.message, response)
+
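Each yield from this generator is a (chunk, final_response) tuple; final_response stays None until the terminal yield. For orientation, a minimal consumer sketch (illustrative only; it assumes an already-constructed CodeAgent named `agent`, which is not part of this diff):

```python
# Sketch: consuming CodeAgent.process_stream; `agent` is assumed to be
# a constructed CodeAgent (hypothetical setup, not part of this diff).
final = None
for chunk, resp in agent.process_stream("explain src/main.py"):
    if chunk:
        print(chunk, end="", flush=True)  # render partial output as it arrives
    if resp is not None:
        final = resp  # only the last yield carries the complete AgentResponse
```

This is the same loop shape the cli.py changes further down use to consume the stream.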
+    def _handle_explain_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle code explanation with streaming."""
+        if not intent.file_paths:
+            response = AgentResponse(
+                message="Please specify which file or code you want me to explain.",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        file_path = intent.file_paths[0]
+        content = self.file_manager.read_file(file_path)
+
+        if not content:
+            response = AgentResponse(
+                message=f"Cannot find file: {file_path}",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        prompt = f"""Explain the following code in a clear, educational way.
+
+## Code ({file_path})
+```
+{content[:5000]}
+```
+
+## Instructions
+1. Start with a high-level overview
+2. Explain the main components/functions
+3. Describe the flow of execution
+4. Note any important patterns or techniques used
+5. Keep the explanation concise but thorough
+"""
+
+        # Stream the explanation
+        full_response = f"📖 **Explanation of {file_path}**\n\n"
+        yield (f"📖 **Explanation of {file_path}**\n\n", None)
+
+        for chunk in self.llm.stream(prompt):
+            full_response += chunk
+            yield (chunk, None)
+
+        # Final response
+        response = AgentResponse(
+            message=full_response,
+            intent=intent,
+        )
+        yield ("", response)
+
+    def _handle_review_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle code review with streaming."""
+        if not intent.file_paths:
+            context = self.file_manager.get_project_context()
+            py_files = [f.relative_path for f in context.files if f.extension == ".py"][:5]
+
+            if py_files:
+                msg = f"Which file would you like me to review? Found these Python files:\n" + \
+                      "\n".join(f"  • {f}" for f in py_files)
+            else:
+                msg = "Please specify which file you want me to review."
+
+            response = AgentResponse(message=msg, intent=intent)
+            yield (msg, response)
+            return
+
+        file_path = intent.file_paths[0]
+        content = self.file_manager.read_file(file_path)
+
+        if not content:
+            response = AgentResponse(
+                message=f"Cannot find file: {file_path}",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        yield (f"🔍 **Reviewing {file_path}...**\n\n", None)
+
+        # Use streaming for the review
+        prompt = f"""Review the following code for issues, bugs, and improvements.
+
+## Code ({file_path})
+```
+{content[:5000]}
+```
+
+## Review Format
+Provide a structured review with:
+1. **Summary** - Brief overview
+2. **Issues** - List any bugs, security issues, or problems
+3. **Suggestions** - Improvements and best practices
+4. **Score** - Rate the code quality (1-10)
+
+Be specific and actionable.
+"""
+
+        full_response = f"🔍 **Reviewing {file_path}...**\n\n"
+
+        for chunk in self.llm.stream(prompt):
+            full_response += chunk
+            yield (chunk, None)
+
+        response = AgentResponse(
+            message=full_response,
+            intent=intent,
+        )
+        yield ("", response)
+
+    def _handle_generate_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle code generation with streaming."""
+        yield ("🔨 **Generating code...**\n\n", None)
+
+        request = CodeGenerationRequest(
+            description=message,
+            language=intent.language,
+            file_path=intent.file_paths[0] if intent.file_paths else None,
+        )
+
+        # Generate code (this part streams)
+        full_code = ""
+        prompt = self.code_generator._build_prompt(request)
+
+        for chunk in self.llm.stream(prompt):
+            full_code += chunk
+            yield (chunk, None)
+
+        # Extract and create changeset
+        code = self._extract_code(full_code)
+        file_path = request.file_path or f"generated.{request.language or 'py'}"
+
+        generated = GeneratedCode(
+            code=code,
+            language=request.language or "python",
+            file_path=file_path,
+            description=request.description,
+        )
+
+        changeset = ChangeSet(description=f"Generate: {message[:50]}...")
+        diff = self.diff_engine.create_file_diff(generated.file_path, generated.code)
+        changeset.diffs.append(diff)
+
+        self._pending_changeset = changeset
+
+        response = AgentResponse(
+            message=f"\n\n✅ Code generated for {file_path}",
+            intent=intent,
+            generated_code=generated,
+            changeset=changeset,
+            requires_confirmation=True,
+        )
+        yield ("\n\n✅ Code generated. Apply changes? (yes/no)", response)
+
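Generation finishes by parking a ChangeSet on self._pending_changeset and returning a response flagged requires_confirmation=True. A sketch of the generate-then-apply flow (assumes `agent` is a constructed CodeAgent; confirm_changes() is the same call the cli.py changes further down use to apply pending changes):

```python
# Sketch: generate, then apply the pending changeset on confirmation.
# `agent` is assumed to be a constructed CodeAgent (not part of this diff).
final = None
for chunk, resp in agent.process_stream("generate a function to validate email"):
    if resp is not None:
        final = resp  # carries generated_code, changeset, requires_confirmation
if final is not None and final.requires_confirmation:
    success, msg = agent.confirm_changes()  # applies the stored changeset
    print(msg)
```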
+    def _handle_edit_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle code editing with streaming."""
+        if not intent.file_paths:
+            response = AgentResponse(
+                message="Please specify which file you want to edit.",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        file_path = intent.file_paths[0]
+        original = self.file_manager.read_file(file_path)
+
+        if not original:
+            response = AgentResponse(
+                message=f"Cannot find file: {file_path}",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        yield (f"✏️ **Editing {file_path}...**\n\n", None)
+
+        prompt = f"""Edit the following code according to the user's request.
+
+## Original Code ({file_path})
+```
+{original[:5000]}
+```
+
+## User Request
+{message}
+
+## Instructions
+Return the COMPLETE modified file.
+
+```
+"""
+
+        full_response = ""
+        for chunk in self.llm.stream(prompt):
+            full_response += chunk
+            yield (chunk, None)
+
+        new_code = self._extract_code(full_response)
+
+        changeset = ChangeSet(description=f"Edit: {message[:50]}...")
+        diff = self.diff_engine.create_diff(original, new_code, file_path)
+        changeset.diffs.append(diff)
+
+        self._pending_changeset = changeset
+
+        response = AgentResponse(
+            message=f"\n\n✅ Edit complete for {file_path}",
+            intent=intent,
+            changeset=changeset,
+            requires_confirmation=True,
+        )
+        yield ("\n\n✅ Edit complete. Apply changes? (yes/no)", response)
+
+    def _handle_refactor_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle code refactoring with streaming."""
+        if not intent.file_paths:
+            response = AgentResponse(
+                message="Please specify which file you want to refactor.",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        file_path = intent.file_paths[0]
+        original = self.file_manager.read_file(file_path)
+
+        if not original:
+            response = AgentResponse(
+                message=f"Cannot find file: {file_path}",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        yield (f"🔄 **Refactoring {file_path}...**\n\n", None)
+
+        prompt = f"""Refactor the following code to improve its quality.
+
+## Original Code ({file_path})
+```
+{original[:5000]}
+```
+
+## User Request
+{message}
+
+Return the COMPLETE refactored file.
+
+```
+"""
+
+        full_response = ""
+        for chunk in self.llm.stream(prompt):
+            full_response += chunk
+            yield (chunk, None)
+
+        new_code = self._extract_code(full_response)
+
+        changeset = ChangeSet(description=f"Refactor: {file_path}")
+        diff = self.diff_engine.create_diff(original, new_code, file_path)
+        changeset.diffs.append(diff)
+
+        self._pending_changeset = changeset
+
+        response = AgentResponse(
+            message=f"\n\n✅ Refactoring complete for {file_path}",
+            intent=intent,
+            changeset=changeset,
+            requires_confirmation=True,
+        )
+        yield ("\n\n✅ Refactoring complete. Apply changes? (yes/no)", response)
+
+    def _handle_general_chat_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle general chat with streaming."""
+        context = self.file_manager.get_project_context()
+
+        prompt = f"""You are a helpful coding assistant. Answer the user's question.
+
+Project context:
+- Root: {context.root_path.name}
+- Languages: {', '.join(context.languages)}
+- Files: {context.total_code_files} code files
+
+User: {message}
+
+Provide a helpful, concise response.
+"""
+
+        full_response = ""
+        for chunk in self.llm.stream(prompt):
+            full_response += chunk
+            yield (chunk, None)
+
+        response = AgentResponse(message=full_response, intent=intent)
+        yield ("", response)
+
+
+
+    def process_stream(self, message: str, use_llm_classification: bool = True) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """
+        Process a user message with streaming output.
+
+        Yields tuples of (chunk, final_response).
+        During streaming, final_response is None.
+        The last yield will have the complete AgentResponse.
+        """
+        # Classify intent (non-streaming, it's fast)
+        if use_llm_classification:
+            intent = self.intent_classifier.classify_with_llm(message)
+        else:
+            intent = self.intent_classifier.classify(message)
+
+        # Route to appropriate streaming handler
+        streaming_handlers = {
+            IntentType.CODE_GENERATE: self._handle_generate_stream,
+            IntentType.CODE_EDIT: self._handle_edit_stream,
+            IntentType.CODE_REVIEW: self._handle_review_stream,
+            IntentType.CODE_EXPLAIN: self._handle_explain_stream,
+            IntentType.CODE_REFACTOR: self._handle_refactor_stream,
+            IntentType.GENERAL_CHAT: self._handle_general_chat_stream,
+        }
+
+        handler = streaming_handlers.get(intent.type)
+
+        if handler:
+            yield from handler(message, intent)
+        else:
+            # Fall back to non-streaming for other intents
+            response = self.process(message, use_llm_classification)
+            yield (response.message, response)
+
+    def _handle_explain_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle code explanation with streaming."""
+        if not intent.file_paths:
+            response = AgentResponse(
+                message="Please specify which file or code you want me to explain.",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        file_path = intent.file_paths[0]
+        content = self.file_manager.read_file(file_path)
+
+        if not content:
+            response = AgentResponse(
+                message=f"Cannot find file: {file_path}",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        prompt = f"""Explain the following code in a clear, educational way.
+
+## Code ({file_path})
+```
+{content[:5000]}
+```
+
+## Instructions
+1. Start with a high-level overview
+2. Explain the main components/functions
+3. Describe the flow of execution
+4. Note any important patterns or techniques used
+5. Keep the explanation concise but thorough
+"""
+
+        # Stream the explanation
+        full_response = f"📖 **Explanation of {file_path}**\n\n"
+        yield (f"📖 **Explanation of {file_path}**\n\n", None)
+
+        for chunk in self.llm.stream(prompt):
+            full_response += chunk
+            yield (chunk, None)
+
+        # Final response
+        response = AgentResponse(
+            message=full_response,
+            intent=intent,
+        )
+        yield ("", response)
+
+    def _handle_review_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle code review with streaming."""
+        if not intent.file_paths:
+            context = self.file_manager.get_project_context()
+            py_files = [f.relative_path for f in context.files if f.extension == ".py"][:5]
+
+            if py_files:
+                msg = f"Which file would you like me to review? Found these Python files:\n" + \
+                      "\n".join(f"  • {f}" for f in py_files)
+            else:
+                msg = "Please specify which file you want me to review."
+
+            response = AgentResponse(message=msg, intent=intent)
+            yield (msg, response)
+            return
+
+        file_path = intent.file_paths[0]
+        content = self.file_manager.read_file(file_path)
+
+        if not content:
+            response = AgentResponse(
+                message=f"Cannot find file: {file_path}",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        yield (f"🔍 **Reviewing {file_path}...**\n\n", None)
+
+        # Use streaming for the review
+        prompt = f"""Review the following code for issues, bugs, and improvements.
+
+## Code ({file_path})
+```
+{content[:5000]}
+```
+
+## Review Format
+Provide a structured review with:
+1. **Summary** - Brief overview
+2. **Issues** - List any bugs, security issues, or problems
+3. **Suggestions** - Improvements and best practices
+4. **Score** - Rate the code quality (1-10)
+
+Be specific and actionable.
+"""
+
+        full_response = f"🔍 **Reviewing {file_path}...**\n\n"
+
+        for chunk in self.llm.stream(prompt):
+            full_response += chunk
+            yield (chunk, None)
+
+        response = AgentResponse(
+            message=full_response,
+            intent=intent,
+        )
+        yield ("", response)
+
+    def _handle_generate_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle code generation with streaming."""
+        yield ("🔨 **Generating code...**\n\n", None)
+
+        request = CodeGenerationRequest(
+            description=message,
+            language=intent.language,
+            file_path=intent.file_paths[0] if intent.file_paths else None,
+        )
+
+        # Generate code (this part streams)
+        full_code = ""
+        prompt = self.code_generator._build_prompt(request)
+
+        for chunk in self.llm.stream(prompt):
+            full_code += chunk
+            yield (chunk, None)
+
+        # Extract and create changeset
+        code = self._extract_code(full_code)
+        file_path = request.file_path or f"generated.{request.language or 'py'}"
+
+        generated = GeneratedCode(
+            code=code,
+            language=request.language or "python",
+            file_path=file_path,
+            description=request.description,
+        )
+
+        changeset = ChangeSet(description=f"Generate: {message[:50]}...")
+        diff = self.diff_engine.create_file_diff(generated.file_path, generated.code)
+        changeset.diffs.append(diff)
+
+        self._pending_changeset = changeset
+
+        response = AgentResponse(
+            message=f"\n\n✅ Code generated for {file_path}",
+            intent=intent,
+            generated_code=generated,
+            changeset=changeset,
+            requires_confirmation=True,
+        )
+        yield ("\n\n✅ Code generated. Apply changes? (yes/no)", response)
+
+    def _handle_edit_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle code editing with streaming."""
+        if not intent.file_paths:
+            response = AgentResponse(
+                message="Please specify which file you want to edit.",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        file_path = intent.file_paths[0]
+        original = self.file_manager.read_file(file_path)
+
+        if not original:
+            response = AgentResponse(
+                message=f"Cannot find file: {file_path}",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        yield (f"✏️ **Editing {file_path}...**\n\n", None)
+
+        prompt = f"""Edit the following code according to the user's request.
+
+## Original Code ({file_path})
+```
+{original[:5000]}
+```
+
+## User Request
+{message}
+
+## Instructions
+Return the COMPLETE modified file.
+
+```
+"""
+
+        full_response = ""
+        for chunk in self.llm.stream(prompt):
+            full_response += chunk
+            yield (chunk, None)
+
+        new_code = self._extract_code(full_response)
+
+        changeset = ChangeSet(description=f"Edit: {message[:50]}...")
+        diff = self.diff_engine.create_diff(original, new_code, file_path)
+        changeset.diffs.append(diff)
+
+        self._pending_changeset = changeset
+
+        response = AgentResponse(
+            message=f"\n\n✅ Edit complete for {file_path}",
+            intent=intent,
+            changeset=changeset,
+            requires_confirmation=True,
+        )
+        yield ("\n\n✅ Edit complete. Apply changes? (yes/no)", response)
+
+    def _handle_refactor_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle code refactoring with streaming."""
+        if not intent.file_paths:
+            response = AgentResponse(
+                message="Please specify which file you want to refactor.",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        file_path = intent.file_paths[0]
+        original = self.file_manager.read_file(file_path)
+
+        if not original:
+            response = AgentResponse(
+                message=f"Cannot find file: {file_path}",
+                intent=intent,
+            )
+            yield (response.message, response)
+            return
+
+        yield (f"🔄 **Refactoring {file_path}...**\n\n", None)
+
+        prompt = f"""Refactor the following code to improve its quality.
+
+## Original Code ({file_path})
+```
+{original[:5000]}
+```
+
+## User Request
+{message}
+
+Return the COMPLETE refactored file.
+
+```
+"""
+
+        full_response = ""
+        for chunk in self.llm.stream(prompt):
+            full_response += chunk
+            yield (chunk, None)
+
+        new_code = self._extract_code(full_response)
+
+        changeset = ChangeSet(description=f"Refactor: {file_path}")
+        diff = self.diff_engine.create_diff(original, new_code, file_path)
+        changeset.diffs.append(diff)
+
+        self._pending_changeset = changeset
+
+        response = AgentResponse(
+            message=f"\n\n✅ Refactoring complete for {file_path}",
+            intent=intent,
+            changeset=changeset,
+            requires_confirmation=True,
+        )
+        yield ("\n\n✅ Refactoring complete. Apply changes? (yes/no)", response)
+
+    def _handle_general_chat_stream(self, message: str, intent: Intent) -> Iterator[Tuple[str, Optional[AgentResponse]]]:
+        """Handle general chat with streaming."""
+        context = self.file_manager.get_project_context()
+
+        prompt = f"""You are a helpful coding assistant. Answer the user's question.
+
+Project context:
+- Root: {context.root_path.name}
+- Languages: {', '.join(context.languages)}
+- Files: {context.total_code_files} code files
+
+User: {message}
+
+Provide a helpful, concise response.
+"""
+
+        full_response = ""
+        for chunk in self.llm.stream(prompt):
+            full_response += chunk
+            yield (chunk, None)
+
+        response = AgentResponse(message=full_response, intent=intent)
+        yield ("", response)
+
+
     def _extract_code(self, response: str) -> str:
         """Extract code from LLM response."""
         import re
ai_code_assistant/chat/agent_session.py CHANGED
@@ -3,7 +3,7 @@
 from dataclasses import dataclass, field
 from datetime import datetime
 from pathlib import Path
-from typing import Dict, List, Literal, Optional
+from typing import Dict, Iterator, List, Literal, Optional, Tuple
 
 from ai_code_assistant.config import Config
 from ai_code_assistant.llm import LLMManager
@@ -67,6 +67,96 @@ class AgentChatSession:
 
         return assistant_msg
 
+
+    def send_message_stream(self, user_input: str) -> Iterator[Tuple[str, Optional[AgentMessage]]]:
+        """
+        Process user message with streaming output.
+
+        Yields tuples of (chunk, final_message).
+        During streaming, final_message is None.
+        The last yield will have the complete AgentMessage.
+        """
+        # Add user message to history
+        user_msg = AgentMessage(role="user", content=user_input)
+        self.history.append(user_msg)
+
+        # Check for confirmation/rejection of pending changes
+        if self._awaiting_confirmation:
+            msg = self._handle_confirmation(user_input)
+            yield (msg.content, msg)
+            return
+
+        # Process through agent with streaming
+        full_content = ""
+        final_response = None
+
+        for chunk, response in self.agent.process_stream(user_input):
+            full_content += chunk
+            yield (chunk, None)
+            if response is not None:
+                final_response = response
+
+        # Create assistant message
+        assistant_msg = AgentMessage(
+            role="assistant",
+            content=full_content,
+            response=final_response,
+            pending_action=final_response.requires_confirmation if final_response else False,
+        )
+        self.history.append(assistant_msg)
+
+        # Track if we're awaiting confirmation
+        if final_response:
+            self._awaiting_confirmation = final_response.requires_confirmation
+
+        yield ("", assistant_msg)
+
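The session wrapper adds history bookkeeping and the confirmation gate on top of the agent stream. A sketch of one chat turn (assumes `session` is an already-constructed AgentChatSession; this mirrors the cli.py agent loop further down):

```python
# Sketch: one chat turn over AgentChatSession.send_message_stream.
# `session` is assumed to be a constructed AgentChatSession (not in this diff).
final_msg = None
for chunk, msg in session.send_message_stream("review src/main.py"):
    if chunk:
        print(chunk, end="", flush=True)
    if msg is not None:
        final_msg = msg  # complete AgentMessage, already appended to history
if final_msg and final_msg.pending_action:
    print("\nApply these changes? (yes/no)")
```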
+
+
+    def send_message_stream(self, user_input: str) -> Iterator[Tuple[str, Optional[AgentMessage]]]:
+        """
+        Process user message with streaming output.
+
+        Yields tuples of (chunk, final_message).
+        During streaming, final_message is None.
+        The last yield will have the complete AgentMessage.
+        """
+        # Add user message to history
+        user_msg = AgentMessage(role="user", content=user_input)
+        self.history.append(user_msg)
+
+        # Check for confirmation/rejection of pending changes
+        if self._awaiting_confirmation:
+            msg = self._handle_confirmation(user_input)
+            yield (msg.content, msg)
+            return
+
+        # Process through agent with streaming
+        full_content = ""
+        final_response = None
+
+        for chunk, response in self.agent.process_stream(user_input):
+            full_content += chunk
+            yield (chunk, None)
+            if response is not None:
+                final_response = response
+
+        # Create assistant message
+        assistant_msg = AgentMessage(
+            role="assistant",
+            content=full_content,
+            response=final_response,
+            pending_action=final_response.requires_confirmation if final_response else False,
+        )
+        self.history.append(assistant_msg)
+
+        # Track if we're awaiting confirmation
+        if final_response:
+            self._awaiting_confirmation = final_response.requires_confirmation
+
+        yield ("", assistant_msg)
+
+
     def _handle_confirmation(self, user_input: str) -> AgentMessage:
         """Handle user confirmation or rejection of pending changes."""
         lower_input = user_input.lower().strip()
ai_code_assistant/cli.py CHANGED
@@ -156,41 +156,91 @@ def review(ctx, files: Tuple[Path, ...], review_type: str, output_format: str,
               type=click.Choice(["console", "markdown", "json"]))
 @click.option("--source", "-s", type=click.Path(exists=True, path_type=Path),
               help="Source file (for test mode)")
+@click.option("--stream/--no-stream", default=True, help="Stream output in real-time")
 @click.pass_context
 def generate(ctx, description: str, mode: str, language: str, name: Optional[str],
              params: Optional[str], output: Optional[Path], output_format: str,
-             source: Optional[Path]):
+             source: Optional[Path], stream: bool):
     """Generate code from natural language description."""
+    from rich.live import Live
+    from rich.markdown import Markdown
+    from rich.panel import Panel
+
     config, llm = get_components(ctx.obj.get("config_path"))
     generator = CodeGenerator(config, llm)
     formatter = get_formatter(output_format, config.output.use_colors)
 
     console.print(f"\n[bold]Generating {mode} in {language}...[/bold]\n")
 
-    with console.status("[bold green]Generating code..."):
-        if mode == "function":
-            result = generator.generate_function(
-                description=description, name=name or "generated_function",
-                language=language, parameters=params or "",
-            )
-        elif mode == "class":
-            result = generator.generate_class(
-                description=description, name=name or "GeneratedClass", language=language,
-            )
-        elif mode == "script":
-            result = generator.generate_script(
-                description=description, requirements=[description], language=language,
-            )
-        elif mode == "test":
-            if not source:
-                console.print("[red]Error:[/red] --source required for test mode")
-                sys.exit(1)
-            source_code = source.read_text()
-            result = generator.generate_tests(source_code=source_code, language=language)
-        else:
-            result = generator.generate(description=description, language=language)
+    # Handle test mode source requirement
+    source_code = ""
+    if mode == "test":
+        if not source:
+            console.print("[red]Error:[/red] --source required for test mode")
+            sys.exit(1)
+        source_code = source.read_text()
 
-    formatted = formatter.format_generation(result)
+    if stream:
+        # Streaming mode - show output as it generates
+        full_response = ""
+        final_code = ""
+
+        console.print("[dim]Streaming response...[/dim]\n")
+
+        for chunk, is_complete in generator.generate_stream(
+            description=description,
+            mode=mode,
+            language=language,
+            name=name or "",
+            parameters=params or "",
+            source_code=source_code,
+        ):
+            if is_complete:
+                final_code = chunk
+            else:
+                console.print(chunk, end="", highlight=False)
+                full_response += chunk
+
+        console.print("\n")
+
+        # Create result for formatting
+        from ai_code_assistant.generator import GenerationResult
+        result = GenerationResult(
+            code=final_code,
+            language=language,
+            mode=mode,
+            description=description,
+            raw_response=full_response,
+        )
+
+        # Show extracted code in a panel
+        console.print(Panel(
+            final_code,
+            title=f"[bold green]Generated {mode.title()}[/bold green]",
+            border_style="green",
+        ))
+    else:
+        # Non-streaming mode (original behavior)
+        with console.status("[bold green]Generating code..."):
+            if mode == "function":
+                result = generator.generate_function(
+                    description=description, name=name or "generated_function",
+                    language=language, parameters=params or "",
+                )
+            elif mode == "class":
+                result = generator.generate_class(
+                    description=description, name=name or "GeneratedClass", language=language,
+                )
+            elif mode == "script":
+                result = generator.generate_script(
+                    description=description, requirements=[description], language=language,
+                )
+            elif mode == "test":
+                result = generator.generate_tests(source_code=source_code, language=language)
+            else:
+                result = generator.generate(description=description, language=language)
+
+    formatted = formatter.format_generation(result)
 
     if output and result.success:
         output.parent.mkdir(parents=True, exist_ok=True)
@@ -1441,21 +1491,20 @@ def agent(ctx, path: Path):
                 console.print("[green]No pending changes.[/green]")
                 continue
 
-            # Process through agent
-            with console.status("[bold green]Thinking..."):
-                response = session.send_message(user_input)
-
-            # Display response
+            # Process through agent with streaming
             console.print(f"\n[bold green]Agent[/bold green]")
 
-            # Use markdown rendering for better formatting
-            try:
-                console.print(Markdown(response.content))
-            except Exception:
-                console.print(response.content)
+            final_msg = None
+            for chunk, msg in session.send_message_stream(user_input):
+                if chunk:
+                    console.print(chunk, end="")
+                if msg is not None:
+                    final_msg = msg
+
+            console.print()  # Newline after streaming
 
             # Show confirmation prompt if needed
-            if response.pending_action:
+            if final_msg and final_msg.pending_action:
                 console.print("\n[yellow]Apply these changes? (yes/no)[/yellow]")
 
         except KeyboardInterrupt:
@@ -1515,13 +1564,15 @@ def _show_agent_help():
 @click.argument("file", type=click.Path(exists=True, path_type=Path))
 @click.option("--path", "-p", type=click.Path(exists=True, path_type=Path),
               default=".", help="Project root path")
+@click.option("--stream/--no-stream", default=True, help="Stream output in real-time")
 @click.pass_context
-def agent_review(ctx, file: Path, path: Path):
+def agent_review(ctx, file: Path, path: Path, stream: bool):
     """Quick code review using the agent.
 
     Examples:
         ai-assist agent-review src/main.py
         ai-assist agent-review utils.py --path ./my-project
+        ai-assist agent-review main.py --no-stream
     """
     from ai_code_assistant.agent import CodeAgent
 
@@ -1530,10 +1581,15 @@ def agent_review(ctx, file: Path, path: Path):
 
     console.print(f"\n[bold]Reviewing {file}...[/bold]\n")
 
-    with console.status("[bold green]Analyzing..."):
-        response = agent.process(f"review {file}")
-
-    console.print(response.message)
+    if stream:
+        for chunk, response in agent.process_stream(f"review {file}"):
+            if chunk:
+                console.print(chunk, end="")
+        console.print()
+    else:
+        with console.status("[bold green]Analyzing..."):
+            response = agent.process(f"review {file}")
+        console.print(response.message)
 
 
 @main.command("agent-generate")
@@ -1543,15 +1599,17 @@ def agent_review(ctx, file: Path, path: Path):
 @click.option("--path", "-p", type=click.Path(exists=True, path_type=Path),
               default=".", help="Project root path")
 @click.option("--apply", "-a", is_flag=True, help="Apply changes without confirmation")
+@click.option("--stream/--no-stream", default=True, help="Stream output in real-time")
 @click.pass_context
 def agent_generate(ctx, description: str, file: Optional[Path], language: Optional[str],
-                   path: Path, apply: bool):
+                   path: Path, apply: bool, stream: bool):
     """Generate code using the agent.
 
     Examples:
         ai-assist agent-generate "a function to validate email"
        ai-assist agent-generate "REST API for users" -f src/api.py
        ai-assist agent-generate "sorting algorithm" -l python
+       ai-assist agent-generate "hello world" --no-stream
    """
    from ai_code_assistant.agent import CodeAgent
 
@@ -1567,12 +1625,20 @@ def agent_generate(ctx, description: str, file: Optional[Path], language: Option
 
     console.print(f"\n[bold]Generating code...[/bold]\n")
 
-    with console.status("[bold green]Generating..."):
-        response = agent.process(request)
-
-    console.print(response.message)
+    final_response = None
+    if stream:
+        for chunk, response in agent.process_stream(request):
+            if chunk:
+                console.print(chunk, end="")
+            if response is not None:
+                final_response = response
+        console.print()
+    else:
+        with console.status("[bold green]Generating..."):
+            final_response = agent.process(request)
+        console.print(final_response.message)
 
-    if response.requires_confirmation:
+    if final_response and final_response.requires_confirmation:
         if apply:
             success, msg = agent.confirm_changes()
             console.print(f"\n{msg}")
@@ -1588,29 +1654,36 @@ def agent_generate(ctx, description: str, file: Optional[Path], language: Option
 @click.argument("file", type=click.Path(exists=True, path_type=Path))
 @click.option("--path", "-p", type=click.Path(exists=True, path_type=Path),
               default=".", help="Project root path")
+@click.option("--stream/--no-stream", default=True, help="Stream output in real-time")
 @click.pass_context
-def agent_explain(ctx, file: Path, path: Path):
+def agent_explain(ctx, file: Path, path: Path, stream: bool):
     """Explain code using the agent.
 
     Examples:
         ai-assist agent-explain src/main.py
         ai-assist agent-explain config.py --path ./my-project
+        ai-assist agent-explain main.py --no-stream
     """
     from ai_code_assistant.agent import CodeAgent
-    from rich.markdown import Markdown
 
     config, llm = get_components(ctx.obj.get("config_path"))
     agent = CodeAgent(llm, path.resolve())
 
     console.print(f"\n[bold]Explaining {file}...[/bold]\n")
 
-    with console.status("[bold green]Analyzing..."):
-        response = agent.process(f"explain {file}")
-
-    try:
-        console.print(Markdown(response.message))
-    except Exception:
-        console.print(response.message)
+    if stream:
+        for chunk, response in agent.process_stream(f"explain {file}"):
+            if chunk:
+                console.print(chunk, end="")
+        console.print()
+    else:
+        from rich.markdown import Markdown
+        with console.status("[bold green]Analyzing..."):
+            response = agent.process(f"explain {file}")
+        try:
+            console.print(Markdown(response.message))
+        except Exception:
+            console.print(response.message)
 
 
 if __name__ == "__main__":
ai_code_assistant/generator/code_gen.py CHANGED
@@ -3,7 +3,7 @@
 import re
 from dataclasses import dataclass, field
 from pathlib import Path
-from typing import List, Literal, Optional
+from typing import Iterator, List, Literal, Optional, Tuple
 
 from ai_code_assistant.config import Config
 from ai_code_assistant.llm import LLMManager
@@ -214,6 +214,160 @@ class CodeGenerator:
                 error=str(e),
             )
 
+
+    def generate_stream(
+        self,
+        description: str,
+        mode: str = "generic",
+        language: str = "python",
+        name: str = "",
+        parameters: str = "",
+        source_code: str = "",
+    ) -> Iterator[Tuple[str, bool]]:
+        """
+        Stream code generation with real-time output.
+
+        Yields tuples of (chunk, is_complete).
+        The final yield will have is_complete=True and contain the extracted code.
+        """
+        # Build the prompt based on mode
+        if mode == "function":
+            prompt_template = GENERATION_PROMPTS["function"]
+            formatted = prompt_template.format(
+                language=language,
+                description=description,
+                name=name or "generated_function",
+                parameters=parameters or "None specified",
+                return_type="appropriate type",
+                include_type_hints=self.config.generation.include_type_hints,
+                include_docstrings=self.config.generation.include_docstrings,
+            )
+        elif mode == "class":
+            prompt_template = GENERATION_PROMPTS["class"]
+            formatted = prompt_template.format(
+                language=language,
+                description=description,
+                name=name or "GeneratedClass",
+                attributes="None specified",
+                methods="None specified",
+                include_type_hints=self.config.generation.include_type_hints,
+                include_docstrings=self.config.generation.include_docstrings,
+            )
+        elif mode == "script":
+            prompt_template = GENERATION_PROMPTS["script"]
+            formatted = prompt_template.format(
+                language=language,
+                description=description,
+                requirements=f"- {description}",
+                include_type_hints=self.config.generation.include_type_hints,
+                include_docstrings=self.config.generation.include_docstrings,
+            )
+        elif mode == "test":
+            prompt_template = GENERATION_PROMPTS["test"]
+            formatted = prompt_template.format(
+                language=language,
+                source_code=source_code,
+                test_framework="pytest",
+            )
+        else:  # generic
+            prompt_template = GENERATION_PROMPTS["generic"]
+            formatted = prompt_template.format(
+                language=language,
+                description=description,
+                include_type_hints=self.config.generation.include_type_hints,
+                include_docstrings=self.config.generation.include_docstrings,
+            )
+
+        # Stream the response
+        full_response = ""
+        try:
+            for chunk in self.llm.stream(formatted):
+                full_response += chunk
+                yield (chunk, False)
+
+            # Extract code and yield final result
+            code = self._extract_code(full_response, language)
+            yield (code, True)
+        except Exception as e:
+            yield (f"Error: {str(e)}", True)
+
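Unlike the agent-level stream, this generator yields (chunk, is_complete) pairs: raw model output while is_complete is False, then the extracted code in the final tuple. A minimal consumer sketch (assumes `generator` is an already-constructed CodeGenerator; the streaming generate command in cli.py above consumes it the same way):

```python
# Sketch: consuming CodeGenerator.generate_stream.
# `generator` is assumed to be a constructed CodeGenerator (not in this diff).
final_code = ""
for chunk, is_complete in generator.generate_stream(
    description="a function to validate email",
    language="python",
):
    if is_complete:
        final_code = chunk  # final tuple carries the extracted code block
    else:
        print(chunk, end="", flush=True)  # raw LLM output as it streams
```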
+
+    def generate_stream(
+        self,
+        description: str,
+        mode: str = "generic",
+        language: str = "python",
+        name: str = "",
+        parameters: str = "",
+        source_code: str = "",
+    ) -> Iterator[Tuple[str, bool]]:
+        """
+        Stream code generation with real-time output.
+
+        Yields tuples of (chunk, is_complete).
+        The final yield will have is_complete=True and contain the extracted code.
+        """
+        # Build the prompt based on mode
+        if mode == "function":
+            prompt_template = GENERATION_PROMPTS["function"]
+            formatted = prompt_template.format(
+                language=language,
+                description=description,
+                name=name or "generated_function",
+                parameters=parameters or "None specified",
+                return_type="appropriate type",
+                include_type_hints=self.config.generation.include_type_hints,
+                include_docstrings=self.config.generation.include_docstrings,
+            )
+        elif mode == "class":
+            prompt_template = GENERATION_PROMPTS["class"]
+            formatted = prompt_template.format(
+                language=language,
+                description=description,
+                name=name or "GeneratedClass",
+                attributes="None specified",
+                methods="None specified",
+                include_type_hints=self.config.generation.include_type_hints,
+                include_docstrings=self.config.generation.include_docstrings,
+            )
+        elif mode == "script":
+            prompt_template = GENERATION_PROMPTS["script"]
+            formatted = prompt_template.format(
+                language=language,
+                description=description,
+                requirements=f"- {description}",
+                include_type_hints=self.config.generation.include_type_hints,
+                include_docstrings=self.config.generation.include_docstrings,
+            )
+        elif mode == "test":
+            prompt_template = GENERATION_PROMPTS["test"]
+            formatted = prompt_template.format(
+                language=language,
+                source_code=source_code,
+                test_framework="pytest",
+            )
+        else:  # generic
+            prompt_template = GENERATION_PROMPTS["generic"]
+            formatted = prompt_template.format(
+                language=language,
+                description=description,
+                include_type_hints=self.config.generation.include_type_hints,
+                include_docstrings=self.config.generation.include_docstrings,
+            )
+
+        # Stream the response
+        full_response = ""
+        try:
+            for chunk in self.llm.stream(formatted):
+                full_response += chunk
+                yield (chunk, False)
+
+            # Extract code and yield final result
+            code = self._extract_code(full_response, language)
+            yield (code, True)
+        except Exception as e:
+            yield (f"Error: {str(e)}", True)
+
     def _extract_code(self, response: str, language: str) -> str:
         """Extract code block from LLM response."""
         # Try to find language-specific code block
cognify_code-0.2.4.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: cognify-code
-Version: 0.2.2
+Version: 0.2.4
 Summary: Your local AI-powered code assistant. Review, generate, search, and refactor code with an intelligent AI agent—all running locally with complete privacy.
 Author-email: Ashok Kumar <akkssy@users.noreply.github.com>
 Maintainer-email: Ashok Kumar <akkssy@users.noreply.github.com>
cognify_code-0.2.4.dist-info/RECORD CHANGED
@@ -1,24 +1,24 @@
 ai_code_assistant/__init__.py,sha256=XnpG4h-2gW3cXseFvqQT_-XyOmVJtikVMrHUnmy8XKI,409
-ai_code_assistant/cli.py,sha256=47TI6XMkcgojYLSLADnk3BphxUffSgmV0tCTHIu4W7o,61350
+ai_code_assistant/cli.py,sha256=XSpwAQgWXhhLX9c628PwdSfx5czK3p5qS4nNNgESnWw,64153
 ai_code_assistant/config.py,sha256=6sAufexwzfCu2JNWvt9KevS9k_gMcjj1TAnwuaO1ZFw,4727
 ai_code_assistant/llm.py,sha256=DfcWJf6zEAUsPSEZLdEmb9o6BQNf1Ja88nswjpy6cOw,4209
 ai_code_assistant/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ai_code_assistant/agent/__init__.py,sha256=BcVe4Ebopv_J01ApnRl05oN5yOet5mEefBrQmdPsUj0,1284
-ai_code_assistant/agent/code_agent.py,sha256=0_T3bFzPosMKKczYx49UGIWJtMBEuKy4ey-gT-qdfYQ,16155
+ai_code_assistant/agent/code_agent.py,sha256=y0Osc8wzpIAW1B1NGrlsSw_vIYu-ZlMOae05IhM2XYM,38463
 ai_code_assistant/agent/code_generator.py,sha256=rAaziRU-mJ5NooERjR_Cd6_hwO0kuULw3Sp8Ca9kR48,13138
 ai_code_assistant/agent/code_reviewer.py,sha256=YiM7lRJhoN-vBnQb29jF-5nmE9ppL-OJffvx4ocTHEU,12066
 ai_code_assistant/agent/diff_engine.py,sha256=A5jszowc5VmWbdidpIW_QhswG_Hats3FYuemP8VoYv4,11018
 ai_code_assistant/agent/file_manager.py,sha256=Inyfo-UXT4joms1ADIMA_TKtIHEjEkBz4I4U2iK5_jI,10742
 ai_code_assistant/agent/intent_classifier.py,sha256=MuIcyWQntocrTlCb4CD54mhc3JfSsWGfulsPYGUoz6E,10667
 ai_code_assistant/chat/__init__.py,sha256=KntIXcjbPgznax1E0fvdrA3XtKF-hCz5Fr1tcRbdl7U,279
-ai_code_assistant/chat/agent_session.py,sha256=VcutqSq8GKwldJhhAs_4_kXOuUvtnjRXZML1Mamm9SM,5495
+ai_code_assistant/chat/agent_session.py,sha256=-sW78d0nifRBNO6PRDiqdd8Sqpkv98kedvZbBQzK3lo,8674
 ai_code_assistant/chat/session.py,sha256=5JRd1DuLjxbtckmsMeHzNjoEZnJS9lx9NoX6z03F0xE,5500
 ai_code_assistant/editor/__init__.py,sha256=892BfTIo6kLdfZdhnvl4OFe0QSnxE4EyfkBoyLdA5rc,340
 ai_code_assistant/editor/diff_handler.py,sha256=LeI-00GuH7ASIetsUzT3Y_pDq4K1wmycuu4UFu5ZkGg,8759
 ai_code_assistant/editor/file_editor.py,sha256=csD8MW0jrfXAek5blWNuot_QWlhkgTTmtQtf8rbIdhY,11143
 ai_code_assistant/editor/prompts.py,sha256=wryxwb4dNaeSbl5mHkDuw2uTAZxkdrdyZ89Gfogafow,4356
 ai_code_assistant/generator/__init__.py,sha256=CfCO58CBye-BlZHjOfLLShovp2TVXg_GHKJuXe6ihu0,273
-ai_code_assistant/generator/code_gen.py,sha256=vJ4xeiG4_1hOZ8TW7YZ-gTAVKoiHwgUzzdcWMvhgeRo,8609
+ai_code_assistant/generator/code_gen.py,sha256=Sp_j1IdR0vbU5xRZHhvTBCbxfNxtvNMU1tEg0NLV00k,14790
 ai_code_assistant/generator/prompts.py,sha256=uoEDpcRzpTd-4TLHNW_EbSHJiADMlu9SoGWZvvo1Adk,3384
 ai_code_assistant/git/__init__.py,sha256=YgqmzneAnZyRrbazMqGoFSPIk5Yf5OTm2LXPbkQmecU,232
 ai_code_assistant/git/commit_generator.py,sha256=CzDH5ZPqEaXyPznBg8FgTz8wbV4adALUQD__kl8au6o,4135
@@ -47,9 +47,9 @@ ai_code_assistant/reviewer/prompts.py,sha256=9RrHEBttS5ngxY2BNsUvqGC6-cTxco-kDPb
 ai_code_assistant/utils/__init__.py,sha256=3HO-1Bj4VvUtM7W1C3MKR4DzQ9Xc875QKSHHkHwuqVs,368
 ai_code_assistant/utils/file_handler.py,sha256=jPxvtI5dJxkpPjELgRJ11WXamtyKKmZANQ1fcfMVtiU,5239
 ai_code_assistant/utils/formatters.py,sha256=5El9ew9HS6JLBucBUxxcw4fO5nLpOucgNJrJj2NC3zw,8945
-cognify_code-0.2.2.dist-info/licenses/LICENSE,sha256=5yu_kWq2bK-XKhWo79Eykdg4Qf3O8V2Ys7cpOO7GyyE,1063
-cognify_code-0.2.2.dist-info/METADATA,sha256=HRjZsc18DNcoS-NOJw9mCXB3qvrwuP1GhkgeLHmucOg,11862
-cognify_code-0.2.2.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
-cognify_code-0.2.2.dist-info/entry_points.txt,sha256=MrBnnWPHZVozqqKyTlnJO63YN2kE5yPWKlr2nnRFRks,94
-cognify_code-0.2.2.dist-info/top_level.txt,sha256=dD_r1x-oX0s1uspYY72kig4jfIsjh3oDKwOBCMYXqpo,18
-cognify_code-0.2.2.dist-info/RECORD,,
+cognify_code-0.2.4.dist-info/licenses/LICENSE,sha256=5yu_kWq2bK-XKhWo79Eykdg4Qf3O8V2Ys7cpOO7GyyE,1063
+cognify_code-0.2.4.dist-info/METADATA,sha256=_S3by3PNwTZmPdVbxZDBTLdKk7MZuoRttLOYZiXIYkI,11862
+cognify_code-0.2.4.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+cognify_code-0.2.4.dist-info/entry_points.txt,sha256=MrBnnWPHZVozqqKyTlnJO63YN2kE5yPWKlr2nnRFRks,94
+cognify_code-0.2.4.dist-info/top_level.txt,sha256=dD_r1x-oX0s1uspYY72kig4jfIsjh3oDKwOBCMYXqpo,18
+cognify_code-0.2.4.dist-info/RECORD,,