webscout 3.4-py3-none-any.whl → 3.6-py3-none-any.whl

This diff compares the contents of two publicly released versions of this package, as published to their public registry. It is provided for informational purposes only.
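The bulk of the 3.4 → 3.6 change is a new synchronous `Phindv2` provider and its asynchronous counterpart `AsyncPhindv2`, shown in the hunk below. As a quick orientation, here is a minimal usage sketch: the `from webscout import Phindv2` import path is an assumption based on how other webscout providers are exposed, while the constructor arguments and streaming behavior are taken from the diffed code.

```python
# Hypothetical usage of the Phindv2 provider added in 3.6.
# Assumption: Phindv2 is importable from the package root like other providers.
from webscout import Phindv2

bot = Phindv2(
    model="Phind Instant",                    # default per the new __init__
    system_prompt="Be Helpful and Friendly",  # default system prompt
    timeout=30,
)

# Non-streaming: chat() returns the final response text.
print(bot.chat("Write a one-line summary of SSE"))

# Streaming: chat(stream=True) yields chunks; note that get_message()
# rewrites each chunk's delta to the accumulated text, so every yielded
# string is the full response so far, not just the new tokens.
for text in bot.chat("Explain HTTP/2 briefly", stream=True):
    print(text)
```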
@@ -477,6 +477,495 @@ class AsyncPhindSearch(AsyncProvider):
 
         return for_stream() if stream else await for_non_stream()
 
+    async def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        if response.get("type", "") == "metadata":
+            return
+
+        delta: dict = response["choices"][0]["delta"]
+
+        if not delta:
+            return ""
+
+        elif delta.get("function_call"):
+            if self.quiet:
+                return ""
+
+            function_call: dict = delta["function_call"]
+            if function_call.get("name"):
+                return function_call["name"]
+            elif function_call.get("arguments"):
+                return function_call.get("arguments")
+
+        elif delta.get("metadata"):
+            if self.quiet:
+                return ""
+            return yaml.dump(delta["metadata"])
+
+        else:
+            return (
+                response["choices"][0]["delta"].get("content")
+                if response["choices"][0].get("finish_reason") is None
+                else ""
+            )
+class Phindv2(Provider):
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 8000,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "Phind Instant",
+        quiet: bool = False,
+        system_prompt: str = "Be Helpful and Friendly",
+    ):
+        """Instantiates Phindv2
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            model (str, optional): Model name. Defaults to "Phind Model".
+            quiet (bool, optional): Ignore web search-results and yield final response only. Defaults to False.
+            system_prompt (str, optional): System prompt for Phindv2. Defaults to "Be Helpful and Friendly".
+        """
+        self.session = requests.Session()
+        self.max_tokens_to_sample = max_tokens
+        self.is_conversation = is_conversation
+        self.chat_endpoint = "https://https.extension.phind.com/agent/"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.quiet = quiet
+        self.system_prompt = system_prompt
+
+        self.headers = {
+            "Content-Type": "application/json",
+            "User-Agent": "",
+            "Accept": "*/*",
+            "Accept-Encoding": "Identity",
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "id": "chatcmpl-r0wujizf2i2xb60mjiwt",
+            "object": "chat.completion.chunk",
+            "created": 1706775384,
+            "model": "trt-llm-phind-model-serving",
+            "choices": [
+                {
+                    "index": 0,
+                    "delta": {
+                        "content": "Hello! How can I assist you with your programming today?"
+                    },
+                    "finish_reason": null
+                }
+            ]
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        self.session.headers.update(self.headers)
+        payload = {
+            "additional_extension_context": "",
+            "allow_magic_buttons": True,
+            "is_vscode_extension": True,
+            "message_history": [
+                {"content": self.system_prompt, "metadata": {}, "role": "system"},
+                {"content": conversation_prompt, "metadata": {}, "role": "user"}
+            ],
+            "requested_model": self.model,
+            "user_input": prompt,
+        }
+
+        def for_stream():
+            response = self.session.post(
+                self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
+            )
+            if (
+                not response.ok
+                or not response.headers.get("Content-Type")
+                == "text/event-stream; charset=utf-8"
+            ):
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+            streaming_text = ""
+            for value in response.iter_lines(
+                decode_unicode=True,
+                chunk_size=self.stream_chunk_size,
+            ):
+                try:
+                    modified_value = re.sub("data:", "", value)
+                    json_modified_value = json.loads(modified_value)
+                    retrieved_text = self.get_message(json_modified_value)
+                    if not retrieved_text:
+                        continue
+                    streaming_text += retrieved_text
+                    json_modified_value["choices"][0]["delta"][
+                        "content"
+                    ] = streaming_text
+                    self.last_response.update(json_modified_value)
+                    yield value if raw else json_modified_value
+                except json.decoder.JSONDecodeError:
+                    pass
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        if response.get("type", "") == "metadata":
+            return
+
+        delta: dict = response["choices"][0]["delta"]
+
+        if not delta:
+            return ""
+
+        elif delta.get("function_call"):
+            if self.quiet:
+                return ""
+
+            function_call: dict = delta["function_call"]
+            if function_call.get("name"):
+                return function_call["name"]
+            elif function_call.get("arguments"):
+                return function_call.get("arguments")
+
+        elif delta.get("metadata"):
+            if self.quiet:
+                return ""
+            return yaml.dump(delta["metadata"])
+
+        else:
+            return (
+                response["choices"][0]["delta"].get("content")
+                if response["choices"][0].get("finish_reason") is None
+                else ""
+            )
+
+class AsyncPhindv2(AsyncProvider):
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "Phind Instant",
+        quiet: bool = False,
+        system_prompt: str = "Be Helpful and Friendly",
+    ):
+        """Instantiates Phindv2
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            model (str, optional): Model name. Defaults to "Phind Model".
+            quiet (bool, optional): Ignore web search-results and yield final response only. Defaults to False.
+            system_prompt (str, optional): System prompt for Phindv2. Defaults to "Be Helpful and Friendly".
+        """
+        self.max_tokens_to_sample = max_tokens
+        self.is_conversation = is_conversation
+        self.chat_endpoint = "https://https.extension.phind.com/agent/"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.quiet = quiet
+        self.system_prompt = system_prompt
+
+        self.headers = {
+            "Content-Type": "application/json",
+            "User-Agent": "",
+            "Accept": "*/*",
+            "Accept-Encoding": "Identity",
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session = httpx.AsyncClient(headers=self.headers, proxies=proxies)
+
+    async def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict | AsyncGenerator:
+        """Chat with AI asynchronously.
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict|AsyncGenerator : ai content.
+        ```json
+        {
+            "id": "chatcmpl-r0wujizf2i2xb60mjiwt",
+            "object": "chat.completion.chunk",
+            "created": 1706775384,
+            "model": "trt-llm-phind-model-serving",
+            "choices": [
+                {
+                    "index": 0,
+                    "delta": {
+                        "content": "Hello! How can I assist you with your programming today?"
+                    },
+                    "finish_reason": null
+                }
+            ]
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "additional_extension_context": "",
+            "allow_magic_buttons": True,
+            "is_vscode_extension": True,
+            "message_history": [
+                {"content": self.system_prompt, "metadata": {}, "role": "system"},
+                {"content": conversation_prompt, "metadata": {}, "role": "user"}
+            ],
+            "requested_model": self.model,
+            "user_input": prompt,
+        }
+
+        async def for_stream():
+            async with self.session.stream(
+                "POST",
+                self.chat_endpoint,
+                json=payload,
+                timeout=self.timeout,
+            ) as response:
+                if (
+                    not response.is_success
+                    or not response.headers.get("Content-Type")
+                    == "text/event-stream; charset=utf-8"
+                ):
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
+                    )
+                streaming_text = ""
+                async for value in response.aiter_lines():
+                    try:
+                        modified_value = re.sub("data:", "", value)
+                        json_modified_value = json.loads(modified_value)
+                        retrieved_text = await self.get_message(json_modified_value)
+                        if not retrieved_text:
+                            continue
+                        streaming_text += retrieved_text
+                        json_modified_value["choices"][0]["delta"][
+                            "content"
+                        ] = streaming_text
+                        self.last_response.update(json_modified_value)
+                        yield value if raw else json_modified_value
+                    except json.decoder.JSONDecodeError:
+                        pass
+            self.conversation.update_chat_history(
+                prompt, await self.get_message(self.last_response)
+            )
+
+        async def for_non_stream():
+            async for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else await for_non_stream()
+
+    async def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str | AsyncGenerator:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str|AsyncGenerator: Response generated
+        """
+
+        async def for_stream():
+            ask_resp = await self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            )
+            async for response in ask_resp:
+                yield await self.get_message(response)
+
+        async def for_non_stream():
+            return await self.get_message(
+                await self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else await for_non_stream()
+
     async def get_message(self, response: dict) -> str:
         """Retrieves message only from response