webscout 1.3.9.tar.gz → 1.4.0.tar.gz
This diff shows the changes between package versions publicly released to one of the supported registries. It is provided for informational purposes only and reflects the versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout has been flagged as possibly problematic.
- {webscout-1.3.9 → webscout-1.4.0}/PKG-INFO +60 -41
- {webscout-1.3.9 → webscout-1.4.0}/README.md +57 -39
- {webscout-1.3.9 → webscout-1.4.0}/setup.py +2 -2
- {webscout-1.3.9 → webscout-1.4.0}/webscout/AI.py +228 -1
- webscout-1.4.0/webscout/version.py +2 -0
- {webscout-1.3.9 → webscout-1.4.0}/webscout.egg-info/PKG-INFO +60 -41
- {webscout-1.3.9 → webscout-1.4.0}/webscout.egg-info/requires.txt +2 -1
- webscout-1.3.9/webscout/version.py +0 -2
- {webscout-1.3.9 → webscout-1.4.0}/DeepWEBS/__init__.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/DeepWEBS/documents/__init__.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/DeepWEBS/documents/query_results_extractor.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/DeepWEBS/documents/webpage_content_extractor.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/DeepWEBS/networks/__init__.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/DeepWEBS/networks/filepath_converter.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/DeepWEBS/networks/google_searcher.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/DeepWEBS/networks/network_configs.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/DeepWEBS/networks/webpage_fetcher.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/DeepWEBS/utilsdw/__init__.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/DeepWEBS/utilsdw/enver.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/DeepWEBS/utilsdw/logger.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/LICENSE.md +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/setup.cfg +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/webscout/AIbase.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/webscout/AIutel.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/webscout/DWEBS.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/webscout/LLM.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/webscout/__init__.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/webscout/__main__.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/webscout/async_providers.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/webscout/cli.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/webscout/exceptions.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/webscout/g4f.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/webscout/models.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/webscout/transcriber.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/webscout/utils.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/webscout/voice.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/webscout/webai.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/webscout/webscout_search.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/webscout/webscout_search_async.py +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/webscout.egg-info/SOURCES.txt +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/webscout.egg-info/dependency_links.txt +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/webscout.egg-info/entry_points.txt +0 -0
- {webscout-1.3.9 → webscout-1.4.0}/webscout.egg-info/top_level.txt +0 -0
{webscout-1.3.9 → webscout-1.4.0}/PKG-INFO +60 -41

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 1.3.9
+Version: 1.4.0
 Summary: Search for anything using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models, can transcribe yt videos, have TTS support and now has webai(terminal gpt and open interpeter) support
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -47,7 +47,8 @@ Requires-Dist: tiktoken
 Requires-Dist: tldextract
 Requires-Dist: orjson
 Requires-Dist: PyYAML
-Requires-Dist: appdirsGoogleBard1>=2.1.4
+Requires-Dist: appdirs
+Requires-Dist: GoogleBard1>=2.1.4
 Provides-Extra: dev
 Requires-Dist: ruff>=0.1.6; extra == "dev"
 Requires-Dist: pytest>=7.4.2; extra == "dev"
@@ -90,7 +91,7 @@ Search for anything using the Google, DuckDuckGo.com, yep.com, phind.com, you.co
 - [usage of webscout.AI](#usage-of-webscoutai)
 - [1. `PhindSearch` - Search using Phind.com](#1-phindsearch---search-using-phindcom)
 - [2. `YepChat` - Chat with mistral 8x7b powered by yepchat](#2-yepchat---chat-with-mistral-8x7b-powered-by-yepchat)
-- [3. `You.com` - search with you.com](#3-youcom---search-with-youcom)
+- [3. `You.com` - search with you.com -NOT WORKING](#3-youcom---search-with-youcom--not-working)
 - [4. `Gemini` - search with google gemini](#4-gemini---search-with-google-gemini)
 - [usage of image generator from Webscout.AI](#usage-of-image-generator-from-webscoutai)
 - [5. `Prodia` - make image using prodia](#5-prodia---make-image-using-prodia)
@@ -100,7 +101,7 @@ Search for anything using the Google, DuckDuckGo.com, yep.com, phind.com, you.co
 - [9. `KOBOLDIA` -](#9-koboldia--)
 - [10. `Reka` - chat with reka](#10-reka---chat-with-reka)
 - [11. `Cohere` - chat with cohere](#11-cohere---chat-with-cohere)
-- [`LLM`](#llm)
+- [`LLM` --not working](#llm---not-working)
 - [`LLM` with internet](#llm-with-internet)
 - [`Webai` - terminal gpt and a open interpeter](#webai---terminal-gpt-and-a-open-interpeter)
 
@@ -552,26 +553,22 @@ message = ph.get_message(response)
 print(message)
 ```
 ### 2. `YepChat` - Chat with mistral 8x7b powered by yepchat
-Thanks To Divyansh Shukla for This code
 ```python
-from webscout.AI import
+from webscout.AI import YEPCHAT
 
-
-
-
-
-
-
-
-
-print(processed_response)
+# Instantiate the YEPCHAT class with default parameters
+YEPCHAT = YEPCHAT()
+
+# Define a prompt to send to the AI
+prompt = "What is the capital of France?"
+
+# Use the 'cha' method to get a response from the AI
+r = YEPCHAT.chat(prompt)
+print(r)
 
-if __name__ == "__main__":
-    main()
 ```
 
-### 3. `You.com` - search with you.com
+### 3. `You.com` - search with you.com -NOT WORKING
 ```python
 from webscout.AI import youChat
 
@@ -597,15 +594,34 @@ while True:
 ### 4. `Gemini` - search with google gemini
 
 ```python
-
+import webscout
+from webscout.AI import GEMINI
+
+# Replace with the path to your bard.google.com.cookies.json file
+COOKIE_FILE = "path/to/bard.google.com.cookies.json"
+
+# Optional: Provide proxy details if needed
+PROXIES = {
+    "http": "http://proxy_server:port",
+    "https": "https://proxy_server:port",
+}
+
+# Initialize GEMINI with cookie file and optional proxies
+gemini = GEMINI(cookie_file=COOKIE_FILE, proxy=PROXIES)
+
+# Ask a question and print the response
+response = gemini.chat("What is the meaning of life?")
+print(response)
 
-#
-gemini =
+# Ask another question, this time streaming the response
+for chunk in gemini.chat("Tell me a story", stream=True):
+    print(chunk, end="")
 
-#
-
+# Reset the conversation to start a new interaction
+gemini.reset()
 
-#
+# Ask a question with the code optimizer
+response = gemini.chat("Write Python code to print 'Hello, world!'", optimizer="code")
 print(response)
 ```
 ## usage of image generator from Webscout.AI
@@ -637,17 +653,18 @@ ai = BLACKBOXAI(
     model=None # You can specify a model if needed
 )
 
-#
-
-
-
-
-
-
-
-
-#
-
+# Start an infinite loop for continuous interaction
+while True:
+    # Define a prompt to send to the AI
+    prompt = input("Enter your prompt: ")
+
+    # Check if the user wants to exit the loop
+    if prompt.lower() == "exit":
+        break
+
+    # Use the 'chat' method to send the prompt and receive a response
+    r = ai.chat(prompt)
+    print(r)
 ```
 ### 7. `PERPLEXITY` - Search With PERPLEXITY
 ```python
@@ -665,10 +682,12 @@ print(response)
 from webscout.AI import OPENGPT
 
 opengpt = OPENGPT(is_conversation=True, max_tokens=8000, timeout=30)
-
-
-
-print
+while True:
+    # Prompt the user for input
+    prompt = input("Enter your prompt: ")
+    # Send the prompt to the OPENGPT model and print the response
+    response_str = opengpt.chat(prompt)
+    print(response_str)
 ```
 ### 9. `KOBOLDIA` -
 ```python
@@ -711,7 +730,7 @@ response_str = a.chat(prompt)
 print(response_str)
 ```
 
-### `LLM`
+### `LLM` --not working
 ```python
 from webscout.LLM import LLM
 
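One nit in the new YepChat snippet above: `YEPCHAT = YEPCHAT()` rebinds the class name to the instance (a second `YEPCHAT()` in the same session would raise `TypeError`), and the comment's 'cha' is a typo for the `chat` method actually called. A minimal non-shadowing sketch, using only the `YEPCHAT` class and `chat` method shown in the diff:

```python
from webscout.AI import YEPCHAT

# Keep the instance name distinct from the class name
bot = YEPCHAT()

# chat() returns the generated text for a single prompt
print(bot.chat("What is the capital of France?"))
```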
{webscout-1.3.9 → webscout-1.4.0}/README.md +57 -39

Same content hunks as the long-description portion of the PKG-INFO diff above, with line numbers offset by the metadata header (the TOC hunk starts at line 36 here instead of 90); the extra +3/-2 in PKG-INFO are its Version and Requires-Dist hunks.
{webscout-1.3.9 → webscout-1.4.0}/setup.py +2 -2

@@ -5,7 +5,7 @@ with open("README.md", encoding="utf-8") as f:
 
 setup(
     name="webscout",
-    version="1.3.9",
+    version="1.4.0",
     description="Search for anything using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models, can transcribe yt videos, have TTS support and now has webai(terminal gpt and open interpeter) support",
     long_description=README,
     long_description_content_type="text/markdown",
@@ -51,7 +51,7 @@ setup(
         "tldextract",
         "orjson",
         "PyYAML",
-        "appdirs"
+        "appdirs",
         "GoogleBard1>=2.1.4"
     ],
     entry_points={
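The comma after `"appdirs"` is the substantive fix in the second hunk: `"GoogleBard1>=2.1.4"` already sat on the next line in 1.3.9, so Python's implicit concatenation of adjacent string literals fused the two entries into one mangled requirement, which is what the broken `Requires-Dist` line in the old PKG-INFO above reflects. A quick illustration of the pitfall:

```python
# How install_requires ended in 1.3.9 (note the missing comma after "appdirs"):
install_requires = [
    "PyYAML",
    "appdirs"
    "GoogleBard1>=2.1.4",
]

# Adjacent string literals are joined at compile time, so setuptools
# saw one bogus requirement instead of two real ones:
print(install_requires[-1])  # appdirsGoogleBard1>=2.1.4
```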
{webscout-1.3.9 → webscout-1.4.0}/webscout/AI.py +228 -1

@@ -64,6 +64,7 @@ class LLAMA2(Provider):
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
         """
+        self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.model = model
@@ -4158,7 +4159,7 @@ class YEPCHAT(Provider):
         presence_penalty: int = 0,
         frequency_penalty: int = 0,
         top_p: float = 0.7,
-        model: str ="Mixtral-8x7B-Instruct-v0.1",
+        model: str = "Mixtral-8x7B-Instruct-v0.1",
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -4185,6 +4186,7 @@ class YEPCHAT(Provider):
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
         """
+        self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.model = model
@@ -4371,6 +4373,231 @@ class YEPCHAT(Provider):
             return response["choices"][0]["message"]["content"]
         except KeyError:
             return ""
+
+
+class AsyncYEPCHAT(AsyncProvider):
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        temperature: float = 0.6,
+        presence_penalty: int = 0,
+        frequency_penalty: int = 0,
+        top_p: float = 0.7,
+        model: str = "Mixtral-8x7B-Instruct-v0.1",
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """Instantiates YEPCHAT
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.6.
+            presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
+            frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
+            top_p (float, optional): Sampling threshold during inference time. Defaults to 0.7.
+            model (str, optional): LLM model name. Defaults to "gpt-3.5-turbo".
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.model = model
+        self.temperature = temperature
+        self.presence_penalty = presence_penalty
+        self.frequency_penalty = frequency_penalty
+        self.top_p = top_p
+        self.chat_endpoint = "https://api.yep.com/v1/chat/completions"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.headers = {
+            "Accept": "*/*",
+            "Accept-Encoding": "gzip, deflate",
+            "Accept-Language": "en-US,en;q=0.9",
+            "Content-Type": "application/json; charset=utf-8",
+            "Origin": "https://yep.com",
+            "Referer": "https://yep.com/",
+            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session = httpx.AsyncClient(
+            headers=self.headers,
+            proxies=proxies,
+        )
+
+    async def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI asynchronously.
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+           dict : {}
+        ```json
+        {
+            "id": "cmpl-c61c1c88de4e4ad3a79134775d17ea0c",
+            "object": "chat.completion.chunk",
+            "created": 1713876886,
+            "model": "Mixtral-8x7B-Instruct-v0.1",
+            "choices": [
+                {
+                    "index": 0,
+                    "delta": {
+                        "role": null,
+                        "content": " Sure, I can help with that. Are you looking for information on how to start coding, or do you need help with a specific coding problem? We can discuss various programming languages like Python, JavaScript, Java, C++, or others. Please provide more details so I can assist you better."
+                    },
+                    "finish_reason": null
+                }
+            ]
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+        payload = {
+            "stream": True,
+            "max_tokens": 1280,
+            "top_p": self.top_p,
+            "temperature": self.temperature,
+            "messages": [{"content": conversation_prompt, "role": "user"}],
+            "model": self.model,
+        }
+
+        async def for_stream():
+            async with self.session.stream(
+                "POST", self.chat_endpoint, json=payload, timeout=self.timeout
+            ) as response:
+                if not response.is_success:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Failed to generate response - ({response.status_code}, {response.reason_phrase}) - {response.text}"
+                    )
+
+                message_load = ""
+                async for value in response.aiter_lines():
+                    try:
+                        resp = sanitize_stream(value)
+                        incomplete_message = await self.get_message(resp)
+                        if incomplete_message:
+                            message_load += incomplete_message
+                            resp["choices"][0]["delta"]["content"] = message_load
+                            self.last_response.update(resp)
+                            yield value if raw else resp
+                        elif raw:
+                            yield value
+                    except json.decoder.JSONDecodeError:
+                        pass
+
+            self.conversation.update_chat_history(
+                prompt, await self.get_message(self.last_response)
+            )
+
+        async def for_non_stream():
+            async for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else await for_non_stream()
+
+    async def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str` asynchronously.
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        async def for_stream():
+            async_ask = await self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            )
+
+            async for response in async_ask:
+                yield await self.get_message(response)
+
+        async def for_non_stream():
+            return await self.get_message(
+                await self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else await for_non_stream()
+
+    async def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        try:
+            if response["choices"][0].get("delta"):
+                return response["choices"][0]["delta"]["content"]
+            return response["choices"][0]["message"]["content"]
+        except KeyError:
+            return ""
 class AsyncYEPCHAT(AsyncProvider):
     def __init__(
         self,
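Two details worth noting in the AI.py diff: `LLAMA2` and `YEPCHAT` now create `self.session = requests.Session()` at the start of `__init__` (in the new async class that same line is immediately superseded by the `httpx.AsyncClient` assigned at the end of the constructor), and the trailing context shows that 1.3.9 already defined an `AsyncYEPCHAT` right after the insertion point, so 1.4.0 ships two classes with that name and the later definition wins at import time. Assuming the surviving class behaves like the one shown here and is importable as `webscout.AI.AsyncYEPCHAT`, a minimal usage sketch of the `chat` coroutine:

```python
import asyncio

from webscout.AI import AsyncYEPCHAT


async def main():
    bot = AsyncYEPCHAT()

    # Non-streaming: chat() resolves to the full response string
    print(await bot.chat("What is the capital of France?"))

    # Streaming: chat(stream=True) resolves to an async generator.
    # Per the implementation above, each yielded value is the
    # cumulative text so far, not an incremental delta.
    latest = ""
    async for text in await bot.chat("Tell me a story", stream=True):
        latest = text
    print(latest)


asyncio.run(main())
```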
{webscout-1.3.9 → webscout-1.4.0}/webscout.egg-info/PKG-INFO +60 -41

Identical to the PKG-INFO diff above; the egg-info copy is regenerated from the same metadata.