webscout 3.5.tar.gz → 3.6.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- {webscout-3.5/webscout.egg-info → webscout-3.6}/PKG-INFO +124 -30
- {webscout-3.5 → webscout-3.6}/README.md +122 -29
- {webscout-3.5 → webscout-3.6}/setup.py +3 -2
- {webscout-3.5 → webscout-3.6}/webscout/AIutel.py +10 -7
- {webscout-3.5 → webscout-3.6}/webscout/Local/rawdog.py +945 -945
- {webscout-3.5 → webscout-3.6}/webscout/Provider/Deepinfra.py +478 -478
- {webscout-3.5 → webscout-3.6}/webscout/Provider/Deepseek.py +265 -265
- {webscout-3.5 → webscout-3.6}/webscout/Provider/OpenGPT.py +381 -1
- webscout-3.6/webscout/Provider/Phind.py +1007 -0
- webscout-3.6/webscout/Provider/VTLchat.py +252 -0
- {webscout-3.5 → webscout-3.6}/webscout/Provider/__init__.py +9 -1
- {webscout-3.5 → webscout-3.6}/webscout/__init__.py +37 -28
- {webscout-3.5 → webscout-3.6}/webscout/websx_search.py +370 -370
- {webscout-3.5 → webscout-3.6/webscout.egg-info}/PKG-INFO +124 -30
- {webscout-3.5 → webscout-3.6}/webscout.egg-info/SOURCES.txt +1 -0
- {webscout-3.5 → webscout-3.6}/webscout.egg-info/requires.txt +1 -0
- webscout-3.5/webscout/Provider/Phind.py +0 -518
- {webscout-3.5 → webscout-3.6}/LICENSE.md +0 -0
- {webscout-3.5 → webscout-3.6}/setup.cfg +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/AIauto.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/AIbase.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/DWEBS.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/LLM.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/Local/__init__.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/Local/_version.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/Local/formats.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/Local/model.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/Local/samplers.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/Local/thread.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/Local/utils.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/Provider/BasedGPT.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/Provider/Berlin4h.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/Provider/Blackboxai.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/Provider/ChatGPTUK.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/Provider/Cohere.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/Provider/Gemini.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/Provider/Groq.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/Provider/Koboldai.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/Provider/Leo.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/Provider/Llama2.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/Provider/Openai.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/Provider/Perplexity.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/Provider/Poe.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/Provider/Reka.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/Provider/ThinkAnyAI.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/Provider/Xjai.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/Provider/Yepchat.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/Provider/Youchat.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/__main__.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/async_providers.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/cli.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/exceptions.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/g4f.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/models.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/tempid.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/transcriber.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/utils.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/version.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/voice.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/webai.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/webscout_search.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout/webscout_search_async.py +0 -0
- {webscout-3.5 → webscout-3.6}/webscout.egg-info/dependency_links.txt +0 -0
- {webscout-3.5 → webscout-3.6}/webscout.egg-info/entry_points.txt +0 -0
- {webscout-3.5 → webscout-3.6}/webscout.egg-info/top_level.txt +0 -0
{webscout-3.5/webscout.egg-info → webscout-3.6}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 3.5
+Version: 3.6
 Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -54,6 +54,7 @@ Requires-Dist: Helpingai-T2
 Requires-Dist: playsound
 Requires-Dist: poe_api_wrapper
 Requires-Dist: pyreqwest_impersonate
+Requires-Dist: ballyregan
 Provides-Extra: dev
 Requires-Dist: ruff>=0.1.6; extra == "dev"
 Requires-Dist: pytest>=7.4.2; extra == "dev"
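The new `ballyregan` requirement is a library for fetching and validating free proxies. As a rough sketch of how such a dependency is typically consumed (this assumes ballyregan's documented `ProxyFetcher` interface; webscout's actual usage is not shown in this diff):

```python
# Sketch only: assumes ballyregan's ProxyFetcher API, not part of this diff.
from ballyregan import ProxyFetcher

fetcher = ProxyFetcher()
# Fetch a few validated free proxies to route outgoing requests through.
proxies = fetcher.get(limit=3)
print(proxies)
```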
@@ -999,6 +1000,23 @@ prompt = "write a essay on phind"
 # Use the 'ask' method to send the prompt and receive a response
 response = ph.ask(prompt)
 
+# Extract and print the message from the response
+message = ph.get_message(response)
+print(message)
+```
+Using phindv2
+```python
+from webscout import Phindv2
+
+# Create an instance of the Phindv2 class
+ph = Phindv2()
+
+# Define a prompt to send to the AI
+prompt = ""
+
+# Use the 'ask' method to send the prompt and receive a response
+response = ph.ask(prompt)
+
 # Extract and print the message from the response
 message = ph.get_message(response)
 print(message)
@@ -1019,7 +1037,7 @@ print(r)
 
 ```
 
-### 3. `You.com` - search/chat with you.com
+### 3. `You.com` - search/chat with you.com - Not working
 ```python
 
 from webscout import YouChat
@@ -1143,6 +1161,45 @@ while True:
     response_str = opengpt.chat(prompt)
     print(response_str)
 ```
+```python
+from webscout import OPENGPTv2
+
+# Initialize the bot with all specified settings
+bot = OPENGPTv2(
+    generate_new_agents=True,  # Set to True to generate new IDs, False to load from file
+    assistant_name="My Custom Assistant",
+    retrieval_description="Helpful information from my files.",
+    agent_system_message="",
+    enable_action_server=False,  # Disable Action Server by Robocorp
+    enable_ddg_search=False,  # Disable DuckDuckGo search tool
+    enable_arxiv=False,  # Disable Arxiv
+    enable_press_releases=False,  # Disable Press Releases (Kay.ai)
+    enable_pubmed=False,  # Disable PubMed
+    enable_sec_filings=False,  # Disable SEC Filings (Kay.ai)
+    enable_retrieval=False,  # Disable Retrieval
+    enable_search_tavily=False,  # Disable Search (Tavily)
+    enable_search_short_answer_tavily=False,  # Disable Search (short answer, Tavily)
+    enable_you_com_search=True,  # Enable You.com Search
+    enable_wikipedia=False,  # Disable Wikipedia tool
+    is_public=True,
+    is_conversation=True,
+    max_tokens=800,
+    timeout=40,
+    filepath="opengpt_conversation_history.txt",
+    update_file=True,
+    history_offset=10250,
+    act=None,
+)
+
+# Example interaction loop
+while True:
+    prompt = input("You: ")
+    if prompt.strip().lower() == 'exit':
+        break
+    response = bot.chat(prompt)
+    print(response)
+
+```
 ### 9. `KOBOLDAI` -
 ```python
 from webscout import KOBOLDAI
@@ -1258,8 +1315,26 @@ print(response)
 Usage code similar to other providers
 
 ### 16. `BasedGPT` - chat with GPT
-
+```python
+from webscout import BasedGPT
+
+# Initialize the BasedGPT provider
+basedgpt = BasedGPT(
+    is_conversation=True,  # Chat conversationally
+    max_tokens=600,  # Maximum tokens to generate
+    timeout=30,  # HTTP request timeout
+    intro="You are a helpful and friendly AI.",  # Introductory prompt
+    filepath="chat_history.txt",  # File to store conversation history
+    update_file=True,  # Update the chat history file
+)
 
+# Send a prompt to the AI
+prompt = "What is the meaning of life?"
+response = basedgpt.chat(prompt)
+
+# Print the AI's response
+print(response)
+```
 ### 17. `DeepSeek` - chat with deepseek
 ```python
 from webscout import DeepSeek
@@ -1292,7 +1367,7 @@ while True:
     r = ai.chat(prompt)
     print(r)
 ```
-### 18. Deepinfra
+### 18. `Deepinfra`
 ```python
 from webscout import DeepInfra
 
@@ -1318,30 +1393,33 @@ message = ai.get_message(response)
 print(message)
 ```
 
-### 19. Deepinfra - VLM
+### 19. `Deepinfra` - VLM
 ```python
-from webscout import
+from webscout.Provider import VLM
 
-
-
-    model= "Qwen/Qwen2-72B-Instruct",
-    max_tokens=800,
-    timeout=30,
-    intro=None,
-    filepath=None,
-    update_file=True,
-    proxies={},
-    history_offset=10250,
-    act=None,
-)
+# Load your image
+image_path = r"C:\Users\koula\OneDrive\Desktop\Webscout\photo_2024-03-25_19-23-40.jpg"
 
-
+vlm_instance = VLM(model="llava-hf/llava-1.5-7b-hf", is_conversation=True, max_tokens=600, timeout=30, system_prompt="You are a Helpful AI.")
+image_base64 = vlm_instance.encode_image_to_base64(image_path)
 
-
+prompt = {
+    "content": "What is in this image?",
+    "image": image_base64
+}
+
+# Generate a response
+response = vlm_instance.chat(prompt)
+print(response)
 
-
-
-
+```
+### 20. `VTLchat` - Free gpt3.5
+```python
+from webscout import VTLchat
+
+provider = VTLchat()
+response = provider.chat("Hello, how are you?")
+print(response)
 ```
 ### `LLM`
 ```python
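The VLM example above depends on an `encode_image_to_base64` helper. A minimal standard-library sketch of what such a helper generally does (webscout's actual implementation is not part of this diff):

```python
import base64

def encode_image_to_base64(image_path: str) -> str:
    # Read the raw image bytes and return them as a base64-encoded ASCII string,
    # suitable for embedding in a JSON payload sent to a vision-language model.
    with open(image_path, "rb") as f:
        return base64.b64encode(f.read()).decode("utf-8")
```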
@@ -1593,7 +1671,7 @@ class TaskExecutor:
         self._proxy_path: str = None  # Path to proxy configuration
 
         # History Management
-        self._history_filepath: str =
+        self._history_filepath: str = "history.txt"
         self._update_history_file: bool = True
         self._history_offset: int = 10250
 
@@ -1604,7 +1682,7 @@ class TaskExecutor:
         # Optional Features
         self._web_search_enabled: bool = False  # Enable web search
         self._rawdog_enabled: bool = True
-        self._internal_script_execution_enabled: bool =
+        self._internal_script_execution_enabled: bool = True
         self._script_confirmation_required: bool = False
         self._selected_interpreter: str = "python"
         self._selected_optimizer: str = "code"
@@ -1632,6 +1710,9 @@ class TaskExecutor:
             "chatgptuk": webscout.ChatGPTUK,
             "poe": webscout.POE,
             "basedgpt": webscout.BasedGPT,
+            "deepseek": webscout.DeepSeek,
+            "deepinfra": webscout.DeepInfra,
+            "opengenptv2": webscout.OPENGPTv2
         }
 
         # Initialize Rawdog if enabled
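The mapping above lets webai select a backend class by name. Usage is a straightforward dict dispatch; a hedged sketch with hypothetical helper names (constructor arguments vary per provider and are assumed default here):

```python
# Hypothetical sketch of dict-based provider dispatch, mirroring the mapping above.
import webscout

PROVIDER_MAP = {
    "deepseek": webscout.DeepSeek,
    "deepinfra": webscout.DeepInfra,
}

def make_provider(name: str):
    try:
        cls = PROVIDER_MAP[name.lower()]
    except KeyError:
        raise ValueError(f"Unknown provider: {name!r}") from None
    return cls()  # Default construction assumed; real code passes tuned kwargs.
```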
@@ -1761,13 +1842,26 @@ class TaskExecutor:
         """
         try:
             is_feedback = self._rawdog_instance.main(response)
+            if is_feedback and "PREVIOUS SCRIPT EXCEPTION" in is_feedback:
+                self._console.print(Markdown(f"LLM: {is_feedback}"))
+                error_message = is_feedback.split("PREVIOUS SCRIPT EXCEPTION:\n")[1].strip()
+                # Generate a solution for the error and execute it
+                error_solution_query = (
+                    f"The following code was executed and resulted in an error:\n\n"
+                    f"{response}\n\n"
+                    f"Error: {error_message}\n\n"
+                    f"Please provide a solution to fix this error in the code and execute it."
+                )
+                try:
+                    new_response = self._ai_model.chat(error_solution_query)
+                    self._handle_rawdog_response(new_response)
+                except webscout.exceptions.FailedToGenerateResponseError as e:
+                    self._console.print(Markdown(f"LLM: [red]Error while generating solution: {e}[/red]"))
+            else:
+                self._console.print(Markdown("LLM: (Script executed successfully)"))
         except Exception as e:
             self._console.print(Markdown(f"LLM: [red]Error: {e}[/red]"))
-
-        if is_feedback:
-            self._console.print(Markdown(f"LLM: {is_feedback}"))
-        else:
-            self._console.print(Markdown("LLM: (Script executed successfully)"))
+
 
     async def process_async_query(self, query: str) -> None:
         """
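The `_handle_rawdog_response` hunk above turns script failures into a retry loop: the error text is spliced into a follow-up prompt, and the model's corrected script is handled recursively. A self-contained sketch of that pattern, where `ask_model` and `run_script` are hypothetical stand-ins for the provider call and the script runner:

```python
from typing import Optional

def ask_model(prompt: str) -> str:
    # Hypothetical stand-in for the AI provider's chat() call.
    raise NotImplementedError

def run_script(code: str) -> Optional[str]:
    # Hypothetical stand-in: run the code, return error text on failure, None on success.
    raise NotImplementedError

def handle_response(code: str, depth: int = 0, max_depth: int = 3) -> None:
    error = run_script(code)
    if error is None:
        print("(Script executed successfully)")
        return
    if depth >= max_depth:
        print(f"Giving up after {max_depth} attempts: {error}")
        return
    # Splice the failing code and its error into a new prompt and retry.
    fix_prompt = (
        f"The following code was executed and resulted in an error:\n\n{code}\n\n"
        f"Error: {error}\n\nPlease provide a fixed version of the code."
    )
    handle_response(ask_model(fix_prompt), depth + 1, max_depth)
```

Unlike the diff's version, this sketch caps the recursion depth so a persistently failing script cannot loop forever.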
{webscout-3.5 → webscout-3.6}/README.md

@@ -934,6 +934,23 @@ prompt = "write a essay on phind"
 # Use the 'ask' method to send the prompt and receive a response
 response = ph.ask(prompt)
 
+# Extract and print the message from the response
+message = ph.get_message(response)
+print(message)
+```
+Using phindv2
+```python
+from webscout import Phindv2
+
+# Create an instance of the Phindv2 class
+ph = Phindv2()
+
+# Define a prompt to send to the AI
+prompt = ""
+
+# Use the 'ask' method to send the prompt and receive a response
+response = ph.ask(prompt)
+
 # Extract and print the message from the response
 message = ph.get_message(response)
 print(message)
@@ -954,7 +971,7 @@ print(r)
 
 ```
 
-### 3. `You.com` - search/chat with you.com
+### 3. `You.com` - search/chat with you.com - Not working
 ```python
 
 from webscout import YouChat
@@ -1078,6 +1095,45 @@ while True:
     response_str = opengpt.chat(prompt)
     print(response_str)
 ```
+```python
+from webscout import OPENGPTv2
+
+# Initialize the bot with all specified settings
+bot = OPENGPTv2(
+    generate_new_agents=True,  # Set to True to generate new IDs, False to load from file
+    assistant_name="My Custom Assistant",
+    retrieval_description="Helpful information from my files.",
+    agent_system_message="",
+    enable_action_server=False,  # Disable Action Server by Robocorp
+    enable_ddg_search=False,  # Disable DuckDuckGo search tool
+    enable_arxiv=False,  # Disable Arxiv
+    enable_press_releases=False,  # Disable Press Releases (Kay.ai)
+    enable_pubmed=False,  # Disable PubMed
+    enable_sec_filings=False,  # Disable SEC Filings (Kay.ai)
+    enable_retrieval=False,  # Disable Retrieval
+    enable_search_tavily=False,  # Disable Search (Tavily)
+    enable_search_short_answer_tavily=False,  # Disable Search (short answer, Tavily)
+    enable_you_com_search=True,  # Enable You.com Search
+    enable_wikipedia=False,  # Disable Wikipedia tool
+    is_public=True,
+    is_conversation=True,
+    max_tokens=800,
+    timeout=40,
+    filepath="opengpt_conversation_history.txt",
+    update_file=True,
+    history_offset=10250,
+    act=None,
+)
+
+# Example interaction loop
+while True:
+    prompt = input("You: ")
+    if prompt.strip().lower() == 'exit':
+        break
+    response = bot.chat(prompt)
+    print(response)
+
+```
 ### 9. `KOBOLDAI` -
 ```python
 from webscout import KOBOLDAI
@@ -1193,8 +1249,26 @@ print(response)
 Usage code similar to other providers
 
 ### 16. `BasedGPT` - chat with GPT
-
+```python
+from webscout import BasedGPT
+
+# Initialize the BasedGPT provider
+basedgpt = BasedGPT(
+    is_conversation=True,  # Chat conversationally
+    max_tokens=600,  # Maximum tokens to generate
+    timeout=30,  # HTTP request timeout
+    intro="You are a helpful and friendly AI.",  # Introductory prompt
+    filepath="chat_history.txt",  # File to store conversation history
+    update_file=True,  # Update the chat history file
+)
 
+# Send a prompt to the AI
+prompt = "What is the meaning of life?"
+response = basedgpt.chat(prompt)
+
+# Print the AI's response
+print(response)
+```
 ### 17. `DeepSeek` - chat with deepseek
 ```python
 from webscout import DeepSeek
@@ -1227,7 +1301,7 @@ while True:
     r = ai.chat(prompt)
     print(r)
 ```
-### 18. Deepinfra
+### 18. `Deepinfra`
 ```python
 from webscout import DeepInfra
 
@@ -1253,30 +1327,33 @@ message = ai.get_message(response)
 print(message)
 ```
 
-### 19. Deepinfra - VLM
+### 19. `Deepinfra` - VLM
 ```python
-from webscout import
+from webscout.Provider import VLM
 
-
-
-    model= "Qwen/Qwen2-72B-Instruct",
-    max_tokens=800,
-    timeout=30,
-    intro=None,
-    filepath=None,
-    update_file=True,
-    proxies={},
-    history_offset=10250,
-    act=None,
-)
+# Load your image
+image_path = r"C:\Users\koula\OneDrive\Desktop\Webscout\photo_2024-03-25_19-23-40.jpg"
 
-
+vlm_instance = VLM(model="llava-hf/llava-1.5-7b-hf", is_conversation=True, max_tokens=600, timeout=30, system_prompt="You are a Helpful AI.")
+image_base64 = vlm_instance.encode_image_to_base64(image_path)
 
-
+prompt = {
+    "content": "What is in this image?",
+    "image": image_base64
+}
+
+# Generate a response
+response = vlm_instance.chat(prompt)
+print(response)
 
-
-
-
+```
+### 20. `VTLchat` - Free gpt3.5
+```python
+from webscout import VTLchat
+
+provider = VTLchat()
+response = provider.chat("Hello, how are you?")
+print(response)
 ```
 ### `LLM`
 ```python
@@ -1528,7 +1605,7 @@ class TaskExecutor:
         self._proxy_path: str = None  # Path to proxy configuration
 
         # History Management
-        self._history_filepath: str =
+        self._history_filepath: str = "history.txt"
         self._update_history_file: bool = True
         self._history_offset: int = 10250
 
@@ -1539,7 +1616,7 @@ class TaskExecutor:
         # Optional Features
         self._web_search_enabled: bool = False  # Enable web search
         self._rawdog_enabled: bool = True
-        self._internal_script_execution_enabled: bool =
+        self._internal_script_execution_enabled: bool = True
         self._script_confirmation_required: bool = False
         self._selected_interpreter: str = "python"
         self._selected_optimizer: str = "code"
@@ -1567,6 +1644,9 @@ class TaskExecutor:
             "chatgptuk": webscout.ChatGPTUK,
             "poe": webscout.POE,
             "basedgpt": webscout.BasedGPT,
+            "deepseek": webscout.DeepSeek,
+            "deepinfra": webscout.DeepInfra,
+            "opengenptv2": webscout.OPENGPTv2
         }
 
         # Initialize Rawdog if enabled
@@ -1696,13 +1776,26 @@ class TaskExecutor:
         """
         try:
             is_feedback = self._rawdog_instance.main(response)
+            if is_feedback and "PREVIOUS SCRIPT EXCEPTION" in is_feedback:
+                self._console.print(Markdown(f"LLM: {is_feedback}"))
+                error_message = is_feedback.split("PREVIOUS SCRIPT EXCEPTION:\n")[1].strip()
+                # Generate a solution for the error and execute it
+                error_solution_query = (
+                    f"The following code was executed and resulted in an error:\n\n"
+                    f"{response}\n\n"
+                    f"Error: {error_message}\n\n"
+                    f"Please provide a solution to fix this error in the code and execute it."
+                )
+                try:
+                    new_response = self._ai_model.chat(error_solution_query)
+                    self._handle_rawdog_response(new_response)
+                except webscout.exceptions.FailedToGenerateResponseError as e:
+                    self._console.print(Markdown(f"LLM: [red]Error while generating solution: {e}[/red]"))
+            else:
+                self._console.print(Markdown("LLM: (Script executed successfully)"))
         except Exception as e:
             self._console.print(Markdown(f"LLM: [red]Error: {e}[/red]"))
-
-        if is_feedback:
-            self._console.print(Markdown(f"LLM: {is_feedback}"))
-        else:
-            self._console.print(Markdown("LLM: (Script executed successfully)"))
+
 
     async def process_async_query(self, query: str) -> None:
         """
{webscout-3.5 → webscout-3.6}/setup.py

@@ -5,7 +5,7 @@ with open("README.md", encoding="utf-8") as f:
 
 setup(
     name="webscout",
-    version="3.5",
+    version="3.6",
     description="Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs",
     long_description=README,
     long_description_content_type="text/markdown",
@@ -57,7 +57,8 @@ setup(
         "Helpingai-T2",
         "playsound",
         "poe_api_wrapper",
-        "pyreqwest_impersonate"
+        "pyreqwest_impersonate",
+        "ballyregan"
     ],
     entry_points={
        "console_scripts": [
{webscout-3.5 → webscout-3.6}/webscout/AIutel.py

@@ -648,14 +648,14 @@ Current Datetime : {datetime.datetime.now()}
         else:
             logging.info(message)
 
-    def main(self, response: str)
+    def main(self, response: str):
         """Exec code in response accordingly
 
         Args:
-            response
+            response: AI response
 
         Returns:
-
+            Optional[str]: None if script executed successfully else stdout data
         """
         code_blocks = re.findall(r"```python.*?```", response, re.DOTALL)
         if len(code_blocks) != 1:
@@ -691,6 +691,7 @@ Current Datetime : {datetime.datetime.now()}
                 self.log("Returning success feedback")
                 return f"LAST SCRIPT OUTPUT:\n{proc.stdout}"
             else:
+
                 self.log("Returning error feedback", "error")
                 return f"PREVIOUS SCRIPT EXCEPTION:\n{proc.stderr}"
         else:
@@ -701,12 +702,14 @@ Current Datetime : {datetime.datetime.now()}
                 self.log("Executing script internally")
                 exec(raw_code_plus)
             except Exception as e:
+                error_message = str(e)
                 self.log(
-                    "Exception occurred while executing script. Responding with error: "
-
-                    "error",
+                    f"Exception occurred while executing script. Responding with error: {error_message}",
+                    "error"
                 )
-
+                # Return the exact error message
+                return f"PREVIOUS SCRIPT EXCEPTION:\n{error_message}"
+
 class Audio:
     # Request headers
     headers: dict[str, str] = {
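Taken together, the AIutel.py hunks make `main` return the exact exception text for both subprocess and internal execution, so the caller can feed it back to the model. A simplified, self-contained sketch of the execute-and-report pattern `main` implements (the real method also supports internal `exec`, confirmation prompts, and logging):

```python
import re
import subprocess
import sys
from typing import Optional

def execute_response(response: str) -> Optional[str]:
    # Extract the single fenced python block the model was asked to produce.
    # \x60 is a backtick; spelled out to avoid nesting code fences here.
    pattern = "\x60\x60\x60python(.*?)\x60\x60\x60"
    code_blocks = re.findall(pattern, response, re.DOTALL)
    if len(code_blocks) != 1:
        return None
    # Run the code in a fresh interpreter and capture its output, mirroring the
    # LAST SCRIPT OUTPUT / PREVIOUS SCRIPT EXCEPTION feedback strings above.
    proc = subprocess.run(
        [sys.executable, "-c", code_blocks[0]], capture_output=True, text=True
    )
    if proc.returncode == 0:
        return f"LAST SCRIPT OUTPUT:\n{proc.stdout}"
    return f"PREVIOUS SCRIPT EXCEPTION:\n{proc.stderr}"
```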