webscout-3.5-py3-none-any.whl → webscout-3.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +10 -7
- webscout/Extra/__init__.py +2 -0
- webscout/Extra/autollama.py +47 -0
- webscout/Extra/gguf.py +80 -0
- webscout/Local/_version.py +1 -1
- webscout/Local/rawdog.py +945 -945
- webscout/Provider/Deepinfra.py +478 -478
- webscout/Provider/Deepseek.py +265 -265
- webscout/Provider/OpenGPT.py +381 -1
- webscout/Provider/Phind.py +489 -0
- webscout/Provider/VTLchat.py +252 -0
- webscout/Provider/__init__.py +9 -1
- webscout/__init__.py +38 -29
- webscout/version.py +1 -1
- webscout/websx_search.py +370 -370
- {webscout-3.5.dist-info → webscout-3.7.dist-info}/METADATA +196 -35
- {webscout-3.5.dist-info → webscout-3.7.dist-info}/RECORD +21 -17
- {webscout-3.5.dist-info → webscout-3.7.dist-info}/WHEEL +1 -1
- {webscout-3.5.dist-info → webscout-3.7.dist-info}/LICENSE.md +0 -0
- {webscout-3.5.dist-info → webscout-3.7.dist-info}/entry_points.txt +0 -0
- {webscout-3.5.dist-info → webscout-3.7.dist-info}/top_level.txt +0 -0
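
Wheels are plain zip archives, so a diff like the one below can be reproduced locally with only the standard library. A minimal sketch, assuming both wheels have already been downloaded to the working directory (the wheel and member filenames are taken from this page; `read_member` is an illustrative helper, not part of any tool):

```python
import difflib
import zipfile

OLD_WHEEL = "webscout-3.5-py3-none-any.whl"  # assumed local download
NEW_WHEEL = "webscout-3.7-py3-none-any.whl"  # assumed local download

def read_member(wheel: str, member: str) -> list[str]:
    """Read one file out of a wheel (a wheel is a plain zip archive)."""
    with zipfile.ZipFile(wheel) as zf:
        return zf.read(member).decode("utf-8", errors="replace").splitlines(keepends=True)

old = read_member(OLD_WHEEL, "webscout-3.5.dist-info/METADATA")
new = read_member(NEW_WHEEL, "webscout-3.7.dist-info/METADATA")

# Emit hunks in the same unified-diff format used below.
print("".join(difflib.unified_diff(old, new,
                                   "webscout-3.5.dist-info/METADATA",
                                   "webscout-3.7.dist-info/METADATA")))
```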
@@ -1,7 +1,7 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 3.5
-Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs
+Version: 3.7
+Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
 Author: OEvortex
 Author-email: helpingai5@gmail.com
 License: HelpingAI
@@ -54,6 +54,7 @@ Requires-Dist: Helpingai-T2
 Requires-Dist: playsound
 Requires-Dist: poe-api-wrapper
 Requires-Dist: pyreqwest-impersonate
+Requires-Dist: ballyregan
 Provides-Extra: dev
 Requires-Dist: ruff >=0.1.6 ; extra == 'dev'
 Requires-Dist: pytest >=7.4.2 ; extra == 'dev'
@@ -61,7 +62,7 @@ Provides-Extra: local
 Requires-Dist: llama-cpp-python ; extra == 'local'
 Requires-Dist: colorama ; extra == 'local'
 Requires-Dist: numpy ; extra == 'local'
-Requires-Dist: huggingface-hub ; extra == 'local'
+Requires-Dist: huggingface-hub[cli] ; extra == 'local'
 
 <div align="center">
 <!-- Replace `#` with your actual links -->
@@ -999,6 +1000,23 @@ prompt = "write a essay on phind"
 # Use the 'ask' method to send the prompt and receive a response
 response = ph.ask(prompt)
 
+# Extract and print the message from the response
+message = ph.get_message(response)
+print(message)
+```
+Using Phindv2
+```python
+from webscout import Phindv2
+
+# Create an instance of the Phindv2 class
+ph = Phindv2()
+
+# Define a prompt to send to the AI
+prompt = ""
+
+# Use the 'ask' method to send the prompt and receive a response
+response = ph.ask(prompt)
+
 # Extract and print the message from the response
 message = ph.get_message(response)
 print(message)
@@ -1019,7 +1037,7 @@ print(r)
 
 ```
 
-### 3. `You.com` - search/chat with you.com
+### 3. `You.com` - search/chat with you.com - Not working
 ```python
 
 from webscout import YouChat
@@ -1143,6 +1161,45 @@ while True:
     response_str = opengpt.chat(prompt)
     print(response_str)
 ```
+```python
+from webscout import OPENGPTv2
+
+# Initialize the bot with all specified settings
+bot = OPENGPTv2(
+    generate_new_agents=True, # Set to True to generate new IDs, False to load from file
+    assistant_name="My Custom Assistant",
+    retrieval_description="Helpful information from my files.",
+    agent_system_message="",
+    enable_action_server=False, # Disable Action Server by Robocorp
+    enable_ddg_search=False, # Disable DuckDuckGo search tool
+    enable_arxiv=False, # Disable Arxiv
+    enable_press_releases=False, # Disable Press Releases (Kay.ai)
+    enable_pubmed=False, # Disable PubMed
+    enable_sec_filings=False, # Disable SEC Filings (Kay.ai)
+    enable_retrieval=False, # Disable Retrieval
+    enable_search_tavily=False, # Disable Search (Tavily)
+    enable_search_short_answer_tavily=False, # Disable Search (short answer, Tavily)
+    enable_you_com_search=True, # Enable You.com Search
+    enable_wikipedia=False, # Disable Wikipedia tool
+    is_public=True,
+    is_conversation=True,
+    max_tokens=800,
+    timeout=40,
+    filepath="opengpt_conversation_history.txt",
+    update_file=True,
+    history_offset=10250,
+    act=None,
+)
+
+# Example interaction loop
+while True:
+    prompt = input("You: ")
+    if prompt.strip().lower() == 'exit':
+        break
+    response = bot.chat(prompt)
+    print(response)
+
+```
 ### 9. `KOBOLDAI` -
 ```python
 from webscout import KOBOLDAI
@@ -1258,8 +1315,26 @@ print(response)
 Usage code similar to other providers
 
 ### 16. `BasedGPT` - chat with GPT
-
+```python
+from webscout import BasedGPT
+
+# Initialize the BasedGPT provider
+basedgpt = BasedGPT(
+    is_conversation=True, # Chat conversationally
+    max_tokens=600, # Maximum tokens to generate
+    timeout=30, # HTTP request timeout
+    intro="You are a helpful and friendly AI.", # Introductory prompt
+    filepath="chat_history.txt", # File to store conversation history
+    update_file=True, # Update the chat history file
+)
+
+# Send a prompt to the AI
+prompt = "What is the meaning of life?"
+response = basedgpt.chat(prompt)
 
+# Print the AI's response
+print(response)
+```
 ### 17. `DeepSeek` - chat with deepseek
 ```python
 from webscout import DeepSeek
@@ -1292,7 +1367,7 @@ while True:
     r = ai.chat(prompt)
     print(r)
 ```
-### 18. Deepinfra
+### 18. `Deepinfra`
 ```python
 from webscout import DeepInfra
 
@@ -1318,30 +1393,33 @@ message = ai.get_message(response)
 print(message)
 ```
 
-### 19. Deepinfra - VLM
+### 19. `Deepinfra` - VLM
 ```python
-from webscout import
+from webscout.Provider import VLM
 
-
-
-model= "Qwen/Qwen2-72B-Instruct",
-max_tokens=800,
-timeout=30,
-intro=None,
-filepath=None,
-update_file=True,
-proxies={},
-history_offset=10250,
-act=None,
-)
+# Load your image
+image_path = r"C:\Users\koula\OneDrive\Desktop\Webscout\photo_2024-03-25_19-23-40.jpg"
 
-
+vlm_instance = VLM(model="llava-hf/llava-1.5-7b-hf", is_conversation=True, max_tokens=600, timeout=30, system_prompt="You are a Helpful AI.")
+image_base64 = vlm_instance.encode_image_to_base64(image_path)
 
-
+prompt = {
+    "content": "What is in this image?",
+    "image": image_base64
+}
 
-#
-
-print(
+# Generate a response
+response = vlm_instance.chat(prompt)
+print(response)
+
+```
+### 20. `VTLchat` - Free gpt3.5
+```python
+from webscout import VTLchat
+
+provider = VTLchat()
+response = provider.chat("Hello, how are you?")
+print(response)
 ```
 ### `LLM`
 ```python
@@ -1369,13 +1447,19 @@ while True:
     # Print the response
     print("AI: ", response)
 ```
-
-Local
+
+## Local-LLM
+
+Webscout can now run GGUF models locally. You can download and run your favorite models with minimal configuration.
+
+**Example:**
+
 ```python
 from webscout.Local.utils import download_model
 from webscout.Local.model import Model
 from webscout.Local.thread import Thread
 from webscout.Local import formats
+
 # 1. Download the model
 repo_id = "microsoft/Phi-3-mini-4k-instruct-gguf" # Replace with the desired Hugging Face repo
 filename = "Phi-3-mini-4k-instruct-q4.gguf" # Replace with the correct filename
@@ -1391,7 +1475,11 @@ thread = Thread(model, formats.phi3)
 thread.interact()
 ```
 
-
+## Local-rawdog
+Webscout's local raw-dog feature allows you to run Python scripts within your terminal prompt.
+
+**Example:**
+
 ```python
 import webscout.Local as ws
 from webscout.Local.rawdog import RawDog
@@ -1478,6 +1566,63 @@ while True:
     print(script_output)
 
 ```
+
+## GGUF
+
+Webscout provides tools to convert and quantize Hugging Face models into the GGUF format for use with offline LLMs.
+
+**Example:**
+
+```python
+from webscout import gguf
+"""
+Valid quantization methods:
+"q2_k", "q3_k_l", "q3_k_m", "q3_k_s",
+"q4_0", "q4_1", "q4_k_m", "q4_k_s",
+"q5_0", "q5_1", "q5_k_m", "q5_k_s",
+"q6_k", "q8_0"
+"""
+gguf.convert(
+    model_id="OEvortex/HelpingAI-Lite-1.5T", # Replace with your model ID
+    username="Abhaykoul", # Replace with your Hugging Face username
+    token="hf_token_write", # Replace with your Hugging Face token
+    quantization_methods="q4_k_m" # Optional, adjust quantization methods
+)
+```
+
+## Autollama
+
+Webscout's `autollama` utility downloads a model from Hugging Face and then automatically makes it Ollama ready.
+
+**Example:**
+
+```python
+from webscout import autollama
+
+autollama.autollama(
+    model_path="OEvortex/HelpingAI-Lite-1.5T", # Hugging Face model ID
+    gguf_file="HelpingAI-Lite-1.5T.q4_k_m.gguf" # GGUF file ID
+)
+```
+
+**Command Line Usage:**
+
+* **GGUF Conversion:**
+```bash
+python -m webscout.Extra.gguf -m "OEvortex/HelpingAI-Lite-1.5T" -u "your_username" -t "your_hf_token" -q "q4_k_m,q5_k_m"
+```
+
+* **Autollama:**
+```bash
+python -m webscout.Extra.autollama -m "OEvortex/HelpingAI-Lite-1.5T" -g "HelpingAI-Lite-1.5T.q4_k_m.gguf"
+```
+
+**Note:**
+
+* Replace `"your_username"` and `"your_hf_token"` with your actual Hugging Face credentials.
+* The `model_path` in `autollama` is the Hugging Face model ID, and `gguf_file` is the GGUF file ID.
+
+
 ### `LLM` with internet
 ```python
 from __future__ import annotations
@@ -1593,7 +1738,7 @@ class TaskExecutor:
         self._proxy_path: str = None # Path to proxy configuration
 
         # History Management
-        self._history_filepath: str =
+        self._history_filepath: str = "history.txt"
         self._update_history_file: bool = True
         self._history_offset: int = 10250
 
@@ -1604,7 +1749,7 @@ class TaskExecutor:
         # Optional Features
         self._web_search_enabled: bool = False # Enable web search
         self._rawdog_enabled: bool = True
-        self._internal_script_execution_enabled: bool =
+        self._internal_script_execution_enabled: bool = True
        self._script_confirmation_required: bool = False
         self._selected_interpreter: str = "python"
         self._selected_optimizer: str = "code"
@@ -1632,6 +1777,9 @@ class TaskExecutor:
             "chatgptuk": webscout.ChatGPTUK,
             "poe": webscout.POE,
             "basedgpt": webscout.BasedGPT,
+            "deepseek": webscout.DeepSeek,
+            "deepinfra": webscout.DeepInfra,
+            "opengenptv2": webscout.OPENGPTv2
         }
 
         # Initialize Rawdog if enabled
@@ -1761,13 +1909,26 @@ class TaskExecutor:
         """
         try:
             is_feedback = self._rawdog_instance.main(response)
+            if is_feedback and "PREVIOUS SCRIPT EXCEPTION" in is_feedback:
+                self._console.print(Markdown(f"LLM: {is_feedback}"))
+                error_message = is_feedback.split("PREVIOUS SCRIPT EXCEPTION:\n")[1].strip()
+                # Generate a solution for the error and execute it
+                error_solution_query = (
+                    f"The following code was executed and resulted in an error:\n\n"
+                    f"{response}\n\n"
+                    f"Error: {error_message}\n\n"
+                    f"Please provide a solution to fix this error in the code and execute it."
+                )
+                try:
+                    new_response = self._ai_model.chat(error_solution_query)
+                    self._handle_rawdog_response(new_response)
+                except webscout.exceptions.FailedToGenerateResponseError as e:
+                    self._console.print(Markdown(f"LLM: [red]Error while generating solution: {e}[/red]"))
+            else:
+                self._console.print(Markdown("LLM: (Script executed successfully)"))
         except Exception as e:
             self._console.print(Markdown(f"LLM: [red]Error: {e}[/red]"))
-
-        if is_feedback:
-            self._console.print(Markdown(f"LLM: {is_feedback}"))
-        else:
-            self._console.print(Markdown("LLM: (Script executed successfully)"))
+
 
     async def process_async_query(self, query: str) -> None:
         """
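
The Requires-Dist changes earlier in this METADATA diff (the new `ballyregan` dependency and the switch to `huggingface-hub[cli]` in the `local` extra) can also be confirmed against an installed copy of the package. A minimal sketch, assuming webscout 3.7 is installed locally:

```python
from importlib.metadata import requires

# Each string mirrors a Requires-Dist line from METADATA, including
# environment markers such as `extra == "local"`.
for requirement in requires("webscout") or []:
    print(requirement)
```

The hunks that follow diff the wheel's RECORD file, which lists every installed file with its hash and size.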
@@ -1,9 +1,9 @@
 webscout/AIauto.py,sha256=xPGr_Z0h27XXNh4Wiufjn9TksDOqxqlaGcLUYKNP55w,18246
 webscout/AIbase.py,sha256=GoHbN8r0gq2saYRZv6LA-Fr9Jlcjv80STKFXUq2ZeGU,4710
-webscout/AIutel.py,sha256=
+webscout/AIutel.py,sha256=MMfUvTQXYDtaFXsXtwKgv9V_qMK6WgOxdx7Wagdm2Lw,33542
 webscout/DWEBS.py,sha256=QLuT1IKu0lnwdl7W6c-ctBAO7Jj0Zk3PYm6-13BC7rU,25740
 webscout/LLM.py,sha256=LbGCZdJf8A5dwfoGS4tyy39tAh5BDdhMZP0ScKaaQfU,4184
-webscout/__init__.py,sha256=
+webscout/__init__.py,sha256=WolS-VvktY4VezczjfbZuZR5iPCtCzPUV_WKqDFVIoA,2058
 webscout/__main__.py,sha256=ZtTRgsRjUi2JOvYFLF1ZCh55Sdoz94I-BS-TlJC7WDU,126
 webscout/async_providers.py,sha256=holBv5SxanxVXc_92CBBaXHlB2IakB_fHnhyZaFjYF8,684
 webscout/cli.py,sha256=enw_dPTCG3sNC1TXt96XccnpRmF4Etr99nh-RbGYags,18784
@@ -13,17 +13,20 @@ webscout/models.py,sha256=5iQIdtedT18YuTZ3npoG7kLMwcrKwhQ7928dl_7qZW0,692
 webscout/tempid.py,sha256=5oc3UbXhPGKxrMRTfRABT-V-dNzH_hOKWtLYM6iCWd4,5896
 webscout/transcriber.py,sha256=EddvTSq7dPJ42V3pQVnGuEiYQ7WjJ9uyeR9kMSxN7uY,20622
 webscout/utils.py,sha256=CxeXvp0rWIulUrEaPZMaNfg_tSuQLRSV8uuHA2chyKE,2603
-webscout/version.py,sha256=
+webscout/version.py,sha256=IuTIikIXiglYKmugXFivfp0USNzx9FUxd_CVlae9bgk,23
 webscout/voice.py,sha256=0QjXTHAQmCK07IDZXRc7JXem47cnPJH7u3X0sVP1-UQ,967
 webscout/webai.py,sha256=qkvhYdyF5wNdmW4rNdH3RbfQxabEWlGvCyAk2SbH04k,86602
 webscout/webscout_search.py,sha256=lFAot1-Qil_YfXieeLakDVDEX8Ckcima4ueXdOYwiMc,42804
 webscout/webscout_search_async.py,sha256=dooKGwLm0cwTml55Vy6NHPPY-nymEqX2h8laX94Zg5A,14537
-webscout/websx_search.py,sha256=
+webscout/websx_search.py,sha256=n-qVwiHozJEF-GFRPcAfh4k1d_tscTmDe1dNL-1ngcU,12094
+webscout/Extra/__init__.py,sha256=vlW4RoSl5v3d7j_Yq1XEMydrG9JM-On_afgK-HtRZsk,45
+webscout/Extra/autollama.py,sha256=5OPVRETbRJomTerddMJtznE-GFAZvDx5BPRM9EQB9dU,1476
+webscout/Extra/gguf.py,sha256=HrRF0hW3HZHwtu4OCFumlRTgMBqFpqK0JKyRTRyPWrs,3122
 webscout/Local/__init__.py,sha256=RN6klpbabPGNX2YzPm_hdeUcQvieUwvJt22uAO2RKSM,238
-webscout/Local/_version.py,sha256=
+webscout/Local/_version.py,sha256=3sFn1tDa2mT9Pb1-OGW4K3_zbiJ0mhPRqB2rnLfp28Q,83
 webscout/Local/formats.py,sha256=BiZZSoN3e8S6-S-ykBL9ogSUs0vK11GaZ3ghc9U8GRk,18994
 webscout/Local/model.py,sha256=T_bzNNrxEyOyLyhp6fKwiuVBBkXC2a37LzJVCxFIxOU,30710
-webscout/Local/rawdog.py,sha256=
+webscout/Local/rawdog.py,sha256=ojY_O8Vb1KvR34OwWdfLgllgaAK_7HMf64ElMATvCXs,36689
 webscout/Local/samplers.py,sha256=qXwU4eLXER-2aCYzcJcTgA6BeFmi5GMpTDUX1C9pTN4,4372
 webscout/Local/thread.py,sha256=Lyf_N2CaGAn2usSWSiUXLPAgpWub8vUu_tgFgtnvZVA,27408
 webscout/Local/utils.py,sha256=CSt9IqHhVGk_nJEnKvSFbLhC5nNf01e0MtwpgMmF9pA,6197
@@ -32,27 +35,28 @@ webscout/Provider/Berlin4h.py,sha256=zMpmWmdFCbcE3UWB-F9xbbTWZTfx4GnjnRf6sDoaiC0
 webscout/Provider/Blackboxai.py,sha256=HUk0moEGsgGvidD1LF9tbfaKdx7bPnGU_SrYPdcfHU8,17182
 webscout/Provider/ChatGPTUK.py,sha256=qmuCb_a71GNE5LelOb5AKJUBndvj7soebiNey4VdDvE,8570
 webscout/Provider/Cohere.py,sha256=IXnRosYOaMAA65nvsKmN6ZkJGSdZFYQYBidzuNaCqX8,8711
-webscout/Provider/Deepinfra.py,sha256=
-webscout/Provider/Deepseek.py,sha256=
+webscout/Provider/Deepinfra.py,sha256=kVnWARJdEtIeIsZwGw3POq8B2dO87bDcJso3uOeCeOA,18750
+webscout/Provider/Deepseek.py,sha256=pnOB44ObuOfAsoi_bUGUvha3tfwd0rTJ9rnX-14QkL4,10550
 webscout/Provider/Gemini.py,sha256=_4DHWvlWuNAmVHPwHB1RjmryjTZZCthLa6lvPEHLvkQ,8451
 webscout/Provider/Groq.py,sha256=QfgP3hKUcqq5vUA4Pzuu3HAgpJkKwLWNjjsnxtkCYd8,21094
 webscout/Provider/Koboldai.py,sha256=KwWx2yPlvT9BGx37iNvSbgzWkJ9I8kSOmeg7sL1hb0M,15806
 webscout/Provider/Leo.py,sha256=wbuDR-vFjLptfRC6yDlk74tINqNvCOzpISsK92lIgGg,19987
 webscout/Provider/Llama2.py,sha256=gVMotyiBaDSqliwuDtFefHoOBn9V5m5Ze_YVtV0trt8,17525
-webscout/Provider/OpenGPT.py,sha256=
+webscout/Provider/OpenGPT.py,sha256=ZymwLgNJSPlGZHW3msMlnRR7NxmALqJw9yuToqrRrhw,35515
 webscout/Provider/Openai.py,sha256=SjfVOwY94unVnXhvN0Fkome-q2-wi4mPJk_vCGq5Fjc,20617
 webscout/Provider/Perplexity.py,sha256=CPdKqkdlVejXDcf1uycNO4LPCVNUADSCetvyJEGepSw,8826
-webscout/Provider/Phind.py,sha256=
+webscout/Provider/Phind.py,sha256=bkgKVtggRJSbJAG1tXviW9BqDvcgqPBlSr88Q6rlFHw,39226
 webscout/Provider/Poe.py,sha256=ObUxa-Fa2Dq7sJcV0hc65m09StS9uWsB2-bR2rSjXDY,7510
 webscout/Provider/Reka.py,sha256=F0ZXENkhARprj5biK3mRxwiuPH0BW3ga7EWsi8agbtE,8917
 webscout/Provider/ThinkAnyAI.py,sha256=_qFjj0djxxrranyEY33w14oizyRjzlVwMv_hzvVtwNc,11616
+webscout/Provider/VTLchat.py,sha256=_sErGr-wOi16ZAfiGOo0bPsAEMkjzzwreEsIqjIZMIU,10041
 webscout/Provider/Xjai.py,sha256=BIlk2ouz9Kh_0Gg9hPvTqhI7XtcmWdg5vHSX_4uGrIs,9039
 webscout/Provider/Yepchat.py,sha256=2Eit-A7w1ph1GQKNQuur_yaDzI64r0yBGxCIjDefJxQ,19875
 webscout/Provider/Youchat.py,sha256=UVGBuGSjv4uRibn1xflmCjYcfrRTKnDvX3adhag6T98,7976
-webscout/Provider/__init__.py,sha256=
-webscout-3.5.dist-info/LICENSE.md,sha256=
-webscout-3.5.dist-info/METADATA,sha256=
-webscout-3.5.dist-info/WHEEL,sha256=
-webscout-3.5.dist-info/entry_points.txt,sha256=
-webscout-3.5.dist-info/top_level.txt,sha256=
-webscout-3.5.dist-info/RECORD,,
+webscout/Provider/__init__.py,sha256=RaMdtYv7eQJ2vB8jXUHrkfNbx2DgRjbwc6DI40cOH1A,1809
+webscout-3.7.dist-info/LICENSE.md,sha256=9P0imsudI7MEvZe2pOcg8rKBn6E5FGHQ-riYozZI-Bk,2942
+webscout-3.7.dist-info/METADATA,sha256=FocHEEpfWeT2aX3bUXJ8mHw1cWugF3tijDNqUnehB-o,69115
+webscout-3.7.dist-info/WHEEL,sha256=cpQTJ5IWu9CdaPViMhC9YzF8gZuS5-vlfoFihTBC86A,91
+webscout-3.7.dist-info/entry_points.txt,sha256=Hh4YIIjvkqB9SVxZ2ri4DZUkgEu_WF_5_r_nZDIvfG8,73
+webscout-3.7.dist-info/top_level.txt,sha256=nYIw7OKBQDr_Z33IzZUKidRD3zQEo8jOJYkMVMeN334,9
+webscout-3.7.dist-info/RECORD,,
LICENSE.md: file without changes
entry_points.txt: file without changes
top_level.txt: file without changes
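
Each RECORD row shown above has the form `path,sha256=<urlsafe-base64 digest>,size`, so the new hashes can be audited against the wheel itself. A minimal sketch, assuming the 3.7 wheel has been downloaded locally (filename from this page):

```python
import base64
import csv
import hashlib
import zipfile

WHEEL = "webscout-3.7-py3-none-any.whl"  # assumed local download

with zipfile.ZipFile(WHEEL) as zf:
    record = zf.read("webscout-3.7.dist-info/RECORD").decode("utf-8")
    for row in csv.reader(record.splitlines()):
        if len(row) != 3 or not row[1]:
            continue  # skip blank rows and the unhashed RECORD entry
        path, digest, _size = row
        algo, _, expected = digest.partition("=")
        # RECORD digests are urlsafe base64 without trailing padding.
        actual = base64.urlsafe_b64encode(
            hashlib.new(algo, zf.read(path)).digest()
        ).rstrip(b"=").decode("ascii")
        print("OK" if actual == expected else "MISMATCH", path)
```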