webscout 2.8.tar.gz → 3.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- {webscout-2.8 → webscout-3.0}/PKG-INFO +378 -29
- {webscout-2.8 → webscout-3.0}/README.md +376 -28
- {webscout-2.8 → webscout-3.0}/setup.py +2 -1
- {webscout-2.8 → webscout-3.0}/webscout/AIutel.py +1 -0
- {webscout-2.8 → webscout-3.0}/webscout/Local/_version.py +1 -1
- {webscout-2.8 → webscout-3.0}/webscout/Local/model.py +73 -4
- {webscout-2.8 → webscout-3.0}/webscout/Local/thread.py +10 -2
- {webscout-2.8 → webscout-3.0}/webscout/Local/utils.py +3 -2
- {webscout-2.8 → webscout-3.0}/webscout/Provider/BasedGPT.py +225 -225
- {webscout-2.8 → webscout-3.0}/webscout/Provider/Berlin4h.py +210 -210
- {webscout-2.8 → webscout-3.0}/webscout/Provider/Blackboxai.py +439 -439
- {webscout-2.8 → webscout-3.0}/webscout/Provider/ChatGPTUK.py +213 -213
- {webscout-2.8 → webscout-3.0}/webscout/Provider/Cohere.py +222 -222
- {webscout-2.8 → webscout-3.0}/webscout/Provider/Gemini.py +216 -216
- {webscout-2.8 → webscout-3.0}/webscout/Provider/Groq.py +511 -511
- {webscout-2.8 → webscout-3.0}/webscout/Provider/Koboldai.py +401 -401
- {webscout-2.8 → webscout-3.0}/webscout/Provider/Leo.py +468 -468
- {webscout-2.8 → webscout-3.0}/webscout/Provider/Llama2.py +436 -436
- {webscout-2.8 → webscout-3.0}/webscout/Provider/OpenGPT.py +486 -486
- {webscout-2.8 → webscout-3.0}/webscout/Provider/Openai.py +510 -510
- {webscout-2.8 → webscout-3.0}/webscout/Provider/Perplexity.py +229 -229
- {webscout-2.8 → webscout-3.0}/webscout/Provider/Phind.py +517 -517
- {webscout-2.8 → webscout-3.0}/webscout/Provider/Poe.py +207 -207
- {webscout-2.8 → webscout-3.0}/webscout/Provider/Reka.py +225 -225
- {webscout-2.8 → webscout-3.0}/webscout/Provider/Xjai.py +230 -230
- {webscout-2.8 → webscout-3.0}/webscout/Provider/Yepchat.py +477 -477
- {webscout-2.8 → webscout-3.0}/webscout/Provider/Youchat.py +220 -220
- {webscout-2.8 → webscout-3.0}/webscout/Provider/__init__.py +61 -60
- {webscout-2.8 → webscout-3.0}/webscout/__init__.py +1 -0
- webscout-3.0/webscout/version.py +2 -0
- {webscout-2.8 → webscout-3.0}/webscout.egg-info/PKG-INFO +378 -29
- {webscout-2.8 → webscout-3.0}/webscout.egg-info/requires.txt +1 -0
- webscout-2.8/webscout/version.py +0 -2
- {webscout-2.8 → webscout-3.0}/DeepWEBS/__init__.py +0 -0
- {webscout-2.8 → webscout-3.0}/DeepWEBS/documents/__init__.py +0 -0
- {webscout-2.8 → webscout-3.0}/DeepWEBS/documents/query_results_extractor.py +0 -0
- {webscout-2.8 → webscout-3.0}/DeepWEBS/documents/webpage_content_extractor.py +0 -0
- {webscout-2.8 → webscout-3.0}/DeepWEBS/networks/__init__.py +0 -0
- {webscout-2.8 → webscout-3.0}/DeepWEBS/networks/filepath_converter.py +0 -0
- {webscout-2.8 → webscout-3.0}/DeepWEBS/networks/google_searcher.py +0 -0
- {webscout-2.8 → webscout-3.0}/DeepWEBS/networks/network_configs.py +0 -0
- {webscout-2.8 → webscout-3.0}/DeepWEBS/networks/webpage_fetcher.py +0 -0
- {webscout-2.8 → webscout-3.0}/DeepWEBS/utilsdw/__init__.py +0 -0
- {webscout-2.8 → webscout-3.0}/DeepWEBS/utilsdw/enver.py +0 -0
- {webscout-2.8 → webscout-3.0}/DeepWEBS/utilsdw/logger.py +0 -0
- {webscout-2.8 → webscout-3.0}/LICENSE.md +0 -0
- {webscout-2.8 → webscout-3.0}/setup.cfg +0 -0
- {webscout-2.8 → webscout-3.0}/webscout/AIauto.py +0 -0
- {webscout-2.8 → webscout-3.0}/webscout/AIbase.py +0 -0
- {webscout-2.8 → webscout-3.0}/webscout/DWEBS.py +0 -0
- {webscout-2.8 → webscout-3.0}/webscout/LLM.py +0 -0
- {webscout-2.8 → webscout-3.0}/webscout/Local/__init__.py +0 -0
- {webscout-2.8 → webscout-3.0}/webscout/Local/formats.py +0 -0
- {webscout-2.8 → webscout-3.0}/webscout/Local/samplers.py +0 -0
- {webscout-2.8 → webscout-3.0}/webscout/Provider/ThinkAnyAI.py +0 -0
- {webscout-2.8 → webscout-3.0}/webscout/__main__.py +0 -0
- {webscout-2.8 → webscout-3.0}/webscout/async_providers.py +0 -0
- {webscout-2.8 → webscout-3.0}/webscout/cli.py +0 -0
- {webscout-2.8 → webscout-3.0}/webscout/exceptions.py +0 -0
- {webscout-2.8 → webscout-3.0}/webscout/g4f.py +0 -0
- {webscout-2.8 → webscout-3.0}/webscout/models.py +0 -0
- {webscout-2.8 → webscout-3.0}/webscout/tempid.py +0 -0
- {webscout-2.8 → webscout-3.0}/webscout/transcriber.py +0 -0
- {webscout-2.8 → webscout-3.0}/webscout/utils.py +0 -0
- {webscout-2.8 → webscout-3.0}/webscout/voice.py +0 -0
- {webscout-2.8 → webscout-3.0}/webscout/webai.py +0 -0
- {webscout-2.8 → webscout-3.0}/webscout/webscout_search.py +0 -0
- {webscout-2.8 → webscout-3.0}/webscout/webscout_search_async.py +0 -0
- {webscout-2.8 → webscout-3.0}/webscout.egg-info/SOURCES.txt +0 -0
- {webscout-2.8 → webscout-3.0}/webscout.egg-info/dependency_links.txt +0 -0
- {webscout-2.8 → webscout-3.0}/webscout.egg-info/entry_points.txt +0 -0
- {webscout-2.8 → webscout-3.0}/webscout.egg-info/top_level.txt +0 -0
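A listing like the one above can be rebuilt locally from the two published sdists. A minimal sketch, assuming both releases are still available on PyPI, using only the standard library and the public PyPI JSON API:

```python
# Sketch: download both sdists from PyPI and diff their member lists.
import io
import json
import tarfile
import urllib.request

def sdist_members(version: str) -> set[str]:
    meta = json.load(urllib.request.urlopen(f"https://pypi.org/pypi/webscout/{version}/json"))
    url = next(f["url"] for f in meta["urls"] if f["filename"].endswith(".tar.gz"))
    data = io.BytesIO(urllib.request.urlopen(url).read())
    with tarfile.open(fileobj=data) as tar:
        # Drop the leading "webscout-<version>/" component so names compare cleanly.
        return {m.name.split("/", 1)[1] for m in tar.getmembers() if "/" in m.name}

old, new = sdist_members("2.8"), sdist_members("3.0")
print("added:  ", sorted(new - old))
print("removed:", sorted(old - new))
```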
{webscout-2.8 → webscout-3.0}/PKG-INFO

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 2.8
+Version: 3.0
 Summary: Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs
 Author: OEvortex
 Author-email: helpingai5@gmail.com
````
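The metadata bump is mirrored by the new `webscout/version.py` in the file listing. To confirm which release is installed, the distribution metadata can be queried directly; a minimal sketch using the standard `importlib.metadata` API (independent of whatever version attribute webscout itself exposes):

```python
from importlib.metadata import version  # standard library, Python 3.8+

# Reads the installed distribution's metadata, i.e. the "Version:" field shown above.
print(version("webscout"))  # "3.0" once the upgrade is installed
```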
````diff
@@ -61,6 +61,7 @@ Provides-Extra: local
 Requires-Dist: llama-cpp-python; extra == "local"
 Requires-Dist: colorama; extra == "local"
 Requires-Dist: numpy; extra == "local"
+Requires-Dist: huggingface_hub; extra == "local"
 
 <div align="center">
 <!-- Replace `#` with your actual links -->
````
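The new `huggingface_hub` extra dependency lines up with the `download_model` helper used in the README examples below. A minimal sketch of what such a helper plausibly wraps, using the real `hf_hub_download` API (the actual internals of `webscout.Local.utils.download_model` are not shown in this diff):

```python
from huggingface_hub import hf_hub_download

# Fetch a single GGUF file from a Hugging Face repo; returns the local cache path.
model_path = hf_hub_download(
    repo_id="OEvortex/HelpingAI-9B",
    filename="helpingai-9b.Q4_0.gguf",
)
print(model_path)
```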
````diff
@@ -141,6 +142,7 @@ Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can
 - [16. `BasedGPT` - chat with GPT](#16-basedgpt---chat-with-gpt)
 - [`LLM`](#llm)
 - [`Local-LLM` webscout can now run GGUF models](#local-llm-webscout-can-now-run-gguf-models)
+- [`Function-calling-local-llm`](#function-calling-local-llm)
 - [`LLM` with internet](#llm-with-internet)
 - [LLM with deepwebs](#llm-with-deepwebs)
 - [`Webai` - terminal gpt and a open interpeter](#webai---terminal-gpt-and-a-open-interpeter)
````
````diff
@@ -1262,6 +1264,78 @@ thread = Thread(model, formats.phi3)
 # 4. Start interacting with the model
 thread.interact()
 ```
+### `Function-calling-local-llm`
+```python
+from webscout.Local import Model, Thread, formats
+from webscout import DeepWEBS
+from webscout.Local.utils import download_model
+from webscout.Local.model import Model
+from webscout.Local.thread import Thread
+from webscout.Local import formats
+from webscout.Local.samplers import SamplerSettings
+def deepwebs_search(query, max_results=5):
+    """Performs a web search using DeepWEBS and returns results as JSON."""
+    deepwebs = DeepWEBS()
+    search_config = DeepWEBS.DeepSearch(
+        queries=[query],
+        max_results=max_results,
+        extract_webpage=False,
+        safe=False,
+        types=["web"],
+        overwrite_query_html=True,
+        overwrite_webpage_html=True,
+    )
+    search_results = deepwebs.queries_to_search_results(search_config)
+    formatted_results = []
+    for result in search_results[0]:  # Assuming only one query
+        formatted_results.append(f"Title: {result['title']}\nURL: {result['url']}\n")
+    return "\n".join(formatted_results)
+
+# Load your model
+repo_id = "OEvortex/HelpingAI-9B"
+filename = "helpingai-9b.Q4_0.gguf"
+model_path = download_model(repo_id, filename, token='')
+
+# 2. Load the model
+model = Model(model_path, n_gpu_layers=10)
+
+# Create a Thread
+system_prompt = "You are a helpful AI assistant. Respond to user queries concisely. If a user asks for information that requires a web search, use the `deepwebs_search` tool. Do not call the tool if it is not necessary."
+sampler = SamplerSettings(temp=0.7, top_p=0.9)  # Adjust these values as needed
+# 4. Create a custom chatml format with your system prompt
+custom_chatml = formats.chatml.copy()
+custom_chatml['system_content'] = system_prompt
+thread = Thread(model, custom_chatml, sampler=sampler)
+# Add the deepwebs_search tool
+thread.add_tool({
+    "type": "function",
+    "function": {
+        "name": "deepwebs_search",
+        "description": "Performs a web search using DeepWEBS and returns the title and URLs of the results.",
+        "execute": deepwebs_search,
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "query": {
+                    "type": "string",
+                    "description": "The query to search on the web",
+                },
+                "max_results": {
+                    "type": "integer",
+                    "description": "Maximum number of search results (default: 5)",
+                },
+            },
+            "required": ["query"],
+        },
+    },
+})
+
+# Start interacting with the model
+while True:
+    user_input = input("You: ")
+    response = thread.send(user_input)
+    print("Bot: ", response)
+```
 ### `LLM` with internet
 ```python
 from __future__ import annotations
````
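The tool schema passed to `thread.add_tool` above follows the familiar OpenAI function-calling shape, with an extra `execute` key binding the JSON schema to a local callable. Assuming `add_tool` accepts any tool of that shape (only `deepwebs_search` appears in the diff), registering a second, purely local tool might look like this sketch:

```python
import datetime

def current_time() -> str:
    """Returns the current local time as an ISO-8601 string."""
    return datetime.datetime.now().isoformat(timespec="seconds")

# Hypothetical second tool, mirroring the schema used for deepwebs_search above.
thread.add_tool({
    "type": "function",
    "function": {
        "name": "current_time",
        "description": "Returns the current local date and time.",
        "execute": current_time,
        "parameters": {"type": "object", "properties": {}, "required": []},
    },
})
```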
````diff
@@ -1426,41 +1500,316 @@ if __name__ == "__main__":
 ## `Webai` - terminal gpt and a open interpeter
 
 ```python
-
+import time
+import uuid
+from typing import Dict, Any, Optional, AsyncGenerator
+from rich.console import Console
+from rich.markdown import Markdown
+from rich.panel import Panel
+from rich.style import Style
+import webscout
+import webscout.AIutel
+import g4f
+from webscout.g4f import *
+from webscout.async_providers import mapper as async_provider_map
 
-
+class TaskExecutor:
     """
-
-
+    Manages an interactive chat session, handling user input, AI responses,
+    and optional features like web search, code execution, and text-to-speech.
     """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+    def __init__(self) -> None:
+        """Initializes the conversational assistant with default settings."""
+        self._console: Console = Console()
+
+        # Session configuration
+        self._selected_provider: str = "phind"
+        self._selected_model: str = "Phind Model"
+        self._conversation_enabled: bool = True
+        self._max_tokens: int = 600
+        self._temperature: float = 0.2
+        self._top_k: int = -1
+        self._top_p: float = 0.999
+        self._timeout: int = 30
+        self._auth_token: str = None  # API key, if required
+        self._chat_completion_enabled: bool = True  # g4fauto
+        self._ignore_working: bool = False  # Ignore working status of providers
+        self._proxy_path: str = None  # Path to proxy configuration
+
+        # History Management
+        self._history_filepath: str = None
+        self._update_history_file: bool = True
+        self._history_offset: int = 10250
+
+        # Prompt Engineering
+        self._initial_prompt: str = None
+        self._awesome_prompt_content: str = None
+
+        # Optional Features
+        self._web_search_enabled: bool = False  # Enable web search
+        self._rawdog_enabled: bool = True
+        self._internal_script_execution_enabled: bool = False
+        self._script_confirmation_required: bool = False
+        self._selected_interpreter: str = "python"
+        self._selected_optimizer: str = "code"
+        self._suppress_output: bool = False  # Suppress verbose output
+
+        # AI provider mapping
+        self._ai_provider_mapping: Dict[str, Any] = {
+            "phind": webscout.PhindSearch,
+            "opengpt": webscout.OPENGPT,
+            "koboldai": webscout.KOBOLDAI,
+            "blackboxai": webscout.BLACKBOXAI,
+            "llama2": webscout.LLAMA2,
+            "yepchat": webscout.YEPCHAT,
+            "leo": webscout.LEO,
+            "groq": webscout.GROQ,
+            "openai": webscout.OPENAI,
+            "perplexity": webscout.PERPLEXITY,
+            "you": webscout.YouChat,
+            "xjai": webscout.Xjai,
+            "cohere": webscout.Cohere,
+            "reka": webscout.REKA,
+            "thinkany": webscout.ThinkAnyAI,
+            "gemini": webscout.GEMINI,
+            "berlin4h": webscout.Berlin4h,
+            "chatgptuk": webscout.ChatGPTUK,
+            "poe": webscout.POE,
+            "basedgpt": webscout.BasedGPT,
+        }
+
+        # Initialize Rawdog if enabled
+        if self._rawdog_enabled:
+            self._rawdog_instance: webscout.AIutel.RawDog = webscout.AIutel.RawDog(
+                quiet=self._suppress_output,
+                internal_exec=self._internal_script_execution_enabled,
+                confirm_script=self._script_confirmation_required,
+                interpreter=self._selected_interpreter,
+            )
+
+            self._initial_prompt = self._rawdog_instance.intro_prompt
+
+        # Initialize the selected AI model
+        self._ai_model = self._get_ai_model()
+
+    def _get_ai_model(self):
+        """
+        Determines the appropriate AI model based on the selected provider,
+        including automatic provider selection and g4fauto support.
+        """
+        if self._selected_provider == "g4fauto":
+            # Automatically select the best provider from g4f
+            test = TestProviders(quiet=self._suppress_output, timeout=self._timeout)
+            g4fauto = test.best if not self._ignore_working else test.auto
+            if isinstance(g4fauto, str):
+                self._selected_provider = "g4fauto+" + g4fauto
+                self._ai_model = self._create_g4f_model(g4fauto)
+            else:
+                raise Exception(
+                    "No working g4f provider found. "
+                    "Consider running 'webscout.webai gpt4free test -y' first"
+                )
+        else:
+            # Use the specified provider
+            self._ai_model = self._ai_provider_mapping[self._selected_provider](
+                is_conversation=self._conversation_enabled,
+                max_tokens=self._max_tokens,
+                timeout=self._timeout,
+                intro=self._initial_prompt,
+                filepath=self._history_filepath,
+                update_file=self._update_history_file,
+                proxies={},  # Load proxies from config if needed
+                history_offset=self._history_offset,
+                act=self._awesome_prompt_content,
+                model=self._selected_model,
+                quiet=self._suppress_output,
+                # auth=self._auth_token,  # Pass API key if required
+            )
+        return self._ai_model
+
+    def _create_g4f_model(self, provider: str):
+        """
+        Creates a g4f model instance using the provided provider and webscout.WEBS for web search.
+        """
+        return webscout.g4f.GPT4FREE(
+            provider=provider,
+            auth=self._auth_token,
+            max_tokens=self._max_tokens,
+            chat_completion=self._chat_completion_enabled,
+            ignore_working=self._ignore_working,
+            timeout=self._timeout,
+            intro=self._initial_prompt,
+            filepath=self._history_filepath,
+            update_file=self._update_history_file,
+            proxies={},  # Load proxies from config if needed
+            history_offset=self._history_offset,
+            act=self._awesome_prompt_content,
         )
-        webai_response = webai_bot.default(prompt)
-    except Exception as e:
-        print("Unexpected error:", e)
 
+    def process_query(self, query: str) -> None:
+        """
+        Processes a user query, potentially enhancing it with web search results,
+        passing it to the AI model, and handling the response.
+
+        Args:
+            query: The user's text input.
+
+        Returns:
+            None
+        """
+        if self._web_search_enabled:
+            query = self._augment_query_with_web_search(query)
+
+        # Apply code optimization if configured
+        if self._selected_optimizer == "code":
+            query = webscout.AIutel.Optimizers.code(query)
+
+        try:
+            response: str = self._ai_model.chat(query)
+        except webscout.exceptions.FailedToGenerateResponseError as e:
+            self._console.print(Markdown(f"LLM: [red]{e}[/red]"))
+            return
+
+        # Handle Rawdog responses if enabled
+        if self._rawdog_enabled:
+            self._handle_rawdog_response(response)
+        else:
+            self._console.print(Markdown(f"LLM: {response}"))
+
+    def _augment_query_with_web_search(self, query: str) -> str:
+        """Performs a web search and appends the results to the query.
+
+        Args:
+            query: The user's text input.
+
+        Returns:
+            str: The augmented query with web search results.
+        """
+        web_search_results = webscout.WEBS().text(query, max_results=3)
+        if web_search_results:
+            formatted_results = "\n".join(
+                f"{i+1}. {result['title']} - {result['href']}\n\nBody: {result['body']}"
+                for i, result in enumerate(web_search_results)
+            )
+            query += f"\n\n## Web Search Results are:\n\n{formatted_results}"
+        return query
+
+    def _handle_rawdog_response(self, response: str) -> None:
+        """Handles AI responses, potentially executing them as code with Rawdog.
+
+        Args:
+            response: The AI model's response.
+
+        Returns:
+            None
+        """
+        try:
+            is_feedback = self._rawdog_instance.main(response)
+        except Exception as e:
+            self._console.print(Markdown(f"LLM: [red]Error: {e}[/red]"))
+            return
+        if is_feedback:
+            self._console.print(Markdown(f"LLM: {is_feedback}"))
+        else:
+            self._console.print(Markdown("LLM: (Script executed successfully)"))
+
+    async def process_async_query(self, query: str) -> None:
+        """
+        Asynchronously processes a user query, potentially enhancing it with web search results,
+        passing it to the AI model, and handling the response.
+
+        Args:
+            query: The user's text input.
+
+        Returns:
+            None
+        """
+        if self._web_search_enabled:
+            query = self._augment_query_with_web_search(query)
+
+        # Apply code optimization if configured
+        if self._selected_optimizer == "code":
+            query = webscout.AIutel.Optimizers.code(query)
+
+        async_model = self._get_async_ai_model()
+
+        try:
+            async for response in async_model.chat(query, stream=True):
+                self._console.print(Markdown(f"LLM: {response}"), end="")
+        except webscout.exceptions.FailedToGenerateResponseError as e:
+            self._console.print(Markdown(f"LLM: [red]{e}[/red]"))
+            return
+
+        # Handle Rawdog responses if enabled
+        if self._rawdog_enabled:
+            self._handle_rawdog_response(response)
+        else:
+            self._console.print(Markdown(f"LLM: {response}"))
+
+    def _get_async_ai_model(self):
+        """
+        Determines the appropriate asynchronous AI model based on the selected provider.
+        """
+        if self._selected_provider == "g4fauto":
+            # Automatically select the best provider from g4f
+            test = TestProviders(quiet=self._suppress_output, timeout=self._timeout)
+            g4fauto = test.best if not self._ignore_working else test.auto
+            if isinstance(g4fauto, str):
+                self._selected_provider = "g4fauto+" + g4fauto
+                self._ai_model = self._create_async_g4f_model(g4fauto)
+            else:
+                raise Exception(
+                    "No working g4f provider found. "
+                    "Consider running 'webscout gpt4free test -y' first"
+                )
+        else:
+            # Use the specified provider
+            if self._selected_provider in async_provider_map:
+                self._ai_model = async_provider_map[self._selected_provider](
+                    is_conversation=self._conversation_enabled,
+                    max_tokens=self._max_tokens,
+                    timeout=self._timeout,
+                    intro=self._initial_prompt,
+                    filepath=self._history_filepath,
+                    update_file=self._update_history_file,
+                    proxies={},  # Load proxies from config if needed
+                    history_offset=self._history_offset,
+                    act=self._awesome_prompt_content,
+                    model=self._selected_model,
+                    quiet=self._suppress_output,
+                    auth=self._auth_token,  # Pass API key if required
+                )
+            else:
+                raise Exception(
+                    f"Asynchronous provider '{self._selected_provider}' is not yet supported"
+                )
+        return self._ai_model
+
+    def _create_async_g4f_model(self, provider: str):
+        """
+        Creates an asynchronous g4f model instance using the provided provider and webscout.WEBS for web search.
+        """
+        return webscout.g4f.AsyncGPT4FREE(
+            provider=provider,
+            auth=self._auth_token,
+            max_tokens=self._max_tokens,
+            chat_completion=self._chat_completion_enabled,
+            ignore_working=self._ignore_working,
+            timeout=self._timeout,
+            intro=self._initial_prompt,
+            filepath=self._history_filepath,
+            update_file=self._update_history_file,
+            proxies={},  # Load proxies from config if needed
+            history_offset=self._history_offset,
+            act=self._awesome_prompt_content,
+        )
 
 if __name__ == "__main__":
-
-
+    assistant = TaskExecutor()
+    while True:
+        input_query = input("Enter your query: ")
+        assistant.process_query(input_query)
 
 ```
 ```shell
````
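The new `process_async_query` path streams tokens as they arrive but is never exercised by the `__main__` block above. A hedged driver for it might look like the sketch below, assuming `TaskExecutor` is importable from wherever the example above lives (the diff does not show its final module path) and that the selected provider has an async counterpart in `async_provider_map`:

```python
import asyncio

async def main() -> None:
    executor = TaskExecutor()  # class defined in the diff above
    # Streams the response; _get_async_ai_model raises if the provider
    # has no asynchronous implementation.
    await executor.process_async_query("Summarize the changes in webscout 3.0")

if __name__ == "__main__":
    asyncio.run(main())
```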
|