webscout 2.9__py3-none-any.whl → 3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: webscout
3
- Version: 2.9
3
+ Version: 3.0
4
4
  Summary: Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs
5
5
  Author: OEvortex
6
6
  Author-email: helpingai5@gmail.com
@@ -142,6 +142,7 @@ Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can
142
142
  - [16. `BasedGPT` - chat with GPT](#16-basedgpt---chat-with-gpt)
143
143
  - [`LLM`](#llm)
144
144
  - [`Local-LLM` webscout can now run GGUF models](#local-llm-webscout-can-now-run-gguf-models)
145
+ - [`Function-calling-local-llm`](#function-calling-local-llm)
145
146
  - [`LLM` with internet](#llm-with-internet)
146
147
  - [LLM with deepwebs](#llm-with-deepwebs)
147
148
  - [`Webai` - terminal gpt and an open interpreter](#webai---terminal-gpt-and-a-open-interpeter)
@@ -1263,6 +1264,78 @@ thread = Thread(model, formats.phi3)
1263
1264
  # 4. Start interacting with the model
1264
1265
  thread.interact()
1265
1266
  ```
1267
+ ### `Function-calling-local-llm`
1268
+ ```python
1269
+ from webscout.Local import Model, Thread, formats
1270
+ from webscout import DeepWEBS
1271
+ from webscout.Local.utils import download_model
1272
+ from webscout.Local.model import Model
1273
+ from webscout.Local.thread import Thread
1274
+ from webscout.Local import formats
1275
+ from webscout.Local.samplers import SamplerSettings
1276
+ def deepwebs_search(query, max_results=5):
1277
+ """Performs a web search using DeepWEBS and returns results as JSON."""
1278
+ deepwebs = DeepWEBS()
1279
+ search_config = DeepWEBS.DeepSearch(
1280
+ queries=[query],
1281
+ max_results=max_results,
1282
+ extract_webpage=False,
1283
+ safe=False,
1284
+ types=["web"],
1285
+ overwrite_query_html=True,
1286
+ overwrite_webpage_html=True,
1287
+ )
1288
+ search_results = deepwebs.queries_to_search_results(search_config)
1289
+ formatted_results = []
1290
+ for result in search_results[0]: # Assuming only one query
1291
+ formatted_results.append(f"Title: {result['title']}\nURL: {result['url']}\n")
1292
+ return "\n".join(formatted_results)
1293
+
1294
+ # Load your model
1295
+ repo_id = "OEvortex/HelpingAI-9B"
1296
+ filename = "helpingai-9b.Q4_0.gguf"
1297
+ model_path = download_model(repo_id, filename, token='')
1298
+
1299
+ # 2. Load the model
1300
+ model = Model(model_path, n_gpu_layers=10)
1301
+
1302
+ # Create a Thread
1303
+ system_prompt = "You are a helpful AI assistant. Respond to user queries concisely. If a user asks for information that requires a web search, use the `deepwebs_search` tool. Do not call the tool if it is not necessary."
1304
+ sampler = SamplerSettings(temp=0.7, top_p=0.9) # Adjust these values as needed
1305
+ # 4. Create a custom chatml format with your system prompt
1306
+ custom_chatml = formats.chatml.copy()
1307
+ custom_chatml['system_content'] = system_prompt
1308
+ thread = Thread(model, custom_chatml, sampler=sampler)
1309
+ # Add the deepwebs_search tool
1310
+ thread.add_tool({
1311
+ "type": "function",
1312
+ "function": {
1313
+ "name": "deepwebs_search",
1314
+ "description": "Performs a web search using DeepWEBS and returns the title and URLs of the results.",
1315
+ "execute": deepwebs_search,
1316
+ "parameters": {
1317
+ "type": "object",
1318
+ "properties": {
1319
+ "query": {
1320
+ "type": "string",
1321
+ "description": "The query to search on the web",
1322
+ },
1323
+ "max_results": {
1324
+ "type": "integer",
1325
+ "description": "Maximum number of search results (default: 5)",
1326
+ },
1327
+ },
1328
+ "required": ["query"],
1329
+ },
1330
+ },
1331
+ })
1332
+
1333
+ # Start interacting with the model
1334
+ while True:
1335
+ user_input = input("You: ")
1336
+ response = thread.send(user_input)
1337
+ print("Bot: ", response)
1338
+ ```
1266
1339
  ### `LLM` with internet
1267
1340
  ```python
1268
1341
  from __future__ import annotations
@@ -1427,41 +1500,316 @@ if __name__ == "__main__":
1427
1500
  ## `Webai` - terminal gpt and a open interpeter
1428
1501
 
1429
1502
  ```python
1430
- from webscout.webai import Main
1503
+ import time
1504
+ import uuid
1505
+ from typing import Dict, Any, Optional, AsyncGenerator
1506
+ from rich.console import Console
1507
+ from rich.markdown import Markdown
1508
+ from rich.panel import Panel
1509
+ from rich.style import Style
1510
+ import webscout
1511
+ import webscout.AIutel
1512
+ import g4f
1513
+ from webscout.g4f import *
1514
+ from webscout.async_providers import mapper as async_provider_map
1431
1515
 
1432
- def use_rawdog_with_webai(prompt):
1516
+ class TaskExecutor:
1433
1517
  """
1434
- Wrap the webscout default method in a try-except block to catch any unhandled
1435
- exceptions and print a helpful message.
1518
+ Manages an interactive chat session, handling user input, AI responses,
1519
+ and optional features like web search, code execution, and text-to-speech.
1436
1520
  """
1437
- try:
1438
- webai_bot = Main(
1439
- max_tokens=500,
1440
- provider="cohere",
1441
- temperature=0.7,
1442
- top_k=40,
1443
- top_p=0.95,
1444
- model="command-r-plus", # Replace with your desired model
1445
- auth=None, # Replace with your auth key/value (if needed)
1446
- timeout=30,
1447
- disable_conversation=True,
1448
- filepath=None,
1449
- update_file=True,
1450
- intro=None,
1451
- rawdog=True,
1452
- history_offset=10250,
1453
- awesome_prompt=None,
1454
- proxy_path=None,
1455
- quiet=True
1521
+
1522
+ def __init__(self) -> None:
1523
+ """Initializes the conversational assistant with default settings."""
1524
+ self._console: Console = Console()
1525
+
1526
+ # Session configuration
1527
+ self._selected_provider: str = "phind"
1528
+ self._selected_model: str = "Phind Model"
1529
+ self._conversation_enabled: bool = True
1530
+ self._max_tokens: int = 600
1531
+ self._temperature: float = 0.2
1532
+ self._top_k: int = -1
1533
+ self._top_p: float = 0.999
1534
+ self._timeout: int = 30
1535
+ self._auth_token: str = None # API key, if required
1536
+ self._chat_completion_enabled: bool = True # g4fauto
1537
+ self._ignore_working: bool = False # Ignore working status of providers
1538
+ self._proxy_path: str = None # Path to proxy configuration
1539
+
1540
+ # History Management
1541
+ self._history_filepath: str = None
1542
+ self._update_history_file: bool = True
1543
+ self._history_offset: int = 10250
1544
+
1545
+ # Prompt Engineering
1546
+ self._initial_prompt: str = None
1547
+ self._awesome_prompt_content: str = None
1548
+
1549
+ # Optional Features
1550
+ self._web_search_enabled: bool = False # Enable web search
1551
+ self._rawdog_enabled: bool = True
1552
+ self._internal_script_execution_enabled: bool = False
1553
+ self._script_confirmation_required: bool = False
1554
+ self._selected_interpreter: str = "python"
1555
+ self._selected_optimizer: str = "code"
1556
+ self._suppress_output: bool = False # Suppress verbose output
1557
+
1558
+ # AI provider mapping
1559
+ self._ai_provider_mapping: Dict[str, Any] = {
1560
+ "phind": webscout.PhindSearch,
1561
+ "opengpt": webscout.OPENGPT,
1562
+ "koboldai": webscout.KOBOLDAI,
1563
+ "blackboxai": webscout.BLACKBOXAI,
1564
+ "llama2": webscout.LLAMA2,
1565
+ "yepchat": webscout.YEPCHAT,
1566
+ "leo": webscout.LEO,
1567
+ "groq": webscout.GROQ,
1568
+ "openai": webscout.OPENAI,
1569
+ "perplexity": webscout.PERPLEXITY,
1570
+ "you": webscout.YouChat,
1571
+ "xjai": webscout.Xjai,
1572
+ "cohere": webscout.Cohere,
1573
+ "reka": webscout.REKA,
1574
+ "thinkany": webscout.ThinkAnyAI,
1575
+ "gemini": webscout.GEMINI,
1576
+ "berlin4h": webscout.Berlin4h,
1577
+ "chatgptuk": webscout.ChatGPTUK,
1578
+ "poe": webscout.POE,
1579
+ "basedgpt": webscout.BasedGPT,
1580
+ }
1581
+
1582
+ # Initialize Rawdog if enabled
1583
+ if self._rawdog_enabled:
1584
+ self._rawdog_instance: webscout.AIutel.RawDog = webscout.AIutel.RawDog(
1585
+ quiet=self._suppress_output,
1586
+ internal_exec=self._internal_script_execution_enabled,
1587
+ confirm_script=self._script_confirmation_required,
1588
+ interpreter=self._selected_interpreter,
1589
+ )
1590
+
1591
+ self._initial_prompt = self._rawdog_instance.intro_prompt
1592
+
1593
+ # Initialize the selected AI model
1594
+ self._ai_model = self._get_ai_model()
1595
+
1596
+ def _get_ai_model(self):
1597
+ """
1598
+ Determines the appropriate AI model based on the selected provider,
1599
+ including automatic provider selection and g4fauto support.
1600
+ """
1601
+ if self._selected_provider == "g4fauto":
1602
+ # Automatically select the best provider from g4f
1603
+ test = TestProviders(quiet=self._suppress_output, timeout=self._timeout)
1604
+ g4fauto = test.best if not self._ignore_working else test.auto
1605
+ if isinstance(g4fauto, str):
1606
+ self._selected_provider = "g4fauto+" + g4fauto
1607
+ self._ai_model = self._create_g4f_model(g4fauto)
1608
+ else:
1609
+ raise Exception(
1610
+ "No working g4f provider found. "
1611
+ "Consider running 'webscout.webai gpt4free test -y' first"
1612
+ )
1613
+ else:
1614
+ # Use the specified provider
1615
+ self._ai_model = self._ai_provider_mapping[self._selected_provider](
1616
+ is_conversation=self._conversation_enabled,
1617
+ max_tokens=self._max_tokens,
1618
+ timeout=self._timeout,
1619
+ intro=self._initial_prompt,
1620
+ filepath=self._history_filepath,
1621
+ update_file=self._update_history_file,
1622
+ proxies={}, # Load proxies from config if needed
1623
+ history_offset=self._history_offset,
1624
+ act=self._awesome_prompt_content,
1625
+ model=self._selected_model,
1626
+ quiet=self._suppress_output,
1627
+ # auth=self._auth_token, # Pass API key if required
1628
+ )
1629
+ return self._ai_model
1630
+
1631
+ def _create_g4f_model(self, provider: str):
1632
+ """
1633
+ Creates a g4f model instance using the provided provider and webscout.WEBS for web search.
1634
+ """
1635
+ return webscout.g4f.GPT4FREE(
1636
+ provider=provider,
1637
+ auth=self._auth_token,
1638
+ max_tokens=self._max_tokens,
1639
+ chat_completion=self._chat_completion_enabled,
1640
+ ignore_working=self._ignore_working,
1641
+ timeout=self._timeout,
1642
+ intro=self._initial_prompt,
1643
+ filepath=self._history_filepath,
1644
+ update_file=self._update_history_file,
1645
+ proxies={}, # Load proxies from config if needed
1646
+ history_offset=self._history_offset,
1647
+ act=self._awesome_prompt_content,
1456
1648
  )
1457
- webai_response = webai_bot.default(prompt)
1458
- except Exception as e:
1459
- print("Unexpected error:", e)
1460
1649
 
1650
+ def process_query(self, query: str) -> None:
1651
+ """
1652
+ Processes a user query, potentially enhancing it with web search results,
1653
+ passing it to the AI model, and handling the response.
1654
+
1655
+ Args:
1656
+ query: The user's text input.
1657
+
1658
+ Returns:
1659
+ None
1660
+ """
1661
+ if self._web_search_enabled:
1662
+ query = self._augment_query_with_web_search(query)
1663
+
1664
+ # Apply code optimization if configured
1665
+ if self._selected_optimizer == "code":
1666
+ query = webscout.AIutel.Optimizers.code(query)
1667
+
1668
+ try:
1669
+ response: str = self._ai_model.chat(query)
1670
+ except webscout.exceptions.FailedToGenerateResponseError as e:
1671
+ self._console.print(Markdown(f"LLM: [red]{e}[/red]"))
1672
+ return
1673
+
1674
+ # Handle Rawdog responses if enabled
1675
+ if self._rawdog_enabled:
1676
+ self._handle_rawdog_response(response)
1677
+ else:
1678
+ self._console.print(Markdown(f"LLM: {response}"))
1679
+
1680
+ def _augment_query_with_web_search(self, query: str) -> str:
1681
+ """Performs a web search and appends the results to the query.
1682
+
1683
+ Args:
1684
+ query: The user's text input.
1685
+
1686
+ Returns:
1687
+ str: The augmented query with web search results.
1688
+ """
1689
+ web_search_results = webscout.WEBS().text(query, max_results=3)
1690
+ if web_search_results:
1691
+ formatted_results = "\n".join(
1692
+ f"{i+1}. {result['title']} - {result['href']}\n\nBody: {result['body']}"
1693
+ for i, result in enumerate(web_search_results)
1694
+ )
1695
+ query += f"\n\n## Web Search Results are:\n\n{formatted_results}"
1696
+ return query
1697
+
1698
+ def _handle_rawdog_response(self, response: str) -> None:
1699
+ """Handles AI responses, potentially executing them as code with Rawdog.
1700
+
1701
+ Args:
1702
+ response: The AI model's response.
1703
+
1704
+ Returns:
1705
+ None
1706
+ """
1707
+ try:
1708
+ is_feedback = self._rawdog_instance.main(response)
1709
+ except Exception as e:
1710
+ self._console.print(Markdown(f"LLM: [red]Error: {e}[/red]"))
1711
+ return
1712
+ if is_feedback:
1713
+ self._console.print(Markdown(f"LLM: {is_feedback}"))
1714
+ else:
1715
+ self._console.print(Markdown("LLM: (Script executed successfully)"))
1716
+
1717
+ async def process_async_query(self, query: str) -> None:
1718
+ """
1719
+ Asynchronously processes a user query, potentially enhancing it with web search results,
1720
+ passing it to the AI model, and handling the response.
1721
+
1722
+ Args:
1723
+ query: The user's text input.
1724
+
1725
+ Returns:
1726
+ None
1727
+ """
1728
+ if self._web_search_enabled:
1729
+ query = self._augment_query_with_web_search(query)
1730
+
1731
+ # Apply code optimization if configured
1732
+ if self._selected_optimizer == "code":
1733
+ query = webscout.AIutel.Optimizers.code(query)
1734
+
1735
+ async_model = self._get_async_ai_model()
1736
+
1737
+ try:
1738
+ async for response in async_model.chat(query, stream=True):
1739
+ self._console.print(Markdown(f"LLM: {response}"), end="")
1740
+ except webscout.exceptions.FailedToGenerateResponseError as e:
1741
+ self._console.print(Markdown(f"LLM: [red]{e}[/red]"))
1742
+ return
1743
+
1744
+ # Handle Rawdog responses if enabled
1745
+ if self._rawdog_enabled:
1746
+ self._handle_rawdog_response(response)
1747
+ else:
1748
+ self._console.print(Markdown(f"LLM: {response}"))
1749
+
1750
+ def _get_async_ai_model(self):
1751
+ """
1752
+ Determines the appropriate asynchronous AI model based on the selected provider.
1753
+ """
1754
+ if self._selected_provider == "g4fauto":
1755
+ # Automatically select the best provider from g4f
1756
+ test = TestProviders(quiet=self._suppress_output, timeout=self._timeout)
1757
+ g4fauto = test.best if not self._ignore_working else test.auto
1758
+ if isinstance(g4fauto, str):
1759
+ self._selected_provider = "g4fauto+" + g4fauto
1760
+ self._ai_model = self._create_async_g4f_model(g4fauto)
1761
+ else:
1762
+ raise Exception(
1763
+ "No working g4f provider found. "
1764
+ "Consider running 'webscout gpt4free test -y' first"
1765
+ )
1766
+ else:
1767
+ # Use the specified provider
1768
+ if self._selected_provider in async_provider_map:
1769
+ self._ai_model = async_provider_map[self._selected_provider](
1770
+ is_conversation=self._conversation_enabled,
1771
+ max_tokens=self._max_tokens,
1772
+ timeout=self._timeout,
1773
+ intro=self._initial_prompt,
1774
+ filepath=self._history_filepath,
1775
+ update_file=self._update_history_file,
1776
+ proxies={}, # Load proxies from config if needed
1777
+ history_offset=self._history_offset,
1778
+ act=self._awesome_prompt_content,
1779
+ model=self._selected_model,
1780
+ quiet=self._suppress_output,
1781
+ auth=self._auth_token, # Pass API key if required
1782
+ )
1783
+ else:
1784
+ raise Exception(
1785
+ f"Asynchronous provider '{self._selected_provider}' is not yet supported"
1786
+ )
1787
+ return self._ai_model
1788
+
1789
+ def _create_async_g4f_model(self, provider: str):
1790
+ """
1791
+ Creates an asynchronous g4f model instance using the provided provider and webscout.WEBS for web search.
1792
+ """
1793
+ return webscout.g4f.AsyncGPT4FREE(
1794
+ provider=provider,
1795
+ auth=self._auth_token,
1796
+ max_tokens=self._max_tokens,
1797
+ chat_completion=self._chat_completion_enabled,
1798
+ ignore_working=self._ignore_working,
1799
+ timeout=self._timeout,
1800
+ intro=self._initial_prompt,
1801
+ filepath=self._history_filepath,
1802
+ update_file=self._update_history_file,
1803
+ proxies={}, # Load proxies from config if needed
1804
+ history_offset=self._history_offset,
1805
+ act=self._awesome_prompt_content,
1806
+ )
1461
1807
 
1462
1808
  if __name__ == "__main__":
1463
- user_prompt = input("Enter your prompt: ")
1464
- use_rawdog_with_webai(user_prompt)
1809
+ assistant = TaskExecutor()
1810
+ while True:
1811
+ input_query = input("Enter your query: ")
1812
+ assistant.process_query(input_query)
1465
1813
 
1466
1814
  ```
1467
1815
  ```shell
@@ -12,10 +12,10 @@ DeepWEBS/utilsdw/enver.py,sha256=vpI7s4_o_VL9govSryOv-z1zYK3pTEW3-H9QNN8JYtc,247
12
12
  DeepWEBS/utilsdw/logger.py,sha256=Z0nFUcEGyU8r28yKiIyvEtO26xxpmJgbvNToTfwZecc,8174
13
13
  webscout/AIauto.py,sha256=xPGr_Z0h27XXNh4Wiufjn9TksDOqxqlaGcLUYKNP55w,18246
14
14
  webscout/AIbase.py,sha256=GoHbN8r0gq2saYRZv6LA-Fr9Jlcjv80STKFXUq2ZeGU,4710
15
- webscout/AIutel.py,sha256=YirhjJGkPWtv1Ceh0Mu4gVczXcOwW-LDhjfesWPHDwI,33256
15
+ webscout/AIutel.py,sha256=5-Is9e-COeh0NX9wkugdctHdzrsjBVZ7lfl2aunt1YI,33272
16
16
  webscout/DWEBS.py,sha256=QT-7-dUgWhQ_H7EVZD53AVyXxyskoPMKCkFIpzkN56Q,7332
17
17
  webscout/LLM.py,sha256=LbGCZdJf8A5dwfoGS4tyy39tAh5BDdhMZP0ScKaaQfU,4184
18
- webscout/__init__.py,sha256=QCcZuzCLjMjk7229CU50V1hgvvp6pB_vDDE3NvXRVHg,1840
18
+ webscout/__init__.py,sha256=eqHBfAE3psYEi42ZXnbwZG2y3J23F9XZjhoAI0nOKlQ,1856
19
19
  webscout/__main__.py,sha256=ZtTRgsRjUi2JOvYFLF1ZCh55Sdoz94I-BS-TlJC7WDU,126
20
20
  webscout/async_providers.py,sha256=holBv5SxanxVXc_92CBBaXHlB2IakB_fHnhyZaFjYF8,684
21
21
  webscout/cli.py,sha256=174iWc0NxwfYMq9vyIk_NNnd3Q8bkzEiCa_BE6a0WZY,18743
@@ -33,34 +33,34 @@ webscout/webscout_search_async.py,sha256=ecn9b0J6YtAxMER80iUF1cgn_eh3Ysj7jFpievJ
33
33
  webscout/Local/__init__.py,sha256=0yXXihFek7VCugUjjCI67i3yZ_PQ8mw3MMVlWGpMmLM,217
34
34
  webscout/Local/_version.py,sha256=_4faCzosNaazujtNZJP12bI38sKMaj4KxGdcGvcGPdY,83
35
35
  webscout/Local/formats.py,sha256=BiZZSoN3e8S6-S-ykBL9ogSUs0vK11GaZ3ghc9U8GRk,18994
36
- webscout/Local/model.py,sha256=f6Ug0tVH4MXCzelfevxBtHJXyml2C3ribnLGJ6HfCGU,27618
36
+ webscout/Local/model.py,sha256=T_bzNNrxEyOyLyhp6fKwiuVBBkXC2a37LzJVCxFIxOU,30710
37
37
  webscout/Local/samplers.py,sha256=qXwU4eLXER-2aCYzcJcTgA6BeFmi5GMpTDUX1C9pTN4,4372
38
- webscout/Local/thread.py,sha256=KkxTTQLhAiZqfcM4ZS9GCp5SNxVu984YFcmIL4a6pA8,26925
38
+ webscout/Local/thread.py,sha256=Lyf_N2CaGAn2usSWSiUXLPAgpWub8vUu_tgFgtnvZVA,27408
39
39
  webscout/Local/utils.py,sha256=CSt9IqHhVGk_nJEnKvSFbLhC5nNf01e0MtwpgMmF9pA,6197
40
- webscout/Provider/BasedGPT.py,sha256=eijTnqsecFQuHtspXDHryvh42caE_yANJI7gw9wiG7Y,8191
41
- webscout/Provider/Berlin4h.py,sha256=-O6BRkLusUEdYXcyQ09iY86dFl9WoiA4mlmZ_DLZbos,8342
42
- webscout/Provider/Blackboxai.py,sha256=8B5wT_eb86RVZ5uOqwvgVC5QATl0uEMCli0n4SDwt1M,16743
43
- webscout/Provider/ChatGPTUK.py,sha256=ozpWnuOlC_7jeDcTuUukFPcPkIksx-Bgq_6Rrf0Bwak,8357
44
- webscout/Provider/Cohere.py,sha256=6lxu0luoIaTTI0uEmJwY5hsiIIq0meZf35jaGcCvcSA,8489
45
- webscout/Provider/Gemini.py,sha256=UmFcU1MLNK7nwIRKS1pyA39JHeVLTZII0444LW-KmSM,8235
46
- webscout/Provider/Groq.py,sha256=vfaSEbzGY92YiADbeUufmy2OGshAoO0WKmmJ75c5uZY,20583
47
- webscout/Provider/Koboldai.py,sha256=49k7SxytSaw5qTFsLaBKOh8AgnxObWsxu093gY4_Hdc,15405
48
- webscout/Provider/Leo.py,sha256=l69_fDedNhCeaZyajc6N_groAXgekWkFQuQB0M32UnA,19519
49
- webscout/Provider/Llama2.py,sha256=uZpu6Kltif5dNb8lVy4zBm2ToSmjmu1868Ef462_LhU,17089
50
- webscout/Provider/OpenGPT.py,sha256=XkmunIY8pOA6id31vh7Swnu30zvZRiHfc7ocyymT1BE,18404
51
- webscout/Provider/Openai.py,sha256=_sSye4VHwRZsmSoDpP3SThgIR0kDqVTM4jNj5YCDMt0,20107
52
- webscout/Provider/Perplexity.py,sha256=zIPYqcjFkJ4-u0N9W8UZNCBbH29lV8RxwsNeuu2OYns,8597
53
- webscout/Provider/Phind.py,sha256=nOA6DqmRFK2voKNp6SNvM2fxAAxnnCo-WKUdBsXO0iw,19390
54
- webscout/Provider/Poe.py,sha256=UlynXNM0aDeuf0lW0ZAT8d3ewz2f9HxjS2akXmsDoBQ,7303
55
- webscout/Provider/Reka.py,sha256=-sIZredU-AMT7BIHBB9Zpct6FkAC4mNLGC0ueJCr_d4,8692
40
+ webscout/Provider/BasedGPT.py,sha256=LhC9WdRXhmzPEUaCYTNQF9CRFqhH4BeV1KtVf-B_Hc8,8416
41
+ webscout/Provider/Berlin4h.py,sha256=zMpmWmdFCbcE3UWB-F9xbbTWZTfx4GnjnRf6sDoaiC0,8552
42
+ webscout/Provider/Blackboxai.py,sha256=HUk0moEGsgGvidD1LF9tbfaKdx7bPnGU_SrYPdcfHU8,17182
43
+ webscout/Provider/ChatGPTUK.py,sha256=qmuCb_a71GNE5LelOb5AKJUBndvj7soebiNey4VdDvE,8570
44
+ webscout/Provider/Cohere.py,sha256=IXnRosYOaMAA65nvsKmN6ZkJGSdZFYQYBidzuNaCqX8,8711
45
+ webscout/Provider/Gemini.py,sha256=_4DHWvlWuNAmVHPwHB1RjmryjTZZCthLa6lvPEHLvkQ,8451
46
+ webscout/Provider/Groq.py,sha256=QfgP3hKUcqq5vUA4Pzuu3HAgpJkKwLWNjjsnxtkCYd8,21094
47
+ webscout/Provider/Koboldai.py,sha256=KwWx2yPlvT9BGx37iNvSbgzWkJ9I8kSOmeg7sL1hb0M,15806
48
+ webscout/Provider/Leo.py,sha256=wbuDR-vFjLptfRC6yDlk74tINqNvCOzpISsK92lIgGg,19987
49
+ webscout/Provider/Llama2.py,sha256=gVMotyiBaDSqliwuDtFefHoOBn9V5m5Ze_YVtV0trt8,17525
50
+ webscout/Provider/OpenGPT.py,sha256=SJskNkUGNNb3zdZY50xokzW-rwcSlHw8EN6WVv70dg8,18890
51
+ webscout/Provider/Openai.py,sha256=SjfVOwY94unVnXhvN0Fkome-q2-wi4mPJk_vCGq5Fjc,20617
52
+ webscout/Provider/Perplexity.py,sha256=CPdKqkdlVejXDcf1uycNO4LPCVNUADSCetvyJEGepSw,8826
53
+ webscout/Provider/Phind.py,sha256=NXiYNRs8h_6c3AGOUqFrvN01odBIQ_psSUBPaHiAUoE,19907
54
+ webscout/Provider/Poe.py,sha256=ObUxa-Fa2Dq7sJcV0hc65m09StS9uWsB2-bR2rSjXDY,7510
55
+ webscout/Provider/Reka.py,sha256=F0ZXENkhARprj5biK3mRxwiuPH0BW3ga7EWsi8agbtE,8917
56
56
  webscout/Provider/ThinkAnyAI.py,sha256=_qFjj0djxxrranyEY33w14oizyRjzlVwMv_hzvVtwNc,11616
57
- webscout/Provider/Xjai.py,sha256=gI9FqEodS-jHfFM_CsDPmTb_wL5NU2q__2fg9hqVoEc,8809
58
- webscout/Provider/Yepchat.py,sha256=E0tv3Zfoqs1Sw8Pe-6_5d--_1LESm8mjw536DWclJk8,19398
59
- webscout/Provider/Youchat.py,sha256=JAZYwcj0Kl1UUgqN0rD3TKaReA1G-cmIlW_4mog1j_c,7756
60
- webscout/Provider/__init__.py,sha256=FsMWjMRgwARRVbXU1nApZdYum3UDcAfPJizfvIFnCjk,1372
61
- webscout-2.9.dist-info/LICENSE.md,sha256=mRVwJuT4SXC5O93BFdsfWBjlXjGn2Np90Zm5SocUzM0,3150
62
- webscout-2.9.dist-info/METADATA,sha256=5BOE1otHk0pZ9Z-WGuUViM7Q9nyRkCvNNIpP_yxdyCI,47786
63
- webscout-2.9.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
64
- webscout-2.9.dist-info/entry_points.txt,sha256=Hh4YIIjvkqB9SVxZ2ri4DZUkgEu_WF_5_r_nZDIvfG8,73
65
- webscout-2.9.dist-info/top_level.txt,sha256=OD5YKy6Y3hldL7SmuxsiEDxAG4LgdSSWwzYk22MF9fk,18
66
- webscout-2.9.dist-info/RECORD,,
57
+ webscout/Provider/Xjai.py,sha256=BIlk2ouz9Kh_0Gg9hPvTqhI7XtcmWdg5vHSX_4uGrIs,9039
58
+ webscout/Provider/Yepchat.py,sha256=2Eit-A7w1ph1GQKNQuur_yaDzI64r0yBGxCIjDefJxQ,19875
59
+ webscout/Provider/Youchat.py,sha256=UVGBuGSjv4uRibn1xflmCjYcfrRTKnDvX3adhag6T98,7976
60
+ webscout/Provider/__init__.py,sha256=iUgo6NHk8i5i4l4eauq6VXgOlWO_V6Q2FwhnHS64lFA,1457
61
+ webscout-3.0.dist-info/LICENSE.md,sha256=mRVwJuT4SXC5O93BFdsfWBjlXjGn2Np90Zm5SocUzM0,3150
62
+ webscout-3.0.dist-info/METADATA,sha256=0P6IdIRTQPGrtCHj66osfRTu0KatBmyHmwndEBAJKIo,62005
63
+ webscout-3.0.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
64
+ webscout-3.0.dist-info/entry_points.txt,sha256=Hh4YIIjvkqB9SVxZ2ri4DZUkgEu_WF_5_r_nZDIvfG8,73
65
+ webscout-3.0.dist-info/top_level.txt,sha256=OD5YKy6Y3hldL7SmuxsiEDxAG4LgdSSWwzYk22MF9fk,18
66
+ webscout-3.0.dist-info/RECORD,,
File without changes