webscout 3.5.tar.gz → 3.7.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (69)
  1. {webscout-3.5/webscout.egg-info → webscout-3.7}/PKG-INFO +196 -35
  2. {webscout-3.5 → webscout-3.7}/README.md +192 -32
  3. {webscout-3.5 → webscout-3.7}/setup.py +5 -4
  4. {webscout-3.5 → webscout-3.7}/webscout/AIutel.py +10 -7
  5. webscout-3.7/webscout/Extra/__init__.py +2 -0
  6. webscout-3.7/webscout/Extra/autollama.py +47 -0
  7. webscout-3.7/webscout/Extra/gguf.py +80 -0
  8. {webscout-3.5 → webscout-3.7}/webscout/Local/_version.py +1 -1
  9. {webscout-3.5 → webscout-3.7}/webscout/Local/rawdog.py +945 -945
  10. {webscout-3.5 → webscout-3.7}/webscout/Provider/Deepinfra.py +478 -478
  11. {webscout-3.5 → webscout-3.7}/webscout/Provider/Deepseek.py +265 -265
  12. {webscout-3.5 → webscout-3.7}/webscout/Provider/OpenGPT.py +381 -1
  13. webscout-3.7/webscout/Provider/Phind.py +1007 -0
  14. webscout-3.7/webscout/Provider/VTLchat.py +252 -0
  15. {webscout-3.5 → webscout-3.7}/webscout/Provider/__init__.py +9 -1
  16. {webscout-3.5 → webscout-3.7}/webscout/__init__.py +38 -29
  17. webscout-3.7/webscout/version.py +2 -0
  18. {webscout-3.5 → webscout-3.7}/webscout/websx_search.py +370 -370
  19. {webscout-3.5 → webscout-3.7/webscout.egg-info}/PKG-INFO +196 -35
  20. {webscout-3.5 → webscout-3.7}/webscout.egg-info/SOURCES.txt +4 -0
  21. {webscout-3.5 → webscout-3.7}/webscout.egg-info/requires.txt +2 -1
  22. webscout-3.5/webscout/Provider/Phind.py +0 -518
  23. webscout-3.5/webscout/version.py +0 -2
  24. {webscout-3.5 → webscout-3.7}/LICENSE.md +0 -0
  25. {webscout-3.5 → webscout-3.7}/setup.cfg +0 -0
  26. {webscout-3.5 → webscout-3.7}/webscout/AIauto.py +0 -0
  27. {webscout-3.5 → webscout-3.7}/webscout/AIbase.py +0 -0
  28. {webscout-3.5 → webscout-3.7}/webscout/DWEBS.py +0 -0
  29. {webscout-3.5 → webscout-3.7}/webscout/LLM.py +0 -0
  30. {webscout-3.5 → webscout-3.7}/webscout/Local/__init__.py +0 -0
  31. {webscout-3.5 → webscout-3.7}/webscout/Local/formats.py +0 -0
  32. {webscout-3.5 → webscout-3.7}/webscout/Local/model.py +0 -0
  33. {webscout-3.5 → webscout-3.7}/webscout/Local/samplers.py +0 -0
  34. {webscout-3.5 → webscout-3.7}/webscout/Local/thread.py +0 -0
  35. {webscout-3.5 → webscout-3.7}/webscout/Local/utils.py +0 -0
  36. {webscout-3.5 → webscout-3.7}/webscout/Provider/BasedGPT.py +0 -0
  37. {webscout-3.5 → webscout-3.7}/webscout/Provider/Berlin4h.py +0 -0
  38. {webscout-3.5 → webscout-3.7}/webscout/Provider/Blackboxai.py +0 -0
  39. {webscout-3.5 → webscout-3.7}/webscout/Provider/ChatGPTUK.py +0 -0
  40. {webscout-3.5 → webscout-3.7}/webscout/Provider/Cohere.py +0 -0
  41. {webscout-3.5 → webscout-3.7}/webscout/Provider/Gemini.py +0 -0
  42. {webscout-3.5 → webscout-3.7}/webscout/Provider/Groq.py +0 -0
  43. {webscout-3.5 → webscout-3.7}/webscout/Provider/Koboldai.py +0 -0
  44. {webscout-3.5 → webscout-3.7}/webscout/Provider/Leo.py +0 -0
  45. {webscout-3.5 → webscout-3.7}/webscout/Provider/Llama2.py +0 -0
  46. {webscout-3.5 → webscout-3.7}/webscout/Provider/Openai.py +0 -0
  47. {webscout-3.5 → webscout-3.7}/webscout/Provider/Perplexity.py +0 -0
  48. {webscout-3.5 → webscout-3.7}/webscout/Provider/Poe.py +0 -0
  49. {webscout-3.5 → webscout-3.7}/webscout/Provider/Reka.py +0 -0
  50. {webscout-3.5 → webscout-3.7}/webscout/Provider/ThinkAnyAI.py +0 -0
  51. {webscout-3.5 → webscout-3.7}/webscout/Provider/Xjai.py +0 -0
  52. {webscout-3.5 → webscout-3.7}/webscout/Provider/Yepchat.py +0 -0
  53. {webscout-3.5 → webscout-3.7}/webscout/Provider/Youchat.py +0 -0
  54. {webscout-3.5 → webscout-3.7}/webscout/__main__.py +0 -0
  55. {webscout-3.5 → webscout-3.7}/webscout/async_providers.py +0 -0
  56. {webscout-3.5 → webscout-3.7}/webscout/cli.py +0 -0
  57. {webscout-3.5 → webscout-3.7}/webscout/exceptions.py +0 -0
  58. {webscout-3.5 → webscout-3.7}/webscout/g4f.py +0 -0
  59. {webscout-3.5 → webscout-3.7}/webscout/models.py +0 -0
  60. {webscout-3.5 → webscout-3.7}/webscout/tempid.py +0 -0
  61. {webscout-3.5 → webscout-3.7}/webscout/transcriber.py +0 -0
  62. {webscout-3.5 → webscout-3.7}/webscout/utils.py +0 -0
  63. {webscout-3.5 → webscout-3.7}/webscout/voice.py +0 -0
  64. {webscout-3.5 → webscout-3.7}/webscout/webai.py +0 -0
  65. {webscout-3.5 → webscout-3.7}/webscout/webscout_search.py +0 -0
  66. {webscout-3.5 → webscout-3.7}/webscout/webscout_search_async.py +0 -0
  67. {webscout-3.5 → webscout-3.7}/webscout.egg-info/dependency_links.txt +0 -0
  68. {webscout-3.5 → webscout-3.7}/webscout.egg-info/entry_points.txt +0 -0
  69. {webscout-3.5 → webscout-3.7}/webscout.egg-info/top_level.txt +0 -0
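
The headline additions in 3.7 are the new `webscout/Extra` package (`gguf.py` and `autollama.py`), a rewritten `Phind` provider, and the new `VTLchat` provider. Going by the README examples added later in this diff, the two Extra utilities are used roughly as follows (a sketch only; the model ID, username, and token are placeholders, not tested values):

```python
# Sketch assembled from the README examples in this diff; the implementations
# live in webscout/Extra/gguf.py and webscout/Extra/autollama.py (new in 3.7).
from webscout import gguf, autollama

# Convert and quantize a Hugging Face model to GGUF.
gguf.convert(
    model_id="OEvortex/HelpingAI-Lite-1.5T",  # placeholder model ID
    username="your_username",                 # your Hugging Face username
    token="hf_token_write",                   # a write-scoped Hugging Face token
    quantization_methods="q4_k_m",
)

# Fetch a GGUF file from Hugging Face and make it Ollama-ready.
autollama.autollama(
    model_path="OEvortex/HelpingAI-Lite-1.5T",
    gguf_file="HelpingAI-Lite-1.5T.q4_k_m.gguf",
)
```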
{webscout-3.5/webscout.egg-info → webscout-3.7}/PKG-INFO

@@ -1,7 +1,7 @@
  Metadata-Version: 2.1
  Name: webscout
- Version: 3.5
- Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs
+ Version: 3.7
+ Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
  Author: OEvortex
  Author-email: helpingai5@gmail.com
  License: HelpingAI
@@ -54,6 +54,7 @@ Requires-Dist: Helpingai-T2
  Requires-Dist: playsound
  Requires-Dist: poe_api_wrapper
  Requires-Dist: pyreqwest_impersonate
+ Requires-Dist: ballyregan
  Provides-Extra: dev
  Requires-Dist: ruff>=0.1.6; extra == "dev"
  Requires-Dist: pytest>=7.4.2; extra == "dev"
@@ -61,7 +62,7 @@ Provides-Extra: local
  Requires-Dist: llama-cpp-python; extra == "local"
  Requires-Dist: colorama; extra == "local"
  Requires-Dist: numpy; extra == "local"
- Requires-Dist: huggingface_hub; extra == "local"
+ Requires-Dist: huggingface_hub[cli]; extra == "local"

  <div align="center">
  <!-- Replace `#` with your actual links -->
@@ -999,6 +1000,23 @@ prompt = "write a essay on phind"
  # Use the 'ask' method to send the prompt and receive a response
  response = ph.ask(prompt)

+ # Extract and print the message from the response
+ message = ph.get_message(response)
+ print(message)
+ ```
+ Using phindv2
+ ```python
+ from webscout import Phindv2
+
+ # Create an instance of the PHIND class
+ ph = Phindv2()
+
+ # Define a prompt to send to the AI
+ prompt = ""
+
+ # Use the 'ask' method to send the prompt and receive a response
+ response = ph.ask(prompt)
+
  # Extract and print the message from the response
  message = ph.get_message(response)
  print(message)
@@ -1019,7 +1037,7 @@ print(r)

  ```

- ### 3. `You.com` - search/chat with you.com
+ ### 3. `You.com` - search/chat with you.com - Not working
  ```python

  from webscout import YouChat
@@ -1143,6 +1161,45 @@ while True:
  response_str = opengpt.chat(prompt)
  print(response_str)
  ```
+ ```python
+ from webscout import OPENGPTv2
+
+ # Initialize the bot with all specified settings
+ bot = OPENGPTv2(
+     generate_new_agents=True, # Set to True to generate new IDs, False to load from file
+     assistant_name="My Custom Assistant",
+     retrieval_description="Helpful information from my files.",
+     agent_system_message="",
+     enable_action_server=False, # Assuming you want to disable Action Server by Robocorp
+     enable_ddg_search=False, # Enable DuckDuckGo search tool
+     enable_arxiv=False, # Assuming you want to disable Arxiv
+     enable_press_releases=False, # Assuming you want to disable Press Releases (Kay.ai)
+     enable_pubmed=False, # Assuming you want to disable PubMed
+     enable_sec_filings=False, # Assuming you want to disable SEC Filings (Kay.ai)
+     enable_retrieval=False, # Assuming you want to disable Retrieval
+     enable_search_tavily=False, # Assuming you want to disable Search (Tavily)
+     enable_search_short_answer_tavily=False, # Assuming you want to disable Search (short answer, Tavily)
+     enable_you_com_search=True, # Assuming you want to disable You.com Search
+     enable_wikipedia=False, # Enable Wikipedia tool
+     is_public=True,
+     is_conversation=True,
+     max_tokens=800,
+     timeout=40,
+     filepath="opengpt_conversation_history.txt",
+     update_file=True,
+     history_offset=10250,
+     act=None,
+ )
+
+ # Example interaction loop
+ while True:
+     prompt = input("You: ")
+     if prompt.strip().lower() == 'exit':
+         break
+     response = bot.chat(prompt)
+     print(response)
+
+ ```
  ### 9. `KOBOLDAI` -
  ```python
  from webscout import KOBOLDAI
@@ -1258,8 +1315,26 @@ print(response)
  Usage code similar to other proviers

  ### 16. `BasedGPT` - chat with GPT
- Usage code similar to other providers
+ ```
+ from webscout import BasedGPT
+
+ # Initialize the BasedGPT provider
+ basedgpt = BasedGPT(
+     is_conversation=True, # Chat conversationally
+     max_tokens=600, # Maximum tokens to generate
+     timeout=30, # HTTP request timeout
+     intro="You are a helpful and friendly AI.", # Introductory prompt
+     filepath="chat_history.txt", # File to store conversation history
+     update_file=True, # Update the chat history file
+ )
+
+ # Send a prompt to the AI
+ prompt = "What is the meaning of life?"
+ response = basedgpt.chat(prompt)

+ # Print the AI's response
+ print(response)
+ ```
  ### 17. `DeepSeek` -chat with deepseek
  ```python
  from webscout import DeepSeek
@@ -1292,7 +1367,7 @@ while True:
  r = ai.chat(prompt)
  print(r)
  ```
- ### 18. Deepinfra
+ ### 18. `Deepinfra`
  ```python
  from webscout import DeepInfra

@@ -1318,30 +1393,33 @@ message = ai.get_message(response)
  print(message)
  ```

- ### 19. Deepinfra - VLM
+ ### 19. `Deepinfra` - VLM
  ```python
- from webscout import DeepInfra
+ from webscout.Provider import VLM

- ai = DeepInfra(
-     is_conversation=True,
-     model= "Qwen/Qwen2-72B-Instruct",
-     max_tokens=800,
-     timeout=30,
-     intro=None,
-     filepath=None,
-     update_file=True,
-     proxies={},
-     history_offset=10250,
-     act=None,
- )
+ # Load your image
+ image_path = r"C:\Users\koula\OneDrive\Desktop\Webscout\photo_2024-03-25_19-23-40.jpg"

- prompt = "what is meaning of life"
+ vlm_instance = VLM(model="llava-hf/llava-1.5-7b-hf", is_conversation=True, max_tokens=600, timeout=30, system_prompt="You are a Helpful AI.")
+ image_base64 = vlm_instance.encode_image_to_base64(image_path)

- response = ai.ask(prompt)
+ prompt = {
+     "content": "What is in this image?",
+     "image": image_base64
+ }

- # Extract and print the message from the response
- message = ai.get_message(response)
- print(message)
+ # Generate a response
+ response = vlm_instance.chat(prompt)
+ print(response)
+
+ ```
+ ### 20. `VTLchat` - Free gpt3.5
+ ```python
+ from webscout import VTLchat
+
+ provider = VTLchat()
+ response = provider.chat("Hello, how are you?")
+ print(response)
  ```
  ### `LLM`
  ```python
@@ -1369,13 +1447,19 @@ while True:
  # Print the response
  print("AI: ", response)
  ```
- ### `Local-LLM` webscout can now run GGUF models
- Local LLM's some functions are taken from easy-llama
+
+ ## Local-LLM
+
+ Webscout can now run GGUF models locally. You can download and run your favorite models with minimal configuration.
+
+ **Example:**
+
  ```python
  from webscout.Local.utils import download_model
  from webscout.Local.model import Model
  from webscout.Local.thread import Thread
  from webscout.Local import formats
+
  # 1. Download the model
  repo_id = "microsoft/Phi-3-mini-4k-instruct-gguf" # Replace with the desired Hugging Face repo
  filename = "Phi-3-mini-4k-instruct-q4.gguf" # Replace with the correct filename
@@ -1391,7 +1475,11 @@ thread = Thread(model, formats.phi3)
  thread.interact()
  ```

- ### `Local-rawdog`
+ ## Local-rawdog
+ Webscout's local raw-dog feature allows you to run Python scripts within your terminal prompt.
+
+ **Example:**
+
  ```python
  import webscout.Local as ws
  from webscout.Local.rawdog import RawDog
@@ -1478,6 +1566,63 @@ while True:
  print(script_output)

  ```
+
+ ## GGUF
+
+ Webscout provides tools to convert and quantize Hugging Face models into the GGUF format for use with offline LLMs.
+
+ **Example:**
+
+ ```python
+ from webscout import gguf
+ """
+ Valid quantization methods:
+ "q2_k", "q3_k_l", "q3_k_m", "q3_k_s",
+ "q4_0", "q4_1", "q4_k_m", "q4_k_s",
+ "q5_0", "q5_1", "q5_k_m", "q5_k_s",
+ "q6_k", "q8_0"
+ """
+ gguf.convert(
+     model_id="OEvortex/HelpingAI-Lite-1.5T", # Replace with your model ID
+     username="Abhaykoul", # Replace with your Hugging Face username
+     token="hf_token_write", # Replace with your Hugging Face token
+     quantization_methods="q4_k_m" # Optional, adjust quantization methods
+ )
+ ```
+
+ ## Autollama
+
+ Webscout's `autollama` utility download model from huggingface and then automatically makes it ollama ready
+
+ **Example:**
+
+ ```python
+ from webscout import autollama
+
+ autollama.autollama(
+     model_path="OEvortex/HelpingAI-Lite-1.5T", # Hugging Face model ID
+     gguf_file="HelpingAI-Lite-1.5T.q4_k_m.gguf" # GGUF file ID
+ )
+ ```
+
+ **Command Line Usage:**
+
+ * **GGUF Conversion:**
+   ```bash
+   python -m webscout.Extra.gguf -m "OEvortex/HelpingAI-Lite-1.5T" -u "your_username" -t "your_hf_token" -q "q4_k_m,q5_k_m"
+   ```
+
+ * **Autollama:**
+   ```bash
+   python -m webscout.Extra.autollama -m "OEvortex/HelpingAI-Lite-1.5T" -g "HelpingAI-Lite-1.5T.q4_k_m.gguf"
+   ```
+
+ **Note:**
+
+ * Replace `"your_username"` and `"your_hf_token"` with your actual Hugging Face credentials.
+ * The `model_path` in `autollama` is the Hugging Face model ID, and `gguf_file` is the GGUF file ID.
+
+
  ### `LLM` with internet
  ```python
  from __future__ import annotations
@@ -1593,7 +1738,7 @@ class TaskExecutor:
          self._proxy_path: str = None # Path to proxy configuration

          # History Management
-         self._history_filepath: str = None
+         self._history_filepath: str = "history.txt"
          self._update_history_file: bool = True
          self._history_offset: int = 10250

@@ -1604,7 +1749,7 @@ class TaskExecutor:
          # Optional Features
          self._web_search_enabled: bool = False # Enable web search
          self._rawdog_enabled: bool = True
-         self._internal_script_execution_enabled: bool = False
+         self._internal_script_execution_enabled: bool = True
          self._script_confirmation_required: bool = False
          self._selected_interpreter: str = "python"
          self._selected_optimizer: str = "code"
@@ -1632,6 +1777,9 @@ class TaskExecutor:
              "chatgptuk": webscout.ChatGPTUK,
              "poe": webscout.POE,
              "basedgpt": webscout.BasedGPT,
+             "deepseek": webscout.DeepSeek,
+             "deepinfra": webscout.DeepInfra,
+             "opengenptv2": webscout.OPENGPTv2
          }

          # Initialize Rawdog if enabled
@@ -1761,13 +1909,26 @@ class TaskExecutor:
          """
          try:
              is_feedback = self._rawdog_instance.main(response)
+             if is_feedback and "PREVIOUS SCRIPT EXCEPTION" in is_feedback:
+                 self._console.print(Markdown(f"LLM: {is_feedback}"))
+                 error_message = is_feedback.split("PREVIOUS SCRIPT EXCEPTION:\n")[1].strip()
+                 # Generate a solution for the error and execute it
+                 error_solution_query = (
+                     f"The following code was executed and resulted in an error:\n\n"
+                     f"{response}\n\n"
+                     f"Error: {error_message}\n\n"
+                     f"Please provide a solution to fix this error in the code and execute it."
+                 )
+                 try:
+                     new_response = self._ai_model.chat(error_solution_query)
+                     self._handle_rawdog_response(new_response)
+                 except webscout.exceptions.FailedToGenerateResponseError as e:
+                     self._console.print(Markdown(f"LLM: [red]Error while generating solution: {e}[/red]"))
+             else:
+                 self._console.print(Markdown("LLM: (Script executed successfully)"))
          except Exception as e:
              self._console.print(Markdown(f"LLM: [red]Error: {e}[/red]"))
-             return
-         if is_feedback:
-             self._console.print(Markdown(f"LLM: {is_feedback}"))
-         else:
-             self._console.print(Markdown("LLM: (Script executed successfully)"))
+

      async def process_async_query(self, query: str) -> None:
          """
{webscout-3.5 → webscout-3.7}/README.md

@@ -934,6 +934,23 @@ prompt = "write a essay on phind"
  # Use the 'ask' method to send the prompt and receive a response
  response = ph.ask(prompt)

+ # Extract and print the message from the response
+ message = ph.get_message(response)
+ print(message)
+ ```
+ Using phindv2
+ ```python
+ from webscout import Phindv2
+
+ # Create an instance of the PHIND class
+ ph = Phindv2()
+
+ # Define a prompt to send to the AI
+ prompt = ""
+
+ # Use the 'ask' method to send the prompt and receive a response
+ response = ph.ask(prompt)
+
  # Extract and print the message from the response
  message = ph.get_message(response)
  print(message)
@@ -954,7 +971,7 @@ print(r)

  ```

- ### 3. `You.com` - search/chat with you.com
+ ### 3. `You.com` - search/chat with you.com - Not working
  ```python

  from webscout import YouChat
@@ -1078,6 +1095,45 @@ while True:
  response_str = opengpt.chat(prompt)
  print(response_str)
  ```
+ ```python
+ from webscout import OPENGPTv2
+
+ # Initialize the bot with all specified settings
+ bot = OPENGPTv2(
+     generate_new_agents=True, # Set to True to generate new IDs, False to load from file
+     assistant_name="My Custom Assistant",
+     retrieval_description="Helpful information from my files.",
+     agent_system_message="",
+     enable_action_server=False, # Assuming you want to disable Action Server by Robocorp
+     enable_ddg_search=False, # Enable DuckDuckGo search tool
+     enable_arxiv=False, # Assuming you want to disable Arxiv
+     enable_press_releases=False, # Assuming you want to disable Press Releases (Kay.ai)
+     enable_pubmed=False, # Assuming you want to disable PubMed
+     enable_sec_filings=False, # Assuming you want to disable SEC Filings (Kay.ai)
+     enable_retrieval=False, # Assuming you want to disable Retrieval
+     enable_search_tavily=False, # Assuming you want to disable Search (Tavily)
+     enable_search_short_answer_tavily=False, # Assuming you want to disable Search (short answer, Tavily)
+     enable_you_com_search=True, # Assuming you want to disable You.com Search
+     enable_wikipedia=False, # Enable Wikipedia tool
+     is_public=True,
+     is_conversation=True,
+     max_tokens=800,
+     timeout=40,
+     filepath="opengpt_conversation_history.txt",
+     update_file=True,
+     history_offset=10250,
+     act=None,
+ )
+
+ # Example interaction loop
+ while True:
+     prompt = input("You: ")
+     if prompt.strip().lower() == 'exit':
+         break
+     response = bot.chat(prompt)
+     print(response)
+
+ ```
  ### 9. `KOBOLDAI` -
  ```python
  from webscout import KOBOLDAI
@@ -1193,8 +1249,26 @@ print(response)
  Usage code similar to other proviers

  ### 16. `BasedGPT` - chat with GPT
- Usage code similar to other providers
+ ```
+ from webscout import BasedGPT
+
+ # Initialize the BasedGPT provider
+ basedgpt = BasedGPT(
+     is_conversation=True, # Chat conversationally
+     max_tokens=600, # Maximum tokens to generate
+     timeout=30, # HTTP request timeout
+     intro="You are a helpful and friendly AI.", # Introductory prompt
+     filepath="chat_history.txt", # File to store conversation history
+     update_file=True, # Update the chat history file
+ )
+
+ # Send a prompt to the AI
+ prompt = "What is the meaning of life?"
+ response = basedgpt.chat(prompt)

+ # Print the AI's response
+ print(response)
+ ```
  ### 17. `DeepSeek` -chat with deepseek
  ```python
  from webscout import DeepSeek
@@ -1227,7 +1301,7 @@ while True:
  r = ai.chat(prompt)
  print(r)
  ```
- ### 18. Deepinfra
+ ### 18. `Deepinfra`
  ```python
  from webscout import DeepInfra

@@ -1253,30 +1327,33 @@ message = ai.get_message(response)
  print(message)
  ```

- ### 19. Deepinfra - VLM
+ ### 19. `Deepinfra` - VLM
  ```python
- from webscout import DeepInfra
+ from webscout.Provider import VLM

- ai = DeepInfra(
-     is_conversation=True,
-     model= "Qwen/Qwen2-72B-Instruct",
-     max_tokens=800,
-     timeout=30,
-     intro=None,
-     filepath=None,
-     update_file=True,
-     proxies={},
-     history_offset=10250,
-     act=None,
- )
+ # Load your image
+ image_path = r"C:\Users\koula\OneDrive\Desktop\Webscout\photo_2024-03-25_19-23-40.jpg"

- prompt = "what is meaning of life"
+ vlm_instance = VLM(model="llava-hf/llava-1.5-7b-hf", is_conversation=True, max_tokens=600, timeout=30, system_prompt="You are a Helpful AI.")
+ image_base64 = vlm_instance.encode_image_to_base64(image_path)

- response = ai.ask(prompt)
+ prompt = {
+     "content": "What is in this image?",
+     "image": image_base64
+ }

- # Extract and print the message from the response
- message = ai.get_message(response)
- print(message)
+ # Generate a response
+ response = vlm_instance.chat(prompt)
+ print(response)
+
+ ```
+ ### 20. `VTLchat` - Free gpt3.5
+ ```python
+ from webscout import VTLchat
+
+ provider = VTLchat()
+ response = provider.chat("Hello, how are you?")
+ print(response)
  ```
  ### `LLM`
  ```python
@@ -1304,13 +1381,19 @@ while True:
  # Print the response
  print("AI: ", response)
  ```
- ### `Local-LLM` webscout can now run GGUF models
- Local LLM's some functions are taken from easy-llama
+
+ ## Local-LLM
+
+ Webscout can now run GGUF models locally. You can download and run your favorite models with minimal configuration.
+
+ **Example:**
+
  ```python
  from webscout.Local.utils import download_model
  from webscout.Local.model import Model
  from webscout.Local.thread import Thread
  from webscout.Local import formats
+
  # 1. Download the model
  repo_id = "microsoft/Phi-3-mini-4k-instruct-gguf" # Replace with the desired Hugging Face repo
  filename = "Phi-3-mini-4k-instruct-q4.gguf" # Replace with the correct filename
@@ -1326,7 +1409,11 @@ thread = Thread(model, formats.phi3)
  thread.interact()
  ```

- ### `Local-rawdog`
+ ## Local-rawdog
+ Webscout's local raw-dog feature allows you to run Python scripts within your terminal prompt.
+
+ **Example:**
+
  ```python
  import webscout.Local as ws
  from webscout.Local.rawdog import RawDog
@@ -1413,6 +1500,63 @@ while True:
  print(script_output)

  ```
+
+ ## GGUF
+
+ Webscout provides tools to convert and quantize Hugging Face models into the GGUF format for use with offline LLMs.
+
+ **Example:**
+
+ ```python
+ from webscout import gguf
+ """
+ Valid quantization methods:
+ "q2_k", "q3_k_l", "q3_k_m", "q3_k_s",
+ "q4_0", "q4_1", "q4_k_m", "q4_k_s",
+ "q5_0", "q5_1", "q5_k_m", "q5_k_s",
+ "q6_k", "q8_0"
+ """
+ gguf.convert(
+     model_id="OEvortex/HelpingAI-Lite-1.5T", # Replace with your model ID
+     username="Abhaykoul", # Replace with your Hugging Face username
+     token="hf_token_write", # Replace with your Hugging Face token
+     quantization_methods="q4_k_m" # Optional, adjust quantization methods
+ )
+ ```
+
+ ## Autollama
+
+ Webscout's `autollama` utility download model from huggingface and then automatically makes it ollama ready
+
+ **Example:**
+
+ ```python
+ from webscout import autollama
+
+ autollama.autollama(
+     model_path="OEvortex/HelpingAI-Lite-1.5T", # Hugging Face model ID
+     gguf_file="HelpingAI-Lite-1.5T.q4_k_m.gguf" # GGUF file ID
+ )
+ ```
+
+ **Command Line Usage:**
+
+ * **GGUF Conversion:**
+   ```bash
+   python -m webscout.Extra.gguf -m "OEvortex/HelpingAI-Lite-1.5T" -u "your_username" -t "your_hf_token" -q "q4_k_m,q5_k_m"
+   ```
+
+ * **Autollama:**
+   ```bash
+   python -m webscout.Extra.autollama -m "OEvortex/HelpingAI-Lite-1.5T" -g "HelpingAI-Lite-1.5T.q4_k_m.gguf"
+   ```
+
+ **Note:**
+
+ * Replace `"your_username"` and `"your_hf_token"` with your actual Hugging Face credentials.
+ * The `model_path` in `autollama` is the Hugging Face model ID, and `gguf_file` is the GGUF file ID.
+
+
  ### `LLM` with internet
  ```python
  from __future__ import annotations
@@ -1528,7 +1672,7 @@ class TaskExecutor:
          self._proxy_path: str = None # Path to proxy configuration

          # History Management
-         self._history_filepath: str = None
+         self._history_filepath: str = "history.txt"
          self._update_history_file: bool = True
          self._history_offset: int = 10250

@@ -1539,7 +1683,7 @@ class TaskExecutor:
          # Optional Features
          self._web_search_enabled: bool = False # Enable web search
          self._rawdog_enabled: bool = True
-         self._internal_script_execution_enabled: bool = False
+         self._internal_script_execution_enabled: bool = True
          self._script_confirmation_required: bool = False
          self._selected_interpreter: str = "python"
          self._selected_optimizer: str = "code"
@@ -1567,6 +1711,9 @@ class TaskExecutor:
              "chatgptuk": webscout.ChatGPTUK,
              "poe": webscout.POE,
              "basedgpt": webscout.BasedGPT,
+             "deepseek": webscout.DeepSeek,
+             "deepinfra": webscout.DeepInfra,
+             "opengenptv2": webscout.OPENGPTv2
          }

          # Initialize Rawdog if enabled
@@ -1696,13 +1843,26 @@ class TaskExecutor:
          """
          try:
              is_feedback = self._rawdog_instance.main(response)
+             if is_feedback and "PREVIOUS SCRIPT EXCEPTION" in is_feedback:
+                 self._console.print(Markdown(f"LLM: {is_feedback}"))
+                 error_message = is_feedback.split("PREVIOUS SCRIPT EXCEPTION:\n")[1].strip()
+                 # Generate a solution for the error and execute it
+                 error_solution_query = (
+                     f"The following code was executed and resulted in an error:\n\n"
+                     f"{response}\n\n"
+                     f"Error: {error_message}\n\n"
+                     f"Please provide a solution to fix this error in the code and execute it."
+                 )
+                 try:
+                     new_response = self._ai_model.chat(error_solution_query)
+                     self._handle_rawdog_response(new_response)
+                 except webscout.exceptions.FailedToGenerateResponseError as e:
+                     self._console.print(Markdown(f"LLM: [red]Error while generating solution: {e}[/red]"))
+             else:
+                 self._console.print(Markdown("LLM: (Script executed successfully)"))
          except Exception as e:
              self._console.print(Markdown(f"LLM: [red]Error: {e}[/red]"))
-             return
-         if is_feedback:
-             self._console.print(Markdown(f"LLM: {is_feedback}"))
-         else:
-             self._console.print(Markdown("LLM: (Script executed successfully)"))
+

      async def process_async_query(self, query: str) -> None:
          """
{webscout-3.5 → webscout-3.7}/setup.py

@@ -5,8 +5,8 @@ with open("README.md", encoding="utf-8") as f:

  setup(
      name="webscout",
-     version="3.5",
-     description="Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs",
+     version="3.7",
+     description="Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more",
      long_description=README,
      long_description_content_type="text/markdown",
      author="OEvortex",
@@ -57,7 +57,8 @@ setup(
          "Helpingai-T2",
          "playsound",
          "poe_api_wrapper",
-         "pyreqwest_impersonate"
+         "pyreqwest_impersonate",
+         "ballyregan"
      ],
      entry_points={
          "console_scripts": [
@@ -74,7 +75,7 @@ setup(
          'llama-cpp-python',
          'colorama',
          'numpy',
-         'huggingface_hub',
+         'huggingface_hub[cli]',
      ],
  },
  license="HelpingAI",
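
The setup.py hunks confirm the packaging changes: the version bumps to 3.7, `ballyregan` becomes a hard dependency, and the `local` extra switches to `huggingface_hub[cli]`. After upgrading, a quick way to confirm which release is actually installed, using only the standard library:

```python
# Verify the installed webscout release via packaging metadata (stdlib only).
from importlib.metadata import version

print(version("webscout"))  # expected: "3.7" after this release
```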