webscout 4.2.tar.gz → 4.3.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (75):
  1. {webscout-4.2/webscout.egg-info → webscout-4.3}/PKG-INFO +29 -1
  2. {webscout-4.2 → webscout-4.3}/README.md +28 -0
  3. {webscout-4.2 → webscout-4.3}/setup.py +1 -1
  4. {webscout-4.2 → webscout-4.3}/webscout/AIauto.py +2 -2
  5. {webscout-4.2 → webscout-4.3}/webscout/AIutel.py +6 -8
  6. {webscout-4.2 → webscout-4.3}/webscout/Local/_version.py +1 -1
  7. webscout-4.3/webscout/Provider/FreeGemini.py +169 -0
  8. webscout-4.3/webscout/Provider/Llama.py +211 -0
  9. {webscout-4.2 → webscout-4.3}/webscout/Provider/__init__.py +5 -5
  10. {webscout-4.2 → webscout-4.3}/webscout/__init__.py +1 -1
  11. {webscout-4.2 → webscout-4.3}/webscout/async_providers.py +0 -2
  12. {webscout-4.2 → webscout-4.3}/webscout/version.py +1 -1
  13. {webscout-4.2 → webscout-4.3/webscout.egg-info}/PKG-INFO +29 -1
  14. {webscout-4.2 → webscout-4.3}/webscout.egg-info/SOURCES.txt +2 -1
  15. webscout-4.2/webscout/Provider/Llama2.py +0 -437
  16. {webscout-4.2 → webscout-4.3}/LICENSE.md +0 -0
  17. {webscout-4.2 → webscout-4.3}/setup.cfg +0 -0
  18. {webscout-4.2 → webscout-4.3}/webscout/AIbase.py +0 -0
  19. {webscout-4.2 → webscout-4.3}/webscout/DWEBS.py +0 -0
  20. {webscout-4.2 → webscout-4.3}/webscout/Extra/__init__.py +0 -0
  21. {webscout-4.2 → webscout-4.3}/webscout/Extra/autollama.py +0 -0
  22. {webscout-4.2 → webscout-4.3}/webscout/Extra/gguf.py +0 -0
  23. {webscout-4.2 → webscout-4.3}/webscout/Extra/weather.py +0 -0
  24. {webscout-4.2 → webscout-4.3}/webscout/Extra/weather_ascii.py +0 -0
  25. {webscout-4.2 → webscout-4.3}/webscout/LLM.py +0 -0
  26. {webscout-4.2 → webscout-4.3}/webscout/Local/__init__.py +0 -0
  27. {webscout-4.2 → webscout-4.3}/webscout/Local/formats.py +0 -0
  28. {webscout-4.2 → webscout-4.3}/webscout/Local/model.py +0 -0
  29. {webscout-4.2 → webscout-4.3}/webscout/Local/rawdog.py +0 -0
  30. {webscout-4.2 → webscout-4.3}/webscout/Local/samplers.py +0 -0
  31. {webscout-4.2 → webscout-4.3}/webscout/Local/thread.py +0 -0
  32. {webscout-4.2 → webscout-4.3}/webscout/Local/utils.py +0 -0
  33. {webscout-4.2 → webscout-4.3}/webscout/Provider/BasedGPT.py +0 -0
  34. {webscout-4.2 → webscout-4.3}/webscout/Provider/Berlin4h.py +0 -0
  35. {webscout-4.2 → webscout-4.3}/webscout/Provider/Blackboxai.py +0 -0
  36. {webscout-4.2 → webscout-4.3}/webscout/Provider/ChatGPTUK.py +0 -0
  37. {webscout-4.2 → webscout-4.3}/webscout/Provider/Cohere.py +0 -0
  38. {webscout-4.2 → webscout-4.3}/webscout/Provider/Deepinfra.py +0 -0
  39. {webscout-4.2 → webscout-4.3}/webscout/Provider/Deepseek.py +0 -0
  40. {webscout-4.2 → webscout-4.3}/webscout/Provider/Gemini.py +0 -0
  41. {webscout-4.2 → webscout-4.3}/webscout/Provider/Geminiflash.py +0 -0
  42. {webscout-4.2 → webscout-4.3}/webscout/Provider/Geminipro.py +0 -0
  43. {webscout-4.2 → webscout-4.3}/webscout/Provider/Groq.py +0 -0
  44. {webscout-4.2 → webscout-4.3}/webscout/Provider/Koboldai.py +0 -0
  45. {webscout-4.2 → webscout-4.3}/webscout/Provider/Leo.py +0 -0
  46. {webscout-4.2 → webscout-4.3}/webscout/Provider/OLLAMA.py +0 -0
  47. {webscout-4.2 → webscout-4.3}/webscout/Provider/OpenGPT.py +0 -0
  48. {webscout-4.2 → webscout-4.3}/webscout/Provider/Openai.py +0 -0
  49. {webscout-4.2 → webscout-4.3}/webscout/Provider/Perplexity.py +0 -0
  50. {webscout-4.2 → webscout-4.3}/webscout/Provider/Phind.py +0 -0
  51. {webscout-4.2 → webscout-4.3}/webscout/Provider/Poe.py +0 -0
  52. {webscout-4.2 → webscout-4.3}/webscout/Provider/Reka.py +0 -0
  53. {webscout-4.2 → webscout-4.3}/webscout/Provider/ThinkAnyAI.py +0 -0
  54. {webscout-4.2 → webscout-4.3}/webscout/Provider/VTLchat.py +0 -0
  55. {webscout-4.2 → webscout-4.3}/webscout/Provider/Xjai.py +0 -0
  56. {webscout-4.2 → webscout-4.3}/webscout/Provider/Yepchat.py +0 -0
  57. {webscout-4.2 → webscout-4.3}/webscout/Provider/Youchat.py +0 -0
  58. {webscout-4.2 → webscout-4.3}/webscout/YTdownloader.py +0 -0
  59. {webscout-4.2 → webscout-4.3}/webscout/__main__.py +0 -0
  60. {webscout-4.2 → webscout-4.3}/webscout/cli.py +0 -0
  61. {webscout-4.2 → webscout-4.3}/webscout/exceptions.py +0 -0
  62. {webscout-4.2 → webscout-4.3}/webscout/g4f.py +0 -0
  63. {webscout-4.2 → webscout-4.3}/webscout/models.py +0 -0
  64. {webscout-4.2 → webscout-4.3}/webscout/tempid.py +0 -0
  65. {webscout-4.2 → webscout-4.3}/webscout/transcriber.py +0 -0
  66. {webscout-4.2 → webscout-4.3}/webscout/utils.py +0 -0
  67. {webscout-4.2 → webscout-4.3}/webscout/voice.py +0 -0
  68. {webscout-4.2 → webscout-4.3}/webscout/webai.py +0 -0
  69. {webscout-4.2 → webscout-4.3}/webscout/webscout_search.py +0 -0
  70. {webscout-4.2 → webscout-4.3}/webscout/webscout_search_async.py +0 -0
  71. {webscout-4.2 → webscout-4.3}/webscout/websx_search.py +0 -0
  72. {webscout-4.2 → webscout-4.3}/webscout.egg-info/dependency_links.txt +0 -0
  73. {webscout-4.2 → webscout-4.3}/webscout.egg-info/entry_points.txt +0 -0
  74. {webscout-4.2 → webscout-4.3}/webscout.egg-info/requires.txt +0 -0
  75. {webscout-4.2 → webscout-4.3}/webscout.egg-info/top_level.txt +0 -0
{webscout-4.2/webscout.egg-info → webscout-4.3}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 4.2
+Version: 4.3
 Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -1472,6 +1472,34 @@ response = ollama_provider.chat("What is the meaning of life?")
 print(response)
 ```
 
+### 22. GROQ
+```python
+from webscout import GROQ
+ai = GROQ(api_key="")
+response = ai.chat("What is the meaning of life?")
+print(response)
+
+```
+
+### 23. Freegemini - chat with gemini for free
+```python
+from webscout import FreeGemini
+ai = FreeGemini()
+response = ai.chat("What is the meaning of life?")
+print(response)
+```
+
+### 24. LLama 70b - chat with meta's llama 3 70b
+```python
+
+from webscout import LLAMA
+
+llama = LLAMA()
+
+r = llama.chat("What is the meaning of life?")
+print(r)
+```
+
 ### `LLM`
 ```python
 from webscout.LLM import LLM

{webscout-4.2 → webscout-4.3}/README.md
@@ -1406,6 +1406,34 @@ response = ollama_provider.chat("What is the meaning of life?")
 print(response)
 ```
 
+### 22. GROQ
+```python
+from webscout import GROQ
+ai = GROQ(api_key="")
+response = ai.chat("What is the meaning of life?")
+print(response)
+
+```
+
+### 23. Freegemini - chat with gemini for free
+```python
+from webscout import FreeGemini
+ai = FreeGemini()
+response = ai.chat("What is the meaning of life?")
+print(response)
+```
+
+### 24. LLama 70b - chat with meta's llama 3 70b
+```python
+
+from webscout import LLAMA
+
+llama = LLAMA()
+
+r = llama.chat("What is the meaning of life?")
+print(r)
+```
+
 ### `LLM`
 ```python
 from webscout.LLM import LLM

{webscout-4.2 → webscout-4.3}/setup.py
@@ -5,7 +5,7 @@ with open("README.md", encoding="utf-8") as f:
 
 setup(
     name="webscout",
-    version="4.2",
+    version="4.3",
     description="Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more",
     long_description=README,
     long_description_content_type="text/markdown",

{webscout-4.2 → webscout-4.3}/webscout/AIauto.py
@@ -1,8 +1,8 @@
 from webscout.AIbase import Provider, AsyncProvider
 from webscout.Provider.ThinkAnyAI import ThinkAnyAI
 from webscout.Provider.Xjai import Xjai
-from webscout.Provider.Llama2 import LLAMA2
-from webscout.Provider.Llama2 import AsyncLLAMA2
+from webscout.Provider.Llama import LLAMA2
+from webscout.Provider.Llama import AsyncLLAMA2
 from webscout.Provider.Leo import LEO
 from webscout.Provider.Leo import AsyncLEO
 from webscout.Provider.Koboldai import KOBOLDAI

{webscout-4.2 → webscout-4.3}/webscout/AIutel.py
@@ -220,17 +220,16 @@ class Conversation:
         ), f"File '{filepath}' does not exist"
         if not os.path.isfile(filepath):
             logging.debug(f"Creating new chat-history file - '{filepath}'")
-            with open(filepath, "w") as fh:  # Try creating new file
-                # lets add intro here
+            with open(filepath, "w", encoding="utf-8") as fh:  # Try creating new file with UTF-8 encoding
                 fh.write(self.intro)
         else:
             logging.debug(f"Loading conversation from '{filepath}'")
-            with open(filepath) as fh:
+            with open(filepath, encoding="utf-8") as fh:  # Open with UTF-8 encoding
                 file_contents = fh.readlines()
                 if file_contents:
                     self.intro = file_contents[0]  # Presume first line is the intro.
                     self.chat_history = "\n".join(file_contents[1:])
-
+
     def __trim_chat_history(self, chat_history: str, intro: str) -> str:
         """Ensures the len(prompt) and max_tokens_to_sample is not > 4096"""
         len_of_intro = len(intro)
@@ -243,7 +242,6 @@ class Conversation:
             # Remove head of total (n) of chat_history
             trimmed_chat_history = chat_history[truncate_at:]
             return "... " + trimmed_chat_history
-            # print(len(self.chat_history))
         else:
             return chat_history
 
@@ -281,12 +279,12 @@ class Conversation:
         new_history = self.history_format % dict(user=prompt, llm=response)
         if self.file and self.update_file:
             if os.path.exists(self.file):
-                with open(self.file, "w") as fh:
+                with open(self.file, "w", encoding="utf-8") as fh:  # Specify UTF-8 encoding
                     fh.write(self.intro + "\n" + new_history)
             else:
-                with open(self.file, "a") as fh:
+                with open(self.file, "a", encoding="utf-8") as fh:  # Specify UTF-8 encoding
                     fh.write(new_history)
-        self.chat_history += new_history
+        self.chat_history += new_history
 
 
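The AIutel.py hunk above pins every chat-history file operation to an explicit `encoding="utf-8"`. Without it, `open()` falls back to the platform default encoding (cp1252 on many Windows installs), so a history containing non-ASCII model output would raise `UnicodeEncodeError`. A minimal sketch of the failure mode this avoids; the file name is illustrative:

```python
# Sketch of the bug the encoding="utf-8" change avoids; "history.txt" is illustrative.
text = "résumé 中文"  # non-ASCII content, e.g. from a model response

try:
    with open("history.txt", "w", encoding="cp1252") as fh:  # simulates the old Windows default
        fh.write(text)
except UnicodeEncodeError as err:
    print(f"old behaviour: {err}")

with open("history.txt", "w", encoding="utf-8") as fh:  # the new, explicit behaviour
    fh.write(text)  # succeeds on every platform
```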
{webscout-4.2 → webscout-4.3}/webscout/Local/_version.py
@@ -1,3 +1,3 @@
 from llama_cpp import __version__ as __llama_cpp_version__
 
-__version__ = '4.2'
+__version__ = '4.3'

webscout-4.3/webscout/Provider/FreeGemini.py
@@ -0,0 +1,169 @@
+import time
+import uuid
+from selenium import webdriver
+from selenium.webdriver.chrome.options import Options
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait
+import click
+import requests
+from requests import get
+from uuid import uuid4
+from re import findall
+from requests.exceptions import RequestException
+from curl_cffi.requests import get, RequestsError
+import g4f
+from random import randint
+from PIL import Image
+import io
+import re
+import json
+import yaml
+from ..AIutel import Optimizers
+from ..AIutel import Conversation
+from ..AIutel import AwesomePrompts, sanitize_stream
+from ..AIbase import Provider, AsyncProvider
+from webscout import exceptions
+from typing import Any, AsyncGenerator, Dict
+import logging
+import httpx
+
+class FreeGemini(Provider):
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 60,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """Instantiates FreeGemini
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.chat_endpoint = "https://api.safone.dev/bard"
+        self.timeout = timeout
+        self.last_response = {}
+
+        self.headers = {
+            "accept": "application/json",
+            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        self.session.headers.update(self.headers)
+        payload = {"message": conversation_prompt}
+
+        response = self.session.post(
+            self.chat_endpoint, json=payload, timeout=self.timeout
+        )
+
+        if not response.ok:
+            raise exceptions.FailedToGenerateResponseError(
+                f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+            )
+
+        resp = response.json()
+        message_load = self.get_message(resp)
+        self.conversation.update_chat_history(
+            prompt, message_load
+        )
+        return resp
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+        return self.get_message(
+            self.ask(
+                prompt,
+                optimizer=optimizer,
+                conversationally=conversationally,
+            )
+        )
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["message"]
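FreeGemini.py wraps the unofficial https://api.safone.dev/bard endpoint in webscout's `Provider` interface. A minimal usage sketch based on the constructor and `chat()` shown above, assuming webscout 4.3 is installed; the third-party endpoint may change or go offline:

```python
# Minimal sketch; FreeGemini needs no API key, but depends on api.safone.dev being up.
from webscout import FreeGemini
from webscout import exceptions

ai = FreeGemini(timeout=60)  # parameters as defined in __init__ above

try:
    print(ai.chat("What is the meaning of life?"))
except exceptions.FailedToGenerateResponseError as err:  # raised by ask() on non-2xx responses
    print(f"request failed: {err}")
```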
webscout-4.3/webscout/Provider/Llama.py
@@ -0,0 +1,211 @@
+import time
+import uuid
+from selenium import webdriver
+from selenium.webdriver.chrome.options import Options
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait
+import click
+import requests
+from requests import get
+from uuid import uuid4
+from re import findall
+from requests.exceptions import RequestException
+from curl_cffi.requests import get, RequestsError
+import g4f
+from random import randint
+from PIL import Image
+import io
+import re
+import json
+import yaml
+from ..AIutel import Optimizers
+from ..AIutel import Conversation
+from ..AIutel import AwesomePrompts, sanitize_stream
+from ..AIbase import Provider, AsyncProvider
+from webscout import exceptions
+from typing import Any, AsyncGenerator, Dict
+import logging
+import httpx
+
+class LLAMA(Provider):
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """Instantiates LLAMA
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            model (str, optional): LLM model name. Defaults to "llama3-70b-8192".
+        """
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = "llama3-70b-8192",
+        self.api_endpoint = "https://api.safone.dev/llama"
+        self.headers = {
+            "accept": "application/json",
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session = requests.Session()
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict | AsyncGenerator:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict|AsyncGenerator : ai content
+            ```json
+            {
+                "text" : "print('How may I help you today?')"
+            }
+            ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        self.session.headers.update(self.headers)
+        payload = {
+            "message": conversation_prompt
+        }
+
+        def for_stream():
+            response = self.session.get(
+                self.api_endpoint, params=payload, stream=True, timeout=self.timeout
+            )
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            message_load = ""
+            for chunk in response.iter_lines():
+                try:
+                    resp = json.loads(chunk)
+                    message_load += resp['message']
+                    yield chunk if raw else dict(text=message_load)
+                    self.last_response.update(resp)
+                except:
+                    pass
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            response = self.session.get(
+                self.api_endpoint, params=payload, stream=False, timeout=self.timeout
+            )
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+            resp = response.json()
+            self.last_response.update(resp)
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+            return resp
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str | AsyncGenerator:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["message"]
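Llama.py exposes the same `Provider` interface over https://api.safone.dev/llama, with a streaming path: `ask(stream=True)` yields `{"text": ...}` dicts holding the message accumulated so far. Two quirks in the shipped code are worth noting: `self.model` is assigned with a trailing comma (making it a one-element tuple, though nothing reads it), and `chat(stream=True)` calls `get_message()`, which reads the `"message"` key and therefore fails on the `"text"`-keyed stream chunks. A sketch of the two working modes, assuming webscout 4.3 is installed and the endpoint is reachable:

```python
# Minimal sketch; the LLAMA provider depends on api.safone.dev being up.
from webscout import LLAMA

llama = LLAMA()

# Non-streaming: chat() returns the full response string.
print(llama.chat("What is the meaning of life?"))

# Streaming: iterate ask(stream=True) directly and read the "text" key,
# since chat(stream=True) would look up "message" and raise KeyError as shipped.
final = ""
for chunk in llama.ask("Tell me a short story", stream=True):
    final = chunk["text"]  # each chunk holds the accumulated message so far
print(final)
```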
{webscout-4.2 → webscout-4.3}/webscout/Provider/__init__.py
@@ -2,8 +2,7 @@
 
 from .ThinkAnyAI import ThinkAnyAI
 from .Xjai import Xjai
-from .Llama2 import LLAMA2
-from .Llama2 import AsyncLLAMA2
+from .Llama import LLAMA
 from .Cohere import Cohere
 from .Reka import REKA
 from .Groq import GROQ
@@ -38,11 +37,11 @@ from .VTLchat import VTLchat
 from .Geminipro import GEMINIPRO
 from .Geminiflash import GEMINIFLASH
 from .OLLAMA import OLLAMA
+from .FreeGemini import FreeGemini
 __all__ = [
     'ThinkAnyAI',
     'Xjai',
-    'LLAMA2',
-    'AsyncLLAMA2',
+    'LLAMA',
     'Cohere',
     'REKA',
     'GROQ',
@@ -78,7 +77,8 @@ __all__ = [
     'OPENGPTv2',
     'GEMINIPRO',
     'GEMINIFLASH',
-    'OLLAMA'
+    'OLLAMA',
+    'FreeGemini'
 
 
 ]

{webscout-4.2 → webscout-4.3}/webscout/__init__.py
@@ -6,7 +6,7 @@ from .transcriber import transcriber
 from .voice import play_audio
 from .websx_search import WEBSX
 
-from .LLM import LLM
+from .LLM import VLM, LLM
 from .YTdownloader import *
 # from .Local import *
 import g4f

{webscout-4.2 → webscout-4.3}/webscout/async_providers.py
@@ -2,7 +2,6 @@ from webscout import AsyncPhindSearch
 from webscout import AsyncYEPCHAT
 from webscout import AsyncOPENGPT
 from webscout import AsyncOPENAI
-from webscout import AsyncLLAMA2
 from webscout import AsyncLEO
 from webscout import AsyncKOBOLDAI
 from webscout import AsyncGROQ
@@ -15,7 +14,6 @@ mapper: dict[str, object] = {
     "koboldai": AsyncKOBOLDAI,
     "blackboxai": AsyncBLACKBOXAI,
     "gpt4free": AsyncGPT4FREE,
-    "llama2": AsyncLLAMA2,
     "yepchat": AsyncYEPCHAT,
     "leo": AsyncLEO,
     "groq": AsyncGROQ,
{webscout-4.2 → webscout-4.3}/webscout/version.py
@@ -1,2 +1,2 @@
-__version__ = "4.2"
+__version__ = "4.3"
 __prog__ = "webscout"

{webscout-4.2 → webscout-4.3/webscout.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 4.2
+Version: 4.3
 Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -1472,6 +1472,34 @@ response = ollama_provider.chat("What is the meaning of life?")
 print(response)
 ```
 
+### 22. GROQ
+```python
+from webscout import GROQ
+ai = GROQ(api_key="")
+response = ai.chat("What is the meaning of life?")
+print(response)
+
+```
+
+### 23. Freegemini - chat with gemini for free
+```python
+from webscout import FreeGemini
+ai = FreeGemini()
+response = ai.chat("What is the meaning of life?")
+print(response)
+```
+
+### 24. LLama 70b - chat with meta's llama 3 70b
+```python
+
+from webscout import LLAMA
+
+llama = LLAMA()
+
+r = llama.chat("What is the meaning of life?")
+print(r)
+```
+
 ### `LLM`
 ```python
 from webscout.LLM import LLM

{webscout-4.2 → webscout-4.3}/webscout.egg-info/SOURCES.txt
@@ -49,13 +49,14 @@ webscout/Provider/ChatGPTUK.py
 webscout/Provider/Cohere.py
 webscout/Provider/Deepinfra.py
 webscout/Provider/Deepseek.py
+webscout/Provider/FreeGemini.py
 webscout/Provider/Gemini.py
 webscout/Provider/Geminiflash.py
 webscout/Provider/Geminipro.py
 webscout/Provider/Groq.py
 webscout/Provider/Koboldai.py
 webscout/Provider/Leo.py
-webscout/Provider/Llama2.py
+webscout/Provider/Llama.py
 webscout/Provider/OLLAMA.py
 webscout/Provider/OpenGPT.py
 webscout/Provider/Openai.py