webscout 7.0-py3-none-any.whl → 7.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout has been flagged as potentially problematic.

Files changed (147)
  1. webscout/AIauto.py +191 -191
  2. webscout/AIbase.py +122 -122
  3. webscout/AIutel.py +440 -440
  4. webscout/Bard.py +343 -161
  5. webscout/DWEBS.py +489 -492
  6. webscout/Extra/YTToolkit/YTdownloader.py +995 -995
  7. webscout/Extra/YTToolkit/__init__.py +2 -2
  8. webscout/Extra/YTToolkit/transcriber.py +476 -479
  9. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  10. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  11. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  12. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  13. webscout/Extra/YTToolkit/ytapi/video.py +103 -103
  14. webscout/Extra/autocoder/__init__.py +9 -9
  15. webscout/Extra/autocoder/autocoder_utiles.py +199 -199
  16. webscout/Extra/autocoder/rawdog.py +5 -7
  17. webscout/Extra/autollama.py +230 -230
  18. webscout/Extra/gguf.py +3 -3
  19. webscout/Extra/weather.py +171 -171
  20. webscout/LLM.py +442 -442
  21. webscout/Litlogger/__init__.py +67 -681
  22. webscout/Litlogger/core/__init__.py +6 -0
  23. webscout/Litlogger/core/level.py +20 -0
  24. webscout/Litlogger/core/logger.py +123 -0
  25. webscout/Litlogger/handlers/__init__.py +12 -0
  26. webscout/Litlogger/handlers/console.py +50 -0
  27. webscout/Litlogger/handlers/file.py +143 -0
  28. webscout/Litlogger/handlers/network.py +174 -0
  29. webscout/Litlogger/styles/__init__.py +7 -0
  30. webscout/Litlogger/styles/colors.py +231 -0
  31. webscout/Litlogger/styles/formats.py +377 -0
  32. webscout/Litlogger/styles/text.py +87 -0
  33. webscout/Litlogger/utils/__init__.py +6 -0
  34. webscout/Litlogger/utils/detectors.py +154 -0
  35. webscout/Litlogger/utils/formatters.py +200 -0
  36. webscout/Provider/AISEARCH/DeepFind.py +250 -250
  37. webscout/Provider/Blackboxai.py +136 -137
  38. webscout/Provider/ChatGPTGratis.py +226 -0
  39. webscout/Provider/Cloudflare.py +91 -78
  40. webscout/Provider/DeepSeek.py +218 -0
  41. webscout/Provider/Deepinfra.py +59 -35
  42. webscout/Provider/Free2GPT.py +131 -124
  43. webscout/Provider/Gemini.py +100 -115
  44. webscout/Provider/Glider.py +74 -59
  45. webscout/Provider/Groq.py +30 -18
  46. webscout/Provider/Jadve.py +108 -77
  47. webscout/Provider/Llama3.py +117 -94
  48. webscout/Provider/Marcus.py +191 -137
  49. webscout/Provider/Netwrck.py +62 -50
  50. webscout/Provider/PI.py +79 -124
  51. webscout/Provider/PizzaGPT.py +129 -83
  52. webscout/Provider/QwenLM.py +311 -0
  53. webscout/Provider/TTI/AiForce/__init__.py +22 -22
  54. webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
  55. webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
  56. webscout/Provider/TTI/Nexra/__init__.py +22 -22
  57. webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
  58. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
  59. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
  60. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
  61. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
  62. webscout/Provider/TTI/artbit/__init__.py +22 -22
  63. webscout/Provider/TTI/artbit/async_artbit.py +184 -184
  64. webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
  65. webscout/Provider/TTI/blackbox/__init__.py +4 -4
  66. webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
  67. webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
  68. webscout/Provider/TTI/deepinfra/__init__.py +4 -4
  69. webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
  70. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
  71. webscout/Provider/TTI/huggingface/__init__.py +22 -22
  72. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
  73. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
  74. webscout/Provider/TTI/imgninza/__init__.py +4 -4
  75. webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
  76. webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
  77. webscout/Provider/TTI/talkai/__init__.py +4 -4
  78. webscout/Provider/TTI/talkai/async_talkai.py +229 -229
  79. webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
  80. webscout/Provider/TTS/deepgram.py +182 -182
  81. webscout/Provider/TTS/elevenlabs.py +136 -136
  82. webscout/Provider/TTS/gesserit.py +150 -150
  83. webscout/Provider/TTS/murfai.py +138 -138
  84. webscout/Provider/TTS/parler.py +133 -134
  85. webscout/Provider/TTS/streamElements.py +360 -360
  86. webscout/Provider/TTS/utils.py +280 -280
  87. webscout/Provider/TTS/voicepod.py +116 -116
  88. webscout/Provider/TextPollinationsAI.py +74 -47
  89. webscout/Provider/WiseCat.py +193 -0
  90. webscout/Provider/__init__.py +144 -136
  91. webscout/Provider/cerebras.py +242 -227
  92. webscout/Provider/chatglm.py +204 -204
  93. webscout/Provider/dgaf.py +67 -39
  94. webscout/Provider/gaurish.py +105 -66
  95. webscout/Provider/geminiapi.py +208 -208
  96. webscout/Provider/granite.py +223 -0
  97. webscout/Provider/hermes.py +218 -218
  98. webscout/Provider/llama3mitril.py +179 -179
  99. webscout/Provider/llamatutor.py +72 -62
  100. webscout/Provider/llmchat.py +60 -35
  101. webscout/Provider/meta.py +794 -794
  102. webscout/Provider/multichat.py +331 -230
  103. webscout/Provider/typegpt.py +359 -356
  104. webscout/Provider/yep.py +5 -5
  105. webscout/__main__.py +5 -5
  106. webscout/cli.py +319 -319
  107. webscout/conversation.py +241 -242
  108. webscout/exceptions.py +328 -328
  109. webscout/litagent/__init__.py +28 -28
  110. webscout/litagent/agent.py +2 -3
  111. webscout/litprinter/__init__.py +0 -58
  112. webscout/scout/__init__.py +8 -8
  113. webscout/scout/core.py +884 -884
  114. webscout/scout/element.py +459 -459
  115. webscout/scout/parsers/__init__.py +69 -69
  116. webscout/scout/parsers/html5lib_parser.py +172 -172
  117. webscout/scout/parsers/html_parser.py +236 -236
  118. webscout/scout/parsers/lxml_parser.py +178 -178
  119. webscout/scout/utils.py +38 -38
  120. webscout/swiftcli/__init__.py +811 -811
  121. webscout/update_checker.py +2 -12
  122. webscout/version.py +1 -1
  123. webscout/webscout_search.py +1142 -1140
  124. webscout/webscout_search_async.py +635 -635
  125. webscout/zeroart/__init__.py +54 -54
  126. webscout/zeroart/base.py +60 -60
  127. webscout/zeroart/effects.py +99 -99
  128. webscout/zeroart/fonts.py +816 -816
  129. {webscout-7.0.dist-info → webscout-7.2.dist-info}/METADATA +21 -28
  130. webscout-7.2.dist-info/RECORD +217 -0
  131. webstoken/__init__.py +30 -30
  132. webstoken/classifier.py +189 -189
  133. webstoken/keywords.py +216 -216
  134. webstoken/language.py +128 -128
  135. webstoken/ner.py +164 -164
  136. webstoken/normalizer.py +35 -35
  137. webstoken/processor.py +77 -77
  138. webstoken/sentiment.py +206 -206
  139. webstoken/stemmer.py +73 -73
  140. webstoken/tagger.py +60 -60
  141. webstoken/tokenizer.py +158 -158
  142. webscout/Provider/RUBIKSAI.py +0 -272
  143. webscout-7.0.dist-info/RECORD +0 -199
  144. {webscout-7.0.dist-info → webscout-7.2.dist-info}/LICENSE.md +0 -0
  145. {webscout-7.0.dist-info → webscout-7.2.dist-info}/WHEEL +0 -0
  146. {webscout-7.0.dist-info → webscout-7.2.dist-info}/entry_points.txt +0 -0
  147. {webscout-7.0.dist-info → webscout-7.2.dist-info}/top_level.txt +0 -0
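The Litlogger refactor (entries 21-35) splits the old single-module logger into core/handlers/styles/utils subpackages. Judging by the provider diffs below, `Logger` and `LogFormat` remain importable from `webscout.Litlogger`, with named handlers for console, file, and network output as new submodules. A minimal sketch of that surface, inferred from the provider code later in this diff rather than from Litlogger's own source:

# Import surface inferred from the provider diffs below (e.g. Glider.py).
from webscout.Litlogger import Logger, LogFormat

log = Logger(name="demo", format=LogFormat.MODERN_EMOJI)
log.info("Litlogger 7.2 keeps Logger and LogFormat at the package root")
log.debug("handlers/console.py, handlers/file.py and handlers/network.py are new submodules")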
webscout/Provider/Gemini.py CHANGED
@@ -1,61 +1,90 @@
-from ..AIutel import Optimizers
-from ..AIutel import Conversation
-from ..AIutel import AwesomePrompts, sanitize_stream
-from ..AIbase import Provider, AsyncProvider
-from webscout import exceptions
-from typing import Any, AsyncGenerator, Dict
-import logging
-from ..Bard import Chatbot
-import logging
+
 from os import path
-from json import load
-from json import dumps
+from json import load, dumps
 import warnings
-logging.getLogger("httpx").setLevel(logging.ERROR)
+from typing import Any, Dict
+
+# Import internal modules and dependencies
+from ..AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream
+from ..AIbase import Provider, AsyncProvider
+from ..Bard import Chatbot, Model
+
+# Import Logger and related classes (assumed similar to what is in yep.py)
+from webscout import Logger, LogFormat
+
 warnings.simplefilter("ignore", category=UserWarning)
+
+# Define model aliases for easy usage
+MODEL_ALIASES: Dict[str, Model] = {
+    "unspecified": Model.UNSPECIFIED,
+    "flash": Model.G_2_0_FLASH,
+    "flash-exp": Model.G_2_0_FLASH_EXP,
+    "thinking": Model.G_2_0_FLASH_THINKING,
+    "thinking-with-apps": Model.G_2_0_FLASH_THINKING_WITH_APPS,
+    "exp-advanced": Model.G_2_0_EXP_ADVANCED,
+    "1.5-flash": Model.G_1_5_FLASH,
+    "1.5-pro": Model.G_1_5_PRO,
+    "1.5-pro-research": Model.G_1_5_PRO_RESEARCH,
+}
+
+# List of available models (friendly names)
+AVAILABLE_MODELS = list(MODEL_ALIASES.keys())
+
 class GEMINI(Provider):
     def __init__(
         self,
         cookie_file: str,
+        model,  # Accepts either a Model enum or a str alias.
         proxy: dict = {},
         timeout: int = 30,
+        logging: bool = False  # Flag to enable Logger debugging.
     ):
-        """Initializes GEMINI
+        """
+        Initializes GEMINI with model support and optional debugging.
 
         Args:
-            cookie_file (str): Path to `bard.google.com.cookies.json` file
-            proxy (dict, optional): Http request proxy. Defaults to {}.
-            timeout (int, optional): Http request timeout. Defaults to 30.
+            cookie_file (str): Path to the cookies JSON file.
+            model (Model or str): Selected model for the session. Can be a Model enum
+                or a string alias. Available aliases: flash, flash-exp, thinking, thinking-with-apps,
+                exp-advanced, 1.5-flash, 1.5-pro, 1.5-pro-research.
+            proxy (dict, optional): HTTP request proxy. Defaults to {}.
+            timeout (int, optional): HTTP request timeout in seconds. Defaults to 30.
+            logging (bool, optional): Flag to enable Logger debugging. Defaults to False.
         """
         self.conversation = Conversation(False)
-        self.session_auth1 = None
-        self.session_auth2 = None
-        assert isinstance(
-            cookie_file, str
-        ), f"cookie_file should be of {str} only not '{type(cookie_file)}'"
-        if path.isfile(cookie_file):
-            # let's assume auth is a path to exported .json cookie-file
-            with open(cookie_file) as fh:
-                entries = load(fh)
-            for entry in entries:
-                if entry["name"] == "__Secure-1PSID":
-                    self.session_auth1 = entry["value"]
-                elif entry["name"] == "__Secure-1PSIDTS":
-                    self.session_auth2 = entry["value"]
-
-            assert all(
-                [self.session_auth1, self.session_auth2]
-            ), f"Failed to extract the required cookie value from file '{cookie_file}'"
-        else:
+
+        # Initialize Logger only if logging is enabled; otherwise, set to None.
+        self.logger = Logger(name="GEMINI", format=LogFormat.MODERN_EMOJI) if logging else None
+
+        # Ensure cookie_file existence.
+        if not isinstance(cookie_file, str):
+            raise TypeError(f"cookie_file should be of type str, not '{type(cookie_file)}'")
+        if not path.isfile(cookie_file):
             raise Exception(f"{cookie_file} is not a valid file path")
 
-        self.session = Chatbot(self.session_auth1, self.session_auth2, proxy, timeout)
+        # If model is provided as alias (str), convert to Model enum.
+        if isinstance(model, str):
+            alias = model.lower()
+            if alias in MODEL_ALIASES:
+                selected_model = MODEL_ALIASES[alias]
+            else:
+                raise Exception(f"Unknown model alias: '{model}'. Available aliases: {', '.join(AVAILABLE_MODELS)}")
+        elif isinstance(model, Model):
+            selected_model = model
+        else:
+            raise TypeError("model must be a string alias or an instance of Model")
+
+        # Initialize the Chatbot session using the cookie file.
+        self.session = Chatbot(cookie_file, proxy, timeout, selected_model)
         self.last_response = {}
         self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+            method for method in dir(Optimizers) if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+        if self.logger:
+            self.logger.debug("GEMINI initialized with model: {}".format(selected_model.model_name))
+        # Store cookies from Chatbot for later use (e.g. image generation)
+        self.session_auth1 = self.session.secure_1psid
+        self.session_auth2 = self.session.secure_1psidts
 
     def ask(
         self,
@@ -65,53 +94,17 @@ class GEMINI(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> dict:
-        """Chat with AI
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defeaults to None
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict : {}
-        ```json
-        {
-            "content": "General Kenobi! \n\n(I couldn't help but respond with the iconic Star Wars greeting since you used it first. )\n\nIs there anything I can help you with today?\n[Image of Hello there General Kenobi]",
-            "conversation_id": "c_f13f6217f9a997aa",
-            "response_id": "r_d3665f95975c368f",
-            "factualityQueries": null,
-            "textQuery": [
-                "hello there",
-                1
-            ],
-            "choices": [
-                {
-                    "id": "rc_ea075c9671bfd8cb",
-                    "content": [
-                        "General Kenobi! \n\n(I couldn't help but respond with the iconic Star Wars greeting since you used it first. )\n\nIs there anything I can help you with today?\n[Image of Hello there General Kenobi]"
-                    ]
-                },
-                {
-                    "id": "rc_de6dd3fb793a5402",
-                    "content": [
-                        "General Kenobi! (or just a friendly hello, whichever you prefer!). \n\nI see you're a person of culture as well. *Star Wars* references are always appreciated. \n\nHow can I help you today?\n"
-                    ]
-                },
-                {
-                    "id": "rc_a672ac089caf32db",
-                    "content": [
-                        "General Kenobi! (or just a friendly hello if you're not a Star Wars fan!). \n\nHow can I help you today? Feel free to ask me anything, or tell me what you'd like to chat about. I'm here to assist in any way I can.\n[Image of Obi-Wan Kenobi saying hello there]"
-                    ]
-                }
-            ],
-
-            "images": [
-                "https://i.pinimg.com/originals/40/74/60/407460925c9e419d82b93313f0b42f71.jpg"
-            ]
-        }
-
-        ```
+        """Chat with AI.
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name (e.g., 'code', 'shell_command'). Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+
+        Returns:
+            dict: Response generated by the underlying Chatbot.
         """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -120,24 +113,21 @@ class GEMINI(Provider):
                 conversation_prompt if conversationally else prompt
             )
         else:
-            raise Exception(
-                f"Optimizer is not one of {self.__available_optimizers}"
-            )
+            raise Exception(f"Optimizer is not one of {', '.join(self.__available_optimizers)}")
 
         def for_stream():
             response = self.session.ask(prompt)
             self.last_response.update(response)
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
+            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
             yield dumps(response) if raw else response
 
         def for_non_stream():
-            # let's make use of stream
            for _ in for_stream():
                 pass
             return self.last_response
 
+        if self.logger:
+            self.logger.debug(f"Request sent: {prompt}")
         return for_stream() if stream else for_non_stream()
 
     def chat(
@@ -147,48 +137,43 @@ class GEMINI(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str:
-        """Generate response `str`
+        """Generate response text.
+
         Args:
-            prompt (str): Prompt to be send.
+            prompt (str): Prompt to be sent.
             stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            optimizer (str, optional): Prompt optimizer name. Defaults to None.
             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+
         Returns:
-            str: Response generated
+            str: Response generated.
         """
-
         def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            ):
+            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
                 yield self.get_message(response)
 
         def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
-            )
+            return self.get_message(self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally))
 
         return for_stream() if stream else for_non_stream()
 
     def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
+        """Retrieves message content from the response.
 
         Args:
-            response (dict): Response generated by `self.ask`
+            response (dict): Response generated by `self.ask`.
 
         Returns:
-            str: Message extracted
+            str: Extracted message content.
         """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
+        if not isinstance(response, dict):
+            raise TypeError("Response should be of type dict")
         return response["content"]
 
     def reset(self):
-        """Reset the current conversation"""
+        """Reset the current conversation."""
         self.session.async_chatbot.conversation_id = ""
         self.session.async_chatbot.response_id = ""
-        self.session.async_chatbot.choice_id = ""
+        self.session.async_chatbot.choice_id = ""
+        if self.logger:
+            self.logger.debug("Conversation reset")
webscout/Provider/Glider.py CHANGED
@@ -2,49 +2,52 @@ import requests
 import json
 from typing import Any, Dict, Generator, Optional
 
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
+from webscout.Litlogger import Logger, LogFormat
 from webscout import LitAgent as Lit
+
 class GliderAI(Provider):
     """
-    A class to interact with the Glider.so API.
+    A class to interact with the Glider.so API with comprehensive logging.
     """
 
-    AVAILABLE_MODELS = [
+    AVAILABLE_MODELS = {
         "chat-llama-3-1-70b",
         "chat-llama-3-1-8b",
         "chat-llama-3-2-3b",
         "deepseek-ai/DeepSeek-R1",
-    ]
-
-    model_aliases = {
-        "llama-3.1-70b": "chat-llama-3-1-70b",
-        "llama-3.1-8b": "chat-llama-3-1-8b",
-        "llama-3.2-3b": "chat-llama-3-2-3b",
-        "deepseek-r1": "deepseek-ai/DeepSeek-R1",
     }
 
-
     def __init__(
         self,
         is_conversation: bool = True,
         max_tokens: int = 600,
         timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
+        intro: Optional[str] = None,
+        filepath: Optional[str] = None,
         update_file: bool = True,
         proxies: dict = {},
         history_offset: int = 10250,
-        act: str = None,
-        model: str = "llama-3.1-70b",
+        act: Optional[str] = None,
+        model: str = "chat-llama-3-1-70b",
         system_prompt: str = "You are a helpful AI assistant.",
+        logging: bool = False
     ):
-        """Initializes the GliderAI API client."""
-        if model not in self.AVAILABLE_MODELS and model not in self.model_aliases:
+        """Initializes the GliderAI API client with logging capabilities."""
+        if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {', '.join(self.AVAILABLE_MODELS)}")
+
+        self.logger = Logger(
+            name="GliderAI",
+            format=LogFormat.MODERN_EMOJI,
+
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info(f"Initializing GliderAI with model: {model}")
+
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -52,7 +55,7 @@ class GliderAI(Provider):
         self.stream_chunk_size = 64
         self.timeout = timeout
         self.last_response = {}
-        self.model = self.model_aliases.get(model,model)
+        self.model = model
         self.system_prompt = system_prompt
         self.headers = {
             "accept": "*/*",
@@ -66,10 +69,10 @@ class GliderAI(Provider):
         self.session.proxies = proxies
 
         self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
+            method for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -82,40 +85,44 @@ class GliderAI(Provider):
         )
         self.conversation.history_offset = history_offset
 
+        if self.logger:
+            self.logger.info("GliderAI initialized successfully")
+
     def ask(
         self,
         prompt: str,
         stream: bool = False,
         raw: bool = False,
-        optimizer: str = None,
+        optimizer: Optional[str] = None,
         conversationally: bool = False,
-    ) -> Dict[str, Any] | Generator:
-        """Chat with AI
+    ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
+        """Chat with AI with logging capabilities.
 
         Args:
-            prompt (str): Prompt to be send.
+            prompt (str): Prompt to be sent.
             stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            raw (bool, optional): Return raw response chunks instead of dict. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name. Defaults to None.
+            conversationally (bool, optional): Use conversationally modified prompt when optimizer specified. Defaults to False.
         Returns:
-            dict : {}
-        ```json
-        {
-            "text" : "How may I assist you today?"
-        }
-        ```
+            dict or Generator[dict, None, None]: The response from the API.
         """
+        if self.logger:
+            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
+            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
+
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
+                raise Exception(f"Optimizer is not one of {list(self.__available_optimizers)}")
 
         payload = {
             "messages": [
@@ -126,14 +133,19 @@ class GliderAI(Provider):
         }
 
         def for_stream():
+            if self.logger:
+                self.logger.debug("Initiating streaming request to API")
             response = self.session.post(
                 self.api_endpoint, json=payload, stream=True, timeout=self.timeout
             )
             if not response.ok:
+                if self.logger:
+                    self.logger.error(
+                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                    )
                 raise exceptions.FailedToGenerateResponseError(
                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 )
-
             streaming_text = ""
             for value in response.iter_lines(decode_unicode=True):
                 if value:
@@ -143,46 +155,50 @@ class GliderAI(Provider):
                         content = data['choices'][0].get('delta', {}).get("content", "")
                         if content:
                             streaming_text += content
-                            yield content if raw else dict(text=content)
+                            yield content if raw else {"text": content}
                     except json.JSONDecodeError:
-                        if "stop" in value :
-                            break
-
+                        if "stop" in value:
+                            break
             self.last_response.update(dict(text=streaming_text))
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
+            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
+            if self.logger:
+                self.logger.debug("Response processing completed")
+
         def for_non_stream():
+            if self.logger:
+                self.logger.debug("Processing non-streaming request")
             for _ in for_stream():
                 pass
             return self.last_response
 
         return for_stream() if stream else for_non_stream()
 
-
     def chat(
         self,
         prompt: str,
         stream: bool = False,
-        optimizer: str = None,
+        optimizer: Optional[str] = None,
         conversationally: bool = False,
     ) -> str | Generator[str, None, None]:
-        """Generate response `str`
+        """Generate response as a string with logging.
+
         Args:
-            prompt (str): Prompt to be send.
+            prompt (str): Prompt to be sent.
             stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name. Defaults to None.
+            conversationally (bool, optional): Use conversationally modified prompt when optimizer specified. Defaults to False.
         Returns:
-            str: Response generated
+            str or Generator[str, None, None]: The response generated.
         """
+        if self.logger:
+            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
         def for_stream():
             for response in self.ask(
                 prompt, True, optimizer=optimizer, conversationally=conversationally
             ):
-                yield self.get_message(response)
+                yield self.get_message(response)
         def for_non_stream():
-            return self.get_message(
+            return self.get_message(
                 self.ask(
                     prompt,
                     False,
@@ -192,16 +208,15 @@ class GliderAI(Provider):
             )
         return for_stream() if stream else for_non_stream()
 
-
     def get_message(self, response: dict) -> str:
-        """Retrieves message only from response"""
+        """Retrieves message only from response."""
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
 
-
 if __name__ == "__main__":
     from rich import print
-    ai = GliderAI(model="llama-3.1-70b")
+    # For testing with logging enabled
+    ai = GliderAI(model="chat-llama-3-1-70b", logging=True)
     response = ai.chat("Meaning of Life", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
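
Note the breaking change buried here: `model_aliases` is gone, so friendly names like "llama-3.1-70b" that worked in 7.0 now fail the `AVAILABLE_MODELS` check with a ValueError. A migration sketch follows; the `LEGACY_ALIASES` wrapper is hypothetical, not part of webscout, and simply mirrors the mapping this diff deletes:

# Hypothetical compatibility shim: restores the alias lookup removed in 7.2.
from webscout.Provider.Glider import GliderAI

LEGACY_ALIASES = {  # copied from the model_aliases dict deleted above
    "llama-3.1-70b": "chat-llama-3-1-70b",
    "llama-3.1-8b": "chat-llama-3-1-8b",
    "llama-3.2-3b": "chat-llama-3-2-3b",
    "deepseek-r1": "deepseek-ai/DeepSeek-R1",
}

def make_glider(model: str, **kwargs) -> GliderAI:
    """Accept a legacy alias or a canonical 7.2 model id."""
    return GliderAI(model=LEGACY_ALIASES.get(model, model), **kwargs)

ai = make_glider("llama-3.1-70b", logging=True)  # resolves to "chat-llama-3-1-70b"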
webscout/Provider/Groq.py CHANGED
@@ -16,18 +16,26 @@ class GROQ(Provider):
     """
 
     AVAILABLE_MODELS = [
-        "llama-3.1-405b-reasoning",
-        "llama-3.1-70b-versatile",
-        "llama-3.1-8b-instant",
-        "llama3-groq-70b-8192-tool-use-preview",
-        "llama3-groq-8b-8192-tool-use-preview",
-        "llama-guard-3-8b",
+        # "whisper-large-v3",
         "llama3-70b-8192",
+        "qwen-2.5-32b",
+        "qwen-2.5-coder-32b",
+        "deepseek-r1-distill-qwen-32b",
+        "deepseek-r1-distill-llama-70b",
+        "llama-3.2-3b-preview",
+        "gemma2-9b-it",
+        "llama-3.2-11b-vision-preview",
         "llama3-8b-8192",
+        "llama-3.3-70b-versatile",
+        "llama-3.2-11b-vision-preview",
+        # "distil-whisper-large-v3-en",
         "mixtral-8x7b-32768",
-        "gemma-7b-it",
-        "gemma2-9b-it",
-        "whisper-large-v3"
+        "llama-3.3-70b-specdec",
+        "llama-3.2-90b-vision-preview",
+        "llama-3.2-1b-preview",
+        # "whisper-large-v3-turbo",
+        "llama-3.1-8b-instant",
+        "llama-guard-3-8b"
     ]
 
     def __init__(
@@ -337,18 +345,22 @@ class AsyncGROQ(AsyncProvider):
     """
 
     AVAILABLE_MODELS = [
-        "llama-3.1-405b-reasoning",
-        "llama-3.1-70b-versatile",
-        "llama-3.1-8b-instant",
-        "llama3-groq-70b-8192-tool-use-preview",
-        "llama3-groq-8b-8192-tool-use-preview",
-        "llama-guard-3-8b",
+        # "whisper-large-v3",
         "llama3-70b-8192",
+        "llama-3.2-3b-preview",
+        "gemma2-9b-it",
+        "llama-3.2-11b-vision-preview",
         "llama3-8b-8192",
+        "llama-3.3-70b-versatile",
+        "deepseek-r1-distill-llama-70b",
+        # "distil-whisper-large-v3-en",
         "mixtral-8x7b-32768",
-        "gemma-7b-it",
-        "gemma2-9b-it",
-        "whisper-large-v3"
+        "llama-3.3-70b-specdec",
+        "llama-3.2-90b-vision-preview",
+        "llama-3.2-1b-preview",
+        # "whisper-large-v3-turbo",
+        "llama-3.1-8b-instant",
+        "llama-guard-3-8b"
     ]
 
     def __init__(
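
Both `GROQ` and `AsyncGROQ` drop the llama-3.1 70b/405b and tool-use-preview models and pick up llama-3.3, llama-3.2 preview, DeepSeek distill, and (sync only) Qwen 2.5 entries; the sync list even carries "llama-3.2-11b-vision-preview" twice. Code that hard-coded a 7.0 model name should validate against the class attribute instead, as in this small sketch (the preference list below is illustrative):

# Defensive model selection against the reshuffled 7.2 roster.
from webscout.Provider.Groq import GROQ

preferred = ["llama-3.1-70b-versatile", "llama-3.3-70b-versatile", "llama3-70b-8192"]
model = next((m for m in preferred if m in GROQ.AVAILABLE_MODELS), None)
if model is None:
    raise ValueError(f"No preferred model available; choose from {GROQ.AVAILABLE_MODELS}")
print(f"Using model: {model}")  # under 7.2 this picks llama-3.3-70b-versatile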