webscout 7.1__py3-none-any.whl → 7.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (154) hide show
  1. webscout/AIauto.py +191 -191
  2. webscout/AIbase.py +122 -122
  3. webscout/AIutel.py +440 -440
  4. webscout/Bard.py +343 -161
  5. webscout/DWEBS.py +489 -492
  6. webscout/Extra/YTToolkit/YTdownloader.py +995 -995
  7. webscout/Extra/YTToolkit/__init__.py +2 -2
  8. webscout/Extra/YTToolkit/transcriber.py +476 -479
  9. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  10. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  11. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  12. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  13. webscout/Extra/YTToolkit/ytapi/video.py +103 -103
  14. webscout/Extra/autocoder/__init__.py +9 -9
  15. webscout/Extra/autocoder/autocoder_utiles.py +199 -199
  16. webscout/Extra/autocoder/rawdog.py +5 -7
  17. webscout/Extra/autollama.py +230 -230
  18. webscout/Extra/gguf.py +3 -3
  19. webscout/Extra/weather.py +171 -171
  20. webscout/LLM.py +442 -442
  21. webscout/Litlogger/__init__.py +67 -681
  22. webscout/Litlogger/core/__init__.py +6 -0
  23. webscout/Litlogger/core/level.py +23 -0
  24. webscout/Litlogger/core/logger.py +166 -0
  25. webscout/Litlogger/handlers/__init__.py +12 -0
  26. webscout/Litlogger/handlers/console.py +33 -0
  27. webscout/Litlogger/handlers/file.py +143 -0
  28. webscout/Litlogger/handlers/network.py +173 -0
  29. webscout/Litlogger/styles/__init__.py +7 -0
  30. webscout/Litlogger/styles/colors.py +249 -0
  31. webscout/Litlogger/styles/formats.py +460 -0
  32. webscout/Litlogger/styles/text.py +87 -0
  33. webscout/Litlogger/utils/__init__.py +6 -0
  34. webscout/Litlogger/utils/detectors.py +154 -0
  35. webscout/Litlogger/utils/formatters.py +200 -0
  36. webscout/Provider/AISEARCH/DeepFind.py +250 -250
  37. webscout/Provider/AISEARCH/ISou.py +277 -0
  38. webscout/Provider/AISEARCH/__init__.py +2 -1
  39. webscout/Provider/Blackboxai.py +3 -3
  40. webscout/Provider/ChatGPTGratis.py +226 -0
  41. webscout/Provider/Cloudflare.py +3 -4
  42. webscout/Provider/DeepSeek.py +218 -0
  43. webscout/Provider/Deepinfra.py +40 -24
  44. webscout/Provider/Free2GPT.py +131 -124
  45. webscout/Provider/Gemini.py +100 -115
  46. webscout/Provider/Glider.py +3 -3
  47. webscout/Provider/Groq.py +5 -1
  48. webscout/Provider/Jadve.py +3 -3
  49. webscout/Provider/Marcus.py +191 -192
  50. webscout/Provider/Netwrck.py +3 -3
  51. webscout/Provider/PI.py +2 -2
  52. webscout/Provider/PizzaGPT.py +2 -3
  53. webscout/Provider/QwenLM.py +311 -0
  54. webscout/Provider/TTI/AiForce/__init__.py +22 -22
  55. webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
  56. webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
  57. webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
  58. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +206 -0
  59. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +192 -0
  60. webscout/Provider/TTI/Nexra/__init__.py +22 -22
  61. webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
  62. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
  63. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
  64. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
  65. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
  66. webscout/Provider/TTI/__init__.py +2 -1
  67. webscout/Provider/TTI/artbit/__init__.py +22 -22
  68. webscout/Provider/TTI/artbit/async_artbit.py +184 -184
  69. webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
  70. webscout/Provider/TTI/blackbox/__init__.py +4 -4
  71. webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
  72. webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
  73. webscout/Provider/TTI/deepinfra/__init__.py +4 -4
  74. webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
  75. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
  76. webscout/Provider/TTI/huggingface/__init__.py +22 -22
  77. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
  78. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
  79. webscout/Provider/TTI/imgninza/__init__.py +4 -4
  80. webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
  81. webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
  82. webscout/Provider/TTI/talkai/__init__.py +4 -4
  83. webscout/Provider/TTI/talkai/async_talkai.py +229 -229
  84. webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
  85. webscout/Provider/TTS/deepgram.py +182 -182
  86. webscout/Provider/TTS/elevenlabs.py +136 -136
  87. webscout/Provider/TTS/gesserit.py +150 -150
  88. webscout/Provider/TTS/murfai.py +138 -138
  89. webscout/Provider/TTS/parler.py +133 -134
  90. webscout/Provider/TTS/streamElements.py +360 -360
  91. webscout/Provider/TTS/utils.py +280 -280
  92. webscout/Provider/TTS/voicepod.py +116 -116
  93. webscout/Provider/TextPollinationsAI.py +28 -8
  94. webscout/Provider/WiseCat.py +193 -0
  95. webscout/Provider/__init__.py +146 -134
  96. webscout/Provider/cerebras.py +242 -227
  97. webscout/Provider/chatglm.py +204 -204
  98. webscout/Provider/dgaf.py +2 -3
  99. webscout/Provider/freeaichat.py +221 -0
  100. webscout/Provider/gaurish.py +2 -3
  101. webscout/Provider/geminiapi.py +208 -208
  102. webscout/Provider/granite.py +223 -0
  103. webscout/Provider/hermes.py +218 -218
  104. webscout/Provider/llama3mitril.py +179 -179
  105. webscout/Provider/llamatutor.py +3 -3
  106. webscout/Provider/llmchat.py +2 -3
  107. webscout/Provider/meta.py +794 -794
  108. webscout/Provider/multichat.py +331 -331
  109. webscout/Provider/typegpt.py +359 -359
  110. webscout/Provider/yep.py +3 -3
  111. webscout/__init__.py +1 -0
  112. webscout/__main__.py +5 -5
  113. webscout/cli.py +319 -319
  114. webscout/conversation.py +241 -242
  115. webscout/exceptions.py +328 -328
  116. webscout/litagent/__init__.py +28 -28
  117. webscout/litagent/agent.py +2 -3
  118. webscout/litprinter/__init__.py +0 -58
  119. webscout/scout/__init__.py +8 -8
  120. webscout/scout/core.py +884 -884
  121. webscout/scout/element.py +459 -459
  122. webscout/scout/parsers/__init__.py +69 -69
  123. webscout/scout/parsers/html5lib_parser.py +172 -172
  124. webscout/scout/parsers/html_parser.py +236 -236
  125. webscout/scout/parsers/lxml_parser.py +178 -178
  126. webscout/scout/utils.py +38 -38
  127. webscout/swiftcli/__init__.py +811 -811
  128. webscout/update_checker.py +2 -12
  129. webscout/version.py +1 -1
  130. webscout/webscout_search.py +87 -6
  131. webscout/webscout_search_async.py +58 -1
  132. webscout/yep_search.py +297 -0
  133. webscout/zeroart/__init__.py +54 -54
  134. webscout/zeroart/base.py +60 -60
  135. webscout/zeroart/effects.py +99 -99
  136. webscout/zeroart/fonts.py +816 -816
  137. {webscout-7.1.dist-info → webscout-7.3.dist-info}/METADATA +62 -22
  138. webscout-7.3.dist-info/RECORD +223 -0
  139. {webscout-7.1.dist-info → webscout-7.3.dist-info}/WHEEL +1 -1
  140. webstoken/__init__.py +30 -30
  141. webstoken/classifier.py +189 -189
  142. webstoken/keywords.py +216 -216
  143. webstoken/language.py +128 -128
  144. webstoken/ner.py +164 -164
  145. webstoken/normalizer.py +35 -35
  146. webstoken/processor.py +77 -77
  147. webstoken/sentiment.py +206 -206
  148. webstoken/stemmer.py +73 -73
  149. webstoken/tagger.py +60 -60
  150. webstoken/tokenizer.py +158 -158
  151. webscout-7.1.dist-info/RECORD +0 -198
  152. {webscout-7.1.dist-info → webscout-7.3.dist-info}/LICENSE.md +0 -0
  153. {webscout-7.1.dist-info → webscout-7.3.dist-info}/entry_points.txt +0 -0
  154. {webscout-7.1.dist-info → webscout-7.3.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,277 @@
1
+ import requests
2
+ import json
3
+ import re
4
+ from typing import Dict, Optional, Generator, Union, Any
5
+ from webscout import LitAgent
6
+ from webscout import exceptions
7
+ from webscout.AIbase import AISearch
8
+
9
+
10
class Response:
    """Lightweight wrapper around a chunk of Isou API response text.

    Printing an instance, or converting it with ``str()``/``repr()``,
    yields the wrapped text directly, so callers can treat it like a
    plain string.

    Attributes:
        text (str): The text content of the response

    Example:
        >>> chunk = Response("Hello, world!")
        >>> print(chunk)
        Hello, world!
        >>> str(chunk)
        'Hello, world!'
    """

    def __init__(self, text: str):
        # Store the raw text content of this response chunk.
        self.text = text

    def __str__(self) -> str:
        return self.text

    def __repr__(self) -> str:
        return self.text
34
class Isou(AISearch):
    """A class to interact with the Isou AI search API.

    Isou provides a search interface that returns AI-generated responses
    based on web content. It supports both streaming and non-streaming
    responses.

    Basic Usage:
        >>> from webscout import Isou
        >>> ai = Isou()
        >>> # Non-streaming example
        >>> response = ai.search("What is Python?")
        >>> print(response)
        Python is a high-level programming language...

        >>> # Streaming example
        >>> for chunk in ai.search("Tell me about AI", stream=True):
        ...     print(chunk, end="", flush=True)
        Artificial Intelligence is...

        >>> # Raw response format
        >>> for chunk in ai.search("Hello", stream=True, raw=True):
        ...     print(chunk)
        {'text': 'Hello', 'links': ['http://...']}

    Args:
        timeout (int, optional): Request timeout in seconds. Defaults to 120.
        proxies (dict, optional): Proxy configuration for requests. Defaults to None.
        model (str, optional): Model to use for search. Defaults to DeepSeek-R1.
        logging (bool, optional): Enable logging. Defaults to False.
    """

    def __init__(
        self,
        timeout: int = 120,
        proxies: Optional[dict] = None,
        model: str = "siliconflow:deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
        logging: bool = False,
    ):
        """Initialize the Isou API client.

        Args:
            timeout (int, optional): Request timeout in seconds. Defaults to 120.
            proxies (dict, optional): Proxy configuration for requests. Defaults to None.
            model (str, optional): Model to use for search. Defaults to DeepSeek-R1.
            logging (bool, optional): Enable logging. Defaults to False.

        Raises:
            ValueError: If *model* is not one of the supported models.
        """
        self.available_models = [
            "siliconflow:deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
            "siliconflow:Qwen/Qwen2.5-72B-Instruct-128K",
            "deepseek-reasoner",
        ]

        if model not in self.available_models:
            raise ValueError(
                f"Invalid model: {model}. Choose from: {self.available_models}"
            )

        self.session = requests.Session()
        self.api_endpoint = "https://isou.chat/api/search"
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        # The first two models are served by the "siliconflow" provider;
        # anything else (deepseek-reasoner) goes through "deepseek".
        self.provider = "siliconflow" if model in self.available_models[:2] else "deepseek"
        self.mode = "simple"  # or "deep"
        self.categories = "general"  # or "science"
        self.reload = False

        # Browser-like headers; the API serves Server-Sent Events.
        self.headers = {
            "accept": "text/event-stream",
            "pragma": "no-cache",
            "referer": "https://isou.chat/search?q=hi",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "application/json",
            "sec-ch-ua": '"Not(A:Brand";v="99", "Brave";v="133", "Chromium";v="133"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "sec-gpc": "1",
            "user-agent": LitAgent().random(),
        }
        self.session.headers.update(self.headers)
        self.proxies = proxies

        # Initialize logger if enabled (imported lazily so the logging
        # stack is only pulled in when requested).
        if logging:
            from webscout.Litlogger import Logger, ConsoleHandler
            from webscout.Litlogger.core.level import LogLevel

            console_handler = ConsoleHandler(
                level=LogLevel.DEBUG,
            )

            self.logger = Logger(
                name="Isou",
                level=LogLevel.DEBUG,
                handlers=[console_handler],
            )
            self.logger.info("Isou initialized successfully ✨")
        else:
            self.logger = None

    def search(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
    ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
        """Search using the Isou API and get AI-generated responses.

        Args:
            prompt (str): The search query or prompt to send to the API.
            stream (bool, optional): If True, yields response chunks as they arrive.
                If False, returns the complete response. Defaults to False.
            raw (bool, optional): If True, returns raw response dictionaries.
                If False, returns Response objects. Defaults to False.

        Returns:
            Union[Dict[str, Any], Response, Generator]:
                - If stream=False and raw=False: a complete ``Response``.
                - If stream=False and raw=True: a dict with ``text`` and ``links``.
                - If stream=True: a generator yielding chunks as they arrive.

        Raises:
            APIConnectionError: If the API request fails.
        """
        # Re-derive the provider in case self.model was changed after init.
        self.provider = "siliconflow" if self.model in self.available_models[:2] else "deepseek"

        payload = {
            "categories": [self.categories],
            "engine": "SEARXNG",
            "language": "all",
            "mode": self.mode,
            "model": self.model,
            "provider": self.provider,
            "reload": self.reload,
            "stream": stream,
        }
        params = {"q": prompt}

        def for_stream() -> Generator[Dict[str, Any], None, None]:
            # Stream SSE lines, yielding only the newly-arrived text delta
            # of each event. ``full_text`` tracks what was already emitted.
            full_text = ""
            links = []
            try:
                with self.session.post(
                    self.api_endpoint,
                    params=params,
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
                ) as response:
                    response.raise_for_status()
                    for line in response.iter_lines(chunk_size=self.stream_chunk_size, decode_unicode=True):
                        if not line or not line.startswith('data:'):
                            continue
                        try:
                            data = json.loads(line[5:].strip())

                            # Some events wrap the payload as a JSON string
                            # under a 'data' key; flatten it if so.
                            if 'data' in data and isinstance(data['data'], str):
                                try:
                                    nested_data = json.loads(data['data'])
                                    data.update(nested_data)
                                except json.JSONDecodeError:
                                    pass

                            # 'content' carries the cumulative answer text;
                            # yield only the portion we have not seen yet.
                            if 'content' in data:
                                content = data['content']
                                if isinstance(content, str):
                                    delta = content[len(full_text):]
                                    full_text = content

                                    if raw:
                                        # Copy links so each chunk carries a
                                        # stable snapshot, not a shared list.
                                        yield {"text": delta, "links": list(links)}
                                    else:
                                        yield Response(delta)

                            # Accumulate reference links as they arrive.
                            if 'links' in data and isinstance(data['links'], list):
                                links.extend(data['links'])

                        except json.JSONDecodeError:
                            continue

            except requests.exceptions.RequestException as e:
                raise exceptions.APIConnectionError(f"Request failed: {e}")

        def for_non_stream():
            # Drain the stream and aggregate it into a single result.
            # NOTE: this must *return* (never yield) so callers get a
            # concrete value rather than a generator object — the previous
            # implementation mixed ``yield`` and ``return`` which made the
            # whole function a generator.
            full_text = ""
            all_links: list = []
            for chunk in for_stream():
                if isinstance(chunk, dict):
                    full_text += chunk.get("text", "")
                    # Each raw chunk holds the cumulative link snapshot;
                    # keep the most recent one instead of extending.
                    all_links = list(chunk.get("links", all_links))
                else:
                    full_text += str(chunk)

            if raw:
                return {"text": full_text, "links": all_links}

            self.last_response = Response(full_text)
            return self.last_response

        return for_stream() if stream else for_non_stream()

    @staticmethod
    def format_response(text: str, links: list) -> str:
        """Format the response text with numbered citations and a link list.

        Args:
            text (str): The response text with citation markers
            links (list): List of reference links

        Returns:
            str: Formatted text with numbered citations and link list
        """
        # Collapse whitespace runs into single spaces.
        text = re.sub(r'\s+', ' ', text).strip()

        # Replace citation markers (e.g. '[[citation:1]]') with '[1]'.
        link_map = {f"citation:{i}]]": f"[{i}]" for i, _ in enumerate(links, start=1)}
        for key, value in link_map.items():
            text = text.replace(key, value)
        # The substitution above leaves the leading '[[' in place; fold
        # the resulting '[[[' back down to a single bracket.
        text = text.replace("[[[", "[")

        # Format the numbered link list.
        link_list = "\n".join([f"{i}. {link}" for i, link in enumerate(links, start=1)])

        return f"{text}\n\nLinks:\n{link_list}"
264
+
265
if __name__ == "__main__":
    from rich import print

    # Manual smoke test: stream a search answer for a query typed on stdin.
    searcher = Isou(
        model="siliconflow:deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
        logging=False,
    )

    for piece in searcher.search(input(">>> "), stream=True, raw=False):
        print(piece, end="", flush=True)
277
+
@@ -1,2 +1,3 @@
1
1
  from .felo_search import Felo
2
- from .DeepFind import DeepFind
2
+ from .DeepFind import DeepFind
3
+ from .ISou import Isou
@@ -4,7 +4,7 @@ from typing import Any, Dict, Optional, Union, Generator, List
4
4
  from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
5
5
  from webscout.AIbase import Provider
6
6
  from webscout import exceptions
7
- from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
7
+ from webscout.Litlogger import Logger, LogFormat
8
8
 
9
9
  class BLACKBOXAI(Provider):
10
10
  """
@@ -46,10 +46,10 @@ class BLACKBOXAI(Provider):
46
46
  system_message: str = "You are a helpful AI assistant."
47
47
  ):
48
48
  """Initialize BlackboxAI with enhanced configuration options."""
49
- self.logger = LitLogger(
49
+ self.logger = Logger(
50
50
  name="BlackboxAI",
51
51
  format=LogFormat.MODERN_EMOJI,
52
- color_scheme=ColorScheme.CYBERPUNK
52
+
53
53
  ) if logging else None
54
54
 
55
55
  self.session = requests.Session()
@@ -0,0 +1,226 @@
1
+ from typing import Any, Dict, Generator, Optional
2
+ import requests
3
+ import json
4
+
5
+ from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
6
+ from webscout.AIbase import Provider
7
+ from webscout import exceptions
8
+ from webscout.Litlogger import Logger, LogFormat
9
+ from webscout import LitAgent as Lit
10
+
11
+
12
class ChatGPTGratis(Provider):
    """
    A class to interact with the chatgptgratis.eu backend API with logging
    and real-time streaming.
    """

    AVAILABLE_MODELS = [
        "Meta-Llama-3.2-1B-Instruct",
        "Meta-Llama-3.2-3B-Instruct",
        "Meta-Llama-3.1-8B-Instruct",
        "Meta-Llama-3.1-70B-Instruct",
        "Meta-Llama-3.1-405B-Instruct",
        "gpt4o",
    ]

    def __init__(
        self,
        model: str = "gpt4o",
        timeout: int = 30,
        logging: bool = False,
        proxies: Optional[Dict[str, str]] = None,
        intro: Optional[str] = None,
        filepath: Optional[str] = None,
        update_file: bool = True,
        history_offset: int = 10250,
        act: Optional[str] = None,
    ) -> None:
        """
        Initializes the ChatGPTGratis client.

        Args:
            model: Model name; must be one of AVAILABLE_MODELS.
            timeout: Request timeout in seconds.
            logging: Enable logger output.
            proxies: Optional proxy configuration for requests.
            intro: Optional conversation intro/system text.
            filepath: Optional path for persisting conversation history.
            update_file: Whether to write history updates to *filepath*.
            history_offset: Maximum history characters to retain.
            act: Optional AwesomePrompts persona key.

        Raises:
            ValueError: If *model* is not a supported model.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.logger = Logger(
            name="ChatGPTGratis",
            format=LogFormat.MODERN_EMOJI,
        ) if logging else None

        if self.logger:
            self.logger.info(f"Initializing ChatGPTGratis with model: {model}")

        self.session = requests.Session()
        self.timeout = timeout
        self.api_endpoint = "https://chatgptgratis.eu/backend/chat.php"
        self.model = model

        # Set up headers similar to a browser request with dynamic User-Agent
        self.headers = {
            "Accept": "*/*",
            "Content-Type": "application/json",
            "Origin": "https://chatgptgratis.eu",
            "Referer": "https://chatgptgratis.eu/chat.html",
            "User-Agent": Lit().random(),
        }
        self.session.headers.update(self.headers)
        self.session.proxies = proxies or {}

        # Set up conversation history and prompts
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            True, 8096, filepath, update_file
        )
        self.conversation.history_offset = history_offset

        if self.logger:
            self.logger.info("ChatGPTGratis initialized successfully.")

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: Optional[str] = None,
        conversationally: bool = False,
    ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
        """
        Sends a request to the API and returns the response.

        Args:
            prompt: The user prompt.
            stream: If True, yields response chunks as they are received.
            raw: If True, yields plain text chunks instead of dicts.
            optimizer: Optional name of an Optimizers method to apply.
            conversationally: Apply the optimizer to the full conversation
                prompt instead of the bare prompt.

        Returns:
            A dict ``{"text": ...}`` (non-streaming) or a generator of
            chunks (streaming).

        Raises:
            Exception: If *optimizer* is not a valid Optimizers method.
            FailedToGenerateResponseError: If the API request fails.
        """
        if self.logger:
            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")

        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            # Materialize as a list: the membership test below must not
            # consume it (a generator here would be exhausted by `in`,
            # leaving the error message with an empty list).
            available_opts = [
                method for method in dir(Optimizers)
                if callable(getattr(Optimizers, method)) and not method.startswith("__")
            ]
            if optimizer in available_opts:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
                if self.logger:
                    self.logger.debug(f"Applied optimizer: {optimizer}")
            else:
                if self.logger:
                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
                raise Exception(f"Optimizer is not one of {available_opts}")

        payload = {
            "message": conversation_prompt,
            "model": self.model,
        }

        def for_stream() -> Generator[Dict[str, Any], None, None]:
            # Stream the SSE response line by line, yielding each delta.
            if self.logger:
                self.logger.debug("Initiating streaming request to API")
            response = self.session.post(
                self.api_endpoint,
                json=payload,
                stream=True,
                timeout=self.timeout,
            )
            if not response.ok:
                if self.logger:
                    self.logger.error(
                        f"API request failed. Status: {response.status_code}, Reason: {response.reason}"
                    )
                raise exceptions.FailedToGenerateResponseError(
                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )
            if self.logger:
                self.logger.info(f"API connection established. Status: {response.status_code}")

            full_response = ""
            for line in response.iter_lines():
                if line:
                    line_decoded = line.decode('utf-8').strip()
                    if line_decoded == "data: [DONE]":
                        if self.logger:
                            self.logger.debug("Stream completed.")
                        break
                    if line_decoded.startswith("data: "):
                        try:
                            json_data = json.loads(line_decoded[6:])
                            # OpenAI-style shape: choices[0].delta.content
                            choices = json_data.get("choices", [])
                            if choices and "delta" in choices[0]:
                                content = choices[0]["delta"].get("content", "")
                            else:
                                content = ""
                            full_response += content
                            yield content if raw else {"text": content}
                        except json.JSONDecodeError as e:
                            if self.logger:
                                self.logger.error(f"JSON parsing error: {str(e)}")
                            continue
            # Persist the exchange into the conversation history.
            self.conversation.update_chat_history(prompt, self.get_message({"text": full_response}))
            if self.logger:
                self.logger.debug("Response processing completed.")

        def for_non_stream() -> Dict[str, Any]:
            # Drain the stream and return the concatenated text.
            if self.logger:
                self.logger.debug("Processing non-streaming request")
            collected = ""
            for chunk in for_stream():
                collected += chunk["text"] if isinstance(chunk, dict) else chunk
            return {"text": collected}

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: Optional[str] = None,
        conversationally: bool = False,
    ) -> str | Generator[str, None, None]:
        """
        Returns the response as a string.
        For streaming requests, yields each response chunk as a string.
        """
        if self.logger:
            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")

        def stream_response() -> Generator[str, None, None]:
            for response in self.ask(
                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def non_stream_response() -> str:
            return self.get_message(self.ask(
                prompt, stream=False, optimizer=optimizer, conversationally=conversationally
            ))

        return stream_response() if stream else non_stream_response()

    def get_message(self, response: dict) -> str:
        """
        Extracts and returns the text message from the response dictionary.
        """
        assert isinstance(response, dict), "Response must be a dictionary."
        return response.get("text", "")
213
+
214
+
215
+ if __name__ == "__main__":
216
+ from rich import print
217
+
218
+ # Create an instance of the ChatGPTGratis with logging enabled for testing.
219
+ client = ChatGPTGratis(
220
+ model="Meta-Llama-3.2-1B-Instruct",
221
+ logging=False
222
+ )
223
+ prompt_input = input(">>> ")
224
+ response = client.chat(prompt_input, stream=True)
225
+ for chunk in response:
226
+ print(chunk, end="", flush=True)
@@ -9,12 +9,12 @@ from webscout import exceptions
9
9
  from typing import Any, AsyncGenerator, Dict
10
10
  import cloudscraper
11
11
  from webscout import LitAgent
12
- from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
12
+ from webscout.Litlogger import Logger, LogFormat
13
13
 
14
14
  class Cloudflare(Provider):
15
15
  """
16
16
  Cloudflare provider to interact with Cloudflare's text generation API.
17
- Includes logging capabilities using LitLogger and uses LitAgent for user-agent.
17
+ Includes logging capabilities using Logger and uses LitAgent for user-agent.
18
18
  """
19
19
 
20
20
  # Updated AVAILABLE_MODELS from given JSON data
@@ -145,10 +145,9 @@ class Cloudflare(Provider):
145
145
  self.conversation.history_offset = history_offset
146
146
 
147
147
  # Initialize logger if logging is enabled
148
- self.logger = LitLogger(
148
+ self.logger = Logger(
149
149
  name="Cloudflare",
150
150
  format=LogFormat.MODERN_EMOJI,
151
- color_scheme=ColorScheme.CYBERPUNK
152
151
  ) if logging else None
153
152
 
154
153
  if self.logger: