webscout 7.4__py3-none-any.whl → 7.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (137)
  1. webscout/AIauto.py +5 -53
  2. webscout/AIutel.py +8 -318
  3. webscout/DWEBS.py +460 -489
  4. webscout/Extra/YTToolkit/YTdownloader.py +14 -53
  5. webscout/Extra/YTToolkit/transcriber.py +12 -13
  6. webscout/Extra/YTToolkit/ytapi/video.py +0 -1
  7. webscout/Extra/__init__.py +0 -1
  8. webscout/Extra/autocoder/autocoder_utiles.py +0 -4
  9. webscout/Extra/autocoder/rawdog.py +13 -41
  10. webscout/Extra/gguf.py +652 -428
  11. webscout/Extra/weather.py +178 -156
  12. webscout/Extra/weather_ascii.py +70 -17
  13. webscout/Litlogger/core/logger.py +1 -2
  14. webscout/Litlogger/handlers/file.py +1 -1
  15. webscout/Litlogger/styles/formats.py +0 -2
  16. webscout/Litlogger/utils/detectors.py +0 -1
  17. webscout/Provider/AISEARCH/DeepFind.py +0 -1
  18. webscout/Provider/AISEARCH/ISou.py +1 -1
  19. webscout/Provider/AISEARCH/felo_search.py +0 -1
  20. webscout/Provider/AllenAI.py +24 -9
  21. webscout/Provider/C4ai.py +432 -0
  22. webscout/Provider/ChatGPTGratis.py +24 -56
  23. webscout/Provider/Cloudflare.py +18 -21
  24. webscout/Provider/DeepSeek.py +27 -48
  25. webscout/Provider/Deepinfra.py +129 -53
  26. webscout/Provider/Gemini.py +1 -1
  27. webscout/Provider/GithubChat.py +362 -0
  28. webscout/Provider/Glider.py +25 -8
  29. webscout/Provider/HF_space/qwen_qwen2.py +2 -2
  30. webscout/Provider/HeckAI.py +38 -5
  31. webscout/Provider/HuggingFaceChat.py +462 -0
  32. webscout/Provider/Jadve.py +20 -5
  33. webscout/Provider/Marcus.py +7 -50
  34. webscout/Provider/Netwrck.py +43 -67
  35. webscout/Provider/PI.py +4 -2
  36. webscout/Provider/Perplexitylabs.py +26 -6
  37. webscout/Provider/Phind.py +29 -3
  38. webscout/Provider/PizzaGPT.py +10 -51
  39. webscout/Provider/TTI/AiForce/async_aiforce.py +4 -37
  40. webscout/Provider/TTI/AiForce/sync_aiforce.py +41 -38
  41. webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -9
  42. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +206 -206
  43. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +192 -192
  44. webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
  45. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
  46. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
  47. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +5 -24
  48. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +2 -22
  49. webscout/Provider/TTI/__init__.py +2 -3
  50. webscout/Provider/TTI/aiarta/__init__.py +2 -0
  51. webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
  52. webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
  53. webscout/Provider/TTI/fastflux/__init__.py +22 -0
  54. webscout/Provider/TTI/fastflux/async_fastflux.py +257 -0
  55. webscout/Provider/TTI/fastflux/sync_fastflux.py +247 -0
  56. webscout/Provider/TTS/__init__.py +2 -2
  57. webscout/Provider/TTS/deepgram.py +12 -39
  58. webscout/Provider/TTS/elevenlabs.py +14 -40
  59. webscout/Provider/TTS/gesserit.py +11 -35
  60. webscout/Provider/TTS/murfai.py +13 -39
  61. webscout/Provider/TTS/parler.py +17 -40
  62. webscout/Provider/TTS/speechma.py +180 -0
  63. webscout/Provider/TTS/streamElements.py +17 -44
  64. webscout/Provider/TextPollinationsAI.py +39 -59
  65. webscout/Provider/Venice.py +217 -200
  66. webscout/Provider/WiseCat.py +27 -5
  67. webscout/Provider/Youchat.py +63 -36
  68. webscout/Provider/__init__.py +13 -8
  69. webscout/Provider/akashgpt.py +28 -10
  70. webscout/Provider/copilot.py +416 -0
  71. webscout/Provider/flowith.py +196 -0
  72. webscout/Provider/freeaichat.py +32 -45
  73. webscout/Provider/granite.py +17 -53
  74. webscout/Provider/koala.py +20 -5
  75. webscout/Provider/llamatutor.py +7 -47
  76. webscout/Provider/llmchat.py +36 -53
  77. webscout/Provider/multichat.py +92 -98
  78. webscout/Provider/talkai.py +1 -0
  79. webscout/Provider/turboseek.py +3 -0
  80. webscout/Provider/tutorai.py +2 -0
  81. webscout/Provider/typegpt.py +154 -64
  82. webscout/Provider/x0gpt.py +3 -1
  83. webscout/Provider/yep.py +102 -20
  84. webscout/__init__.py +3 -0
  85. webscout/cli.py +4 -40
  86. webscout/conversation.py +1 -10
  87. webscout/exceptions.py +19 -9
  88. webscout/litagent/__init__.py +2 -2
  89. webscout/litagent/agent.py +351 -20
  90. webscout/litagent/constants.py +34 -5
  91. webscout/litprinter/__init__.py +0 -3
  92. webscout/models.py +181 -0
  93. webscout/optimizers.py +1 -1
  94. webscout/prompt_manager.py +2 -8
  95. webscout/scout/core/scout.py +1 -4
  96. webscout/scout/core/search_result.py +1 -1
  97. webscout/scout/core/text_utils.py +1 -1
  98. webscout/scout/core.py +2 -5
  99. webscout/scout/element.py +1 -1
  100. webscout/scout/parsers/html_parser.py +1 -1
  101. webscout/scout/utils.py +0 -1
  102. webscout/swiftcli/__init__.py +1 -3
  103. webscout/tempid.py +1 -1
  104. webscout/update_checker.py +55 -95
  105. webscout/version.py +1 -1
  106. webscout/webscout_search_async.py +1 -2
  107. webscout/yep_search.py +297 -297
  108. webscout-7.6.dist-info/LICENSE.md +146 -0
  109. {webscout-7.4.dist-info → webscout-7.6.dist-info}/METADATA +104 -514
  110. {webscout-7.4.dist-info → webscout-7.6.dist-info}/RECORD +113 -120
  111. webscout/Extra/autollama.py +0 -231
  112. webscout/Local/__init__.py +0 -10
  113. webscout/Local/_version.py +0 -3
  114. webscout/Local/formats.py +0 -747
  115. webscout/Local/model.py +0 -1368
  116. webscout/Local/samplers.py +0 -125
  117. webscout/Local/thread.py +0 -539
  118. webscout/Local/ui.py +0 -401
  119. webscout/Local/utils.py +0 -388
  120. webscout/Provider/Amigo.py +0 -274
  121. webscout/Provider/Bing.py +0 -243
  122. webscout/Provider/DiscordRocks.py +0 -253
  123. webscout/Provider/TTI/blackbox/__init__.py +0 -4
  124. webscout/Provider/TTI/blackbox/async_blackbox.py +0 -212
  125. webscout/Provider/TTI/blackbox/sync_blackbox.py +0 -199
  126. webscout/Provider/TTI/deepinfra/__init__.py +0 -4
  127. webscout/Provider/TTI/deepinfra/async_deepinfra.py +0 -227
  128. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +0 -199
  129. webscout/Provider/TTI/imgninza/__init__.py +0 -4
  130. webscout/Provider/TTI/imgninza/async_ninza.py +0 -214
  131. webscout/Provider/TTI/imgninza/sync_ninza.py +0 -209
  132. webscout/Provider/TTS/voicepod.py +0 -117
  133. webscout/Provider/dgaf.py +0 -214
  134. webscout-7.4.dist-info/LICENSE.md +0 -211
  135. {webscout-7.4.dist-info → webscout-7.6.dist-info}/WHEEL +0 -0
  136. {webscout-7.4.dist-info → webscout-7.6.dist-info}/entry_points.txt +0 -0
  137. {webscout-7.4.dist-info → webscout-7.6.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,432 @@
1
+ import requests
2
+ import uuid
3
+ import json
4
+ import time
5
+ import random
6
+ import re
7
+ from typing import Any, Dict, List, Optional, Union, Generator
8
+
9
+ from webscout.AIutel import Conversation
10
+ from webscout.AIbase import Provider
11
+ from webscout import exceptions
12
+ from webscout import LitAgent
13
+
14
+ class C4ai(Provider):
15
+ """
16
+ A class to interact with Cohere's C4AI Command models via the CohereForAI Hugging Face Space API.
17
+ """
18
+ # Default available models
19
+ AVAILABLE_MODELS = [
20
+ 'command-a-03-2025',
21
+ 'command-r-plus-08-2024',
22
+ 'command-r-08-2024',
23
+ 'command-r-plus',
24
+ 'command-r',
25
+ 'command-r7b-12-2024',
26
+ 'command-r7b-arabic-02-2025'
27
+ ] # Placeholder for available models; it will be updated in the constructor
28
+
29
+ def __repr__(self) -> str:
30
+ return f"C4ai({self.model})"
31
+
32
+ def __init__(
33
+ self,
34
+ is_conversation: bool = True,
35
+ max_tokens: int = 2000,
36
+ timeout: int = 60,
37
+ filepath: str = None,
38
+ update_file: bool = True,
39
+ proxies: dict = {},
40
+ model: str = "command-a-03-2025",
41
+ system_prompt: str = "You are a helpful assistant.",
42
+ ):
43
+ """Initialize the C4ai client."""
44
+ self.url = "https://cohereforai-c4ai-command.hf.space"
45
+ self.session = requests.Session()
46
+ self.session.proxies.update(proxies)
47
+
48
+ # Set up headers for all requests
49
+ self.headers = {
50
+ "Content-Type": "application/json",
51
+ "User-Agent": LitAgent().random(),
52
+ "Accept": "*/*",
53
+ "Accept-Encoding": "gzip, deflate, br, zstd",
54
+ "Accept-Language": "en-US,en;q=0.9",
55
+ "Origin": "https://cohereforai-c4ai-command.hf.space",
56
+ "Referer": "https://cohereforai-c4ai-command.hf.space/",
57
+ "Sec-Ch-Ua": "\"Chromium\";v=\"120\"",
58
+ "Sec-Ch-Ua-Mobile": "?0",
59
+ "Sec-Ch-Ua-Platform": "\"Windows\"",
60
+ "Sec-Fetch-Dest": "empty",
61
+ "Sec-Fetch-Mode": "cors",
62
+ "Sec-Fetch-Site": "same-origin",
63
+ "DNT": "1",
64
+ "Priority": "u=1, i"
65
+ }
66
+
67
+ # Update available models
68
+ self.update_available_models()
69
+
70
+ # Set default model if none provided
71
+ self.model = model
72
+ # Provider settings
73
+ self.is_conversation = is_conversation
74
+ self.max_tokens_to_sample = max_tokens
75
+ self.timeout = timeout
76
+ self.last_response = {}
77
+
78
+ # Initialize a simplified conversation history for file saving only
79
+ self.conversation = Conversation(is_conversation, max_tokens, filepath, update_file)
80
+
81
+ # Store conversation data for different models
82
+ self._conversation_data = {}
83
+ self.preprompt = system_prompt
84
+
85
+ def update_available_models(self):
86
+ """Update the available models list from HuggingFace"""
87
+ try:
88
+ models = self.get_models()
89
+ if models and len(models) > 0:
90
+ self.AVAILABLE_MODELS = models
91
+ except Exception:
92
+ # Fallback to default models list if fetching fails
93
+ pass
94
+
95
+ @classmethod
96
+ def get_models(cls):
97
+ """Fetch available models from HuggingFace."""
98
+ try:
99
+ response = requests.get("https://cohereforai-c4ai-command.hf.space/")
100
+ text = response.text
101
+ models_match = re.search(r'models:(\[.+?\]),oldModels:', text)
102
+
103
+ if not models_match:
104
+ return cls.AVAILABLE_MODELS
105
+
106
+ models_text = models_match.group(1)
107
+ models_text = re.sub(r',parameters:{[^}]+?}', '', models_text)
108
+ models_text = models_text.replace('void 0', 'null')
109
+
110
+ def add_quotation_mark(match):
111
+ return f'{match.group(1)}"{match.group(2)}":'
112
+
113
+ models_text = re.sub(r'([{,])([A-Za-z0-9_]+?):', add_quotation_mark, models_text)
114
+
115
+ models_data = json.loads(models_text)
116
+ # print([model["id"] for model in models_data])
117
+ return [model["id"] for model in models_data]
118
+ except Exception:
119
+ return cls.AVAILABLE_MODELS
120
+
121
+ def create_conversation(self, model: str):
122
+ """Create a new conversation with the specified model."""
123
+ url = "https://cohereforai-c4ai-command.hf.space/conversation"
124
+ payload = {"model": model, "preprompt": self.preprompt,}
125
+
126
+ # Update referer for this specific request
127
+ headers = self.headers.copy()
128
+ headers["Referer"] = f"https://cohereforai-c4ai-command.hf.space/"
129
+
130
+ try:
131
+ response = self.session.post(url, json=payload, headers=headers)
132
+
133
+ if response.status_code == 401:
134
+ raise exceptions.AuthenticationError("Authentication failed.")
135
+
136
+ # Handle other error codes
137
+ if response.status_code != 200:
138
+ return None
139
+
140
+ data = response.json()
141
+ conversation_id = data.get("conversationId")
142
+
143
+ # Store conversation data
144
+ if model not in self._conversation_data:
145
+ self._conversation_data[model] = {
146
+ "conversationId": conversation_id,
147
+ "messageId": str(uuid.uuid4()) # Initial message ID
148
+ }
149
+
150
+ return conversation_id
151
+ except requests.exceptions.RequestException:
152
+ return None
153
+
154
+ def fetch_message_id(self, conversation_id: str) -> str:
155
+ """Fetch the latest message ID for a conversation."""
156
+ try:
157
+ url = f"https://cohereforai-c4ai-command.hf.space/conversation/{conversation_id}/__data.json?x-sveltekit-invalidated=11"
158
+ response = self.session.get(url, headers=self.headers)
159
+ response.raise_for_status()
160
+
161
+ # Parse the JSON data from the response
162
+ json_data = None
163
+ for line in response.text.split('\n'):
164
+ if line.strip():
165
+ try:
166
+ parsed = json.loads(line)
167
+ if isinstance(parsed, dict) and "nodes" in parsed:
168
+ json_data = parsed
169
+ break
170
+ except json.JSONDecodeError:
171
+ continue
172
+
173
+ if not json_data:
174
+ # Fall back to a UUID if we can't parse the response
175
+ return str(uuid.uuid4())
176
+
177
+ # Extract message ID using the same pattern as in the example
178
+ if json_data.get("nodes", []) and json_data["nodes"][-1].get("type") == "error":
179
+ return str(uuid.uuid4())
180
+
181
+ data = json_data["nodes"][1]["data"]
182
+ keys = data[data[0]["messages"]]
183
+ message_keys = data[keys[-1]]
184
+ message_id = data[message_keys["id"]]
185
+
186
+ return message_id
187
+
188
+ except Exception:
189
+ # Fall back to a UUID if there's an error
190
+ return str(uuid.uuid4())
191
+
192
+ def generate_boundary(self):
193
+ """Generate a random boundary for multipart/form-data requests"""
194
+ boundary_chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
195
+ boundary = "----WebKitFormBoundary"
196
+ boundary += "".join(random.choice(boundary_chars) for _ in range(16))
197
+ return boundary
198
+
199
+ def process_response(self, response, prompt: str):
200
+ """Process streaming response and extract content."""
201
+ full_text = ""
202
+ sources = None
203
+ reasoning_text = ""
204
+ has_reasoning = False
205
+
206
+ for line in response.iter_lines(decode_unicode=True):
207
+ if not line:
208
+ continue
209
+
210
+ try:
211
+ # Parse each line as JSON
212
+ data = json.loads(line)
213
+
214
+ # Handle different response types
215
+ if "type" not in data:
216
+ continue
217
+
218
+ if data["type"] == "stream" and "token" in data:
219
+ token = data["token"].replace("\u0000", "")
220
+ full_text += token
221
+ resp = {"text": token}
222
+ yield resp
223
+ elif data["type"] == "finalAnswer":
224
+ final_text = data.get("text", "")
225
+ if final_text and not full_text:
226
+ full_text = final_text
227
+ resp = {"text": final_text}
228
+ yield resp
229
+ elif data["type"] == "webSearch" and "sources" in data:
230
+ sources = data["sources"]
231
+ elif data["type"] == "reasoning":
232
+ has_reasoning = True
233
+ if data.get("subtype") == "stream" and "token" in data:
234
+ reasoning_text += data["token"]
235
+ # elif data.get("subtype") == "status":
236
+ # # For status updates in reasoning, we can just append them as a comment
237
+ # if data.get("status"):
238
+ # reasoning_text += f"\n# {data['status']}"
239
+
240
+ # If we have reasoning, prepend it to the next text output
241
+ if reasoning_text and not full_text:
242
+ resp = {"text": f"<think>\n{reasoning_text}\n</think>\n", "is_reasoning": True}
243
+ yield resp
244
+
245
+ except json.JSONDecodeError:
246
+ continue
247
+
248
+ # Update conversation history only for saving to file if needed
249
+ if full_text and self.conversation.file:
250
+ if has_reasoning:
251
+ full_text_with_reasoning = f"<think>\n{reasoning_text}\n</think>\n{full_text}"
252
+ self.last_response = {"text": full_text_with_reasoning}
253
+ self.conversation.update_chat_history(prompt, full_text_with_reasoning)
254
+ else:
255
+ self.last_response = {"text": full_text}
256
+ self.conversation.update_chat_history(prompt, full_text)
257
+
258
+ return full_text
259
+
260
+ def ask(
261
+ self,
262
+ prompt: str,
263
+ stream: bool = False,
264
+ raw: bool = False,
265
+ optimizer: str = None,
266
+ conversationally: bool = False,
267
+ web_search: bool = False,
268
+ ) -> Union[Dict[str, Any], Generator]:
269
+ """Send a message to the HuggingFace Chat API"""
270
+ model = self.model
271
+
272
+ # Check if we have a conversation for this model
273
+ if model not in self._conversation_data:
274
+ conversation_id = self.create_conversation(model)
275
+ if not conversation_id:
276
+ raise exceptions.FailedToGenerateResponseError(f"Failed to create conversation with model {model}")
277
+ else:
278
+ conversation_id = self._conversation_data[model]["conversationId"]
279
+ # Refresh message ID
280
+ self._conversation_data[model]["messageId"] = self.fetch_message_id(conversation_id)
281
+
282
+ url = f"https://cohereforai-c4ai-command.hf.space/conversation/{conversation_id}"
283
+ message_id = self._conversation_data[model]["messageId"]
284
+
285
+ # Data to send - use the prompt directly without generating a complete prompt
286
+ # since HuggingFace maintains conversation state internally
287
+ request_data = {
288
+ "inputs": prompt,
289
+ "id": message_id,
290
+ "is_retry": False,
291
+ "is_continue": False,
292
+ "web_search": web_search,
293
+ "tools": ["66e85bb396d054c5771bc6cb", "00000000000000000000000a"]
294
+ }
295
+
296
+ # Update headers for this specific request
297
+ headers = self.headers.copy()
298
+ headers["Referer"] = f"https://cohereforai-c4ai-command.hf.space/conversation/{conversation_id}"
299
+
300
+ # Create multipart form data
301
+ boundary = self.generate_boundary()
302
+ multipart_headers = headers.copy()
303
+ multipart_headers["Content-Type"] = f"multipart/form-data; boundary={boundary}"
304
+
305
+ # Serialize the data to JSON
306
+ data_json = json.dumps(request_data, separators=(',', ':'))
307
+
308
+ # Create the multipart form data body
309
+ body = f"--{boundary}\r\n"
310
+ body += f'Content-Disposition: form-data; name="data"\r\n'
311
+ body += f"Content-Type: application/json\r\n\r\n"
312
+ body += f"{data_json}\r\n"
313
+ body += f"--{boundary}--\r\n"
314
+
315
+ multipart_headers["Content-Length"] = str(len(body))
316
+
317
+ def for_stream():
318
+ try:
319
+ # Try with multipart/form-data first
320
+ response = None
321
+ try:
322
+ response = self.session.post(
323
+ url,
324
+ data=body,
325
+ headers=multipart_headers,
326
+ stream=True,
327
+ timeout=self.timeout
328
+ )
329
+ except requests.exceptions.RequestException:
330
+ pass
331
+
332
+ # If multipart fails or returns error, try with regular JSON
333
+ if not response or response.status_code != 200:
334
+ response = self.session.post(
335
+ url,
336
+ json=request_data,
337
+ headers=headers,
338
+ stream=True,
339
+ timeout=self.timeout
340
+ )
341
+
342
+ # If both methods fail, raise exception
343
+ if response.status_code != 200:
344
+ raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}")
345
+
346
+ # Process the streaming response
347
+ yield from self.process_response(response, prompt)
348
+
349
+ except Exception as e:
350
+ if isinstance(e, requests.exceptions.RequestException):
351
+ if hasattr(e, 'response') and e.response is not None:
352
+ status_code = e.response.status_code
353
+ if status_code == 401:
354
+ raise exceptions.AuthenticationError("Authentication failed.")
355
+
356
+ # Try another model if current one fails
357
+ if len(self.AVAILABLE_MODELS) > 1:
358
+ current_model_index = self.AVAILABLE_MODELS.index(self.model) if self.model in self.AVAILABLE_MODELS else 0
359
+ next_model_index = (current_model_index + 1) % len(self.AVAILABLE_MODELS)
360
+ self.model = self.AVAILABLE_MODELS[next_model_index]
361
+
362
+ # Create new conversation with the alternate model
363
+ conversation_id = self.create_conversation(self.model)
364
+ if conversation_id:
365
+ # Try again with the new model
366
+ yield from self.ask(prompt, stream=True, raw=raw, optimizer=optimizer,
367
+ conversationally=conversationally, web_search=web_search)
368
+ return
369
+
370
+ # If we get here, all models failed
371
+ raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
372
+
373
+ def for_non_stream():
374
+ response_text = ""
375
+ for response in for_stream():
376
+ if "text" in response:
377
+ response_text += response["text"]
378
+ self.last_response = {"text": response_text}
379
+ return self.last_response
380
+
381
+ return for_stream() if stream else for_non_stream()
382
+
383
+ def chat(
384
+ self,
385
+ prompt: str,
386
+ stream: bool = False,
387
+ optimizer: str = None,
388
+ conversationally: bool = False,
389
+ web_search: bool = False
390
+ ) -> Union[str, Generator]:
391
+ """Generate a response to a prompt"""
392
+ def for_stream():
393
+ for response in self.ask(
394
+ prompt, True, optimizer=optimizer, conversationally=conversationally, web_search=web_search
395
+ ):
396
+ yield self.get_message(response)
397
+
398
+ def for_non_stream():
399
+ return self.get_message(
400
+ self.ask(
401
+ prompt, False, optimizer=optimizer, conversationally=conversationally, web_search=web_search
402
+ )
403
+ )
404
+
405
+ return for_stream() if stream else for_non_stream()
406
+
407
+ def get_message(self, response: dict) -> str:
408
+ """Extract message text from response"""
409
+ assert isinstance(response, dict), "Response should be of dict data-type only"
410
+ return response.get("text", "")
411
+
412
+ if __name__ == "__main__":
413
+ print("-" * 80)
414
+ print(f"{'Model':<50} {'Status':<10} {'Response'}")
415
+ print("-" * 80)
416
+
417
+ for model in C4ai.AVAILABLE_MODELS:
418
+ try:
419
+ test_ai = C4ai(model=model, timeout=60)
420
+ response = test_ai.chat("Say 'Hello' in one word")
421
+ response_text = response
422
+
423
+ if response_text and len(response_text.strip()) > 0:
424
+ status = "✓"
425
+ # Truncate response if too long
426
+ display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
427
+ else:
428
+ status = "✗"
429
+ display_text = "Empty or invalid response"
430
+ print(f"{model:<50} {status:<10} {display_text}")
431
+ except Exception as e:
432
+ print(f"{model:<50} {'✗':<10} {str(e)}")
@@ -5,13 +5,12 @@ import json
5
5
  from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
6
6
  from webscout.AIbase import Provider
7
7
  from webscout import exceptions
8
- from webscout.Litlogger import Logger, LogFormat
9
8
  from webscout import LitAgent as Lit
10
9
 
11
10
 
12
11
  class ChatGPTGratis(Provider):
13
12
  """
14
- A class to interact with the chatgptgratis.eu backend API with logging and real-time streaming.
13
+ A class to interact with the chatgptgratis.eu backend API with real-time streaming.
15
14
  """
16
15
  AVAILABLE_MODELS = [
17
16
  "Meta-Llama-3.2-1B-Instruct",
@@ -20,14 +19,12 @@ class ChatGPTGratis(Provider):
20
19
  "Meta-Llama-3.1-70B-Instruct",
21
20
  "Meta-Llama-3.1-405B-Instruct",
22
21
  "gpt4o"
23
-
24
22
  ]
25
23
 
26
24
  def __init__(
27
25
  self,
28
- model: str = "gpt4o",
26
+ model: str = "Meta-Llama-3.2-1B-Instruct",
29
27
  timeout: int = 30,
30
- logging: bool = False,
31
28
  proxies: Optional[Dict[str, str]] = None,
32
29
  intro: Optional[str] = None,
33
30
  filepath: Optional[str] = None,
@@ -41,14 +38,6 @@ class ChatGPTGratis(Provider):
41
38
  if model not in self.AVAILABLE_MODELS:
42
39
  raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
43
40
 
44
- self.logger = Logger(
45
- name="ChatGPTGratis",
46
- format=LogFormat.MODERN_EMOJI,
47
- ) if logging else None
48
-
49
- if self.logger:
50
- self.logger.info(f"Initializing ChatGPTGratis with model: {model}")
51
-
52
41
  self.session = requests.Session()
53
42
  self.timeout = timeout
54
43
  self.api_endpoint = "https://chatgptgratis.eu/backend/chat.php"
@@ -78,9 +67,6 @@ class ChatGPTGratis(Provider):
78
67
  )
79
68
  self.conversation.history_offset = history_offset
80
69
 
81
- if self.logger:
82
- self.logger.info("ChatGPTGratis initialized successfully.")
83
-
84
70
  def ask(
85
71
  self,
86
72
  prompt: str,
@@ -93,10 +79,6 @@ class ChatGPTGratis(Provider):
93
79
  Sends a request to the API and returns the response.
94
80
  If stream is True, yields response chunks as they are received.
95
81
  """
96
- if self.logger:
97
- self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
98
- self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
99
-
100
82
  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
101
83
  if optimizer:
102
84
  available_opts = (
@@ -107,22 +89,15 @@ class ChatGPTGratis(Provider):
107
89
  conversation_prompt = getattr(Optimizers, optimizer)(
108
90
  conversation_prompt if conversationally else prompt
109
91
  )
110
- if self.logger:
111
- self.logger.debug(f"Applied optimizer: {optimizer}")
112
92
  else:
113
- if self.logger:
114
- self.logger.error(f"Invalid optimizer requested: {optimizer}")
115
93
  raise Exception(f"Optimizer is not one of {list(available_opts)}")
116
94
 
117
95
  payload = {
118
96
  "message": conversation_prompt,
119
97
  "model": self.model,
120
-
121
98
  }
122
99
 
123
100
  def for_stream() -> Generator[Dict[str, Any], None, None]:
124
- if self.logger:
125
- self.logger.debug("Initiating streaming request to API")
126
101
  response = self.session.post(
127
102
  self.api_endpoint,
128
103
  json=payload,
@@ -130,23 +105,15 @@ class ChatGPTGratis(Provider):
130
105
  timeout=self.timeout
131
106
  )
132
107
  if not response.ok:
133
- if self.logger:
134
- self.logger.error(
135
- f"API request failed. Status: {response.status_code}, Reason: {response.reason}"
136
- )
137
108
  raise exceptions.FailedToGenerateResponseError(
138
109
  f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
139
110
  )
140
- if self.logger:
141
- self.logger.info(f"API connection established. Status: {response.status_code}")
142
111
 
143
112
  full_response = ""
144
113
  for line in response.iter_lines():
145
114
  if line:
146
115
  line_decoded = line.decode('utf-8').strip()
147
116
  if line_decoded == "data: [DONE]":
148
- if self.logger:
149
- self.logger.debug("Stream completed.")
150
117
  break
151
118
  if line_decoded.startswith("data: "):
152
119
  try:
@@ -158,18 +125,12 @@ class ChatGPTGratis(Provider):
158
125
  content = ""
159
126
  full_response += content
160
127
  yield content if raw else {"text": content}
161
- except json.JSONDecodeError as e:
162
- if self.logger:
163
- self.logger.error(f"JSON parsing error: {str(e)}")
128
+ except json.JSONDecodeError:
164
129
  continue
165
130
  # Update last response and conversation history.
166
131
  self.conversation.update_chat_history(prompt, self.get_message({"text": full_response}))
167
- if self.logger:
168
- self.logger.debug("Response processing completed.")
169
132
 
170
133
  def for_non_stream() -> Dict[str, Any]:
171
- if self.logger:
172
- self.logger.debug("Processing non-streaming request")
173
134
  collected = ""
174
135
  for chunk in for_stream():
175
136
  collected += chunk["text"] if isinstance(chunk, dict) else chunk
@@ -188,9 +149,6 @@ class ChatGPTGratis(Provider):
188
149
  Returns the response as a string.
189
150
  For streaming requests, yields each response chunk as a string.
190
151
  """
191
- if self.logger:
192
- self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
193
-
194
152
  def stream_response() -> Generator[str, None, None]:
195
153
  for response in self.ask(
196
154
  prompt, stream=True, optimizer=optimizer, conversationally=conversationally
@@ -213,14 +171,24 @@ class ChatGPTGratis(Provider):
213
171
 
214
172
 
215
173
  if __name__ == "__main__":
216
- from rich import print
217
-
218
- # Create an instance of the ChatGPTGratis with logging enabled for testing.
219
- client = ChatGPTGratis(
220
- model="Meta-Llama-3.2-1B-Instruct",
221
- logging=False
222
- )
223
- prompt_input = input(">>> ")
224
- response = client.chat(prompt_input, stream=True)
225
- for chunk in response:
226
- print(chunk, end="", flush=True)
174
+ print("-" * 80)
175
+ print(f"{'Model':<50} {'Status':<10} {'Response'}")
176
+ print("-" * 80)
177
+
178
+ for model in ChatGPTGratis.AVAILABLE_MODELS:
179
+ try:
180
+ test_ai = ChatGPTGratis(model=model, timeout=60)
181
+ response = test_ai.chat("Say 'Hello' in one word")
182
+ response_text = response
183
+
184
+ if response_text and len(response_text.strip()) > 0:
185
+ status = "✓"
186
+ # Clean and truncate response
187
+ clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
188
+ display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
189
+ else:
190
+ status = "✗"
191
+ display_text = "Empty or invalid response"
192
+ print(f"{model:<50} {status:<10} {display_text}")
193
+ except Exception as e:
194
+ print(f"{model:<50} {'✗':<10} {str(e)}")