webscout-7.8-py3-none-any.whl → webscout-8.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (66)
  1. webscout/Bard.py +5 -25
  2. webscout/DWEBS.py +476 -476
  3. webscout/Extra/GitToolkit/__init__.py +10 -0
  4. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  5. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  6. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  7. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  8. webscout/Extra/YTToolkit/ytapi/video.py +232 -103
  9. webscout/Extra/__init__.py +2 -0
  10. webscout/Extra/autocoder/__init__.py +1 -1
  11. webscout/Extra/autocoder/{rawdog.py → autocoder.py} +849 -849
  12. webscout/Extra/tempmail/__init__.py +26 -0
  13. webscout/Extra/tempmail/async_utils.py +141 -0
  14. webscout/Extra/tempmail/base.py +156 -0
  15. webscout/Extra/tempmail/cli.py +187 -0
  16. webscout/Extra/tempmail/mail_tm.py +361 -0
  17. webscout/Extra/tempmail/temp_mail_io.py +292 -0
  18. webscout/Provider/AISEARCH/__init__.py +5 -1
  19. webscout/Provider/AISEARCH/hika_search.py +194 -0
  20. webscout/Provider/AISEARCH/monica_search.py +246 -0
  21. webscout/Provider/AISEARCH/scira_search.py +320 -0
  22. webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
  23. webscout/Provider/AllenAI.py +255 -122
  24. webscout/Provider/DeepSeek.py +1 -2
  25. webscout/Provider/Deepinfra.py +296 -286
  26. webscout/Provider/ElectronHub.py +709 -716
  27. webscout/Provider/ExaAI.py +261 -0
  28. webscout/Provider/ExaChat.py +28 -6
  29. webscout/Provider/Gemini.py +167 -165
  30. webscout/Provider/GithubChat.py +2 -1
  31. webscout/Provider/Groq.py +38 -24
  32. webscout/Provider/LambdaChat.py +2 -1
  33. webscout/Provider/Netwrck.py +3 -2
  34. webscout/Provider/OpenGPT.py +199 -0
  35. webscout/Provider/PI.py +39 -24
  36. webscout/Provider/TextPollinationsAI.py +232 -230
  37. webscout/Provider/Youchat.py +326 -296
  38. webscout/Provider/__init__.py +10 -4
  39. webscout/Provider/ai4chat.py +58 -56
  40. webscout/Provider/akashgpt.py +34 -22
  41. webscout/Provider/copilot.py +427 -427
  42. webscout/Provider/freeaichat.py +9 -2
  43. webscout/Provider/labyrinth.py +121 -20
  44. webscout/Provider/llmchatco.py +306 -0
  45. webscout/Provider/scira_chat.py +271 -0
  46. webscout/Provider/typefully.py +280 -0
  47. webscout/Provider/uncovr.py +312 -299
  48. webscout/Provider/yep.py +64 -12
  49. webscout/__init__.py +38 -36
  50. webscout/cli.py +293 -293
  51. webscout/conversation.py +350 -17
  52. webscout/litprinter/__init__.py +59 -667
  53. webscout/optimizers.py +419 -419
  54. webscout/update_checker.py +14 -12
  55. webscout/version.py +1 -1
  56. webscout/webscout_search.py +1346 -1282
  57. webscout/webscout_search_async.py +877 -813
  58. {webscout-7.8.dist-info → webscout-8.0.dist-info}/METADATA +44 -39
  59. {webscout-7.8.dist-info → webscout-8.0.dist-info}/RECORD +63 -46
  60. webscout/Provider/DARKAI.py +0 -225
  61. webscout/Provider/EDITEE.py +0 -192
  62. webscout/litprinter/colors.py +0 -54
  63. {webscout-7.8.dist-info → webscout-8.0.dist-info}/LICENSE.md +0 -0
  64. {webscout-7.8.dist-info → webscout-8.0.dist-info}/WHEEL +0 -0
  65. {webscout-7.8.dist-info → webscout-8.0.dist-info}/entry_points.txt +0 -0
  66. {webscout-7.8.dist-info → webscout-8.0.dist-info}/top_level.txt +0 -0
webscout/Provider/freeaichat.py

@@ -18,6 +18,7 @@ class FreeAIChat(Provider):
     AVAILABLE_MODELS = [
         # OpenAI Models
         "GPT 4o",
+        "GPT 4.5 Preview",
         "GPT 4o Latest",
         "GPT 4o mini",
         "GPT 4o Search Preview",
@@ -28,6 +29,7 @@ class FreeAIChat(Provider):
         "O3 Mini Low",

         # Anthropic Models
+        "Claude 3.5 haiku",
         "claude 3.5 sonnet",
         "Claude 3.7 Sonnet",
         "Claude 3.7 Sonnet (Thinking)",
@@ -41,18 +43,22 @@ class FreeAIChat(Provider):
         # Google Models
         "Gemini 1.5 Flash",
         "Gemini 1.5 Pro",
-        "Gemini 2.0 Pro",
         "Gemini 2.0 Flash",
+        "Gemini 2.0 Pro",
         "Gemini 2.5 Pro",

         # Llama Models
         "Llama 3.1 405B",
         "Llama 3.1 70B Fast",
         "Llama 3.3 70B",
+        "Llama 3.2 90B Vision",
+        "Llama 4 Scout",
+        "Llama 4 Maverick",

         # Mistral Models
         "Mistral Large",
         "Mistral Nemo",
+        "Mixtral 8x22B",

         # Qwen Models
         "Qwen Max",
@@ -62,7 +68,8 @@ class FreeAIChat(Provider):
         "QwQ Plus",

         # XAI Models
-        "Grok 2"
+        "Grok 2",
+        "Grok 3",
     ]

     def __init__(
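
For context, a rough sketch of how the expanded model list would be exercised. This is an assumption-laden example, not code from the release: it presumes FreeAIChat accepts a model keyword and exposes chat() like the other providers in this diff.

# Hypothetical usage of a model added in 8.0; the `model` keyword is assumed.
from webscout.Provider.freeaichat import FreeAIChat

ai = FreeAIChat(model="Grok 3")  # "Grok 3" is new in this release
print(ai.chat("Say hello in one word"))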
webscout/Provider/labyrinth.py

@@ -1,7 +1,9 @@
+from typing import Union, Any, Dict, Generator
+from uuid import uuid4
 import requests
+import re
 import json
-import uuid
-from typing import Any, Dict, Optional, Generator, Union
+
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
@@ -12,6 +14,16 @@ from webscout.litagent import LitAgent
 class LabyrinthAI(Provider):
     """
     A class to interact with the Labyrinth AI chat API.
+
+    Attributes:
+        system_prompt (str): The system prompt to define the assistant's role.
+
+    Examples:
+        >>> from webscout.Provider.labyrinth import LabyrinthAI
+        >>> ai = LabyrinthAI()
+        >>> response = ai.chat("What's the weather today?")
+        >>> print(response)
+        'The weather today is sunny with a high of 75°F.'
     """

     # AVAILABLE_MODELS = [
@@ -29,20 +41,42 @@ class LabyrinthAI(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
+        system_prompt: str = "You are a helpful assistant.",
         # model: str = "gemini-2.0-flash",
         browser: str = "chrome"
     ):
-        """Initializes the Labyrinth AI API client."""
+        """
+        Initializes the Labyrinth AI API with given parameters.
+
+        Args:
+            is_conversation (bool): Whether the provider is in conversation mode.
+            max_tokens (int): Maximum number of tokens to sample.
+            timeout (int): Timeout for API requests.
+            intro (str): Introduction message for the conversation.
+            filepath (str): Filepath for storing conversation history.
+            update_file (bool): Whether to update the conversation history file.
+            proxies (dict): Proxies for the API requests.
+            history_offset (int): Offset for conversation history.
+            act (str): Act for the conversation.
+            system_prompt (str): The system prompt to define the assistant's role.
+            browser (str): Browser type to emulate in the user agent.
+
+        Examples:
+            >>> ai = LabyrinthAI(system_prompt="You are a friendly assistant.")
+            >>> print(ai.system_prompt)
+            'You are a friendly assistant.'
+        """
         # if model not in self.AVAILABLE_MODELS:
         #     raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
+
         self.url = "https://labyrinth-ebon.vercel.app/api/chat"
-
+        self.system_prompt = system_prompt
+
         # Initialize LitAgent for user agent generation
         self.agent = LitAgent()
         # Use fingerprinting to create a consistent browser identity
         self.fingerprint = self.agent.generate_fingerprint(browser)
-
+
         # Use the fingerprint for headers
         self.headers = {
             "Accept": self.fingerprint["accept"],
@@ -61,7 +95,7 @@ class LabyrinthAI(Provider):
             "Sec-Fetch-Site": "same-origin",
             "Sec-GPC": "1"
         }
-
+
         self.session = requests.Session()
         self.session.headers.update(self.headers)
         self.session.proxies.update(proxies)
@@ -93,13 +127,13 @@ class LabyrinthAI(Provider):
     def refresh_identity(self, browser: str = None):
         """
         Refreshes the browser identity fingerprint.
-
+
         Args:
             browser: Specific browser to use for the new fingerprint
         """
         browser = browser or self.fingerprint.get("browser_type", "chrome")
         self.fingerprint = self.agent.generate_fingerprint(browser)
-
+
         # Update headers with new fingerprint
         self.headers.update({
             "Accept": self.fingerprint["accept"],
@@ -108,11 +142,11 @@ class LabyrinthAI(Provider):
             "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
             "User-Agent": self.fingerprint["user_agent"],
         })
-
+
         # Update session headers
         for header, value in self.headers.items():
             self.session.headers[header] = value
-
+
         return self.fingerprint

     def ask(
@@ -123,6 +157,25 @@ class LabyrinthAI(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Union[Dict[str, Any], Generator]:
+        """
+        Sends a prompt to the Labyrinth AI API and returns the response.
+
+        Args:
+            prompt (str): The prompt to send to the API.
+            stream (bool): Whether to stream the response.
+            raw (bool): Whether to return the raw response.
+            optimizer (str): Optimizer to use for the prompt.
+            conversationally (bool): Whether to generate the prompt conversationally.
+
+        Returns:
+            Union[Dict[str, Any], Generator]: The API response.
+
+        Examples:
+            >>> ai = LabyrinthAI()
+            >>> response = ai.ask("Tell me a joke!")
+            >>> print(response)
+            {'text': 'Why did the scarecrow win an award? Because he was outstanding in his field!'}
+        """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
@@ -134,8 +187,12 @@ class LabyrinthAI(Provider):

         # Prepare the request payload
         payload = {
-            "id": str(uuid.uuid4()),
+            "id": str(uuid4()),
             "messages": [
+                {
+                    "role": "system",
+                    "content": self.system_prompt
+                },
                 {
                     "role": "user",
                     "content": conversation_prompt,
@@ -163,23 +220,24 @@ class LabyrinthAI(Provider):
                 raise exceptions.FailedToGenerateResponseError(
                     f"Request failed with status code {response.status_code}"
                 )
-
+
             streaming_text = ""
             for line in response.iter_lines():
                 if line:
                     try:
                         line = line.decode('utf-8')
-                        if line.startswith('0:'):
-                            content = line[2:].strip('"')
+                        match = re.search(r'0:"(.*?)"', line)
+                        if match:
+                            content = match.group(1)
                             streaming_text += content
                             resp = dict(text=content)
                             yield resp if raw else resp
                     except UnicodeDecodeError:
                         continue
-
+
             self.last_response = {"text": streaming_text}
             self.conversation.update_chat_history(prompt, streaming_text)
-
+
         except requests.RequestException as e:
             raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")

@@ -204,8 +262,9 @@ class LabyrinthAI(Provider):
                 if line:
                     try:
                         line = line.decode('utf-8')
-                        if line.startswith('0:'):
-                            content = line[2:].strip('"')
+                        match = re.search(r'0:"(.*?)"', line)
+                        if match:
+                            content = match.group(1)
                             full_response += content
                     except UnicodeDecodeError:
                         continue
@@ -225,6 +284,24 @@ class LabyrinthAI(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
+        """
+        Generates a response from the Labyrinth AI API.
+
+        Args:
+            prompt (str): The prompt to send to the API.
+            stream (bool): Whether to stream the response.
+            optimizer (str): Optimizer to use for the prompt.
+            conversationally (bool): Whether to generate the prompt conversationally.
+
+        Returns:
+            Union[str, Generator[str, None, None]]: The API response.
+
+        Examples:
+            >>> ai = LabyrinthAI()
+            >>> response = ai.chat("What's the weather today?")
+            >>> print(response)
+            'The weather today is sunny with a high of 75°F.'
+        """
         def for_stream():
             for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
                 yield self.get_message(response)
@@ -235,5 +312,29 @@ class LabyrinthAI(Provider):
         return for_stream() if stream else for_non_stream()

     def get_message(self, response: dict) -> str:
+        """
+        Extracts the message from the API response.
+
+        Args:
+            response (dict): The API response.
+
+        Returns:
+            str: The message content.
+
+        Examples:
+            >>> ai = LabyrinthAI()
+            >>> response = ai.ask("Tell me a joke!")
+            >>> message = ai.get_message(response)
+            >>> print(message)
+            'Why did the scarecrow win an award? Because he was outstanding in his field!'
+        """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+        formatted_text = response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+        return formatted_text
+
+if __name__ == "__main__":
+    from rich import print
+    ai = LabyrinthAI()
+    resp = ai.chat("What is the capital of France?", stream=True)
+    for message in resp:
+        print(message, end='', flush=True)
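
The parsing change in ask() and the non-streaming path is the functional heart of this diff: 7.8 matched any line starting with 0: and stripped surrounding quotes, while 8.0 pulls the quoted payload out with a regex, which tolerates extra framing around the chunk. A minimal standalone sketch of the new extraction, using invented sample lines in the same 0:"..." framing:

import re

# Invented sample stream lines; only the 0:"..." chunks carry answer text.
sample_lines = ['0:"Paris"', '0:" is the capital"', 'e:{"finishReason":"stop"}']

text = ""
for line in sample_lines:
    match = re.search(r'0:"(.*?)"', line)  # same pattern as the 8.0 code
    if match:
        text += match.group(1)
print(text)  # -> Paris is the capital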
webscout/Provider/llmchatco.py (new file)

@@ -0,0 +1,306 @@
+import requests
+import json
+import uuid
+import re
+from typing import Union, Any, Dict, Optional, Generator, List
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent as Lit
+
+class LLMChatCo(Provider):
+    """
+    A class to interact with the LLMChat.co API
+    """
+
+    AVAILABLE_MODELS = [
+        "gemini-flash-2.0",  # Default model
+        "llama-4-scout",
+        "gpt-4o-mini",
+        # "o3-mini",
+        # "claude-3-5-sonnet",
+        # "deepseek-r1",
+        # "claude-3-7-sonnet",
+        # "deep",  # deep research mode
+        # "pro"  # pro research mode
+
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2048,
+        timeout: int = 60,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "gemini-flash-2.0",
+        system_prompt: str = "You are a helpful assistant."
+    ):
+        """
+        Initializes the LLMChat.co API with given parameters.
+        """
+
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://llmchat.co/api/completion"
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+        self.thread_id = str(uuid.uuid4())  # Generate a unique thread ID for conversations
+
+        # Create LitAgent instance for user agent generation
+        lit_agent = Lit()
+
+        # Headers based on the provided request
+        self.headers = {
+            "Content-Type": "application/json",
+            "Accept": "text/event-stream",
+            "User-Agent": lit_agent.random(),
+            "Accept-Language": "en-US,en;q=0.9",
+            "Origin": "https://llmchat.co",
+            "Referer": f"https://llmchat.co/chat/{self.thread_id}",
+            "DNT": "1",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin"
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+        # Store message history for conversation context
+        self.last_assistant_response = ""
+
+    def parse_sse(self, data):
+        """Parse Server-Sent Events data"""
+        if not data or not data.strip():
+            return None
+
+        # Check if it's an event line
+        if data.startswith('event:'):
+            return {'event': data[6:].strip()}
+
+        # Check if it's data
+        if data.startswith('data:'):
+            data_content = data[5:].strip()
+            if data_content:
+                try:
+                    return {'data': json.loads(data_content)}
+                except json.JSONDecodeError:
+                    return {'data': data_content}
+
+        return None
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = True,  # Default to stream as the API uses SSE
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        web_search: bool = False,
+    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
+        """Chat with LLMChat.co with streaming capabilities"""
+
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        # Generate a unique ID for this message
+        thread_item_id = ''.join(str(uuid.uuid4()).split('-'))[:20]
+        messages = [
+            {"role": "system", "content": self.system_prompt},
+            {"role": "user", "content": prompt},
+        ]
+        # Prepare payload for the API request based on observed request format
+        payload = {
+            "mode": self.model,
+            "prompt": prompt,
+            "threadId": self.thread_id,
+            "messages": messages,
+            "mcpConfig": {},
+            "threadItemId": thread_item_id,
+            "parentThreadItemId": "",
+            "webSearch": web_search,
+            "showSuggestions": True
+        }
+
+        def for_stream():
+            try:
+                # Set up the streaming request
+                response = self.session.post(
+                    self.api_endpoint,
+                    json=payload,
+                    headers=self.headers,
+                    stream=True,
+                    timeout=self.timeout
+                )
+                response.raise_for_status()
+
+                # Process the SSE stream
+                full_response = ""
+                current_event = None
+                buffer = ""
+
+                # Use a raw read approach to handle SSE
+                for chunk in response.iter_content(chunk_size=1024, decode_unicode=False):
+                    if not chunk:
+                        continue
+
+                    # Decode the chunk and add to buffer
+                    buffer += chunk.decode('utf-8')
+
+                    # Process complete lines in the buffer
+                    while '\n' in buffer:
+                        line, buffer = buffer.split('\n', 1)
+                        line = line.strip()
+
+                        if not line:
+                            continue
+
+                        if line.startswith('event:'):
+                            current_event = line[6:].strip()
+                        elif line.startswith('data:'):
+                            data_content = line[5:].strip()
+                            if data_content and current_event == 'answer':
+                                try:
+                                    json_data = json.loads(data_content)
+                                    if "answer" in json_data and "text" in json_data["answer"]:
+                                        text_chunk = json_data["answer"]["text"]
+                                        # If there's a fullText, use it as it's more complete
+                                        if json_data["answer"].get("fullText") and json_data["answer"].get("status") == "COMPLETED":
+                                            text_chunk = json_data["answer"]["fullText"]
+
+                                        # Extract only new content since last chunk
+                                        new_text = text_chunk[len(full_response):]
+                                        if new_text:
+                                            full_response = text_chunk
+                                            yield new_text if raw else dict(text=new_text)
+                                except json.JSONDecodeError:
+                                    continue
+                            elif data_content and current_event == 'done':
+                                break
+
+                self.last_response.update(dict(text=full_response))
+                self.last_assistant_response = full_response
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
+                )
+
+            except requests.exceptions.RequestException as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Unexpected error: {str(e)}")
+
+        def for_non_stream():
+            full_response = ""
+            try:
+                for chunk in for_stream():
+                    if not raw:
+                        full_response += chunk.get('text', '')
+                    else:
+                        full_response += chunk
+            except Exception as e:
+                if not full_response:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get response: {str(e)}")
+
+            return dict(text=full_response)
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        web_search: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        """Generate response with streaming capabilities"""
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally,
+                web_search=web_search
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                    web_search=web_search
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: Dict[str, Any]) -> str:
+        """Retrieves message from response with validation"""
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+if __name__ == "__main__":
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    # Test all available models
+    working = 0
+    total = len(LLMChatCo.AVAILABLE_MODELS)
+
+    for model in LLMChatCo.AVAILABLE_MODELS:
+        try:
+            test_ai = LLMChatCo(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word")
+            response_text = response
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"{model:<50} {'✗':<10} {str(e)}")