webscout 7.9__py3-none-any.whl → 8.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (69) hide show
  1. webscout/Extra/GitToolkit/__init__.py +10 -0
  2. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  3. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  4. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  5. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -103
  7. webscout/Provider/AISEARCH/DeepFind.py +1 -1
  8. webscout/Provider/AISEARCH/ISou.py +1 -1
  9. webscout/Provider/AISEARCH/__init__.py +6 -1
  10. webscout/Provider/AISEARCH/felo_search.py +1 -1
  11. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  12. webscout/Provider/AISEARCH/hika_search.py +194 -0
  13. webscout/Provider/AISEARCH/iask_search.py +436 -0
  14. webscout/Provider/AISEARCH/monica_search.py +246 -0
  15. webscout/Provider/AISEARCH/scira_search.py +320 -0
  16. webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
  17. webscout/Provider/AllenAI.py +255 -122
  18. webscout/Provider/DeepSeek.py +1 -2
  19. webscout/Provider/Deepinfra.py +17 -9
  20. webscout/Provider/ExaAI.py +261 -0
  21. webscout/Provider/ExaChat.py +8 -1
  22. webscout/Provider/GithubChat.py +2 -1
  23. webscout/Provider/Jadve.py +2 -2
  24. webscout/Provider/Netwrck.py +3 -2
  25. webscout/Provider/OPENAI/__init__.py +17 -0
  26. webscout/Provider/OPENAI/base.py +46 -0
  27. webscout/Provider/OPENAI/c4ai.py +347 -0
  28. webscout/Provider/OPENAI/chatgptclone.py +460 -0
  29. webscout/Provider/OPENAI/deepinfra.py +284 -0
  30. webscout/Provider/OPENAI/exaai.py +419 -0
  31. webscout/Provider/OPENAI/exachat.py +421 -0
  32. webscout/Provider/OPENAI/freeaichat.py +355 -0
  33. webscout/Provider/OPENAI/glider.py +314 -0
  34. webscout/Provider/OPENAI/heckai.py +337 -0
  35. webscout/Provider/OPENAI/llmchatco.py +325 -0
  36. webscout/Provider/OPENAI/netwrck.py +348 -0
  37. webscout/Provider/OPENAI/scirachat.py +459 -0
  38. webscout/Provider/OPENAI/sonus.py +294 -0
  39. webscout/Provider/OPENAI/typegpt.py +361 -0
  40. webscout/Provider/OPENAI/utils.py +211 -0
  41. webscout/Provider/OPENAI/venice.py +428 -0
  42. webscout/Provider/OPENAI/wisecat.py +381 -0
  43. webscout/Provider/OPENAI/x0gpt.py +389 -0
  44. webscout/Provider/OPENAI/yep.py +329 -0
  45. webscout/Provider/OpenGPT.py +199 -0
  46. webscout/Provider/PI.py +39 -24
  47. webscout/Provider/Venice.py +1 -1
  48. webscout/Provider/Youchat.py +326 -296
  49. webscout/Provider/__init__.py +16 -6
  50. webscout/Provider/ai4chat.py +58 -56
  51. webscout/Provider/akashgpt.py +34 -22
  52. webscout/Provider/freeaichat.py +1 -1
  53. webscout/Provider/labyrinth.py +121 -20
  54. webscout/Provider/llmchatco.py +306 -0
  55. webscout/Provider/scira_chat.py +274 -0
  56. webscout/Provider/typefully.py +280 -0
  57. webscout/Provider/typegpt.py +3 -184
  58. webscout/prompt_manager.py +2 -1
  59. webscout/version.py +1 -1
  60. webscout/webscout_search.py +118 -54
  61. webscout/webscout_search_async.py +109 -45
  62. webscout-8.1.dist-info/METADATA +683 -0
  63. {webscout-7.9.dist-info → webscout-8.1.dist-info}/RECORD +67 -33
  64. webscout/Provider/flowith.py +0 -207
  65. webscout-7.9.dist-info/METADATA +0 -995
  66. {webscout-7.9.dist-info → webscout-8.1.dist-info}/LICENSE.md +0 -0
  67. {webscout-7.9.dist-info → webscout-8.1.dist-info}/WHEEL +0 -0
  68. {webscout-7.9.dist-info → webscout-8.1.dist-info}/entry_points.txt +0 -0
  69. {webscout-7.9.dist-info → webscout-8.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,306 @@
1
+ import requests
2
+ import json
3
+ import uuid
4
+ import re
5
+ from typing import Union, Any, Dict, Optional, Generator, List
6
+
7
+ from webscout.AIutel import Optimizers
8
+ from webscout.AIutel import Conversation
9
+ from webscout.AIutel import AwesomePrompts
10
+ from webscout.AIbase import Provider
11
+ from webscout import exceptions
12
+ from webscout.litagent import LitAgent as Lit
13
+
14
class LLMChatCo(Provider):
    """
    A class to interact with the LLMChat.co API.

    The API streams answers over Server-Sent Events (SSE); ``ask`` parses the
    ``answer``/``done`` events and yields incremental text chunks.
    """

    AVAILABLE_MODELS = [
        "gemini-flash-2.0",  # Default model
        "llama-4-scout",
        "gpt-4o-mini",
        # "o3-mini",
        # "claude-3-5-sonnet",
        # "deepseek-r1",
        # "claude-3-7-sonnet",
        # "deep", # deep research mode
        # "pro" # pro research mode
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2048,
        timeout: int = 60,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = None,
        history_offset: int = 10250,
        act: str = None,
        model: str = "gemini-flash-2.0",
        system_prompt: str = "You are a helpful assistant."
    ):
        """Initializes the LLMChat.co API client.

        Args:
            is_conversation (bool): Whether to maintain conversation history.
            max_tokens (int): Maximum number of tokens to generate.
            timeout (int): Request timeout in seconds.
            intro (str): Introduction text for the conversation.
            filepath (str): Path to save conversation history.
            update_file (bool): Whether to update the conversation history file.
            proxies (dict): Proxy configuration for requests (``None`` means no proxies).
            history_offset (int): Maximum history length in characters.
            act (str): Persona for the AI to adopt.
            model (str): Model to use, must be one of AVAILABLE_MODELS.
            system_prompt (str): System prompt for the AI.

        Raises:
            ValueError: If ``model`` is not in AVAILABLE_MODELS.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://llmchat.co/api/completion"
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.system_prompt = system_prompt
        self.thread_id = str(uuid.uuid4())  # Generate a unique thread ID for conversations

        # Create LitAgent instance for user agent generation
        lit_agent = Lit()

        # Headers based on the provided request
        self.headers = {
            "Content-Type": "application/json",
            "Accept": "text/event-stream",
            "User-Agent": lit_agent.random(),
            "Accept-Language": "en-US,en;q=0.9",
            "Origin": "https://llmchat.co",
            "Referer": f"https://llmchat.co/chat/{self.thread_id}",
            "DNT": "1",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin"
        }

        # NOTE: materialized as a tuple on purpose — a bare generator expression
        # would be exhausted by the first `optimizer in ...` membership test,
        # making every later ask(optimizer=...) call fail.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )

        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        # Avoid a shared mutable default by resolving None here.
        self.session.proxies = proxies if proxies is not None else {}
        # Store message history for conversation context
        self.last_assistant_response = ""

    def parse_sse(self, data):
        """Parse a single Server-Sent Events line.

        Returns a dict with an ``event`` or ``data`` key (data is JSON-decoded
        when possible), or ``None`` for blank/unrecognized lines.
        """
        if not data or not data.strip():
            return None

        # Check if it's an event line
        if data.startswith('event:'):
            return {'event': data[6:].strip()}

        # Check if it's data
        if data.startswith('data:'):
            data_content = data[5:].strip()
            if data_content:
                try:
                    return {'data': json.loads(data_content)}
                except json.JSONDecodeError:
                    return {'data': data_content}

        return None

    def ask(
        self,
        prompt: str,
        stream: bool = True,  # Default to stream as the API uses SSE
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        web_search: bool = False,
    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
        """Chat with LLMChat.co with streaming capabilities.

        Args:
            prompt (str): The user's message.
            stream (bool): Yield chunks as they arrive instead of one dict.
            raw (bool): Yield plain text chunks instead of ``{"text": ...}`` dicts.
            optimizer (str): Name of an ``Optimizers`` method to preprocess the prompt.
            conversationally (bool): Apply the optimizer to the full conversation prompt.
            web_search (bool): Ask the API to perform a web search.

        Returns:
            A generator of chunks when streaming, else a ``{"text": ...}`` dict.

        Raises:
            exceptions.FailedToGenerateResponseError: On request failure or
                unknown optimizer name.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise exceptions.FailedToGenerateResponseError(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        # Generate a unique ID for this message
        thread_item_id = ''.join(str(uuid.uuid4()).split('-'))[:20]
        messages = [
            {"role": "system", "content": self.system_prompt},
            # Send the history/optimizer-processed prompt; previously the raw
            # prompt was sent here, silently discarding conversation context.
            {"role": "user", "content": conversation_prompt},
        ]
        # Prepare payload for the API request based on observed request format
        payload = {
            "mode": self.model,
            "prompt": conversation_prompt,
            "threadId": self.thread_id,
            "messages": messages,
            "mcpConfig": {},
            "threadItemId": thread_item_id,
            "parentThreadItemId": "",
            "webSearch": web_search,
            "showSuggestions": True
        }

        def for_stream():
            try:
                # Set up the streaming request
                response = self.session.post(
                    self.api_endpoint,
                    json=payload,
                    headers=self.headers,
                    stream=True,
                    timeout=self.timeout
                )
                response.raise_for_status()

                # Process the SSE stream
                full_response = ""
                current_event = None
                buffer = ""
                done = False  # set once the 'done' event arrives

                # Use a raw read approach to handle SSE
                # NOTE(review): chunk boundaries could in theory split a
                # multi-byte UTF-8 sequence; unchanged from the original.
                for chunk in response.iter_content(chunk_size=1024, decode_unicode=False):
                    if done:
                        # Stop reading instead of draining the connection.
                        break
                    if not chunk:
                        continue

                    # Decode the chunk and add to buffer
                    buffer += chunk.decode('utf-8')

                    # Process complete lines in the buffer
                    while '\n' in buffer:
                        line, buffer = buffer.split('\n', 1)
                        line = line.strip()

                        if not line:
                            continue

                        if line.startswith('event:'):
                            current_event = line[6:].strip()
                        elif line.startswith('data:'):
                            data_content = line[5:].strip()
                            if data_content and current_event == 'answer':
                                try:
                                    json_data = json.loads(data_content)
                                except json.JSONDecodeError:
                                    continue
                                answer = json_data.get("answer")
                                if isinstance(answer, dict) and "text" in answer:
                                    text_chunk = answer["text"]
                                    # If there's a fullText, use it as it's more complete
                                    if answer.get("fullText") and answer.get("status") == "COMPLETED":
                                        text_chunk = answer["fullText"]

                                    # Extract only new content since last chunk
                                    new_text = text_chunk[len(full_response):]
                                    if new_text:
                                        full_response = text_chunk
                                        yield new_text if raw else dict(text=new_text)
                            elif data_content and current_event == 'done':
                                done = True
                                break

                self.last_response.update(dict(text=full_response))
                self.last_assistant_response = full_response
                self.conversation.update_chat_history(
                    prompt, self.get_message(self.last_response)
                )

            except requests.exceptions.RequestException as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Unexpected error: {str(e)}")

        def for_non_stream():
            # Drain the stream and return the accumulated text; a partial
            # response is kept if the stream errors mid-way (best effort).
            full_response = ""
            try:
                for chunk in for_stream():
                    if not raw:
                        full_response += chunk.get('text', '')
                    else:
                        full_response += chunk
            except Exception as e:
                if not full_response:
                    raise exceptions.FailedToGenerateResponseError(f"Failed to get response: {str(e)}")

            return dict(text=full_response)

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        web_search: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """Generate a text response; streams strings when ``stream`` is True."""

        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally,
                web_search=web_search
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                    web_search=web_search
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: Dict[str, Any]) -> str:
        """Retrieves message from response with validation."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]
+
282
if __name__ == "__main__":
    # Smoke-test every advertised model and print a one-line status table.
    rule = "-" * 80
    print(rule)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print(rule)

    # Test all available models
    working = 0
    total = len(LLMChatCo.AVAILABLE_MODELS)

    for model_name in LLMChatCo.AVAILABLE_MODELS:
        try:
            client = LLMChatCo(model=model_name, timeout=60)
            reply = client.chat("Say 'Hello' in one word")
            text = reply.strip() if reply else ""

            if text:
                status = "✓"
                # Truncate response if too long
                display_text = text[:50] + "..." if len(text) > 50 else text
            else:
                status = "✗"
                display_text = "Empty or invalid response"
            print(f"{model_name:<50} {status:<10} {display_text}")
        except Exception as e:
            print(f"{model_name:<50} {'✗':<10} {str(e)}")
@@ -0,0 +1,274 @@
1
+ from os import system
2
+ import requests
3
+ import json
4
+ import uuid
5
+ import re
6
+ from typing import Any, Dict, Optional, Union
7
+ from webscout.AIutel import Optimizers
8
+ from webscout.AIutel import Conversation
9
+ from webscout.AIutel import AwesomePrompts
10
+ from webscout.AIbase import Provider
11
+ from webscout import exceptions
12
+ from webscout.litagent import LitAgent
13
+
14
class SciraAI(Provider):
    """
    A class to interact with the Scira AI chat API.

    Uses a LitAgent-generated browser fingerprint for request headers and
    parses the server's ``0:"..."`` streamed line format into plain text.
    """

    AVAILABLE_MODELS = {
        "scira-default": "Grok3",
        "scira-grok-3-mini": "Grok3-mini",  # thinking model
        "scira-vision": "Grok2-Vision",  # vision model
        "scira-claude": "Sonnet-3.7",
        "scira-optimus": "optimus",
    }

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2049,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = None,
        history_offset: int = 10250,
        act: str = None,
        model: str = "scira-default",
        chat_id: str = None,
        user_id: str = None,
        browser: str = "chrome",
        system_prompt: str = "You are a helpful assistant.",
    ):
        """Initializes the Scira AI API client.

        Args:
            is_conversation (bool): Whether to maintain conversation history.
            max_tokens (int): Maximum number of tokens to generate.
            timeout (int): Request timeout in seconds.
            intro (str): Introduction text for the conversation.
            filepath (str): Path to save conversation history.
            update_file (bool): Whether to update the conversation history file.
            proxies (dict): Proxy configuration for requests (``None`` means no proxies).
            history_offset (int): Maximum history length in characters.
            act (str): Persona for the AI to adopt.
            model (str): Model to use, must be one of AVAILABLE_MODELS.
            chat_id (str): Unique identifier for the chat session.
            user_id (str): Unique identifier for the user.
            browser (str): Browser to emulate in requests.
            system_prompt (str): System prompt for the AI.

        Raises:
            ValueError: If ``model`` is not in AVAILABLE_MODELS.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.url = "https://scira.ai/api/search"

        # Initialize LitAgent for user agent generation
        self.agent = LitAgent()
        # Use fingerprinting to create a consistent browser identity
        self.fingerprint = self.agent.generate_fingerprint(browser)
        self.system_prompt = system_prompt

        # Use the fingerprint for headers
        self.headers = {
            "Accept": self.fingerprint["accept"],
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": self.fingerprint["accept_language"],
            "Content-Type": "application/json",
            "Origin": "https://scira.ai",
            "Referer": "https://scira.ai/",
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin"
        }

        self.session = requests.Session()
        self.session.headers.update(self.headers)
        # Avoid a shared mutable default by resolving None here.
        self.session.proxies.update(proxies if proxies is not None else {})

        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.chat_id = chat_id or str(uuid.uuid4())
        self.user_id = user_id or f"user_{str(uuid.uuid4())[:8].upper()}"

        # Always use chat mode (no web search)
        self.search_mode = "chat"

        # NOTE: materialized as a tuple on purpose — a bare generator expression
        # would be exhausted by the first `optimizer in ...` membership test,
        # making every later ask(optimizer=...) call fail.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def refresh_identity(self, browser: str = None):
        """
        Refreshes the browser identity fingerprint.

        Args:
            browser: Specific browser to use for the new fingerprint

        Returns:
            The newly generated fingerprint dict.
        """
        browser = browser or self.fingerprint.get("browser_type", "chrome")
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Update headers with new fingerprint
        self.headers.update({
            "Accept": self.fingerprint["accept"],
            "Accept-Language": self.fingerprint["accept_language"],
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
        })

        # Update session headers
        for header, value in self.headers.items():
            self.session.headers[header] = value

        return self.fingerprint

    def ask(
        self,
        prompt: str,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Dict[str, Any]:
        """Send a prompt and return the accumulated response text.

        Args:
            prompt (str): The user's message.
            optimizer (str): Name of an ``Optimizers`` method to preprocess the prompt.
            conversationally (bool): Apply the optimizer to the full conversation prompt.

        Returns:
            dict: ``{"text": <response text>}``.

        Raises:
            exceptions.FailedToGenerateResponseError: On any request failure.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        messages = [
            {"role": "system", "content": self.system_prompt},
            {"role": "user", "content": conversation_prompt, "parts": [{"type": "text", "text": conversation_prompt}]}
        ]

        # Prepare the request payload
        payload = {
            "id": self.chat_id,
            "messages": messages,
            "model": self.model,
            "group": self.search_mode,
            "user_id": self.user_id,
            "timezone": "Asia/Calcutta"
        }

        try:
            response = self.session.post(self.url, json=payload, timeout=self.timeout)
            if response.status_code != 200:
                error_content = self._read_error_content(response)

                if response.status_code in [403, 429]:
                    # Likely bot detection / rate limiting: rotate identity once.
                    print(f"Received status code {response.status_code}, refreshing identity...")
                    self.refresh_identity()
                    response = self.session.post(self.url, json=payload, timeout=self.timeout)
                    if not response.ok:
                        # Report the retry's content, not the stale first error.
                        error_content = self._read_error_content(response)
                        raise exceptions.FailedToGenerateResponseError(
                            f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {error_content}"
                        )
                    print("Identity refreshed successfully.")
                else:
                    raise exceptions.FailedToGenerateResponseError(
                        f"Request failed with status code {response.status_code}. Response: {error_content}"
                    )

            full_response = ""

            for raw_line in response.iter_lines():
                if not raw_line:
                    continue
                try:
                    line_str = raw_line.decode('utf-8')
                except UnicodeDecodeError:
                    # Skip undecodable chunks instead of aborting the stream.
                    continue

                # Stream lines look like: 0:"chunk of text"
                match = re.search(r'0:"(.*?)"', line_str)
                if match:
                    full_response += match.group(1)

            self.last_response = {"text": full_response}
            self.conversation.update_chat_history(prompt, full_response)
            return {"text": full_response}
        except exceptions.FailedToGenerateResponseError:
            raise
        except Exception as e:
            raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

    @staticmethod
    def _read_error_content(response) -> str:
        """Best-effort extraction of a response body for error messages."""
        try:
            return response.text
        except Exception:
            return "<could not read response content>"

    def chat(
        self,
        prompt: str,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        """Send a prompt and return the response as a plain string."""
        return self.get_message(
            self.ask(
                prompt, optimizer=optimizer, conversationally=conversationally
            )
        )

    def get_message(self, response: dict) -> str:
        """Extract the text from an ``ask`` result, un-escaping ``\\n``."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        # The second replace('\\n\\n', ...) in the original was dead code:
        # after the first replace no literal backslash-n pairs can remain.
        return response["text"].replace('\\n', '\n')
244
if __name__ == "__main__":
    # Smoke-test every advertised model and print a one-line status table.
    rule = "-" * 100
    print(rule)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print(rule)

    test_prompt = "Say 'Hello' in one word"

    # Test each model
    for model_name in SciraAI.AVAILABLE_MODELS:
        print(f"\rTesting {model_name}...", end="")

        try:
            client = SciraAI(model=model_name, timeout=120)  # Increased timeout
            reply = client.chat(test_prompt)
            stripped = reply.strip() if reply else ""

            if stripped:
                status = "✓"
                # Clean and truncate response
                clean_text = stripped.encode('utf-8', errors='ignore').decode('utf-8')
                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
            else:
                status = "✗"
                display_text = "Empty or invalid response"

            print(f"\r{model_name:<50} {status:<10} {display_text}")
        except Exception as exc:
            error_msg = str(exc)
            # Truncate very long error messages
            if len(error_msg) > 100:
                error_msg = error_msg[:97] + "..."
            print(f"\r{model_name:<50} {'✗':<10} Error: {error_msg}")