webscout-8.0-py3-none-any.whl → webscout-8.2-py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of webscout has been flagged as possibly problematic.
Files changed (80):
  1. inferno/__init__.py +6 -0
  2. inferno/__main__.py +9 -0
  3. inferno/cli.py +6 -0
  4. webscout/Local/__init__.py +6 -0
  5. webscout/Local/__main__.py +9 -0
  6. webscout/Local/api.py +576 -0
  7. webscout/Local/cli.py +338 -0
  8. webscout/Local/config.py +75 -0
  9. webscout/Local/llm.py +188 -0
  10. webscout/Local/model_manager.py +205 -0
  11. webscout/Local/server.py +187 -0
  12. webscout/Local/utils.py +93 -0
  13. webscout/Provider/AISEARCH/DeepFind.py +1 -1
  14. webscout/Provider/AISEARCH/ISou.py +1 -1
  15. webscout/Provider/AISEARCH/Perplexity.py +359 -0
  16. webscout/Provider/AISEARCH/__init__.py +3 -1
  17. webscout/Provider/AISEARCH/felo_search.py +1 -1
  18. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  19. webscout/Provider/AISEARCH/hika_search.py +1 -1
  20. webscout/Provider/AISEARCH/iask_search.py +436 -0
  21. webscout/Provider/AISEARCH/scira_search.py +9 -5
  22. webscout/Provider/AISEARCH/webpilotai_search.py +1 -1
  23. webscout/Provider/ExaAI.py +1 -1
  24. webscout/Provider/ExaChat.py +18 -8
  25. webscout/Provider/GithubChat.py +5 -1
  26. webscout/Provider/Glider.py +4 -2
  27. webscout/Provider/Jadve.py +2 -2
  28. webscout/Provider/OPENAI/__init__.py +24 -0
  29. webscout/Provider/OPENAI/base.py +46 -0
  30. webscout/Provider/OPENAI/c4ai.py +347 -0
  31. webscout/Provider/OPENAI/chatgpt.py +549 -0
  32. webscout/Provider/OPENAI/chatgptclone.py +460 -0
  33. webscout/Provider/OPENAI/deepinfra.py +284 -0
  34. webscout/Provider/OPENAI/exaai.py +419 -0
  35. webscout/Provider/OPENAI/exachat.py +433 -0
  36. webscout/Provider/OPENAI/freeaichat.py +355 -0
  37. webscout/Provider/OPENAI/glider.py +316 -0
  38. webscout/Provider/OPENAI/heckai.py +337 -0
  39. webscout/Provider/OPENAI/llmchatco.py +327 -0
  40. webscout/Provider/OPENAI/netwrck.py +348 -0
  41. webscout/Provider/OPENAI/opkfc.py +488 -0
  42. webscout/Provider/OPENAI/scirachat.py +463 -0
  43. webscout/Provider/OPENAI/sonus.py +294 -0
  44. webscout/Provider/OPENAI/standardinput.py +425 -0
  45. webscout/Provider/OPENAI/textpollinations.py +285 -0
  46. webscout/Provider/OPENAI/toolbaz.py +405 -0
  47. webscout/Provider/OPENAI/typegpt.py +361 -0
  48. webscout/Provider/OPENAI/uncovrAI.py +455 -0
  49. webscout/Provider/OPENAI/utils.py +211 -0
  50. webscout/Provider/OPENAI/venice.py +428 -0
  51. webscout/Provider/OPENAI/wisecat.py +381 -0
  52. webscout/Provider/OPENAI/writecream.py +158 -0
  53. webscout/Provider/OPENAI/x0gpt.py +389 -0
  54. webscout/Provider/OPENAI/yep.py +329 -0
  55. webscout/Provider/StandardInput.py +278 -0
  56. webscout/Provider/TextPollinationsAI.py +27 -28
  57. webscout/Provider/Venice.py +1 -1
  58. webscout/Provider/Writecream.py +211 -0
  59. webscout/Provider/WritingMate.py +197 -0
  60. webscout/Provider/Youchat.py +30 -26
  61. webscout/Provider/__init__.py +14 -6
  62. webscout/Provider/koala.py +2 -2
  63. webscout/Provider/llmchatco.py +5 -0
  64. webscout/Provider/scira_chat.py +18 -12
  65. webscout/Provider/scnet.py +187 -0
  66. webscout/Provider/toolbaz.py +320 -0
  67. webscout/Provider/typegpt.py +3 -184
  68. webscout/Provider/uncovr.py +3 -3
  69. webscout/conversation.py +32 -32
  70. webscout/prompt_manager.py +2 -1
  71. webscout/version.py +1 -1
  72. webscout-8.2.dist-info/METADATA +734 -0
  73. {webscout-8.0.dist-info → webscout-8.2.dist-info}/RECORD +77 -32
  74. webscout-8.2.dist-info/entry_points.txt +5 -0
  75. {webscout-8.0.dist-info → webscout-8.2.dist-info}/top_level.txt +1 -0
  76. webscout/Provider/flowith.py +0 -207
  77. webscout-8.0.dist-info/METADATA +0 -995
  78. webscout-8.0.dist-info/entry_points.txt +0 -3
  79. {webscout-8.0.dist-info → webscout-8.2.dist-info}/LICENSE.md +0 -0
  80. {webscout-8.0.dist-info → webscout-8.2.dist-info}/WHEEL +0 -0
webscout/Provider/scnet.py (new file)

@@ -0,0 +1,187 @@
+ import requests
+ import json
+ import secrets
+ from typing import Any, Dict, Optional, Generator, Union
+
+ from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+
+ class SCNet(Provider):
+     """
+     Provider for the SCNet chatbot API.
+     """
+     AVAILABLE_MODELS = [
+         {"modelId": 2, "name": "Deepseek-r1-7B"},
+         {"modelId": 3, "name": "Deepseek-r1-32B"},
+         {"modelId": 5, "name": "Deepseek-r1-70B"},
+         {"modelId": 7, "name": "QWQ-32B"},
+         {"modelId": 8, "name": "minimax-text-01-456B"},
+         # Add more models here as needed
+     ]
+     MODEL_NAME_TO_ID = {m["name"]: m["modelId"] for m in AVAILABLE_MODELS}
+     MODEL_ID_TO_NAME = {m["modelId"]: m["name"] for m in AVAILABLE_MODELS}
+
+     def __init__(
+         self,
+         model: str = "QWQ-32B",
+         is_conversation: bool = True,
+         max_tokens: int = 2048,
+         timeout: int = 30,
+         intro: Optional[str] = None,
+         filepath: Optional[str] = None,
+         update_file: bool = True,
+         proxies: Optional[dict] = None,
+         history_offset: int = 0,
+         act: Optional[str] = None,
+         system_prompt: str = (
+             "You are a helpful, advanced LLM assistant. "
+             "You must always answer in English, regardless of the user's language. "
+             "If the user asks in another language, politely respond in English only. "
+             "Be clear, concise, and helpful."
+         ),
+     ):
+         if model not in self.MODEL_NAME_TO_ID:
+             raise ValueError(f"Invalid model: {model}. Choose from: {list(self.MODEL_NAME_TO_ID.keys())}")
+         self.model = model
+         self.modelId = self.MODEL_NAME_TO_ID[model]
+         self.system_prompt = system_prompt
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response: Dict[str, Any] = {}
+         self.proxies = proxies or {}
+         self.cookies = {
+             "Token": secrets.token_hex(16),
+         }
+         self.headers = {
+             "accept": "text/event-stream",
+             "content-type": "application/json",
+             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36 Edg/135.0.0.0",
+             "referer": "https://www.scnet.cn/ui/chatbot/temp_1744712663464",
+             "origin": "https://www.scnet.cn",
+         }
+         self.url = "https://www.scnet.cn/acx/chatbot/v1/chat/completion"
+         # Materialized as a tuple: a bare generator would be exhausted after
+         # the first membership test in ask().
+         self.__available_optimizers = tuple(
+             method for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(act, raise_not_found=True, default=None, case_insensitive=True)
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(is_conversation, max_tokens, filepath, update_file)
+         self.conversation.history_offset = history_offset
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,  # kept for interface parity; the response shape is the same either way
+         optimizer: Optional[str] = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise exceptions.FailedToGenerateResponseError(f"Optimizer is not one of {list(self.__available_optimizers)}")
+
+         payload = {
+             "conversationId": "",
+             "content": f"SYSTEM: {self.system_prompt} USER: {conversation_prompt}",
+             "thinking": 0,
+             "online": 0,
+             "modelId": self.modelId,
+             "textFile": [],
+             "imageFile": [],
+             "clusterId": ""
+         }
+
+         def for_stream():
+             try:
+                 with self.session.post(
+                     self.url,
+                     headers=self.headers,
+                     cookies=self.cookies,
+                     json=payload,
+                     stream=True,
+                     timeout=self.timeout,
+                     proxies=self.proxies
+                 ) as resp:
+                     streaming_text = ""
+                     # The endpoint speaks SSE: each "data:" line carries a JSON
+                     # fragment, and "[done]" marks the end of the stream.
+                     for line in resp.iter_lines(decode_unicode=True):
+                         if line and line.startswith("data:"):
+                             data = line[5:].strip()
+                             if data == "[done]":
+                                 break
+                             if data:
+                                 try:
+                                     obj = json.loads(data)
+                                     content = obj.get("content", "")
+                                     streaming_text += content
+                                     yield {"text": content}
+                                 except Exception:
+                                     continue
+                     self.last_response = {"text": streaming_text}
+                     self.conversation.update_chat_history(prompt, streaming_text)
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+         def for_non_stream():
+             text = ""
+             for chunk in for_stream():
+                 text += chunk["text"]
+             return {"text": text}
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: Optional[str] = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator[str, None, None]]:
+         def for_stream():
+             for response in self.ask(
+                 prompt, stream=True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt, stream=False, optimizer=optimizer, conversationally=conversationally
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+ if __name__ == "__main__":
+     print("-" * 80)
+     print(f"{'ModelId':<10} {'Model':<30} {'Status':<10} {'Response'}")
+     print("-" * 80)
+     for model in SCNet.AVAILABLE_MODELS:
+         try:
+             test_ai = SCNet(model=model["name"], timeout=60)
+             response = test_ai.chat("Say 'Hello' in one word", stream=True)
+             response_text = ""
+             for chunk in response:
+                 response_text += chunk
+             if response_text.strip():
+                 status = "✓"
+                 display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"{model['modelId']:<10} {model['name']:<30} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"{model['modelId']:<10} {model['name']:<30} {'✗':<10} {str(e)}")
webscout/Provider/toolbaz.py (new file)

@@ -0,0 +1,320 @@
+ import re
+ import requests
+ import uuid
+ import base64
+ import json
+ import random
+ import string
+ import time
+ from datetime import datetime
+ from typing import Any, Dict, Optional, Generator, Union, List
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+
+ class Toolbaz(Provider):
+     """
+     A class to interact with the Toolbaz API. Supports streaming responses.
+     """
+
+     AVAILABLE_MODELS = [
+         "gemini-2.0-flash-thinking",
+         "gemini-2.0-flash",
+         "gemini-1.5-flash",
+         "gpt-4o-latest",
+         "gpt-4o-mini",
+         "gpt-4o",
+         "deepseek-r1",
+         "Llama-3.3-70B",
+         "Llama-3.1-405B",
+         "Llama-3.1-70B",
+         "Qwen2.5-72B",
+         "Qwen2-72B",
+         "grok-2-1212",
+         "grok-beta",
+         "toolbaz_v3.5_pro",
+         "toolbaz_v3",
+         "mixtral_8x22b",
+         "L3-70B-Euryale-v2.1",
+         "midnight-rose",
+         "unity",
+         "unfiltered_x"
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: Optional[str] = None,
+         filepath: Optional[str] = None,
+         update_file: bool = True,
+         proxies: Optional[dict] = None,  # mutable default argument replaced with None
+         history_offset: int = 10250,
+         act: Optional[str] = None,
+         model: str = "gemini-2.0-flash",
+         system_prompt: str = "You are a helpful AI assistant."
+     ):
+         """
+         Initializes the Toolbaz API client with the given parameters.
+         """
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.system_prompt = system_prompt
+         self.model = model
+         self.proxies = proxies or {}
+
+         # Set up headers
+         self.session.headers.update({
+             "user-agent": "Mozilla/5.0 (Linux; Android 10)",
+             "accept": "*/*",
+             "accept-language": "en-US",
+             "cache-control": "no-cache",
+             "connection": "keep-alive",
+             "content-type": "application/x-www-form-urlencoded; charset=UTF-8",
+             "origin": "https://toolbaz.com",
+             "pragma": "no-cache",
+             "referer": "https://toolbaz.com/",
+             "sec-fetch-mode": "cors"
+         })
+
+         # Materialized as a tuple: a bare generator would be exhausted after
+         # the first membership test in ask().
+         self.__available_optimizers = tuple(
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def random_string(self, length):
+         return ''.join(random.choices(string.ascii_letters + string.digits, k=length))
+
+     def generate_token(self):
+         # Builds the obfuscated browser-fingerprint blob the endpoint expects:
+         # a JSON payload, base64-encoded and prefixed with "d8TW0v".
+         payload = {
+             "bR6wF": {
+                 "nV5kP": "Mozilla/5.0 (Linux; Android 10)",
+                 "lQ9jX": "en-US",
+                 "sD2zR": "431x958",
+                 "tY4hL": time.tzname[0] if time.tzname else "UTC",
+                 "pL8mC": "Linux armv81",
+                 "cQ3vD": datetime.now().year,
+                 "hK7jN": datetime.now().hour
+             },
+             "uT4bX": {
+                 "mM9wZ": [],
+                 "kP8jY": []
+             },
+             "tuTcS": int(time.time()),
+             "tDfxy": None,
+             "RtyJt": str(uuid.uuid4())
+         }
+         return "d8TW0v" + base64.b64encode(json.dumps(payload).encode()).decode()
+
+     def get_auth(self):
+         try:
+             session_id = self.random_string(36)
+             token = self.generate_token()
+             data = {
+                 "session_id": session_id,
+                 "token": token
+             }
+             resp = self.session.post(
+                 "https://data.toolbaz.com/token.php", data=data, timeout=self.timeout
+             )
+             resp.raise_for_status()
+             result = resp.json()
+             if result.get("success"):
+                 return {"token": result["token"], "session_id": session_id}
+             return None
+         except Exception:
+             return None
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,  # kept for compatibility with other providers
+         optimizer: Optional[str] = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator]:
+         """Sends a prompt to the Toolbaz API and returns the response."""
+         if optimizer and optimizer not in self.__available_optimizers:
+             raise exceptions.FailedToGenerateResponseError(f"Optimizer is not one of {list(self.__available_optimizers)}")
+
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             conversation_prompt = getattr(Optimizers, optimizer)(
+                 conversation_prompt if conversationally else prompt
+             )
+
+         auth = self.get_auth()
+         if not auth:
+             raise exceptions.ProviderConnectionError("Failed to authenticate with Toolbaz API")
+
+         data = {
+             "text": conversation_prompt,
+             "capcha": auth["token"],
+             "model": self.model,
+             "session_id": auth["session_id"]
+         }
+
+         def for_stream():
+             try:
+                 resp = self.session.post(
+                     "https://data.toolbaz.com/writing.php",
+                     data=data,
+                     stream=True,
+                     proxies=self.proxies,
+                     timeout=self.timeout
+                 )
+                 resp.raise_for_status()
+
+                 buffer = ""
+                 tag_start = "[model:"
+                 streaming_text = ""
+
+                 # NOTE: byte-wise decoding can drop multi-byte UTF-8 characters
+                 # that straddle a chunk boundary.
+                 for chunk in resp.iter_content(chunk_size=1):
+                     if chunk:
+                         buffer += chunk.decode(errors="ignore")
+                         # Drop every complete [model: ...] tag in the buffer
+                         buffer = re.sub(r"\[model:.*?\]", "", buffer)
+                         # Hold back an unterminated "[model:" tag, or a partial
+                         # prefix of one split across chunks; yield the rest.
+                         cut = buffer.rfind(tag_start)
+                         if cut == -1:
+                             cut = len(buffer)
+                             for i in range(len(tag_start) - 1, 0, -1):
+                                 if buffer.endswith(tag_start[:i]):
+                                     cut = len(buffer) - i
+                                     break
+                         if buffer[:cut]:
+                             streaming_text += buffer[:cut]
+                             yield {"text": buffer[:cut]}
+                         buffer = buffer[cut:]
+
+                 # Flush whatever remains after stripping any trailing tag
+                 buffer = re.sub(r"\[model:.*?\]", "", buffer)
+                 if buffer:
+                     streaming_text += buffer
+                     yield {"text": buffer}
+
+                 self.last_response = {"text": streaming_text}
+                 self.conversation.update_chat_history(prompt, streaming_text)
+
+             except requests.exceptions.RequestException as e:
+                 raise exceptions.ProviderConnectionError(f"Network error: {str(e)}") from e
+             except Exception as e:
+                 raise exceptions.ProviderConnectionError(f"Unexpected error: {str(e)}") from e
+
+         def for_non_stream():
+             try:
+                 resp = self.session.post(
+                     "https://data.toolbaz.com/writing.php",
+                     data=data,
+                     proxies=self.proxies,
+                     timeout=self.timeout
+                 )
+                 resp.raise_for_status()
+
+                 # Remove [model: ...] tags
+                 text = re.sub(r"\[model:.*?\]", "", resp.text)
+
+                 self.last_response = {"text": text}
+                 self.conversation.update_chat_history(prompt, text)
+
+                 return self.last_response
+
+             except requests.exceptions.RequestException as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Network error: {str(e)}") from e
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Unexpected error: {str(e)}") from e
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: Optional[str] = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator[str, None, None]]:
+         """Generates a response from the Toolbaz API."""
+         def for_stream():
+             for response in self.ask(
+                 prompt,
+                 stream=True,
+                 optimizer=optimizer,
+                 conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     stream=False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: Dict[str, Any]) -> str:
+         """Extract the message from the response.
+
+         Args:
+             response: Response dictionary
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response.get("text", "")
+
+ # Example usage
+ if __name__ == "__main__":
+     # Test the provider with different models
+     for model in Toolbaz.AVAILABLE_MODELS:
+         try:
+             test_ai = Toolbaz(model=model, timeout=60)
+             response = test_ai.chat("Say 'Hello' in one word", stream=True)
+             response_text = ""
+             for chunk in response:
+                 response_text += chunk
+                 print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+             if response_text.strip():
+                 status = "✓"
+                 # Truncate response if too long
+                 display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"\r{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"\r{model:<50} {'✗':<10} {str(e)}")