webscout-8.3.6-py3-none-any.whl → webscout-8.3.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (130)
  1. webscout/AIutel.py +2 -0
  2. webscout/Provider/AISEARCH/__init__.py +18 -11
  3. webscout/Provider/AISEARCH/scira_search.py +3 -1
  4. webscout/Provider/Aitopia.py +2 -3
  5. webscout/Provider/Andi.py +3 -3
  6. webscout/Provider/ChatGPTClone.py +1 -1
  7. webscout/Provider/ChatSandbox.py +1 -0
  8. webscout/Provider/Cloudflare.py +1 -1
  9. webscout/Provider/Cohere.py +1 -0
  10. webscout/Provider/Deepinfra.py +7 -10
  11. webscout/Provider/ExaAI.py +1 -1
  12. webscout/Provider/ExaChat.py +1 -80
  13. webscout/Provider/Flowith.py +1 -1
  14. webscout/Provider/Gemini.py +7 -5
  15. webscout/Provider/GeminiProxy.py +1 -0
  16. webscout/Provider/GithubChat.py +3 -1
  17. webscout/Provider/Groq.py +1 -1
  18. webscout/Provider/HeckAI.py +8 -4
  19. webscout/Provider/Jadve.py +23 -38
  20. webscout/Provider/K2Think.py +308 -0
  21. webscout/Provider/Koboldai.py +8 -186
  22. webscout/Provider/LambdaChat.py +2 -4
  23. webscout/Provider/Nemotron.py +3 -4
  24. webscout/Provider/Netwrck.py +3 -2
  25. webscout/Provider/OLLAMA.py +1 -0
  26. webscout/Provider/OPENAI/Cloudflare.py +6 -7
  27. webscout/Provider/OPENAI/FalconH1.py +2 -7
  28. webscout/Provider/OPENAI/FreeGemini.py +6 -8
  29. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
  30. webscout/Provider/OPENAI/NEMOTRON.py +3 -6
  31. webscout/Provider/OPENAI/PI.py +5 -4
  32. webscout/Provider/OPENAI/Qwen3.py +2 -3
  33. webscout/Provider/OPENAI/TogetherAI.py +2 -2
  34. webscout/Provider/OPENAI/TwoAI.py +3 -4
  35. webscout/Provider/OPENAI/__init__.py +17 -58
  36. webscout/Provider/OPENAI/ai4chat.py +313 -303
  37. webscout/Provider/OPENAI/base.py +9 -29
  38. webscout/Provider/OPENAI/chatgpt.py +7 -2
  39. webscout/Provider/OPENAI/chatgptclone.py +4 -7
  40. webscout/Provider/OPENAI/chatsandbox.py +84 -59
  41. webscout/Provider/OPENAI/deepinfra.py +6 -6
  42. webscout/Provider/OPENAI/heckai.py +4 -1
  43. webscout/Provider/OPENAI/netwrck.py +1 -0
  44. webscout/Provider/OPENAI/scirachat.py +6 -0
  45. webscout/Provider/OPENAI/textpollinations.py +3 -11
  46. webscout/Provider/OPENAI/toolbaz.py +14 -11
  47. webscout/Provider/OpenGPT.py +1 -1
  48. webscout/Provider/Openai.py +150 -402
  49. webscout/Provider/PI.py +1 -0
  50. webscout/Provider/Perplexitylabs.py +1 -2
  51. webscout/Provider/QwenLM.py +107 -89
  52. webscout/Provider/STT/__init__.py +17 -2
  53. webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
  54. webscout/Provider/StandardInput.py +1 -1
  55. webscout/Provider/TTI/__init__.py +18 -12
  56. webscout/Provider/TTS/__init__.py +18 -10
  57. webscout/Provider/TeachAnything.py +1 -0
  58. webscout/Provider/TextPollinationsAI.py +5 -12
  59. webscout/Provider/TogetherAI.py +86 -87
  60. webscout/Provider/TwoAI.py +53 -309
  61. webscout/Provider/TypliAI.py +2 -1
  62. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
  63. webscout/Provider/Venice.py +2 -1
  64. webscout/Provider/VercelAI.py +1 -0
  65. webscout/Provider/WiseCat.py +2 -1
  66. webscout/Provider/WrDoChat.py +2 -1
  67. webscout/Provider/__init__.py +18 -86
  68. webscout/Provider/ai4chat.py +1 -1
  69. webscout/Provider/akashgpt.py +7 -10
  70. webscout/Provider/cerebras.py +115 -9
  71. webscout/Provider/chatglm.py +170 -83
  72. webscout/Provider/cleeai.py +1 -2
  73. webscout/Provider/deepseek_assistant.py +1 -1
  74. webscout/Provider/elmo.py +1 -1
  75. webscout/Provider/geminiapi.py +1 -1
  76. webscout/Provider/granite.py +1 -1
  77. webscout/Provider/hermes.py +1 -3
  78. webscout/Provider/julius.py +1 -0
  79. webscout/Provider/learnfastai.py +1 -1
  80. webscout/Provider/llama3mitril.py +1 -1
  81. webscout/Provider/llmchat.py +1 -1
  82. webscout/Provider/llmchatco.py +1 -1
  83. webscout/Provider/meta.py +3 -3
  84. webscout/Provider/oivscode.py +2 -2
  85. webscout/Provider/scira_chat.py +51 -124
  86. webscout/Provider/searchchat.py +1 -0
  87. webscout/Provider/sonus.py +1 -1
  88. webscout/Provider/toolbaz.py +15 -12
  89. webscout/Provider/turboseek.py +31 -22
  90. webscout/Provider/typefully.py +2 -1
  91. webscout/Provider/x0gpt.py +1 -0
  92. webscout/Provider/yep.py +2 -1
  93. webscout/tempid.py +6 -0
  94. webscout/version.py +1 -1
  95. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/METADATA +2 -1
  96. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/RECORD +103 -129
  97. webscout/Provider/AllenAI.py +0 -440
  98. webscout/Provider/Blackboxai.py +0 -793
  99. webscout/Provider/FreeGemini.py +0 -250
  100. webscout/Provider/GptOss.py +0 -207
  101. webscout/Provider/Hunyuan.py +0 -283
  102. webscout/Provider/Kimi.py +0 -445
  103. webscout/Provider/MCPCore.py +0 -322
  104. webscout/Provider/MiniMax.py +0 -207
  105. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  106. webscout/Provider/OPENAI/MiniMax.py +0 -298
  107. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  108. webscout/Provider/OPENAI/copilot.py +0 -321
  109. webscout/Provider/OPENAI/gptoss.py +0 -288
  110. webscout/Provider/OPENAI/kimi.py +0 -469
  111. webscout/Provider/OPENAI/mcpcore.py +0 -431
  112. webscout/Provider/OPENAI/multichat.py +0 -378
  113. webscout/Provider/Reka.py +0 -214
  114. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  115. webscout/Provider/asksteve.py +0 -220
  116. webscout/Provider/copilot.py +0 -441
  117. webscout/Provider/freeaichat.py +0 -294
  118. webscout/Provider/koala.py +0 -182
  119. webscout/Provider/lmarena.py +0 -198
  120. webscout/Provider/monochat.py +0 -275
  121. webscout/Provider/multichat.py +0 -375
  122. webscout/Provider/scnet.py +0 -244
  123. webscout/Provider/talkai.py +0 -194
  124. /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
  125. /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
  126. /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
  127. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
  128. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
  129. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
  130. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
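
Several providers were renamed or relocated rather than removed (items 29, 53, 62, and 124-126 above), so downstream imports need updating. A minimal before/after sketch, assuming the exported class names match the new file names (not verified against the released wheel):

    # webscout 8.3.6 module paths (gone in 8.3.7):
    #   webscout/Provider/Llama3.py
    #   webscout/Provider/OPENAI/monochat.py

    # webscout 8.3.7 paths, per the rename entries above; class names are assumptions:
    from webscout.Provider.Sambanova import Sambanova      # was Provider/Llama3.py
    from webscout.Provider.OPENAI.K2Think import K2Think   # was Provider/OPENAI/monochat.py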
webscout/Provider/AllenAI.py (deleted)
@@ -1,440 +0,0 @@
- from curl_cffi.requests import Session
- from curl_cffi import CurlError
- import json
- import os
- from uuid import uuid4
- from typing import Any, Dict, Optional, Generator, Union
-
- from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts, sanitize_stream
- from webscout.AIbase import Provider, AsyncProvider
- from webscout import exceptions
- from webscout.litagent import LitAgent
-
- class AllenAI(Provider):
-     """
-     A class to interact with the AllenAI (Ai2 Playground) API.
-     """
-
-     AVAILABLE_MODELS = [
-         'olmo-2-0325-32b-instruct',
-         'tulu3-405b'
-     ]
-
-     # Default model options from JS implementation
-     DEFAULT_OPTIONS = {
-         "max_tokens": 2048,
-         "temperature": 0.7,
-         "top_p": 1,
-         "n": 1,
-         "stop": None,
-         "logprobs": None
-     }
-
-     # Host mapping for models - some models work best with specific hosts
-     MODEL_HOST_MAP = {
-         'olmo-2-0325-32b-instruct': 'modal',
-         'tulu3-405b': 'inferd'
-     }
-
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 2048,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         model: str = "OLMo-2-1124-13B-Instruct",
-         host: str = None
-     ):
-         """Initializes the AllenAI API client."""
-         if model not in self.AVAILABLE_MODELS:
-             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-         self.url = "https://playground.allenai.org"
-         self.api_endpoint = "https://olmo-api.allen.ai/v3/message/stream"
-         self.whoami_endpoint = "https://olmo-api.allen.ai/v3/whoami"
-
-         # Updated headers (remove those handled by impersonate)
-         self.headers = {
-             'Accept': '*/*',
-             'Accept-Language': 'id-ID,id;q=0.9',
-             'Origin': self.url,
-             'Referer': f"{self.url}/",
-             'Cache-Control': 'no-cache',
-             'Pragma': 'no-cache',
-             'Priority': 'u=1, i',
-             'Sec-Fetch-Dest': 'empty',
-             'Sec-Fetch-Mode': 'cors',
-             'Sec-Fetch-Site': 'cross-site',
-             'Content-Type': 'application/json'
-         }
-
-         # Initialize curl_cffi Session
-         self.session = Session()
-         # Update curl_cffi session headers and proxies
-         self.session.headers.update(self.headers)
-         self.session.proxies = proxies
-
-         self.model = model
-
-         # Auto-detect host if not provided
-         if not host:
-             # Use the preferred host from the model-host map, or default to modal
-             self.host = self.MODEL_HOST_MAP.get(model, 'modal')
-         else:
-             self.host = host
-
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.timeout = timeout
-         self.last_response = {}
-         # Generate user ID if needed
-         self.x_anonymous_user_id = None
-         self.parent = None
-
-         # Default options
-         self.options = self.DEFAULT_OPTIONS.copy()
-         self.options["max_tokens"] = max_tokens
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-
-     def whoami(self):
-         """Gets or creates a user ID for authentication with Allen AI API"""
-         temp_id = str(uuid4())
-         request_headers = self.session.headers.copy() # Use session headers as base
-         request_headers.update({"x-anonymous-user-id": temp_id})
-
-         try:
-             # Use curl_cffi session get with impersonate
-             response = self.session.get(
-                 self.whoami_endpoint,
-                 headers=request_headers, # Pass updated headers
-                 timeout=self.timeout,
-                 impersonate="chrome110" # Use a common impersonation profile
-             )
-             response.raise_for_status() # Check for HTTP errors
-
-             data = response.json()
-             self.x_anonymous_user_id = data.get("client", temp_id)
-             return data
-
-         except CurlError as e: # Catch CurlError
-             self.x_anonymous_user_id = temp_id
-             return {"client": temp_id, "error": f"CurlError: {e}"}
-         except Exception as e: # Catch other potential exceptions (like HTTPError, JSONDecodeError)
-             self.x_anonymous_user_id = temp_id
-             err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
-             return {"client": temp_id, "error": f"{type(e).__name__}: {e} - {err_text}"}
-
-     @staticmethod
-     def _allenai_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
-         """Extracts content from AllenAI stream JSON objects."""
-         if isinstance(chunk, dict):
-             if chunk.get("message", "").startswith("msg_") and "content" in chunk:
-                 return chunk.get("content")
-             elif "message" in chunk and chunk.get("content"): # Legacy handling
-                 return chunk.get("content")
-         return None
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False, # API supports streaming
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-         host: str = None,
-         private: bool = False,
-         top_p: float = None,
-         temperature: float = None,
-         options: dict = None,
-     ) -> Union[Dict[str, Any], Generator]:
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
-             else:
-                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-         # Ensure we have a user ID
-         if not self.x_anonymous_user_id:
-             self.whoami()
-             # Check if whoami failed and we still don't have an ID
-             if not self.x_anonymous_user_id:
-                 raise exceptions.AuthenticationError("Failed to obtain anonymous user ID.")
-
-         # Prepare the API request headers for this specific request
-         request_headers = self.session.headers.copy()
-         request_headers.update({
-             "x-anonymous-user-id": self.x_anonymous_user_id,
-             "Content-Type": "application/json" # Ensure Content-Type is set
-         })
-
-         # Create options dictionary
-         opts = self.options.copy()
-         if temperature is not None:
-             opts["temperature"] = temperature
-         if top_p is not None:
-             opts["top_p"] = top_p
-         if options:
-             opts.update(options)
-
-         # Use the host param or the default host
-         use_host = host or self.host
-
-         # List of hosts to try - start with provided host, then try alternative hosts
-         hosts_to_try = [use_host]
-         if use_host == 'modal':
-             hosts_to_try.append('inferd')
-         else:
-             hosts_to_try.append('modal')
-
-         last_error = None
-
-         # Try each host until one works
-         for current_host in hosts_to_try:
-             # Create the JSON payload as per the JS implementation
-             payload = {
-                 "content": conversation_prompt,
-                 "private": private,
-                 "model": self.model,
-                 "host": current_host,
-                 "opts": opts
-             }
-             payload["host"] = current_host # Ensure host is updated in payload
-
-             try:
-                 if stream:
-                     # Pass request_headers to the stream method
-                     return self._stream_request(payload, prompt, request_headers, raw)
-                 else:
-                     # Pass request_headers to the non-stream method
-                     return self._non_stream_request(payload, prompt, request_headers, raw)
-             except (exceptions.FailedToGenerateResponseError, CurlError, Exception) as e:
-                 last_error = e
-                 # Log the error but continue to try other hosts
-                 print(f"Host '{current_host}' failed for model '{self.model}' ({type(e).__name__}), trying next host...")
-                 continue
-
-         # If we've tried all hosts and none worked, raise the last error
-         raise last_error or exceptions.FailedToGenerateResponseError("All hosts failed. Unable to complete request.")
-
-     def _stream_request(self, payload, prompt, request_headers, raw=False):
-         """Handle streaming requests with the given payload and headers"""
-         streaming_text = "" # Initialize outside try block
-         current_parent = None # Initialize outside try block
-         try:
-             # Use curl_cffi session post with impersonate
-             response = self.session.post(
-                 self.api_endpoint,
-                 headers=request_headers, # Use headers passed to this method
-                 json=payload,
-                 stream=True,
-                 timeout=self.timeout,
-                 impersonate="chrome110" # Use a common impersonation profile
-             )
-             response.raise_for_status() # Check for HTTP errors
-
-             # Use sanitize_stream
-             processed_stream = sanitize_stream(
-                 data=response.iter_content(chunk_size=None), # Pass byte iterator
-                 intro_value=None, # No prefix
-                 to_json=True, # Stream sends JSON lines
-                 content_extractor=self._allenai_extractor, # Use the specific extractor
-                 yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
-             )
-
-             for content_chunk in processed_stream:
-                 # content_chunk is the string extracted by _allenai_extractor
-                 if content_chunk and isinstance(content_chunk, str):
-                     streaming_text += content_chunk
-                     resp = dict(text=content_chunk)
-                     yield resp if not raw else content_chunk
-
-             # Try to extract parent ID from the *last* raw line (less reliable than before)
-             # This part is tricky as sanitize_stream consumes the raw lines.
-             # We might need to re-fetch or adjust if parent ID is critical per stream.
-             # For now, we'll rely on the non-stream request to update parent ID more reliably.
-             # Example placeholder logic (might not work reliably):
-             try:
-                 last_line_data = json.loads(response.text.splitlines()[-1]) # Get last line if possible
-                 if last_line_data.get("id"):
-                     current_parent = last_line_data.get("id")
-                 elif last_line_data.get("children"):
-                     for child in last_line_data["children"]: # Use last_line_data here
-                         if child.get("role") == "assistant":
-                             current_parent = child.get("id")
-                             break
-
-                 # Handle completion
-                 if last_line_data.get("final") or last_line_data.get("finish_reason") == "stop":
-                     if current_parent:
-                         self.parent = current_parent
-
-                     # Update conversation history only if not empty
-                     if streaming_text.strip():
-                         self.conversation.update_chat_history(prompt, streaming_text)
-                         self.last_response = {"text": streaming_text} # Update last response here
-                     return # End the generator
-             except Exception as e:
-                 # Log the error but continue with the rest of the function
-                 print(f"Error processing response data: {str(e)}")
-
-             # If loop finishes without returning (e.g., no final message), update history
-             if current_parent:
-                 self.parent = current_parent
-             self.conversation.update_chat_history(prompt, streaming_text)
-             self.last_response = {"text": streaming_text}
-
-         except CurlError as e: # Catch CurlError
-             raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
-         except Exception as e: # Catch other potential exceptions (like HTTPError)
-             err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
-             raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)} - {err_text}") from e
-
-
-     def _non_stream_request(self, payload, prompt, request_headers, raw=False):
-         """Handle non-streaming requests with the given payload and headers"""
-         try:
-             # Use curl_cffi session post with impersonate
-             response = self.session.post(
-                 self.api_endpoint,
-                 headers=request_headers, # Use headers passed to this method
-                 json=payload,
-                 stream=False, # Explicitly set stream to False
-                 timeout=self.timeout,
-                 impersonate="chrome110" # Use a common impersonation profile
-             )
-             response.raise_for_status() # Check for HTTP errors
-
-             raw_response = response.text # Get raw text
-
-             # Process the full text using sanitize_stream line by line
-             processed_stream = sanitize_stream(
-                 data=raw_response.splitlines(), # Split into lines
-                 intro_value=None,
-                 to_json=True,
-                 content_extractor=self._allenai_extractor,
-                 yield_raw_on_error=False
-             )
-             # Aggregate the results
-             parsed_response = "".join(list(processed_stream))
-
-             # Update parent ID from the full response if possible (might need adjustment based on actual non-stream response structure)
-             # This part is speculative as the non-stream structure isn't fully clear from the stream logic
-             try:
-                 lines = raw_response.splitlines()
-                 if lines:
-                     last_line_data = json.loads(lines[-1])
-                     if last_line_data.get("id"):
-                         self.parent = last_line_data.get("id")
-                     elif last_line_data.get("children"):
-                         for child in last_line_data["children"]:
-                             if child.get("role") == "assistant":
-                                 self.parent = child.get("id")
-                                 break
-             except (json.JSONDecodeError, IndexError):
-                 pass # Ignore errors parsing parent ID from non-stream
-
-             self.conversation.update_chat_history(prompt, parsed_response)
-             self.last_response = {"text": parsed_response}
-             return self.last_response if not raw else parsed_response # Return dict or raw string
-
-         except CurlError as e: # Catch CurlError
-             raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
-         except Exception as e: # Catch other potential exceptions (like HTTPError, JSONDecodeError)
-             err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
-             raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)} - {err_text}") from e
-
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-         host: str = None,
-         options: dict = None,
-     ) -> Union[str, Generator[str, None, None]]: # Corrected return type hint
-         def for_stream_chat(): # Renamed inner function
-             # ask() yields dicts or strings when streaming
-             gen = self.ask(
-                 prompt,
-                 stream=True,
-                 raw=False, # Ensure ask yields dicts
-                 optimizer=optimizer,
-                 conversationally=conversationally,
-                 host=host,
-                 options=options
-             )
-             for response_dict in gen:
-                 yield self.get_message(response_dict) # get_message expects dict
-
-         def for_non_stream_chat(): # Renamed inner function
-             # ask() returns dict or str when not streaming
-             response_data = self.ask(
-                 prompt,
-                 stream=False,
-                 raw=False, # Ensure ask returns dict
-                 optimizer=optimizer,
-                 conversationally=conversationally,
-                 host=host,
-                 options=options
-             )
-             return self.get_message(response_data) # get_message expects dict
-
-         return for_stream_chat() if stream else for_non_stream_chat() # Use renamed functions
-
-     def get_message(self, response: dict) -> str:
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
-
-
-
- if __name__ == "__main__":
-     # Ensure curl_cffi is installed
-     print("-" * 80)
-     print(f"{'Model':<50} {'Status':<10} {'Response'}")
-     print("-" * 80)
-
-     for model in AllenAI.AVAILABLE_MODELS:
-         try:
-             # Auto-detect host
-             test_ai = AllenAI(model=model, timeout=60)
-             # Pass the host explicitly to display accurate error messages
-             response = test_ai.chat("Say 'Hello' in one word")
-             response_text = response
-
-             if response_text and len(response_text.strip()) > 0:
-                 status = "✓"
-                 # Truncate response if too long
-                 display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
-                 print(f"{model:<50} {status:<10} {display_text} (host: {test_ai.host})")
-             else:
-                 status = "✗"
-                 display_text = "Empty or invalid response"
-                 print(f"{model:<50} {status:<10} {display_text}")
-         except Exception as e:
-             print(f"{model:<50} {'✗':<10} {str(e)}")