webscout 8.2.3__py3-none-any.whl → 8.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; consult the package registry's advisory page for more details.

Files changed (122):
  1. webscout/AIutel.py +226 -14
  2. webscout/Bard.py +579 -206
  3. webscout/DWEBS.py +78 -35
  4. webscout/Extra/gguf.py +2 -0
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AISEARCH/scira_search.py +2 -5
  8. webscout/Provider/Aitopia.py +75 -51
  9. webscout/Provider/AllenAI.py +181 -147
  10. webscout/Provider/ChatGPTClone.py +97 -86
  11. webscout/Provider/ChatSandbox.py +342 -0
  12. webscout/Provider/Cloudflare.py +79 -32
  13. webscout/Provider/Deepinfra.py +135 -94
  14. webscout/Provider/ElectronHub.py +103 -39
  15. webscout/Provider/ExaChat.py +36 -20
  16. webscout/Provider/GPTWeb.py +103 -47
  17. webscout/Provider/GithubChat.py +52 -49
  18. webscout/Provider/GizAI.py +283 -0
  19. webscout/Provider/Glider.py +39 -28
  20. webscout/Provider/Groq.py +222 -91
  21. webscout/Provider/HeckAI.py +93 -69
  22. webscout/Provider/HuggingFaceChat.py +113 -106
  23. webscout/Provider/Hunyuan.py +94 -83
  24. webscout/Provider/Jadve.py +104 -79
  25. webscout/Provider/LambdaChat.py +142 -123
  26. webscout/Provider/Llama3.py +94 -39
  27. webscout/Provider/MCPCore.py +315 -0
  28. webscout/Provider/Marcus.py +95 -37
  29. webscout/Provider/Netwrck.py +94 -52
  30. webscout/Provider/OPENAI/__init__.py +4 -1
  31. webscout/Provider/OPENAI/ai4chat.py +286 -0
  32. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  33. webscout/Provider/OPENAI/deepinfra.py +37 -0
  34. webscout/Provider/OPENAI/exachat.py +4 -0
  35. webscout/Provider/OPENAI/groq.py +354 -0
  36. webscout/Provider/OPENAI/heckai.py +6 -2
  37. webscout/Provider/OPENAI/mcpcore.py +376 -0
  38. webscout/Provider/OPENAI/multichat.py +368 -0
  39. webscout/Provider/OPENAI/netwrck.py +3 -1
  40. webscout/Provider/OPENAI/scirachat.py +2 -4
  41. webscout/Provider/OPENAI/textpollinations.py +20 -22
  42. webscout/Provider/OPENAI/toolbaz.py +1 -0
  43. webscout/Provider/OpenGPT.py +48 -38
  44. webscout/Provider/PI.py +178 -93
  45. webscout/Provider/PizzaGPT.py +66 -36
  46. webscout/Provider/StandardInput.py +42 -30
  47. webscout/Provider/TeachAnything.py +95 -52
  48. webscout/Provider/TextPollinationsAI.py +138 -78
  49. webscout/Provider/TwoAI.py +162 -81
  50. webscout/Provider/TypliAI.py +305 -0
  51. webscout/Provider/Venice.py +97 -58
  52. webscout/Provider/VercelAI.py +33 -14
  53. webscout/Provider/WiseCat.py +65 -28
  54. webscout/Provider/Writecream.py +37 -11
  55. webscout/Provider/WritingMate.py +135 -63
  56. webscout/Provider/__init__.py +9 -27
  57. webscout/Provider/ai4chat.py +6 -7
  58. webscout/Provider/asksteve.py +53 -44
  59. webscout/Provider/cerebras.py +77 -31
  60. webscout/Provider/chatglm.py +47 -37
  61. webscout/Provider/copilot.py +0 -3
  62. webscout/Provider/elmo.py +109 -60
  63. webscout/Provider/granite.py +102 -54
  64. webscout/Provider/hermes.py +95 -48
  65. webscout/Provider/koala.py +1 -1
  66. webscout/Provider/learnfastai.py +113 -54
  67. webscout/Provider/llama3mitril.py +86 -51
  68. webscout/Provider/llmchat.py +88 -46
  69. webscout/Provider/llmchatco.py +110 -115
  70. webscout/Provider/meta.py +41 -37
  71. webscout/Provider/multichat.py +67 -28
  72. webscout/Provider/scira_chat.py +49 -30
  73. webscout/Provider/scnet.py +106 -53
  74. webscout/Provider/searchchat.py +87 -88
  75. webscout/Provider/sonus.py +113 -63
  76. webscout/Provider/toolbaz.py +115 -82
  77. webscout/Provider/turboseek.py +90 -43
  78. webscout/Provider/tutorai.py +82 -64
  79. webscout/Provider/typefully.py +85 -35
  80. webscout/Provider/typegpt.py +118 -61
  81. webscout/Provider/uncovr.py +132 -76
  82. webscout/Provider/x0gpt.py +69 -26
  83. webscout/Provider/yep.py +79 -66
  84. webscout/cli.py +256 -0
  85. webscout/conversation.py +34 -22
  86. webscout/exceptions.py +23 -0
  87. webscout/prompt_manager.py +56 -42
  88. webscout/version.py +1 -1
  89. webscout/webscout_search.py +65 -47
  90. webscout/webscout_search_async.py +81 -126
  91. webscout/yep_search.py +93 -43
  92. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
  93. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
  94. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
  95. webscout-8.2.5.dist-info/entry_points.txt +3 -0
  96. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
  97. inferno/__init__.py +0 -6
  98. inferno/__main__.py +0 -9
  99. inferno/cli.py +0 -6
  100. webscout/Local/__init__.py +0 -12
  101. webscout/Local/__main__.py +0 -9
  102. webscout/Local/api.py +0 -576
  103. webscout/Local/cli.py +0 -516
  104. webscout/Local/config.py +0 -75
  105. webscout/Local/llm.py +0 -287
  106. webscout/Local/model_manager.py +0 -253
  107. webscout/Local/server.py +0 -721
  108. webscout/Local/utils.py +0 -93
  109. webscout/Provider/C4ai.py +0 -432
  110. webscout/Provider/ChatGPTES.py +0 -237
  111. webscout/Provider/Chatify.py +0 -175
  112. webscout/Provider/DeepSeek.py +0 -196
  113. webscout/Provider/Llama.py +0 -200
  114. webscout/Provider/Phind.py +0 -535
  115. webscout/Provider/WebSim.py +0 -228
  116. webscout/Provider/askmyai.py +0 -158
  117. webscout/Provider/gaurish.py +0 -244
  118. webscout/Provider/labyrinth.py +0 -340
  119. webscout/Provider/lepton.py +0 -194
  120. webscout/Provider/llamatutor.py +0 -192
  121. webscout-8.2.3.dist-info/entry_points.txt +0 -5
  122. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
webscout/Local/utils.py DELETED
@@ -1,93 +0,0 @@
1
- """
2
- Utility functions for webscout.local
3
- """
4
-
5
- import base64
6
- import logging
7
-
8
- logger = logging.getLogger(__name__)
9
-
10
- def parse_duration(duration_str: str) -> float:
11
- """
12
- Parse a duration string into seconds.
13
-
14
- Args:
15
- duration_str (str): Duration string (e.g., '5m', '1h', '30s', '500ms', '0').
16
- Returns:
17
- float: Duration in seconds.
18
- """
19
- if not duration_str:
20
- return 300.0 # Default 5 minutes
21
- if duration_str.endswith("ms"):
22
- return int(duration_str[:-2]) / 1000.0
23
- elif duration_str.endswith("s"):
24
- return int(duration_str[:-1])
25
- elif duration_str.endswith("m"):
26
- return int(duration_str[:-1]) * 60
27
- elif duration_str.endswith("h"):
28
- return int(duration_str[:-1]) * 3600
29
- elif duration_str == "0":
30
- return 0.0
31
- else:
32
- try:
33
- return float(duration_str)
34
- except ValueError:
35
- return 300.0 # Default 5 minutes
36
-
37
- def format_duration(seconds: float) -> str:
38
- """
39
- Format seconds into a human-readable duration string.
40
- Args:
41
- seconds (float): Duration in seconds.
42
- Returns:
43
- str: Human-readable duration string.
44
- """
45
- if seconds < 1:
46
- return f"{int(seconds * 1000)}ms"
47
- elif seconds < 60:
48
- return f"{int(seconds)}s"
49
- elif seconds < 3600:
50
- return f"{int(seconds / 60)}m"
51
- else:
52
- return f"{int(seconds / 3600)}h"
53
-
54
- def decode_image(image_str: str) -> bytes:
55
- """
56
- Decode a base64 image string to bytes.
57
- Args:
58
- image_str (str): Base64-encoded image string (optionally with data URI prefix).
59
- Returns:
60
- bytes: Decoded image bytes.
61
- """
62
- if image_str.startswith("data:"):
63
- image_str = image_str.split(",", 1)[1]
64
- return base64.b64decode(image_str)
65
-
66
- def encode_image(image_bytes: bytes, mime_type: str = "image/png") -> str:
67
- """
68
- Encode image bytes to a base64 data URI.
69
- Args:
70
- image_bytes (bytes): Image data.
71
- mime_type (str): MIME type for the image.
72
- Returns:
73
- str: Base64-encoded data URI string.
74
- """
75
- encoded = base64.b64encode(image_bytes).decode("utf-8")
76
- return f"data:{mime_type};base64,{encoded}"
77
-
78
- def get_file_size_str(size_bytes: int) -> str:
79
- """
80
- Convert file size in bytes to a human-readable string.
81
- Args:
82
- size_bytes (int): File size in bytes.
83
- Returns:
84
- str: Human-readable file size string.
85
- """
86
- if size_bytes < 1024:
87
- return f"{size_bytes} B"
88
- elif size_bytes < 1024 * 1024:
89
- return f"{size_bytes / 1024:.2f} KB"
90
- elif size_bytes < 1024 * 1024 * 1024:
91
- return f"{size_bytes / (1024 * 1024):.2f} MB"
92
- else:
93
- return f"{size_bytes / (1024 * 1024 * 1024):.2f} GB"
webscout/Provider/C4ai.py DELETED
@@ -1,432 +0,0 @@
1
- import requests
2
- import uuid
3
- import json
4
- import time
5
- import random
6
- import re
7
- from typing import Any, Dict, List, Optional, Union, Generator
8
-
9
- from webscout.AIutel import Conversation
10
- from webscout.AIbase import Provider
11
- from webscout import exceptions
12
- from webscout.litagent import LitAgent
13
-
14
- class C4ai(Provider):
15
- """
16
- A class to interact with the Hugging Face Chat API.
17
- """
18
- # Default available models
19
- AVAILABLE_MODELS = [
20
- 'command-a-03-2025',
21
- 'command-r-plus-08-2024',
22
- 'command-r-08-2024',
23
- 'command-r-plus',
24
- 'command-r',
25
- 'command-r7b-12-2024',
26
- 'command-r7b-arabic-02-2025'
27
- ] # Placeholder for available models, It will be updated in the constructor
28
-
29
- def __repr__(self) -> str:
30
- return f"C4ai({self.model})"
31
-
32
- def __init__(
33
- self,
34
- is_conversation: bool = True,
35
- max_tokens: int = 2000,
36
- timeout: int = 60,
37
- filepath: str = None,
38
- update_file: bool = True,
39
- proxies: dict = {},
40
- model: str = "command-a-03-2025",
41
- system_prompt: str = "You are a helpful assistant.",
42
- ):
43
- """Initialize the C4ai client."""
44
- self.url = "https://cohereforai-c4ai-command.hf.space"
45
- self.session = requests.Session()
46
- self.session.proxies.update(proxies)
47
-
48
- # Set up headers for all requests
49
- self.headers = {
50
- "Content-Type": "application/json",
51
- "User-Agent": LitAgent().random(),
52
- "Accept": "*/*",
53
- "Accept-Encoding": "gzip, deflate, br, zstd",
54
- "Accept-Language": "en-US,en;q=0.9",
55
- "Origin": "https://cohereforai-c4ai-command.hf.space",
56
- "Referer": "https://cohereforai-c4ai-command.hf.space/",
57
- "Sec-Ch-Ua": "\"Chromium\";v=\"120\"",
58
- "Sec-Ch-Ua-Mobile": "?0",
59
- "Sec-Ch-Ua-Platform": "\"Windows\"",
60
- "Sec-Fetch-Dest": "empty",
61
- "Sec-Fetch-Mode": "cors",
62
- "Sec-Fetch-Site": "same-origin",
63
- "DNT": "1",
64
- "Priority": "u=1, i"
65
- }
66
-
67
- # Update available models
68
- self.update_available_models()
69
-
70
- # Set default model if none provided
71
- self.model = model
72
- # Provider settings
73
- self.is_conversation = is_conversation
74
- self.max_tokens_to_sample = max_tokens
75
- self.timeout = timeout
76
- self.last_response = {}
77
-
78
- # Initialize a simplified conversation history for file saving only
79
- self.conversation = Conversation(is_conversation, max_tokens, filepath, update_file)
80
-
81
- # Store conversation data for different models
82
- self._conversation_data = {}
83
- self.preprompt = system_prompt
84
-
85
- def update_available_models(self):
86
- """Update the available models list from HuggingFace"""
87
- try:
88
- models = self.get_models()
89
- if models and len(models) > 0:
90
- self.AVAILABLE_MODELS = models
91
- except Exception:
92
- # Fallback to default models list if fetching fails
93
- pass
94
-
95
- @classmethod
96
- def get_models(cls):
97
- """Fetch available models from HuggingFace."""
98
- try:
99
- response = requests.get("https://cohereforai-c4ai-command.hf.space/")
100
- text = response.text
101
- models_match = re.search(r'models:(\[.+?\]),oldModels:', text)
102
-
103
- if not models_match:
104
- return cls.AVAILABLE_MODELS
105
-
106
- models_text = models_match.group(1)
107
- models_text = re.sub(r',parameters:{[^}]+?}', '', models_text)
108
- models_text = models_text.replace('void 0', 'null')
109
-
110
- def add_quotation_mark(match):
111
- return f'{match.group(1)}"{match.group(2)}":'
112
-
113
- models_text = re.sub(r'([{,])([A-Za-z0-9_]+?):', add_quotation_mark, models_text)
114
-
115
- models_data = json.loads(models_text)
116
- # print([model["id"] for model in models_data])
117
- return [model["id"] for model in models_data]
118
- except Exception:
119
- return cls.AVAILABLE_MODELS
120
-
121
- def create_conversation(self, model: str):
122
- """Create a new conversation with the specified model."""
123
- url = "https://cohereforai-c4ai-command.hf.space/conversation"
124
- payload = {"model": model, "preprompt": self.preprompt,}
125
-
126
- # Update referer for this specific request
127
- headers = self.headers.copy()
128
- headers["Referer"] = f"https://cohereforai-c4ai-command.hf.space/"
129
-
130
- try:
131
- response = self.session.post(url, json=payload, headers=headers)
132
-
133
- if response.status_code == 401:
134
- raise exceptions.AuthenticationError("Authentication failed.")
135
-
136
- # Handle other error codes
137
- if response.status_code != 200:
138
- return None
139
-
140
- data = response.json()
141
- conversation_id = data.get("conversationId")
142
-
143
- # Store conversation data
144
- if model not in self._conversation_data:
145
- self._conversation_data[model] = {
146
- "conversationId": conversation_id,
147
- "messageId": str(uuid.uuid4()) # Initial message ID
148
- }
149
-
150
- return conversation_id
151
- except requests.exceptions.RequestException:
152
- return None
153
-
154
- def fetch_message_id(self, conversation_id: str) -> str:
155
- """Fetch the latest message ID for a conversation."""
156
- try:
157
- url = f"https://cohereforai-c4ai-command.hf.space/conversation/{conversation_id}/__data.json?x-sveltekit-invalidated=11"
158
- response = self.session.get(url, headers=self.headers)
159
- response.raise_for_status()
160
-
161
- # Parse the JSON data from the response
162
- json_data = None
163
- for line in response.text.split('\n'):
164
- if line.strip():
165
- try:
166
- parsed = json.loads(line)
167
- if isinstance(parsed, dict) and "nodes" in parsed:
168
- json_data = parsed
169
- break
170
- except json.JSONDecodeError:
171
- continue
172
-
173
- if not json_data:
174
- # Fall back to a UUID if we can't parse the response
175
- return str(uuid.uuid4())
176
-
177
- # Extract message ID using the same pattern as in the example
178
- if json_data.get("nodes", []) and json_data["nodes"][-1].get("type") == "error":
179
- return str(uuid.uuid4())
180
-
181
- data = json_data["nodes"][1]["data"]
182
- keys = data[data[0]["messages"]]
183
- message_keys = data[keys[-1]]
184
- message_id = data[message_keys["id"]]
185
-
186
- return message_id
187
-
188
- except Exception:
189
- # Fall back to a UUID if there's an error
190
- return str(uuid.uuid4())
191
-
192
- def generate_boundary(self):
193
- """Generate a random boundary for multipart/form-data requests"""
194
- boundary_chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
195
- boundary = "----WebKitFormBoundary"
196
- boundary += "".join(random.choice(boundary_chars) for _ in range(16))
197
- return boundary
198
-
199
- def process_response(self, response, prompt: str):
200
- """Process streaming response and extract content."""
201
- full_text = ""
202
- sources = None
203
- reasoning_text = ""
204
- has_reasoning = False
205
-
206
- for line in response.iter_lines(decode_unicode=True):
207
- if not line:
208
- continue
209
-
210
- try:
211
- # Parse each line as JSON
212
- data = json.loads(line)
213
-
214
- # Handle different response types
215
- if "type" not in data:
216
- continue
217
-
218
- if data["type"] == "stream" and "token" in data:
219
- token = data["token"].replace("\u0000", "")
220
- full_text += token
221
- resp = {"text": token}
222
- yield resp
223
- elif data["type"] == "finalAnswer":
224
- final_text = data.get("text", "")
225
- if final_text and not full_text:
226
- full_text = final_text
227
- resp = {"text": final_text}
228
- yield resp
229
- elif data["type"] == "webSearch" and "sources" in data:
230
- sources = data["sources"]
231
- elif data["type"] == "reasoning":
232
- has_reasoning = True
233
- if data.get("subtype") == "stream" and "token" in data:
234
- reasoning_text += data["token"]
235
- # elif data.get("subtype") == "status":
236
- # # For status updates in reasoning, we can just append them as a comment
237
- # if data.get("status"):
238
- # reasoning_text += f"\n# {data['status']}"
239
-
240
- # If we have reasoning, prepend it to the next text output
241
- if reasoning_text and not full_text:
242
- resp = {"text": f"<think>\n{reasoning_text}\n</think>\n", "is_reasoning": True}
243
- yield resp
244
-
245
- except json.JSONDecodeError:
246
- continue
247
-
248
- # Update conversation history only for saving to file if needed
249
- if full_text and self.conversation.file:
250
- if has_reasoning:
251
- full_text_with_reasoning = f"<think>\n{reasoning_text}\n</think>\n{full_text}"
252
- self.last_response = {"text": full_text_with_reasoning}
253
- self.conversation.update_chat_history(prompt, full_text_with_reasoning)
254
- else:
255
- self.last_response = {"text": full_text}
256
- self.conversation.update_chat_history(prompt, full_text)
257
-
258
- return full_text
259
-
260
- def ask(
261
- self,
262
- prompt: str,
263
- stream: bool = False,
264
- raw: bool = False,
265
- optimizer: str = None,
266
- conversationally: bool = False,
267
- web_search: bool = False,
268
- ) -> Union[Dict[str, Any], Generator]:
269
- """Send a message to the HuggingFace Chat API"""
270
- model = self.model
271
-
272
- # Check if we have a conversation for this model
273
- if model not in self._conversation_data:
274
- conversation_id = self.create_conversation(model)
275
- if not conversation_id:
276
- raise exceptions.FailedToGenerateResponseError(f"Failed to create conversation with model {model}")
277
- else:
278
- conversation_id = self._conversation_data[model]["conversationId"]
279
- # Refresh message ID
280
- self._conversation_data[model]["messageId"] = self.fetch_message_id(conversation_id)
281
-
282
- url = f"https://cohereforai-c4ai-command.hf.space/conversation/{conversation_id}"
283
- message_id = self._conversation_data[model]["messageId"]
284
-
285
- # Data to send - use the prompt directly without generating a complete prompt
286
- # since HuggingFace maintains conversation state internally
287
- request_data = {
288
- "inputs": prompt,
289
- "id": message_id,
290
- "is_retry": False,
291
- "is_continue": False,
292
- "web_search": web_search,
293
- "tools": ["66e85bb396d054c5771bc6cb", "00000000000000000000000a"]
294
- }
295
-
296
- # Update headers for this specific request
297
- headers = self.headers.copy()
298
- headers["Referer"] = f"https://cohereforai-c4ai-command.hf.space/conversation/{conversation_id}"
299
-
300
- # Create multipart form data
301
- boundary = self.generate_boundary()
302
- multipart_headers = headers.copy()
303
- multipart_headers["Content-Type"] = f"multipart/form-data; boundary={boundary}"
304
-
305
- # Serialize the data to JSON
306
- data_json = json.dumps(request_data, separators=(',', ':'))
307
-
308
- # Create the multipart form data body
309
- body = f"--{boundary}\r\n"
310
- body += f'Content-Disposition: form-data; name="data"\r\n'
311
- body += f"Content-Type: application/json\r\n\r\n"
312
- body += f"{data_json}\r\n"
313
- body += f"--{boundary}--\r\n"
314
-
315
- multipart_headers["Content-Length"] = str(len(body))
316
-
317
- def for_stream():
318
- try:
319
- # Try with multipart/form-data first
320
- response = None
321
- try:
322
- response = self.session.post(
323
- url,
324
- data=body,
325
- headers=multipart_headers,
326
- stream=True,
327
- timeout=self.timeout
328
- )
329
- except requests.exceptions.RequestException:
330
- pass
331
-
332
- # If multipart fails or returns error, try with regular JSON
333
- if not response or response.status_code != 200:
334
- response = self.session.post(
335
- url,
336
- json=request_data,
337
- headers=headers,
338
- stream=True,
339
- timeout=self.timeout
340
- )
341
-
342
- # If both methods fail, raise exception
343
- if response.status_code != 200:
344
- raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}")
345
-
346
- # Process the streaming response
347
- yield from self.process_response(response, prompt)
348
-
349
- except Exception as e:
350
- if isinstance(e, requests.exceptions.RequestException):
351
- if hasattr(e, 'response') and e.response is not None:
352
- status_code = e.response.status_code
353
- if status_code == 401:
354
- raise exceptions.AuthenticationError("Authentication failed.")
355
-
356
- # Try another model if current one fails
357
- if len(self.AVAILABLE_MODELS) > 1:
358
- current_model_index = self.AVAILABLE_MODELS.index(self.model) if self.model in self.AVAILABLE_MODELS else 0
359
- next_model_index = (current_model_index + 1) % len(self.AVAILABLE_MODELS)
360
- self.model = self.AVAILABLE_MODELS[next_model_index]
361
-
362
- # Create new conversation with the alternate model
363
- conversation_id = self.create_conversation(self.model)
364
- if conversation_id:
365
- # Try again with the new model
366
- yield from self.ask(prompt, stream=True, raw=raw, optimizer=optimizer,
367
- conversationally=conversationally, web_search=web_search)
368
- return
369
-
370
- # If we get here, all models failed
371
- raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
372
-
373
- def for_non_stream():
374
- response_text = ""
375
- for response in for_stream():
376
- if "text" in response:
377
- response_text += response["text"]
378
- self.last_response = {"text": response_text}
379
- return self.last_response
380
-
381
- return for_stream() if stream else for_non_stream()
382
-
383
- def chat(
384
- self,
385
- prompt: str,
386
- stream: bool = False,
387
- optimizer: str = None,
388
- conversationally: bool = False,
389
- web_search: bool = False
390
- ) -> Union[str, Generator]:
391
- """Generate a response to a prompt"""
392
- def for_stream():
393
- for response in self.ask(
394
- prompt, True, optimizer=optimizer, conversationally=conversationally, web_search=web_search
395
- ):
396
- yield self.get_message(response)
397
-
398
- def for_non_stream():
399
- return self.get_message(
400
- self.ask(
401
- prompt, False, optimizer=optimizer, conversationally=conversationally, web_search=web_search
402
- )
403
- )
404
-
405
- return for_stream() if stream else for_non_stream()
406
-
407
- def get_message(self, response: dict) -> str:
408
- """Extract message text from response"""
409
- assert isinstance(response, dict), "Response should be of dict data-type only"
410
- return response.get("text", "")
411
-
412
- if __name__ == "__main__":
413
- print("-" * 80)
414
- print(f"{'Model':<50} {'Status':<10} {'Response'}")
415
- print("-" * 80)
416
-
417
- for model in C4ai.AVAILABLE_MODELS:
418
- try:
419
- test_ai = C4ai(model=model, timeout=60)
420
- response = test_ai.chat("Say 'Hello' in one word")
421
- response_text = response
422
-
423
- if response_text and len(response_text.strip()) > 0:
424
- status = "✓"
425
- # Truncate response if too long
426
- display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
427
- else:
428
- status = "✗"
429
- display_text = "Empty or invalid response"
430
- print(f"{model:<50} {status:<10} {display_text}")
431
- except Exception as e:
432
- print(f"{model:<50} {'✗':<10} {str(e)}")