webscout-7.6-py3-none-any.whl → webscout-7.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (36)
  1. webscout/Extra/autocoder/__init__.py +9 -9
  2. webscout/Extra/autocoder/autocoder_utiles.py +193 -195
  3. webscout/Extra/autocoder/rawdog.py +789 -649
  4. webscout/Extra/gguf.py +54 -24
  5. webscout/Provider/AISEARCH/ISou.py +0 -21
  6. webscout/Provider/AllenAI.py +4 -21
  7. webscout/Provider/ChatGPTClone.py +226 -0
  8. webscout/Provider/Glider.py +8 -4
  9. webscout/Provider/Hunyuan.py +272 -0
  10. webscout/Provider/LambdaChat.py +391 -0
  11. webscout/Provider/OLLAMA.py +256 -32
  12. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +18 -45
  13. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +34 -46
  14. webscout/Provider/TTI/artbit/async_artbit.py +3 -32
  15. webscout/Provider/TTI/artbit/sync_artbit.py +3 -31
  16. webscout/Provider/TTI/fastflux/async_fastflux.py +6 -2
  17. webscout/Provider/TTI/fastflux/sync_fastflux.py +7 -2
  18. webscout/Provider/TTI/piclumen/__init__.py +22 -22
  19. webscout/Provider/TTI/piclumen/sync_piclumen.py +232 -232
  20. webscout/Provider/WebSim.py +227 -0
  21. webscout/Provider/__init__.py +12 -1
  22. webscout/Provider/flowith.py +13 -2
  23. webscout/Provider/labyrinth.py +239 -0
  24. webscout/Provider/learnfastai.py +28 -15
  25. webscout/Provider/sonus.py +208 -0
  26. webscout/Provider/typegpt.py +1 -1
  27. webscout/Provider/uncovr.py +297 -0
  28. webscout/cli.py +49 -0
  29. webscout/litagent/agent.py +14 -9
  30. webscout/version.py +1 -1
  31. {webscout-7.6.dist-info → webscout-7.7.dist-info}/METADATA +33 -22
  32. {webscout-7.6.dist-info → webscout-7.7.dist-info}/RECORD +36 -29
  33. {webscout-7.6.dist-info → webscout-7.7.dist-info}/LICENSE.md +0 -0
  34. {webscout-7.6.dist-info → webscout-7.7.dist-info}/WHEEL +0 -0
  35. {webscout-7.6.dist-info → webscout-7.7.dist-info}/entry_points.txt +0 -0
  36. {webscout-7.6.dist-info → webscout-7.7.dist-info}/top_level.txt +0 -0

webscout/Provider/Hunyuan.py
@@ -0,0 +1,272 @@
+import requests
+import json
+import os
+from typing import Any, Dict, Optional, Generator, Union
+import time
+import uuid
+import re
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+from webscout import LitAgent
+
+class Hunyuan(Provider):
+    """
+    A class to interact with the Tencent Hunyuan API with LitAgent user-agent.
+    """
+
+    AVAILABLE_MODELS = [
+        "hunyuan-t1-latest",
+        # Add more models as they become available
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2048,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "hunyuan-t1-latest",
+        browser: str = "chrome",
+        api_key: str = None,
+        system_prompt: str = "You are a helpful assistant.",
+    ):
+        """Initializes the Hunyuan API client."""
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.url = "https://llm.hunyuan.tencent.com/aide/api/v2/triton_image/demo_text_chat/"
+
+        # Initialize LitAgent for user agent generation
+        self.agent = LitAgent()
+        # Use fingerprinting to create a consistent browser identity
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Use the fingerprint for headers
+        self.headers = {
+            "Accept": "*/*",
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Content-Type": "application/json",
+            "DNT": "1",
+            "Origin": "https://llm.hunyuan.tencent.com",
+            "Referer": "https://llm.hunyuan.tencent.com/",
+            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Chromium";v="134", "Not:A-Brand";v="24", "Microsoft Edge";v="134"',
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Sec-GPC": "1",
+            "User-Agent": self.fingerprint["user_agent"],
+        }
+
+        # Add authorization if API key is provided
+        if api_key:
+            self.headers["Authorization"] = f"Bearer {api_key}"
+        else:
+            # Default test key (may not work long-term)
+            self.headers["Authorization"] = "Bearer 7auGXNATFSKl7dF"
+
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+        self.system_message = system_prompt
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+
+        self.__available_optimizers = tuple(
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def refresh_identity(self, browser: str = None):
+        """
+        Refreshes the browser identity fingerprint.
+
+        Args:
+            browser: Specific browser to use for the new fingerprint
+        """
+        browser = browser or self.fingerprint.get("browser_type", "chrome")
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Update headers with new fingerprint
+        self.headers.update({
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
+            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+            "User-Agent": self.fingerprint["user_agent"],
+        })
+
+        # Update session headers
+        for header, value in self.headers.items():
+            self.session.headers[header] = value
+
+        return self.fingerprint
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        # Generate a unique query ID for each request
+        query_id = ''.join(re.findall(r'[a-z0-9]', str(uuid.uuid4())[:18]))
+
+        # Payload construction
+        payload = {
+            "stream": stream,
+            "model": self.model,
+            "query_id": query_id,
+            "messages": [
+                {"role": "system", "content": self.system_message},
+                {"role": "user", "content": "Always respond in English\n\n" + conversation_prompt},
+            ],
+            "stream_moderation": True,
+            "enable_enhancement": False
+        }
+
+        def for_stream():
+            try:
+                # NOTE: verify=False disables TLS certificate verification for this request
+                with self.session.post(self.url, data=json.dumps(payload), stream=True, timeout=self.timeout, verify=False) as response:
+                    if response.status_code != 200:
+                        raise exceptions.FailedToGenerateResponseError(
+                            f"Request failed with status code {response.status_code}"
+                        )
+
+                    streaming_text = ""
+                    for line in response.iter_lines(decode_unicode=True):
+                        if line:
+                            line = line.strip()
+                            if line.startswith("data: "):
+                                json_str = line[6:]
+                                if json_str == "[DONE]":
+                                    break
+                                try:
+                                    json_data = json.loads(json_str)
+                                    if 'choices' in json_data:
+                                        choice = json_data['choices'][0]
+                                        if 'delta' in choice and 'content' in choice['delta']:
+                                            content = choice['delta']['content']
+                                            streaming_text += content
+                                            resp = dict(text=content)
+                                            yield content if raw else resp
+                                except json.JSONDecodeError:
+                                    continue
+
+                    self.last_response = {"text": streaming_text}
+                    self.conversation.update_chat_history(prompt, streaming_text)
+
+            except requests.RequestException as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+
+        def for_non_stream():
+            try:
+                response = self.session.post(self.url, data=json.dumps(payload), timeout=self.timeout, verify=False)
+                if response.status_code != 200:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Request failed with status code {response.status_code}"
+                    )
+
+                # Process non-streaming response (need to parse all lines)
+                full_text = ""
+                for line in response.text.split('\n'):
+                    if line.startswith("data: ") and line[6:] != "[DONE]":
+                        try:
+                            json_data = json.loads(line[6:])
+                            if 'choices' in json_data:
+                                choice = json_data['choices'][0]
+                                if 'delta' in choice and 'content' in choice['delta']:
+                                    full_text += choice['delta']['content']
+                        except json.JSONDecodeError:
+                            continue
+
+                self.last_response = {"text": full_text}
+                self.conversation.update_chat_history(prompt, full_text)
+                return {"text": full_text}
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        def for_stream():
+            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
+                yield self.get_message(response)
+        def for_non_stream():
+            return self.get_message(
+                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+            )
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+if __name__ == "__main__":
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in Hunyuan.AVAILABLE_MODELS:
+        try:
+            test_ai = Hunyuan(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Clean and truncate response
+                clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/LambdaChat.py
@@ -0,0 +1,391 @@
+import requests
+import json
+import time
+import random
+import re
+import uuid
+from typing import Any, Dict, List, Optional, Union, Generator
+
+from webscout.AIutel import Conversation
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout import LitAgent
+
+class LambdaChat(Provider):
+    """
+    A class to interact with the Lambda Chat API.
+    Supports streaming responses.
+    """
+    url = "https://lambda.chat"
+
+    AVAILABLE_MODELS = [
+        "deepseek-llama3.3-70b",
+        "deepseek-r1",
+        "hermes-3-llama-3.1-405b-fp8",
+        "llama3.1-nemotron-70b-instruct",
+        "lfm-40b",
+        "llama3.3-70b-instruct-fp8"
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2000,
+        timeout: int = 60,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        model: str = "deepseek-llama3.3-70b",
+        assistantId: str = None,
+        system_prompt: str = "You are a helpful assistant. Please answer the following question.",
+    ):
+        """Initialize the LambdaChat client."""
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.model = model
+        self.session = requests.Session()
+        self.session.proxies.update(proxies)
+        self.assistantId = assistantId
+        self.system_prompt = system_prompt
+
+        # Set up headers for all requests
+        self.headers = {
+            "Content-Type": "application/json",
+            "User-Agent": LitAgent().random(),
+            "Accept": "*/*",
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": "en-US,en;q=0.9",
+            "Origin": self.url,
+            "Referer": f"{self.url}/",
+            "Sec-Ch-Ua": "\"Chromium\";v=\"120\"",
+            "Sec-Ch-Ua-Mobile": "?0",
+            "Sec-Ch-Ua-Platform": "\"Windows\"",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "DNT": "1",
+            "Priority": "u=1, i"
+        }
+
+        # Provider settings
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+
+        # Initialize conversation history
+        self.conversation = Conversation(is_conversation, max_tokens, filepath, update_file)
+
+        # Store conversation data for different models
+        self._conversation_data = {}
+
+    def create_conversation(self, model: str):
+        """Create a new conversation with the specified model."""
+        url = f"{self.url}/conversation"
+        payload = {
+            "model": model
+        }
+
+        # Update referer for this specific request
+        headers = self.headers.copy()
+        headers["Referer"] = f"{self.url}/models/{model}"
+
+        try:
+            response = self.session.post(url, json=payload, headers=headers)
+
+            if response.status_code == 401:
+                raise exceptions.AuthenticationError("Authentication failed.")
+
+            # Handle other error codes
+            if response.status_code != 200:
+                return None
+
+            data = response.json()
+            conversation_id = data.get("conversationId")
+
+            # Store conversation data
+            if model not in self._conversation_data:
+                self._conversation_data[model] = {
+                    "conversationId": conversation_id,
+                    "messageId": str(uuid.uuid4())  # Initial message ID
+                }
+
+            return conversation_id
+        except requests.exceptions.RequestException:
+            return None
+
+    def fetch_message_id(self, conversation_id: str) -> str:
+        """Fetch the latest message ID for a conversation."""
+        try:
+            url = f"{self.url}/conversation/{conversation_id}/__data.json?x-sveltekit-invalidated=11"
+            response = self.session.get(url, headers=self.headers)
+            response.raise_for_status()
+
+            # Parse the JSON data from the response
+            json_data = None
+            for line in response.text.split('\n'):
+                if line.strip():
+                    try:
+                        parsed = json.loads(line)
+                        if isinstance(parsed, dict) and "nodes" in parsed:
+                            json_data = parsed
+                            break
+                    except json.JSONDecodeError:
+                        continue
+
+            if not json_data:
+                # Fall back to a UUID if we can't parse the response
+                return str(uuid.uuid4())
+
+            # Extract message ID using the same pattern as in the example
+            if json_data.get("nodes", []) and json_data["nodes"][-1].get("type") == "error":
+                return str(uuid.uuid4())
+
+            data = json_data["nodes"][1]["data"]
+            keys = data[data[0]["messages"]]
+            message_keys = data[keys[-1]]
+            message_id = data[message_keys["id"]]
+
+            return message_id
+
+        except Exception:
+            # Fall back to a UUID if there's an error
+            return str(uuid.uuid4())
+
+    def generate_boundary(self):
+        """Generate a random boundary for multipart/form-data requests"""
+        boundary_chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
+        boundary = "----WebKitFormBoundary"
+        boundary += "".join(random.choice(boundary_chars) for _ in range(16))
+        return boundary
+
+    def process_response(self, response, prompt: str):
+        """Process streaming response and extract content."""
+        full_text = ""
166
+ sources = None
167
+ reasoning_text = ""
168
+ has_reasoning = False
169
+
170
+ for line in response.iter_lines(decode_unicode=True):
171
+ if not line:
172
+ continue
173
+
174
+ try:
175
+ # Parse each line as JSON
176
+ data = json.loads(line)
177
+
178
+ # Handle different response types
179
+ if "type" not in data:
180
+ continue
181
+
182
+ if data["type"] == "stream" and "token" in data:
183
+ token = data["token"].replace("\u0000", "")
184
+ full_text += token
185
+ resp = {"text": token}
186
+ yield resp
187
+ elif data["type"] == "finalAnswer":
188
+ final_text = data.get("text", "")
189
+ if final_text and not full_text:
190
+ full_text = final_text
191
+ resp = {"text": final_text}
192
+ yield resp
193
+ elif data["type"] == "webSearch" and "sources" in data:
194
+ sources = data["sources"]
195
+ elif data["type"] == "reasoning":
196
+ has_reasoning = True
197
+ if data.get("subtype") == "stream" and "token" in data:
198
+ reasoning_text += data["token"]
199
+
200
+ # If we have reasoning, prepend it to the next text output
201
+ if reasoning_text and not full_text:
202
+ resp = {"text": f"<think>\n{reasoning_text}\n</think>\n", "is_reasoning": True}
203
+ yield resp
204
+
205
+ except json.JSONDecodeError:
206
+ continue
207
+
208
+ # Update conversation history only for saving to file if needed
209
+ if full_text and self.conversation.file:
210
+ if has_reasoning:
211
+ full_text_with_reasoning = f"<think>\n{reasoning_text}\n</think>\n{full_text}"
212
+ self.last_response = {"text": full_text_with_reasoning}
213
+ self.conversation.update_chat_history(prompt, full_text_with_reasoning)
214
+ else:
215
+ self.last_response = {"text": full_text}
216
+ self.conversation.update_chat_history(prompt, full_text)
217
+
218
+ return full_text
219
+
220
+ def ask(
221
+ self,
222
+ prompt: str,
223
+ stream: bool = False,
224
+ raw: bool = False,
225
+ optimizer: str = None,
226
+ conversationally: bool = False,
227
+ web_search: bool = False,
228
+ ) -> Union[Dict[str, Any], Generator]:
229
+ """Send a message to the Lambda Chat API"""
230
+ model = self.model
231
+
232
+ # Check if we have a conversation for this model
233
+ if model not in self._conversation_data:
234
+ conversation_id = self.create_conversation(model)
235
+ if not conversation_id:
236
+ raise exceptions.FailedToGenerateResponseError(f"Failed to create conversation with model {model}")
237
+ else:
238
+ conversation_id = self._conversation_data[model]["conversationId"]
239
+ # Refresh message ID
240
+ self._conversation_data[model]["messageId"] = self.fetch_message_id(conversation_id)
241
+
242
+ url = f"{self.url}/conversation/{conversation_id}"
243
+ message_id = self._conversation_data[model]["messageId"]
244
+
245
+ # Data to send
246
+ request_data = {
247
+ "inputs": prompt,
248
+ "id": message_id,
249
+ "is_retry": False,
250
+ "is_continue": False,
251
+ "web_search": web_search,
252
+ "tools": ["66e85bb396d054c5771bc6cb", "00000000000000000000000a"]
253
+ }
254
+
255
+ # Update headers for this specific request
256
+ headers = self.headers.copy()
257
+ headers["Referer"] = f"{self.url}/conversation/{conversation_id}"
258
+
259
+ # Create multipart form data
260
+ boundary = self.generate_boundary()
261
+ multipart_headers = headers.copy()
262
+ multipart_headers["Content-Type"] = f"multipart/form-data; boundary={boundary}"
263
+
264
+ # Serialize the data to JSON
265
+ data_json = json.dumps(request_data, separators=(',', ':'))
266
+
267
+ # Create the multipart form data body
268
+ body = f"--{boundary}\r\n"
269
+ body += f'Content-Disposition: form-data; name="data"\r\n'
270
+ body += f"Content-Type: application/json\r\n\r\n"
271
+ body += f"{data_json}\r\n"
272
+ body += f"--{boundary}--\r\n"
273
+
274
+ multipart_headers["Content-Length"] = str(len(body))
275
+
276
+ def for_stream():
277
+ try:
278
+ # Try with multipart/form-data first
279
+ response = None
280
+ try:
281
+ response = self.session.post(
282
+ url,
283
+ data=body,
284
+ headers=multipart_headers,
285
+ stream=True,
286
+ timeout=self.timeout
287
+ )
288
+ except requests.exceptions.RequestException:
289
+ pass
290
+
291
+ # If multipart fails or returns error, try with regular JSON
292
+ if not response or response.status_code != 200:
293
+ response = self.session.post(
294
+ url,
295
+ json=request_data,
296
+ headers=headers,
297
+ stream=True,
298
+ timeout=self.timeout
299
+ )
300
+
301
+ # If both methods fail, raise exception
302
+ if response.status_code != 200:
303
+ raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}")
304
+
305
+ # Process the streaming response
306
+ yield from self.process_response(response, prompt)
307
+
308
+ except Exception as e:
309
+ if isinstance(e, requests.exceptions.RequestException):
310
+ if hasattr(e, 'response') and e.response is not None:
311
+ status_code = e.response.status_code
312
+ if status_code == 401:
313
+ raise exceptions.AuthenticationError("Authentication failed.")
314
+
315
+ # Try another model if current one fails
316
+ if len(self.AVAILABLE_MODELS) > 1:
317
+ current_model_index = self.AVAILABLE_MODELS.index(self.model) if self.model in self.AVAILABLE_MODELS else 0
318
+ next_model_index = (current_model_index + 1) % len(self.AVAILABLE_MODELS)
319
+ self.model = self.AVAILABLE_MODELS[next_model_index]
320
+
321
+ # Create new conversation with the alternate model
322
+ conversation_id = self.create_conversation(self.model)
323
+ if conversation_id:
324
+ # Try again with the new model
325
+ yield from self.ask(prompt, stream=True, raw=raw, optimizer=optimizer,
326
+ conversationally=conversationally, web_search=web_search)
327
+ return
328
+
329
+ # If we get here, all models failed
330
+ raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
331
+
332
+ def for_non_stream():
333
+ response_text = ""
334
+ for response in for_stream():
335
+ if "text" in response:
336
+ response_text += response["text"]
337
+ self.last_response = {"text": response_text}
338
+ return self.last_response
339
+
340
+ return for_stream() if stream else for_non_stream()
341
+
342
+ def chat(
343
+ self,
344
+ prompt: str,
345
+ stream: bool = False,
346
+ optimizer: str = None,
347
+ conversationally: bool = False,
348
+ web_search: bool = False
349
+ ) -> Union[str, Generator]:
350
+ """Generate a response to a prompt"""
351
+ def for_stream():
352
+ for response in self.ask(
353
+ prompt, True, optimizer=optimizer, conversationally=conversationally, web_search=web_search
354
+ ):
355
+ yield self.get_message(response)
356
+
357
+ def for_non_stream():
358
+ return self.get_message(
359
+ self.ask(
360
+ prompt, False, optimizer=optimizer, conversationally=conversationally, web_search=web_search
361
+ )
362
+ )
363
+
364
+ return for_stream() if stream else for_non_stream()
365
+
366
+ def get_message(self, response: dict) -> str:
367
+ """Extract message text from response"""
368
+ assert isinstance(response, dict), "Response should be of dict data-type only"
369
+ return response.get("text", "")
370
+
371
+ if __name__ == "__main__":
372
+ print("-" * 80)
373
+ print(f"{'Model':<50} {'Status':<10} {'Response'}")
374
+ print("-" * 80)
375
+
376
+ for model in LambdaChat.AVAILABLE_MODELS:
377
+ try:
378
+ test_ai = LambdaChat(model=model, timeout=60)
379
+ response = test_ai.chat("Say 'Hello' in one word")
380
+ response_text = response
381
+
382
+ if response_text and len(response_text.strip()) > 0:
383
+ status = "✓"
384
+ # Truncate response if too long
385
+ display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
386
+ else:
387
+ status = "✗"
388
+ display_text = "Empty or invalid response"
389
+ print(f"{model:<50} {status:<10} {display_text}")
390
+ except Exception as e:
391
+ print(f"{model:<50} {'✗':<10} {str(e)}")