webscout 8.2.4__py3-none-any.whl → 8.2.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; review the file changes listed below for details.

Files changed (110)
  1. webscout/AIauto.py +112 -22
  2. webscout/AIutel.py +240 -344
  3. webscout/Extra/autocoder/autocoder.py +66 -5
  4. webscout/Extra/gguf.py +2 -0
  5. webscout/Provider/AISEARCH/scira_search.py +3 -5
  6. webscout/Provider/Aitopia.py +75 -51
  7. webscout/Provider/AllenAI.py +64 -67
  8. webscout/Provider/ChatGPTClone.py +33 -34
  9. webscout/Provider/ChatSandbox.py +342 -0
  10. webscout/Provider/Cloudflare.py +79 -32
  11. webscout/Provider/Deepinfra.py +69 -56
  12. webscout/Provider/ElectronHub.py +48 -39
  13. webscout/Provider/ExaChat.py +36 -20
  14. webscout/Provider/GPTWeb.py +24 -18
  15. webscout/Provider/GithubChat.py +52 -49
  16. webscout/Provider/GizAI.py +285 -0
  17. webscout/Provider/Glider.py +39 -28
  18. webscout/Provider/Groq.py +48 -20
  19. webscout/Provider/HeckAI.py +18 -36
  20. webscout/Provider/Jadve.py +30 -37
  21. webscout/Provider/LambdaChat.py +36 -59
  22. webscout/Provider/MCPCore.py +18 -21
  23. webscout/Provider/Marcus.py +23 -14
  24. webscout/Provider/Nemotron.py +218 -0
  25. webscout/Provider/Netwrck.py +35 -26
  26. webscout/Provider/OPENAI/__init__.py +1 -1
  27. webscout/Provider/OPENAI/exachat.py +4 -0
  28. webscout/Provider/OPENAI/scirachat.py +3 -4
  29. webscout/Provider/OPENAI/textpollinations.py +20 -22
  30. webscout/Provider/OPENAI/toolbaz.py +1 -0
  31. webscout/Provider/PI.py +22 -13
  32. webscout/Provider/StandardInput.py +42 -30
  33. webscout/Provider/TeachAnything.py +24 -12
  34. webscout/Provider/TextPollinationsAI.py +78 -76
  35. webscout/Provider/TwoAI.py +120 -88
  36. webscout/Provider/TypliAI.py +305 -0
  37. webscout/Provider/Venice.py +24 -22
  38. webscout/Provider/VercelAI.py +31 -12
  39. webscout/Provider/WiseCat.py +1 -1
  40. webscout/Provider/WrDoChat.py +370 -0
  41. webscout/Provider/__init__.py +11 -13
  42. webscout/Provider/ai4chat.py +5 -3
  43. webscout/Provider/akashgpt.py +59 -66
  44. webscout/Provider/asksteve.py +53 -44
  45. webscout/Provider/cerebras.py +77 -31
  46. webscout/Provider/chatglm.py +47 -37
  47. webscout/Provider/elmo.py +38 -32
  48. webscout/Provider/freeaichat.py +57 -43
  49. webscout/Provider/granite.py +24 -21
  50. webscout/Provider/hermes.py +27 -20
  51. webscout/Provider/learnfastai.py +25 -20
  52. webscout/Provider/llmchatco.py +48 -78
  53. webscout/Provider/multichat.py +13 -3
  54. webscout/Provider/scira_chat.py +50 -30
  55. webscout/Provider/scnet.py +27 -21
  56. webscout/Provider/searchchat.py +16 -24
  57. webscout/Provider/sonus.py +37 -39
  58. webscout/Provider/toolbaz.py +24 -46
  59. webscout/Provider/turboseek.py +37 -41
  60. webscout/Provider/typefully.py +30 -22
  61. webscout/Provider/typegpt.py +47 -51
  62. webscout/Provider/uncovr.py +46 -40
  63. webscout/__init__.py +0 -1
  64. webscout/cli.py +256 -0
  65. webscout/conversation.py +305 -448
  66. webscout/exceptions.py +3 -0
  67. webscout/swiftcli/__init__.py +80 -794
  68. webscout/swiftcli/core/__init__.py +7 -0
  69. webscout/swiftcli/core/cli.py +297 -0
  70. webscout/swiftcli/core/context.py +104 -0
  71. webscout/swiftcli/core/group.py +241 -0
  72. webscout/swiftcli/decorators/__init__.py +28 -0
  73. webscout/swiftcli/decorators/command.py +221 -0
  74. webscout/swiftcli/decorators/options.py +220 -0
  75. webscout/swiftcli/decorators/output.py +252 -0
  76. webscout/swiftcli/exceptions.py +21 -0
  77. webscout/swiftcli/plugins/__init__.py +9 -0
  78. webscout/swiftcli/plugins/base.py +135 -0
  79. webscout/swiftcli/plugins/manager.py +262 -0
  80. webscout/swiftcli/utils/__init__.py +59 -0
  81. webscout/swiftcli/utils/formatting.py +252 -0
  82. webscout/swiftcli/utils/parsing.py +267 -0
  83. webscout/version.py +1 -1
  84. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/METADATA +166 -45
  85. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/RECORD +89 -89
  86. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/WHEEL +1 -1
  87. webscout-8.2.6.dist-info/entry_points.txt +3 -0
  88. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/top_level.txt +0 -1
  89. inferno/__init__.py +0 -6
  90. inferno/__main__.py +0 -9
  91. inferno/cli.py +0 -6
  92. inferno/lol.py +0 -589
  93. webscout/LLM.py +0 -442
  94. webscout/Local/__init__.py +0 -12
  95. webscout/Local/__main__.py +0 -9
  96. webscout/Local/api.py +0 -576
  97. webscout/Local/cli.py +0 -516
  98. webscout/Local/config.py +0 -75
  99. webscout/Local/llm.py +0 -287
  100. webscout/Local/model_manager.py +0 -253
  101. webscout/Local/server.py +0 -721
  102. webscout/Local/utils.py +0 -93
  103. webscout/Provider/Chatify.py +0 -175
  104. webscout/Provider/PizzaGPT.py +0 -228
  105. webscout/Provider/askmyai.py +0 -158
  106. webscout/Provider/gaurish.py +0 -244
  107. webscout/Provider/promptrefine.py +0 -193
  108. webscout/Provider/tutorai.py +0 -270
  109. webscout-8.2.4.dist-info/entry_points.txt +0 -5
  110. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/licenses/LICENSE.md +0 -0
@@ -0,0 +1,342 @@
1
+ from typing import Optional, Union, Any, Dict, Generator, List
2
+ from uuid import uuid4
3
+ import json
4
+ import re
5
+ import random
6
+ from curl_cffi import CurlError
7
+ from curl_cffi.requests import Session
8
+ from curl_cffi.const import CurlHttpVersion
9
+
10
+ from webscout.AIutel import sanitize_stream
11
+ from webscout.AIutel import Optimizers
12
+ from webscout.AIutel import Conversation
13
+ from webscout.AIutel import AwesomePrompts
14
+ from webscout.AIbase import Provider
15
+ from webscout import exceptions
16
+ from webscout.litagent import LitAgent
17
+
18
class ChatSandbox(Provider):
    """
    Sends a chat message to the specified model via the chatsandbox API.

    This provider interacts with the AI models exposed through the
    chatsandbox.com interface (e.g. OpenAI, DeepSeek, Llama, Gemini, Mistral).

    Attributes:
        model (str): The model to chat with (e.g., "openai", "deepseek", "llama").

    Examples:
        >>> from webscout.Provider.chatsandbox import ChatSandbox
        >>> ai = ChatSandbox(model="openai")
        >>> response = ai.chat("Hello, how are you?")
        >>> print(response)
        'I'm doing well, thank you for asking! How can I assist you today?'
    """

    AVAILABLE_MODELS = ["openai", "deepseek", "llama", "gemini", "mistral-large"]

    def __init__(
        self,
        model: str = "openai",
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: Optional[dict] = None,
        history_offset: int = 10250,
        act: str = None,
    ):
        """
        Initializes the ChatSandbox API with given parameters.

        Args:
            model (str): The model to chat with (e.g., "openai", "deepseek", "llama").
            is_conversation (bool): Whether the provider is in conversation mode.
            max_tokens (int): Maximum number of tokens to sample.
            timeout (int): Timeout for API requests, in seconds.
            intro (str): Introduction message for the conversation.
            filepath (str): Filepath for storing conversation history.
            update_file (bool): Whether to update the conversation history file.
            proxies (Optional[dict]): Proxies for the API requests (``None`` means no proxies).
            history_offset (int): Offset for conversation history.
            act (str): AwesomePrompts act name used to select an intro prompt.

        Raises:
            ValueError: If ``model`` is not one of ``AVAILABLE_MODELS``.

        Examples:
            >>> ai = ChatSandbox(model="openai")
            >>> print(ai.model)
            'openai'
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        # Initialize curl_cffi Session
        self.session = Session()
        self.model = model
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://chatsandbox.com/api/chat"
        self.timeout = timeout
        self.last_response = {}

        # Initialize LitAgent for user agent generation
        self.agent = LitAgent()

        # Set up headers
        self.headers = {
            'authority': 'chatsandbox.com',
            'accept': '*/*',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'en-US,en;q=0.9',
            'content-type': 'application/json',
            'origin': 'https://chatsandbox.com',
            'referer': f'https://chatsandbox.com/chat/{self.model}',
            'sec-ch-ua': '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': self.agent.random(),
            'dnt': '1',
            'sec-gpc': '1',
        }

        # Materialized as a tuple (NOT a generator): ask() performs repeated
        # `optimizer in self.__available_optimizers` membership tests, and a
        # generator would be exhausted after the first one, silently breaking
        # every subsequent optimizer lookup.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )

        # Update curl_cffi session headers and proxies
        self.session.headers.update(self.headers)
        self.session.proxies = proxies or {}

        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    @staticmethod
    def _chatsandbox_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
        """Extracts content from the chatsandbox stream format.

        The stream may interleave plain-text chunks with JSON objects that
        carry a ``reasoning_content`` field; anything else is passed through
        unchanged. Non-string chunks are dropped.
        """
        if isinstance(chunk, str):
            try:
                data = json.loads(chunk)
                if isinstance(data, dict) and "reasoning_content" in data:
                    return data["reasoning_content"]
                return chunk
            except json.JSONDecodeError:
                return chunk
        return None

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        """
        Sends a prompt to the ChatSandbox API and returns the response.

        Args:
            prompt (str): The prompt to send to the API.
            stream (bool): Whether to stream the response.
            raw (bool): Whether to yield raw text chunks instead of dicts.
            optimizer (str): Optimizer to use for the prompt.
            conversationally (bool): Whether to generate the prompt conversationally.

        Returns:
            Union[Dict[str, Any], Generator]: The API response, or a generator
            of chunks when ``stream`` is True.

        Raises:
            exceptions.FailedToGenerateResponseError: On network failure or a
                non-OK HTTP status from the API.

        Examples:
            >>> ai = ChatSandbox()
            >>> response = ai.ask("Tell me a joke!")
            >>> print(response)
            {'text': 'Why did the scarecrow win an award? Because he was outstanding in his field!'}
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        # Prepare the payload
        payload = {
            "messages": [conversation_prompt],
            "character": self.model
        }

        def for_stream():
            try:
                # Use curl_cffi session post with impersonation and forced HTTP/1.1
                response = self.session.post(
                    self.api_endpoint,
                    headers=self.headers,
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
                    impersonate="chrome120",  # Browser impersonation profile
                    http_version=CurlHttpVersion.V1_1  # Force HTTP/1.1
                )
                if not response.ok:
                    raise exceptions.FailedToGenerateResponseError(
                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                    )

                streaming_response = ""
                # Use sanitize_stream with the custom extractor
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),  # Pass byte iterator
                    intro_value=None,  # No simple prefix to remove here
                    to_json=False,  # Content is not JSON
                    content_extractor=self._chatsandbox_extractor  # Use the specific extractor
                )

                for content_chunk in processed_stream:
                    if content_chunk and isinstance(content_chunk, str):
                        streaming_response += content_chunk
                        yield content_chunk if raw else dict(text=content_chunk)

                self.last_response.update(dict(text=streaming_response))
                self.conversation.update_chat_history(
                    prompt, self.get_message(self.last_response)
                )
            except CurlError as e:  # Catch CurlError
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
            except exceptions.FailedToGenerateResponseError:
                # Already our error type: re-raise untouched instead of
                # re-wrapping it (which would mangle the message).
                raise
            except Exception as e:  # Catch other potential exceptions
                # Include the original exception type in the message for clarity
                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")

        def for_non_stream():
            # Drains the stream; last_response is populated by for_stream.
            for _ in for_stream():
                pass
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        """
        Generates a response from the ChatSandbox API.

        Args:
            prompt (str): The prompt to send to the API.
            stream (bool): Whether to stream the response.
            optimizer (str): Optimizer to use for the prompt.
            conversationally (bool): Whether to generate the prompt conversationally.

        Returns:
            str: The API response (or a generator of text chunks when streaming).

        Examples:
            >>> ai = ChatSandbox()
            >>> response = ai.chat("What's the weather today?")
            >>> print(response)
            'I don't have real-time weather data, but I can help you find weather information online.'
        """
        def for_stream():
            for response in self.ask(
                prompt,
                stream=True,
                raw=False,
                optimizer=optimizer,
                conversationally=conversationally,
            ):
                yield response.get("text", "")

        if stream:
            return for_stream()
        else:
            return self.get_message(
                self.ask(
                    prompt,
                    stream=False,
                    raw=False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                )
            )

    def get_message(self, response: Dict[str, Any]) -> str:
        """
        Extract the message from the API response.

        Args:
            response (Dict[str, Any]): The API response.

        Returns:
            str: The extracted message. If the ``text`` field holds a JSON
            object, well-known content fields are preferred; otherwise the
            raw (stripped) text is returned.
        """
        if not isinstance(response, dict):
            return str(response)

        raw_text = response.get("text", "")

        # Try to parse as JSON
        try:
            data = json.loads(raw_text)
            if isinstance(data, dict):
                # Check for different response formats
                if "reasoning_content" in data:
                    return data["reasoning_content"]
                elif "content" in data:
                    return data["content"]
                elif "message" in data:
                    return data["message"]
                elif "response" in data:
                    return data["response"]
                elif "text" in data:
                    return data["text"]
                # Return the whole JSON if no specific field is found
                return json.dumps(data, ensure_ascii=False)
        except json.JSONDecodeError:
            # If it's not JSON, return the raw text
            pass

        return raw_text.strip()
318
+
319
# --- Example Usage ---
if __name__ == "__main__":
    from rich import print
    # Ensure curl_cffi is installed

    divider = "-" * 80
    print(divider)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print(divider)

    # Smoke-test every advertised model with a tiny prompt.
    for model_name in ChatSandbox.AVAILABLE_MODELS:
        try:
            client = ChatSandbox(model=model_name, timeout=60)
            reply = client.chat("Say 'Hello' in one word")
            trimmed = reply.strip() if reply else ""

            if trimmed:
                mark = "✓"
                # Truncate overly long replies for tidy table output.
                shown = trimmed[:50] + "..." if len(trimmed) > 50 else trimmed
            else:
                mark = "✗"
                shown = "Empty or invalid response"
            print(f"{model_name:<50} {mark:<10} {shown}")
        except Exception as e:
            print(f"{model_name:<50} {'✗':<10} {str(e)}")
@@ -1,13 +1,15 @@
1
1
  import json
2
2
  from uuid import uuid4
3
- import webscout
3
+
4
+ import re # Import re
5
+ from curl_cffi import CurlError
4
6
  from webscout.AIutel import Optimizers
5
7
  from webscout.AIutel import Conversation
6
- from webscout.AIutel import AwesomePrompts, sanitize_stream
8
+ from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
7
9
  from webscout.AIbase import Provider, AsyncProvider
8
10
  from webscout import exceptions
9
- from typing import Union, Any, AsyncGenerator, Dict
10
- import cloudscraper
11
+ from typing import Optional, Union, Any, AsyncGenerator, Dict
12
+ from curl_cffi.requests import Session
11
13
  from webscout.litagent import LitAgent
12
14
 
13
15
  class Cloudflare(Provider):
@@ -96,7 +98,7 @@ class Cloudflare(Provider):
96
98
  if model not in self.AVAILABLE_MODELS:
97
99
  raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
98
100
 
99
- self.scraper = cloudscraper.create_scraper()
101
+ self.session = Session() # Use curl_cffi Session
100
102
  self.is_conversation = is_conversation
101
103
  self.max_tokens_to_sample = max_tokens
102
104
  self.chat_endpoint = "https://playground.ai.cloudflare.com/api/inference"
@@ -136,7 +138,7 @@ class Cloudflare(Provider):
136
138
  )
137
139
 
138
140
  # Initialize session and apply proxies
139
- self.session = cloudscraper.create_scraper()
141
+ # self.session = cloudscraper.create_scraper() # Replaced above
140
142
  self.session.headers.update(self.headers)
141
143
  self.session.proxies = proxies
142
144
 
@@ -156,6 +158,19 @@ class Cloudflare(Provider):
156
158
  # if self.logger:
157
159
  # self.logger.info("Cloudflare initialized successfully")
158
160
 
161
+ @staticmethod
162
+ def _cloudflare_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
163
+ """Extracts content from Cloudflare stream JSON objects."""
164
+ # Updated for the 0:"..." format
165
+ if isinstance(chunk, str):
166
+ # Use re.search to find the pattern 0:"<content>"
167
+ match = re.search(r'0:"(.*?)"(?=,|$)', chunk) # Look for 0:"...", possibly followed by comma or end of string
168
+ if match:
169
+ # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
170
+ content = match.group(1).encode().decode('unicode_escape')
171
+ return content.replace('\\\\', '\\').replace('\\"', '"')
172
+ return None
173
+
159
174
  def ask(
160
175
  self,
161
176
  prompt: str,
@@ -201,37 +216,69 @@ class Cloudflare(Provider):
201
216
 
202
217
  def for_stream():
203
218
  # if self.logger:
204
- # self.logger.debug("Sending streaming request to Cloudflare API...")
205
- response = self.scraper.post(
206
- self.chat_endpoint,
207
- headers=self.headers,
208
- cookies=self.cookies,
209
- data=json.dumps(payload),
210
- stream=True,
211
- timeout=self.timeout
212
- )
213
- if not response.ok:
214
- # if self.logger:
215
- # self.logger.error(f"Request failed: ({response.status_code}, {response.reason})")
216
- raise exceptions.FailedToGenerateResponseError(
217
- f"Failed to generate response - ({response.status_code}, {response.reason})"
219
+ # self.logger.debug("Sending streaming request to Cloudflare API...")
220
+ streaming_text = "" # Initialize outside try block
221
+ try:
222
+ response = self.session.post(
223
+ self.chat_endpoint,
224
+ headers=self.headers,
225
+ cookies=self.cookies,
226
+ data=json.dumps(payload),
227
+ stream=True,
228
+ timeout=self.timeout,
229
+ impersonate="chrome120" # Add impersonate
230
+ )
231
+ response.raise_for_status()
232
+
233
+ # Use sanitize_stream
234
+ processed_stream = sanitize_stream(
235
+ data=response.iter_content(chunk_size=None), # Pass byte iterator
236
+ intro_value=None,
237
+ to_json=False,
238
+ skip_markers=None,
239
+ content_extractor=self._cloudflare_extractor, # Use the specific extractor
240
+ yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
218
241
  )
219
- streaming_response = ""
220
- for line in response.iter_lines(decode_unicode=True):
221
- if line.startswith('data: ') and line != 'data: [DONE]':
222
- data = json.loads(line[6:])
223
- content = data.get('response', '')
224
- streaming_response += content
225
- yield content if raw else dict(text=content)
226
- self.last_response.update(dict(text=streaming_response))
242
+
243
+ for content_chunk in processed_stream:
244
+ if content_chunk and isinstance(content_chunk, str):
245
+ streaming_text += content_chunk
246
+ yield content_chunk if raw else dict(text=content_chunk)
247
+
248
+ except CurlError as e:
249
+ raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
250
+ except Exception as e:
251
+ raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}") from e
252
+ finally:
253
+ # Update history after stream finishes or fails
254
+ self.last_response.update(dict(text=streaming_text))
255
+ self.conversation.update_chat_history(prompt, streaming_text)
256
+
257
+ def for_non_stream():
258
+ # Aggregate the stream using the updated for_stream logic
259
+ full_text = ""
260
+ last_response_dict = {}
227
261
  self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
228
262
  # if self.logger:
229
263
  # self.logger.info("Streaming response completed successfully")
264
+ try:
265
+ # Ensure raw=False so for_stream yields dicts
266
+ for chunk_data in for_stream():
267
+ if isinstance(chunk_data, dict) and "text" in chunk_data:
268
+ full_text += chunk_data["text"]
269
+ last_response_dict = {"text": full_text} # Keep track of last dict structure
270
+ # Handle raw string case if raw=True was passed
271
+ elif raw and isinstance(chunk_data, str):
272
+ full_text += chunk_data
273
+ last_response_dict = {"text": full_text} # Update dict even for raw
274
+ except Exception as e:
275
+ # If aggregation fails but some text was received, use it. Otherwise, re-raise.
276
+ if not full_text:
277
+ raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
230
278
 
231
- def for_non_stream():
232
- for _ in for_stream():
233
- pass
234
- return self.last_response
279
+ # last_response and history are updated within for_stream's finally block
280
+ # Return the final aggregated response dict or raw text
281
+ return full_text if raw else last_response_dict
235
282
 
236
283
  return for_stream() if stream else for_non_stream()
237
284