webscout-8.2.3-py3-none-any.whl → webscout-8.2.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.
Files changed (122)
  1. webscout/AIutel.py +226 -14
  2. webscout/Bard.py +579 -206
  3. webscout/DWEBS.py +78 -35
  4. webscout/Extra/gguf.py +2 -0
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AISEARCH/scira_search.py +2 -5
  8. webscout/Provider/Aitopia.py +75 -51
  9. webscout/Provider/AllenAI.py +181 -147
  10. webscout/Provider/ChatGPTClone.py +97 -86
  11. webscout/Provider/ChatSandbox.py +342 -0
  12. webscout/Provider/Cloudflare.py +79 -32
  13. webscout/Provider/Deepinfra.py +135 -94
  14. webscout/Provider/ElectronHub.py +103 -39
  15. webscout/Provider/ExaChat.py +36 -20
  16. webscout/Provider/GPTWeb.py +103 -47
  17. webscout/Provider/GithubChat.py +52 -49
  18. webscout/Provider/GizAI.py +283 -0
  19. webscout/Provider/Glider.py +39 -28
  20. webscout/Provider/Groq.py +222 -91
  21. webscout/Provider/HeckAI.py +93 -69
  22. webscout/Provider/HuggingFaceChat.py +113 -106
  23. webscout/Provider/Hunyuan.py +94 -83
  24. webscout/Provider/Jadve.py +104 -79
  25. webscout/Provider/LambdaChat.py +142 -123
  26. webscout/Provider/Llama3.py +94 -39
  27. webscout/Provider/MCPCore.py +315 -0
  28. webscout/Provider/Marcus.py +95 -37
  29. webscout/Provider/Netwrck.py +94 -52
  30. webscout/Provider/OPENAI/__init__.py +4 -1
  31. webscout/Provider/OPENAI/ai4chat.py +286 -0
  32. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  33. webscout/Provider/OPENAI/deepinfra.py +37 -0
  34. webscout/Provider/OPENAI/exachat.py +4 -0
  35. webscout/Provider/OPENAI/groq.py +354 -0
  36. webscout/Provider/OPENAI/heckai.py +6 -2
  37. webscout/Provider/OPENAI/mcpcore.py +376 -0
  38. webscout/Provider/OPENAI/multichat.py +368 -0
  39. webscout/Provider/OPENAI/netwrck.py +3 -1
  40. webscout/Provider/OPENAI/scirachat.py +2 -4
  41. webscout/Provider/OPENAI/textpollinations.py +20 -22
  42. webscout/Provider/OPENAI/toolbaz.py +1 -0
  43. webscout/Provider/OpenGPT.py +48 -38
  44. webscout/Provider/PI.py +178 -93
  45. webscout/Provider/PizzaGPT.py +66 -36
  46. webscout/Provider/StandardInput.py +42 -30
  47. webscout/Provider/TeachAnything.py +95 -52
  48. webscout/Provider/TextPollinationsAI.py +138 -78
  49. webscout/Provider/TwoAI.py +162 -81
  50. webscout/Provider/TypliAI.py +305 -0
  51. webscout/Provider/Venice.py +97 -58
  52. webscout/Provider/VercelAI.py +33 -14
  53. webscout/Provider/WiseCat.py +65 -28
  54. webscout/Provider/Writecream.py +37 -11
  55. webscout/Provider/WritingMate.py +135 -63
  56. webscout/Provider/__init__.py +9 -27
  57. webscout/Provider/ai4chat.py +6 -7
  58. webscout/Provider/asksteve.py +53 -44
  59. webscout/Provider/cerebras.py +77 -31
  60. webscout/Provider/chatglm.py +47 -37
  61. webscout/Provider/copilot.py +0 -3
  62. webscout/Provider/elmo.py +109 -60
  63. webscout/Provider/granite.py +102 -54
  64. webscout/Provider/hermes.py +95 -48
  65. webscout/Provider/koala.py +1 -1
  66. webscout/Provider/learnfastai.py +113 -54
  67. webscout/Provider/llama3mitril.py +86 -51
  68. webscout/Provider/llmchat.py +88 -46
  69. webscout/Provider/llmchatco.py +110 -115
  70. webscout/Provider/meta.py +41 -37
  71. webscout/Provider/multichat.py +67 -28
  72. webscout/Provider/scira_chat.py +49 -30
  73. webscout/Provider/scnet.py +106 -53
  74. webscout/Provider/searchchat.py +87 -88
  75. webscout/Provider/sonus.py +113 -63
  76. webscout/Provider/toolbaz.py +115 -82
  77. webscout/Provider/turboseek.py +90 -43
  78. webscout/Provider/tutorai.py +82 -64
  79. webscout/Provider/typefully.py +85 -35
  80. webscout/Provider/typegpt.py +118 -61
  81. webscout/Provider/uncovr.py +132 -76
  82. webscout/Provider/x0gpt.py +69 -26
  83. webscout/Provider/yep.py +79 -66
  84. webscout/cli.py +256 -0
  85. webscout/conversation.py +34 -22
  86. webscout/exceptions.py +23 -0
  87. webscout/prompt_manager.py +56 -42
  88. webscout/version.py +1 -1
  89. webscout/webscout_search.py +65 -47
  90. webscout/webscout_search_async.py +81 -126
  91. webscout/yep_search.py +93 -43
  92. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
  93. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
  94. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
  95. webscout-8.2.5.dist-info/entry_points.txt +3 -0
  96. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
  97. inferno/__init__.py +0 -6
  98. inferno/__main__.py +0 -9
  99. inferno/cli.py +0 -6
  100. webscout/Local/__init__.py +0 -12
  101. webscout/Local/__main__.py +0 -9
  102. webscout/Local/api.py +0 -576
  103. webscout/Local/cli.py +0 -516
  104. webscout/Local/config.py +0 -75
  105. webscout/Local/llm.py +0 -287
  106. webscout/Local/model_manager.py +0 -253
  107. webscout/Local/server.py +0 -721
  108. webscout/Local/utils.py +0 -93
  109. webscout/Provider/C4ai.py +0 -432
  110. webscout/Provider/ChatGPTES.py +0 -237
  111. webscout/Provider/Chatify.py +0 -175
  112. webscout/Provider/DeepSeek.py +0 -196
  113. webscout/Provider/Llama.py +0 -200
  114. webscout/Provider/Phind.py +0 -535
  115. webscout/Provider/WebSim.py +0 -228
  116. webscout/Provider/askmyai.py +0 -158
  117. webscout/Provider/gaurish.py +0 -244
  118. webscout/Provider/labyrinth.py +0 -340
  119. webscout/Provider/lepton.py +0 -194
  120. webscout/Provider/llamatutor.py +0 -192
  121. webscout-8.2.3.dist-info/entry_points.txt +0 -5
  122. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/learnfastai.py

@@ -1,12 +1,12 @@
 import os
 import json
-from typing import Optional, Union, Generator
+from typing import Any, Dict, Optional, Union, Generator
 import uuid
-import requests
-import cloudscraper
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 
 from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
+from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
@@ -20,7 +20,7 @@ class LearnFast(Provider):
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 600,
+        max_tokens: int = 600, # Note: max_tokens is not used by this API
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -28,12 +28,13 @@ class LearnFast(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        system_prompt: str = "You are a helpful AI assistant.",
+        system_prompt: str = "You are a helpful AI assistant.", # Note: system_prompt is not used by this API
     ):
         """
        Initializes the LearnFast.ai API with given parameters.
         """
-        self.session = cloudscraper.create_scraper()
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = 'https://autosite.erweima.ai/api/v1/chat'
@@ -44,21 +45,17 @@ class LearnFast(Provider):
         self.headers = {
             "authority": "autosite.erweima.ai",
             "accept": "*/*",
-            "accept-encoding": "gzip, deflate, br, zstd",
             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
             "authorization": "", # Always empty
             "content-type": "application/json",
             "dnt": "1",
             "origin": "https://learnfast.ai",
-            "priority": "u=1, i",
+            "priority": "u=1, i", # Keep priority header if needed
             "referer": "https://learnfast.ai/",
-            "sec-ch-ua": '"Microsoft Edge";v="129", "Not=A?Brand";v="8", "Chromium";v="129"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Windows"',
             "sec-fetch-dest": "empty",
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "cross-site",
-            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0",
+            # uniqueid will be added dynamically in ask()
         }
 
         self.__available_optimizers = (
@@ -66,7 +63,10 @@
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
+        self.session.proxies = proxies # Assign proxies directly
+
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -78,7 +78,13 @@
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
+
+    @staticmethod
+    def _learnfast_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts message content from LearnFast stream JSON objects."""
+        if isinstance(chunk, dict) and chunk.get('code') == 200 and chunk.get('data'):
+            return chunk['data'].get('message')
+        return None
 
     def generate_unique_id(self) -> str:
         """Generate a 32-character hexadecimal unique ID."""
@@ -98,14 +104,21 @@
         with open(image_path, "rb") as img_file:
             files = {"file": img_file}
             try:
-                response = requests.post("https://0x0.st", files=files)
+                response = self.session.post(
+                    "https://0x0.st",
+                    files=files,
+                    # Add impersonate if using the main session
+                    impersonate="chrome110"
+                )
                 response.raise_for_status()
                 image_url = response.text.strip()
                 if not image_url.startswith("http"):
                     raise ValueError("Received an invalid URL from 0x0.st.")
                 return image_url
-            except requests.exceptions.RequestException as e:
-                raise Exception(f"Failed to upload image to 0x0.st: {e}") from e
+            except CurlError as e: # Catch CurlError
+                raise Exception(f"Failed to upload image to 0x0.st (CurlError): {e}") from e
+            except Exception as e: # Catch other potential errors
+                raise Exception(f"Failed to upload image to 0x0.st: {e}") from e
 
     def create_payload(
         self,
@@ -135,7 +148,7 @@
     def ask(
         self,
         prompt: str,
-        stream: bool = False,
+        stream: bool = False, # API supports streaming
        raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
@@ -170,8 +183,9 @@
         unique_id = self.generate_unique_id()
         session_id = self.generate_session_id()
 
-        # Update headers with the unique ID
-        self.headers["uniqueid"] = unique_id
+        # Update headers with the unique ID for this request
+        current_headers = self.headers.copy()
+        current_headers["uniqueid"] = unique_id
 
         # Upload image and get URL if image_path is provided
         image_url = None
@@ -187,35 +201,70 @@
         # Convert the payload to a JSON string
         data = json.dumps(payload)
 
-        try:
-            # Send the POST request with streaming enabled
-            response = self.session.post(self.api_endpoint, headers=self.headers, data=data, stream=True, timeout=self.timeout)
-            response.raise_for_status() # Check for HTTP errors
-
-            # Process the streamed response
-            full_response = ""
-            for line in response.iter_lines(decode_unicode=True):
-                if line:
-                    line = line.strip()
-                    if line == "[DONE]":
-                        break
-                    try:
-                        json_response = json.loads(line)
-                        if json_response.get('code') == 200 and json_response.get('data'):
-                            message = json_response['data'].get('message', '')
-                            if message:
-                                full_response += message
-                                if stream:
-                                    yield {"text": message}
-                    except json.JSONDecodeError:
-                        pass
-            self.last_response.update({"text": full_response})
-            self.conversation.update_chat_history(prompt, full_response)
-
-            if not stream:
-                return self.last_response
-        except requests.exceptions.RequestException as e:
-            raise exceptions.FailedToGenerateResponseError(f"An error occurred: {e}")
+        def for_stream():
+            full_response = "" # Initialize outside try block
+            try:
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    self.api_endpoint,
+                    headers=current_headers, # Use headers with uniqueid
+                    data=data,
+                    stream=True,
+                    timeout=self.timeout,
+                    # proxies are set on the session
+                    impersonate="chrome110" # Use a common impersonation profile
+                )
+                response.raise_for_status() # Check for HTTP errors
+
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value=None, # No prefix
+                    to_json=True, # Stream sends JSON lines
+                    skip_markers=["[DONE]"],
+                    content_extractor=self._learnfast_extractor, # Use the specific extractor
+                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _learnfast_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        full_response += content_chunk
+                        resp = {"text": content_chunk}
+                        yield resp if not raw else content_chunk
+
+                # Update history after stream finishes
+                self.last_response = {"text": full_response}
+                self.conversation.update_chat_history(prompt, full_response)
+
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"An error occurred (CurlError): {e}") from e
+            except Exception as e: # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"An error occurred ({type(e).__name__}): {e} - {err_text}") from e
+
+        def for_non_stream():
+            # Aggregate the stream using the updated for_stream logic
+            full_response_text = ""
+            try:
+                # Ensure raw=False so for_stream yields dicts
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        full_response_text += chunk_data["text"]
+                    # Handle raw string case if raw=True was passed
+                    elif raw and isinstance(chunk_data, str):
+                        full_response_text += chunk_data
+            except Exception as e:
+                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                if not full_response_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+            # last_response and history are updated within for_stream
+            # Return the final aggregated response dict or raw string
+            return full_response_text if raw else self.last_response
+
+
+        return for_stream() if stream else for_non_stream()
 
     def chat(
         self,
@@ -237,14 +286,23 @@ class LearnFast(Provider):
            Union[str, Generator[str, None, None]]: Response generated
         """
         try:
-            response = self.ask(prompt, stream, optimizer=optimizer, conversationally=conversationally, image_path=image_path)
+            # ask() yields dicts or strings when streaming
+            response_gen = self.ask(
+                prompt, stream=stream, raw=False, # Ensure ask yields dicts/dict
+                optimizer=optimizer, conversationally=conversationally,
+                image_path=image_path
+            )
             if stream:
-                for chunk in response:
-                    yield chunk["text"]
+                def stream_wrapper():
+                    for chunk_dict in response_gen:
+                        yield self.get_message(chunk_dict) # get_message expects dict
+                return stream_wrapper()
             else:
-                return str(response)
+                # response_gen is the final dict in non-stream mode
+                return self.get_message(response_gen) # get_message expects dict
         except Exception as e:
-            return f"Error: {str(e)}"
+            # Return error message directly, consider raising instead for better error handling upstream
+            return f"Error: {str(e)}"
 
     def get_message(self, response: dict) -> str:
         """Retrieves message only from response
@@ -259,6 +317,7 @@
         return response["text"]
 
 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
    from rich import print
    ai = LearnFast()
    response = ai.chat(input(">>> "), stream=True)
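
The hunks above replace LearnFast's hand-rolled line parser with a call to sanitize_stream plus a provider-specific content_extractor. As an illustration of the contract that call relies on, here is a minimal stand-in (sanitize_stream_sketch is a name invented for this note; it is not webscout's actual implementation) accepting the same keyword arguments used in the diff:

import json
from typing import Any, Callable, Dict, Iterable, List, Optional, Union

def sanitize_stream_sketch(
    data: Iterable[bytes],
    intro_value: Optional[str] = None,
    to_json: bool = True,
    skip_markers: Optional[List[str]] = None,
    content_extractor: Optional[Callable[[Union[str, Dict[str, Any]]], Optional[str]]] = None,
    yield_raw_on_error: bool = False,
):
    """Illustrative stand-in: split a byte stream into lines, optionally strip
    a prefix, parse JSON, and yield whatever the extractor pulls out."""
    skip_markers = skip_markers or []
    buffer = b""
    for chunk in data:
        buffer += chunk
        while b"\n" in buffer:
            raw_line, buffer = buffer.split(b"\n", 1)
            line = raw_line.decode("utf-8", errors="ignore").strip()
            if not line or line in skip_markers:
                continue  # e.g. drop "[DONE]" markers
            if intro_value and line.startswith(intro_value):
                line = line[len(intro_value):]  # strip an "data: "-style prefix
            payload = line
            if to_json:
                try:
                    payload = json.loads(line)
                except json.JSONDecodeError:
                    if yield_raw_on_error:
                        yield line
                    continue
            if content_extractor is not None:
                extracted = content_extractor(payload)
                if extracted:
                    yield extracted
            else:
                yield payload

With _learnfast_extractor plugged in, a line such as b'{"code": 200, "data": {"message": "Hi"}}\n' would come out as "Hi", and "[DONE]" is filtered rather than terminating the loop as it did in the removed code.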
webscout/Provider/llama3mitril.py

@@ -1,13 +1,13 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
-import re
-from typing import Union, Any, Dict, Optional, Generator
+from typing import Union, Any, Dict, Generator
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout.litagent import LitAgent as Lit
+
 
 class Llama3Mitril(Provider):
     """
@@ -29,7 +29,7 @@ class Llama3Mitril(Provider):
         temperature: float = 0.8,
     ):
         """Initializes the Llama3Mitril API."""
-        self.session = requests.Session()
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens = max_tokens
         self.temperature = temperature
@@ -40,7 +40,6 @@ class Llama3Mitril(Provider):
         self.headers = {
             "Content-Type": "application/json",
             "DNT": "1",
-            "User-Agent": Lit().random(),
         }
         self.__available_optimizers = (
             method
@@ -58,6 +57,8 @@ class Llama3Mitril(Provider):
             is_conversation, self.max_tokens, filepath, update_file
         )
         self.conversation.history_offset = history_offset
+        # Update curl_cffi session headers and proxies
+        self.session.headers.update(self.headers)
         self.session.proxies = proxies
 
     def _format_prompt(self, prompt: str) -> str:
@@ -73,7 +74,7 @@ class Llama3Mitril(Provider):
     def ask(
         self,
         prompt: str,
-        stream: bool = True,
+        stream: bool = True, # API supports streaming
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
@@ -100,66 +101,99 @@ class Llama3Mitril(Provider):
         }
 
         def for_stream():
-            response = self.session.post(
-                self.api_endpoint,
-                headers=self.headers,
-                json=data,
-                stream=True,
-                timeout=self.timeout
-            )
-            if not response.ok:
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+            streaming_response = "" # Initialize outside try block
+            try:
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    self.api_endpoint,
+                    # headers are set on the session
+                    json=data,
+                    stream=True,
+                    timeout=self.timeout,
+                    # proxies are set on the session
+                    impersonate="chrome110" # Use a common impersonation profile
+                )
+                response.raise_for_status() # Check for HTTP errors
+
+                # Iterate over bytes and decode manually
+                for line_bytes in response.iter_lines():
+                    if line_bytes:
+                        try:
+                            line = line_bytes.decode('utf-8')
+                            if line.startswith('data: '):
+                                chunk_str = line.split('data: ', 1)[1]
+                                chunk = json.loads(chunk_str)
+                                if token_text := chunk.get('token', {}).get('text'):
+                                    if '<|eot_id|>' not in token_text:
+                                        streaming_response += token_text
+                                        resp = {"text": token_text}
+                                        # Yield dict or raw string chunk
+                                        yield resp if not raw else token_text
+                        except (json.JSONDecodeError, IndexError, UnicodeDecodeError) as e:
+                            # Ignore errors in parsing specific lines
+                            continue
+
+                # Update history after stream finishes
+                self.last_response = {"text": streaming_response}
+                self.conversation.update_chat_history(
+                    prompt, streaming_response
                 )
 
-            streaming_response = ""
-            for line in response.iter_lines(decode_unicode=True):
-                if line:
-                    try:
-                        chunk = json.loads(line.split('data: ')[1])
-                        if token_text := chunk.get('token', {}).get('text'):
-                            if '<|eot_id|>' not in token_text:
-                                streaming_response += token_text
-                                yield token_text if raw else {"text": token_text}
-                    except (json.JSONDecodeError, IndexError) as e:
-                        continue
-
-            self.last_response.update({"text": streaming_response})
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e: # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"Failed to generate response ({type(e).__name__}): {e} - {err_text}") from e
 
         def for_non_stream():
-            full_response = ""
-            for chunk in for_stream():
-                full_response += chunk if raw else chunk['text']
-            return {"text": full_response}
+            # Aggregate the stream using the updated for_stream logic
+            full_response_text = ""
+            try:
+                # Ensure raw=False so for_stream yields dicts
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        full_response_text += chunk_data["text"]
+                    # Handle raw string case if raw=True was passed
+                    elif raw and isinstance(chunk_data, str):
+                        full_response_text += chunk_data
+            except Exception as e:
+                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                if not full_response_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+            # last_response and history are updated within for_stream
+            # Return the final aggregated response dict or raw string
+            return full_response_text if raw else self.last_response
 
         return for_stream() if stream else for_non_stream()
 
     def chat(
         self,
         prompt: str,
-        stream: bool = True,
+        stream: bool = True, # Default to True as API supports it
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
         """Generates a response from the Llama3 Mitril API."""
 
-        def for_stream():
-            for response in self.ask(
-                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
-            ):
-                yield self.get_message(response)
-
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt, stream=False, optimizer=optimizer, conversationally=conversationally
-                )
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally
            )
+            for response_dict in gen:
+                yield self.get_message(response_dict) # get_message expects dict
+
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt, stream=False, raw=False, # Ensure ask returns dict
+                optimizer=optimizer, conversationally=conversationally
+            )
+            return self.get_message(response_data) # get_message expects dict
 
-        return for_stream() if stream else for_non_stream()
+        return for_stream_chat() if stream else for_non_stream_chat()
 
     def get_message(self, response: Dict[str, Any]) -> str:
         """Extracts the message from the API response."""
@@ -168,8 +202,9 @@ class Llama3Mitril(Provider):
 
 
 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     from rich import print
-
+
     ai = Llama3Mitril(
         max_tokens=2048,
         temperature=0.8,
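
The same migration recurs across most provider files in this release: requests/cloudscraper sessions become curl_cffi Sessions that impersonate a browser TLS fingerprint, and SSE-style "data: " lines are decoded manually with per-line error tolerance. Below is a self-contained sketch of that shared pattern; the endpoint, payload shape, and the stream_tokens helper are placeholders for illustration, not any real provider's API:

import json
from curl_cffi import CurlError
from curl_cffi.requests import Session

API_ENDPOINT = "https://example.com/api/chat"  # placeholder endpoint, not a real provider

session = Session()
session.headers.update({"Content-Type": "application/json"})

def stream_tokens(prompt: str, timeout: int = 30):
    """Yield token strings from an SSE-style endpoint, mirroring the
    for_stream() structure the providers in this diff now share."""
    try:
        response = session.post(
            API_ENDPOINT,
            json={"prompt": prompt},  # placeholder payload shape
            stream=True,
            timeout=timeout,
            impersonate="chrome110",  # browser fingerprint, as in the diff
        )
        response.raise_for_status()
        for line_bytes in response.iter_lines():
            if not line_bytes:
                continue
            line = line_bytes.decode("utf-8", errors="ignore")
            if not line.startswith("data: "):
                continue
            try:
                chunk = json.loads(line.split("data: ", 1)[1])
            except json.JSONDecodeError:
                continue  # tolerate malformed lines, as the providers do
            if token_text := chunk.get("token", {}).get("text"):
                if "<|eot_id|>" not in token_text:
                    yield token_text
    except CurlError as e:
        raise RuntimeError(f"Request failed (CurlError): {e}") from e

The per-request impersonate argument is what replaces the removed sec-ch-ua/User-Agent spoofing and the cloudscraper/LitAgent dependencies: curl_cffi matches the TLS and HTTP fingerprint of the named browser, so the handcrafted client-hint headers become unnecessary.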