webscout 8.2.3-py3-none-any.whl → 8.2.4-py3-none-any.whl

This diff shows the content of publicly released package versions as published to their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of webscout might be problematic.

Files changed (87)
  1. inferno/lol.py +589 -0
  2. webscout/AIutel.py +226 -14
  3. webscout/Bard.py +579 -206
  4. webscout/DWEBS.py +78 -35
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AllenAI.py +163 -126
  8. webscout/Provider/ChatGPTClone.py +96 -84
  9. webscout/Provider/Deepinfra.py +95 -67
  10. webscout/Provider/ElectronHub.py +55 -0
  11. webscout/Provider/GPTWeb.py +96 -46
  12. webscout/Provider/Groq.py +194 -91
  13. webscout/Provider/HeckAI.py +89 -47
  14. webscout/Provider/HuggingFaceChat.py +113 -106
  15. webscout/Provider/Hunyuan.py +94 -83
  16. webscout/Provider/Jadve.py +107 -75
  17. webscout/Provider/LambdaChat.py +106 -64
  18. webscout/Provider/Llama3.py +94 -39
  19. webscout/Provider/MCPCore.py +318 -0
  20. webscout/Provider/Marcus.py +85 -36
  21. webscout/Provider/Netwrck.py +76 -43
  22. webscout/Provider/OPENAI/__init__.py +4 -1
  23. webscout/Provider/OPENAI/ai4chat.py +286 -0
  24. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  25. webscout/Provider/OPENAI/deepinfra.py +37 -0
  26. webscout/Provider/OPENAI/groq.py +354 -0
  27. webscout/Provider/OPENAI/heckai.py +6 -2
  28. webscout/Provider/OPENAI/mcpcore.py +376 -0
  29. webscout/Provider/OPENAI/multichat.py +368 -0
  30. webscout/Provider/OPENAI/netwrck.py +3 -1
  31. webscout/Provider/OpenGPT.py +48 -38
  32. webscout/Provider/PI.py +168 -92
  33. webscout/Provider/PizzaGPT.py +66 -36
  34. webscout/Provider/TeachAnything.py +85 -51
  35. webscout/Provider/TextPollinationsAI.py +109 -51
  36. webscout/Provider/TwoAI.py +109 -60
  37. webscout/Provider/Venice.py +93 -56
  38. webscout/Provider/VercelAI.py +2 -2
  39. webscout/Provider/WiseCat.py +65 -28
  40. webscout/Provider/Writecream.py +37 -11
  41. webscout/Provider/WritingMate.py +135 -63
  42. webscout/Provider/__init__.py +3 -21
  43. webscout/Provider/ai4chat.py +6 -7
  44. webscout/Provider/copilot.py +0 -3
  45. webscout/Provider/elmo.py +101 -58
  46. webscout/Provider/granite.py +91 -46
  47. webscout/Provider/hermes.py +87 -47
  48. webscout/Provider/koala.py +1 -1
  49. webscout/Provider/learnfastai.py +104 -50
  50. webscout/Provider/llama3mitril.py +86 -51
  51. webscout/Provider/llmchat.py +88 -46
  52. webscout/Provider/llmchatco.py +74 -49
  53. webscout/Provider/meta.py +41 -37
  54. webscout/Provider/multichat.py +54 -25
  55. webscout/Provider/scnet.py +93 -43
  56. webscout/Provider/searchchat.py +82 -75
  57. webscout/Provider/sonus.py +103 -51
  58. webscout/Provider/toolbaz.py +132 -77
  59. webscout/Provider/turboseek.py +92 -41
  60. webscout/Provider/tutorai.py +82 -64
  61. webscout/Provider/typefully.py +75 -33
  62. webscout/Provider/typegpt.py +96 -35
  63. webscout/Provider/uncovr.py +112 -62
  64. webscout/Provider/x0gpt.py +69 -26
  65. webscout/Provider/yep.py +79 -66
  66. webscout/conversation.py +35 -21
  67. webscout/exceptions.py +20 -0
  68. webscout/prompt_manager.py +56 -42
  69. webscout/version.py +1 -1
  70. webscout/webscout_search.py +65 -47
  71. webscout/webscout_search_async.py +81 -126
  72. webscout/yep_search.py +93 -43
  73. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/METADATA +22 -10
  74. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/RECORD +78 -81
  75. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/WHEEL +1 -1
  76. webscout/Provider/C4ai.py +0 -432
  77. webscout/Provider/ChatGPTES.py +0 -237
  78. webscout/Provider/DeepSeek.py +0 -196
  79. webscout/Provider/Llama.py +0 -200
  80. webscout/Provider/Phind.py +0 -535
  81. webscout/Provider/WebSim.py +0 -228
  82. webscout/Provider/labyrinth.py +0 -340
  83. webscout/Provider/lepton.py +0 -194
  84. webscout/Provider/llamatutor.py +0 -192
  85. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/entry_points.txt +0 -0
  86. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info/licenses}/LICENSE.md +0 -0
  87. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/top_level.txt +0 -0
webscout/Provider/MCPCore.py
--- /dev/null
+++ b/webscout/Provider/MCPCore.py
@@ -0,0 +1,318 @@
+import json
+import uuid
+from typing import Any, Dict, Generator, Union
+
+# Use curl_cffi for requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+
+class MCPCore(Provider):
+    """
+    A class to interact with the chat.mcpcore.xyz API.
+    Supports streaming responses.
+    """
+
+    # Add more models if known, starting with the one from the example
+    AVAILABLE_MODELS = [
+        "google/gemma-7b-it",
+        "deepseek-ai/deepseek-coder-33b-instruct",
+        "deepseek-ai/DeepSeek-R1",
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
+        "deepseek-ai/DeepSeek-v3-0324",
+        "fixie-ai/ultravox-v0_4_1-llama-3_1-8b",
+        "meta-llama/Llama-3.3-70B-Instruct",
+        "meta-llama/Llama-4-Maverick-Instruct",
+        "mistralai/Mistral-7B-Instruct-v0.2",
+        "qwen-max-latest",
+        "qwen-plus-latest",
+        "qwen2.5-coder-32b-instruct",
+        "qwen-turbo-latest",
+        "qwen2.5-14b-instruct-1m",
+        "GLM-4-32B",
+        "Z1-32B",
+        "Z1-Rumination",
+        "arena-model",
+        "qvq-72b-preview-0310",
+        "qwq-32b",
+        "qwen3-235b-a22b",
+        "qwen3-30b-a3b",
+        "qwen3-32b",
+        "deepseek-flash",
+        "@cf/meta/llama-4-scout-17b-16e-instruct",
+        "任务专用",
+    ]
+
+    def __init__(
+        self,
+        cookies_path: str,
+        is_conversation: bool = True,
+        max_tokens: int = 2048,
+        timeout: int = 60,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "qwen3-32b",
+        system_prompt: str = "You are a helpful assistant.",
+    ):
+        """Initializes the MCPCore API client."""
+        if model not in self.AVAILABLE_MODELS:
+            print(f"Warning: Model '{model}' not in known AVAILABLE_MODELS. Attempting to use anyway.")
+
+        self.api_endpoint = "https://chat.mcpcore.xyz/api/chat/completions"
+        self.model = model
+        self.system_prompt = system_prompt
+        self.cookies_path = cookies_path
+        self.cookie_string, self.token = self._load_cookies()
+
+        # Initialize curl_cffi Session
+        self.session = Session()
+
+        # Set up headers based on the provided request
+        self.headers = {
+            'authority': 'chat.mcpcore.xyz',
+            'accept': '*/*',
+            'accept-language': 'en-US,en;q=0.9,en-IN;q=0.8',
+            'authorization': f'Bearer {self.token}' if self.token else '',
+            'content-type': 'application/json',
+            'dnt': '1',
+            'origin': 'https://chat.mcpcore.xyz',
+            'referer': 'https://chat.mcpcore.xyz/',
+            'priority': 'u=1, i',
+            'sec-ch-ua': '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Windows"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'sec-gpc': '1',
+            'user-agent': LitAgent().random(),
+        }
+
+        # Apply headers, proxies, and cookies to the session
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies
+        self.cookies = {
+            'token': self.token,
+        }
+        for name, value in self.cookies.items():
+            self.session.cookies.set(name, value, domain="chat.mcpcore.xyz")
+
+        # Provider settings
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+
+        # Initialize optimizers
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method))
+            and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def _load_cookies(self) -> tuple[str, str]:
+        """Load cookies from a JSON file and build a cookie header string."""
+        try:
+            with open(self.cookies_path, "r") as f:
+                cookies = json.load(f)
+            cookie_string = "; ".join(
+                f"{cookie['name']}={cookie['value']}" for cookie in cookies if 'name' in cookie and 'value' in cookie
+            )
+            token = next(
+                (cookie.get("value") for cookie in cookies if cookie.get("name") == "token"),
+                "",
+            )
+            return cookie_string, token
+        except FileNotFoundError:
+            raise exceptions.InvalidAuthenticationError(
+                f"Error: Cookies file not found at {self.cookies_path}!"
+            )
+        except json.JSONDecodeError:
+            raise exceptions.InvalidAuthenticationError(
+                f"Error: Invalid JSON format in cookies file: {self.cookies_path}!"
+            )
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        **kwargs
+    ) -> Union[Dict[str, Any], Generator]:
+        """Sends a prompt to the MCPCore API and returns the response."""
+
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise exceptions.InvalidOptimizerError(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        chat_id = kwargs.get("chat_id", str(uuid.uuid4()))
+        message_id = str(uuid.uuid4())
+
+        payload = {
+            "stream": stream,
+            "model": self.model,
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt}
+            ],
+            "params": kwargs.get("params", {}),
+            "tool_servers": kwargs.get("tool_servers", []),
+            "features": kwargs.get("features", {"web_search": False}),
+            "chat_id": chat_id,
+            "id": message_id,
+            "stream_options": kwargs.get("stream_options", {"include_usage": True})
+        }
+
+        def for_stream():
+            streaming_text = ""
+            try:
+                response = self.session.post(
+                    self.api_endpoint,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome110"
+                )
+                response.raise_for_status()
+
+                for line_bytes in response.iter_lines():
+                    if line_bytes:
+                        try:
+                            line = line_bytes.decode('utf-8').strip()
+                            if line.startswith("data: "):
+                                json_str = line[6:]
+                                if json_str == "[DONE]":
+                                    break
+                                json_data = json.loads(json_str)
+                                if 'choices' in json_data and len(json_data['choices']) > 0:
+                                    delta = json_data['choices'][0].get('delta', {})
+                                    content = delta.get('content')
+                                    if content:
+                                        streaming_text += content
+                                        resp = dict(text=content)
+                                        yield resp if not raw else content
+                        except (json.JSONDecodeError, UnicodeDecodeError):
+                            continue
+
+                self.last_response = {"text": streaming_text}
+                self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
+
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e:
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e
+
+        def for_non_stream():
+            full_text = ""
+            try:
+                stream_generator = self.ask(
+                    prompt, stream=True, raw=False, optimizer=optimizer, conversationally=conversationally, **kwargs
+                )
+                for chunk_data in stream_generator:
+                    if isinstance(chunk_data, dict):
+                        full_text += chunk_data["text"]
+                    elif isinstance(chunk_data, str):
+                        full_text += chunk_data
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Failed to aggregate non-stream response: {str(e)}") from e
+
+            return full_text if raw else self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        **kwargs
+    ) -> Union[str, Generator[str, None, None]]:
+        """Generates a response from the MCPCore API."""
+
+        def for_stream_chat() -> Generator[str, None, None]:
+            gen = self.ask(
+                prompt, stream=True, raw=False,
+                optimizer=optimizer, conversationally=conversationally, **kwargs
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict)
+
+        def for_non_stream_chat() -> str:
+            response_data = self.ask(
+                prompt, stream=False, raw=False,
+                optimizer=optimizer, conversationally=conversationally, **kwargs
+            )
+            return self.get_message(response_data)
+
+        return for_stream_chat() if stream else for_non_stream_chat()
+
+    def get_message(self, response: Dict[str, Any]) -> str:
+        """Extracts the message from the API response."""
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response.get("text", "")
+
+# Example usage (remember to create a cookies.json file)
+if __name__ == "__main__":
+    from rich import print
+
+    cookies_file_path = "cookies.json"
+
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in MCPCore.AVAILABLE_MODELS:
+        try:
+            test_ai = MCPCore(cookies_path=cookies_file_path, model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            # Accumulate the response text without printing in the loop
+            for chunk in response:
+                response_text += chunk
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            # Print the final status and response, overwriting the "Testing..." line
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            # Print error, overwriting the "Testing..." line
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/Marcus.py
--- a/webscout/Provider/Marcus.py
+++ b/webscout/Provider/Marcus.py
@@ -1,4 +1,5 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 from typing import Union, Any, Dict, Optional, Generator
 
@@ -17,7 +18,7 @@ class Marcus(Provider):
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 2048,
+        max_tokens: int = 2048,  # Note: max_tokens is not used by this API
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -27,7 +28,8 @@ class Marcus(Provider):
         act: str = None
     ):
         """Initializes the Marcus API."""
-        self.session = requests.Session()
+        # Initialize curl_cffi Session
+        self.session = Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://www.askmarcus.app/api/response"
@@ -39,8 +41,11 @@ class Marcus(Provider):
             'accept': '*/*',
             'origin': 'https://www.askmarcus.app',
             'referer': 'https://www.askmarcus.app/chat',
-            'user-agent': 'Mozilla/5.0',
         }
+
+        # Update curl_cffi session headers and proxies
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies  # Assign proxies directly
 
        self.__available_optimizers = (
            method
@@ -60,7 +65,6 @@ class Marcus(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
 
     def ask(
         self,
@@ -85,31 +89,70 @@ class Marcus(Provider):
         data = {"message": conversation_prompt}
 
         def for_stream():
+            streaming_text = ""  # Initialize outside try block
             try:
-                with requests.post(
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
                     self.api_endpoint,
-                    headers=self.headers,
+                    # headers are set on the session
                     json=data,
                     stream=True,
-                    timeout=self.timeout
-                ) as response:
-                    response.raise_for_status()
-                    for line in response.iter_lines():
-                        if line:
-                            yield line.decode('utf-8')
-                    self.conversation.update_chat_history(
-                        prompt, self.get_message(self.last_response)
-                    )
-
-            except requests.exceptions.RequestException as e:
-                raise exceptions.ProviderConnectionError(f"Error connecting to Marcus: {str(e)}")
+                    timeout=self.timeout,
+                    # proxies are set on the session
+                    impersonate="chrome110"  # Use a common impersonation profile
+                )
+                response.raise_for_status()  # Check for HTTP errors
+
+                # Iterate over bytes and decode manually
+                for line_bytes in response.iter_lines():
+                    if line_bytes:
+                        try:
+                            decoded_line = line_bytes.decode('utf-8')
+                            streaming_text += decoded_line  # Aggregate text
+                            resp = {"text": decoded_line}
+                            # Yield dict or raw string chunk
+                            yield resp if not raw else decoded_line
+                        except UnicodeDecodeError:
+                            continue  # Ignore decoding errors
+
+                # Update history after stream finishes
+                self.last_response = {"text": streaming_text}  # Store aggregated text
+                self.conversation.update_chat_history(
+                    prompt, streaming_text
+                )
+
+            except CurlError as e:  # Catch CurlError
+                raise exceptions.ProviderConnectionError(f"Error connecting to Marcus (CurlError): {str(e)}") from e
+            except Exception as e:  # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.ProviderConnectionError(f"Error connecting to Marcus ({type(e).__name__}): {str(e)} - {err_text}") from e
 
         def for_non_stream():
-            full_response = ""
-            for line in for_stream():
-                full_response += line
-            self.last_response = {"text": full_response}
-            return self.last_response
+            try:
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    self.api_endpoint,
+                    # headers are set on the session
+                    json=data,
+                    timeout=self.timeout,
+                    # proxies are set on the session
+                    impersonate="chrome110"  # Use a common impersonation profile
+                )
+                response.raise_for_status()  # Check for HTTP errors
+
+                # Use response.text which is already decoded
+                full_response = response.text
+                self.last_response = {"text": full_response}
+                self.conversation.update_chat_history(prompt, full_response)
+                # Return dict or raw string
+                return full_response if raw else self.last_response
+
+            except CurlError as e:  # Catch CurlError
+                raise exceptions.ProviderConnectionError(f"Error connecting to Marcus (CurlError): {str(e)}") from e
+            except Exception as e:  # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.ProviderConnectionError(f"Error connecting to Marcus ({type(e).__name__}): {str(e)} - {err_text}") from e
+
 
         return for_stream() if stream else for_non_stream()
 
@@ -121,19 +164,24 @@ class Marcus(Provider):
         conversationally: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
         """Generates a response from the AskMarcus API."""
-        def for_stream():
-            for response_chunk in self.ask(
-                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
-            ):
-                yield response_chunk
-
-        def for_non_stream():
-            response = self.ask(
-                prompt, stream=False, optimizer=optimizer, conversationally=conversationally
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False,  # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict)  # get_message expects dict
+
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt, stream=False, raw=False,  # Ensure ask returns dict
+                optimizer=optimizer, conversationally=conversationally
             )
-            return self.get_message(response)
+            return self.get_message(response_data)  # get_message expects dict
 
-        return for_stream() if stream else for_non_stream()
+        return for_stream_chat() if stream else for_non_stream_chat()
 
     def get_message(self, response: Dict[str, Any]) -> str:
         """Extracts the message from the API response."""
@@ -141,8 +189,9 @@ class Marcus(Provider):
         return response.get("text", "")
 
 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     from rich import print
     ai = Marcus()
-    response = ai.chat(input(">>> "), stream=True)
+    response = ai.chat("hi", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
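Note: the Marcus changes above mirror the migration applied across most providers in this release: the `requests` session is swapped for a `curl_cffi` `Session` that can impersonate a browser TLS fingerprint, and `CurlError` is caught in place of `requests.exceptions.RequestException`. A minimal sketch of that pattern in isolation (the endpoint and error type are placeholders, not webscout API):

    from curl_cffi.requests import Session
    from curl_cffi import CurlError

    session = Session()
    session.headers.update({"accept": "*/*"})

    try:
        # impersonate presents a Chrome-like TLS/HTTP fingerprint,
        # which some endpoints require before they will respond
        response = session.post(
            "https://example.com/api/response",  # placeholder endpoint
            json={"message": "hi"},
            timeout=30,
            impersonate="chrome110",
        )
        response.raise_for_status()
        print(response.text)
    except CurlError as e:
        raise RuntimeError(f"Transport error: {e}") from e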