webscout 8.2.3__py3-none-any.whl → 8.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (122)
  1. webscout/AIutel.py +226 -14
  2. webscout/Bard.py +579 -206
  3. webscout/DWEBS.py +78 -35
  4. webscout/Extra/gguf.py +2 -0
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AISEARCH/scira_search.py +2 -5
  8. webscout/Provider/Aitopia.py +75 -51
  9. webscout/Provider/AllenAI.py +181 -147
  10. webscout/Provider/ChatGPTClone.py +97 -86
  11. webscout/Provider/ChatSandbox.py +342 -0
  12. webscout/Provider/Cloudflare.py +79 -32
  13. webscout/Provider/Deepinfra.py +135 -94
  14. webscout/Provider/ElectronHub.py +103 -39
  15. webscout/Provider/ExaChat.py +36 -20
  16. webscout/Provider/GPTWeb.py +103 -47
  17. webscout/Provider/GithubChat.py +52 -49
  18. webscout/Provider/GizAI.py +283 -0
  19. webscout/Provider/Glider.py +39 -28
  20. webscout/Provider/Groq.py +222 -91
  21. webscout/Provider/HeckAI.py +93 -69
  22. webscout/Provider/HuggingFaceChat.py +113 -106
  23. webscout/Provider/Hunyuan.py +94 -83
  24. webscout/Provider/Jadve.py +104 -79
  25. webscout/Provider/LambdaChat.py +142 -123
  26. webscout/Provider/Llama3.py +94 -39
  27. webscout/Provider/MCPCore.py +315 -0
  28. webscout/Provider/Marcus.py +95 -37
  29. webscout/Provider/Netwrck.py +94 -52
  30. webscout/Provider/OPENAI/__init__.py +4 -1
  31. webscout/Provider/OPENAI/ai4chat.py +286 -0
  32. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  33. webscout/Provider/OPENAI/deepinfra.py +37 -0
  34. webscout/Provider/OPENAI/exachat.py +4 -0
  35. webscout/Provider/OPENAI/groq.py +354 -0
  36. webscout/Provider/OPENAI/heckai.py +6 -2
  37. webscout/Provider/OPENAI/mcpcore.py +376 -0
  38. webscout/Provider/OPENAI/multichat.py +368 -0
  39. webscout/Provider/OPENAI/netwrck.py +3 -1
  40. webscout/Provider/OPENAI/scirachat.py +2 -4
  41. webscout/Provider/OPENAI/textpollinations.py +20 -22
  42. webscout/Provider/OPENAI/toolbaz.py +1 -0
  43. webscout/Provider/OpenGPT.py +48 -38
  44. webscout/Provider/PI.py +178 -93
  45. webscout/Provider/PizzaGPT.py +66 -36
  46. webscout/Provider/StandardInput.py +42 -30
  47. webscout/Provider/TeachAnything.py +95 -52
  48. webscout/Provider/TextPollinationsAI.py +138 -78
  49. webscout/Provider/TwoAI.py +162 -81
  50. webscout/Provider/TypliAI.py +305 -0
  51. webscout/Provider/Venice.py +97 -58
  52. webscout/Provider/VercelAI.py +33 -14
  53. webscout/Provider/WiseCat.py +65 -28
  54. webscout/Provider/Writecream.py +37 -11
  55. webscout/Provider/WritingMate.py +135 -63
  56. webscout/Provider/__init__.py +9 -27
  57. webscout/Provider/ai4chat.py +6 -7
  58. webscout/Provider/asksteve.py +53 -44
  59. webscout/Provider/cerebras.py +77 -31
  60. webscout/Provider/chatglm.py +47 -37
  61. webscout/Provider/copilot.py +0 -3
  62. webscout/Provider/elmo.py +109 -60
  63. webscout/Provider/granite.py +102 -54
  64. webscout/Provider/hermes.py +95 -48
  65. webscout/Provider/koala.py +1 -1
  66. webscout/Provider/learnfastai.py +113 -54
  67. webscout/Provider/llama3mitril.py +86 -51
  68. webscout/Provider/llmchat.py +88 -46
  69. webscout/Provider/llmchatco.py +110 -115
  70. webscout/Provider/meta.py +41 -37
  71. webscout/Provider/multichat.py +67 -28
  72. webscout/Provider/scira_chat.py +49 -30
  73. webscout/Provider/scnet.py +106 -53
  74. webscout/Provider/searchchat.py +87 -88
  75. webscout/Provider/sonus.py +113 -63
  76. webscout/Provider/toolbaz.py +115 -82
  77. webscout/Provider/turboseek.py +90 -43
  78. webscout/Provider/tutorai.py +82 -64
  79. webscout/Provider/typefully.py +85 -35
  80. webscout/Provider/typegpt.py +118 -61
  81. webscout/Provider/uncovr.py +132 -76
  82. webscout/Provider/x0gpt.py +69 -26
  83. webscout/Provider/yep.py +79 -66
  84. webscout/cli.py +256 -0
  85. webscout/conversation.py +34 -22
  86. webscout/exceptions.py +23 -0
  87. webscout/prompt_manager.py +56 -42
  88. webscout/version.py +1 -1
  89. webscout/webscout_search.py +65 -47
  90. webscout/webscout_search_async.py +81 -126
  91. webscout/yep_search.py +93 -43
  92. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
  93. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
  94. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
  95. webscout-8.2.5.dist-info/entry_points.txt +3 -0
  96. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
  97. inferno/__init__.py +0 -6
  98. inferno/__main__.py +0 -9
  99. inferno/cli.py +0 -6
  100. webscout/Local/__init__.py +0 -12
  101. webscout/Local/__main__.py +0 -9
  102. webscout/Local/api.py +0 -576
  103. webscout/Local/cli.py +0 -516
  104. webscout/Local/config.py +0 -75
  105. webscout/Local/llm.py +0 -287
  106. webscout/Local/model_manager.py +0 -253
  107. webscout/Local/server.py +0 -721
  108. webscout/Local/utils.py +0 -93
  109. webscout/Provider/C4ai.py +0 -432
  110. webscout/Provider/ChatGPTES.py +0 -237
  111. webscout/Provider/Chatify.py +0 -175
  112. webscout/Provider/DeepSeek.py +0 -196
  113. webscout/Provider/Llama.py +0 -200
  114. webscout/Provider/Phind.py +0 -535
  115. webscout/Provider/WebSim.py +0 -228
  116. webscout/Provider/askmyai.py +0 -158
  117. webscout/Provider/gaurish.py +0 -244
  118. webscout/Provider/labyrinth.py +0 -340
  119. webscout/Provider/lepton.py +0 -194
  120. webscout/Provider/llamatutor.py +0 -192
  121. webscout-8.2.3.dist-info/entry_points.txt +0 -5
  122. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/MCPCore.py (new file)
@@ -0,0 +1,315 @@
+ import json
+ import uuid
+ from typing import Any, Dict, Generator, Union
+
+ # Use curl_cffi for requests
+ from curl_cffi.requests import Session
+ from curl_cffi import CurlError
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+ class MCPCore(Provider):
+     """
+     A class to interact with the chat.mcpcore.xyz API.
+     Supports streaming responses.
+     """
+
+     # Add more models if known, starting with the one from the example
+     AVAILABLE_MODELS = [
+         "google/gemma-7b-it",
+         "deepseek-ai/deepseek-coder-33b-instruct",
+         "deepseek-ai/DeepSeek-R1",
+         "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
+         "deepseek-ai/DeepSeek-v3-0324",
+         "fixie-ai/ultravox-v0_4_1-llama-3_1-8b",
+         "meta-llama/Llama-3.3-70B-Instruct",
+         "meta-llama/Llama-4-Maverick-Instruct",
+         "mistralai/Mistral-7B-Instruct-v0.2",
+         "qwen-max-latest",
+         "qwen-plus-latest",
+         "qwen2.5-coder-32b-instruct",
+         "qwen-turbo-latest",
+         "qwen2.5-14b-instruct-1m",
+         "GLM-4-32B",
+         "Z1-32B",
+         "Z1-Rumination",
+         "arena-model",
+         "qvq-72b-preview-0310",
+         "qwq-32b",
+         "qwen3-235b-a22b",
+         "qwen3-30b-a3b",
+         "qwen3-32b",
+         "deepseek-flash",
+         "@cf/meta/llama-4-scout-17b-16e-instruct",
+         "任务专用",
+     ]
+
+     def __init__(
+         self,
+         cookies_path: str,
+         is_conversation: bool = True,
+         max_tokens: int = 2048,
+         timeout: int = 60,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "qwen3-32b",
+         system_prompt: str = "You are a helpful assistant.",
+     ):
+         """Initializes the MCPCore API client."""
+         if model not in self.AVAILABLE_MODELS:
+             print(f"Warning: Model '{model}' not in known AVAILABLE_MODELS. Attempting to use anyway.")
+
+         self.api_endpoint = "https://chat.mcpcore.xyz/api/chat/completions"
+         self.model = model
+         self.system_prompt = system_prompt
+         self.cookies_path = cookies_path
+         self.cookie_string, self.token = self._load_cookies()
+
+         # Initialize curl_cffi Session
+         self.session = Session()
+
+         # Set up headers based on the provided request
+         self.headers = {
+             'authority': 'chat.mcpcore.xyz',
+             'accept': '*/*',
+             'accept-language': 'en-US,en;q=0.9,en-IN;q=0.8',
+             'authorization': f'Bearer {self.token}' if self.token else '',
+             'content-type': 'application/json',
+             'dnt': '1',
+             'origin': 'https://chat.mcpcore.xyz',
+             'referer': 'https://chat.mcpcore.xyz/',
+             'priority': 'u=1, i',
+             'sec-ch-ua': '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
+             'sec-ch-ua-mobile': '?0',
+             'sec-ch-ua-platform': '"Windows"',
+             'sec-fetch-dest': 'empty',
+             'sec-fetch-mode': 'cors',
+             'sec-fetch-site': 'same-origin',
+             'sec-gpc': '1',
+             'user-agent': LitAgent().random(),
+         }
+
+         # Apply headers, proxies, and cookies to the session
+         self.session.headers.update(self.headers)
+         self.session.proxies = proxies
+         self.cookies = {
+             'token': self.token,
+         }
+         for name, value in self.cookies.items():
+             self.session.cookies.set(name, value, domain="chat.mcpcore.xyz")
+
+         # Provider settings
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+
+         # Initialize optimizers
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method))
+             and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def _load_cookies(self) -> tuple[str, str]:
+         """Load cookies from a JSON file and build a cookie header string."""
+         try:
+             with open(self.cookies_path, "r") as f:
+                 cookies = json.load(f)
+             cookie_string = "; ".join(
+                 f"{cookie['name']}={cookie['value']}" for cookie in cookies if 'name' in cookie and 'value' in cookie
+             )
+             token = next(
+                 (cookie.get("value") for cookie in cookies if cookie.get("name") == "token"),
+                 "",
+             )
+             return cookie_string, token
+         except FileNotFoundError:
+             raise exceptions.FailedToGenerateResponseError(
+                 f"Error: Cookies file not found at {self.cookies_path}!"
+             )
+         except json.JSONDecodeError:
+             raise exceptions.FailedToGenerateResponseError(
+                 f"Error: Invalid JSON format in cookies file: {self.cookies_path}!"
+             )
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         **kwargs
+     ) -> Union[Dict[str, Any], Generator]:
+         """Sends a prompt to the MCPCore API and returns the response."""
+
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise exceptions.InvalidOptimizerError(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         chat_id = kwargs.get("chat_id", str(uuid.uuid4()))
+         message_id = str(uuid.uuid4())
+
+         payload = {
+             "stream": stream,
+             "model": self.model,
+             "messages": [
+                 {"role": "system", "content": self.system_prompt},
+                 {"role": "user", "content": conversation_prompt}
+             ],
+             "params": kwargs.get("params", {}),
+             "tool_servers": kwargs.get("tool_servers", []),
+             "features": kwargs.get("features", {"web_search": False}),
+             "chat_id": chat_id,
+             "id": message_id,
+             "stream_options": kwargs.get("stream_options", {"include_usage": True})
+         }
+
+         def for_stream():
+             streaming_text = ""
+             try:
+                 response = self.session.post(
+                     self.api_endpoint,
+                     json=payload,
+                     stream=True,
+                     timeout=self.timeout,
+                     impersonate="chrome110"
+                 )
+                 response.raise_for_status()
+
+                 # Use sanitize_stream
+                 processed_stream = sanitize_stream(
+                     data=response.iter_content(chunk_size=None),  # Pass byte iterator
+                     intro_value="data:",
+                     to_json=True,  # Stream sends JSON
+                     skip_markers=["[DONE]"],
+                     content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None,
+                     yield_raw_on_error=False  # Skip non-JSON or lines where extractor fails
+                 )
+
+                 for content_chunk in processed_stream:
+                     # content_chunk is the string extracted by the content_extractor
+                     if content_chunk and isinstance(content_chunk, str):
+                         streaming_text += content_chunk
+                         yield dict(text=content_chunk) if not raw else content_chunk
+
+                 self.last_response = {"text": streaming_text}
+                 self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
+
+             except CurlError as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+             except Exception as e:
+                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                 raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e
+
+         def for_non_stream():
+             full_text = ""
+             try:
+                 stream_generator = self.ask(
+                     prompt, stream=True, raw=False, optimizer=optimizer, conversationally=conversationally, **kwargs
+                 )
+                 for chunk_data in stream_generator:
+                     if isinstance(chunk_data, dict):
+                         full_text += chunk_data["text"]
+                     elif isinstance(chunk_data, str):
+                         full_text += chunk_data
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Failed to aggregate non-stream response: {str(e)}") from e
+
+             return full_text if raw else self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         **kwargs
+     ) -> Union[str, Generator[str, None, None]]:
+         """Generates a response from the MCPCore API."""
+
+         def for_stream_chat() -> Generator[str, None, None]:
+             gen = self.ask(
+                 prompt, stream=True, raw=False,
+                 optimizer=optimizer, conversationally=conversationally, **kwargs
+             )
+             for response_dict in gen:
+                 yield self.get_message(response_dict)
+
+         def for_non_stream_chat() -> str:
+             response_data = self.ask(
+                 prompt, stream=False, raw=False,
+                 optimizer=optimizer, conversationally=conversationally, **kwargs
+             )
+             return self.get_message(response_data)
+
+         return for_stream_chat() if stream else for_non_stream_chat()
+
+     def get_message(self, response: Dict[str, Any]) -> str:
+         """Extracts the message from the API response."""
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response.get("text", "")
+
+ # Example usage (remember to create a cookies.json file)
+ if __name__ == "__main__":
+     from rich import print
+
+     cookies_file_path = "cookies.json"
+
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     for model in MCPCore.AVAILABLE_MODELS:
+         try:
+             test_ai = MCPCore(cookies_path=cookies_file_path, model=model, timeout=60)
+             response = test_ai.chat("Say 'Hello' in one word", stream=True)
+             response_text = ""
+             # Accumulate the response text without printing in the loop
+             for chunk in response:
+                 response_text += chunk
+
+             if response_text and len(response_text.strip()) > 0:
+                 status = "✓"
+                 # Truncate response if too long
+                 display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             # Print the final status and response on one line
+             print(f"\r{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             # Print the error in place of the response
+             print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/Marcus.py
@@ -1,10 +1,11 @@
- import requests
+ from curl_cffi.requests import Session
+ from curl_cffi import CurlError
  import json
  from typing import Union, Any, Dict, Optional, Generator

  from webscout.AIutel import Optimizers
  from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts
+ from webscout.AIutel import AwesomePrompts, sanitize_stream  # Import sanitize_stream
  from webscout.AIbase import Provider
  from webscout import exceptions

@@ -17,7 +18,7 @@ class Marcus(Provider):
      def __init__(
          self,
          is_conversation: bool = True,
-         max_tokens: int = 2048,
+         max_tokens: int = 2048,  # Note: max_tokens is not used by this API
          timeout: int = 30,
          intro: str = None,
          filepath: str = None,
@@ -27,7 +28,8 @@ class Marcus(Provider):
          act: str = None
      ):
          """Initializes the Marcus API."""
-         self.session = requests.Session()
+         # Initialize curl_cffi Session
+         self.session = Session()
          self.is_conversation = is_conversation
          self.max_tokens_to_sample = max_tokens
          self.api_endpoint = "https://www.askmarcus.app/api/response"
@@ -39,8 +41,11 @@
              'accept': '*/*',
              'origin': 'https://www.askmarcus.app',
              'referer': 'https://www.askmarcus.app/chat',
-             'user-agent': 'Mozilla/5.0',
          }
+
+         # Update curl_cffi session headers and proxies
+         self.session.headers.update(self.headers)
+         self.session.proxies = proxies  # Assign proxies directly

          self.__available_optimizers = (
              method
@@ -60,7 +65,6 @@
              is_conversation, self.max_tokens_to_sample, filepath, update_file
          )
          self.conversation.history_offset = history_offset
-         self.session.proxies = proxies

      def ask(
          self,
@@ -85,31 +89,79 @@
          data = {"message": conversation_prompt}

          def for_stream():
+             streaming_text = ""  # Initialize outside try block
              try:
-                 with requests.post(
+                 # Use curl_cffi session post with impersonate
+                 response = self.session.post(
                      self.api_endpoint,
-                     headers=self.headers,
+                     # headers are set on the session
                      json=data,
                      stream=True,
-                     timeout=self.timeout
-                 ) as response:
-                     response.raise_for_status()
-                     for line in response.iter_lines():
-                         if line:
-                             yield line.decode('utf-8')
-                     self.conversation.update_chat_history(
-                         prompt, self.get_message(self.last_response)
-                     )
-
-             except requests.exceptions.RequestException as e:
-                 raise exceptions.ProviderConnectionError(f"Error connecting to Marcus: {str(e)}")
+                     timeout=self.timeout,
+                     # proxies are set on the session
+                     impersonate="chrome110"  # Use a common impersonation profile
+                 )
+                 response.raise_for_status()  # Check for HTTP errors
+
+                 # Use sanitize_stream to decode bytes and yield text chunks
+                 processed_stream = sanitize_stream(
+                     data=response.iter_content(chunk_size=None),  # Pass byte iterator
+                     intro_value=None,  # No prefix
+                     to_json=False,  # It's plain text
+                     yield_raw_on_error=True
+                 )
+
+                 for content_chunk in processed_stream:
+                     if content_chunk and isinstance(content_chunk, str):
+                         streaming_text += content_chunk  # Aggregate text
+                         yield {"text": content_chunk} if not raw else content_chunk
+                 # Update history after stream finishes
+                 self.last_response = {"text": streaming_text}  # Store aggregated text
+                 self.conversation.update_chat_history(
+                     prompt, streaming_text
+                 )
+
+             except CurlError as e:  # Catch CurlError
+                 raise exceptions.ProviderConnectionError(f"Error connecting to Marcus (CurlError): {str(e)}") from e
+             except Exception as e:  # Catch other potential exceptions (like HTTPError)
+                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                 raise exceptions.ProviderConnectionError(f"Error connecting to Marcus ({type(e).__name__}): {str(e)} - {err_text}") from e

          def for_non_stream():
-             full_response = ""
-             for line in for_stream():
-                 full_response += line
-             self.last_response = {"text": full_response}
-             return self.last_response
+             try:
+                 # Use curl_cffi session post with impersonate
+                 response = self.session.post(
+                     self.api_endpoint,
+                     # headers are set on the session
+                     json=data,
+                     timeout=self.timeout,
+                     # proxies are set on the session
+                     impersonate="chrome110"  # Use a common impersonation profile
+                 )
+                 response.raise_for_status()  # Check for HTTP errors
+
+                 response_text_raw = response.text  # Get raw text
+
+                 # Process the text using sanitize_stream (even though it's not streaming)
+                 processed_stream = sanitize_stream(
+                     data=response_text_raw,
+                     intro_value=None,  # No prefix
+                     to_json=False  # It's plain text
+                 )
+                 # Aggregate the single result
+                 full_response = "".join(list(processed_stream))
+
+                 self.last_response = {"text": full_response}
+                 self.conversation.update_chat_history(prompt, full_response)
+                 # Return dict or raw string
+                 return full_response if raw else self.last_response
+
+             except CurlError as e:  # Catch CurlError
+                 raise exceptions.ProviderConnectionError(f"Error connecting to Marcus (CurlError): {str(e)}") from e
+             except Exception as e:  # Catch other potential exceptions (like HTTPError)
+                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                 raise exceptions.ProviderConnectionError(f"Error connecting to Marcus ({type(e).__name__}): {str(e)} - {err_text}") from e
+

          return for_stream() if stream else for_non_stream()

@@ -121,19 +173,24 @@
          conversationally: bool = False,
      ) -> Union[str, Generator[str, None, None]]:
          """Generates a response from the AskMarcus API."""
-         def for_stream():
-             for response_chunk in self.ask(
-                 prompt, stream=True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield response_chunk
-
-         def for_non_stream():
-             response = self.ask(
-                 prompt, stream=False, optimizer=optimizer, conversationally=conversationally
+         def for_stream_chat():
+             # ask() yields dicts or strings when streaming
+             gen = self.ask(
+                 prompt, stream=True, raw=False,  # Ensure ask yields dicts
+                 optimizer=optimizer, conversationally=conversationally
              )
-             return self.get_message(response)
+             for response_dict in gen:
+                 yield self.get_message(response_dict)  # get_message expects dict
+
+         def for_non_stream_chat():
+             # ask() returns dict or str when not streaming
+             response_data = self.ask(
+                 prompt, stream=False, raw=False,  # Ensure ask returns dict
+                 optimizer=optimizer, conversationally=conversationally
+             )
+             return self.get_message(response_data)  # get_message expects dict

-         return for_stream() if stream else for_non_stream()
+         return for_stream_chat() if stream else for_non_stream_chat()

      def get_message(self, response: Dict[str, Any]) -> str:
          """Extracts the message from the API response."""
@@ -141,8 +198,9 @@
          return response.get("text", "")

  if __name__ == "__main__":
+     # Ensure curl_cffi is installed
      from rich import print
      ai = Marcus()
-     response = ai.chat(input(">>> "), stream=True)
+     response = ai.chat("hi", stream=True)
      for chunk in response:
          print(chunk, end="", flush=True)
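
Both rewrites above route responses through webscout.AIutel.sanitize_stream instead of hand-rolled iter_lines loops. Its implementation is not part of this diff; judging only from the call sites shown (data, intro_value, to_json, skip_markers, content_extractor, yield_raw_on_error), it filters a byte stream roughly like the standalone sketch below. The function name and structure here are illustrative approximations, not the library's actual internals, and the sketch assumes the stream arrives line-delimited (a real implementation would buffer partial chunks from iter_content):

    import json
    from typing import Any, Callable, Iterable, Iterator, Optional

    def filter_stream(
        data: Iterable[bytes],
        intro_value: Optional[str] = "data:",
        to_json: bool = True,
        skip_markers: tuple = ("[DONE]",),
        content_extractor: Optional[Callable[[Any], Optional[str]]] = None,
    ) -> Iterator[str]:
        # Decode each chunk, strip the SSE "data:" prefix, drop end markers,
        # optionally JSON-decode, then apply the extractor -- the same shape
        # as the sanitize_stream(...) calls in both diffs above.
        for raw in data:
            line = raw.decode("utf-8", errors="ignore").strip()
            if intro_value and line.startswith(intro_value):
                line = line[len(intro_value):].strip()
            if not line or line in skip_markers:
                continue
            obj: Any = line
            if to_json:
                try:
                    obj = json.loads(line)
                except json.JSONDecodeError:
                    continue  # mirrors yield_raw_on_error=False
            text = content_extractor(obj) if content_extractor else obj
            if isinstance(text, str) and text:
                yield text

MCPCore's extractor, for instance, is lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None, which pulls the incremental delta text out of each OpenAI-style SSE event, while Marcus passes intro_value=None and to_json=False to treat the body as plain text.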