webscout-8.2.3-py3-none-any.whl → webscout-8.2.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (122)
  1. webscout/AIutel.py +226 -14
  2. webscout/Bard.py +579 -206
  3. webscout/DWEBS.py +78 -35
  4. webscout/Extra/gguf.py +2 -0
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AISEARCH/scira_search.py +2 -5
  8. webscout/Provider/Aitopia.py +75 -51
  9. webscout/Provider/AllenAI.py +181 -147
  10. webscout/Provider/ChatGPTClone.py +97 -86
  11. webscout/Provider/ChatSandbox.py +342 -0
  12. webscout/Provider/Cloudflare.py +79 -32
  13. webscout/Provider/Deepinfra.py +135 -94
  14. webscout/Provider/ElectronHub.py +103 -39
  15. webscout/Provider/ExaChat.py +36 -20
  16. webscout/Provider/GPTWeb.py +103 -47
  17. webscout/Provider/GithubChat.py +52 -49
  18. webscout/Provider/GizAI.py +283 -0
  19. webscout/Provider/Glider.py +39 -28
  20. webscout/Provider/Groq.py +222 -91
  21. webscout/Provider/HeckAI.py +93 -69
  22. webscout/Provider/HuggingFaceChat.py +113 -106
  23. webscout/Provider/Hunyuan.py +94 -83
  24. webscout/Provider/Jadve.py +104 -79
  25. webscout/Provider/LambdaChat.py +142 -123
  26. webscout/Provider/Llama3.py +94 -39
  27. webscout/Provider/MCPCore.py +315 -0
  28. webscout/Provider/Marcus.py +95 -37
  29. webscout/Provider/Netwrck.py +94 -52
  30. webscout/Provider/OPENAI/__init__.py +4 -1
  31. webscout/Provider/OPENAI/ai4chat.py +286 -0
  32. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  33. webscout/Provider/OPENAI/deepinfra.py +37 -0
  34. webscout/Provider/OPENAI/exachat.py +4 -0
  35. webscout/Provider/OPENAI/groq.py +354 -0
  36. webscout/Provider/OPENAI/heckai.py +6 -2
  37. webscout/Provider/OPENAI/mcpcore.py +376 -0
  38. webscout/Provider/OPENAI/multichat.py +368 -0
  39. webscout/Provider/OPENAI/netwrck.py +3 -1
  40. webscout/Provider/OPENAI/scirachat.py +2 -4
  41. webscout/Provider/OPENAI/textpollinations.py +20 -22
  42. webscout/Provider/OPENAI/toolbaz.py +1 -0
  43. webscout/Provider/OpenGPT.py +48 -38
  44. webscout/Provider/PI.py +178 -93
  45. webscout/Provider/PizzaGPT.py +66 -36
  46. webscout/Provider/StandardInput.py +42 -30
  47. webscout/Provider/TeachAnything.py +95 -52
  48. webscout/Provider/TextPollinationsAI.py +138 -78
  49. webscout/Provider/TwoAI.py +162 -81
  50. webscout/Provider/TypliAI.py +305 -0
  51. webscout/Provider/Venice.py +97 -58
  52. webscout/Provider/VercelAI.py +33 -14
  53. webscout/Provider/WiseCat.py +65 -28
  54. webscout/Provider/Writecream.py +37 -11
  55. webscout/Provider/WritingMate.py +135 -63
  56. webscout/Provider/__init__.py +9 -27
  57. webscout/Provider/ai4chat.py +6 -7
  58. webscout/Provider/asksteve.py +53 -44
  59. webscout/Provider/cerebras.py +77 -31
  60. webscout/Provider/chatglm.py +47 -37
  61. webscout/Provider/copilot.py +0 -3
  62. webscout/Provider/elmo.py +109 -60
  63. webscout/Provider/granite.py +102 -54
  64. webscout/Provider/hermes.py +95 -48
  65. webscout/Provider/koala.py +1 -1
  66. webscout/Provider/learnfastai.py +113 -54
  67. webscout/Provider/llama3mitril.py +86 -51
  68. webscout/Provider/llmchat.py +88 -46
  69. webscout/Provider/llmchatco.py +110 -115
  70. webscout/Provider/meta.py +41 -37
  71. webscout/Provider/multichat.py +67 -28
  72. webscout/Provider/scira_chat.py +49 -30
  73. webscout/Provider/scnet.py +106 -53
  74. webscout/Provider/searchchat.py +87 -88
  75. webscout/Provider/sonus.py +113 -63
  76. webscout/Provider/toolbaz.py +115 -82
  77. webscout/Provider/turboseek.py +90 -43
  78. webscout/Provider/tutorai.py +82 -64
  79. webscout/Provider/typefully.py +85 -35
  80. webscout/Provider/typegpt.py +118 -61
  81. webscout/Provider/uncovr.py +132 -76
  82. webscout/Provider/x0gpt.py +69 -26
  83. webscout/Provider/yep.py +79 -66
  84. webscout/cli.py +256 -0
  85. webscout/conversation.py +34 -22
  86. webscout/exceptions.py +23 -0
  87. webscout/prompt_manager.py +56 -42
  88. webscout/version.py +1 -1
  89. webscout/webscout_search.py +65 -47
  90. webscout/webscout_search_async.py +81 -126
  91. webscout/yep_search.py +93 -43
  92. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
  93. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
  94. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
  95. webscout-8.2.5.dist-info/entry_points.txt +3 -0
  96. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
  97. inferno/__init__.py +0 -6
  98. inferno/__main__.py +0 -9
  99. inferno/cli.py +0 -6
  100. webscout/Local/__init__.py +0 -12
  101. webscout/Local/__main__.py +0 -9
  102. webscout/Local/api.py +0 -576
  103. webscout/Local/cli.py +0 -516
  104. webscout/Local/config.py +0 -75
  105. webscout/Local/llm.py +0 -287
  106. webscout/Local/model_manager.py +0 -253
  107. webscout/Local/server.py +0 -721
  108. webscout/Local/utils.py +0 -93
  109. webscout/Provider/C4ai.py +0 -432
  110. webscout/Provider/ChatGPTES.py +0 -237
  111. webscout/Provider/Chatify.py +0 -175
  112. webscout/Provider/DeepSeek.py +0 -196
  113. webscout/Provider/Llama.py +0 -200
  114. webscout/Provider/Phind.py +0 -535
  115. webscout/Provider/WebSim.py +0 -228
  116. webscout/Provider/askmyai.py +0 -158
  117. webscout/Provider/gaurish.py +0 -244
  118. webscout/Provider/labyrinth.py +0 -340
  119. webscout/Provider/lepton.py +0 -194
  120. webscout/Provider/llamatutor.py +0 -192
  121. webscout-8.2.3.dist-info/entry_points.txt +0 -5
  122. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/cerebras.py CHANGED
@@ -1,10 +1,11 @@
 
 import re
-import requests
+import curl_cffi
+from curl_cffi.requests import Session
 import json
 import os
 from typing import Any, Dict, Optional, Generator, List, Union
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent as UserAgent
@@ -17,7 +18,9 @@ class Cerebras(Provider):
     AVAILABLE_MODELS = [
         "llama3.1-8b",
         "llama-3.3-70b",
-        "deepseek-r1-distill-llama-70b"
+        "deepseek-r1-distill-llama-70b",
+        "llama-4-scout-17b-16e-instruct"
+
     ]
 
     def __init__(
@@ -49,6 +52,8 @@ class Cerebras(Provider):
         self.max_tokens_to_sample = max_tokens
         self.last_response = {}
 
+        self.session = Session() # Initialize curl_cffi session
+
         # Get API key first
         try:
             self.api_key = self.get_demo_api_key(cookie_path)
@@ -74,6 +79,9 @@ class Cerebras(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
+
+        # Apply proxies to the session
+        self.session.proxies = proxies
 
     # Rest of the class implementation remains the same...
     @staticmethod
@@ -88,7 +96,14 @@ class Cerebras(Provider):
         """Refines the input text by removing surrounding quotes."""
         return text.strip('"')
 
-    def get_demo_api_key(self, cookie_path: str) -> str:
+    @staticmethod
+    def _cerebras_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from Cerebras stream JSON objects."""
+        if isinstance(chunk, dict):
+            return chunk.get("choices", [{}])[0].get("delta", {}).get("content")
+        return None
+
+    def get_demo_api_key(self, cookie_path: str) -> str: # Keep this using requests or switch to curl_cffi
         """Retrieves the demo API key using the provided cookie."""
         try:
             with open(cookie_path, "r") as file:
@@ -114,17 +129,19 @@ class Cerebras(Provider):
         }
 
         try:
-            response = requests.post(
+            # Use the initialized curl_cffi session
+            response = self.session.post(
                 "https://inference.cerebras.ai/api/graphql",
                 cookies=cookies,
                 headers=headers,
                 json=json_data,
                 timeout=self.timeout,
+                impersonate="chrome120" # Add impersonate
             )
             response.raise_for_status()
-            api_key = response.json()["data"]["GetMyDemoApiKey"]
+            api_key = response.json().get("data", {}).get("GetMyDemoApiKey")
             return api_key
-        except requests.exceptions.RequestException as e:
+        except curl_cffi.CurlError as e:
             raise exceptions.APIConnectionError(f"Failed to retrieve API key: {e}")
         except KeyError:
             raise exceptions.InvalidResponseError("API key not found in response.")
@@ -144,41 +161,48 @@ class Cerebras(Provider):
         }
 
         try:
-            response = requests.post(
+            # Use the initialized curl_cffi session
+            response = self.session.post(
                 "https://api.cerebras.ai/v1/chat/completions",
                 headers=headers,
                 json=payload,
                 stream=stream,
-                timeout=self.timeout
+                timeout=self.timeout,
+                impersonate="chrome120" # Add impersonate
             )
             response.raise_for_status()
 
             if stream:
                 def generate_stream():
-                    for line in response.iter_lines():
-                        if line:
-                            line = line.decode('utf-8')
-                            if line.startswith('data:'):
-                                try:
-                                    data = json.loads(line[6:])
-                                    if data.get('choices') and data['choices'][0].get('delta', {}).get('content'):
-                                        content = data['choices'][0]['delta']['content']
-                                        yield content
-                                except json.JSONDecodeError:
-                                    continue
+                    # Use sanitize_stream
+                    processed_stream = sanitize_stream(
+                        data=response.iter_content(chunk_size=None), # Pass byte iterator
+                        intro_value="data:",
+                        to_json=True, # Stream sends JSON
+                        content_extractor=self._cerebras_extractor, # Use the specific extractor
+                        yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                    )
+                    for content_chunk in processed_stream:
+                        if content_chunk and isinstance(content_chunk, str):
+                            yield content_chunk # Yield the extracted text chunk
 
                 return generate_stream()
             else:
                 response_json = response.json()
-                return response_json['choices'][0]['message']['content']
+                # Extract content for non-streaming response
+                content = response_json.get("choices", [{}])[0].get("message", {}).get("content")
+                return content if content else "" # Return empty string if not found
 
-        except requests.exceptions.RequestException as e:
+        except curl_cffi.CurlError as e:
+            raise exceptions.APIConnectionError(f"Request failed (CurlError): {e}") from e
+        except Exception as e: # Catch other potential errors
            raise exceptions.APIConnectionError(f"Request failed: {e}")

    def ask(
        self,
        prompt: str,
        stream: bool = False,
+        raw: bool = False, # Add raw parameter for consistency
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict, Generator]:
@@ -199,11 +223,23 @@ class Cerebras(Provider):
 
         try:
             response = self._make_request(messages, stream)
-            if stream:
-                return response
 
-            self.last_response = response
-            return response
+            if stream:
+                # Wrap the generator to yield dicts or raw strings
+                def stream_wrapper():
+                    full_text = ""
+                    for chunk in response:
+                        full_text += chunk
+                        yield chunk if raw else {"text": chunk}
+                    # Update history after stream finishes
+                    self.last_response = {"text": full_text}
+                    self.conversation.update_chat_history(prompt, full_text)
+                return stream_wrapper()
+            else:
+                # Non-streaming response is already the full text string
+                self.last_response = {"text": response}
+                self.conversation.update_chat_history(prompt, response)
+                return self.last_response if not raw else response # Return dict or raw string
 
         except Exception as e:
             raise exceptions.FailedToGenerateResponseError(f"Error during request: {e}")
@@ -216,14 +252,24 @@ class Cerebras(Provider):
         conversationally: bool = False,
     ) -> Union[str, Generator]:
         """Chat with the model."""
-        response = self.ask(prompt, stream, optimizer, conversationally)
+        # Ask returns a generator for stream=True, dict/str for stream=False
+        response_gen_or_dict = self.ask(prompt, stream, raw=False, optimizer=optimizer, conversationally=conversationally)
+
         if stream:
-            return response
-        return response
+            # Wrap the generator from ask() to get message text
+            def stream_wrapper():
+                for chunk_dict in response_gen_or_dict:
+                    yield self.get_message(chunk_dict)
+            return stream_wrapper()
+        else:
+            # Non-streaming response is already a dict
+            return self.get_message(response_gen_or_dict)
 
     def get_message(self, response: str) -> str:
         """Retrieves message from response."""
-        return response
+        # Updated to handle dict input from ask()
+        assert isinstance(response, dict), "Response should be of dict data-type only for get_message"
+        return response.get("text", "")
 
 
 if __name__ == "__main__":
@@ -231,7 +277,7 @@ if __name__ == "__main__":
 
     # Example usage
     cerebras = Cerebras(
-        cookie_path='cookie.json',
+        cookie_path=r'cookies.json',
         model='llama3.1-8b',
         system_prompt="You are a helpful AI assistant."
     )
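
Note: the common thread in this release — visible above and repeated across most provider files — is replacing hand-rolled `response.iter_lines()` parsing with `webscout.AIutel.sanitize_stream` plus a small per-provider `content_extractor`. The real helper ships inside webscout; the sketch below is only an illustrative stand-in inferred from the call sites in this diff (the parameter names match the diff, the internals are assumed):

```python
import json
from typing import Any, Callable, Dict, Iterator, Optional, Union

def sanitize_stream_sketch(
    data: Iterator[bytes],
    intro_value: Optional[str] = "data:",
    to_json: bool = True,
    content_extractor: Optional[Callable[[Union[str, Dict[str, Any]]], Optional[str]]] = None,
    yield_raw_on_error: bool = False,
) -> Iterator[Any]:
    """Toy stand-in for webscout.AIutel.sanitize_stream; real internals may differ."""
    buffer = b""
    for chunk in data:                      # raw bytes from response.iter_content(...)
        buffer += chunk
        while b"\n" in buffer:              # process one complete line at a time
            raw_line, buffer = buffer.split(b"\n", 1)
            line = raw_line.decode("utf-8", errors="ignore").strip()
            if not line:
                continue
            if intro_value and line.startswith(intro_value):
                line = line[len(intro_value):].strip()   # drop the SSE "data:" prefix
            payload: Union[str, Dict[str, Any]] = line
            if to_json:
                try:
                    payload = json.loads(line)
                except json.JSONDecodeError:
                    if yield_raw_on_error:
                        yield line
                    continue
            # Delegate provider-specific JSON shapes to the extractor
            extracted = content_extractor(payload) if content_extractor else payload
            if extracted is not None:
                yield extracted
```

Under this reading, an extractor such as `_cerebras_extractor` only has to know its provider's JSON shape; line buffering, prefix stripping, and JSON decoding live in one place.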
webscout/Provider/chatglm.py CHANGED
@@ -1,11 +1,12 @@
-import requests
+from curl_cffi import CurlError
+from curl_cffi.requests import Session
 import json
 from typing import Any, Dict, Optional, Generator, List, Union
 import uuid
 
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -29,7 +30,7 @@ class ChatGLM(Provider):
         plus_model: bool = True,
     ):
         """Initializes the ChatGLM API client."""
-        self.session = requests.Session()
+        self.session = Session() # Use curl_cffi Session
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = "https://chatglm.cn/chatglm/mainchat-api/guest/stream"
@@ -55,7 +56,7 @@ class ChatGLM(Provider):
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
         self.session.headers.update(self.headers)
-        Conversation.intro = (
+        Conversation.intro = ( # type: ignore
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
             )
@@ -66,7 +67,16 @@ class ChatGLM(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
+        self.session.proxies = proxies # Assign proxies directly
+
+    @staticmethod
+    def _chatglm_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from ChatGLM stream JSON objects."""
+        if isinstance(chunk, dict):
+            parts = chunk.get('parts', [])
+            if parts and isinstance(parts[0].get('content'), list) and parts[0]['content']:
+                return parts[0]['content'][0].get('text')
+        return None
 
     def ask(
         self,
@@ -119,45 +129,45 @@ class ChatGLM(Provider):
         }
 
         def for_stream():
+            streaming_text = "" # Initialize outside try block
+            last_processed_content = "" # Track the last processed content
             try:
-                with self.session.post(
-                    self.api_endpoint, json=payload, stream=True, timeout=self.timeout
-                ) as response:
-                    response.raise_for_status()
-
-                    streaming_text = ""
-                    last_processed_content = "" # Track the last processed content
-                    for chunk in response.iter_lines():
-                        if chunk:
-                            decoded_chunk = chunk.decode('utf-8')
-                            if decoded_chunk.startswith('data: '):
-                                try:
-                                    json_data = json.loads(decoded_chunk[6:])
-                                    parts = json_data.get('parts', [])
-                                    if parts:
-                                        content = parts[0].get('content', [])
-                                        if content:
-                                            text = content[0].get('text', '')
-                                            new_text = text[len(last_processed_content):]
-                                            if new_text: # Check for new content
-                                                streaming_text += new_text
-                                                last_processed_content = text
-                                                yield new_text if raw else dict(text=new_text)
-                                except json.JSONDecodeError:
-                                    continue
+                response = self.session.post(
+                    self.api_endpoint, json=payload, stream=True, timeout=self.timeout,
+                    impersonate="chrome120" # Add impersonate
+                )
+                response.raise_for_status()
+
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True, # Stream sends JSON
+                    content_extractor=self._chatglm_extractor, # Use the specific extractor
+                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                )
 
+                for current_full_text in processed_stream:
+                    # current_full_text is the full text extracted by _chatglm_extractor
+                    if current_full_text and isinstance(current_full_text, str):
+                        new_text = current_full_text[len(last_processed_content):]
+                        if new_text: # Check for new content
+                            streaming_text += new_text
+                            last_processed_content = current_full_text # Update tracker
+                            yield new_text if raw else dict(text=new_text)
+
+            except CurlError as e:
+                raise exceptions.ProviderConnectionError(f"Request failed (CurlError): {e}") from e
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}") from e
+            finally:
+                # Update history after stream finishes or fails
+                if streaming_text:
                    self.last_response.update(dict(text=streaming_text))
                    self.conversation.update_chat_history(
                        prompt, self.get_message(self.last_response)
                    )
 
-            except requests.exceptions.RequestException as e:
-                raise exceptions.ProviderConnectionError(f"Request failed: {e}")
-            except json.JSONDecodeError as e:
-                raise exceptions.InvalidResponseError(f"Failed to decode JSON: {e}")
-            except Exception as e:
-                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred: {e}")
-
         def for_non_stream():
             for _ in for_stream():
                 pass
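
Note: unlike the OpenAI-style delta stream in the Cerebras diff, ChatGLM's `data:` events appear to carry the full text generated so far, which is why the rewritten `for_stream()` keeps `last_processed_content` and yields only the unseen suffix. A toy illustration of that cumulative-to-delta slicing (the sample events are invented):

```python
# Each event repeats everything generated so far; emit only the new tail.
events = ["Hel", "Hello,", "Hello, wor", "Hello, world!"]  # invented cumulative chunks

last_seen = ""
for full_text in events:
    delta = full_text[len(last_seen):]  # strip the already-emitted prefix
    if delta:
        print(delta, end="")            # prints "Hello, world!" exactly once
        last_seen = full_text
```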
webscout/Provider/copilot.py CHANGED
@@ -2,7 +2,6 @@ import os
 import json
 import base64
 import asyncio
-import requests
 from urllib.parse import quote
 from typing import Optional, Dict, Any, List, Union, Generator
 
@@ -287,8 +286,6 @@ class Copilot(Provider):
             finally:
                 wss.close()
 
-        except requests.RequestException as e:
-            raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
         except Exception as e:
             raise exceptions.FailedToGenerateResponseError(f"Error: {str(e)}")
 
webscout/Provider/elmo.py CHANGED
@@ -1,9 +1,15 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+import json
+from typing import Optional, Union, Any, Dict, Generator
+from webscout import exceptions
 from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
+from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout.litagent import LitAgent
+import re # Import re for the extractor
+
 
 class Elmo(Provider):
     """
@@ -13,7 +19,7 @@ class Elmo(Provider):
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 600,
+        max_tokens: int = 600, # Note: max_tokens is not used by this API
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -22,7 +28,6 @@ class Elmo(Provider):
         history_offset: int = 10250,
         act: str = None,
         system_prompt: str = "You are a helpful AI assistant. Provide clear, concise, and well-structured information. Organize your responses into paragraphs for better readability.",
-
     ) -> None:
         """Instantiates Elmo
 
@@ -37,9 +42,9 @@ class Elmo(Provider):
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
             system_prompt (str, optional): System prompt for Elmo. Defaults to the provided string.
-            web_search (bool, optional): Enables web search mode when True. Defaults to False.
         """
-        self.session = requests.Session()
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = "https://www.elmo.chat/api/v1/prompt"
@@ -49,20 +54,14 @@ class Elmo(Provider):
         self.system_prompt = system_prompt
         self.headers = {
             "accept": "*/*",
-            "accept-encoding": "gzip, deflate, br, zstd",
             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
-            "content-length": "763",
             "content-type": "text/plain;charset=UTF-8",
             "dnt": "1",
             "origin": "chrome-extension://ipnlcfhfdicbfbchfoihipknbaeenenm",
             "priority": "u=1, i",
-            "sec-ch-ua": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Windows"',
             "sec-fetch-dest": "empty",
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "cross-site",
-            "user-agent": LitAgent().random(),
         }
 
         self.__available_optimizers = (
@@ -70,7 +69,10 @@ class Elmo(Provider):
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
+        self.session.proxies = proxies # Assign proxies directly
+
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -82,16 +84,26 @@ class Elmo(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
+
+    @staticmethod
+    def _elmo_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the Elmo stream format '0:"..."'."""
+        if isinstance(chunk, str):
+            match = re.search(r'0:"(.*?)"(?=,|$)', chunk) # Look for 0:"...", possibly followed by comma or end of string
+            if match:
+                # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
+                content = match.group(1).encode().decode('unicode_escape')
+                return content.replace('\\\\', '\\').replace('\\"', '"')
+        return None
 
     def ask(
         self,
         prompt: str,
-        stream: bool = False,
+        stream: bool = False, # API supports streaming
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> dict:
+    ) -> Union[Dict[str, Any], Generator[Any, None, None]]: # Corrected return type hint
         """Chat with AI
 
         Args:
@@ -106,7 +118,7 @@ class Elmo(Provider):
            {
               "text" : "How may I assist you today?"
            }
-        ```
+        ```json
         """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -119,7 +131,6 @@ class Elmo(Provider):
                     f"Optimizer is not one of {self.__available_optimizers}"
                 )
 
-
         payload = {
             "metadata": {
                 "system": {"language": "en-US"},
@@ -145,36 +156,69 @@ class Elmo(Provider):
         }
 
         def for_stream():
-            response = self.session.post(
-                self.api_endpoint,
-                headers=self.headers,
-                json=payload,
-                stream=True,
-                timeout=self.timeout,
-            )
-            if not response.ok:
-                raise Exception(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+            streaming_text = "" # Initialize outside try block
+            try:
+                # Use curl_cffi session post with impersonate
+                # Note: The API expects 'text/plain' but we send JSON.
+                # If this fails, try sending json.dumps(payload) as data with 'Content-Type': 'application/json'
+                response = self.session.post(
+                    self.api_endpoint,
+                    # headers are set on the session, but content-type might need override if sending JSON
+                    json=payload, # Sending as JSON
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome110" # Use a common impersonation profile
+                )
+                response.raise_for_status() # Check for HTTP errors
+
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value=None, # No simple prefix
+                    to_json=False, # Content is text after extraction
+                    content_extractor=self._elmo_extractor, # Use the specific extractor
+                    yield_raw_on_error=True
+                )
+
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        resp = dict(text=content_chunk)
+                        yield resp if not raw else content_chunk
+
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e: # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"Failed to generate response ({type(e).__name__}): {e} - {err_text}") from e
+            finally:
+                # Update history after stream finishes
+                self.last_response = dict(text=streaming_text)
+                self.conversation.update_chat_history(
+                    prompt, streaming_text
                 )
-            full_response = ""
-            for line in response.iter_lines(decode_unicode=True):
-                if line:
-                    if line.startswith('0:'):
-                        chunk = line.split(':"')[1].strip('"')
-                        formatted_output = (
-                            chunk.replace("\\n", "\n").replace("\\n\\n", "\n\n")
-                        )
-                        full_response += formatted_output
-                        self.last_response.update(dict(text=full_response))
-                        yield formatted_output if raw else dict(text=formatted_output)
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
 
         def for_non_stream():
-            for _ in for_stream():
-                pass
-            return self.last_response
+            # Aggregate the stream using the updated for_stream logic
+            collected_text = ""
+            try:
+                # Ensure raw=False so for_stream yields dicts
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        collected_text += chunk_data["text"]
+                    # Handle raw string case if raw=True was passed
+                    elif raw and isinstance(chunk_data, str):
+                        collected_text += chunk_data
+            except Exception as e:
+                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                if not collected_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+            # Update last_response and history *after* aggregation for non-stream
+            self.last_response = {"text": collected_text}
+            self.conversation.update_chat_history(prompt, collected_text)
+            # Return the final aggregated response dict or raw string
+            return collected_text if raw else self.last_response
 
         return for_stream() if stream else for_non_stream()
 
@@ -184,7 +228,7 @@ class Elmo(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str:
+    ) -> Union[str, Generator[str, None, None]]: # Corrected return type hint
         """Generate response `str`
         Args:
             prompt (str): Prompt to be send.
@@ -195,23 +239,27 @@ class Elmo(Provider):
             str: Response generated
         """
 
-        def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            ):
-                yield self.get_message(response)
+        def for_stream_chat(): # Renamed inner function
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict) # get_message expects dict
 
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
+        def for_non_stream_chat(): # Renamed inner function
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt,
+                stream=False,
+                raw=False, # Ensure ask returns dict
+                optimizer=optimizer,
+                conversationally=conversationally,
             )
+            return self.get_message(response_data) # get_message expects dict
 
-        return for_stream() if stream else for_non_stream()
+        return for_stream_chat() if stream else for_non_stream_chat() # Use renamed functions
 
     def get_message(self, response: dict) -> str:
         """Retrieves message only from response
@@ -223,10 +271,11 @@ class Elmo(Provider):
             str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
+        return response.get("text", "") # Use .get for safety
 
 
 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     from rich import print
     ai = Elmo()
     response = ai.chat("write a poem about AI", stream=True)
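
Note: Elmo's stream is not `data:`-prefixed JSON but `0:"..."`-framed text parts (it resembles the Vercel AI SDK data-stream framing), which is why `sanitize_stream` is called with `intro_value=None`, `to_json=False`, and the regex extractor above. A standalone check of that extraction logic, using hypothetical sample lines:

```python
import re

def elmo_extract(chunk: str):
    """Same logic as _elmo_extractor in the diff above."""
    match = re.search(r'0:"(.*?)"(?=,|$)', chunk)   # a 0:"..." text part
    if match:
        # Undo \uXXXX / \n escapes, then unescape quotes and backslashes
        content = match.group(1).encode().decode('unicode_escape')
        return content.replace('\\\\', '\\').replace('\\"', '"')
    return None

print(elmo_extract('0:"Hello\\nworld"'))          # hypothetical line -> "Hello" / "world" on two lines
print(elmo_extract('e:{"finishReason":"stop"}'))  # no 0:"..." part -> None
```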