webscout-8.2.4-py3-none-any.whl → webscout-8.2.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (110)
  1. webscout/AIauto.py +112 -22
  2. webscout/AIutel.py +240 -344
  3. webscout/Extra/autocoder/autocoder.py +66 -5
  4. webscout/Extra/gguf.py +2 -0
  5. webscout/Provider/AISEARCH/scira_search.py +3 -5
  6. webscout/Provider/Aitopia.py +75 -51
  7. webscout/Provider/AllenAI.py +64 -67
  8. webscout/Provider/ChatGPTClone.py +33 -34
  9. webscout/Provider/ChatSandbox.py +342 -0
  10. webscout/Provider/Cloudflare.py +79 -32
  11. webscout/Provider/Deepinfra.py +69 -56
  12. webscout/Provider/ElectronHub.py +48 -39
  13. webscout/Provider/ExaChat.py +36 -20
  14. webscout/Provider/GPTWeb.py +24 -18
  15. webscout/Provider/GithubChat.py +52 -49
  16. webscout/Provider/GizAI.py +285 -0
  17. webscout/Provider/Glider.py +39 -28
  18. webscout/Provider/Groq.py +48 -20
  19. webscout/Provider/HeckAI.py +18 -36
  20. webscout/Provider/Jadve.py +30 -37
  21. webscout/Provider/LambdaChat.py +36 -59
  22. webscout/Provider/MCPCore.py +18 -21
  23. webscout/Provider/Marcus.py +23 -14
  24. webscout/Provider/Nemotron.py +218 -0
  25. webscout/Provider/Netwrck.py +35 -26
  26. webscout/Provider/OPENAI/__init__.py +1 -1
  27. webscout/Provider/OPENAI/exachat.py +4 -0
  28. webscout/Provider/OPENAI/scirachat.py +3 -4
  29. webscout/Provider/OPENAI/textpollinations.py +20 -22
  30. webscout/Provider/OPENAI/toolbaz.py +1 -0
  31. webscout/Provider/PI.py +22 -13
  32. webscout/Provider/StandardInput.py +42 -30
  33. webscout/Provider/TeachAnything.py +24 -12
  34. webscout/Provider/TextPollinationsAI.py +78 -76
  35. webscout/Provider/TwoAI.py +120 -88
  36. webscout/Provider/TypliAI.py +305 -0
  37. webscout/Provider/Venice.py +24 -22
  38. webscout/Provider/VercelAI.py +31 -12
  39. webscout/Provider/WiseCat.py +1 -1
  40. webscout/Provider/WrDoChat.py +370 -0
  41. webscout/Provider/__init__.py +11 -13
  42. webscout/Provider/ai4chat.py +5 -3
  43. webscout/Provider/akashgpt.py +59 -66
  44. webscout/Provider/asksteve.py +53 -44
  45. webscout/Provider/cerebras.py +77 -31
  46. webscout/Provider/chatglm.py +47 -37
  47. webscout/Provider/elmo.py +38 -32
  48. webscout/Provider/freeaichat.py +57 -43
  49. webscout/Provider/granite.py +24 -21
  50. webscout/Provider/hermes.py +27 -20
  51. webscout/Provider/learnfastai.py +25 -20
  52. webscout/Provider/llmchatco.py +48 -78
  53. webscout/Provider/multichat.py +13 -3
  54. webscout/Provider/scira_chat.py +50 -30
  55. webscout/Provider/scnet.py +27 -21
  56. webscout/Provider/searchchat.py +16 -24
  57. webscout/Provider/sonus.py +37 -39
  58. webscout/Provider/toolbaz.py +24 -46
  59. webscout/Provider/turboseek.py +37 -41
  60. webscout/Provider/typefully.py +30 -22
  61. webscout/Provider/typegpt.py +47 -51
  62. webscout/Provider/uncovr.py +46 -40
  63. webscout/__init__.py +0 -1
  64. webscout/cli.py +256 -0
  65. webscout/conversation.py +305 -448
  66. webscout/exceptions.py +3 -0
  67. webscout/swiftcli/__init__.py +80 -794
  68. webscout/swiftcli/core/__init__.py +7 -0
  69. webscout/swiftcli/core/cli.py +297 -0
  70. webscout/swiftcli/core/context.py +104 -0
  71. webscout/swiftcli/core/group.py +241 -0
  72. webscout/swiftcli/decorators/__init__.py +28 -0
  73. webscout/swiftcli/decorators/command.py +221 -0
  74. webscout/swiftcli/decorators/options.py +220 -0
  75. webscout/swiftcli/decorators/output.py +252 -0
  76. webscout/swiftcli/exceptions.py +21 -0
  77. webscout/swiftcli/plugins/__init__.py +9 -0
  78. webscout/swiftcli/plugins/base.py +135 -0
  79. webscout/swiftcli/plugins/manager.py +262 -0
  80. webscout/swiftcli/utils/__init__.py +59 -0
  81. webscout/swiftcli/utils/formatting.py +252 -0
  82. webscout/swiftcli/utils/parsing.py +267 -0
  83. webscout/version.py +1 -1
  84. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/METADATA +166 -45
  85. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/RECORD +89 -89
  86. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/WHEEL +1 -1
  87. webscout-8.2.6.dist-info/entry_points.txt +3 -0
  88. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/top_level.txt +0 -1
  89. inferno/__init__.py +0 -6
  90. inferno/__main__.py +0 -9
  91. inferno/cli.py +0 -6
  92. inferno/lol.py +0 -589
  93. webscout/LLM.py +0 -442
  94. webscout/Local/__init__.py +0 -12
  95. webscout/Local/__main__.py +0 -9
  96. webscout/Local/api.py +0 -576
  97. webscout/Local/cli.py +0 -516
  98. webscout/Local/config.py +0 -75
  99. webscout/Local/llm.py +0 -287
  100. webscout/Local/model_manager.py +0 -253
  101. webscout/Local/server.py +0 -721
  102. webscout/Local/utils.py +0 -93
  103. webscout/Provider/Chatify.py +0 -175
  104. webscout/Provider/PizzaGPT.py +0 -228
  105. webscout/Provider/askmyai.py +0 -158
  106. webscout/Provider/gaurish.py +0 -244
  107. webscout/Provider/promptrefine.py +0 -193
  108. webscout/Provider/tutorai.py +0 -270
  109. webscout-8.2.4.dist-info/entry_points.txt +0 -5
  110. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/licenses/LICENSE.md +0 -0
webscout/Provider/freeaichat.py

@@ -1,6 +1,7 @@
+import re
 import requests
 import json
-import time
+import uuid
 from typing import Any, Dict, Optional, Generator, Union
 
 from webscout.AIutel import Optimizers
@@ -27,6 +28,12 @@ class FreeAIChat(Provider):
         "O3 Mini",
         "O3 Mini High",
         "O3 Mini Low",
+        "O4 Mini",
+        "O4 Mini High",
+        "GPT 4.1",
+        "o3",
+        "GPT 4.1 Mini",
+
 
         # Anthropic Models
         "Claude 3.5 haiku",
@@ -74,8 +81,9 @@ class FreeAIChat(Provider):
 
     def __init__(
         self,
+        api_key: str,
         is_conversation: bool = True,
-        max_tokens: int = 2049,
+        max_tokens: int = 150,
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -83,8 +91,9 @@ class FreeAIChat(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "GPT-4o",
+        model: str = "GPT 4o",
         system_prompt: str = "You are a helpful AI assistant.",
+        temperature: float = 0.7,
     ):
         """Initializes the FreeAIChat API client."""
         if model not in self.AVAILABLE_MODELS:
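Note for consumers: this is a breaking signature change. `api_key` becomes a required argument, the default model name changes from `GPT-4o` to `GPT 4o`, and the `max_tokens` default drops from 2049 to 150. A minimal usage sketch under those assumptions (the import path mirrors the package layout above; the key value and the `.chat()` call pattern follow the provider test harnesses elsewhere in this diff):

```python
from webscout.Provider.freeaichat import FreeAIChat

# 8.2.4: FreeAIChat(model="GPT-4o") worked with no key and a 2049-token default.
# 8.2.6: the key is mandatory and the old token budget must be requested explicitly.
ai = FreeAIChat(
    api_key="YOUR_FREEAICHAT_KEY",  # placeholder; now required
    model="GPT 4o",                 # renamed default (space, not hyphen)
    max_tokens=2049,                # new default is 150; pass the old value to keep prior behavior
    temperature=0.7,                # new knob added in 8.2.6
)
print(ai.chat("Hello"))
```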
@@ -105,11 +114,13 @@ class FreeAIChat(Provider):
         self.session.proxies.update(proxies)
 
         self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
+        self.max_tokens = max_tokens
         self.timeout = timeout
         self.last_response = {}
         self.model = model
         self.system_prompt = system_prompt
+        self.temperature = temperature
+        self.api_key = api_key
 
         self.__available_optimizers = (
             method
@@ -125,10 +136,21 @@ class FreeAIChat(Provider):
         )
 
         self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
+            is_conversation, self.max_tokens, filepath, update_file
         )
         self.conversation.history_offset = history_offset
 
+    @staticmethod
+    def _extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the x0gpt stream format '0:"..."'."""
+        if isinstance(chunk, str):
+            match = re.search(r'0:"(.*?)"', chunk)
+            if match:
+                # Decode potential unicode escapes like \u00e9
+                content = match.group(1).encode().decode('unicode_escape')
+                return content.replace('\\\\', '\\').replace('\\"', '"')  # Handle escaped backslashes and quotes
+        return None
+
     def ask(
         self,
         prompt: str,
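The extractor's regex-plus-`unicode_escape` approach can be exercised in isolation. A small sketch with invented sample frames (the `0:"..."` wire format is taken from the docstring above; only the extraction core is mirrored here):

```python
import re

def extract(chunk: str):
    """Mirror of _extractor's core logic, for demonstration only."""
    match = re.search(r'0:"(.*?)"', chunk)
    if match:
        # \uXXXX sequences arrive as literal text and must be decoded.
        # Caveat: unicode_escape round-trips via latin-1, so text that is
        # already-decoded non-ASCII would be mangled by this step.
        return match.group(1).encode().decode('unicode_escape')
    return None

print(extract('0:"Hello, "'))          # Hello,
print(extract('0:"caf\\u00e9"'))       # café (escape sequence decoded)
print(extract('e:{"finishReason":1}')) # None: non-content frame is ignored
```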
@@ -146,24 +168,19 @@ class FreeAIChat(Provider):
         else:
             raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
 
-        messages = [
-            {
-                "id": str(int(time.time() * 1000)),
+        payload = {
+            "id": str(uuid.uuid4()),
+            "messages": [{
                 "role": "user",
                 "content": conversation_prompt,
-                "model": {
-                    # "id": "14",
-                    "name": self.model,
-                    # "icon": "https://cdn-avatars.huggingface.co/v1/production/uploads/1620805164087-5ec0135ded25d76864d553f1.png",
-                    # "provider": "openAI",
-                    # "contextWindow": 63920
-                }
-            }
-        ]
-
-        payload = {
+                "parts": [{"type": "text", "text": conversation_prompt}]
+            }],
             "model": self.model,
-            "messages": messages
+            "config": {
+                "temperature": self.temperature,
+                "maxTokens": self.max_tokens
+            },
+            "apiKey": self.api_key
         }
 
         def for_stream():
@@ -174,28 +191,25 @@ class FreeAIChat(Provider):
                         f"Request failed with status code {response.status_code}"
                     )
 
-                streaming_text = ""
-                for line in response.iter_lines(decode_unicode=True):
-                    if line:
-                        line = line.strip()
-                        if line.startswith("data: "):
-                            json_str = line[6:]  # Remove "data: " prefix
-                            if json_str == "[DONE]":
-                                break
-                            try:
-                                json_data = json.loads(json_str)
-                                if 'choices' in json_data:
-                                    choice = json_data['choices'][0]
-                                    if 'delta' in choice and 'content' in choice['delta']:
-                                        content = choice['delta']['content']
-                                        streaming_text += content
-                                        resp = dict(text=content)
-                                        yield resp if raw else resp
-                            except json.JSONDecodeError:
-                                pass
-
-                self.conversation.update_chat_history(prompt, streaming_text)
-
+                streaming_response = ""
+                processed_stream = sanitize_stream(
+                    data=response.iter_lines(decode_unicode=True),
+                    intro_value=None,
+                    to_json=False,
+                    content_extractor=self._extractor,
+                    skip_markers=None
+                )
+
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_response += content_chunk
+                        yield dict(text=content_chunk) if raw else dict(text=content_chunk)
+
+                self.last_response.update(dict(text=streaming_response))
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
+                )
+
             except requests.RequestException as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
 
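This is the pattern repeated across most providers in this release: hand-rolled `iter_lines` / `json.loads` loops are replaced by `AIutel.sanitize_stream` plus a small per-provider `content_extractor`. The library's actual implementation is not part of this diff; the sketch below is a simplified reconstruction of the contract implied by the call sites (the parameter names are taken from the calls above, the behavior is inferred, and the real function very likely also handles byte buffering and line splitting for raw `iter_content` input):

```python
import json
from typing import Callable, Optional

def sanitize_stream(data, intro_value=None, to_json=True,
                    content_extractor: Optional[Callable] = None,
                    skip_markers=None, yield_raw_on_error=True):
    """Simplified sketch of the sanitize_stream contract used in 8.2.6."""
    skip = set(skip_markers or [])
    for line in data:
        if isinstance(line, bytes):
            line = line.decode("utf-8", errors="replace")
        line = line.strip()
        if not line or line in skip:
            continue  # drop blanks and declared markers such as "[DONE]"
        if intro_value and line.startswith(intro_value):
            line = line[len(intro_value):].strip()  # strip e.g. the SSE "data:" prefix
        chunk = line
        if to_json:
            try:
                chunk = json.loads(line)
            except json.JSONDecodeError:
                if not yield_raw_on_error:
                    continue  # silently skip malformed lines
        if content_extractor:
            extracted = content_extractor(chunk)
            if extracted is not None:
                yield extracted
        else:
            yield chunk
```

The design benefit is that each provider now only owns a few lines of format knowledge (its extractor), while prefix stripping, JSON parsing, and error policy live in one place.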
@@ -268,4 +282,4 @@ if __name__ == "__main__":
                 display_text = "Empty or invalid response"
             print(f"\r{model:<50} {status:<10} {display_text}")
         except Exception as e:
-            print(f"\r{model:<50} {'✗':<10} {str(e)}")
\ No newline at end of file
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/granite.py

@@ -1,9 +1,9 @@
 from curl_cffi.requests import Session
 from curl_cffi import CurlError
 import json
-from typing import Union, Any, Dict, Generator
+from typing import Optional, Union, Any, Dict, Generator
 
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream  # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent as Lit
@@ -77,6 +77,13 @@ class IBMGranite(Provider):
         self.conversation = Conversation(is_conversation, self.max_tokens_to_sample, filepath, update_file)
         self.conversation.history_offset = history_offset
 
+    @staticmethod
+    def _granite_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from IBM Granite stream JSON lists [3, "text"]."""
+        if isinstance(chunk, list) and len(chunk) == 2 and chunk[0] == 3 and isinstance(chunk[1], str):
+            return chunk[1]
+        return None
+
     def ask(
         self,
         prompt: str,
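The Granite wire format is one JSON list per line, where the first element tags the frame type and tag 3 carries text. A tiny illustration (the `[3, "..."]` shape comes straight from the extractor; the non-text frame is an invented placeholder):

```python
frames = [
    [3, "Hello"],         # text frame -> "Hello"
    [3, " world"],        # text frame -> " world"
    [0, {"meta": True}],  # invented non-text frame -> None, skipped
]
# _granite_extractor is a staticmethod, so it can be called on the class directly.
pieces = (IBMGranite._granite_extractor(f) for f in frames)
print("".join(p for p in pieces if p))  # Hello world
```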
@@ -127,25 +134,21 @@ class IBMGranite(Provider):
                 )
                 response.raise_for_status()  # Check for HTTP errors
 
-                # Iterate over bytes and decode manually
-                for line_bytes in response.iter_lines():
-                    if line_bytes:
-                        try:
-                            line = line_bytes.decode('utf-8')
-                            data = json.loads(line)
-                            # Check the specific format [3, "text_chunk"]
-                            if isinstance(data, list) and len(data) == 2 and data[0] == 3 and isinstance(data[1], str):
-                                content = data[1]
-                                if content:  # Ensure content is not None or empty
-                                    streaming_text += content
-                                    resp = dict(text=content)
-                                    # Yield dict or raw string chunk
-                                    yield resp if not raw else content
-                            else:
-                                # Skip unrecognized lines/formats
-                                pass
-                        except (json.JSONDecodeError, UnicodeDecodeError):
-                            continue  # Ignore lines that are not valid JSON or cannot be decoded
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),  # Pass byte iterator
+                    intro_value=None,  # No prefix
+                    to_json=True,  # Stream sends JSON lines (which are lists)
+                    content_extractor=self._granite_extractor,  # Use the specific extractor
+                    yield_raw_on_error=False  # Skip non-JSON lines or lines where extractor fails
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _granite_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        resp = dict(text=content_chunk)
+                        yield resp if not raw else content_chunk
 
                 # Update history after stream finishes
                 self.last_response = dict(text=streaming_text)
webscout/Provider/hermes.py

@@ -4,7 +4,7 @@ import json
 from typing import Union, Any, Dict, Generator, Optional
 
 from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
+from webscout.AIutel import Conversation, sanitize_stream  # Import sanitize_stream
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
@@ -102,6 +102,13 @@ class NousHermes(Provider):
             print(f"Warning: Error loading cookies: {e}")
             return None
 
+    @staticmethod
+    def _hermes_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from Hermes stream JSON objects."""
+        if isinstance(chunk, dict) and chunk.get('type') == 'llm_response':
+            return chunk.get('content')
+        return None
+
 
     def ask(
         self,
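Combined with `sanitize_stream`, the extractor forms a two-stage pipeline: strip the SSE `data:` prefix and parse JSON, then keep only `llm_response` frames. A canned-input sketch (the payloads are invented, the frame shape comes from the extractor above, and feeding a pre-split list of byte lines stands in for the live byte stream):

```python
lines = [
    b'data: {"type": "llm_response", "content": "Hi"}',
    b'data: {"type": "status", "content": "thinking"}',  # dropped by the extractor
]
chunks = sanitize_stream(
    data=iter(lines),
    intro_value="data:",
    to_json=True,
    content_extractor=NousHermes._hermes_extractor,
    yield_raw_on_error=False,
)
print("".join(chunks))  # Hi
```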
@@ -145,36 +152,36 @@ class NousHermes(Provider):
             "top_p": self.top_p,
         }
         def for_stream():
-            full_response = ""
+            streaming_text = ""  # Initialize outside try block
            try:
                response = self.session.post(
                    self.api_endpoint,
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
-                    impersonate="chrome110"
+                    impersonate="chrome110"  # Keep impersonate
                )
                response.raise_for_status()
 
-                for line_bytes in response.iter_lines():
-                    if line_bytes:
-                        try:
-                            decoded_line = line_bytes.decode('utf-8')
-                            if decoded_line.startswith('data: '):
-                                data_str = decoded_line.replace('data: ', '', 1)
-                                data = json.loads(data_str)
-                                if data.get('type') == 'llm_response':
-                                    content = data.get('content', '')
-                                    if content:
-                                        full_response += content
-                                        resp = dict(text=content)
-                                        yield resp if not raw else content
-                        except (json.JSONDecodeError, UnicodeDecodeError):
-                            continue
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),  # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True,  # Stream sends JSON
+                    content_extractor=self._hermes_extractor,  # Use the specific extractor
+                    yield_raw_on_error=False  # Skip non-JSON lines or lines where extractor fails
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _hermes_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        resp = dict(text=content_chunk)
+                        yield resp if not raw else content_chunk
 
-                self.last_response = dict(text=full_response)
+                self.last_response = dict(text=streaming_text)  # Use streaming_text
                self.conversation.update_chat_history(
-                    prompt, full_response
+                    prompt, streaming_text  # Use streaming_text
                )
 
            except CurlError as e:
webscout/Provider/learnfastai.py

@@ -1,12 +1,12 @@
 import os
 import json
-from typing import Optional, Union, Generator
+from typing import Any, Dict, Optional, Union, Generator
 import uuid
 from curl_cffi.requests import Session
 from curl_cffi import CurlError
 
 from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
+from webscout.AIutel import Conversation, sanitize_stream  # Import sanitize_stream
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
@@ -79,6 +79,13 @@ class LearnFast(Provider):
         )
         self.conversation.history_offset = history_offset
 
+    @staticmethod
+    def _learnfast_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts message content from LearnFast stream JSON objects."""
+        if isinstance(chunk, dict) and chunk.get('code') == 200 and chunk.get('data'):
+            return chunk['data'].get('message')
+        return None
+
     def generate_unique_id(self) -> str:
         """Generate a 32-character hexadecimal unique ID."""
         return uuid.uuid4().hex
@@ -209,24 +216,22 @@ class LearnFast(Provider):
                 )
                 response.raise_for_status()  # Check for HTTP errors
 
-                # Process the streamed response
-                # Iterate over bytes and decode manually
-                for line_bytes in response.iter_lines():
-                    if line_bytes:
-                        try:
-                            line = line_bytes.decode('utf-8').strip()
-                            if line == "[DONE]":
-                                break
-                            json_response = json.loads(line)
-                            if json_response.get('code') == 200 and json_response.get('data'):
-                                message = json_response['data'].get('message', '')
-                                if message:
-                                    full_response += message
-                                    resp = {"text": message}
-                                    # Yield dict or raw string chunk
-                                    yield resp if not raw else message
-                        except (json.JSONDecodeError, UnicodeDecodeError):
-                            pass  # Ignore lines that are not valid JSON or cannot be decoded
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),  # Pass byte iterator
+                    intro_value=None,  # No prefix
+                    to_json=True,  # Stream sends JSON lines
+                    skip_markers=["[DONE]"],
+                    content_extractor=self._learnfast_extractor,  # Use the specific extractor
+                    yield_raw_on_error=False  # Skip non-JSON lines or lines where extractor fails
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _learnfast_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        full_response += content_chunk
+                        resp = {"text": content_chunk}
+                        yield resp if not raw else content_chunk
 
                 # Update history after stream finishes
                 self.last_response = {"text": full_response}
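One subtle behavior change here: 8.2.4 `break`-ed out of the loop when it saw `[DONE]`, whereas `skip_markers=["[DONE]"]` merely filters that line and keeps consuming until the stream closes. If the server ever sent data after the marker, the two versions would differ. A sketch with canned lines (inputs invented; semantics per the sanitize_stream contract sketched earlier):

```python
canned = ['{"code": 200, "data": {"message": "Hi"}}', "[DONE]"]
chunks = sanitize_stream(
    data=iter(canned),
    to_json=True,
    skip_markers=["[DONE]"],
    content_extractor=LearnFast._learnfast_extractor,
    yield_raw_on_error=False,
)
print(list(chunks))  # ['Hi'] -> the marker is skipped, not treated as a stop signal
```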
webscout/Provider/llmchatco.py

@@ -5,7 +5,7 @@ import uuid
 import re
 from typing import Union, Any, Dict, Optional, Generator, List
 
-from webscout.AIutel import Optimizers
+from webscout.AIutel import Optimizers, sanitize_stream  # Import sanitize_stream
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
@@ -66,15 +66,15 @@ class LLMChatCo(Provider):
         self.model = model
         self.system_prompt = system_prompt
         self.thread_id = str(uuid.uuid4())  # Generate a unique thread ID for conversations
-
+
         # Create LitAgent instance (keep if needed for other headers)
         lit_agent = Lit()
-
+
         # Headers based on the provided request
         self.headers = {
             "Content-Type": "application/json",
             "Accept": "text/event-stream",
-            "User-Agent": lit_agent.random(),
+            "User-Agent": lit_agent.random(),
             "Accept-Language": "en-US,en;q=0.9",
             "Origin": "https://llmchat.co",
             "Referer": f"https://llmchat.co/chat/{self.thread_id}",
@@ -109,24 +109,16 @@ class LLMChatCo(Provider):
         # Store message history for conversation context
         self.last_assistant_response = ""
 
-    def parse_sse(self, data):
-        """Parse Server-Sent Events data"""
-        if not data or not data.strip():
-            return None
-
-        # Check if it's an event line
-        if data.startswith('event:'):
-            return {'event': data[6:].strip()}
-
-        # Check if it's data
-        if data.startswith('data:'):
-            data_content = data[5:].strip()
-            if data_content:
-                try:
-                    return {'data': json.loads(data_content)}
-                except json.JSONDecodeError:
-                    return {'data': data_content}
-
+    @staticmethod
+    def _llmchatco_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts text content from LLMChat.co stream JSON objects."""
+        if isinstance(chunk, dict) and "answer" in chunk:
+            answer = chunk["answer"]
+            # Prefer fullText if available and status is COMPLETED
+            if answer.get("fullText") and answer.get("status") == "COMPLETED":
+                return answer["fullText"]
+            elif "text" in answer:
+                return answer["text"]
         return None
 
     def ask(
@@ -176,62 +168,40 @@ class LLMChatCo(Provider):
             try:
                 # Use curl_cffi session post with impersonate
                 response = self.session.post(
-                    self.api_endpoint,
-                    json=payload,
+                    self.api_endpoint,
+                    json=payload,
                     # headers are set on the session
-                    stream=True,
+                    stream=True,
                     timeout=self.timeout,
                     # proxies are set on the session
                     impersonate="chrome110"  # Use a common impersonation profile
                 )
                 response.raise_for_status()  # Check for HTTP errors
-
-                # Process the SSE stream
-                current_event = None
-                buffer = ""
-
-                # Iterate over bytes and decode manually
-                for chunk in response.iter_content(chunk_size=None, decode_unicode=False):  # Use chunk_size=None for better SSE handling
-                    if not chunk:
-                        continue
-
-                    # Decode the chunk and add to buffer
-                    buffer += chunk.decode('utf-8', errors='replace')  # Use replace for potential errors
-
-                    # Process complete lines in the buffer
-                    while '\n' in buffer:
-                        line, buffer = buffer.split('\n', 1)
-                        line = line.strip()
-
-                        if not line:
-                            continue
-
-                        if line.startswith('event:'):
-                            current_event = line[6:].strip()
-                        elif line.startswith('data:'):
-                            data_content = line[5:].strip()
-                            if data_content and current_event == 'answer':
-                                try:
-                                    json_data = json.loads(data_content)
-                                    if "answer" in json_data and "text" in json_data["answer"]:
-                                        text_chunk = json_data["answer"]["text"]
-                                        # If there's a fullText, use it as it's more complete
-                                        if json_data["answer"].get("fullText") and json_data["answer"].get("status") == "COMPLETED":
-                                            text_chunk = json_data["answer"]["fullText"]
-
-                                        # Extract only new content since last chunk
-                                        new_text = text_chunk[len(full_response):]
-                                        if new_text:
-                                            full_response = text_chunk  # Update full response tracker
-                                            resp = dict(text=new_text)
-                                            # Yield dict or raw string chunk
-                                            yield resp if not raw else new_text
-                                except json.JSONDecodeError:
-                                    continue  # Ignore invalid JSON data
-                            elif data_content and current_event == 'done':
-                                # Handle potential final data before done event if needed
-                                break  # Exit loop on 'done' event
-
+
+                # Use sanitize_stream
+                # Note: This won't handle SSE 'event:' lines, only 'data:' lines.
+                # The original code checked for event == 'answer'. We assume relevant data is JSON after 'data:'.
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),  # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True,  # Stream sends JSON
+                    content_extractor=self._llmchatco_extractor,  # Use the specific extractor
+                    yield_raw_on_error=False  # Skip non-JSON lines or lines where extractor fails
+                )
+
+                last_yielded_text = ""
+                for current_full_text in processed_stream:
+                    # current_full_text is the full text extracted by _llmchatco_extractor
+                    if current_full_text and isinstance(current_full_text, str):
+                        # Calculate the new part of the text
+                        new_text = current_full_text[len(last_yielded_text):]
+                        if new_text:
+                            full_response = current_full_text  # Keep track of the latest full text
+                            last_yielded_text = current_full_text  # Update tracker
+                            resp = dict(text=new_text)
+                            # Yield dict or raw string chunk
+                            yield resp if not raw else new_text
+
                 # Update history after stream finishes
                 self.last_response = dict(text=full_response)
                 self.last_assistant_response = full_response
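Because `_llmchatco_extractor` can return cumulative text (or the final `fullText`) rather than a delta, the consumer slices off only the suffix it has not yet emitted. The core idea in isolation (frames invented for illustration):

```python
cumulative_frames = ["Hel", "Hello", "Hello, world"]
last_yielded = ""
for full_text in cumulative_frames:
    new_text = full_text[len(last_yielded):]  # only the part not yet emitted
    if new_text:
        last_yielded = full_text
        print(repr(new_text))  # 'Hel', then 'lo', then ', world'
```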
@@ -244,7 +214,7 @@ class LLMChatCo(Provider):
             except Exception as e:  # Catch other potential exceptions (like HTTPError)
                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                 raise exceptions.FailedToGenerateResponseError(f"Unexpected error ({type(e).__name__}): {str(e)} - {err_text}") from e
-
+
         def for_non_stream():
             # Aggregate the stream using the updated for_stream logic
             full_response_text = ""
@@ -261,7 +231,7 @@ class LLMChatCo(Provider):
                 # If aggregation fails but some text was received, use it. Otherwise, re-raise.
                 if not full_response_text:
                     raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
-
+
             # last_response and history are updated within for_stream
             # Return the final aggregated response dict or raw string
             return full_response_text if raw else self.last_response
@@ -313,17 +283,17 @@ if __name__ == "__main__":
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)
-
+
     # Test all available models
     working = 0
     total = len(LLMChatCo.AVAILABLE_MODELS)
-
+
     for model in LLMChatCo.AVAILABLE_MODELS:
         try:
             test_ai = LLMChatCo(model=model, timeout=60)
             response = test_ai.chat("Say 'Hello' in one word")
             response_text = response
-
+
             if response_text and len(response_text.strip()) > 0:
                 status = "✓"
                 # Truncate response if too long
@@ -333,4 +303,4 @@ if __name__ == "__main__":
                 display_text = "Empty or invalid response"
             print(f"{model:<50} {status:<10} {display_text}")
         except Exception as e:
-            print(f"{model:<50} {'✗':<10} {str(e)}")
\ No newline at end of file
+            print(f"{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/multichat.py

@@ -4,7 +4,7 @@ import json
 import uuid
 from typing import Any, Dict, Union
 from datetime import datetime
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream  # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -279,8 +279,18 @@ class MultiChatAI(Provider):
         response = self._make_request(payload)
         try:
             # Use response.text which is already decoded
-            full_response = response.text.strip()
-            self.last_response = {"text": full_response}
+            response_text_raw = response.text  # Get raw text
+
+            # Process the text using sanitize_stream (even though it's not streaming)
+            processed_stream = sanitize_stream(
+                data=response_text_raw,
+                intro_value=None,  # No prefix
+                to_json=False  # It's plain text
+            )
+            # Aggregate the single result
+            full_response = "".join(list(processed_stream)).strip()
+
+            self.last_response = {"text": full_response}  # Store processed text
             self.conversation.update_chat_history(prompt, full_response)
             # Return dict or raw string based on raw flag
             return full_response if raw else self.last_response
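Here `sanitize_stream` is fed the whole response body as a single `str`, which implies the function accepts plain strings as well as iterators; with no prefix stripping and no JSON parsing, joining its output presumably yields the text unchanged. A sketch of that assumption:

```python
body = "Hello there"
# Assumes sanitize_stream special-cases str input, as the call above suggests.
cleaned = "".join(sanitize_stream(data=body, intro_value=None, to_json=False)).strip()
print(cleaned)  # Hello there (pass-through under these settings)
```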