webscout 8.3.2__py3-none-any.whl → 8.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of webscout might be problematic.

Files changed (117)
  1. webscout/AIutel.py +367 -41
  2. webscout/Bard.py +2 -22
  3. webscout/Bing_search.py +1 -2
  4. webscout/Provider/AISEARCH/__init__.py +1 -0
  5. webscout/Provider/AISEARCH/scira_search.py +24 -11
  6. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  7. webscout/Provider/Deepinfra.py +75 -57
  8. webscout/Provider/ExaChat.py +93 -63
  9. webscout/Provider/Flowith.py +1 -1
  10. webscout/Provider/FreeGemini.py +2 -2
  11. webscout/Provider/Gemini.py +3 -10
  12. webscout/Provider/GeminiProxy.py +31 -5
  13. webscout/Provider/HeckAI.py +85 -80
  14. webscout/Provider/Jadve.py +56 -50
  15. webscout/Provider/LambdaChat.py +39 -31
  16. webscout/Provider/MiniMax.py +207 -0
  17. webscout/Provider/Nemotron.py +41 -13
  18. webscout/Provider/Netwrck.py +39 -59
  19. webscout/Provider/OLLAMA.py +8 -9
  20. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
  21. webscout/Provider/OPENAI/MiniMax.py +298 -0
  22. webscout/Provider/OPENAI/README.md +31 -30
  23. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  24. webscout/Provider/OPENAI/__init__.py +4 -2
  25. webscout/Provider/OPENAI/autoproxy.py +753 -18
  26. webscout/Provider/OPENAI/base.py +7 -76
  27. webscout/Provider/OPENAI/copilot.py +73 -26
  28. webscout/Provider/OPENAI/deepinfra.py +96 -132
  29. webscout/Provider/OPENAI/exachat.py +9 -5
  30. webscout/Provider/OPENAI/flowith.py +179 -166
  31. webscout/Provider/OPENAI/friendli.py +233 -0
  32. webscout/Provider/OPENAI/monochat.py +329 -0
  33. webscout/Provider/OPENAI/netwrck.py +4 -7
  34. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  35. webscout/Provider/OPENAI/qodo.py +630 -0
  36. webscout/Provider/OPENAI/scirachat.py +82 -49
  37. webscout/Provider/OPENAI/textpollinations.py +13 -12
  38. webscout/Provider/OPENAI/toolbaz.py +1 -0
  39. webscout/Provider/OPENAI/typegpt.py +4 -4
  40. webscout/Provider/OPENAI/utils.py +19 -42
  41. webscout/Provider/OPENAI/x0gpt.py +14 -2
  42. webscout/Provider/OpenGPT.py +54 -32
  43. webscout/Provider/PI.py +58 -84
  44. webscout/Provider/Qodo.py +454 -0
  45. webscout/Provider/StandardInput.py +32 -13
  46. webscout/Provider/TTI/README.md +9 -9
  47. webscout/Provider/TTI/__init__.py +2 -1
  48. webscout/Provider/TTI/aiarta.py +92 -78
  49. webscout/Provider/TTI/infip.py +212 -0
  50. webscout/Provider/TTI/monochat.py +220 -0
  51. webscout/Provider/TeachAnything.py +11 -3
  52. webscout/Provider/TextPollinationsAI.py +91 -82
  53. webscout/Provider/TogetherAI.py +32 -48
  54. webscout/Provider/Venice.py +37 -46
  55. webscout/Provider/VercelAI.py +27 -24
  56. webscout/Provider/WiseCat.py +35 -35
  57. webscout/Provider/WrDoChat.py +22 -26
  58. webscout/Provider/WritingMate.py +26 -22
  59. webscout/Provider/__init__.py +6 -6
  60. webscout/Provider/copilot.py +58 -61
  61. webscout/Provider/freeaichat.py +64 -55
  62. webscout/Provider/granite.py +48 -57
  63. webscout/Provider/koala.py +51 -39
  64. webscout/Provider/learnfastai.py +49 -64
  65. webscout/Provider/llmchat.py +79 -93
  66. webscout/Provider/llmchatco.py +63 -78
  67. webscout/Provider/monochat.py +275 -0
  68. webscout/Provider/multichat.py +51 -40
  69. webscout/Provider/oivscode.py +1 -1
  70. webscout/Provider/scira_chat.py +257 -104
  71. webscout/Provider/scnet.py +13 -13
  72. webscout/Provider/searchchat.py +13 -13
  73. webscout/Provider/sonus.py +12 -11
  74. webscout/Provider/toolbaz.py +25 -8
  75. webscout/Provider/turboseek.py +41 -42
  76. webscout/Provider/typefully.py +27 -12
  77. webscout/Provider/typegpt.py +43 -48
  78. webscout/Provider/uncovr.py +55 -90
  79. webscout/Provider/x0gpt.py +325 -299
  80. webscout/Provider/yep.py +79 -96
  81. webscout/__init__.py +7 -2
  82. webscout/auth/__init__.py +12 -1
  83. webscout/auth/providers.py +27 -5
  84. webscout/auth/routes.py +146 -105
  85. webscout/auth/server.py +367 -312
  86. webscout/client.py +121 -116
  87. webscout/litagent/Readme.md +68 -55
  88. webscout/litagent/agent.py +99 -9
  89. webscout/version.py +1 -1
  90. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/METADATA +102 -91
  91. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/RECORD +95 -107
  92. webscout/Provider/AI21.py +0 -177
  93. webscout/Provider/HuggingFaceChat.py +0 -469
  94. webscout/Provider/OPENAI/freeaichat.py +0 -363
  95. webscout/Provider/TTI/fastflux.py +0 -233
  96. webscout/Provider/Writecream.py +0 -246
  97. webscout/auth/static/favicon.svg +0 -11
  98. webscout/auth/swagger_ui.py +0 -203
  99. webscout/auth/templates/components/authentication.html +0 -237
  100. webscout/auth/templates/components/base.html +0 -103
  101. webscout/auth/templates/components/endpoints.html +0 -750
  102. webscout/auth/templates/components/examples.html +0 -491
  103. webscout/auth/templates/components/footer.html +0 -75
  104. webscout/auth/templates/components/header.html +0 -27
  105. webscout/auth/templates/components/models.html +0 -286
  106. webscout/auth/templates/components/navigation.html +0 -70
  107. webscout/auth/templates/static/api.js +0 -455
  108. webscout/auth/templates/static/icons.js +0 -168
  109. webscout/auth/templates/static/main.js +0 -784
  110. webscout/auth/templates/static/particles.js +0 -201
  111. webscout/auth/templates/static/styles.css +0 -3353
  112. webscout/auth/templates/static/ui.js +0 -374
  113. webscout/auth/templates/swagger_ui.html +0 -170
  114. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/WHEEL +0 -0
  115. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/entry_points.txt +0 -0
  116. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/licenses/LICENSE.md +0 -0
  117. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/top_level.txt +0 -0

webscout/Provider/llmchatco.py
@@ -129,8 +129,8 @@ class LLMChatCo(Provider):
         optimizer: str = None,
         conversationally: bool = False,
         web_search: bool = False,
-    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
-        """Chat with LLMChat.co with streaming capabilities"""
+    ) -> Union[Dict[str, Any], Generator[Any, None, None], str]:
+        """Chat with LLMChat.co with streaming capabilities and raw output support using sanitize_stream."""

         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -143,7 +143,6 @@ class LLMChatCo(Provider):
                     f"Optimizer is not one of {self.__available_optimizers}"
                 )

-
         # Generate a unique ID for this message
         thread_item_id = ''.join(str(uuid.uuid4()).split('-'))[:20]
         messages = [
@@ -164,79 +163,59 @@
         }

         def for_stream():
-            full_response = "" # Initialize outside try block
+            full_response = ""
             try:
-                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     self.api_endpoint,
                     json=payload,
-                    # headers are set on the session
                     stream=True,
                     timeout=self.timeout,
-                    # proxies are set on the session
-                    impersonate="chrome110" # Use a common impersonation profile
+                    impersonate="chrome110"
                 )
-                response.raise_for_status() # Check for HTTP errors
+                response.raise_for_status()

-                # Use sanitize_stream
-                # Note: This won't handle SSE 'event:' lines, only 'data:' lines.
-                # The original code checked for event == 'answer'. We assume relevant data is JSON after 'data:'.
                 processed_stream = sanitize_stream(
-                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    data=response.iter_content(chunk_size=None),
                     intro_value="data:",
-                    to_json=True, # Stream sends JSON
-                    content_extractor=self._llmchatco_extractor, # Use the specific extractor
-                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                    to_json=True,
+                    content_extractor=self._llmchatco_extractor,
+                    yield_raw_on_error=False,
+                    raw=raw
                 )

                 last_yielded_text = ""
                 for current_full_text in processed_stream:
-                    # current_full_text is the full text extracted by _llmchatco_extractor
                     if current_full_text and isinstance(current_full_text, str):
-                        # Calculate the new part of the text
                         new_text = current_full_text[len(last_yielded_text):]
                         if new_text:
-                            full_response = current_full_text # Keep track of the latest full text
-                            last_yielded_text = current_full_text # Update tracker
-                            resp = dict(text=new_text)
-                            # Yield dict or raw string chunk
-                            yield resp if not raw else new_text
-
-                # Update history after stream finishes
+                            full_response = current_full_text
+                            last_yielded_text = current_full_text
+                            if raw:
+                                yield new_text
+                            else:
+                                yield dict(text=new_text)
                 self.last_response = dict(text=full_response)
                 self.last_assistant_response = full_response
                 self.conversation.update_chat_history(
                     prompt, full_response
                 )
-
-            except CurlError as e: # Catch CurlError
+            except CurlError as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
-            except Exception as e: # Catch other potential exceptions (like HTTPError)
+            except Exception as e:
                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                 raise exceptions.FailedToGenerateResponseError(f"Unexpected error ({type(e).__name__}): {str(e)} - {err_text}") from e
-
         def for_non_stream():
-            # Aggregate the stream using the updated for_stream logic
            full_response_text = ""
            try:
-                # Ensure raw=False so for_stream yields dicts
                for chunk_data in for_stream():
-                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                    if raw and isinstance(chunk_data, str):
+                        full_response_text += chunk_data
+                    elif isinstance(chunk_data, dict) and "text" in chunk_data:
                        full_response_text += chunk_data["text"]
-                    # Handle raw string case if raw=True was passed
-                    elif raw and isinstance(chunk_data, str):
-                        full_response_text += chunk_data
-
            except Exception as e:
-                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
                if not full_response_text:
                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
-
-            # last_response and history are updated within for_stream
-            # Return the final aggregated response dict or raw string
            return full_response_text if raw else self.last_response
-
-
        return for_stream() if stream else for_non_stream()

    def chat(
@@ -246,31 +225,33 @@ class LLMChatCo(Provider):
         optimizer: str = None,
         conversationally: bool = False,
         web_search: bool = False,
+        raw: bool = False
     ) -> Union[str, Generator[str, None, None]]:
-        """Generate response with streaming capabilities"""
-
+        """Generate response with streaming capabilities and raw output support"""
         def for_stream_chat():
-            # ask() yields dicts or strings when streaming
             gen = self.ask(
-                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                prompt, stream=True, raw=raw,
                 optimizer=optimizer, conversationally=conversationally,
                 web_search=web_search
             )
-            for response_dict in gen:
-                yield self.get_message(response_dict) # get_message expects dict
-
+            for response in gen:
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
         def for_non_stream_chat():
-            # ask() returns dict or str when not streaming
             response_data = self.ask(
                 prompt,
                 stream=False,
-                raw=False, # Ensure ask returns dict
+                raw=raw,
                 optimizer=optimizer,
                 conversationally=conversationally,
                 web_search=web_search
             )
-            return self.get_message(response_data) # get_message expects dict
-
+            if raw:
+                return response_data if isinstance(response_data, str) else self.get_message(response_data)
+            else:
+                return self.get_message(response_data)
         return for_stream_chat() if stream else for_non_stream_chat()

     def get_message(self, response: Dict[str, Any]) -> str:
@@ -279,28 +260,32 @@ class LLMChatCo(Provider):
         return response["text"]

 if __name__ == "__main__":
-    # Ensure curl_cffi is installed
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    # Test all available models
-    working = 0
-    total = len(LLMChatCo.AVAILABLE_MODELS)
-
-    for model in LLMChatCo.AVAILABLE_MODELS:
-        try:
-            test_ai = LLMChatCo(model=model, timeout=60)
-            response = test_ai.chat("Say 'Hello' in one word")
-            response_text = response
-
-            if response_text and len(response_text.strip()) > 0:
-                status = "✓"
-                # Truncate response if too long
-                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
-            else:
-                status = "✗"
-                display_text = "Empty or invalid response"
-            print(f"{model:<50} {status:<10} {display_text}")
-        except Exception as e:
-            print(f"{model:<50} {'✗':<10} {str(e)}")
+    # # Ensure curl_cffi is installed
+    # print("-" * 80)
+    # print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    # print("-" * 80)
+
+    # # Test all available models
+    # working = 0
+    # total = len(LLMChatCo.AVAILABLE_MODELS)
+
+    # for model in LLMChatCo.AVAILABLE_MODELS:
+    #     try:
+    #         test_ai = LLMChatCo(model=model, timeout=60)
+    #         response = test_ai.chat("Say 'Hello' in one word")
+    #         response_text = response
+
+    #         if response_text and len(response_text.strip()) > 0:
+    #             status = "✓"
+    #             # Truncate response if too long
+    #             display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+    #         else:
+    #             status = "✗"
+    #             display_text = "Empty or invalid response"
+    #         print(f"{model:<50} {status:<10} {display_text}")
+    #     except Exception as e:
+    #         print(f"{model:<50} {'✗':<10} {str(e)}")
+    ai = LLMChatCo()
+    response = ai.chat("yooo", stream=True, raw=False)
+    for chunk in response:
+        print(chunk, end="", flush=True)
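
A minimal usage sketch (not part of the diff) of the raw streaming support added to llmchatco.py above, assuming webscout 8.3.4 is installed and that LLMChatCo is importable from the module path shown in the file list:

# Sketch: exercising the new raw parameter on LLMChatCo.chat()
from webscout.Provider.llmchatco import LLMChatCo  # import path assumed from the file list above

ai = LLMChatCo(timeout=60)

# Default (raw=False): chat() yields decoded text chunks via get_message()
for chunk in ai.chat("Name three planets", stream=True):
    print(chunk, end="", flush=True)
print()

# raw=True: chat() now forwards the raw string chunks produced by ask()/sanitize_stream
for chunk in ai.chat("Name three planets", stream=True, raw=True):
    print(chunk, end="", flush=True)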

webscout/Provider/monochat.py (new file)
@@ -0,0 +1,275 @@
+from typing import Generator, Optional, Union, Any, Dict
+from uuid import uuid4
+from curl_cffi import CurlError
+from curl_cffi.requests import Session
+import re
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+
+class MonoChat(Provider):
+    """
+    MonoChat provider for interacting with the gg.is-a-furry.dev API (OpenAI-compatible).
+    """
+    AVAILABLE_MODELS = [
+        "deepseek-r1",
+        "deepseek-v3",
+        "uncensored-r1-32b",
+        "o3-pro",
+        "o4-mini",
+        "o3",
+        "gpt-4.5-preview",
+        "gpt-4.1",
+        "gpt-4.1-mini",
+        "gpt-4.1-nano",
+        "gpt-4o",
+        "gpt-4o-mini",
+        "gpt-4o-search-preview",
+        "gpt-4o-mini-search-preview",
+        "gpt-4-turbo"
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "gpt-4.1",
+        system_prompt: str = "You are a helpful assistant.",
+        browser: str = "chrome"
+    ):
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+        self.session = Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://gg.is-a-furry.dev/api/chat"
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+        self.agent = LitAgent()
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+        self.headers = {
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": self.fingerprint["accept_language"],
+            "content-type": "application/json",
+            "origin": "https://gg.is-a-furry.dev",
+            "referer": "https://gg.is-a-furry.dev/",
+            "user-agent": self.fingerprint["user_agent"]
+        }
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def refresh_identity(self, browser: str = None):
+        browser = browser or self.fingerprint.get("browser_type", "chrome")
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+        self.headers.update({
+            "accept-language": self.fingerprint["accept_language"],
+            "user-agent": self.fingerprint["user_agent"]
+        })
+        self.session.headers.update(self.headers)
+        return self.fingerprint
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        """
+        Sends a prompt to the gg.is-a-furry.dev API and returns the response.
+
+        Args:
+            prompt (str): The prompt to send to the API.
+            stream (bool): Whether to stream the response.
+            raw (bool): Whether to return the raw response.
+            optimizer (str): Optimizer to use for the prompt.
+            conversationally (bool): Whether to generate the prompt conversationally.
+
+        Returns:
+            Dict[str, Any]: The API response.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt}
+            ],
+            "model": self.model,
+            "max_tokens": self.max_tokens_to_sample
+        }
+
+        def for_stream():
+            try:
+                response = self.session.post(
+                    self.api_endpoint,
+                    headers=self.headers,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout
+                )
+                if not response.ok:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                    )
+                streaming_response = ""
+                # Use sanitize_stream with regex-based extraction and filtering (like x0gpt)
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),
+                    intro_value=None,
+                    to_json=False,
+                    extract_regexes=[r'0:"(.*?)"'],
+                    skip_regexes=[
+                        r'^f:',
+                        r'^e:',
+                        r'^d:',
+                        r'^\s*$',
+                        r'data:\s*\[DONE\]',
+                        r'event:\s*',
+                        r'^\d+:\s*$',
+                        r'^:\s*$',
+                        r'^\s*[\x00-\x1f]+\s*$',
+                    ],
+                    raw=raw
+                )
+
+                for content_chunk in processed_stream:
+                    if isinstance(content_chunk, bytes):
+                        content_chunk = content_chunk.decode('utf-8', errors='ignore')
+                    if raw:
+                        yield content_chunk
+                    else:
+                        if content_chunk and isinstance(content_chunk, str):
+                            try:
+                                clean_content = content_chunk.encode().decode('unicode_escape')
+                                clean_content = clean_content.replace('\\\\', '\\').replace('\\"', '"')
+                                streaming_response += clean_content
+                                yield dict(text=clean_content)
+                            except (UnicodeDecodeError, UnicodeEncodeError):
+                                streaming_response += content_chunk
+                                yield dict(text=content_chunk)
+
+                self.last_response.update(dict(text=streaming_response))
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
+                )
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
+
+        def for_non_stream():
+            if stream:
+                return for_stream()
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        raw: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        """
+        Generates a response from the MonoChat API.
+
+        Args:
+            prompt (str): The prompt to send to the API.
+            stream (bool): Whether to stream the response.
+            optimizer (str): Optimizer to use for the prompt.
+            conversationally (bool): Whether to generate the prompt conversationally.
+            raw (bool): Whether to return raw response chunks.
+
+        Returns:
+            str: The API response.
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
+            ):
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
+
+        def for_non_stream():
+            result = self.ask(
+                prompt,
+                False,
+                raw=raw,
+                optimizer=optimizer,
+                conversationally=conversationally,
+            )
+            if raw:
+                return result
+            else:
+                return self.get_message(result)
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """
+        Extracts the message from the API response.
+
+        Args:
+            response (dict): The API response.
+
+        Returns:
+            str: The message content.
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        text = response.get("text", "")
+        return text
+
+if __name__ == "__main__":
+    from rich import print
+    ai = MonoChat(timeout=60)
+    response = ai.chat("In points tell me about humans", stream=True, raw=False)
+    for chunk in response:
+        print(chunk, end="", flush=True)
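
An illustrative sketch (not part of the diff) of what the extract/skip regexes in MonoChat.ask() do: lines matching 0:"..." carry the text, while f:/e:/d: bookkeeping lines are dropped. The sample wire lines below are hypothetical, modeled only on the patterns the code above declares:

# Sketch: how the extract_regexes / skip_regexes pair used above behaves
import re

extract = re.compile(r'0:"(.*?)"')
sample_lines = [
    'f:{"messageId":"msg-1"}',        # skipped by r'^f:'
    '0:"Hello"',                      # extracted -> Hello
    '0:", world"',                    # extracted -> , world
    'e:{"finishReason":"stop"}',      # skipped by r'^e:'
]

for line in sample_lines:
    match = extract.search(line)
    if match:
        print(match.group(1), end="")
print()  # prints: Hello, world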

webscout/Provider/multichat.py
@@ -2,7 +2,7 @@ from curl_cffi.requests import Session
 from curl_cffi import CurlError
 import json
 import uuid
-from typing import Any, Dict, Union
+from typing import Any, Dict, Union, Generator
 from datetime import datetime
 from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
@@ -257,10 +257,9 @@ class MultiChatAI(Provider):
         raw: bool = False, # Keep raw param for interface consistency
         optimizer: str = None,
         conversationally: bool = False,
-        # Add stream parameter for consistency, though API doesn't stream
         stream: bool = False
-    ) -> Dict[str, Any]:
-        """Sends a prompt to the MultiChatAI API and returns the response."""
+    ) -> Union[Dict[str, Any], str, Generator[str, None, None]]:
+        """Sends a prompt to the MultiChatAI API and returns the response. Supports raw output and direct text streaming."""
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
@@ -275,26 +274,32 @@ class MultiChatAI(Provider):
             "customModelId": "",
         }

-        # API does not stream, implement non-stream logic directly
         response = self._make_request(payload)
         try:
-            # Use response.text which is already decoded
-            response_text_raw = response.text # Get raw text
-
-            # Process the text using sanitize_stream (even though it's not streaming)
-            processed_stream = sanitize_stream(
-                data=response_text_raw,
-                intro_value=None, # No prefix
-                to_json=False # It's plain text
-            )
-            # Aggregate the single result
-            full_response = "".join(list(processed_stream)).strip()
-
-            self.last_response = {"text": full_response} # Store processed text
-            self.conversation.update_chat_history(prompt, full_response)
-            # Return dict or raw string based on raw flag
-            return full_response if raw else self.last_response
-        except Exception as e: # Catch potential errors during text processing
+            response_text_raw = response.text
+            if stream:
+                chunk_size = 64
+                text = response_text_raw
+                for i in range(0, len(text), chunk_size):
+                    chunk = text[i:i+chunk_size]
+                    if raw:
+                        yield chunk
+                    else:
+                        yield {"text": chunk}
+                self.last_response = {"text": text}
+                self.conversation.update_chat_history(prompt, text)
+            else:
+                processed_stream = sanitize_stream(
+                    data=response_text_raw,
+                    intro_value=None,
+                    to_json=False,
+                    raw=raw
+                )
+                full_response = "".join(list(processed_stream)).strip()
+                self.last_response = {"text": full_response}
+                self.conversation.update_chat_history(prompt, full_response)
+                return full_response if raw else self.last_response
+        except Exception as e:
             raise exceptions.FailedToGenerateResponseError(f"Failed to process response: {e}") from e

     def chat(
@@ -302,26 +307,32 @@ class MultiChatAI(Provider):
         prompt: str,
         optimizer: str = None,
         conversationally: bool = False,
-        # Add stream parameter for consistency
-        stream: bool = False
-    ) -> str:
-        """Generate response."""
-        # Since ask() now handles both stream=True/False by returning the full response dict/str:
-        response_data = self.ask(
-            prompt,
-            stream=False, # Call ask in non-stream mode internally
-            raw=False, # Ensure ask returns dict
-            optimizer=optimizer,
-            conversationally=conversationally
-        )
-        # If stream=True was requested, simulate streaming by yielding the full message at once
+        stream: bool = False,
+        raw: bool = False
+    ) -> Union[str, Generator[str, None, None]]:
+        """Generate response. Supports raw output and streaming."""
         if stream:
-            def stream_wrapper():
-                yield self.get_message(response_data)
-            return stream_wrapper()
+            # Streaming mode: yield chunks from ask
+            return self.ask(
+                prompt,
+                raw=raw,
+                optimizer=optimizer,
+                conversationally=conversationally,
+                stream=True
+            )
         else:
-            # If stream=False, return the full message directly
-            return self.get_message(response_data)
+            # Non-streaming mode: return full message
+            response_data = self.ask(
+                prompt,
+                raw=raw,
+                optimizer=optimizer,
+                conversationally=conversationally,
+                stream=False
+            )
+            if raw:
+                return response_data if isinstance(response_data, str) else self.get_message(response_data)
+            else:
+                return self.get_message(response_data)

     def get_message(self, response: Union[Dict[str, Any], str]) -> str:
         """

webscout/Provider/oivscode.py
@@ -303,7 +303,7 @@ class oivscode(Provider):
 if __name__ == "__main__":
     from rich import print
     chatbot = oivscode()
-    print(chatbot.fetch_available_models())
+    chatbot.fetch_available_models()
     response = chatbot.chat(input(">>> "), stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)