webscout-8.3.2-py3-none-any.whl → webscout-8.3.3-py3-none-any.whl

This diff compares the contents of two publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.

Potentially problematic release.


Files changed (94)
  1. webscout/AIutel.py +146 -37
  2. webscout/Bing_search.py +1 -2
  3. webscout/Provider/AISEARCH/__init__.py +1 -0
  4. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  5. webscout/Provider/ExaChat.py +84 -58
  6. webscout/Provider/HeckAI.py +85 -80
  7. webscout/Provider/Jadve.py +56 -50
  8. webscout/Provider/MiniMax.py +207 -0
  9. webscout/Provider/Nemotron.py +41 -13
  10. webscout/Provider/Netwrck.py +34 -51
  11. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
  12. webscout/Provider/OPENAI/MiniMax.py +298 -0
  13. webscout/Provider/OPENAI/README.md +30 -29
  14. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  15. webscout/Provider/OPENAI/__init__.py +3 -1
  16. webscout/Provider/OPENAI/autoproxy.py +752 -17
  17. webscout/Provider/OPENAI/base.py +7 -76
  18. webscout/Provider/OPENAI/deepinfra.py +42 -108
  19. webscout/Provider/OPENAI/flowith.py +179 -166
  20. webscout/Provider/OPENAI/friendli.py +233 -0
  21. webscout/Provider/OPENAI/monochat.py +329 -0
  22. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  23. webscout/Provider/OPENAI/toolbaz.py +1 -0
  24. webscout/Provider/OPENAI/typegpt.py +1 -1
  25. webscout/Provider/OPENAI/utils.py +19 -42
  26. webscout/Provider/OPENAI/x0gpt.py +14 -2
  27. webscout/Provider/OpenGPT.py +54 -32
  28. webscout/Provider/PI.py +58 -84
  29. webscout/Provider/StandardInput.py +32 -13
  30. webscout/Provider/TTI/README.md +9 -9
  31. webscout/Provider/TTI/__init__.py +2 -1
  32. webscout/Provider/TTI/aiarta.py +92 -78
  33. webscout/Provider/TTI/infip.py +212 -0
  34. webscout/Provider/TTI/monochat.py +220 -0
  35. webscout/Provider/TeachAnything.py +11 -3
  36. webscout/Provider/TextPollinationsAI.py +78 -70
  37. webscout/Provider/TogetherAI.py +32 -48
  38. webscout/Provider/Venice.py +37 -46
  39. webscout/Provider/VercelAI.py +27 -24
  40. webscout/Provider/WiseCat.py +35 -35
  41. webscout/Provider/WrDoChat.py +22 -26
  42. webscout/Provider/WritingMate.py +26 -22
  43. webscout/Provider/__init__.py +2 -2
  44. webscout/Provider/granite.py +48 -57
  45. webscout/Provider/koala.py +51 -39
  46. webscout/Provider/learnfastai.py +49 -64
  47. webscout/Provider/llmchat.py +79 -93
  48. webscout/Provider/llmchatco.py +63 -78
  49. webscout/Provider/multichat.py +51 -40
  50. webscout/Provider/oivscode.py +1 -1
  51. webscout/Provider/scira_chat.py +159 -96
  52. webscout/Provider/scnet.py +13 -13
  53. webscout/Provider/searchchat.py +13 -13
  54. webscout/Provider/sonus.py +12 -11
  55. webscout/Provider/toolbaz.py +25 -8
  56. webscout/Provider/turboseek.py +41 -42
  57. webscout/Provider/typefully.py +27 -12
  58. webscout/Provider/typegpt.py +41 -46
  59. webscout/Provider/uncovr.py +55 -90
  60. webscout/Provider/x0gpt.py +33 -17
  61. webscout/Provider/yep.py +79 -96
  62. webscout/auth/__init__.py +12 -1
  63. webscout/auth/providers.py +27 -5
  64. webscout/auth/routes.py +128 -104
  65. webscout/auth/server.py +367 -312
  66. webscout/client.py +121 -116
  67. webscout/litagent/Readme.md +68 -55
  68. webscout/litagent/agent.py +99 -9
  69. webscout/version.py +1 -1
  70. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/METADATA +102 -90
  71. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/RECORD +75 -87
  72. webscout/Provider/TTI/fastflux.py +0 -233
  73. webscout/Provider/Writecream.py +0 -246
  74. webscout/auth/static/favicon.svg +0 -11
  75. webscout/auth/swagger_ui.py +0 -203
  76. webscout/auth/templates/components/authentication.html +0 -237
  77. webscout/auth/templates/components/base.html +0 -103
  78. webscout/auth/templates/components/endpoints.html +0 -750
  79. webscout/auth/templates/components/examples.html +0 -491
  80. webscout/auth/templates/components/footer.html +0 -75
  81. webscout/auth/templates/components/header.html +0 -27
  82. webscout/auth/templates/components/models.html +0 -286
  83. webscout/auth/templates/components/navigation.html +0 -70
  84. webscout/auth/templates/static/api.js +0 -455
  85. webscout/auth/templates/static/icons.js +0 -168
  86. webscout/auth/templates/static/main.js +0 -784
  87. webscout/auth/templates/static/particles.js +0 -201
  88. webscout/auth/templates/static/styles.css +0 -3353
  89. webscout/auth/templates/static/ui.js +0 -374
  90. webscout/auth/templates/swagger_ui.html +0 -170
  91. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
  92. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +0 -0
  93. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
  94. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
@@ -57,7 +57,7 @@ class HeckAI(Provider):
  proxies: dict = {},
  history_offset: int = 10250,
  act: str = None,
- model: str = "google/gemini-2.0-flash-001",
+ model: str = "google/gemini-2.5-flash-preview",
  language: str = "English"
  ):
  """
@@ -177,79 +177,73 @@ class HeckAI(Provider):
  def for_stream():
  streaming_text = "" # Initialize outside try block
  try:
- # Use curl_cffi session post with impersonate
  response = self.session.post(
  self.url,
- # headers are set on the session
  data=json.dumps(payload),
  stream=True,
  timeout=self.timeout,
- impersonate="chrome110" # Use a common impersonation profile
+ impersonate="chrome110"
  )
- response.raise_for_status() # Check for HTTP errors
+ response.raise_for_status()

- # Use sanitize_stream to process the stream
  processed_stream = sanitize_stream(
- data=response.iter_content(chunk_size=1024), # Pass byte iterator
- intro_value="data: ", # Prefix to remove (note the space)
- to_json=False, # Content is text
- start_marker="data: [ANSWER_START]",
- end_marker="data: [ANSWER_DONE]",
- skip_markers=["data: [RELATE_Q_START]", "data: [RELATE_Q_DONE]", "data: [REASON_START]", "data: [REASON_DONE]"],
- yield_raw_on_error=True,
- strip_chars=" \n\r\t" # Strip whitespace characters from chunks
+ data=response.iter_content(chunk_size=1024),
+ intro_value="data: ",
+ to_json=False,
+ start_marker="data: [ANSWER_START]",
+ end_marker="data: [ANSWER_DONE]",
+ skip_markers=["data: [RELATE_Q_START]", "data: [RELATE_Q_DONE]", "data: [REASON_START]", "data: [REASON_DONE]"],
+ yield_raw_on_error=True,
+ strip_chars=" \n\r\t",
+ raw=raw
  )

  for content_chunk in processed_stream:
- # content_chunk is the text between ANSWER_START and ANSWER_DONE
  if content_chunk and isinstance(content_chunk, str):
- streaming_text += content_chunk
- yield dict(text=content_chunk) if not raw else content_chunk
+ content_chunk = content_chunk.replace('\\\\', '\\').replace('\\"', '"')
+ if raw:
+ if content_chunk and isinstance(content_chunk, str):
+ streaming_text += content_chunk
+ yield content_chunk
+ else:
+ if content_chunk and isinstance(content_chunk, str):
+ streaming_text += content_chunk
+ yield dict(text=content_chunk)

  # Only update history if we received a valid response
  if streaming_text:
- # Update history and previous answer after stream finishes
  self.previous_answer = streaming_text
- # Convert to simple text before updating conversation
  try:
- # Ensure content is valid before updating conversation
  if streaming_text and isinstance(streaming_text, str):
- # Sanitize the content to ensure it's valid
  sanitized_text = streaming_text.strip()
- if sanitized_text: # Only update if we have non-empty content
+ if sanitized_text:
  self.conversation.update_chat_history(prompt, sanitized_text)
  except Exception as e:
- # If conversation update fails, log but don't crash
  print(f"Warning: Failed to update conversation history: {str(e)}")
-
- except CurlError as e: # Catch CurlError
+ except CurlError as e:
  raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
- except Exception as e: # Catch other potential exceptions (like HTTPError)
+ except Exception as e:
  err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
  raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)} - {err_text}") from e

-
  def for_non_stream():
- # Aggregate the stream using the updated for_stream logic
  full_text = ""
  try:
- # Ensure raw=False so for_stream yields dicts
  for chunk_data in for_stream():
- if isinstance(chunk_data, dict) and "text" in chunk_data:
- full_text += chunk_data["text"]
- # Handle raw string case if raw=True was passed
- elif raw and isinstance(chunk_data, str):
- full_text += chunk_data
+ if raw:
+ if isinstance(chunk_data, str):
+ chunk_data = chunk_data.replace('\\\\', '\\').replace('\\"', '"')
+ full_text += chunk_data
+ else:
+ if isinstance(chunk_data, dict) and "text" in chunk_data:
+ text = chunk_data["text"].replace('\\\\', '\\').replace('\\"', '"')
+ full_text += text
  except Exception as e:
- # If aggregation fails but some text was received, use it. Otherwise, re-raise.
- if not full_text:
- raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
-
- # Return the final aggregated response dict or raw string
- self.last_response = {"text": full_text} # Update last_response here
+ if not full_text:
+ raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+ self.last_response = {"text": full_text}
  return full_text if raw else self.last_response

-
  return for_stream() if stream else for_non_stream()

  @staticmethod
@@ -266,15 +260,15 @@ class HeckAI(Provider):
  if isinstance(text, dict) and "text" in text:
  try:
  text["text"] = text["text"].encode("latin1").decode("utf-8")
- return text
+ return text.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes
  except (UnicodeError, AttributeError) as e:
- return text
+ return text.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes
  elif isinstance(text, str):
  try:
  return text.encode("latin1").decode("utf-8")
  except (UnicodeError, AttributeError) as e:
- return text
- return text
+ return text.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes
+ return text.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes

  def chat(
  self,
@@ -282,6 +276,7 @@ class HeckAI(Provider):
  stream: bool = False,
  optimizer: str = None,
  conversationally: bool = False,
+ raw: bool = False,
  ) -> Union[str, Generator[str, None, None]]:
  """
  Sends a prompt to the HeckAI API and returns only the message text.
@@ -298,18 +293,23 @@ class HeckAI(Provider):
  def for_stream_chat():
  # ask() yields dicts or strings when streaming
  gen = self.ask(
- prompt, stream=True, raw=False, # Ensure ask yields dicts
+ prompt, stream=True, raw=raw,
  optimizer=optimizer, conversationally=conversationally
  )
- for response_dict in gen:
- yield self.get_message(response_dict) # get_message expects dict
+ for response in gen:
+ if raw:
+ yield response
+ else:
+ yield self.get_message(response)

  def for_non_stream_chat():
  # ask() returns dict or str when not streaming
  response_data = self.ask(
- prompt, stream=False, raw=False, # Ensure ask returns dict
+ prompt, stream=False, raw=raw,
  optimizer=optimizer, conversationally=conversationally
  )
+ if raw:
+ return response_data if isinstance(response_data, str) else str(response_data)
  return self.get_message(response_data) # get_message expects dict

  return for_stream_chat() if stream else for_non_stream_chat()
@@ -338,38 +338,43 @@ class HeckAI(Provider):
  # Ensure text is a string
  text = response["text"]
  if not isinstance(text, str):
- return str(text)
+ text = str(text)

- return text
+ return text.replace('\\\\', '\\').replace('\\"', '"')

  if __name__ == "__main__":
- # Ensure curl_cffi is installed
- print("-" * 80)
- print(f"{'Model':<50} {'Status':<10} {'Response'}")
- print("-" * 80)
-
- for model in HeckAI.AVAILABLE_MODELS:
- try:
- test_ai = HeckAI(model=model, timeout=60)
- # Use non-streaming mode first to avoid potential streaming issues
- try:
- response_text = test_ai.chat("Say 'Hello' in one word", stream=False)
- print(f"\r{model:<50} {'✓':<10} {response_text.strip()[:50]}")
- except Exception as e1:
- # Fall back to streaming if non-streaming fails
- print(f"\r{model:<50} {'Testing stream...':<10}", end="", flush=True)
- response = test_ai.chat("Say 'Hello' in one word", stream=True)
- response_text = ""
- for chunk in response:
- if chunk and isinstance(chunk, str):
- response_text += chunk
+ # # Ensure curl_cffi is installed
+ # print("-" * 80)
+ # print(f"{'Model':<50} {'Status':<10} {'Response'}")
+ # print("-" * 80)
+
+ # for model in HeckAI.AVAILABLE_MODELS:
+ # try:
+ # test_ai = HeckAI(model=model, timeout=60)
+ # # Use non-streaming mode first to avoid potential streaming issues
+ # try:
+ # response_text = test_ai.chat("Say 'Hello' in one word", stream=False)
+ # print(f"\r{model:<50} {'✓':<10} {response_text.strip()[:50]}")
+ # except Exception as e1:
+ # # Fall back to streaming if non-streaming fails
+ # print(f"\r{model:<50} {'Testing stream...':<10}", end="", flush=True)
+ # response = test_ai.chat("Say 'Hello' in one word", stream=True)
+ # response_text = ""
+ # for chunk in response:
+ # if chunk and isinstance(chunk, str):
+ # response_text += chunk

- if response_text and len(response_text.strip()) > 0:
- status = "✓"
- # Truncate response if too long
- display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
- print(f"\r{model:<50} {status:<10} {display_text}")
- else:
- raise ValueError("Empty or invalid response")
- except Exception as e:
- print(f"\r{model:<50} {'✗':<10} {str(e)}")
+ # if response_text and len(response_text.strip()) > 0:
+ # status = "✓"
+ # # Truncate response if too long
+ # display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+ # print(f"\r{model:<50} {status:<10} {display_text}")
+ # else:
+ # raise ValueError("Empty or invalid response")
+ # except Exception as e:
+ # print(f"\r{model:<50} {'✗':<10} {str(e)}")
+ from rich import print
+ ai = HeckAI()
+ response = ai.chat("tell me about humans", stream=True, raw=False)
+ for chunk in response:
+ print(chunk, end='', flush=True)
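
The HeckAI diff above threads a new raw keyword through chat() and ask(): with raw=False the generator yields extracted message text (dicts from ask() passed through get_message()), while raw=True passes the unprocessed string chunks straight through. A minimal usage sketch, assuming HeckAI is importable from the top-level webscout package (an assumed re-export; otherwise use webscout.Provider.HeckAI):

    from webscout import HeckAI  # assumed import path

    ai = HeckAI()  # default model is now "google/gemini-2.5-flash-preview"

    # Default behaviour: plain message text per chunk
    for chunk in ai.chat("Say hello", stream=True):
        print(chunk, end="", flush=True)

    # raw=True: unprocessed string chunks from ask() are yielded as-is
    for chunk in ai.chat("Say hello", stream=True, raw=True):
        print(chunk, end="", flush=True)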
@@ -170,16 +170,20 @@ class JadveOpenAI(Provider):
  intro_value=None, # No simple prefix
  to_json=False, # Content is text after extraction
  content_extractor=self._jadve_extractor, # Use the specific extractor
- # end_marker="e:", # Add if 'e:' reliably marks the end
- yield_raw_on_error=True
+ yield_raw_on_error=True,
+ raw=raw
  )

  for content_chunk in processed_stream:
- # content_chunk is the string extracted by _jadve_extractor
- if content_chunk and isinstance(content_chunk, str):
- full_response_text += content_chunk
- resp = {"text": content_chunk}
- yield resp if not raw else content_chunk
+ if raw:
+ if content_chunk and isinstance(content_chunk, str):
+ full_response_text += content_chunk
+ yield content_chunk
+ else:
+ if content_chunk and isinstance(content_chunk, str):
+ full_response_text += content_chunk
+ resp = {"text": content_chunk}
+ yield resp

  # Update history after stream finishes
  self.last_response = {"text": full_response_text}
@@ -191,30 +195,22 @@ class JadveOpenAI(Provider):
  err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
  raise exceptions.FailedToGenerateResponseError(f"Failed to generate response ({type(e).__name__}): {e} - {err_text}") from e

-
  def for_non_stream():
- # Aggregate the stream using the updated for_stream logic
  collected_text = ""
  try:
- # Ensure raw=False so for_stream yields dicts
  for chunk_data in for_stream():
- if isinstance(chunk_data, dict) and "text" in chunk_data:
- collected_text += chunk_data["text"]
- # Handle raw string case if raw=True was passed
- elif raw and isinstance(chunk_data, str):
- collected_text += chunk_data
+ if raw:
+ if isinstance(chunk_data, str):
+ collected_text += chunk_data
+ else:
+ if isinstance(chunk_data, dict) and "text" in chunk_data:
+ collected_text += chunk_data["text"]
  except Exception as e:
- # If aggregation fails but some text was received, use it. Otherwise, re-raise.
- if not collected_text:
- raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
-
+ if not collected_text:
+ raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
  # last_response and history are updated within for_stream
- # Return the final aggregated response dict or raw string
  return collected_text if raw else self.last_response

-
- # Since the API endpoint suggests streaming, always call the stream generator.
- # The non-stream wrapper will handle aggregation if stream=False.
  return for_stream() if stream else for_non_stream()

  def chat(
@@ -223,6 +219,7 @@ class JadveOpenAI(Provider):
  stream: bool = False,
  optimizer: str = None,
  conversationally: bool = False,
+ raw: bool = False,
  ) -> Union[str, Generator[str, None, None]]:
  """
  Generate a chat response (string).
@@ -232,25 +229,29 @@ class JadveOpenAI(Provider):
  stream (bool, optional): Flag for streaming response. Defaults to False.
  optimizer (str, optional): Prompt optimizer name. Defaults to None.
  conversationally (bool, optional): Flag for conversational optimization. Defaults to False.
+ raw (bool, optional): Return raw response. Defaults to False.
  Returns:
  str or generator: Generated response string or generator yielding response chunks.
  """
  def for_stream_chat():
- # ask() yields dicts or strings when streaming
  gen = self.ask(
- prompt, stream=True, raw=False, # Ensure ask yields dicts
+ prompt, stream=True, raw=raw,
  optimizer=optimizer, conversationally=conversationally
  )
- for response_dict in gen:
- yield self.get_message(response_dict) # get_message expects dict
+ for response in gen:
+ if raw:
+ yield response
+ else:
+ yield self.get_message(response)

  def for_non_stream_chat():
- # ask() returns dict or str when not streaming
  response_data = self.ask(
- prompt, stream=False, raw=False, # Ensure ask returns dict
+ prompt, stream=False, raw=raw,
  optimizer=optimizer, conversationally=conversationally
  )
- return self.get_message(response_data) # get_message expects dict
+ if raw:
+ return response_data if isinstance(response_data, str) else str(response_data)
+ return self.get_message(response_data)

  return for_stream_chat() if stream else for_non_stream_chat()

@@ -268,24 +269,29 @@ class JadveOpenAI(Provider):
  return response.get("text", "")

  if __name__ == "__main__":
- # Ensure curl_cffi is installed
- print("-" * 80)
- print(f"{'Model':<50} {'Status':<10} {'Response'}")
- print("-" * 80)
-
- for model in JadveOpenAI.AVAILABLE_MODELS:
- try:
- test_ai = JadveOpenAI(model=model, timeout=60)
- response = test_ai.chat("Say 'Hello' in one word")
- response_text = response
+ # # Ensure curl_cffi is installed
+ # print("-" * 80)
+ # print(f"{'Model':<50} {'Status':<10} {'Response'}")
+ # print("-" * 80)
+
+ # for model in JadveOpenAI.AVAILABLE_MODELS:
+ # try:
+ # test_ai = JadveOpenAI(model=model, timeout=60)
+ # response = test_ai.chat("Say 'Hello' in one word")
+ # response_text = response

- if response_text and len(response_text.strip()) > 0:
- status = "✓"
- # Truncate response if too long
- display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
- else:
- status = "✗"
- display_text = "Empty or invalid response"
- print(f"{model:<50} {status:<10} {display_text}")
- except Exception as e:
- print(f"{model:<50} {'✗':<10} {str(e)}")
+ # if response_text and len(response_text.strip()) > 0:
+ # status = "✓"
+ # # Truncate response if too long
+ # display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+ # else:
+ # status = "✗"
+ # display_text = "Empty or invalid response"
+ # print(f"{model:<50} {status:<10} {display_text}")
+ # except Exception as e:
+ # print(f"{model:<50} {'✗':<10} {str(e)}")
+ from rich import print
+ ai = JadveOpenAI()
+ response = ai.chat("tell me about humans", stream=True, raw=False)
+ for chunk in response:
+ print(chunk, end='', flush=True)
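
JadveOpenAI gets the same raw/non-raw split in both ask() and chat(). A short sketch of the non-streaming path under the same import assumption; the return shapes follow the diff ({"text": ...} by default, the aggregated string when raw=True):

    from webscout import JadveOpenAI  # assumed import path

    ai = JadveOpenAI()

    print(ai.chat("Say 'Hello' in one word"))                         # plain text via get_message()
    print(ai.ask("Say 'Hello' in one word", stream=False))            # e.g. {'text': 'Hello'}
    print(ai.ask("Say 'Hello' in one word", stream=False, raw=True))  # aggregated raw string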
@@ -0,0 +1,207 @@
+ import os
+ import json
+ import requests
+ from typing import Any, Dict, Optional, Union, Generator
+ from webscout.AIutel import sanitize_stream, Optimizers, Conversation, AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+
+ class MiniMax(Provider):
+ """
+ Provider for MiniMax-Reasoning-01 API, following the standard provider interface.
+ """
+ AVAILABLE_MODELS = ["MiniMax-Reasoning-01"]
+ API_URL = "https://api.minimaxi.chat/v1/text/chatcompletion_v2"
+ # TODO: Move API_KEY to env/config for security
+ API_KEY = os.environ.get("MINIMAX_API_KEY") or """eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJHcm91cE5hbWUiOiJtbyBuaSIsIlVzZXJOYW1lIjoibW8gbmkiLCJBY2NvdW50IjoiIiwiU3ViamVjdElEIjoiMTg3NjIwMDY0ODA2NDYzNTI0MiIsIlBob25lIjoiIiwiR3JvdXBJRCI6IjE4NzYyMDA2NDgwNjA0NDA5MzgiLCJQYWdlTmFtZSI6IiIsIk1haWwiOiJuaW1vQHN1YnN1cC52aXAiLCJDcmVhdGVUaW1lIjoiMjAyNS0wMS0wNyAxMToyNzowNyIsIlRva2VuVHlwZSI6MSwiaXNzIjoibWluaW1heCJ9.Ge1ZnpFPUfXVdMini0P_qXbP_9VYwzXiffG9DsNQck4GtYEOs33LDeAiwrVsrrLZfvJ2icQZ4sRZS54wmPuWua_Dav6pYJty8ZtahmUX1IuhlUX5YErhhCRAIy3J1xB8FkLHLyylChuBHpkNz6O6BQLmPqmoa-cOYK9Qrc6IDeu8SX1iMzO9-MSkcWNvkvpCF2Pf9tekBVWNKMDK6IZoMEPbtkaPXdDyP6l0M0e2AlL_E0oM9exg3V-ohAi8OTPFyqM6dcd4TwF-b9DULxfIsRFw401mvIxcTDWa42u2LULewdATVRD2BthU65tuRqEiWeFWMvFlPj2soMze_QIiUA"""
+ MODEL_CONTROL_DEFAULTS = {"tokens_to_generate": 40000, "temperature": 1, "top_p": 0.95}
+
+ def __init__(
+ self,
+ is_conversation: bool = True,
+ max_tokens: int = 2049,
+ timeout: int = 30,
+ intro: str = None,
+ filepath: str = None,
+ update_file: bool = True,
+ proxies: dict = {},
+ history_offset: int = 10250,
+ act: str = None,
+ model: str = "MiniMax-Reasoning-01",
+ system_prompt: str = "You are a helpful assistant, always respond in english",
+ ):
+ if model not in self.AVAILABLE_MODELS:
+ raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+ self.model = model
+ self.api_url = self.API_URL
+ self.api_key = self.API_KEY
+ self.timeout = timeout
+ self.is_conversation = is_conversation
+ self.max_tokens_to_sample = max_tokens
+ self.last_response = {}
+ self.system_prompt = system_prompt
+ self.proxies = proxies
+ self.__available_optimizers = tuple(
+ method for method in dir(Optimizers)
+ if callable(getattr(Optimizers, method)) and not method.startswith("__")
+ )
+ Conversation.intro = (
+ AwesomePrompts().get_act(
+ act, raise_not_found=True, default=None, case_insensitive=True
+ )
+ if act
+ else intro or Conversation.intro
+ )
+ self.conversation = Conversation(
+ is_conversation, self.max_tokens_to_sample, filepath, update_file
+ )
+ self.conversation.history_offset = history_offset
+
+ @staticmethod
+ def _extract_content(chunk: Any) -> Optional[dict]:
+ if not isinstance(chunk, dict):
+ return None
+ choice = chunk.get('choices', [{}])[0]
+ delta = choice.get('delta', {})
+ content = delta.get('content')
+ reasoning = delta.get('reasoning_content')
+ result = {}
+ if content:
+ result['content'] = content
+ if reasoning:
+ result['reasoning_content'] = reasoning
+ return result if result else None
+
+ def ask(
+ self,
+ prompt: str,
+ stream: bool = True,
+ raw: bool = False,
+ optimizer: str = None,
+ conversationally: bool = False,
+ ) -> Union[Dict[str, Any], Generator]:
+ conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+ if optimizer:
+ if optimizer in self.__available_optimizers:
+ conversation_prompt = getattr(Optimizers, optimizer)(
+ conversation_prompt if conversationally else prompt
+ )
+ else:
+ raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+ messages = [
+ {'role': 'system', 'content': self.system_prompt},
+ {'role': 'user', 'content': conversation_prompt}
+ ]
+ data = {
+ 'model': self.model,
+ 'messages': messages,
+ 'stream': True,
+ 'max_tokens': self.MODEL_CONTROL_DEFAULTS.get('tokens_to_generate', 512),
+ 'temperature': self.MODEL_CONTROL_DEFAULTS.get('temperature', 1.0),
+ 'top_p': self.MODEL_CONTROL_DEFAULTS.get('top_p', 1.0),
+ }
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Authorization': f'Bearer {self.api_key}',
+ }
+ def for_stream():
+ try:
+ response = requests.post(
+ self.api_url,
+ headers=headers,
+ data=json.dumps(data),
+ stream=True,
+ timeout=self.timeout,
+ proxies=self.proxies if self.proxies else None
+ )
+ if not response.ok:
+ raise exceptions.FailedToGenerateResponseError(
+ f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+ )
+ streaming_response = ""
+ last_content = ""
+ last_reasoning = ""
+ in_think = False
+ processed_stream = sanitize_stream(
+ response.iter_lines(),
+ intro_value="data:",
+ to_json=True,
+ content_extractor=self._extract_content,
+ raw=False # always process as dict for logic
+ )
+ for chunk in processed_stream:
+ if not chunk:
+ continue
+ content = chunk.get('content') if isinstance(chunk, dict) else None
+ reasoning = chunk.get('reasoning_content') if isinstance(chunk, dict) else None
+ # Handle reasoning_content with <think> tags
+ if reasoning and reasoning != last_reasoning:
+ if not in_think:
+ yield "<think>\n\n"
+ in_think = True
+ yield reasoning
+ last_reasoning = reasoning
+ # If we were in <think> and now have new content, close <think>
+ if in_think and content and content != last_content:
+ yield "</think>\n\n"
+ in_think = False
+ # Handle normal content
+ if content and content != last_content:
+ yield content
+ streaming_response += content
+ last_content = content
+ if not raw:
+ self.last_response = {"text": streaming_response}
+ self.conversation.update_chat_history(prompt, streaming_response)
+ except Exception as e:
+ raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+ def for_non_stream():
+ full_response = ""
+ for chunk in for_stream():
+ if isinstance(chunk, dict) and "text" in chunk:
+ full_response += chunk["text"]
+ elif isinstance(chunk, str):
+ full_response += chunk
+ if not raw:
+ self.last_response = {"text": full_response}
+ self.conversation.update_chat_history(prompt, full_response)
+ return {"text": full_response}
+ else:
+ return full_response
+ return for_stream() if stream else for_non_stream()
+
+ def chat(
+ self,
+ prompt: str,
+ stream: bool = True,
+ optimizer: str = None,
+ conversationally: bool = False,
+ raw: bool = False,
+ ) -> Union[str, Generator[str, None, None]]:
+ def for_stream():
+ for response in self.ask(
+ prompt, stream=True, raw=raw, optimizer=optimizer, conversationally=conversationally
+ ):
+ if raw:
+ yield response
+ else:
+ yield response
+ def for_non_stream():
+ result = self.ask(
+ prompt, stream=False, raw=raw, optimizer=optimizer, conversationally=conversationally
+ )
+ if raw:
+ return result
+ else:
+ return self.get_message(result)
+ return for_stream() if stream else for_non_stream()
+
+ def get_message(self, response: dict) -> str:
+ assert isinstance(response, dict), "Response should be of dict data-type only"
+ return response.get("text", "")
+
+ if __name__ == "__main__":
+ ai = MiniMax()
+ resp = ai.chat("What is the capital of France?", stream=True, raw=False)
+ for chunk in resp:
+ print(chunk, end="", flush=True)