webscout 8.3.2__py3-none-any.whl → 8.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; see the registry's advisory page for details.

Files changed (94)
  1. webscout/AIutel.py +146 -37
  2. webscout/Bing_search.py +1 -2
  3. webscout/Provider/AISEARCH/__init__.py +1 -0
  4. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  5. webscout/Provider/ExaChat.py +84 -58
  6. webscout/Provider/HeckAI.py +85 -80
  7. webscout/Provider/Jadve.py +56 -50
  8. webscout/Provider/MiniMax.py +207 -0
  9. webscout/Provider/Nemotron.py +41 -13
  10. webscout/Provider/Netwrck.py +34 -51
  11. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
  12. webscout/Provider/OPENAI/MiniMax.py +298 -0
  13. webscout/Provider/OPENAI/README.md +30 -29
  14. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  15. webscout/Provider/OPENAI/__init__.py +3 -1
  16. webscout/Provider/OPENAI/autoproxy.py +752 -17
  17. webscout/Provider/OPENAI/base.py +7 -76
  18. webscout/Provider/OPENAI/deepinfra.py +42 -108
  19. webscout/Provider/OPENAI/flowith.py +179 -166
  20. webscout/Provider/OPENAI/friendli.py +233 -0
  21. webscout/Provider/OPENAI/monochat.py +329 -0
  22. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  23. webscout/Provider/OPENAI/toolbaz.py +1 -0
  24. webscout/Provider/OPENAI/typegpt.py +1 -1
  25. webscout/Provider/OPENAI/utils.py +19 -42
  26. webscout/Provider/OPENAI/x0gpt.py +14 -2
  27. webscout/Provider/OpenGPT.py +54 -32
  28. webscout/Provider/PI.py +58 -84
  29. webscout/Provider/StandardInput.py +32 -13
  30. webscout/Provider/TTI/README.md +9 -9
  31. webscout/Provider/TTI/__init__.py +2 -1
  32. webscout/Provider/TTI/aiarta.py +92 -78
  33. webscout/Provider/TTI/infip.py +212 -0
  34. webscout/Provider/TTI/monochat.py +220 -0
  35. webscout/Provider/TeachAnything.py +11 -3
  36. webscout/Provider/TextPollinationsAI.py +78 -70
  37. webscout/Provider/TogetherAI.py +32 -48
  38. webscout/Provider/Venice.py +37 -46
  39. webscout/Provider/VercelAI.py +27 -24
  40. webscout/Provider/WiseCat.py +35 -35
  41. webscout/Provider/WrDoChat.py +22 -26
  42. webscout/Provider/WritingMate.py +26 -22
  43. webscout/Provider/__init__.py +2 -2
  44. webscout/Provider/granite.py +48 -57
  45. webscout/Provider/koala.py +51 -39
  46. webscout/Provider/learnfastai.py +49 -64
  47. webscout/Provider/llmchat.py +79 -93
  48. webscout/Provider/llmchatco.py +63 -78
  49. webscout/Provider/multichat.py +51 -40
  50. webscout/Provider/oivscode.py +1 -1
  51. webscout/Provider/scira_chat.py +159 -96
  52. webscout/Provider/scnet.py +13 -13
  53. webscout/Provider/searchchat.py +13 -13
  54. webscout/Provider/sonus.py +12 -11
  55. webscout/Provider/toolbaz.py +25 -8
  56. webscout/Provider/turboseek.py +41 -42
  57. webscout/Provider/typefully.py +27 -12
  58. webscout/Provider/typegpt.py +41 -46
  59. webscout/Provider/uncovr.py +55 -90
  60. webscout/Provider/x0gpt.py +33 -17
  61. webscout/Provider/yep.py +79 -96
  62. webscout/auth/__init__.py +12 -1
  63. webscout/auth/providers.py +27 -5
  64. webscout/auth/routes.py +128 -104
  65. webscout/auth/server.py +367 -312
  66. webscout/client.py +121 -116
  67. webscout/litagent/Readme.md +68 -55
  68. webscout/litagent/agent.py +99 -9
  69. webscout/version.py +1 -1
  70. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/METADATA +102 -90
  71. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/RECORD +75 -87
  72. webscout/Provider/TTI/fastflux.py +0 -233
  73. webscout/Provider/Writecream.py +0 -246
  74. webscout/auth/static/favicon.svg +0 -11
  75. webscout/auth/swagger_ui.py +0 -203
  76. webscout/auth/templates/components/authentication.html +0 -237
  77. webscout/auth/templates/components/base.html +0 -103
  78. webscout/auth/templates/components/endpoints.html +0 -750
  79. webscout/auth/templates/components/examples.html +0 -491
  80. webscout/auth/templates/components/footer.html +0 -75
  81. webscout/auth/templates/components/header.html +0 -27
  82. webscout/auth/templates/components/models.html +0 -286
  83. webscout/auth/templates/components/navigation.html +0 -70
  84. webscout/auth/templates/static/api.js +0 -455
  85. webscout/auth/templates/static/icons.js +0 -168
  86. webscout/auth/templates/static/main.js +0 -784
  87. webscout/auth/templates/static/particles.js +0 -201
  88. webscout/auth/templates/static/styles.css +0 -3353
  89. webscout/auth/templates/static/ui.js +0 -374
  90. webscout/auth/templates/swagger_ui.html +0 -170
  91. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
  92. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +0 -0
  93. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
  94. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
webscout/AIutel.py CHANGED
@@ -1,5 +1,6 @@
1
1
  import codecs
2
2
  import json
3
+ import re
3
4
  from typing import (
4
5
  Any,
5
6
  AsyncGenerator,
@@ -48,17 +49,6 @@ def _process_chunk(
48
49
  error_handler (Optional[Callable[[Exception, str], Optional[Any]]]): An optional callback function that is called when JSON parsing fails.
49
50
  It receives the exception and the sanitized chunk as arguments. It should return a value to yield instead of the raw chunk, or None to ignore.
50
51
 
51
-
52
- Args:
53
- chunk: Chunk of text to process.
54
- intro_value: Prefix to remove from the chunk.
55
- to_json: Parse the chunk as JSON if True.
56
- skip_markers: List of markers to skip.
57
- strip_chars: Characters to strip from the chunk.
58
- yield_raw_on_error: Whether to return the raw chunk on parse errors.
59
- error_handler: Optional callback ``Callable[[Exception, str], Optional[Any]]``
60
- invoked when JSON parsing fails. The callback should return a value to
61
- yield instead of the raw chunk, or ``None`` to ignore.
62
52
  """
63
53
  if not isinstance(chunk, str):
64
54
  return None
@@ -128,11 +118,6 @@ def _decode_byte_stream(
128
118
  Defaults to 'replace'.
129
119
  buffer_size (int): The size of the internal buffer used for decoding.
130
120
 
131
- Args:
132
- byte_iterator: Iterator yielding bytes
133
- encoding: Character encoding to use
134
- errors: How to handle encoding errors ('strict', 'ignore', 'replace')
135
- buffer_size: Size of internal buffer for performance tuning
136
121
  """
137
122
  # Initialize decoder with the specified encoding
138
123
  try:
@@ -252,7 +237,7 @@ def _sanitize_stream_sync(
252
237
  Args:
253
238
  data: String, iterable of strings, or iterable of bytes to process.
254
239
  intro_value: Prefix indicating the start of meaningful data.
255
- to_json: Parse JSON content if ``True``.
240
+ to_json: Parse the chunk as JSON if True.
256
241
  skip_markers: Lines containing any of these markers are skipped.
257
242
  strip_chars: Characters to strip from each line.
258
243
  start_marker: Begin processing only after this marker is found.
@@ -611,10 +596,17 @@ async def _sanitize_stream_async(
611
596
  def sanitize_stream(
612
597
  data: Union[
613
598
  str,
599
+ bytes,
614
600
  Iterable[str],
615
601
  Iterable[bytes],
616
602
  AsyncIterable[str],
617
603
  AsyncIterable[bytes],
604
+ dict,
605
+ list,
606
+ int,
607
+ float,
608
+ bool,
609
+ None,
618
610
  ],
619
611
  intro_value: str = "data:",
620
612
  to_json: bool = True,
@@ -629,19 +621,16 @@ def sanitize_stream(
629
621
  buffer_size: int = 8192,
630
622
  line_delimiter: Optional[str] = None,
631
623
  error_handler: Optional[Callable[[Exception, str], Optional[Any]]] = None,
624
+ object_mode: Literal["as_is", "json", "str"] = "json",
625
+ raw: bool = False,
632
626
  ) -> Union[Generator[Any, None, None], AsyncGenerator[Any, None]]:
633
627
  """
634
628
  Processes streaming data (strings or bytes) in either synchronous or asynchronous mode.
635
-
636
- This function acts as a unified interface for handling both synchronous and
637
- asynchronous data streams. It automatically detects the type of input data and
638
- dispatches it to the appropriate processing function (`_sanitize_stream_sync` or
639
- `_sanitize_stream_async`).
629
+ Now supports non-iterable and miscellaneous input types (dict, list, int, float, bool, None).
640
630
 
641
631
  Args:
642
- data (Union[str, Iterable[str], Iterable[bytes], AsyncIterable[str], AsyncIterable[bytes]]):
643
- The data to be processed. Can be a string, a synchronous iterable of strings or bytes,
644
- or an asynchronous iterable of strings or bytes.
632
+ data: The data to be processed. Can be a string, bytes, a synchronous iterable of strings or bytes,
633
+ an asynchronous iterable of strings or bytes, or a single object (dict, list, int, float, bool, None).
645
634
  intro_value (str): Prefix indicating the start of meaningful data. Defaults to "data:".
646
635
  to_json (bool): Parse JSON content if ``True``. Defaults to True.
647
636
  skip_markers (Optional[List[str]]): Lines containing any of these markers are skipped. Defaults to None.
@@ -659,34 +648,154 @@ def sanitize_stream(
659
648
  error_handler (Optional[Callable[[Exception, str], Optional[Any]]]):
660
649
  Callback invoked with ``(Exception, str)`` when JSON parsing fails.
661
650
  If the callback returns a value, it is yielded in place of the raw line. Defaults to None.
651
+ object_mode (Literal["as_is", "json", "str"]): How to handle non-string, non-iterable objects.
652
+ "json" (default) yields as JSON string, "str" yields as str(obj), "as_is" yields the object as-is.
653
+ raw (bool): If True, yields the raw response as returned by the API, chunk by chunk (no splitting or joining).
662
654
 
663
655
  Returns:
664
656
  Union[Generator[Any, None, None], AsyncGenerator[Any, None]]:
665
- A generator or an asynchronous generator yielding the processed data.
666
- """
667
- # Determine the actual data payload to process
668
- payload: Any # The type of payload can change based on data's attributes
657
+ A generator or an asynchronous generator yielding the processed data, or raw data if raw=True.
658
+ """ # --- RAW MODE: yield each chunk exactly as returned by the API ---
659
+ if raw:
660
+ def _raw_passthrough_sync(source_iter):
661
+ for chunk in source_iter:
662
+ if isinstance(chunk, (bytes, bytearray)):
663
+ # Decode bytes preserving all whitespace and newlines
664
+ yield chunk.decode(encoding, encoding_errors)
665
+ elif chunk is not None:
666
+ # Yield string chunks as-is, preserving all formatting
667
+ yield chunk
668
+ # Skip None chunks entirely
669
+ async def _raw_passthrough_async(source_aiter):
670
+ async for chunk in source_aiter:
671
+ if isinstance(chunk, (bytes, bytearray)):
672
+ # Decode bytes preserving all whitespace and newlines
673
+ yield chunk.decode(encoding, encoding_errors)
674
+ elif chunk is not None:
675
+ # Yield string chunks as-is, preserving all formatting
676
+ yield chunk
677
+ # Skip None chunks entirely
678
+ # Sync iterable (but not str/bytes)
679
+ if hasattr(data, "__iter__") and not isinstance(data, (str, bytes)):
680
+ return _raw_passthrough_sync(data)
681
+ # Async iterable
682
+ if hasattr(data, "__aiter__"):
683
+ return _raw_passthrough_async(data)
684
+ # Single string or bytes
685
+ if isinstance(data, (bytes, bytearray)):
686
+ def _yield_single():
687
+ yield data.decode(encoding, encoding_errors)
688
+ return _yield_single()
689
+ else:
690
+ def _yield_single():
691
+ if data is not None:
692
+ yield data
693
+ return _yield_single()
694
+ # --- END RAW MODE ---
669
695
 
670
696
  text_attr = getattr(data, "text", None)
671
697
  content_attr = getattr(data, "content", None)
672
698
 
699
+ # Handle None
700
+ if data is None:
701
+ def _empty_gen():
702
+ if False:
703
+ yield None
704
+ return _empty_gen()
705
+
706
+ # Handle bytes directly
707
+ if isinstance(data, bytes):
708
+ try:
709
+ payload = data.decode(encoding, encoding_errors)
710
+ except Exception:
711
+ payload = str(data)
712
+ return _sanitize_stream_sync(
713
+ payload, intro_value, to_json, skip_markers, strip_chars,
714
+ start_marker, end_marker, content_extractor, yield_raw_on_error,
715
+ encoding, encoding_errors, buffer_size, line_delimiter, error_handler,
716
+ )
717
+
718
+ # Handle string directly
719
+ if isinstance(data, str):
720
+ return _sanitize_stream_sync(
721
+ data, intro_value, to_json, skip_markers, strip_chars,
722
+ start_marker, end_marker, content_extractor, yield_raw_on_error,
723
+ encoding, encoding_errors, buffer_size, line_delimiter, error_handler,
724
+ )
725
+
726
+ # Handle dict, list, int, float, bool (non-iterable, non-string/bytes)
727
+ if isinstance(data, (dict, list, int, float, bool)):
728
+ if object_mode == "as_is":
729
+ def _as_is_gen():
730
+ yield data
731
+ return _as_is_gen()
732
+ elif object_mode == "str":
733
+ return _sanitize_stream_sync(
734
+ str(data), intro_value, to_json, skip_markers, strip_chars,
735
+ start_marker, end_marker, content_extractor, yield_raw_on_error,
736
+ encoding, encoding_errors, buffer_size, line_delimiter, error_handler,
737
+ )
738
+ else: # "json"
739
+ try:
740
+ json_str = json.dumps(data)
741
+ except Exception:
742
+ json_str = str(data)
743
+ return _sanitize_stream_sync(
744
+ json_str, intro_value, to_json, skip_markers, strip_chars,
745
+ start_marker, end_marker, content_extractor, yield_raw_on_error,
746
+ encoding, encoding_errors, buffer_size, line_delimiter, error_handler,
747
+ )
748
+
749
+ # Handle file-like objects (optional, treat as string if .read exists)
750
+ if hasattr(data, "read") and callable(data.read):
751
+ try:
752
+ file_content = data.read()
753
+ if isinstance(file_content, bytes):
754
+ file_content = file_content.decode(encoding, encoding_errors)
755
+ return _sanitize_stream_sync(
756
+ file_content, intro_value, to_json, skip_markers, strip_chars,
757
+ start_marker, end_marker, content_extractor, yield_raw_on_error,
758
+ encoding, encoding_errors, buffer_size, line_delimiter, error_handler,
759
+ )
760
+ except Exception:
761
+ pass # fallback to next
762
+
763
+ # Handle .text or .content attributes
673
764
  if isinstance(text_attr, str):
674
765
  payload = text_attr
766
+ return _sanitize_stream_sync(
767
+ payload, intro_value, to_json, skip_markers, strip_chars,
768
+ start_marker, end_marker, content_extractor, yield_raw_on_error,
769
+ encoding, encoding_errors, buffer_size, line_delimiter, error_handler,
770
+ )
675
771
  elif isinstance(content_attr, bytes):
676
- payload = content_attr.decode(encoding, encoding_errors)
677
- else:
678
- # Use the original data if .text or .content are not applicable or not found
679
- payload = data
772
+ try:
773
+ payload = content_attr.decode(encoding, encoding_errors)
774
+ except Exception:
775
+ payload = str(content_attr)
776
+ return _sanitize_stream_sync(
777
+ payload, intro_value, to_json, skip_markers, strip_chars,
778
+ start_marker, end_marker, content_extractor, yield_raw_on_error,
779
+ encoding, encoding_errors, buffer_size, line_delimiter, error_handler,
780
+ )
680
781
 
681
- # Dispatch to sync or async worker based on the nature of the 'payload'
682
- if hasattr(payload, "__aiter__"):
782
+ # Handle async iterables
783
+ if hasattr(data, "__aiter__"):
683
784
  return _sanitize_stream_async(
684
- payload, intro_value, to_json, skip_markers, strip_chars,
785
+ data, intro_value, to_json, skip_markers, strip_chars,
786
+ start_marker, end_marker, content_extractor, yield_raw_on_error,
787
+ encoding, encoding_errors, buffer_size, line_delimiter, error_handler,
788
+ )
789
+ # Handle sync iterables (but not strings/bytes)
790
+ if hasattr(data, "__iter__"):
791
+ return _sanitize_stream_sync(
792
+ data, intro_value, to_json, skip_markers, strip_chars,
685
793
  start_marker, end_marker, content_extractor, yield_raw_on_error,
686
794
  encoding, encoding_errors, buffer_size, line_delimiter, error_handler,
687
795
  )
796
+ # Fallback: treat as string
688
797
  return _sanitize_stream_sync(
689
- payload, intro_value, to_json, skip_markers, strip_chars,
798
+ str(data), intro_value, to_json, skip_markers, strip_chars,
690
799
  start_marker, end_marker, content_extractor, yield_raw_on_error,
691
800
  encoding, encoding_errors, buffer_size, line_delimiter, error_handler,
692
801
  )
webscout/Bing_search.py CHANGED
@@ -175,8 +175,7 @@ class BingSearch:
175
175
  url = self._first_page(keywords)['url']
176
176
  urls_to_fetch = [url]
177
177
  while len(fetched_results) < max_results and urls_to_fetch:
178
- with self._executor as executor:
179
- html_pages = list(executor.map(fetch_page, urls_to_fetch))
178
+ html_pages = list(self._executor.map(fetch_page, urls_to_fetch))
180
179
  urls_to_fetch = []
181
180
  for html in html_pages:
182
181
  soup = BeautifulSoup(html, "html.parser")
@@ -1,3 +1,4 @@
1
+ from .stellar_search import *
1
2
  from .felo_search import *
2
3
  from .DeepFind import *
3
4
  from .genspark_search import *
@@ -0,0 +1,132 @@
1
import json
import re
from typing import Dict, Optional, Generator, Union, Any

import requests

from webscout import exceptions
from webscout.AIbase import AISearch, SearchResponse
from webscout.AIutel import sanitize_stream
from webscout.litagent import LitAgent
8
+
9
class Stellar(AISearch):
    """AI Search provider for stellar.chatastra.ai.

    Posts a multipart/form-data request to the site's Next.js server-action
    endpoint and extracts the streamed answer text from the framework's
    hex-keyed ``{"diff": [0, "..."]}`` chunks.
    """

    def __init__(self, timeout: int = 30, proxies: Optional[dict] = None):
        """Initialise the HTTP session.

        Args:
            timeout: Per-request timeout in seconds.
            proxies: Optional proxy mapping passed through to ``requests``.
        """
        self.api_endpoint = "https://stellar.chatastra.ai/search/x1GUVzl"
        self.timeout = timeout
        self.proxies = proxies
        self.session = requests.Session()
        self.headers = {
            "accept": "text/x-component",
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
            "content-type": "multipart/form-data; boundary=----WebKitFormBoundaryQsWD5Qs3QqDkNBPH",
            "dnt": "1",
            "next-action": "efc2643ed9bafe182a010b58ebea17f068ad3985",
            "next-router-state-tree": "%5B%22%22%2C%7B%22children%22%3A%5B%22__PAGE__%22%2C%7B%7D%2C%22%2F%22%2C%22refresh%22%5D%7D%2Cnull%2Cnull%2Ctrue%5D",
            "origin": "https://stellar.chatastra.ai",
            "priority": "u=1, i",
            "referer": "https://stellar.chatastra.ai/search/x1GUVzl",
            "sec-ch-ua": '"Microsoft Edge";v="137", "Chromium";v="137", "Not/A)Brand";v="24"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "sec-gpc": "1",
            "user-agent": LitAgent().random(),
            "cookie": "__client_uat=0; __client_uat_K90aduOv=0",
        }
        self.session.headers.update(self.headers)
        if proxies:
            self.session.proxies = proxies

    def _make_payload(self, prompt: str) -> bytes:
        """Build the multipart/form-data body for a search request.

        NOTE(review): the action/bind ids are a static payload captured from
        the web UI; in production they may need to be generated dynamically.
        """
        boundary = "----WebKitFormBoundaryQsWD5Qs3QqDkNBPH"
        parts = [
            f"--{boundary}\r\nContent-Disposition: form-data; name=\"1\"\r\n\r\n{{\"id\":\"71bb616ba5b7cbcac2308fe0c249a9f2d51825b7\",\"bound\":null}}\r\n",
            f"--{boundary}\r\nContent-Disposition: form-data; name=\"2\"\r\n\r\n{{\"id\":\"8bcca1d0cb933b14fefde88dacb2865be3d1d525\",\"bound\":null}}\r\n",
            f"--{boundary}\r\nContent-Disposition: form-data; name=\"3_input\"\r\n\r\n{prompt}\r\n",
            f"--{boundary}\r\nContent-Disposition: form-data; name=\"3_id\"\r\n\r\nx1GUVzl\r\n",
            f"--{boundary}\r\nContent-Disposition: form-data; name=\"3_userId\"\r\n\r\nnull\r\n",
            f"--{boundary}\r\nContent-Disposition: form-data; name=\"0\"\r\n\r\n[{{\"action\":\"$F1\",\"options\":{{\"onSetAIState\":\"$F2\"}}}},{{\"messages\":[],\"chatId\":\"\"}},\"$K3\"]\r\n",
            f"--{boundary}--\r\n"
        ]
        return "".join(parts).encode("utf-8")

    @staticmethod
    def _stellar_extractor(chunk: Union[str, bytes, Dict[str, Any]]) -> Optional[str]:
        """Extract answer text from a Stellar stream chunk.

        Chunks look like ``6e:{"diff":[0," empathy"],"next":"$@6f"}``.
        The capture admits JSON escape sequences (``\\n``, ``\\"``, ``\\\\``)
        so fragments containing them are no longer silently dropped; escapes
        are decoded in one pass via ``json.loads``.

        Returns:
            The concatenated text of all diff fragments in the chunk, or
            ``None`` when the chunk holds no usable text.
        """
        if isinstance(chunk, bytes):
            try:
                chunk = chunk.decode('utf-8', errors='replace')
            except Exception:
                return None
        if not isinstance(chunk, str):
            return None
        # Capture the JSON string body including backslash escapes; the old
        # pattern ([^"\\]*) rejected any fragment containing an escape, which
        # lost content such as embedded newlines or quoted text.
        pattern = r'[0-9a-f]+:\{"diff":\[0,"((?:[^"\\]|\\.)*)"\]'
        matches = re.findall(pattern, chunk)
        if not matches:
            return None
        joined = ''.join(matches)
        try:
            # Decode JSON escapes (\n, \", \\, \uXXXX) properly.
            extracted_text = json.loads(f'"{joined}"')
        except ValueError:
            # Fallback: at least unescape literal newlines.
            extracted_text = joined.replace('\\n', '\n')
        return extracted_text if extracted_text.strip() else None

    def search(self, prompt: str, stream: bool = False, raw: bool = False) -> Union[SearchResponse, Generator[Union[Dict[str, str], SearchResponse, str], None, None]]:
        """Run a search query.

        Args:
            prompt: The user query.
            stream: When True, return a generator yielding text fragments as
                they arrive; otherwise return the full answer.
            raw: When True, wrap output as ``{"text": ...}`` dicts instead of
                plain strings / ``SearchResponse``.

        Raises:
            exceptions.APIConnectionError: On HTTP failure or a non-OK status.
        """
        payload = self._make_payload(prompt)
        try:
            response = self.session.post(
                self.api_endpoint,
                data=payload,
                timeout=self.timeout,
                proxies=self.proxies,
                stream=stream,
            )
            if not response.ok:
                raise exceptions.APIConnectionError(f"Failed to get response: {response.status_code} {response.text}")

            def _yield_stream():
                # Use sanitize_stream for real-time extraction from the response iterator
                processed_stream = sanitize_stream(
                    data=response.iter_lines(decode_unicode=True),
                    intro_value=None,
                    to_json=False,
                    content_extractor=self._stellar_extractor
                )
                full_response = ""
                for content in processed_stream:
                    if content and isinstance(content, str):
                        full_response += content
                        if raw:
                            yield {"text": content}
                        else:
                            yield content
                # Do NOT yield SearchResponse(full_response) in streaming mode
                # to avoid duplicating the already-streamed output.

            if stream:
                return _yield_stream()
            else:
                # Non-streaming: extract from the full response text at once.
                processed_stream = sanitize_stream(
                    data=response.text.splitlines(),
                    intro_value=None,
                    to_json=False,
                    content_extractor=self._stellar_extractor
                )
                full_response = ""
                for content in processed_stream:
                    if content and isinstance(content, str):
                        full_response += content
                if raw:
                    return {"text": full_response}
                else:
                    return SearchResponse(full_response)
        except requests.RequestException as e:
            raise exceptions.APIConnectionError(f"Request failed: {e}")
125
+
126
if __name__ == "__main__":
    # Interactive demo: read one query and stream the answer to stdout.
    from rich import print

    client = Stellar()
    query = input(">>> ")
    for piece in client.search(query, stream=True, raw=False):
        print(piece, end="", flush=True)
@@ -2,7 +2,7 @@ from curl_cffi import CurlError
2
2
  from curl_cffi.requests import Session, Response # Import Response
3
3
  import json
4
4
  import uuid
5
- from typing import Any, Dict, Union, Optional, List
5
+ from typing import Any, Dict, Union, Optional, List, Generator
6
6
  from datetime import datetime
7
7
  from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
8
8
  from webscout.AIbase import Provider
@@ -264,10 +264,11 @@ class ExaChat(Provider):
264
264
  def ask(
265
265
  self,
266
266
  prompt: str,
267
+ stream: bool = False,
267
268
  raw: bool = False,
268
269
  optimizer: str = None,
269
270
  conversationally: bool = False,
270
- ) -> Dict[str, Any]:
271
+ ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
271
272
  """Sends a prompt to the API and returns the response."""
272
273
  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
273
274
  if optimizer:
@@ -281,78 +282,103 @@ class ExaChat(Provider):
281
282
 
282
283
  payload = self._build_payload(conversation_prompt)
283
284
  response = self._make_request(payload)
284
-
285
- try:
285
+ processed_stream = sanitize_stream(
286
+ data=response.iter_content(chunk_size=None),
287
+ intro_value=None,
288
+ to_json=True,
289
+ content_extractor=self._exachat_extractor,
290
+ yield_raw_on_error=False,
291
+ raw=raw
292
+ )
293
+ if stream:
294
+ streaming_text = ""
295
+ for content_chunk in processed_stream:
296
+ if content_chunk and isinstance(content_chunk, str):
297
+ content_chunk = content_chunk.replace('\\\\', '\\').replace('\\"', '"')
298
+ if raw:
299
+ if content_chunk and isinstance(content_chunk, str):
300
+ streaming_text += content_chunk
301
+ yield content_chunk
302
+ else:
303
+ if content_chunk and isinstance(content_chunk, str):
304
+ streaming_text += content_chunk
305
+ yield dict(text=content_chunk)
306
+ self.last_response = {"text": streaming_text}
307
+ self.conversation.update_chat_history(prompt, streaming_text)
308
+ else:
286
309
  full_response = ""
287
- # Use sanitize_stream to process the response
288
- processed_stream = sanitize_stream(
289
- data=response.iter_content(chunk_size=None), # Pass byte iterator
290
- intro_value=None, # API doesn't seem to use 'data:' prefix
291
- to_json=True, # Stream sends JSON lines
292
- content_extractor=self._exachat_extractor, # Use the specific extractor
293
- yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
294
- )
295
-
296
310
  for content_chunk in processed_stream:
297
- # content_chunk is the string extracted by _exachat_extractor
298
311
  if content_chunk and isinstance(content_chunk, str):
299
- full_response += content_chunk
300
-
312
+ content_chunk = content_chunk.replace('\\\\', '\\').replace('\\"', '"')
313
+ if raw:
314
+ if content_chunk and isinstance(content_chunk, str):
315
+ full_response += content_chunk
316
+ else:
317
+ if content_chunk and isinstance(content_chunk, str):
318
+ full_response += content_chunk
301
319
  self.last_response = {"text": full_response}
302
320
  self.conversation.update_chat_history(prompt, full_response)
303
- return self.last_response if not raw else full_response # Return dict or raw string
304
-
305
- except json.JSONDecodeError as e:
306
- raise exceptions.FailedToGenerateResponseError(f"Invalid JSON response: {e}") from e
321
+ return self.last_response if not raw else full_response
307
322
 
308
323
  def chat(
309
324
  self,
310
325
  prompt: str,
326
+ stream: bool = False,
311
327
  optimizer: str = None,
312
328
  conversationally: bool = False,
313
- ) -> str:
314
- """Generate response."""
315
- response = self.ask(
316
- prompt, optimizer=optimizer, conversationally=conversationally
317
- )
318
- return self.get_message(response)
329
+ raw: bool = False,
330
+ ) -> Union[str, Generator[str, None, None]]:
331
+ def for_stream():
332
+ for response in self.ask(
333
+ prompt, stream=True, raw=raw, optimizer=optimizer, conversationally=conversationally
334
+ ):
335
+ if raw:
336
+ yield response
337
+ else:
338
+ yield self.get_message(response)
339
+ def for_non_stream():
340
+ result = self.ask(
341
+ prompt, stream=False, raw=raw, optimizer=optimizer, conversationally=conversationally
342
+ )
343
+ if raw:
344
+ return result if isinstance(result, str) else str(result)
345
+ return self.get_message(result)
346
+ return for_stream() if stream else for_non_stream()
319
347
 
320
348
  def get_message(self, response: Union[Dict[str, Any], str]) -> str:
321
- """
322
- Retrieves message from response.
323
-
324
- Args:
325
- response (Union[Dict[str, Any], str]): The response to extract the message from
326
-
327
- Returns:
328
- str: The extracted message text
329
- """
330
349
  if isinstance(response, dict):
331
- return response.get("text", "")
332
- return str(response)
350
+ text = response.get("text", "")
351
+ else:
352
+ text = str(response)
353
+ return text.replace('\\\\', '\\').replace('\\"', '"')
333
354
 
334
355
if __name__ == "__main__":
    # Quick manual smoke test: stream a short completion to stdout.
    from rich import print

    bot = ExaChat(model="gemini-2.0-flash")
    reply_stream = bot.chat("tell me a joke", stream=True, raw=False)
    for piece in reply_stream:
        print(piece, end='', flush=True)