webscout-8.2.6-py3-none-any.whl → webscout-8.2.8-py3-none-any.whl

This diff compares publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as published.

Potentially problematic release: this version of webscout has been flagged as possibly problematic.

Files changed (150)
  1. webscout/AIauto.py +1 -1
  2. webscout/AIutel.py +298 -239
  3. webscout/Extra/Act.md +309 -0
  4. webscout/Extra/GitToolkit/gitapi/README.md +110 -0
  5. webscout/Extra/YTToolkit/README.md +375 -0
  6. webscout/Extra/YTToolkit/ytapi/README.md +44 -0
  7. webscout/Extra/YTToolkit/ytapi/extras.py +92 -19
  8. webscout/Extra/autocoder/autocoder.py +309 -114
  9. webscout/Extra/autocoder/autocoder_utiles.py +15 -15
  10. webscout/Extra/gguf.md +430 -0
  11. webscout/Extra/tempmail/README.md +488 -0
  12. webscout/Extra/weather.md +281 -0
  13. webscout/Litlogger/Readme.md +175 -0
  14. webscout/Provider/AISEARCH/DeepFind.py +41 -37
  15. webscout/Provider/AISEARCH/README.md +279 -0
  16. webscout/Provider/AISEARCH/__init__.py +0 -1
  17. webscout/Provider/AISEARCH/genspark_search.py +228 -86
  18. webscout/Provider/AISEARCH/hika_search.py +11 -11
  19. webscout/Provider/AISEARCH/scira_search.py +324 -322
  20. webscout/Provider/AllenAI.py +7 -14
  21. webscout/Provider/Blackboxai.py +518 -74
  22. webscout/Provider/Cloudflare.py +0 -1
  23. webscout/Provider/Deepinfra.py +23 -21
  24. webscout/Provider/Flowith.py +217 -0
  25. webscout/Provider/FreeGemini.py +250 -0
  26. webscout/Provider/GizAI.py +15 -5
  27. webscout/Provider/Glider.py +11 -8
  28. webscout/Provider/HeckAI.py +80 -52
  29. webscout/Provider/Koboldai.py +7 -4
  30. webscout/Provider/LambdaChat.py +2 -2
  31. webscout/Provider/Marcus.py +10 -18
  32. webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
  33. webscout/Provider/OPENAI/Cloudflare.py +378 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +282 -0
  35. webscout/Provider/OPENAI/NEMOTRON.py +244 -0
  36. webscout/Provider/OPENAI/README.md +1253 -0
  37. webscout/Provider/OPENAI/__init__.py +8 -0
  38. webscout/Provider/OPENAI/ai4chat.py +293 -286
  39. webscout/Provider/OPENAI/api.py +810 -0
  40. webscout/Provider/OPENAI/base.py +217 -14
  41. webscout/Provider/OPENAI/c4ai.py +373 -367
  42. webscout/Provider/OPENAI/chatgpt.py +7 -0
  43. webscout/Provider/OPENAI/chatgptclone.py +7 -0
  44. webscout/Provider/OPENAI/chatsandbox.py +172 -0
  45. webscout/Provider/OPENAI/deepinfra.py +30 -20
  46. webscout/Provider/OPENAI/e2b.py +6 -0
  47. webscout/Provider/OPENAI/exaai.py +7 -0
  48. webscout/Provider/OPENAI/exachat.py +6 -0
  49. webscout/Provider/OPENAI/flowith.py +162 -0
  50. webscout/Provider/OPENAI/freeaichat.py +359 -352
  51. webscout/Provider/OPENAI/glider.py +323 -316
  52. webscout/Provider/OPENAI/groq.py +361 -354
  53. webscout/Provider/OPENAI/heckai.py +30 -64
  54. webscout/Provider/OPENAI/llmchatco.py +8 -0
  55. webscout/Provider/OPENAI/mcpcore.py +7 -0
  56. webscout/Provider/OPENAI/multichat.py +8 -0
  57. webscout/Provider/OPENAI/netwrck.py +356 -350
  58. webscout/Provider/OPENAI/opkfc.py +8 -0
  59. webscout/Provider/OPENAI/scirachat.py +471 -462
  60. webscout/Provider/OPENAI/sonus.py +9 -0
  61. webscout/Provider/OPENAI/standardinput.py +9 -1
  62. webscout/Provider/OPENAI/textpollinations.py +339 -329
  63. webscout/Provider/OPENAI/toolbaz.py +7 -0
  64. webscout/Provider/OPENAI/typefully.py +355 -0
  65. webscout/Provider/OPENAI/typegpt.py +358 -346
  66. webscout/Provider/OPENAI/uncovrAI.py +7 -0
  67. webscout/Provider/OPENAI/utils.py +103 -7
  68. webscout/Provider/OPENAI/venice.py +12 -0
  69. webscout/Provider/OPENAI/wisecat.py +19 -19
  70. webscout/Provider/OPENAI/writecream.py +7 -0
  71. webscout/Provider/OPENAI/x0gpt.py +7 -0
  72. webscout/Provider/OPENAI/yep.py +50 -21
  73. webscout/Provider/OpenGPT.py +1 -1
  74. webscout/Provider/TTI/AiForce/README.md +159 -0
  75. webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
  76. webscout/Provider/TTI/ImgSys/README.md +174 -0
  77. webscout/Provider/TTI/MagicStudio/README.md +101 -0
  78. webscout/Provider/TTI/Nexra/README.md +155 -0
  79. webscout/Provider/TTI/PollinationsAI/README.md +146 -0
  80. webscout/Provider/TTI/README.md +128 -0
  81. webscout/Provider/TTI/aiarta/README.md +134 -0
  82. webscout/Provider/TTI/artbit/README.md +100 -0
  83. webscout/Provider/TTI/fastflux/README.md +129 -0
  84. webscout/Provider/TTI/huggingface/README.md +114 -0
  85. webscout/Provider/TTI/piclumen/README.md +161 -0
  86. webscout/Provider/TTI/pixelmuse/README.md +79 -0
  87. webscout/Provider/TTI/talkai/README.md +139 -0
  88. webscout/Provider/TTS/README.md +192 -0
  89. webscout/Provider/TTS/__init__.py +2 -1
  90. webscout/Provider/TTS/speechma.py +500 -100
  91. webscout/Provider/TTS/sthir.py +94 -0
  92. webscout/Provider/TeachAnything.py +3 -7
  93. webscout/Provider/TextPollinationsAI.py +4 -2
  94. webscout/Provider/{aimathgpt.py → UNFINISHED/ChatHub.py} +88 -68
  95. webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
  96. webscout/Provider/UNFINISHED/oivscode.py +351 -0
  97. webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
  98. webscout/Provider/Writecream.py +11 -2
  99. webscout/Provider/__init__.py +8 -14
  100. webscout/Provider/ai4chat.py +4 -58
  101. webscout/Provider/asksteve.py +17 -9
  102. webscout/Provider/cerebras.py +3 -1
  103. webscout/Provider/koala.py +170 -268
  104. webscout/Provider/llmchat.py +3 -0
  105. webscout/Provider/lmarena.py +198 -0
  106. webscout/Provider/meta.py +7 -4
  107. webscout/Provider/samurai.py +223 -0
  108. webscout/Provider/scira_chat.py +4 -2
  109. webscout/Provider/typefully.py +23 -151
  110. webscout/__init__.py +4 -2
  111. webscout/cli.py +3 -28
  112. webscout/conversation.py +35 -35
  113. webscout/litagent/Readme.md +276 -0
  114. webscout/scout/README.md +402 -0
  115. webscout/swiftcli/Readme.md +323 -0
  116. webscout/version.py +1 -1
  117. webscout/webscout_search.py +2 -182
  118. webscout/webscout_search_async.py +1 -179
  119. webscout/zeroart/README.md +89 -0
  120. webscout/zeroart/__init__.py +134 -54
  121. webscout/zeroart/base.py +19 -13
  122. webscout/zeroart/effects.py +101 -99
  123. webscout/zeroart/fonts.py +1239 -816
  124. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/METADATA +116 -74
  125. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/RECORD +130 -103
  126. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
  127. webscout-8.2.8.dist-info/entry_points.txt +3 -0
  128. webscout-8.2.8.dist-info/top_level.txt +1 -0
  129. webscout/Provider/AISEARCH/ISou.py +0 -256
  130. webscout/Provider/ElectronHub.py +0 -773
  131. webscout/Provider/Free2GPT.py +0 -241
  132. webscout/Provider/GPTWeb.py +0 -249
  133. webscout/Provider/bagoodex.py +0 -145
  134. webscout/Provider/geminiprorealtime.py +0 -160
  135. webscout/scout/core.py +0 -881
  136. webscout-8.2.6.dist-info/entry_points.txt +0 -3
  137. webscout-8.2.6.dist-info/top_level.txt +0 -2
  138. webstoken/__init__.py +0 -30
  139. webstoken/classifier.py +0 -189
  140. webstoken/keywords.py +0 -216
  141. webstoken/language.py +0 -128
  142. webstoken/ner.py +0 -164
  143. webstoken/normalizer.py +0 -35
  144. webstoken/processor.py +0 -77
  145. webstoken/sentiment.py +0 -206
  146. webstoken/stemmer.py +0 -73
  147. webstoken/tagger.py +0 -60
  148. webstoken/tokenizer.py +0 -158
  149. /webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +0 -0
  150. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
webscout/Provider/TTS/sthir.py (new file)
@@ -0,0 +1,94 @@
+ import time
+ import requests
+ import pathlib
+ import tempfile
+ from io import BytesIO
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+ from webscout.Provider.TTS import utils
+ from webscout.Provider.TTS.base import BaseTTSProvider
+
+ class SthirTTS(BaseTTSProvider):
+     """
+     Text-to-speech provider using the Sthir.org TTS API.
+     """
+     headers = {
+         "Content-Type": "application/json",
+         "User-Agent": LitAgent().random(),
+     }
+
+     all_voices = {
+         "aura-luna-en": "Sophie (American, Feminine)",
+         "aura-stella-en": "Isabella (American, Feminine)",
+         "aura-athena-en": "Emma (British, Feminine)",
+         "aura-hera-en": "Victoria (American, Feminine)",
+         "aura-asteria-en": "Maria (American, Feminine)",
+         "aura-arcas-en": "Alex (American, Masculine)",
+         "aura-zeus-en": "Thomas (American, Masculine)",
+         "aura-perseus-en": "Michael (American, Masculine)",
+         "aura-angus-en": "Connor (Irish, Masculine)",
+         "aura-orpheus-en": "James (American, Masculine)",
+         "aura-helios-en": "William (British, Masculine)",
+         "aura-orion-en": "Daniel (American, Masculine)",
+     }
+
+     def __init__(self, timeout: int = 20, proxies: dict = None):
+         """Initializes the SthirTTS client."""
+         super().__init__()
+         self.api_url = "https://sthir.org/com.api/tts-api.php"
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+         if proxies:
+             self.session.proxies.update(proxies)
+         self.timeout = timeout
+
+     def tts(self, text: str, voice: str = "aura-luna-en") -> str:
+         """
+         Converts text to speech using the Sthir.org API and saves it to a file.
+
+         Args:
+             text (str): The text to convert to speech
+             voice (str): The voice to use for TTS (default: "aura-luna-en")
+
+         Returns:
+             str: Path to the generated audio file
+
+         Raises:
+             exceptions.FailedToGenerateResponseError: If there is an error generating or saving the audio.
+         """
+         assert (
+             voice in self.all_voices
+         ), f"Voice '{voice}' not one of [{', '.join(self.all_voices.keys())}]"
+
+         filename = pathlib.Path(tempfile.mktemp(suffix=".mp3", dir=self.temp_dir))
+         payload = {"text": text, "voice": voice}
+
+         try:
+             response = self.session.post(
+                 self.api_url,
+                 headers=self.headers,
+                 json=payload,
+                 timeout=self.timeout
+             )
+             if response.status_code == 200 and len(response.content) > 0:
+                 with open(filename, "wb") as f:
+                     f.write(response.content)
+                 return filename.as_posix()
+             else:
+                 try:
+                     error_data = response.json()
+                     if "error" in error_data:
+                         raise exceptions.FailedToGenerateResponseError(f"API error: {error_data['error']}")
+                 except Exception:
+                     pass
+                 raise exceptions.FailedToGenerateResponseError(f"Sthir API error: {response.text}")
+         except Exception as e:
+             raise exceptions.FailedToGenerateResponseError(f"Failed to perform the operation: {e}")
+
+ # Example usage
+ if __name__ == "__main__":
+     sthir = SthirTTS()
+     text = "This is a test of the Sthir.org text-to-speech API. It supports multiple voices."
+     audio_file = sthir.tts(text, voice="aura-luna-en")
+     print(f"Audio saved to: {audio_file}")
webscout/Provider/TeachAnything.py
@@ -163,18 +163,16 @@ class TeachAnything(Provider):
          stream: bool = False, # Keep stream param for interface consistency
          optimizer: str = None,
          conversationally: bool = False,
-     ) -> str:
-         """Generate response `str`
+     ) -> Union[str, Any]:
+         """Generate response `str` or yield for streaming compatibility
          Args:
              prompt (str): Prompt to be send.
              stream (bool, optional): Flag for streaming response. Defaults to False.
              optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
              conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
          Returns:
-             str: Response generated
+             str or generator: Response generated
          """
-
-         # Since ask() now handles both stream=True/False by returning the full response dict/str:
          response_data = self.ask(
              prompt,
              stream=False, # Call ask in non-stream mode internally
@@ -182,13 +180,11 @@ class TeachAnything(Provider):
              optimizer=optimizer,
              conversationally=conversationally
          )
-         # If stream=True was requested, simulate streaming by yielding the full message at once
          if stream:
              def stream_wrapper():
                  yield self.get_message(response_data)
              return stream_wrapper()
          else:
-             # If stream=False, return the full message directly
              return self.get_message(response_data)
 
      def get_message(self, response: Union[dict, str]) -> str:
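
After this change chat() always runs ask() in non-stream mode and, when stream=True is requested, wraps the full message in a one-shot generator, so streaming call sites keep working unchanged. A usage sketch under that reading (assuming the provider is default-constructible, as other providers in this package are):

from webscout.Provider.TeachAnything import TeachAnything

ai = TeachAnything()

# stream=False: a plain string comes back.
print(ai.chat("Define recursion"))

# stream=True: a generator that yields the whole reply as a single chunk.
for chunk in ai.chat("Define recursion", stream=True):
    print(chunk, end="", flush=True)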
webscout/Provider/TextPollinationsAI.py
@@ -16,24 +16,26 @@ class TextPollinationsAI(Provider):
 
      AVAILABLE_MODELS = [
          "openai",
+         "openai-fast",
          "openai-large",
+         "openai-roblox",
          "qwen-coder",
          "llama",
          "llamascout",
          "mistral",
          "unity",
+         "mirexa",
          "midijourney",
          "rtist",
          "searchgpt",
          "evil",
          "deepseek-reasoning",
-         "deepseek-reasoning-large",
          "phi",
-         "llama-vision",
          "hormoz",
          "hypnosis-tracy",
          "deepseek",
          "sur",
+         "bidara",
          "openai-audio",
      ]
      _models_url = "https://text.pollinations.ai/models"
webscout/Provider/aimathgpt.py → webscout/Provider/UNFINISHED/ChatHub.py
@@ -8,17 +8,30 @@ from webscout.AIutel import Conversation
  from webscout.AIutel import AwesomePrompts, sanitize_stream
  from webscout.AIbase import Provider, AsyncProvider
  from webscout import exceptions
- from webscout.litagent import LitAgent
 
- class AIMathGPT(Provider):
+ class ChatHub(Provider):
      """
-     A class to interact with the AIMathGPT API.
+     A class to interact with the ChatHub API.
      """
 
+     AVAILABLE_MODELS = [
+         'meta/llama3.1-8b',
+         'mistral/mixtral-8x7b',
+         'google/gemma-2',
+         'perplexity/sonar-online',
+     ]
+     model_aliases = { # Aliases for shorter model names
+         "llama3.1-8b": 'meta/llama3.1-8b',
+         "mixtral-8x7b": 'mistral/mixtral-8x7b',
+         "gemma-2": 'google/gemma-2',
+         "sonar-online": 'perplexity/sonar-online',
+     }
+
+
      def __init__(
          self,
          is_conversation: bool = True,
-         max_tokens: int = 2049,
+         max_tokens: int = 2049,
          timeout: int = 30,
          intro: str = None,
          filepath: str = None,
@@ -26,47 +39,25 @@ class AIMathGPT(Provider):
          proxies: dict = {},
          history_offset: int = 10250,
          act: str = None,
-         model: str = "llama3", # Default model
-         system_prompt: str = "You are a helpful AI assistant.",
+         model: str = "sonar-online",
      ):
-         """
-         Initializes the AIMathGPT API with the given parameters.
-         """
-         self.url = "https://aimathgpt.forit.ai/api/ai"
+         """Initializes the ChatHub API client."""
+         self.url = "https://app.chathub.gg"
+         self.api_endpoint = "https://app.chathub.gg/api/v3/chat/completions"
          self.headers = {
-             "authority": "aimathgpt.forit.ai",
-             "method": "POST",
-             "path": "/api/ai",
-             "scheme": "https",
-             "accept": "*/*",
-             "accept-encoding": "gzip, deflate, br, zstd",
-             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
-             "content-type": "application/json",
-             "cookie": (
-                 "NEXT_LOCALE=en; _ga=GA1.1.1515823701.1726936796; "
-                 "_ga_1F3ZVN96B1=GS1.1.1726936795.1.1.1726936833.0.0.0"
-             ),
-             "dnt": "1",
-             "origin": "https://aimathgpt.forit.ai",
-             "priority": "u=1, i",
-             "referer": "https://aimathgpt.forit.ai/?ref=taaft&utm_source=taaft&utm_medium=referral",
-             "sec-ch-ua": (
-                 "\"Microsoft Edge\";v=\"129\", \"Not=A?Brand\";v=\"8\", \"Chromium\";v=\"129\""
-             ),
-             "sec-ch-ua-mobile": "?0",
-             "sec-ch-ua-platform": "\"Windows\"",
-             "sec-fetch-dest": "empty",
-             "sec-fetch-mode": "cors",
-             "sec-fetch-site": "same-origin",
-             "user-agent": LitAgent().random(),
+             'Accept': '*/*',
+             'Accept-Language': 'en-US,en;q=0.9',
+             'Content-Type': 'application/json',
+             'Origin': self.url,
+             'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
+             'X-App-Id': 'web'
          }
          self.session = requests.Session()
-         self.session.headers.update(self.headers)
-         self.session.proxies.update(proxies)
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
          self.timeout = timeout
          self.last_response = {}
-         self.model = model
-         self.system_prompt = system_prompt
+
          self.is_conversation = is_conversation
          self.max_tokens_to_sample = max_tokens
          self.__available_optimizers = (
@@ -81,11 +72,29 @@ class AIMathGPT(Provider):
              if act
              else intro or Conversation.intro
          )
+
          self.conversation = Conversation(
              is_conversation, self.max_tokens_to_sample, filepath, update_file
          )
          self.conversation.history_offset = history_offset
 
+         #Resolve the model
+         self.model = self.get_model(model)
+
+
+     def get_model(self, model: str) -> str:
+         """
+         Resolves the model name using aliases or defaults.
+         """
+
+         if model in self.AVAILABLE_MODELS:
+             return model
+         elif model in self.model_aliases:
+             return self.model_aliases[model]
+         else:
+             print(f"Model '{model}' not found. Using default model '{self.default_model}'.")
+             return self.default_model # Use class-level default
+
      def ask(
          self,
          prompt: str,
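
get_model() resolves either a canonical name or a short alias; note that its fallback references self.default_model, which no hunk in this diff defines, so an unknown model name would raise AttributeError unless default_model is set elsewhere in the file. Alias resolution, sketched (import path per the rename shown in the file list):

from webscout.Provider.UNFINISHED.ChatHub import ChatHub

bot = ChatHub()                  # default model "sonar-online"
print(bot.model)                 # 'perplexity/sonar-online' via model_aliases

bot = ChatHub(model="gemma-2")   # short alias
print(bot.model)                 # 'google/gemma-2'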
@@ -93,10 +102,9 @@ class AIMathGPT(Provider):
          raw: bool = False,
          optimizer: str = None,
          conversationally: bool = False,
-     ) -> Union[Dict, Generator]:
-         """Sends a chat completion request to the AIMathGPT API."""
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+     ) -> Union[Dict[str, Any], Generator]:
 
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
          if optimizer:
              if optimizer in self.__available_optimizers:
                  conversation_prompt = getattr(Optimizers, optimizer)(
@@ -106,39 +114,44 @@ class AIMathGPT(Provider):
              raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
 
 
-         payload = {
-             "messages": [
-                 {"role": "system", "content": self.system_prompt},
-                 {"role": "user", "content": conversation_prompt},
-             ],
+         data = {
              "model": self.model,
+             "messages": [{"role": "user", "content": conversation_prompt}],
+             "tools": []
          }
 
+         # Set the Referer header dynamically based on the resolved model
+         self.headers['Referer'] = f"{self.url}/chat/{self.model}"
+
 
          def for_stream():
              try:
-                 with requests.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
-                     if response.status_code != 200:
-                         raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}: {response.text}")
-
+                 with requests.post(self.api_endpoint, headers=self.headers, json=data, stream=True, timeout=self.timeout) as response:
+                     response.raise_for_status()
                      streaming_text = ""
+
                      for line in response.iter_lines(decode_unicode=True):
                          if line:
-                             try:
-                                 data = json.loads(line)
-                                 if 'result' in data and 'response' in data['result']:
-                                     content = data['result']['response']
-                                     streaming_text += content
-                                     resp = dict(text=content) # Yield only the new content
-                                     yield resp if raw else resp
-                                 else:
-                                     pass
-                             except json.JSONDecodeError:
-                                 pass
+                             decoded_line = line.strip()
+                             if decoded_line.startswith('data:'):
+                                 data_str = decoded_line[5:].strip()
+                                 if data_str == '[DONE]':
+                                     break
+                                 try:
+                                     data_json = json.loads(data_str)
+                                     text_delta = data_json.get('textDelta')
+                                     if text_delta:
+                                         streaming_text += text_delta
+                                         resp = dict(text=text_delta)
+                                         yield resp if raw else resp
+
+                                 except json.JSONDecodeError:
+                                     continue
                      self.conversation.update_chat_history(prompt, streaming_text)
                      self.last_response.update({"text": streaming_text})
              except requests.exceptions.RequestException as e:
-                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+                 raise exceptions.FailedToGenerateResponseError(f"Request error: {e}")
+
 
          def for_non_stream():
              for _ in for_stream():
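
The rewritten for_stream() expects ChatHub's SSE framing: data: lines carrying JSON objects with a textDelta field, terminated by data: [DONE]. A self-contained sketch of that parsing logic, decoupled from the class (the sample lines are illustrative, not captured traffic):

import json

def parse_chathub_sse(lines):
    """Yield textDelta strings from 'data: ...' lines until '[DONE]'."""
    for line in lines:
        line = line.strip()
        if not line.startswith("data:"):
            continue
        data_str = line[5:].strip()
        if data_str == "[DONE]":
            break
        try:
            delta = json.loads(data_str).get("textDelta")
        except json.JSONDecodeError:
            continue
        if delta:
            yield delta

sample = ['data: {"textDelta": "Hel"}', 'data: {"textDelta": "lo"}', 'data: [DONE]']
print("".join(parse_chathub_sse(sample)))  # -> Hello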
@@ -149,6 +162,7 @@ class AIMathGPT(Provider):
 
 
 
+
      def chat(
          self,
          prompt: str,
@@ -156,6 +170,7 @@ class AIMathGPT(Provider):
          optimizer: str = None,
          conversationally: bool = False,
      ) -> Union[str, Generator]:
+         """Generate response `str`"""
 
          def for_stream():
              for response in self.ask(
@@ -166,24 +181,29 @@ class AIMathGPT(Provider):
          def for_non_stream():
              return self.get_message(
                  self.ask(
-                     prompt, stream=False, optimizer=optimizer, conversationally=conversationally
+                     prompt,
+                     stream=False, # Pass stream=False
+                     optimizer=optimizer,
+                     conversationally=conversationally,
                  )
              )
 
          return for_stream() if stream else for_non_stream()
 
+
+
      def get_message(self, response: dict) -> str:
          """Retrieves message only from response"""
          assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
+         return response.get("text", "")
 
 
  if __name__ == "__main__":
      from rich import print
-     bot = AIMathGPT()
+     bot = ChatHub()
      try:
-         response = bot.chat("What is the capital of France?", stream=True)
+         response = bot.chat("tell me about Abhay koul, HelpingAI", stream=True)
          for chunk in response:
              print(chunk, end="", flush=True)
      except Exception as e:
-         print(f"An error occurred: {e}")
+         print(f"An error occurred: {e}")
webscout/Provider/UNFINISHED/liner_api_request.py (new file)
@@ -0,0 +1,263 @@
+ import requests
+ import json
+ from typing import Dict, Optional, Generator, Union, Any
+ from uuid import uuid4
+ import time
+ import base64
+ import random
+
+ from webscout.AIbase import AISearch
+ from webscout import exceptions
+ from webscout import LitAgent
+
+
+ class Response:
+     """A wrapper class for Liner API responses.
+
+     This class automatically converts response objects to their text representation
+     when printed or converted to string.
+
+     Attributes:
+         text (str): The text content of the response
+
+     Example:
+         >>> response = Response("Hello, world!")
+         >>> print(response)
+         Hello, world!
+         >>> str(response)
+         'Hello, world!'
+     """
+     def __init__(self, text: str):
+         self.text = text
+
+     def __str__(self):
+         return self.text
+
+     def __repr__(self):
+         return self.text
+
+
+ class Liner(AISearch):
+     """A class to interact with the Liner AI search API.
+
+     Liner provides a powerful search interface that returns AI-generated responses
+     based on web content. It supports both streaming and non-streaming responses.
+
+     Basic Usage:
+         >>> from webscout import Liner
+         >>> ai = Liner(cookies_path="cookies.json")
+         >>> # Non-streaming example
+         >>> response = ai.search("What is Python?")
+         >>> print(response)
+         Python is a high-level programming language...
+
+         >>> # Streaming example
+         >>> for chunk in ai.search("Tell me about AI", stream=True):
+         ...     print(chunk, end="", flush=True)
+         Artificial Intelligence is...
+
+         >>> # Raw response format
+         >>> for chunk in ai.search("Hello", stream=True, raw=True):
+         ...     print(chunk)
+         {'text': 'Hello'}
+         {'text': ' there!'}
+
+     Args:
+         cookies_path (str): Path to the cookies JSON file
+         timeout (int, optional): Request timeout in seconds. Defaults to 30.
+         proxies (dict, optional): Proxy configuration for requests. Defaults to None.
+         deep_search (bool, optional): Enable deep research mode. Defaults to True.
+         reasoning_mode (bool, optional): Enable reasoning mode. Defaults to False.
+     """
+
+     def __init__(
+         self,
+         cookies_path: str,
+         timeout: int = 600,
+         proxies: Optional[dict] = None,
+         deep_search: bool = True,
+         reasoning_mode: bool = False,
+     ):
+         """Initialize the Liner API client.
+
+         Args:
+             cookies_path (str): Path to the cookies JSON file
+             timeout (int, optional): Request timeout in seconds. Defaults to 30.
+             proxies (dict, optional): Proxy configuration for requests. Defaults to None.
+             deep_search (bool, optional): Enable deep research mode. Defaults to True.
+             reasoning_mode (bool, optional): Enable reasoning mode. Defaults to False.
+         """
+         self.session = requests.Session()
+         self.chat_endpoint = "https://getliner.com/lisa/v1/answer"
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.cookies_path = cookies_path
+         self.deep_search = deep_search
+         self.reasoning_mode = reasoning_mode
+
+         # Generate random IDs
+         self.space_id = random.randint(10000000, 99999999)
+         self.thread_id = random.randint(10000000, 99999999)
+         self.user_message_id = random.randint(100000000, 999999999)
+         self.user_id = random.randint(1000000, 9999999)
+
+         self.headers = {
+             "accept": "text/event-stream",
+             "accept-encoding": "gzip, deflate, br, zstd",
+             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+             "content-type": "application/json",
+             "dnt": "1",
+             "origin": "https://getliner.com",
+             "referer": f"https://getliner.com/search/s/{self.space_id}/t/t_{uuid4()}?mode=temp&msg-entry-type=main&build-id=kwJaNRjnCKjh7PijZgqV2",
+             "sec-ch-ua": '"Chromium";v="134", "Not:A-Brand";v="24", "Microsoft Edge";v="134"',
+             "sec-ch-ua-mobile": "?0",
+             "sec-ch-ua-platform": '"Windows"',
+             "sec-fetch-dest": "empty",
+             "sec-fetch-mode": "cors",
+             "sec-fetch-site": "same-origin",
+             "sec-gpc": "1",
+             "user-agent": LitAgent().random()
+         }
+
+         # Load cookies from JSON file
+         self.cookies = self._load_cookies()
+         if not self.cookies:
+             raise ValueError("Failed to load cookies from file")
+
+         # Set headers and cookies in session
+         self.session.headers.update(self.headers)
+         self.session.cookies.update(self.cookies)
+         self.session.proxies = proxies or {}
+
+     def _load_cookies(self) -> Optional[Dict[str, str]]:
+         """Load cookies from a JSON file.
+
+         Returns:
+             Optional[Dict[str, str]]: Dictionary of cookies if successful, None otherwise
+         """
+         try:
+             with open(self.cookies_path, 'r') as f:
+                 cookies_data = json.load(f)
+                 return {cookie['name']: cookie['value'] for cookie in cookies_data}
+         except FileNotFoundError:
+             print(f"Error: {self.cookies_path} file not found!")
+             return None
+         except json.JSONDecodeError:
+             print(f"Error: Invalid JSON format in {self.cookies_path}!")
+             return None
+         except KeyError:
+             print(f"Error: Invalid cookie format in {self.cookies_path}! Each cookie must have 'name' and 'value' keys.")
+             return None
+
+     def search(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+     ) -> Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
+         """Search using the Liner API and get AI-generated responses.
+
+         Args:
+             prompt (str): The search query or prompt to send to the API.
+             stream (bool, optional): If True, yields response chunks as they arrive.
+                 If False, returns complete response. Defaults to False.
+             raw (bool, optional): If True, returns raw response dictionaries with 'text' key.
+                 If False, returns Response objects that convert to text automatically.
+                 Defaults to False.
+
+         Returns:
+             Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
+                 - If stream=False: Returns complete response
+                 - If stream=True: Yields response chunks as they arrive
+
+         Raises:
+             APIConnectionError: If the API request fails
+         """
+         payload = {
+             "spaceId": self.space_id,
+             "threadId": self.thread_id,
+             "userMessageId": self.user_message_id,
+             "userId": self.user_id,
+             "query": prompt,
+             "agentId": "liner",
+             "platform": "web",
+             "regenerate": False,
+             "showReferenceChunks": True,
+             "mode": "general",
+             "answerMode": "search",
+             "isReasoningMode": self.reasoning_mode,
+             "experimentId": random.randint(80, 90),
+             "modelType": "liner",
+             "experimentVariants": [],
+             "isDeepResearchMode": self.deep_search
+         }
+
+         def for_stream():
+             try:
+                 with self.session.post(
+                     self.chat_endpoint,
+                     json=payload,
+                     stream=True,
+                     timeout=self.timeout,
+                 ) as response:
+                     if not response.ok:
+                         raise exceptions.APIConnectionError(
+                             f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                         )
+
+                     current_reasoning = ""
+                     current_answer = ""
+
+                     for line in response.iter_lines(decode_unicode=True):
+                         if line == "event:finish_answer":
+                             break
+
+                         if line.startswith('data:'):
+                             try:
+                                 data = json.loads(line[5:])  # Remove 'data:' prefix
+
+                                 # Handle reasoning updates if enabled
+                                 if self.reasoning_mode and 'reasoning' in data:
+                                     current_reasoning += data['reasoning']
+                                     if raw:
+                                         yield {"text": data['reasoning']}
+                                     else:
+                                         yield Response(data['reasoning'])
+
+                                 # Handle answer updates
+                                 if 'answer' in data:
+                                     current_answer += data['answer']
+                                     if raw:
+                                         yield {"text": data['answer']}
+                                     else:
+                                         yield Response(data['answer'])
+
+                             except json.JSONDecodeError:
+                                 continue
+
+             except requests.exceptions.RequestException as e:
+                 raise exceptions.APIConnectionError(f"Request failed: {e}")
+
+         def for_non_stream():
+             full_response = ""
+             for chunk in for_stream():
+                 if raw:
+                     yield chunk
+                 else:
+                     full_response += str(chunk)
+
+             if not raw:
+                 self.last_response = Response(full_response)
+                 return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+
+ if __name__ == "__main__":
+     from rich import print
+
+     ai = Liner(cookies_path="cookies.json")
+     response = ai.search(input(">>> "), stream=True, raw=False)
+     for chunk in response:
+         print(chunk, end="", flush=True)