webscout 7.6__py3-none-any.whl → 7.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (124)
  1. webscout/AIutel.py +2 -1
  2. webscout/Bard.py +14 -11
  3. webscout/DWEBS.py +431 -415
  4. webscout/Extra/autocoder/autocoder_utiles.py +183 -47
  5. webscout/Extra/autocoder/rawdog.py +848 -649
  6. webscout/Extra/gguf.py +682 -652
  7. webscout/Provider/AI21.py +1 -1
  8. webscout/Provider/AISEARCH/DeepFind.py +2 -2
  9. webscout/Provider/AISEARCH/ISou.py +2 -23
  10. webscout/Provider/AISEARCH/felo_search.py +6 -6
  11. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  12. webscout/Provider/Aitopia.py +292 -0
  13. webscout/Provider/AllenAI.py +5 -22
  14. webscout/Provider/Andi.py +3 -3
  15. webscout/Provider/C4ai.py +1 -1
  16. webscout/Provider/ChatGPTClone.py +226 -0
  17. webscout/Provider/ChatGPTES.py +3 -5
  18. webscout/Provider/ChatGPTGratis.py +4 -4
  19. webscout/Provider/Chatify.py +2 -2
  20. webscout/Provider/Cloudflare.py +3 -2
  21. webscout/Provider/DARKAI.py +3 -2
  22. webscout/Provider/DeepSeek.py +2 -2
  23. webscout/Provider/Deepinfra.py +1 -1
  24. webscout/Provider/EDITEE.py +1 -1
  25. webscout/Provider/ElectronHub.py +178 -96
  26. webscout/Provider/ExaChat.py +310 -0
  27. webscout/Provider/Free2GPT.py +2 -2
  28. webscout/Provider/Gemini.py +5 -19
  29. webscout/Provider/GithubChat.py +1 -1
  30. webscout/Provider/Glider.py +12 -8
  31. webscout/Provider/Groq.py +3 -3
  32. webscout/Provider/HF_space/qwen_qwen2.py +1 -1
  33. webscout/Provider/HeckAI.py +1 -1
  34. webscout/Provider/HuggingFaceChat.py +1 -1
  35. webscout/Provider/Hunyuan.py +272 -0
  36. webscout/Provider/Jadve.py +3 -3
  37. webscout/Provider/Koboldai.py +3 -3
  38. webscout/Provider/LambdaChat.py +391 -0
  39. webscout/Provider/Llama.py +3 -5
  40. webscout/Provider/Llama3.py +4 -12
  41. webscout/Provider/Marcus.py +3 -3
  42. webscout/Provider/OLLAMA.py +260 -36
  43. webscout/Provider/Openai.py +7 -3
  44. webscout/Provider/PI.py +1 -1
  45. webscout/Provider/Perplexitylabs.py +1 -1
  46. webscout/Provider/Phind.py +1 -1
  47. webscout/Provider/PizzaGPT.py +1 -1
  48. webscout/Provider/QwenLM.py +4 -7
  49. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +21 -46
  50. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +37 -49
  51. webscout/Provider/TTI/ImgSys/__init__.py +23 -0
  52. webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
  53. webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
  54. webscout/Provider/TTI/__init__.py +3 -1
  55. webscout/Provider/TTI/artbit/async_artbit.py +4 -33
  56. webscout/Provider/TTI/artbit/sync_artbit.py +4 -32
  57. webscout/Provider/TTI/fastflux/async_fastflux.py +6 -2
  58. webscout/Provider/TTI/fastflux/sync_fastflux.py +7 -2
  59. webscout/Provider/TTI/huggingface/async_huggingface.py +1 -1
  60. webscout/Provider/TTI/huggingface/sync_huggingface.py +1 -1
  61. webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
  62. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
  63. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
  64. webscout/Provider/TTI/talkai/sync_talkai.py +1 -1
  65. webscout/Provider/TTS/utils.py +1 -1
  66. webscout/Provider/TeachAnything.py +1 -1
  67. webscout/Provider/TextPollinationsAI.py +4 -4
  68. webscout/Provider/TwoAI.py +1 -2
  69. webscout/Provider/Venice.py +4 -2
  70. webscout/Provider/VercelAI.py +234 -0
  71. webscout/Provider/WebSim.py +228 -0
  72. webscout/Provider/WiseCat.py +10 -12
  73. webscout/Provider/Youchat.py +1 -1
  74. webscout/Provider/__init__.py +22 -1
  75. webscout/Provider/ai4chat.py +1 -1
  76. webscout/Provider/aimathgpt.py +2 -6
  77. webscout/Provider/akashgpt.py +1 -1
  78. webscout/Provider/askmyai.py +4 -4
  79. webscout/Provider/asksteve.py +203 -0
  80. webscout/Provider/bagoodex.py +2 -2
  81. webscout/Provider/cerebras.py +1 -1
  82. webscout/Provider/chatglm.py +4 -4
  83. webscout/Provider/cleeai.py +1 -0
  84. webscout/Provider/copilot.py +427 -415
  85. webscout/Provider/elmo.py +1 -1
  86. webscout/Provider/flowith.py +14 -3
  87. webscout/Provider/freeaichat.py +57 -31
  88. webscout/Provider/gaurish.py +3 -5
  89. webscout/Provider/geminiprorealtime.py +1 -1
  90. webscout/Provider/granite.py +4 -4
  91. webscout/Provider/hermes.py +5 -5
  92. webscout/Provider/julius.py +1 -1
  93. webscout/Provider/koala.py +1 -1
  94. webscout/Provider/labyrinth.py +239 -0
  95. webscout/Provider/learnfastai.py +28 -15
  96. webscout/Provider/lepton.py +1 -1
  97. webscout/Provider/llama3mitril.py +4 -4
  98. webscout/Provider/llamatutor.py +1 -1
  99. webscout/Provider/llmchat.py +3 -3
  100. webscout/Provider/meta.py +1 -1
  101. webscout/Provider/multichat.py +10 -10
  102. webscout/Provider/promptrefine.py +1 -1
  103. webscout/Provider/searchchat.py +293 -0
  104. webscout/Provider/sonus.py +208 -0
  105. webscout/Provider/talkai.py +2 -2
  106. webscout/Provider/turboseek.py +1 -1
  107. webscout/Provider/tutorai.py +1 -1
  108. webscout/Provider/typegpt.py +6 -43
  109. webscout/Provider/uncovr.py +299 -0
  110. webscout/Provider/x0gpt.py +1 -1
  111. webscout/__init__.py +36 -36
  112. webscout/cli.py +293 -283
  113. webscout/litagent/agent.py +14 -9
  114. webscout/tempid.py +11 -11
  115. webscout/utils.py +2 -2
  116. webscout/version.py +1 -1
  117. webscout/webscout_search.py +1282 -1223
  118. webscout/webscout_search_async.py +813 -692
  119. {webscout-7.6.dist-info → webscout-7.8.dist-info}/METADATA +76 -44
  120. {webscout-7.6.dist-info → webscout-7.8.dist-info}/RECORD +124 -106
  121. {webscout-7.6.dist-info → webscout-7.8.dist-info}/LICENSE.md +0 -0
  122. {webscout-7.6.dist-info → webscout-7.8.dist-info}/WHEEL +0 -0
  123. {webscout-7.6.dist-info → webscout-7.8.dist-info}/entry_points.txt +0 -0
  124. {webscout-7.6.dist-info → webscout-7.8.dist-info}/top_level.txt +0 -0
webscout/Provider/Hunyuan.py
@@ -0,0 +1,272 @@
+ import requests
+ import json
+ import os
+ from typing import Any, Dict, Optional, Generator, Union
+ import time
+ import uuid
+ import re
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+ class Hunyuan(Provider):
+     """
+     A class to interact with the Tencent Hunyuan API with LitAgent user-agent.
+     """
+
+     AVAILABLE_MODELS = [
+         "hunyuan-t1-latest",
+         # Add more models as they become available
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2048,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "hunyuan-t1-latest",
+         browser: str = "chrome",
+         api_key: str = None,
+         system_prompt: str = "You are a helpful assistant.",
+     ):
+
+         """Initializes the Hunyuan API client."""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.url = "https://llm.hunyuan.tencent.com/aide/api/v2/triton_image/demo_text_chat/"
+
+         # Initialize LitAgent for user agent generation
+         self.agent = LitAgent()
+         # Use fingerprinting to create a consistent browser identity
+         self.fingerprint = self.agent.generate_fingerprint(browser)
+
+         # Use the fingerprint for headers
+         self.headers = {
+             "Accept": "*/*",
+             "Accept-Encoding": "gzip, deflate, br, zstd",
+             "Accept-Language": self.fingerprint["accept_language"],
+             "Content-Type": "application/json",
+             "DNT": "1",
+             "Origin": "https://llm.hunyuan.tencent.com",
+             "Referer": "https://llm.hunyuan.tencent.com/",
+             "Sec-CH-UA": f'"{self.fingerprint["sec_ch_ua"]}"' or '"Chromium";v="134", "Not:A-Brand";v="24", "Microsoft Edge";v="134"',
+             "Sec-CH-UA-Mobile": "?0",
+             "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+             "Sec-Fetch-Dest": "empty",
+             "Sec-Fetch-Mode": "cors",
+             "Sec-Fetch-Site": "same-origin",
+             "Sec-GPC": "1",
+             "User-Agent": self.fingerprint["user_agent"],
+         }
+
+         # Add authorization if API key is provided
+         if api_key:
+             self.headers["Authorization"] = f"Bearer {api_key}"
+         else:
+             # Default test key (may not work long-term)
+             self.headers["Authorization"] = "Bearer 7auGXNATFSKl7dF"
+
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
+         self.system_message = system_prompt
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def refresh_identity(self, browser: str = None):
+         """
+         Refreshes the browser identity fingerprint.
+
+         Args:
+             browser: Specific browser to use for the new fingerprint
+         """
+         browser = browser or self.fingerprint.get("browser_type", "chrome")
+         self.fingerprint = self.agent.generate_fingerprint(browser)
+
+         # Update headers with new fingerprint
+         self.headers.update({
+             "Accept-Language": self.fingerprint["accept_language"],
+             "Sec-CH-UA": f'"{self.fingerprint["sec_ch_ua"]}"' or self.headers["Sec-CH-UA"],
+             "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+             "User-Agent": self.fingerprint["user_agent"],
+         })
+
+         # Update session headers
+         for header, value in self.headers.items():
+             self.session.headers[header] = value
+
+         return self.fingerprint
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         # Generate a unique query ID for each request
+         query_id = ''.join(re.findall(r'[a-z0-9]', str(uuid.uuid4())[:18]))
+
+
+         # Payload construction
+         payload = {
+             "stream": stream,
+             "model": self.model,
+             "query_id": query_id,
+             "messages": [
+                 {"role": "system", "content": self.system_message},
+                 {"role": "user", "content": "Always response in English\n\n" + conversation_prompt},
+             ],
+             "stream_moderation": True,
+             "enable_enhancement": False
+         }
+
+         def for_stream():
+             try:
+                 with self.session.post(self.url, data=json.dumps(payload), stream=True, timeout=self.timeout, verify=False) as response:
+                     if response.status_code != 200:
+                         raise exceptions.FailedToGenerateResponseError(
+                             f"Request failed with status code {response.status_code}"
+                         )
+
+                     streaming_text = ""
+                     for line in response.iter_lines(decode_unicode=True):
+                         if line:
+                             line = line.strip()
+                             if line.startswith("data: "):
+                                 json_str = line[6:]
+                                 if json_str == "[DONE]":
+                                     break
+                                 try:
+                                     json_data = json.loads(json_str)
+                                     if 'choices' in json_data:
+                                         choice = json_data['choices'][0]
+                                         if 'delta' in choice and 'content' in choice['delta']:
+                                             content = choice['delta']['content']
+                                             streaming_text += content
+                                             resp = dict(text=content)
+                                             yield resp if raw else resp
+                                 except json.JSONDecodeError:
+                                     continue
+
+                 self.last_response = {"text": streaming_text}
+                 self.conversation.update_chat_history(prompt, streaming_text)
+
+             except requests.RequestException as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+
+         def for_non_stream():
+             try:
+                 response = self.session.post(self.url, data=json.dumps(payload), timeout=self.timeout, verify=False)
+                 if response.status_code != 200:
+                     raise exceptions.FailedToGenerateResponseError(
+                         f"Request failed with status code {response.status_code}"
+                     )
+
+                 # Process non-streaming response (need to parse all lines)
+                 full_text = ""
+                 for line in response.text.split('\n'):
+                     if line.startswith("data: ") and line[6:] != "[DONE]":
+                         try:
+                             json_data = json.loads(line[6:])
+                             if 'choices' in json_data:
+                                 choice = json_data['choices'][0]
+                                 if 'delta' in choice and 'content' in choice['delta']:
+                                     full_text += choice['delta']['content']
+                         except json.JSONDecodeError:
+                             continue
+
+                 self.last_response = {"text": full_text}
+                 self.conversation.update_chat_history(prompt, full_text)
+                 return {"text": full_text}
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator[str, None, None]]:
+         def for_stream():
+             for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
+                 yield self.get_message(response)
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+             )
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+ if __name__ == "__main__":
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     for model in Hunyuan.AVAILABLE_MODELS:
+         try:
+             test_ai = Hunyuan(model=model, timeout=60)
+             response = test_ai.chat("Say 'Hello' in one word", stream=True)
+             response_text = ""
+             for chunk in response:
+                 response_text += chunk
+
+             if response_text and len(response_text.strip()) > 0:
+                 status = "✓"
+                 # Clean and truncate response
+                 clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                 display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"\r{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/Jadve.py
@@ -1,7 +1,7 @@
  import requests
  import json
  import re
- from typing import Any, Dict, Optional, Generator
+ from typing import Union, Any, Dict, Optional, Generator

  from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
  from webscout.AIbase import Provider
@@ -105,7 +105,7 @@ class JadveOpenAI(Provider):
          raw: bool = False,
          optimizer: str = None,
          conversationally: bool = False,
-     ) -> dict | Generator[dict, None, None]:
+     ) -> Union[dict, Generator[dict, None, None]]:
          """
          Chat with AI.

@@ -206,7 +206,7 @@ class JadveOpenAI(Provider):
          stream: bool = False,
          optimizer: str = None,
          conversationally: bool = False,
-     ) -> str | Generator[str, None, None]:
+     ) -> Union[str, Generator[str, None, None]]:
          """
          Generate a chat response (string).

webscout/Provider/Koboldai.py
@@ -5,7 +5,7 @@ from ..AIutel import Conversation
  from ..AIutel import AwesomePrompts, sanitize_stream
  from ..AIbase import Provider, AsyncProvider
  from webscout import exceptions
- from typing import Any, AsyncGenerator, Dict
+ from typing import Union, Any, AsyncGenerator, Dict
  import httpx
  #------------------------------------------------------KOBOLDAI-----------------------------------------------------------
  class KOBOLDAI(Provider):
@@ -266,7 +266,7 @@ class AsyncKOBOLDAI(AsyncProvider):
          raw: bool = False,
          optimizer: str = None,
          conversationally: bool = False,
-     ) -> dict | AsyncGenerator:
+     ) -> Union[dict, AsyncGenerator]:
          """Chat with AI asynchronously.

          Args:
@@ -338,7 +338,7 @@ class AsyncKOBOLDAI(AsyncProvider):
          stream: bool = False,
          optimizer: str = None,
          conversationally: bool = False,
-     ) -> str | AsyncGenerator:
+     ) -> Union[str, AsyncGenerator]:
          """Generate response `str` asynchronously.
          Args:
              prompt (str): Prompt to be send.
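
The annotation changes in the Jadve.py and Koboldai.py hunks (together with the added `Union` imports) replace PEP 604 `X | Y` unions with `typing.Union`. Since `X | Y` in annotations only evaluates on Python 3.10+, this reads as a compatibility fix for older interpreters; a minimal side-by-side sketch (illustrative names, not from the package):

from typing import Union, Generator

# Python 3.10+ only (PEP 604 union syntax, evaluated at runtime):
# def chat(prompt: str) -> str | Generator[str, None, None]: ...

# Portable to earlier Python versions:
def chat(prompt: str) -> Union[str, Generator[str, None, None]]:
    ...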