webscout-7.5-py3-none-any.whl → webscout-7.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (132)
  1. webscout/AIauto.py +5 -53
  2. webscout/AIutel.py +8 -318
  3. webscout/DWEBS.py +460 -489
  4. webscout/Extra/YTToolkit/YTdownloader.py +14 -53
  5. webscout/Extra/YTToolkit/transcriber.py +12 -13
  6. webscout/Extra/YTToolkit/ytapi/video.py +0 -1
  7. webscout/Extra/__init__.py +0 -1
  8. webscout/Extra/autocoder/__init__.py +9 -9
  9. webscout/Extra/autocoder/autocoder_utiles.py +193 -199
  10. webscout/Extra/autocoder/rawdog.py +789 -677
  11. webscout/Extra/gguf.py +682 -428
  12. webscout/Extra/weather.py +178 -156
  13. webscout/Extra/weather_ascii.py +70 -17
  14. webscout/Litlogger/core/logger.py +1 -2
  15. webscout/Litlogger/handlers/file.py +1 -1
  16. webscout/Litlogger/styles/formats.py +0 -2
  17. webscout/Litlogger/utils/detectors.py +0 -1
  18. webscout/Provider/AISEARCH/DeepFind.py +0 -1
  19. webscout/Provider/AISEARCH/ISou.py +1 -22
  20. webscout/Provider/AISEARCH/felo_search.py +0 -1
  21. webscout/Provider/AllenAI.py +28 -30
  22. webscout/Provider/C4ai.py +29 -11
  23. webscout/Provider/ChatGPTClone.py +226 -0
  24. webscout/Provider/ChatGPTGratis.py +24 -56
  25. webscout/Provider/DeepSeek.py +25 -17
  26. webscout/Provider/Deepinfra.py +115 -48
  27. webscout/Provider/Gemini.py +1 -1
  28. webscout/Provider/Glider.py +33 -12
  29. webscout/Provider/HF_space/qwen_qwen2.py +2 -2
  30. webscout/Provider/HeckAI.py +23 -7
  31. webscout/Provider/Hunyuan.py +272 -0
  32. webscout/Provider/Jadve.py +20 -5
  33. webscout/Provider/LambdaChat.py +391 -0
  34. webscout/Provider/Netwrck.py +42 -19
  35. webscout/Provider/OLLAMA.py +256 -32
  36. webscout/Provider/PI.py +4 -2
  37. webscout/Provider/Perplexitylabs.py +26 -6
  38. webscout/Provider/PizzaGPT.py +10 -51
  39. webscout/Provider/TTI/AiForce/async_aiforce.py +4 -37
  40. webscout/Provider/TTI/AiForce/sync_aiforce.py +41 -38
  41. webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -9
  42. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +179 -206
  43. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +180 -192
  44. webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
  45. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
  46. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
  47. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +5 -24
  48. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +2 -22
  49. webscout/Provider/TTI/__init__.py +2 -3
  50. webscout/Provider/TTI/aiarta/async_aiarta.py +14 -14
  51. webscout/Provider/TTI/aiarta/sync_aiarta.py +52 -21
  52. webscout/Provider/TTI/artbit/async_artbit.py +3 -32
  53. webscout/Provider/TTI/artbit/sync_artbit.py +3 -31
  54. webscout/Provider/TTI/fastflux/__init__.py +22 -0
  55. webscout/Provider/TTI/fastflux/async_fastflux.py +261 -0
  56. webscout/Provider/TTI/fastflux/sync_fastflux.py +252 -0
  57. webscout/Provider/TTI/piclumen/__init__.py +22 -22
  58. webscout/Provider/TTI/piclumen/sync_piclumen.py +232 -232
  59. webscout/Provider/TTS/__init__.py +2 -2
  60. webscout/Provider/TTS/deepgram.py +12 -39
  61. webscout/Provider/TTS/elevenlabs.py +14 -40
  62. webscout/Provider/TTS/gesserit.py +11 -35
  63. webscout/Provider/TTS/murfai.py +13 -39
  64. webscout/Provider/TTS/parler.py +17 -40
  65. webscout/Provider/TTS/speechma.py +180 -0
  66. webscout/Provider/TTS/streamElements.py +17 -44
  67. webscout/Provider/TextPollinationsAI.py +39 -59
  68. webscout/Provider/Venice.py +25 -8
  69. webscout/Provider/WebSim.py +227 -0
  70. webscout/Provider/WiseCat.py +27 -5
  71. webscout/Provider/Youchat.py +64 -37
  72. webscout/Provider/__init__.py +12 -7
  73. webscout/Provider/akashgpt.py +20 -5
  74. webscout/Provider/flowith.py +33 -7
  75. webscout/Provider/freeaichat.py +32 -45
  76. webscout/Provider/koala.py +20 -5
  77. webscout/Provider/labyrinth.py +239 -0
  78. webscout/Provider/learnfastai.py +28 -15
  79. webscout/Provider/llamatutor.py +1 -1
  80. webscout/Provider/llmchat.py +30 -8
  81. webscout/Provider/multichat.py +65 -9
  82. webscout/Provider/sonus.py +208 -0
  83. webscout/Provider/talkai.py +1 -0
  84. webscout/Provider/turboseek.py +3 -0
  85. webscout/Provider/tutorai.py +2 -0
  86. webscout/Provider/typegpt.py +155 -65
  87. webscout/Provider/uncovr.py +297 -0
  88. webscout/Provider/x0gpt.py +3 -1
  89. webscout/Provider/yep.py +102 -20
  90. webscout/__init__.py +3 -0
  91. webscout/cli.py +53 -40
  92. webscout/conversation.py +1 -10
  93. webscout/litagent/__init__.py +2 -2
  94. webscout/litagent/agent.py +356 -20
  95. webscout/litagent/constants.py +34 -5
  96. webscout/litprinter/__init__.py +0 -3
  97. webscout/models.py +181 -0
  98. webscout/optimizers.py +1 -1
  99. webscout/prompt_manager.py +2 -8
  100. webscout/scout/core/scout.py +1 -4
  101. webscout/scout/core/search_result.py +1 -1
  102. webscout/scout/core/text_utils.py +1 -1
  103. webscout/scout/core.py +2 -5
  104. webscout/scout/element.py +1 -1
  105. webscout/scout/parsers/html_parser.py +1 -1
  106. webscout/scout/utils.py +0 -1
  107. webscout/swiftcli/__init__.py +1 -3
  108. webscout/tempid.py +1 -1
  109. webscout/update_checker.py +1 -3
  110. webscout/version.py +1 -1
  111. webscout/webscout_search_async.py +1 -2
  112. webscout/yep_search.py +297 -297
  113. {webscout-7.5.dist-info → webscout-7.7.dist-info}/LICENSE.md +4 -4
  114. {webscout-7.5.dist-info → webscout-7.7.dist-info}/METADATA +127 -405
  115. {webscout-7.5.dist-info → webscout-7.7.dist-info}/RECORD +118 -117
  116. webscout/Extra/autollama.py +0 -231
  117. webscout/Provider/Amigo.py +0 -274
  118. webscout/Provider/Bing.py +0 -243
  119. webscout/Provider/DiscordRocks.py +0 -253
  120. webscout/Provider/TTI/blackbox/__init__.py +0 -4
  121. webscout/Provider/TTI/blackbox/async_blackbox.py +0 -212
  122. webscout/Provider/TTI/blackbox/sync_blackbox.py +0 -199
  123. webscout/Provider/TTI/deepinfra/__init__.py +0 -4
  124. webscout/Provider/TTI/deepinfra/async_deepinfra.py +0 -227
  125. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +0 -199
  126. webscout/Provider/TTI/imgninza/__init__.py +0 -4
  127. webscout/Provider/TTI/imgninza/async_ninza.py +0 -214
  128. webscout/Provider/TTI/imgninza/sync_ninza.py +0 -209
  129. webscout/Provider/TTS/voicepod.py +0 -117
  130. {webscout-7.5.dist-info → webscout-7.7.dist-info}/WHEEL +0 -0
  131. {webscout-7.5.dist-info → webscout-7.7.dist-info}/entry_points.txt +0 -0
  132. {webscout-7.5.dist-info → webscout-7.7.dist-info}/top_level.txt +0 -0
webscout/Provider/Deepinfra.py
@@ -16,46 +16,46 @@ class DeepInfra(Provider):
     """
 
     AVAILABLE_MODELS = [
-        "anthropic/claude-3-7-sonnet-latest",
+        # "anthropic/claude-3-7-sonnet-latest", # >>>> NOT WORKING
         "deepseek-ai/DeepSeek-R1",
         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
         "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
         "deepseek-ai/DeepSeek-R1-Turbo",
         "deepseek-ai/DeepSeek-V3",
-        "google/gemma-2-27b-it",
-        "google/gemma-2-9b-it",
+        # "google/gemma-2-27b-it", # >>>> NOT WORKING
+        # "google/gemma-2-9b-it", # >>>> NOT WORKING
         "google/gemma-3-27b-it",
-        "google/gemini-1.5-flash",
-        "google/gemini-1.5-flash-8b",
-        "google/gemini-2.0-flash-001",
-        "Gryphe/MythoMax-L2-13b",
-        "meta-llama/Llama-3.2-1B-Instruct",
-        "meta-llama/Llama-3.2-3B-Instruct",
+        # "google/gemini-1.5-flash", # >>>> NOT WORKING
+        # "google/gemini-1.5-flash-8b", # >>>> NOT WORKING
+        # "google/gemini-2.0-flash-001", # >>>> NOT WORKING
+        # "Gryphe/MythoMax-L2-13b", # >>>> NOT WORKING
+        # "meta-llama/Llama-3.2-1B-Instruct", # >>>> NOT WORKING
+        # "meta-llama/Llama-3.2-3B-Instruct", # >>>> NOT WORKING
         "meta-llama/Llama-3.2-90B-Vision-Instruct",
         "meta-llama/Llama-3.2-11B-Vision-Instruct",
-        "meta-llama/Meta-Llama-3-70B-Instruct",
-        "meta-llama/Meta-Llama-3-8B-Instruct",
-        "meta-llama/Meta-Llama-3.1-70B-Instruct",
+        # "meta-llama/Meta-Llama-3-70B-Instruct", # >>>> NOT WORKING
+        # "meta-llama/Meta-Llama-3-8B-Instruct", # >>>> NOT WORKING
+        # "meta-llama/Meta-Llama-3.1-70B-Instruct", # >>>> NOT WORKING
         "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
         "meta-llama/Meta-Llama-3.1-8B-Instruct",
         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
-        "meta-llama/Meta-Llama-3.1-405B-Instruct",
+        # "meta-llama/Meta-Llama-3.1-405B-Instruct", # >>>> NOT WORKING
         "microsoft/phi-4",
         "microsoft/Phi-4-multimodal-instruct",
         "microsoft/WizardLM-2-8x22B",
-        "mistralai/Mixtral-8x7B-Instruct-v0.1",
-        "mistralai/Mistral-7B-Instruct-v0.3",
-        "mistralai/Mistral-Nemo-Instruct-2407",
+        # "mistralai/Mixtral-8x7B-Instruct-v0.1", # >>>> NOT WORKING
+        # "mistralai/Mistral-7B-Instruct-v0.3", # >>>> NOT WORKING
+        # "mistralai/Mistral-Nemo-Instruct-2407", # >>>> NOT WORKING
         "mistralai/Mistral-Small-24B-Instruct-2501",
         "nvidia/Llama-3.1-Nemotron-70B-Instruct",
-        "NousResearch/Hermes-3-Llama-3.1-405B",
-        "NovaSky-AI/Sky-T1-32B-Preview",
+        # "NousResearch/Hermes-3-Llama-3.1-405B", # >>>> NOT WORKING
+        # "NovaSky-AI/Sky-T1-32B-Preview", # >>>> NOT WORKING
         "Qwen/QwQ-32B",
-        "Qwen/Qwen2.5-7B-Instruct",
+        # "Qwen/Qwen2.5-7B-Instruct", # >>>> NOT WORKING
         "Qwen/Qwen2.5-72B-Instruct",
         "Qwen/Qwen2.5-Coder-32B-Instruct",
-        "Sao10K/L3.1-70B-Euryale-v2.2",
-        "Sao10K/L3.3-70B-Euryale-v2.3",
+        # "Sao10K/L3.1-70B-Euryale-v2.2", # >>>> NOT WORKING
+        # "Sao10K/L3.3-70B-Euryale-v2.3", # >>>> NOT WORKING
         "meta-llama/Llama-3.3-70B-Instruct",
         "meta-llama/Llama-3.3-70B-Instruct-Turbo",
     ]
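Note that the dead entries are commented out rather than deleted, so they drop out of AVAILABLE_MODELS entirely and the constructor's validation now rejects them outright. A minimal sketch of the effect, assuming webscout 7.7 is installed:

    from webscout.Provider.Deepinfra import DeepInfra

    DeepInfra(model="deepseek-ai/DeepSeek-V3")   # still listed, accepted
    DeepInfra(model="google/gemma-2-27b-it")     # now raises ValueError: Invalid model ...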
@@ -71,32 +71,41 @@ class DeepInfra(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "meta-llama/Llama-3.3-70B-Instruct-Turbo"  # Updated default model
+        model: str = "meta-llama/Llama-3.3-70B-Instruct-Turbo",
+        browser: str = "chrome"
     ):
         """Initializes the DeepInfra API client."""
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
         self.url = "https://api.deepinfra.com/v1/openai/chat/completions"
-        # Use LitAgent for user-agent instead of hardcoded string.
+
+        # Initialize LitAgent for user agent generation
+        self.agent = LitAgent()
+        # Use fingerprinting to create a consistent browser identity
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Use the fingerprint for headers
         self.headers = {
-            'User-Agent': LitAgent().random(),
-            'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-            'Cache-Control': 'no-cache',
-            'Connection': 'keep-alive',
-            'Content-Type': 'application/json',
-            'Origin': 'https://deepinfra.com',
-            'Pragma': 'no-cache',
-            'Referer': 'https://deepinfra.com/',
-            'Sec-Fetch-Dest': 'empty',
-            'Sec-Fetch-Mode': 'cors',
-            'Sec-Fetch-Site': 'same-site',
-            'X-Deepinfra-Source': 'web-embed',
-            'accept': 'text/event-stream',
-            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"macOS"'
+            "Accept": self.fingerprint["accept"],
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Content-Type": "application/json",
+            "Cache-Control": "no-cache",
+            "Connection": "keep-alive",
+            "Origin": "https://deepinfra.com",
+            "Pragma": "no-cache",
+            "Referer": "https://deepinfra.com/",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-site",
+            "X-Deepinfra-Source": "web-embed",
+            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+            "User-Agent": self.fingerprint["user_agent"],
         }
+
         self.session = requests.Session()
         self.session.headers.update(self.headers)
         self.session.proxies.update(proxies)
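The hardcoded header block is replaced by values drawn from a LitAgent fingerprint. The diff only shows which keys the provider reads ("accept", "accept_language", "sec_ch_ua", "platform", "user_agent"); the full dict is defined in webscout/litagent/agent.py (+356 -20 above). A hedged sketch of the pattern, using only the keys visible here:

    from webscout import LitAgent

    agent = LitAgent()
    fp = agent.generate_fingerprint("chrome")  # browser family is the only argument used in this diff
    headers = {
        "User-Agent": fp["user_agent"],
        "Accept": fp["accept"],
        "Accept-Language": fp["accept_language"],
        "Sec-CH-UA": fp["sec_ch_ua"],
        "Sec-CH-UA-Platform": f'"{fp["platform"]}"',
    }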
@@ -125,6 +134,31 @@ class DeepInfra(Provider):
         )
         self.conversation.history_offset = history_offset
 
+    def refresh_identity(self, browser: str = None):
+        """
+        Refreshes the browser identity fingerprint.
+
+        Args:
+            browser: Specific browser to use for the new fingerprint
+        """
+        browser = browser or self.fingerprint.get("browser_type", "chrome")
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Update headers with new fingerprint
+        self.headers.update({
+            "Accept": self.fingerprint["accept"],
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
+            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+            "User-Agent": self.fingerprint["user_agent"],
+        })
+
+        # Update session headers
+        for header, value in self.headers.items():
+            self.session.headers[header] = value
+
+        return self.fingerprint
+
     def ask(
         self,
         prompt: str,
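The new refresh_identity() lets a caller rotate the browser identity on an existing client without rebuilding it. A usage sketch under the same assumptions as above:

    from webscout.Provider.Deepinfra import DeepInfra

    ai = DeepInfra(browser="firefox")
    print(ai.fingerprint["user_agent"])      # identity fixed at construction

    # e.g. after a 403/429 from the endpoint, pick up a fresh fingerprint;
    # the session and conversation history are kept, only identity headers change
    fp = ai.refresh_identity(browser="chrome")
    print(fp["user_agent"])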
@@ -180,15 +214,30 @@ class DeepInfra(Provider):
                         except json.JSONDecodeError:
                             continue
 
+                self.last_response = {"text": streaming_text}
                 self.conversation.update_chat_history(prompt, streaming_text)
 
             except requests.RequestException as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
 
         def for_non_stream():
-            for _ in for_stream():
-                pass
-            return self.last_response
+            try:
+                response = requests.post(self.url, headers=self.headers, data=json.dumps(payload), timeout=self.timeout)
+                if response.status_code != 200:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Request failed with status code {response.status_code}"
+                    )
+
+                response_data = response.json()
+                if 'choices' in response_data and len(response_data['choices']) > 0:
+                    content = response_data['choices'][0].get('message', {}).get('content', '')
+                    self.last_response = {"text": content}
+                    self.conversation.update_chat_history(prompt, content)
+                    return {"text": content}
+                else:
+                    raise exceptions.FailedToGenerateResponseError("No response content found")
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
 
         return for_stream() if stream else for_non_stream()
 
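The non-streaming path no longer drains the stream generator; it issues its own request and parses choices[0].message.content from the JSON body, and both paths now record last_response. Caller-side shapes, inferred from get_message()'s dict assertion below:

    ai = DeepInfra()

    resp = ai.ask("Hi", stream=False)        # one dict: {"text": "..."}
    print(resp["text"])

    for chunk in ai.ask("Hi", stream=True):  # generator of {"text": ...} dicts
        print(chunk["text"], end="", flush=True)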
@@ -198,7 +247,7 @@ class DeepInfra(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str:
+    ) -> Union[str, Generator[str, None, None]]:
         def for_stream():
             for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
                 yield self.get_message(response)
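The widened annotation reflects what chat() already did: return a plain string when stream=False and a generator of string chunks when stream=True. For callers:

    text = ai.chat("Hello", stream=False)        # str
    for piece in ai.chat("Hello", stream=True):  # Generator[str, None, None]
        print(piece, end="", flush=True)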
@@ -213,8 +262,26 @@ class DeepInfra(Provider):
         return response["text"]
 
 if __name__ == "__main__":
-    from rich import print
-    ai = DeepInfra(timeout=5000)
-    response = ai.chat("write a poem about AI", stream=True)
-    for chunk in response:
-        print(chunk, end="", flush=True)
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in DeepInfra.AVAILABLE_MODELS:
+        try:
+            test_ai = DeepInfra(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Clean and truncate response
+                clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
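The old poem demo becomes a smoke test over every listed model. Assuming the wheel layout above and absolute imports inside the module, it should be runnable directly (network access required):

    python -m webscout.Provider.Deepinfra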
webscout/Provider/Gemini.py
@@ -10,7 +10,7 @@ from ..AIbase import Provider, AsyncProvider
 from ..Bard import Chatbot, Model
 
 # Import Logger and related classes (assumed similar to what is in yep.py)
-from webscout import Logger, LogFormat
+from webscout.Litlogger import Logger, LogFormat
 warnings.simplefilter("ignore", category=UserWarning)
 
webscout/Provider/Glider.py
@@ -12,12 +12,12 @@ class GliderAI(Provider):
     A class to interact with the Glider.so API.
     """
 
-    AVAILABLE_MODELS = {
+    AVAILABLE_MODELS = [
         "chat-llama-3-1-70b",
         "chat-llama-3-1-8b",
         "chat-llama-3-2-3b",
         "deepseek-ai/DeepSeek-R1",
-    }
+    ]
 
     def __init__(
         self,
@@ -124,10 +124,14 @@ class GliderAI(Provider):
                 if value.startswith("data: "):
                     try:
                         data = json.loads(value[6:])
-                        content = data['choices'][0].get('delta', {}).get("content", "")
-                        if content:
-                            streaming_text += content
-                            yield content if raw else {"text": content}
+                        # Handle both standard and DeepSeek response formats
+                        if "choices" in data and len(data["choices"]) > 0:
+                            choice = data["choices"][0]
+                            if "delta" in choice and "content" in choice["delta"]:
+                                content = choice["delta"]["content"]
+                                if content:
+                                    streaming_text += content
+                                    yield content if raw else {"text": content}
                     except json.JSONDecodeError:
                         if "stop" in value:
                             break
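The flat chained .get() is replaced by guarded lookups because some frames (the comment points at the DeepSeek format) can arrive without a delta or without content. The same defensive extraction as a standalone, hypothetical helper (not part of the package):

    import json

    def extract_delta(line: str) -> str:
        """Return the content delta from one 'data: ...' SSE line, or '' if absent."""
        if not line.startswith("data: "):
            return ""
        try:
            data = json.loads(line[6:])
        except json.JSONDecodeError:
            return ""
        if "choices" in data and len(data["choices"]) > 0:
            choice = data["choices"][0]
            if "delta" in choice and "content" in choice["delta"]:
                return choice["delta"]["content"] or ""
        return ""

    assert extract_delta('data: {"choices":[{"delta":{"content":"Hi"}}]}') == "Hi"
    assert extract_delta('data: {"choices":[{"delta":{}}]}') == ""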
@@ -180,9 +184,26 @@ class GliderAI(Provider):
         return response["text"]
 
 if __name__ == "__main__":
-    from rich import print
-    # For testing
-    ai = GliderAI(model="chat-llama-3-1-70b")
-    response = ai.chat("Meaning of Life", stream=True)
-    for chunk in response:
-        print(chunk, end="", flush=True)
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in GliderAI.AVAILABLE_MODELS:
+        try:
+            test_ai = GliderAI(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/HF_space/qwen_qwen2.py
@@ -1,10 +1,10 @@
 from dataclasses import dataclass
-from enum import Enum, auto
+from enum import Enum
 import requests
 import json
 import re
 import uuid
-from typing import List, Dict, Generator, Optional, Any, TypedDict, Literal, Union, Final
+from typing import List, Dict, Generator, Optional, Any, TypedDict, Final
 
 # Type definitions
 class Role(Enum):
webscout/Provider/HeckAI.py
@@ -208,10 +208,26 @@ class HeckAI(Provider):
         return response["text"]
 
 if __name__ == "__main__":
-    from rich import print
-    ai = HeckAI(timeout=120)
-    response = ai.chat("Write a short poem about artificial intelligence", stream=False)
-    print(response)
-    # for chunk in response:
-    #     chunk = ai.fix_encoding(chunk)
-    #     print(chunk, end="", flush=True)
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in HeckAI.AVAILABLE_MODELS:
+        try:
+            test_ai = HeckAI(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/Hunyuan.py (new file)
@@ -0,0 +1,272 @@
+import requests
+import json
+import os
+from typing import Any, Dict, Optional, Generator, Union
+import time
+import uuid
+import re
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+from webscout import LitAgent
+
+class Hunyuan(Provider):
+    """
+    A class to interact with the Tencent Hunyuan API with LitAgent user-agent.
+    """
+
+    AVAILABLE_MODELS = [
+        "hunyuan-t1-latest",
+        # Add more models as they become available
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2048,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "hunyuan-t1-latest",
+        browser: str = "chrome",
+        api_key: str = None,
+        system_prompt: str = "You are a helpful assistant.",
+    ):
+        """Initializes the Hunyuan API client."""
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.url = "https://llm.hunyuan.tencent.com/aide/api/v2/triton_image/demo_text_chat/"
+
+        # Initialize LitAgent for user agent generation
+        self.agent = LitAgent()
+        # Use fingerprinting to create a consistent browser identity
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Use the fingerprint for headers
+        self.headers = {
+            "Accept": "*/*",
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Content-Type": "application/json",
+            "DNT": "1",
+            "Origin": "https://llm.hunyuan.tencent.com",
+            "Referer": "https://llm.hunyuan.tencent.com/",
+            "Sec-CH-UA": f'"{self.fingerprint["sec_ch_ua"]}"' or '"Chromium";v="134", "Not:A-Brand";v="24", "Microsoft Edge";v="134"',
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Sec-GPC": "1",
+            "User-Agent": self.fingerprint["user_agent"],
+        }
+
+        # Add authorization if API key is provided
+        if api_key:
+            self.headers["Authorization"] = f"Bearer {api_key}"
+        else:
+            # Default test key (may not work long-term)
+            self.headers["Authorization"] = "Bearer 7auGXNATFSKl7dF"
+
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+        self.system_message = system_prompt
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def refresh_identity(self, browser: str = None):
+        """
+        Refreshes the browser identity fingerprint.
+
+        Args:
+            browser: Specific browser to use for the new fingerprint
+        """
+        browser = browser or self.fingerprint.get("browser_type", "chrome")
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Update headers with new fingerprint
+        self.headers.update({
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Sec-CH-UA": f'"{self.fingerprint["sec_ch_ua"]}"' or self.headers["Sec-CH-UA"],
+            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+            "User-Agent": self.fingerprint["user_agent"],
+        })
+
+        # Update session headers
+        for header, value in self.headers.items():
+            self.session.headers[header] = value
+
+        return self.fingerprint
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        # Generate a unique query ID for each request
+        query_id = ''.join(re.findall(r'[a-z0-9]', str(uuid.uuid4())[:18]))
+
+        # Payload construction
+        payload = {
+            "stream": stream,
+            "model": self.model,
+            "query_id": query_id,
+            "messages": [
+                {"role": "system", "content": self.system_message},
+                {"role": "user", "content": "Always response in English\n\n" + conversation_prompt},
+            ],
+            "stream_moderation": True,
+            "enable_enhancement": False
+        }
+
+        def for_stream():
+            try:
+                with self.session.post(self.url, data=json.dumps(payload), stream=True, timeout=self.timeout, verify=False) as response:
+                    if response.status_code != 200:
+                        raise exceptions.FailedToGenerateResponseError(
+                            f"Request failed with status code {response.status_code}"
+                        )
+
+                    streaming_text = ""
+                    for line in response.iter_lines(decode_unicode=True):
+                        if line:
+                            line = line.strip()
+                            if line.startswith("data: "):
+                                json_str = line[6:]
+                                if json_str == "[DONE]":
+                                    break
+                                try:
+                                    json_data = json.loads(json_str)
+                                    if 'choices' in json_data:
+                                        choice = json_data['choices'][0]
+                                        if 'delta' in choice and 'content' in choice['delta']:
+                                            content = choice['delta']['content']
+                                            streaming_text += content
+                                            resp = dict(text=content)
+                                            yield resp if raw else resp
+                                except json.JSONDecodeError:
+                                    continue
+
+                    self.last_response = {"text": streaming_text}
+                    self.conversation.update_chat_history(prompt, streaming_text)
+
+            except requests.RequestException as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+
+        def for_non_stream():
+            try:
+                response = self.session.post(self.url, data=json.dumps(payload), timeout=self.timeout, verify=False)
+                if response.status_code != 200:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Request failed with status code {response.status_code}"
+                    )
+
+                # Process non-streaming response (need to parse all lines)
+                full_text = ""
+                for line in response.text.split('\n'):
+                    if line.startswith("data: ") and line[6:] != "[DONE]":
+                        try:
+                            json_data = json.loads(line[6:])
+                            if 'choices' in json_data:
+                                choice = json_data['choices'][0]
+                                if 'delta' in choice and 'content' in choice['delta']:
+                                    full_text += choice['delta']['content']
+                        except json.JSONDecodeError:
+                            continue
+
+                self.last_response = {"text": full_text}
+                self.conversation.update_chat_history(prompt, full_text)
+                return {"text": full_text}
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        def for_stream():
+            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
+                yield self.get_message(response)
+        def for_non_stream():
+            return self.get_message(
+                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+            )
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+if __name__ == "__main__":
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in Hunyuan.AVAILABLE_MODELS:
+        try:
+            test_ai = Hunyuan(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Clean and truncate response
+                clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
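The new provider follows the same Provider contract as the others in this diff. Two caveats visible in the code itself: every request is sent with verify=False, so TLS certificate verification is disabled, and without an api_key it falls back to a bundled test bearer token that its own comment flags as fragile. A minimal usage sketch under those caveats:

    from webscout.Provider.Hunyuan import Hunyuan

    ai = Hunyuan(timeout=60)  # defaults: model="hunyuan-t1-latest", bundled test key
    for piece in ai.chat("Introduce yourself in one sentence", stream=True):
        print(piece, end="", flush=True)

The per-request query_id is simply the first 18 characters of a UUID4 with the hyphens stripped, i.e. 16 hex characters:

    import re, uuid
    query_id = ''.join(re.findall(r'[a-z0-9]', str(uuid.uuid4())[:18]))
    print(query_id)  # e.g. '93b1c04ae72f48d1'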
webscout/Provider/Jadve.py
@@ -244,8 +244,23 @@ class JadveOpenAI(Provider):
         return response["text"]
 
 if __name__ == "__main__":
-    from rich import print
-    ai = JadveOpenAI(timeout=5000)
-    response = ai.chat("Who made u?", stream=True)
-    for chunk in response:
-        print(chunk, end="", flush=True)
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in JadveOpenAI.AVAILABLE_MODELS:
+        try:
+            test_ai = JadveOpenAI(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word")
+            response_text = response
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"{model:<50} {'✗':<10} {str(e)}")