webscout 8.3.6-py3-none-any.whl → 8.3.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (130)
  1. webscout/AIutel.py +2 -0
  2. webscout/Provider/AISEARCH/__init__.py +18 -11
  3. webscout/Provider/AISEARCH/scira_search.py +3 -1
  4. webscout/Provider/Aitopia.py +2 -3
  5. webscout/Provider/Andi.py +3 -3
  6. webscout/Provider/ChatGPTClone.py +1 -1
  7. webscout/Provider/ChatSandbox.py +1 -0
  8. webscout/Provider/Cloudflare.py +1 -1
  9. webscout/Provider/Cohere.py +1 -0
  10. webscout/Provider/Deepinfra.py +7 -10
  11. webscout/Provider/ExaAI.py +1 -1
  12. webscout/Provider/ExaChat.py +1 -80
  13. webscout/Provider/Flowith.py +1 -1
  14. webscout/Provider/Gemini.py +7 -5
  15. webscout/Provider/GeminiProxy.py +1 -0
  16. webscout/Provider/GithubChat.py +3 -1
  17. webscout/Provider/Groq.py +1 -1
  18. webscout/Provider/HeckAI.py +8 -4
  19. webscout/Provider/Jadve.py +23 -38
  20. webscout/Provider/K2Think.py +308 -0
  21. webscout/Provider/Koboldai.py +8 -186
  22. webscout/Provider/LambdaChat.py +2 -4
  23. webscout/Provider/Nemotron.py +3 -4
  24. webscout/Provider/Netwrck.py +3 -2
  25. webscout/Provider/OLLAMA.py +1 -0
  26. webscout/Provider/OPENAI/Cloudflare.py +6 -7
  27. webscout/Provider/OPENAI/FalconH1.py +2 -7
  28. webscout/Provider/OPENAI/FreeGemini.py +6 -8
  29. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
  30. webscout/Provider/OPENAI/NEMOTRON.py +3 -6
  31. webscout/Provider/OPENAI/PI.py +5 -4
  32. webscout/Provider/OPENAI/Qwen3.py +2 -3
  33. webscout/Provider/OPENAI/TogetherAI.py +2 -2
  34. webscout/Provider/OPENAI/TwoAI.py +3 -4
  35. webscout/Provider/OPENAI/__init__.py +17 -58
  36. webscout/Provider/OPENAI/ai4chat.py +313 -303
  37. webscout/Provider/OPENAI/base.py +9 -29
  38. webscout/Provider/OPENAI/chatgpt.py +7 -2
  39. webscout/Provider/OPENAI/chatgptclone.py +4 -7
  40. webscout/Provider/OPENAI/chatsandbox.py +84 -59
  41. webscout/Provider/OPENAI/deepinfra.py +6 -6
  42. webscout/Provider/OPENAI/heckai.py +4 -1
  43. webscout/Provider/OPENAI/netwrck.py +1 -0
  44. webscout/Provider/OPENAI/scirachat.py +6 -0
  45. webscout/Provider/OPENAI/textpollinations.py +3 -11
  46. webscout/Provider/OPENAI/toolbaz.py +14 -11
  47. webscout/Provider/OpenGPT.py +1 -1
  48. webscout/Provider/Openai.py +150 -402
  49. webscout/Provider/PI.py +1 -0
  50. webscout/Provider/Perplexitylabs.py +1 -2
  51. webscout/Provider/QwenLM.py +107 -89
  52. webscout/Provider/STT/__init__.py +17 -2
  53. webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
  54. webscout/Provider/StandardInput.py +1 -1
  55. webscout/Provider/TTI/__init__.py +18 -12
  56. webscout/Provider/TTS/__init__.py +18 -10
  57. webscout/Provider/TeachAnything.py +1 -0
  58. webscout/Provider/TextPollinationsAI.py +5 -12
  59. webscout/Provider/TogetherAI.py +86 -87
  60. webscout/Provider/TwoAI.py +53 -309
  61. webscout/Provider/TypliAI.py +2 -1
  62. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
  63. webscout/Provider/Venice.py +2 -1
  64. webscout/Provider/VercelAI.py +1 -0
  65. webscout/Provider/WiseCat.py +2 -1
  66. webscout/Provider/WrDoChat.py +2 -1
  67. webscout/Provider/__init__.py +18 -86
  68. webscout/Provider/ai4chat.py +1 -1
  69. webscout/Provider/akashgpt.py +7 -10
  70. webscout/Provider/cerebras.py +115 -9
  71. webscout/Provider/chatglm.py +170 -83
  72. webscout/Provider/cleeai.py +1 -2
  73. webscout/Provider/deepseek_assistant.py +1 -1
  74. webscout/Provider/elmo.py +1 -1
  75. webscout/Provider/geminiapi.py +1 -1
  76. webscout/Provider/granite.py +1 -1
  77. webscout/Provider/hermes.py +1 -3
  78. webscout/Provider/julius.py +1 -0
  79. webscout/Provider/learnfastai.py +1 -1
  80. webscout/Provider/llama3mitril.py +1 -1
  81. webscout/Provider/llmchat.py +1 -1
  82. webscout/Provider/llmchatco.py +1 -1
  83. webscout/Provider/meta.py +3 -3
  84. webscout/Provider/oivscode.py +2 -2
  85. webscout/Provider/scira_chat.py +51 -124
  86. webscout/Provider/searchchat.py +1 -0
  87. webscout/Provider/sonus.py +1 -1
  88. webscout/Provider/toolbaz.py +15 -12
  89. webscout/Provider/turboseek.py +31 -22
  90. webscout/Provider/typefully.py +2 -1
  91. webscout/Provider/x0gpt.py +1 -0
  92. webscout/Provider/yep.py +2 -1
  93. webscout/tempid.py +6 -0
  94. webscout/version.py +1 -1
  95. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/METADATA +2 -1
  96. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/RECORD +103 -129
  97. webscout/Provider/AllenAI.py +0 -440
  98. webscout/Provider/Blackboxai.py +0 -793
  99. webscout/Provider/FreeGemini.py +0 -250
  100. webscout/Provider/GptOss.py +0 -207
  101. webscout/Provider/Hunyuan.py +0 -283
  102. webscout/Provider/Kimi.py +0 -445
  103. webscout/Provider/MCPCore.py +0 -322
  104. webscout/Provider/MiniMax.py +0 -207
  105. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  106. webscout/Provider/OPENAI/MiniMax.py +0 -298
  107. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  108. webscout/Provider/OPENAI/copilot.py +0 -321
  109. webscout/Provider/OPENAI/gptoss.py +0 -288
  110. webscout/Provider/OPENAI/kimi.py +0 -469
  111. webscout/Provider/OPENAI/mcpcore.py +0 -431
  112. webscout/Provider/OPENAI/multichat.py +0 -378
  113. webscout/Provider/Reka.py +0 -214
  114. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  115. webscout/Provider/asksteve.py +0 -220
  116. webscout/Provider/copilot.py +0 -441
  117. webscout/Provider/freeaichat.py +0 -294
  118. webscout/Provider/koala.py +0 -182
  119. webscout/Provider/lmarena.py +0 -198
  120. webscout/Provider/monochat.py +0 -275
  121. webscout/Provider/multichat.py +0 -375
  122. webscout/Provider/scnet.py +0 -244
  123. webscout/Provider/talkai.py +0 -194
  124. /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
  125. /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
  126. /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
  127. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
  128. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
  129. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
  130. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
webscout/Provider/Hunyuan.py (deleted)
@@ -1,283 +0,0 @@
- from curl_cffi.requests import Session
- from curl_cffi import CurlError
- import json
- import os
- from typing import Any, Dict, Optional, Generator, Union
- import time
- import uuid
- import re
-
- from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts, sanitize_stream
- from webscout.AIbase import Provider, AsyncProvider
- from webscout import exceptions
- from webscout.litagent import LitAgent
-
- class Hunyuan(Provider):
-     """
-     A class to interact with the Tencent Hunyuan API with LitAgent user-agent.
-     """
-
-     AVAILABLE_MODELS = [
-         "hunyuan-t1-latest",
-         # Add more models as they become available
-     ]
-
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 2048, # Note: max_tokens is not used by this API
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         model: str = "hunyuan-t1-latest",
-         browser: str = "chrome", # Note: browser fingerprinting might be less effective with impersonate
-         api_key: str = None,
-         system_prompt: str = "You are a helpful assistant.",
-     ):
-
-         """Initializes the Hunyuan API client."""
-         if model not in self.AVAILABLE_MODELS:
-             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-         self.url = "https://llm.hunyuan.tencent.com/aide/api/v2/triton_image/demo_text_chat/"
-
-         # Initialize LitAgent (keep if needed for other headers or logic)
-         self.agent = LitAgent()
-         # Fingerprint generation might be less relevant with impersonate
-         self.fingerprint = self.agent.generate_fingerprint(browser)
-
-         # Use the fingerprint for headers (keep relevant ones)
-         self.headers = {
-             "Accept": "*/*",
-             "Accept-Language": self.fingerprint["accept_language"], # Keep Accept-Language
-             "Content-Type": "application/json",
-             "DNT": "1", # Keep DNT
-             "Origin": "https://llm.hunyuan.tencent.com", # Keep Origin
-             "Referer": "https://llm.hunyuan.tencent.com/", # Keep Referer
-             "Sec-Fetch-Dest": "empty", # Keep Sec-Fetch-*
-             "Sec-Fetch-Mode": "cors",
-             "Sec-Fetch-Site": "same-origin",
-             "Sec-GPC": "1", # Keep Sec-GPC
-         }
-
-         # Add authorization if API key is provided
-         if api_key:
-             self.headers["Authorization"] = f"Bearer {api_key}"
-         else:
-             # Default test key (may not work long-term)
-             self.headers["Authorization"] = "Bearer 7auGXNATFSKl7dF"
-
-         # Initialize curl_cffi Session
-         self.session = Session()
-         # Update curl_cffi session headers and proxies
-         self.session.headers.update(self.headers)
-         self.session.proxies = proxies # Assign proxies directly
-         self.system_message = system_prompt
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.timeout = timeout
-         self.last_response = {}
-         self.model = model
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-
-     def refresh_identity(self, browser: str = None):
-         """
-         Refreshes the browser identity fingerprint.
-
-         Args:
-             browser: Specific browser to use for the new fingerprint
-         """
-         browser = browser or self.fingerprint.get("browser_type", "chrome")
-         self.fingerprint = self.agent.generate_fingerprint(browser)
-
-         # Update headers with new fingerprint (only relevant ones)
-         self.headers.update({
-             "Accept-Language": self.fingerprint["accept_language"],
-         })
-
-         # Update session headers
-         self.session.headers.update(self.headers) # Update only relevant headers
-
-         return self.fingerprint
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False, # API supports streaming
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Union[Dict[str, Any], Generator]:
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
-             else:
-                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-         # Generate a unique query ID for each request
-         query_id = ''.join(re.findall(r'[a-z0-9]', str(uuid.uuid4())[:18]))
-
-
-         # Payload construction
-         payload = {
-             "stream": True, # API seems to require stream=True based on response format
-             "model": self.model,
-             "query_id": query_id,
-             "messages": [
-                 {"role": "system", "content": self.system_message},
-                 {"role": "user", "content": "Always response in English\n\n" + conversation_prompt},
-             ],
-             "stream_moderation": True,
-             "enable_enhancement": False
-         }
-
-         def for_stream():
-             streaming_text = "" # Initialize outside try block
-             try:
-                 # Use curl_cffi session post with impersonate
-                 response = self.session.post(
-                     self.url,
-                     data=json.dumps(payload),
-                     stream=True,
-                     timeout=self.timeout,
-                     impersonate="chrome110" # Use a common impersonation profile
-                 )
-                 response.raise_for_status() # Check for HTTP errors
-
-                 # Iterate over bytes and decode manually
-                 for line_bytes in response.iter_lines():
-                     if line_bytes:
-                         try:
-                             line = line_bytes.decode('utf-8').strip()
-                             if line.startswith("data: "):
-                                 json_str = line[6:]
-                                 if json_str == "[DONE]":
-                                     break
-                                 json_data = json.loads(json_str)
-                                 if 'choices' in json_data:
-                                     choice = json_data['choices'][0]
-                                     if 'delta' in choice and 'content' in choice['delta']:
-                                         content = choice['delta']['content']
-                                         if content: # Ensure content is not None or empty
-                                             streaming_text += content
-                                             resp = dict(text=content)
-                                             # Yield dict or raw string chunk
-                                             yield resp if not raw else content
-                         except (json.JSONDecodeError, UnicodeDecodeError):
-                             continue # Ignore lines that are not valid JSON or cannot be decoded
-
-                 # Update history after stream finishes
-                 self.last_response = {"text": streaming_text}
-                 self.conversation.update_chat_history(prompt, streaming_text)
-
-             except CurlError as e: # Catch CurlError
-                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
-             except Exception as e: # Catch other potential exceptions (like HTTPError)
-                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
-                 raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)} - {err_text}") from e
-
-
-         def for_non_stream():
-             # Aggregate the stream using the updated for_stream logic
-             full_text = ""
-             try:
-                 # Ensure raw=False so for_stream yields dicts
-                 for chunk_data in for_stream():
-                     if isinstance(chunk_data, dict) and "text" in chunk_data:
-                         full_text += chunk_data["text"]
-                     # Handle raw string case if raw=True was passed
-                     elif raw and isinstance(chunk_data, str):
-                         full_text += chunk_data
-             except Exception as e:
-                 # If aggregation fails but some text was received, use it. Otherwise, re-raise.
-                 if not full_text:
-                     raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
-
-             # last_response and history are updated within for_stream
-             # Return the final aggregated response dict or raw string
-             return full_text if raw else self.last_response
-
-
-         # Since the API endpoint suggests streaming, always call the stream generator.
-         # The non-stream wrapper will handle aggregation if stream=False.
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Union[str, Generator[str, None, None]]:
-         def for_stream_chat():
-             # ask() yields dicts or strings when streaming
-             gen = self.ask(
-                 prompt, stream=True, raw=False, # Ensure ask yields dicts
-                 optimizer=optimizer, conversationally=conversationally
-             )
-             for response_dict in gen:
-                 yield self.get_message(response_dict) # get_message expects dict
-
-         def for_non_stream_chat():
-             # ask() returns dict or str when not streaming
-             response_data = self.ask(
-                 prompt, stream=False, raw=False, # Ensure ask returns dict
-                 optimizer=optimizer, conversationally=conversationally
-             )
-             return self.get_message(response_data) # get_message expects dict
-
-         return for_stream_chat() if stream else for_non_stream_chat()
-
-     def get_message(self, response: dict) -> str:
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
-
- if __name__ == "__main__":
-     # Ensure curl_cffi is installed
-     print("-" * 80)
-     print(f"{'Model':<50} {'Status':<10} {'Response'}")
-     print("-" * 80)
-
-     for model in Hunyuan.AVAILABLE_MODELS:
-         try:
-             test_ai = Hunyuan(model=model, timeout=60)
-             response = test_ai.chat("Say 'Hello' in one word", stream=True)
-             response_text = ""
-             for chunk in response:
-                 response_text += chunk
-
-             if response_text and len(response_text.strip()) > 0:
-                 status = "✓"
-                 # Clean and truncate response
-                 clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
-                 display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
-             else:
-                 status = "✗"
-                 display_text = "Empty or invalid response"
-             print(f"\r{model:<50} {status:<10} {display_text}")
-         except Exception as e:
-             print(f"\r{model:<50} {'✗':<10} {str(e)}")
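
For context on what this removal drops: the deleted provider's own __main__ block exercised it through the streaming chat() interface. A minimal usage sketch along those same lines, assuming a webscout release no later than 8.3.6 (where webscout/Provider/Hunyuan.py still ships) and the constructor's bundled default token, would look like this:

    # Minimal sketch, not part of the diff: assumes webscout <= 8.3.6, where the
    # Hunyuan provider module still exists; 8.3.7 removes it entirely.
    from webscout.Provider.Hunyuan import Hunyuan

    bot = Hunyuan(model="hunyuan-t1-latest", timeout=60)
    # chat(stream=True) yields plain text chunks; join them to get the full reply.
    reply = "".join(bot.chat("Say 'Hello' in one word", stream=True))
    print(reply)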