webscout 8.2.7__py3-none-any.whl → 8.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (281) hide show
  1. webscout/AIauto.py +33 -15
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +703 -250
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/Act.md +309 -0
  6. webscout/Extra/GitToolkit/__init__.py +10 -0
  7. webscout/Extra/GitToolkit/gitapi/README.md +110 -0
  8. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  9. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  10. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  11. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  12. webscout/Extra/YTToolkit/README.md +375 -0
  13. webscout/Extra/YTToolkit/YTdownloader.py +957 -0
  14. webscout/Extra/YTToolkit/__init__.py +3 -0
  15. webscout/Extra/YTToolkit/transcriber.py +476 -0
  16. webscout/Extra/YTToolkit/ytapi/README.md +44 -0
  17. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
  18. webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
  19. webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
  20. webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
  21. webscout/Extra/YTToolkit/ytapi/https.py +88 -0
  22. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
  23. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
  24. webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
  25. webscout/Extra/YTToolkit/ytapi/query.py +40 -0
  26. webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
  27. webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
  28. webscout/Extra/YTToolkit/ytapi/video.py +232 -0
  29. webscout/Extra/__init__.py +7 -0
  30. webscout/Extra/autocoder/__init__.py +9 -0
  31. webscout/Extra/autocoder/autocoder.py +1105 -0
  32. webscout/Extra/autocoder/autocoder_utiles.py +332 -0
  33. webscout/Extra/gguf.md +430 -0
  34. webscout/Extra/gguf.py +684 -0
  35. webscout/Extra/tempmail/README.md +488 -0
  36. webscout/Extra/tempmail/__init__.py +28 -0
  37. webscout/Extra/tempmail/async_utils.py +141 -0
  38. webscout/Extra/tempmail/base.py +161 -0
  39. webscout/Extra/tempmail/cli.py +187 -0
  40. webscout/Extra/tempmail/emailnator.py +84 -0
  41. webscout/Extra/tempmail/mail_tm.py +361 -0
  42. webscout/Extra/tempmail/temp_mail_io.py +292 -0
  43. webscout/Extra/weather.md +281 -0
  44. webscout/Extra/weather.py +194 -0
  45. webscout/Extra/weather_ascii.py +76 -0
  46. webscout/Litlogger/README.md +10 -0
  47. webscout/Litlogger/__init__.py +15 -0
  48. webscout/Litlogger/formats.py +4 -0
  49. webscout/Litlogger/handlers.py +103 -0
  50. webscout/Litlogger/levels.py +13 -0
  51. webscout/Litlogger/logger.py +92 -0
  52. webscout/Provider/AI21.py +177 -0
  53. webscout/Provider/AISEARCH/DeepFind.py +254 -0
  54. webscout/Provider/AISEARCH/Perplexity.py +333 -0
  55. webscout/Provider/AISEARCH/README.md +279 -0
  56. webscout/Provider/AISEARCH/__init__.py +9 -0
  57. webscout/Provider/AISEARCH/felo_search.py +202 -0
  58. webscout/Provider/AISEARCH/genspark_search.py +324 -0
  59. webscout/Provider/AISEARCH/hika_search.py +186 -0
  60. webscout/Provider/AISEARCH/iask_search.py +410 -0
  61. webscout/Provider/AISEARCH/monica_search.py +220 -0
  62. webscout/Provider/AISEARCH/scira_search.py +298 -0
  63. webscout/Provider/AISEARCH/webpilotai_search.py +255 -0
  64. webscout/Provider/Aitopia.py +316 -0
  65. webscout/Provider/AllenAI.py +440 -0
  66. webscout/Provider/Andi.py +228 -0
  67. webscout/Provider/Blackboxai.py +791 -0
  68. webscout/Provider/ChatGPTClone.py +237 -0
  69. webscout/Provider/ChatGPTGratis.py +194 -0
  70. webscout/Provider/ChatSandbox.py +342 -0
  71. webscout/Provider/Cloudflare.py +324 -0
  72. webscout/Provider/Cohere.py +208 -0
  73. webscout/Provider/Deepinfra.py +340 -0
  74. webscout/Provider/ExaAI.py +261 -0
  75. webscout/Provider/ExaChat.py +358 -0
  76. webscout/Provider/Flowith.py +217 -0
  77. webscout/Provider/FreeGemini.py +250 -0
  78. webscout/Provider/Gemini.py +169 -0
  79. webscout/Provider/GithubChat.py +369 -0
  80. webscout/Provider/GizAI.py +295 -0
  81. webscout/Provider/Glider.py +225 -0
  82. webscout/Provider/Groq.py +801 -0
  83. webscout/Provider/HF_space/__init__.py +0 -0
  84. webscout/Provider/HF_space/qwen_qwen2.py +206 -0
  85. webscout/Provider/HeckAI.py +375 -0
  86. webscout/Provider/HuggingFaceChat.py +469 -0
  87. webscout/Provider/Hunyuan.py +283 -0
  88. webscout/Provider/Jadve.py +291 -0
  89. webscout/Provider/Koboldai.py +384 -0
  90. webscout/Provider/LambdaChat.py +411 -0
  91. webscout/Provider/Llama3.py +259 -0
  92. webscout/Provider/MCPCore.py +315 -0
  93. webscout/Provider/Marcus.py +198 -0
  94. webscout/Provider/Nemotron.py +218 -0
  95. webscout/Provider/Netwrck.py +270 -0
  96. webscout/Provider/OLLAMA.py +396 -0
  97. webscout/Provider/OPENAI/BLACKBOXAI.py +766 -0
  98. webscout/Provider/OPENAI/Cloudflare.py +378 -0
  99. webscout/Provider/OPENAI/FreeGemini.py +283 -0
  100. webscout/Provider/OPENAI/NEMOTRON.py +232 -0
  101. webscout/Provider/OPENAI/Qwen3.py +283 -0
  102. webscout/Provider/OPENAI/README.md +952 -0
  103. webscout/Provider/OPENAI/TwoAI.py +357 -0
  104. webscout/Provider/OPENAI/__init__.py +40 -0
  105. webscout/Provider/OPENAI/ai4chat.py +293 -0
  106. webscout/Provider/OPENAI/api.py +969 -0
  107. webscout/Provider/OPENAI/base.py +249 -0
  108. webscout/Provider/OPENAI/c4ai.py +373 -0
  109. webscout/Provider/OPENAI/chatgpt.py +556 -0
  110. webscout/Provider/OPENAI/chatgptclone.py +494 -0
  111. webscout/Provider/OPENAI/chatsandbox.py +173 -0
  112. webscout/Provider/OPENAI/copilot.py +242 -0
  113. webscout/Provider/OPENAI/deepinfra.py +322 -0
  114. webscout/Provider/OPENAI/e2b.py +1414 -0
  115. webscout/Provider/OPENAI/exaai.py +417 -0
  116. webscout/Provider/OPENAI/exachat.py +444 -0
  117. webscout/Provider/OPENAI/flowith.py +162 -0
  118. webscout/Provider/OPENAI/freeaichat.py +359 -0
  119. webscout/Provider/OPENAI/glider.py +326 -0
  120. webscout/Provider/OPENAI/groq.py +364 -0
  121. webscout/Provider/OPENAI/heckai.py +308 -0
  122. webscout/Provider/OPENAI/llmchatco.py +335 -0
  123. webscout/Provider/OPENAI/mcpcore.py +389 -0
  124. webscout/Provider/OPENAI/multichat.py +376 -0
  125. webscout/Provider/OPENAI/netwrck.py +357 -0
  126. webscout/Provider/OPENAI/oivscode.py +287 -0
  127. webscout/Provider/OPENAI/opkfc.py +496 -0
  128. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  129. webscout/Provider/OPENAI/scirachat.py +477 -0
  130. webscout/Provider/OPENAI/sonus.py +304 -0
  131. webscout/Provider/OPENAI/standardinput.py +433 -0
  132. webscout/Provider/OPENAI/textpollinations.py +339 -0
  133. webscout/Provider/OPENAI/toolbaz.py +413 -0
  134. webscout/Provider/OPENAI/typefully.py +355 -0
  135. webscout/Provider/OPENAI/typegpt.py +364 -0
  136. webscout/Provider/OPENAI/uncovrAI.py +463 -0
  137. webscout/Provider/OPENAI/utils.py +318 -0
  138. webscout/Provider/OPENAI/venice.py +431 -0
  139. webscout/Provider/OPENAI/wisecat.py +387 -0
  140. webscout/Provider/OPENAI/writecream.py +163 -0
  141. webscout/Provider/OPENAI/x0gpt.py +365 -0
  142. webscout/Provider/OPENAI/yep.py +382 -0
  143. webscout/Provider/OpenGPT.py +209 -0
  144. webscout/Provider/Openai.py +496 -0
  145. webscout/Provider/PI.py +429 -0
  146. webscout/Provider/Perplexitylabs.py +415 -0
  147. webscout/Provider/QwenLM.py +254 -0
  148. webscout/Provider/Reka.py +214 -0
  149. webscout/Provider/StandardInput.py +290 -0
  150. webscout/Provider/TTI/README.md +82 -0
  151. webscout/Provider/TTI/__init__.py +7 -0
  152. webscout/Provider/TTI/aiarta.py +365 -0
  153. webscout/Provider/TTI/artbit.py +0 -0
  154. webscout/Provider/TTI/base.py +64 -0
  155. webscout/Provider/TTI/fastflux.py +200 -0
  156. webscout/Provider/TTI/magicstudio.py +201 -0
  157. webscout/Provider/TTI/piclumen.py +203 -0
  158. webscout/Provider/TTI/pixelmuse.py +225 -0
  159. webscout/Provider/TTI/pollinations.py +221 -0
  160. webscout/Provider/TTI/utils.py +11 -0
  161. webscout/Provider/TTS/README.md +192 -0
  162. webscout/Provider/TTS/__init__.py +10 -0
  163. webscout/Provider/TTS/base.py +159 -0
  164. webscout/Provider/TTS/deepgram.py +156 -0
  165. webscout/Provider/TTS/elevenlabs.py +111 -0
  166. webscout/Provider/TTS/gesserit.py +128 -0
  167. webscout/Provider/TTS/murfai.py +113 -0
  168. webscout/Provider/TTS/openai_fm.py +129 -0
  169. webscout/Provider/TTS/parler.py +111 -0
  170. webscout/Provider/TTS/speechma.py +580 -0
  171. webscout/Provider/TTS/sthir.py +94 -0
  172. webscout/Provider/TTS/streamElements.py +333 -0
  173. webscout/Provider/TTS/utils.py +280 -0
  174. webscout/Provider/TeachAnything.py +229 -0
  175. webscout/Provider/TextPollinationsAI.py +308 -0
  176. webscout/Provider/TwoAI.py +475 -0
  177. webscout/Provider/TypliAI.py +305 -0
  178. webscout/Provider/UNFINISHED/ChatHub.py +209 -0
  179. webscout/Provider/UNFINISHED/Youchat.py +330 -0
  180. webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
  181. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  182. webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
  183. webscout/Provider/Venice.py +258 -0
  184. webscout/Provider/VercelAI.py +253 -0
  185. webscout/Provider/WiseCat.py +233 -0
  186. webscout/Provider/WrDoChat.py +370 -0
  187. webscout/Provider/Writecream.py +246 -0
  188. webscout/Provider/WritingMate.py +269 -0
  189. webscout/Provider/__init__.py +174 -0
  190. webscout/Provider/ai4chat.py +174 -0
  191. webscout/Provider/akashgpt.py +335 -0
  192. webscout/Provider/asksteve.py +220 -0
  193. webscout/Provider/cerebras.py +290 -0
  194. webscout/Provider/chatglm.py +215 -0
  195. webscout/Provider/cleeai.py +213 -0
  196. webscout/Provider/copilot.py +425 -0
  197. webscout/Provider/elmo.py +283 -0
  198. webscout/Provider/freeaichat.py +285 -0
  199. webscout/Provider/geminiapi.py +208 -0
  200. webscout/Provider/granite.py +235 -0
  201. webscout/Provider/hermes.py +266 -0
  202. webscout/Provider/julius.py +223 -0
  203. webscout/Provider/koala.py +170 -0
  204. webscout/Provider/learnfastai.py +325 -0
  205. webscout/Provider/llama3mitril.py +215 -0
  206. webscout/Provider/llmchat.py +258 -0
  207. webscout/Provider/llmchatco.py +306 -0
  208. webscout/Provider/lmarena.py +198 -0
  209. webscout/Provider/meta.py +801 -0
  210. webscout/Provider/multichat.py +364 -0
  211. webscout/Provider/oivscode.py +309 -0
  212. webscout/Provider/samurai.py +224 -0
  213. webscout/Provider/scira_chat.py +299 -0
  214. webscout/Provider/scnet.py +243 -0
  215. webscout/Provider/searchchat.py +292 -0
  216. webscout/Provider/sonus.py +258 -0
  217. webscout/Provider/talkai.py +194 -0
  218. webscout/Provider/toolbaz.py +353 -0
  219. webscout/Provider/turboseek.py +266 -0
  220. webscout/Provider/typefully.py +202 -0
  221. webscout/Provider/typegpt.py +289 -0
  222. webscout/Provider/uncovr.py +368 -0
  223. webscout/Provider/x0gpt.py +299 -0
  224. webscout/Provider/yep.py +389 -0
  225. webscout/__init__.py +4 -2
  226. webscout/cli.py +3 -28
  227. webscout/client.py +70 -0
  228. webscout/conversation.py +35 -35
  229. webscout/litagent/Readme.md +276 -0
  230. webscout/litagent/__init__.py +29 -0
  231. webscout/litagent/agent.py +455 -0
  232. webscout/litagent/constants.py +60 -0
  233. webscout/litprinter/__init__.py +59 -0
  234. webscout/optimizers.py +419 -419
  235. webscout/scout/README.md +404 -0
  236. webscout/scout/__init__.py +8 -0
  237. webscout/scout/core/__init__.py +7 -0
  238. webscout/scout/core/crawler.py +210 -0
  239. webscout/scout/core/scout.py +607 -0
  240. webscout/scout/core/search_result.py +96 -0
  241. webscout/scout/core/text_analyzer.py +63 -0
  242. webscout/scout/core/text_utils.py +277 -0
  243. webscout/scout/core/web_analyzer.py +52 -0
  244. webscout/scout/element.py +478 -0
  245. webscout/scout/parsers/__init__.py +69 -0
  246. webscout/scout/parsers/html5lib_parser.py +172 -0
  247. webscout/scout/parsers/html_parser.py +236 -0
  248. webscout/scout/parsers/lxml_parser.py +178 -0
  249. webscout/scout/utils.py +37 -0
  250. webscout/swiftcli/Readme.md +323 -0
  251. webscout/swiftcli/__init__.py +95 -0
  252. webscout/swiftcli/core/__init__.py +7 -0
  253. webscout/swiftcli/core/cli.py +297 -0
  254. webscout/swiftcli/core/context.py +104 -0
  255. webscout/swiftcli/core/group.py +241 -0
  256. webscout/swiftcli/decorators/__init__.py +28 -0
  257. webscout/swiftcli/decorators/command.py +221 -0
  258. webscout/swiftcli/decorators/options.py +220 -0
  259. webscout/swiftcli/decorators/output.py +252 -0
  260. webscout/swiftcli/exceptions.py +21 -0
  261. webscout/swiftcli/plugins/__init__.py +9 -0
  262. webscout/swiftcli/plugins/base.py +135 -0
  263. webscout/swiftcli/plugins/manager.py +269 -0
  264. webscout/swiftcli/utils/__init__.py +59 -0
  265. webscout/swiftcli/utils/formatting.py +252 -0
  266. webscout/swiftcli/utils/parsing.py +267 -0
  267. webscout/version.py +1 -1
  268. webscout/webscout_search.py +2 -182
  269. webscout/webscout_search_async.py +1 -179
  270. webscout/zeroart/README.md +89 -0
  271. webscout/zeroart/__init__.py +135 -0
  272. webscout/zeroart/base.py +66 -0
  273. webscout/zeroart/effects.py +101 -0
  274. webscout/zeroart/fonts.py +1239 -0
  275. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/METADATA +262 -83
  276. webscout-8.2.9.dist-info/RECORD +289 -0
  277. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
  278. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
  279. webscout-8.2.7.dist-info/RECORD +0 -26
  280. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
  281. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
File without changes
@@ -0,0 +1,206 @@
1
+ from dataclasses import dataclass
2
+ from enum import Enum
3
+ import requests
4
+ import json
5
+ import re
6
+ import uuid
7
+ from typing import Union, List, Dict, Generator, Optional, Any, TypedDict, Final
8
+
9
+ # Type definitions
10
# Functional Enum form: the three chat roles understood by the prompt format.
# Behaves identically to the class-based declaration (same names and values).
Role = Enum("Role", {"SYSTEM": "system", "USER": "user", "ASSISTANT": "assistant"})
14
+
15
# Functional TypedDict form: one chat message with its role and text content.
Message = TypedDict("Message", {"role": str, "content": str})
18
+
19
# Functional TypedDict form: shape of the queue-join JSON response.
APIResponse = TypedDict(
    "APIResponse",
    {"event_id": str, "fn_index": int, "data": List[Any]},
)
23
+
24
# Functional TypedDict form: shape of one server-sent event from /queue/data.
StreamData = TypedDict("StreamData", {"msg": str, "output": Dict[str, Any]})
27
+
28
@dataclass
class APIConfig:
    """Endpoint configuration for the Qwen2-72B-Instruct Hugging Face space."""
    # Base URL of the Hugging Face space hosting the model.
    url: Final[str] = "https://qwen-qwen2-72b-instruct.hf.space"
    # Gradio queue join endpoint (POST here to enqueue a generation request).
    api_endpoint: Final[str] = "https://qwen-qwen2-72b-instruct.hf.space/queue/join?"
32
+
33
@dataclass
class RequestHeaders:
    """HTTP header sets for the two phases of a queue interaction.

    Attributes:
        join: Headers for the JSON POST that joins the generation queue.
        data: Headers for the SSE GET that streams generation events.
    """
    join: Dict[str, str]
    data: Dict[str, str]

    @classmethod
    def create_default(cls, base_url: str) -> 'RequestHeaders':
        """Build the default header pair for *base_url*.

        Args:
            base_url: Origin of the Hugging Face space.

        Returns:
            RequestHeaders: Populated join/data header dictionaries.
        """
        shared = {
            'accept-language': 'en-US,en;q=0.9',
            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
        }

        join_headers = dict(shared)
        join_headers.update({
            'accept': '*/*',
            'content-type': 'application/json',
            'origin': base_url,
            'referer': f'{base_url}/',
        })

        data_headers = dict(shared)
        data_headers.update({
            'accept': 'text/event-stream',
            'referer': f'{base_url}/',
        })

        return cls(join=join_headers, data=data_headers)
59
+
60
class QwenAPI:
    """Client for the Qwen2-72B-Instruct Hugging Face space queue API.

    Joins the Gradio generation queue with a prompt, then streams
    incremental text fragments from the ``/queue/data`` SSE endpoint.
    """

    def __init__(self, config: Optional[APIConfig] = None):
        """Initialize the client.

        Args:
            config: Endpoint configuration. A fresh ``APIConfig`` is created
                when omitted. (The previous ``config: APIConfig = APIConfig()``
                default was evaluated once at definition time, so every
                instance silently shared one mutable config object.)
        """
        self.config = config if config is not None else APIConfig()
        self.headers = RequestHeaders.create_default(self.config.url)

    @staticmethod
    def generate_session_hash() -> str:
        """Generate a unique 12-character hex session hash."""
        return str(uuid.uuid4()).replace('-', '')[:12]

    @staticmethod
    def format_prompt(messages: List[Message]) -> str:
        """
        Formats a list of messages into a single prompt string.

        Args:
            messages: A list of message dictionaries with "role" and "content" keys.

        Returns:
            str: The formatted prompt, one "role: content" line per message.
        """
        return "\n".join(f"{message['role']}: {message['content']}" for message in messages)

    def create_sync_generator(
        self,
        model: str,
        messages: List[Message],
        proxy: Optional[str] = None,
        **kwargs: Any
    ) -> Generator[str, None, None]:
        """
        Synchronously streams responses from the Qwen_Qwen2_72B_Instruct API.

        Args:
            model: The model to use for the request.
            messages: A list of message dictionaries with "role" and "content" keys.
            proxy: Optional proxy URL. NOTE(review): currently accepted but not
                applied to the session — confirm intended behavior.
            **kwargs: Additional keyword arguments (unused).

        Yields:
            str: Text chunks from the API response.

        Raises:
            requests.exceptions.RequestException: If the API request fails.
            json.JSONDecodeError: If a stream event cannot be parsed as JSON.
        """
        session_hash: str = self.generate_session_hash()

        # Split system messages (joined into a single system prompt) from the
        # conversational messages that form the user-visible prompt.
        system_messages: List[str] = [
            message["content"]
            for message in messages
            if message["role"] == Role.SYSTEM.value
        ]
        system_prompt: str = "\n".join(system_messages)

        user_messages: List[Message] = [
            message
            for message in messages
            if message["role"] != Role.SYSTEM.value
        ]
        prompt: str = self.format_prompt(user_messages)

        payload_join: Dict[str, Any] = {
            "data": [prompt, [], system_prompt],
            "event_data": None,
            "fn_index": 0,
            "trigger_id": 11,
            "session_hash": session_hash
        }

        with requests.Session() as session:
            # Join the generation queue.
            response = session.post(
                self.config.api_endpoint,
                headers=self.headers.join,
                json=payload_join
            )
            response.raise_for_status()
            # Validate that the join response is JSON; its payload is unused.
            response.json()

            # Stream generation events for our session.
            url_data: str = f'{self.config.url}/queue/data'
            params_data: Dict[str, str] = {'session_hash': session_hash}

            full_response: str = ""
            final_full_response: str = ""

            with session.get(
                url_data,
                headers=self.headers.data,
                params=params_data,
                stream=True
            ) as response:
                response.raise_for_status()

                for line in response.iter_lines():
                    if not line:
                        continue
                    decoded_line: str = line.decode('utf-8')
                    if not decoded_line.startswith('data: '):
                        continue
                    try:
                        json_data: StreamData = json.loads(decoded_line[6:])

                        if json_data.get('msg') == 'process_generating':
                            if 'output' in json_data and 'data' in json_data['output']:
                                output_data: List[Any] = json_data['output']['data']
                                if len(output_data) > 1 and len(output_data[1]) > 0:
                                    for item in output_data[1]:
                                        if isinstance(item, list) and len(item) > 1:
                                            fragment: str = str(item[1])
                                            # Skip bracketed control tokens and
                                            # fragments we already emitted.
                                            if not re.match(r'^\[.*\]$', fragment) and not full_response.endswith(fragment):
                                                full_response += fragment
                                                yield fragment

                        if json_data.get('msg') == 'process_completed':
                            if 'output' in json_data and 'data' in json_data['output']:
                                output_data = json_data['output']['data']
                                if len(output_data) > 1 and len(output_data[1]) > 0:
                                    final_full_response = output_data[1][0][1]

                                    # Emit only the tail not already streamed.
                                    if final_full_response.startswith(full_response):
                                        final_full_response = final_full_response[len(full_response):]

                                    if final_full_response:
                                        yield final_full_response
                            break

                    except json.JSONDecodeError as e:
                        print(f"Could not parse JSON: {decoded_line}")
                        raise e
191
+
192
+
193
def main() -> None:
    """Small demo: stream a completion for a canned two-message conversation."""
    demo_messages: List[Message] = [
        {"role": Role.SYSTEM.value, "content": "You are a helpful assistant."},
        {"role": Role.USER.value, "content": "LOL"}
    ]

    client = QwenAPI()
    for chunk in client.create_sync_generator("qwen-qwen2-72b-instruct", demo_messages):
        print(chunk, end="", flush=True)
    print("\n---\n")


if __name__ == "__main__":
    main()
@@ -0,0 +1,375 @@
1
+ from curl_cffi.requests import Session
2
+ from curl_cffi import CurlError
3
+ import json
4
+ import uuid
5
+ from typing import Any, Dict, Optional, Generator, Union
6
+
7
+ from webscout.AIutel import Optimizers
8
+ from webscout.AIutel import Conversation
9
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
10
+ from webscout.AIbase import Provider, AsyncProvider
11
+ from webscout import exceptions
12
+ from webscout.litagent import LitAgent
13
+
14
class HeckAI(Provider):
    """
    Provides an interface to interact with the HeckAI API using a LitAgent user-agent.

    This class supports conversational AI interactions with multiple available models,
    manages session state, handles streaming and non-streaming responses, and integrates
    with conversation history and prompt optimizers.

    Attributes:
        AVAILABLE_MODELS (list): List of supported model identifiers.
        url (str): API endpoint URL.
        session_id (str): Unique session identifier for the conversation.
        language (str): Language for the conversation.
        headers (dict): HTTP headers used for API requests.
        session (Session): curl_cffi session for HTTP requests.
        is_conversation (bool): Whether to maintain conversation history.
        max_tokens_to_sample (int): Maximum tokens to sample (not used by API).
        timeout (int): Request timeout in seconds.
        last_response (dict): Stores the last API response.
        model (str): Model identifier in use.
        previous_question (str): Last question sent to the API.
        previous_answer (str): Last answer received from the API.
        conversation (Conversation): Conversation history manager.
    """

    AVAILABLE_MODELS = [
        "google/gemini-2.0-flash-001",
        "deepseek/deepseek-chat",
        "deepseek/deepseek-r1",
        "openai/gpt-4o-mini",
        "openai/gpt-4.1-mini",
        "x-ai/grok-3-mini-beta",
        "meta-llama/llama-4-scout"
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2049,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: Optional[dict] = None,
        history_offset: int = 10250,
        act: str = None,
        model: str = "google/gemini-2.0-flash-001",
        language: str = "English"
    ):
        """
        Initializes the HeckAI API client.

        Args:
            is_conversation (bool): Whether to maintain conversation history.
            max_tokens (int): Maximum tokens to sample (not used by this API).
            timeout (int): Timeout for API requests in seconds.
            intro (str, optional): Introductory prompt for the conversation.
            filepath (str, optional): File path for storing conversation history.
            update_file (bool): Whether to update the conversation file.
            proxies (dict, optional): Proxy settings for HTTP requests. Defaults
                to None (no proxies). The previous ``proxies={}`` mutable
                default was a single dict shared by every instance.
            history_offset (int): Offset for conversation history truncation.
            act (str, optional): Role or act for the conversation.
            model (str): Model identifier to use.
            language (str): Language for the conversation.

        Raises:
            ValueError: If the provided model is not in AVAILABLE_MODELS.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.url = "https://api.heckai.weight-wave.com/api/ha/v1/chat"
        self.session_id = str(uuid.uuid4())
        self.language = language

        # Use LitAgent for a realistic random User-Agent.
        self.headers = {
            'Content-Type': 'application/json',
            'Origin': 'https://heck.ai',
            'Referer': 'https://heck.ai/',
            'User-Agent': LitAgent().random(),
        }

        # Initialize curl_cffi session with headers and (per-instance) proxies.
        self.session = Session()
        self.session.headers.update(self.headers)
        self.session.proxies = proxies if proxies is not None else {}

        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.previous_question = None
        self.previous_answer = None

        # Materialize as a tuple: the original generator expression was
        # exhausted by the first `optimizer in ...` membership test in ask(),
        # so valid optimizers were silently rejected on every later call.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        """
        Sends a prompt to the HeckAI API and returns the response.

        Args:
            prompt (str): The prompt or question to send to the API.
            stream (bool): If True, yields streaming responses as they arrive.
            raw (bool): If True, yields raw string chunks instead of dicts.
            optimizer (str, optional): Name of the optimizer to apply to the prompt.
            conversationally (bool): If True, optimizer is applied to the full conversation prompt.

        Returns:
            Union[Dict[str, Any], Generator]: If stream is False, returns a dict with the response text.
                If stream is True, yields response chunks as dicts or strings.

        Raises:
            Exception: If the optimizer is not available.
            exceptions.FailedToGenerateResponseError: On API or network errors.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        payload = {
            "model": self.model,
            "question": conversation_prompt,
            "language": self.language,
            "sessionId": self.session_id,
            "previousQuestion": self.previous_question,
            "previousAnswer": self.previous_answer,
            "imgUrls": [],
            "superSmartMode": False  # Matches observed API request data
        }

        # Remember this question for the next request's context fields.
        self.previous_question = conversation_prompt

        def for_stream():
            streaming_text = ""  # Accumulates the full answer across chunks
            try:
                response = self.session.post(
                    self.url,
                    data=json.dumps(payload),
                    stream=True,
                    timeout=self.timeout,
                    impersonate="chrome110"  # Common browser impersonation profile
                )
                response.raise_for_status()

                # The SSE stream wraps the answer between ANSWER_START/ANSWER_DONE
                # markers; related-question and reasoning sections are skipped.
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=1024),
                    intro_value="data: ",
                    to_json=False,
                    start_marker="data: [ANSWER_START]",
                    end_marker="data: [ANSWER_DONE]",
                    skip_markers=["data: [RELATE_Q_START]", "data: [RELATE_Q_DONE]", "data: [REASON_START]", "data: [REASON_DONE]"],
                    yield_raw_on_error=True,
                    strip_chars=" \n\r\t"
                )

                for content_chunk in processed_stream:
                    if content_chunk and isinstance(content_chunk, str):
                        streaming_text += content_chunk
                        yield dict(text=content_chunk) if not raw else content_chunk

                # Only update history if we received a valid response.
                if streaming_text:
                    self.previous_answer = streaming_text
                    try:
                        sanitized_text = streaming_text.strip()
                        if sanitized_text:  # Only record non-empty content
                            self.conversation.update_chat_history(prompt, sanitized_text)
                    except Exception as e:
                        # History update failure must not abort the stream.
                        print(f"Warning: Failed to update conversation history: {str(e)}")

            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
            except Exception as e:  # e.g. HTTPError from raise_for_status
                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)} - {err_text}") from e

        def for_non_stream():
            # Aggregate the stream produced by for_stream().
            full_text = ""
            try:
                for chunk_data in for_stream():
                    if isinstance(chunk_data, dict) and "text" in chunk_data:
                        full_text += chunk_data["text"]
                    elif raw and isinstance(chunk_data, str):
                        full_text += chunk_data
            except Exception as e:
                # If aggregation fails but some text was received, use it.
                if not full_text:
                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e

            self.last_response = {"text": full_text}
            return full_text if raw else self.last_response

        return for_stream() if stream else for_non_stream()

    @staticmethod
    def fix_encoding(text):
        """
        Fixes encoding issues in the response text (latin1 bytes holding UTF-8).

        Args:
            text (Union[str, dict]): The text or response dict to fix encoding for.

        Returns:
            Union[str, dict]: The text or dict with encoding corrected if possible;
                the input unchanged when re-decoding fails.
        """
        if isinstance(text, dict) and "text" in text:
            try:
                text["text"] = text["text"].encode("latin1").decode("utf-8")
            except (UnicodeError, AttributeError):
                pass  # Best-effort: keep the original text on failure
            return text
        elif isinstance(text, str):
            try:
                return text.encode("latin1").decode("utf-8")
            except (UnicodeError, AttributeError):
                return text
        return text

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """
        Sends a prompt to the HeckAI API and returns only the message text.

        Args:
            prompt (str): The prompt or question to send to the API.
            stream (bool): If True, yields streaming response text.
            optimizer (str, optional): Name of the optimizer to apply to the prompt.
            conversationally (bool): If True, optimizer is applied to the full conversation prompt.

        Returns:
            Union[str, Generator[str, None, None]]: The response text, or a generator yielding text chunks.
        """
        def for_stream_chat():
            # ask(stream=True, raw=False) yields dicts; extract the text of each.
            gen = self.ask(
                prompt, stream=True, raw=False,
                optimizer=optimizer, conversationally=conversationally
            )
            for response_dict in gen:
                yield self.get_message(response_dict)

        def for_non_stream_chat():
            response_data = self.ask(
                prompt, stream=False, raw=False,
                optimizer=optimizer, conversationally=conversationally
            )
            return self.get_message(response_data)

        return for_stream_chat() if stream else for_non_stream_chat()

    def get_message(self, response: dict) -> str:
        """
        Extracts the message text from the API response.

        Args:
            response (dict): The API response dictionary.

        Returns:
            str: The extracted message text. Returns an empty string if not found.

        Raises:
            TypeError: If the response is not a dictionary.
        """
        if not isinstance(response, dict):
            raise TypeError(f"Expected dict response, got {type(response).__name__}")

        # Handle missing text key gracefully.
        if "text" not in response:
            return ""

        # Coerce non-string payloads rather than failing.
        text = response["text"]
        if not isinstance(text, str):
            return str(text)

        return text
344
+
345
if __name__ == "__main__":
    # Smoke-test every available model and print a one-line status report.
    print("-" * 80)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 80)

    for model_name in HeckAI.AVAILABLE_MODELS:
        try:
            client = HeckAI(model=model_name, timeout=60)
            try:
                # Prefer the simpler non-streaming path first.
                reply = client.chat("Say 'Hello' in one word", stream=False)
                print(f"\r{model_name:<50} {'✓':<10} {reply.strip()[:50]}")
            except Exception:
                # Fall back to streaming if the one-shot call fails.
                print(f"\r{model_name:<50} {'Testing stream...':<10}", end="", flush=True)
                collected = ""
                for piece in client.chat("Say 'Hello' in one word", stream=True):
                    if piece and isinstance(piece, str):
                        collected += piece

                trimmed = collected.strip()
                if trimmed:
                    shown = trimmed[:50] + "..." if len(trimmed) > 50 else trimmed
                    print(f"\r{model_name:<50} {'✓':<10} {shown}")
                else:
                    raise ValueError("Empty or invalid response")
        except Exception as e:
            print(f"\r{model_name:<50} {'✗':<10} {str(e)}")