webscout-8.3.7-py3-none-any.whl → webscout-2025.10.11-py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout has been flagged as possibly problematic.

Files changed (273)
  1. webscout/AIauto.py +250 -250
  2. webscout/AIbase.py +379 -379
  3. webscout/AIutel.py +60 -60
  4. webscout/Bard.py +1012 -1012
  5. webscout/Bing_search.py +417 -417
  6. webscout/DWEBS.py +529 -529
  7. webscout/Extra/Act.md +309 -309
  8. webscout/Extra/GitToolkit/__init__.py +10 -10
  9. webscout/Extra/GitToolkit/gitapi/README.md +110 -110
  10. webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
  11. webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
  12. webscout/Extra/GitToolkit/gitapi/user.py +96 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
  14. webscout/Extra/YTToolkit/README.md +375 -375
  15. webscout/Extra/YTToolkit/YTdownloader.py +956 -956
  16. webscout/Extra/YTToolkit/__init__.py +2 -2
  17. webscout/Extra/YTToolkit/transcriber.py +475 -475
  18. webscout/Extra/YTToolkit/ytapi/README.md +44 -44
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
  20. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  21. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  22. webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
  23. webscout/Extra/YTToolkit/ytapi/https.py +88 -88
  24. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  25. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  26. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  27. webscout/Extra/YTToolkit/ytapi/query.py +39 -39
  28. webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
  29. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  30. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  31. webscout/Extra/autocoder/__init__.py +9 -9
  32. webscout/Extra/autocoder/autocoder.py +1105 -1105
  33. webscout/Extra/autocoder/autocoder_utiles.py +332 -332
  34. webscout/Extra/gguf.md +429 -429
  35. webscout/Extra/gguf.py +1213 -1213
  36. webscout/Extra/tempmail/README.md +487 -487
  37. webscout/Extra/tempmail/__init__.py +27 -27
  38. webscout/Extra/tempmail/async_utils.py +140 -140
  39. webscout/Extra/tempmail/base.py +160 -160
  40. webscout/Extra/tempmail/cli.py +186 -186
  41. webscout/Extra/tempmail/emailnator.py +84 -84
  42. webscout/Extra/tempmail/mail_tm.py +360 -360
  43. webscout/Extra/tempmail/temp_mail_io.py +291 -291
  44. webscout/Extra/weather.md +281 -281
  45. webscout/Extra/weather.py +193 -193
  46. webscout/Litlogger/README.md +10 -10
  47. webscout/Litlogger/__init__.py +15 -15
  48. webscout/Litlogger/formats.py +13 -13
  49. webscout/Litlogger/handlers.py +121 -121
  50. webscout/Litlogger/levels.py +13 -13
  51. webscout/Litlogger/logger.py +134 -134
  52. webscout/Provider/AISEARCH/Perplexity.py +332 -332
  53. webscout/Provider/AISEARCH/README.md +279 -279
  54. webscout/Provider/AISEARCH/__init__.py +16 -1
  55. webscout/Provider/AISEARCH/felo_search.py +206 -206
  56. webscout/Provider/AISEARCH/genspark_search.py +323 -323
  57. webscout/Provider/AISEARCH/hika_search.py +185 -185
  58. webscout/Provider/AISEARCH/iask_search.py +410 -410
  59. webscout/Provider/AISEARCH/monica_search.py +219 -219
  60. webscout/Provider/AISEARCH/scira_search.py +316 -316
  61. webscout/Provider/AISEARCH/stellar_search.py +177 -177
  62. webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
  63. webscout/Provider/Aitopia.py +314 -314
  64. webscout/Provider/Apriel.py +306 -0
  65. webscout/Provider/ChatGPTClone.py +236 -236
  66. webscout/Provider/ChatSandbox.py +343 -343
  67. webscout/Provider/Cloudflare.py +324 -324
  68. webscout/Provider/Cohere.py +208 -208
  69. webscout/Provider/Deepinfra.py +370 -366
  70. webscout/Provider/ExaAI.py +260 -260
  71. webscout/Provider/ExaChat.py +308 -308
  72. webscout/Provider/Flowith.py +221 -221
  73. webscout/Provider/GMI.py +293 -0
  74. webscout/Provider/Gemini.py +164 -164
  75. webscout/Provider/GeminiProxy.py +167 -167
  76. webscout/Provider/GithubChat.py +371 -372
  77. webscout/Provider/Groq.py +800 -800
  78. webscout/Provider/HeckAI.py +383 -383
  79. webscout/Provider/Jadve.py +282 -282
  80. webscout/Provider/K2Think.py +307 -307
  81. webscout/Provider/Koboldai.py +205 -205
  82. webscout/Provider/LambdaChat.py +423 -423
  83. webscout/Provider/Nemotron.py +244 -244
  84. webscout/Provider/Netwrck.py +248 -248
  85. webscout/Provider/OLLAMA.py +395 -395
  86. webscout/Provider/OPENAI/Cloudflare.py +393 -393
  87. webscout/Provider/OPENAI/FalconH1.py +451 -451
  88. webscout/Provider/OPENAI/FreeGemini.py +296 -296
  89. webscout/Provider/OPENAI/K2Think.py +431 -431
  90. webscout/Provider/OPENAI/NEMOTRON.py +240 -240
  91. webscout/Provider/OPENAI/PI.py +427 -427
  92. webscout/Provider/OPENAI/README.md +959 -959
  93. webscout/Provider/OPENAI/TogetherAI.py +345 -345
  94. webscout/Provider/OPENAI/TwoAI.py +465 -465
  95. webscout/Provider/OPENAI/__init__.py +33 -18
  96. webscout/Provider/OPENAI/base.py +248 -248
  97. webscout/Provider/OPENAI/chatglm.py +528 -0
  98. webscout/Provider/OPENAI/chatgpt.py +592 -592
  99. webscout/Provider/OPENAI/chatgptclone.py +521 -521
  100. webscout/Provider/OPENAI/chatsandbox.py +202 -202
  101. webscout/Provider/OPENAI/deepinfra.py +318 -314
  102. webscout/Provider/OPENAI/e2b.py +1665 -1665
  103. webscout/Provider/OPENAI/exaai.py +420 -420
  104. webscout/Provider/OPENAI/exachat.py +452 -452
  105. webscout/Provider/OPENAI/friendli.py +232 -232
  106. webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
  107. webscout/Provider/OPENAI/groq.py +364 -364
  108. webscout/Provider/OPENAI/heckai.py +314 -314
  109. webscout/Provider/OPENAI/llmchatco.py +337 -337
  110. webscout/Provider/OPENAI/netwrck.py +355 -355
  111. webscout/Provider/OPENAI/oivscode.py +290 -290
  112. webscout/Provider/OPENAI/opkfc.py +518 -518
  113. webscout/Provider/OPENAI/pydantic_imports.py +1 -1
  114. webscout/Provider/OPENAI/scirachat.py +535 -535
  115. webscout/Provider/OPENAI/sonus.py +308 -308
  116. webscout/Provider/OPENAI/standardinput.py +442 -442
  117. webscout/Provider/OPENAI/textpollinations.py +340 -340
  118. webscout/Provider/OPENAI/toolbaz.py +419 -416
  119. webscout/Provider/OPENAI/typefully.py +362 -362
  120. webscout/Provider/OPENAI/utils.py +295 -295
  121. webscout/Provider/OPENAI/venice.py +436 -436
  122. webscout/Provider/OPENAI/wisecat.py +387 -387
  123. webscout/Provider/OPENAI/writecream.py +166 -166
  124. webscout/Provider/OPENAI/x0gpt.py +378 -378
  125. webscout/Provider/OPENAI/yep.py +389 -389
  126. webscout/Provider/OpenGPT.py +230 -230
  127. webscout/Provider/Openai.py +243 -243
  128. webscout/Provider/PI.py +405 -405
  129. webscout/Provider/Perplexitylabs.py +430 -430
  130. webscout/Provider/QwenLM.py +272 -272
  131. webscout/Provider/STT/__init__.py +16 -1
  132. webscout/Provider/Sambanova.py +257 -257
  133. webscout/Provider/StandardInput.py +309 -309
  134. webscout/Provider/TTI/README.md +82 -82
  135. webscout/Provider/TTI/__init__.py +33 -18
  136. webscout/Provider/TTI/aiarta.py +413 -413
  137. webscout/Provider/TTI/base.py +136 -136
  138. webscout/Provider/TTI/bing.py +243 -243
  139. webscout/Provider/TTI/gpt1image.py +149 -149
  140. webscout/Provider/TTI/imagen.py +196 -196
  141. webscout/Provider/TTI/infip.py +211 -211
  142. webscout/Provider/TTI/magicstudio.py +232 -232
  143. webscout/Provider/TTI/monochat.py +219 -219
  144. webscout/Provider/TTI/piclumen.py +214 -214
  145. webscout/Provider/TTI/pixelmuse.py +232 -232
  146. webscout/Provider/TTI/pollinations.py +232 -232
  147. webscout/Provider/TTI/together.py +288 -288
  148. webscout/Provider/TTI/utils.py +12 -12
  149. webscout/Provider/TTI/venice.py +367 -367
  150. webscout/Provider/TTS/README.md +192 -192
  151. webscout/Provider/TTS/__init__.py +33 -18
  152. webscout/Provider/TTS/parler.py +110 -110
  153. webscout/Provider/TTS/streamElements.py +333 -333
  154. webscout/Provider/TTS/utils.py +280 -280
  155. webscout/Provider/TeachAnything.py +237 -237
  156. webscout/Provider/TextPollinationsAI.py +310 -310
  157. webscout/Provider/TogetherAI.py +356 -356
  158. webscout/Provider/TwoAI.py +312 -312
  159. webscout/Provider/TypliAI.py +311 -311
  160. webscout/Provider/UNFINISHED/ChatHub.py +208 -208
  161. webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
  162. webscout/Provider/UNFINISHED/GizAI.py +294 -294
  163. webscout/Provider/UNFINISHED/Marcus.py +198 -198
  164. webscout/Provider/UNFINISHED/Qodo.py +477 -477
  165. webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
  166. webscout/Provider/UNFINISHED/XenAI.py +324 -324
  167. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  168. webscout/Provider/UNFINISHED/liner.py +334 -0
  169. webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
  170. webscout/Provider/UNFINISHED/puterjs.py +634 -634
  171. webscout/Provider/UNFINISHED/samurai.py +223 -223
  172. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  173. webscout/Provider/Venice.py +250 -250
  174. webscout/Provider/VercelAI.py +256 -256
  175. webscout/Provider/WiseCat.py +231 -231
  176. webscout/Provider/WrDoChat.py +366 -366
  177. webscout/Provider/__init__.py +33 -18
  178. webscout/Provider/ai4chat.py +174 -174
  179. webscout/Provider/akashgpt.py +331 -331
  180. webscout/Provider/cerebras.py +446 -446
  181. webscout/Provider/chatglm.py +394 -301
  182. webscout/Provider/cleeai.py +211 -211
  183. webscout/Provider/elmo.py +282 -282
  184. webscout/Provider/geminiapi.py +208 -208
  185. webscout/Provider/granite.py +261 -261
  186. webscout/Provider/hermes.py +263 -263
  187. webscout/Provider/julius.py +223 -223
  188. webscout/Provider/learnfastai.py +309 -309
  189. webscout/Provider/llama3mitril.py +214 -214
  190. webscout/Provider/llmchat.py +243 -243
  191. webscout/Provider/llmchatco.py +290 -290
  192. webscout/Provider/meta.py +801 -801
  193. webscout/Provider/oivscode.py +309 -309
  194. webscout/Provider/scira_chat.py +383 -383
  195. webscout/Provider/searchchat.py +292 -292
  196. webscout/Provider/sonus.py +258 -258
  197. webscout/Provider/toolbaz.py +370 -367
  198. webscout/Provider/turboseek.py +273 -273
  199. webscout/Provider/typefully.py +207 -207
  200. webscout/Provider/yep.py +372 -372
  201. webscout/__init__.py +30 -31
  202. webscout/__main__.py +5 -5
  203. webscout/auth/api_key_manager.py +189 -189
  204. webscout/auth/config.py +175 -175
  205. webscout/auth/models.py +185 -185
  206. webscout/auth/routes.py +664 -664
  207. webscout/auth/simple_logger.py +236 -236
  208. webscout/cli.py +523 -523
  209. webscout/conversation.py +438 -438
  210. webscout/exceptions.py +361 -361
  211. webscout/litagent/Readme.md +298 -298
  212. webscout/litagent/__init__.py +28 -28
  213. webscout/litagent/agent.py +581 -581
  214. webscout/litagent/constants.py +59 -59
  215. webscout/litprinter/__init__.py +58 -58
  216. webscout/models.py +181 -181
  217. webscout/optimizers.py +419 -419
  218. webscout/prompt_manager.py +288 -288
  219. webscout/sanitize.py +1078 -1078
  220. webscout/scout/README.md +401 -401
  221. webscout/scout/__init__.py +8 -8
  222. webscout/scout/core/__init__.py +6 -6
  223. webscout/scout/core/crawler.py +297 -297
  224. webscout/scout/core/scout.py +706 -706
  225. webscout/scout/core/search_result.py +95 -95
  226. webscout/scout/core/text_analyzer.py +62 -62
  227. webscout/scout/core/text_utils.py +277 -277
  228. webscout/scout/core/web_analyzer.py +51 -51
  229. webscout/scout/element.py +599 -599
  230. webscout/scout/parsers/__init__.py +69 -69
  231. webscout/scout/parsers/html5lib_parser.py +172 -172
  232. webscout/scout/parsers/html_parser.py +236 -236
  233. webscout/scout/parsers/lxml_parser.py +178 -178
  234. webscout/scout/utils.py +37 -37
  235. webscout/swiftcli/Readme.md +323 -323
  236. webscout/swiftcli/__init__.py +95 -95
  237. webscout/swiftcli/core/__init__.py +7 -7
  238. webscout/swiftcli/core/cli.py +308 -308
  239. webscout/swiftcli/core/context.py +104 -104
  240. webscout/swiftcli/core/group.py +241 -241
  241. webscout/swiftcli/decorators/__init__.py +28 -28
  242. webscout/swiftcli/decorators/command.py +221 -221
  243. webscout/swiftcli/decorators/options.py +220 -220
  244. webscout/swiftcli/decorators/output.py +302 -302
  245. webscout/swiftcli/exceptions.py +21 -21
  246. webscout/swiftcli/plugins/__init__.py +9 -9
  247. webscout/swiftcli/plugins/base.py +135 -135
  248. webscout/swiftcli/plugins/manager.py +269 -269
  249. webscout/swiftcli/utils/__init__.py +59 -59
  250. webscout/swiftcli/utils/formatting.py +252 -252
  251. webscout/swiftcli/utils/parsing.py +267 -267
  252. webscout/update_checker.py +117 -117
  253. webscout/version.py +1 -1
  254. webscout/webscout_search.py +1183 -1183
  255. webscout/webscout_search_async.py +649 -649
  256. webscout/yep_search.py +346 -346
  257. webscout/zeroart/README.md +89 -89
  258. webscout/zeroart/__init__.py +134 -134
  259. webscout/zeroart/base.py +66 -66
  260. webscout/zeroart/effects.py +100 -100
  261. webscout/zeroart/fonts.py +1238 -1238
  262. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -937
  263. webscout-2025.10.11.dist-info/RECORD +300 -0
  264. webscout/Provider/AISEARCH/DeepFind.py +0 -254
  265. webscout/Provider/OPENAI/Qwen3.py +0 -303
  266. webscout/Provider/OPENAI/qodo.py +0 -630
  267. webscout/Provider/OPENAI/xenai.py +0 -514
  268. webscout/tempid.py +0 -134
  269. webscout-8.3.7.dist-info/RECORD +0 -301
  270. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
  271. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
  272. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
  273. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
webscout/Provider/HeckAI.py
@@ -1,384 +1,384 @@
The removed and re-added lines in this hunk are textually identical (the file was rewritten without visible content changes), so the file content is shown once below.
from curl_cffi.requests import Session
from curl_cffi import CurlError
import json
import uuid
from typing import Any, Dict, Optional, Generator, Union

from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts, sanitize_stream
from webscout.AIbase import Provider, AsyncProvider
from webscout import exceptions
from webscout.litagent import LitAgent

class HeckAI(Provider):
    """
    Provides an interface to interact with the HeckAI API using a LitAgent user-agent.

    This class supports conversational AI interactions with multiple available models,
    manages session state, handles streaming and non-streaming responses, and integrates
    with conversation history and prompt optimizers.

    Attributes:
        AVAILABLE_MODELS (list): List of supported model identifiers.
        url (str): API endpoint URL.
        session_id (str): Unique session identifier for the conversation.
        language (str): Language for the conversation.
        headers (dict): HTTP headers used for API requests.
        session (Session): curl_cffi session for HTTP requests.
        is_conversation (bool): Whether to maintain conversation history.
        max_tokens_to_sample (int): Maximum tokens to sample (not used by API).
        timeout (int): Request timeout in seconds.
        last_response (dict): Stores the last API response.
        model (str): Model identifier in use.
        previous_question (str): Last question sent to the API.
        previous_answer (str): Last answer received from the API.
        conversation (Conversation): Conversation history manager.
    """
    required_auth = False
    AVAILABLE_MODELS = [
        "google/gemini-2.5-flash-preview",
        "deepseek/deepseek-chat",
        "deepseek/deepseek-r1",
        "openai/gpt-4o-mini",
        "openai/gpt-4.1-mini",
        "x-ai/grok-3-mini-beta",
        "meta-llama/llama-4-scout",
        "openai/gpt-5-mini",
        "openai/gpt-5-nano"
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2049,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "google/gemini-2.5-flash-preview",
        language: str = "English"
    ):
        """
        Initializes the HeckAI API client.

        Args:
            is_conversation (bool): Whether to maintain conversation history.
            max_tokens (int): Maximum tokens to sample (not used by this API).
            timeout (int): Timeout for API requests in seconds.
            intro (str, optional): Introductory prompt for the conversation.
            filepath (str, optional): File path for storing conversation history.
            update_file (bool): Whether to update the conversation file.
            proxies (dict): Proxy settings for HTTP requests.
            history_offset (int): Offset for conversation history truncation.
            act (str, optional): Role or act for the conversation.
            model (str): Model identifier to use.
            language (str): Language for the conversation.

        Raises:
            ValueError: If the provided model is not in AVAILABLE_MODELS.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.url = "https://api.heckai.weight-wave.com/api/ha/v1/chat"
        self.session_id = str(uuid.uuid4())
        self.language = language

        # Use LitAgent (keep if needed for other headers or logic)
        self.headers = {
            'Content-Type': 'application/json',
            'Origin': 'https://heck.ai',  # Keep Origin
            'Referer': 'https://heck.ai/',  # Keep Referer
            'User-Agent': LitAgent().random(),  # Use random user agent
        }

        # Initialize curl_cffi Session
        self.session = Session()
        # Update curl_cffi session headers and proxies
        self.session.headers.update(self.headers)
        self.session.proxies = proxies  # Assign proxies directly

        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.previous_question = None
        self.previous_answer = None

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        """
        Sends a prompt to the HeckAI API and returns the response.

        Args:
            prompt (str): The prompt or question to send to the API.
            stream (bool): If True, yields streaming responses as they arrive.
            raw (bool): If True, yields raw string chunks instead of dicts.
            optimizer (str, optional): Name of the optimizer to apply to the prompt.
            conversationally (bool): If True, optimizer is applied to the full conversation prompt.

        Returns:
            Union[Dict[str, Any], Generator]: If stream is False, returns a dict with the response text.
                If stream is True, yields response chunks as dicts or strings.

        Raises:
            Exception: If the optimizer is not available.
            exceptions.FailedToGenerateResponseError: On API or network errors.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        # Payload construction
        payload = {
            "model": self.model,
            "question": conversation_prompt,
            "language": self.language,
            "sessionId": self.session_id,
            "previousQuestion": self.previous_question,
            "previousAnswer": self.previous_answer,
            "imgUrls": [],
            "superSmartMode": False  # Added based on API request data
        }

        # Store this message as previous for next request
        self.previous_question = conversation_prompt

        def for_stream():
            streaming_text = ""  # Initialize outside try block
            try:
                response = self.session.post(
                    self.url,
                    data=json.dumps(payload),
                    stream=True,
                    timeout=self.timeout,
                    impersonate="chrome110"
                )
                response.raise_for_status()

                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=1024),
                    intro_value="data: ",
                    to_json=False,
                    start_marker="data: [ANSWER_START]",
                    end_marker="data: [ANSWER_DONE]",
                    skip_markers=["data: [RELATE_Q_START]", "data: [RELATE_Q_DONE]", "data: [REASON_START]", "data: [REASON_DONE]"],
                    yield_raw_on_error=True,
                    strip_chars=" \n\r\t",
                    raw=raw
                )

                for content_chunk in processed_stream:
                    if content_chunk and isinstance(content_chunk, str):
                        content_chunk = content_chunk.replace('\\\\', '\\').replace('\\"', '"')
                    if raw:
                        if content_chunk and isinstance(content_chunk, str):
                            streaming_text += content_chunk
                            yield content_chunk
                    else:
                        if content_chunk and isinstance(content_chunk, str):
                            streaming_text += content_chunk
                            yield dict(text=content_chunk)

                # Only update history if we received a valid response
                if streaming_text:
                    self.previous_answer = streaming_text
                    try:
                        if streaming_text and isinstance(streaming_text, str):
                            sanitized_text = streaming_text.strip()
                            if sanitized_text:
                                self.conversation.update_chat_history(prompt, sanitized_text)
                    except Exception as e:
                        print(f"Warning: Failed to update conversation history: {str(e)}")
            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
            except Exception as e:
                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)} - {err_text}") from e

        def for_non_stream():
            full_text = ""
            try:
                for chunk_data in for_stream():
                    if raw:
                        if isinstance(chunk_data, str):
                            chunk_data = chunk_data.replace('\\\\', '\\').replace('\\"', '"')
                            full_text += chunk_data
                    else:
                        if isinstance(chunk_data, dict) and "text" in chunk_data:
                            text = chunk_data["text"].replace('\\\\', '\\').replace('\\"', '"')
                            full_text += text
            except Exception as e:
                if not full_text:
                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
            self.last_response = {"text": full_text}
            return full_text if raw else self.last_response

        return for_stream() if stream else for_non_stream()

    @staticmethod
    def fix_encoding(text):
        """
        Fixes encoding issues in the response text.

        Args:
            text (Union[str, dict]): The text or response dict to fix encoding for.

        Returns:
            Union[str, dict]: The text or dict with encoding corrected if possible.
        """
        if isinstance(text, dict) and "text" in text:
            try:
                text["text"] = text["text"].encode("latin1").decode("utf-8")
                return text.replace('\\\\', '\\').replace('\\"', '"')  # Handle escaped backslashes
            except (UnicodeError, AttributeError) as e:
                return text.replace('\\\\', '\\').replace('\\"', '"')  # Handle escaped backslashes
        elif isinstance(text, str):
            try:
                return text.encode("latin1").decode("utf-8")
            except (UnicodeError, AttributeError) as e:
                return text.replace('\\\\', '\\').replace('\\"', '"')  # Handle escaped backslashes
        return text.replace('\\\\', '\\').replace('\\"', '"')  # Handle escaped backslashes

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        raw: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """
        Sends a prompt to the HeckAI API and returns only the message text.

        Args:
            prompt (str): The prompt or question to send to the API.
            stream (bool): If True, yields streaming response text.
            optimizer (str, optional): Name of the optimizer to apply to the prompt.
            conversationally (bool): If True, optimizer is applied to the full conversation prompt.

        Returns:
            Union[str, Generator[str, None, None]]: The response text, or a generator yielding text chunks.
        """
        def for_stream_chat():
            # ask() yields dicts or strings when streaming
            gen = self.ask(
                prompt, stream=True, raw=raw,
                optimizer=optimizer, conversationally=conversationally
            )
            for response in gen:
                if raw:
                    yield response
                else:
                    yield self.get_message(response)

        def for_non_stream_chat():
            # ask() returns dict or str when not streaming
            response_data = self.ask(
                prompt, stream=False, raw=raw,
                optimizer=optimizer, conversationally=conversationally
            )
            if raw:
                return response_data if isinstance(response_data, str) else str(response_data)
            return self.get_message(response_data)  # get_message expects dict

        return for_stream_chat() if stream else for_non_stream_chat()

    def get_message(self, response: dict) -> str:
        """
        Extracts the message text from the API response.

        Args:
            response (dict): The API response dictionary.

        Returns:
            str: The extracted message text. Returns an empty string if not found.

        Raises:
            TypeError: If the response is not a dictionary.
        """
        # Validate response format
        if not isinstance(response, dict):
            raise TypeError(f"Expected dict response, got {type(response).__name__}")

        # Handle missing text key gracefully
        if "text" not in response:
            return ""

        # Ensure text is a string
        text = response["text"]
        if not isinstance(text, str):
            text = str(text)

        return text.replace('\\\\', '\\').replace('\\"', '"')

if __name__ == "__main__":
    # # Ensure curl_cffi is installed
    # print("-" * 80)
    # print(f"{'Model':<50} {'Status':<10} {'Response'}")
    # print("-" * 80)

    # for model in HeckAI.AVAILABLE_MODELS:
    #     try:
    #         test_ai = HeckAI(model=model, timeout=60)
    #         # Use non-streaming mode first to avoid potential streaming issues
    #         try:
    #             response_text = test_ai.chat("Say 'Hello' in one word", stream=False)
    #             print(f"\r{model:<50} {'✓':<10} {response_text.strip()[:50]}")
    #         except Exception as e1:
    #             # Fall back to streaming if non-streaming fails
    #             print(f"\r{model:<50} {'Testing stream...':<10}", end="", flush=True)
    #             response = test_ai.chat("Say 'Hello' in one word", stream=True)
    #             response_text = ""
    #             for chunk in response:
    #                 if chunk and isinstance(chunk, str):
    #                     response_text += chunk

    #             if response_text and len(response_text.strip()) > 0:
    #                 status = "✓"
    #                 # Truncate response if too long
    #                 display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
    #                 print(f"\r{model:<50} {status:<10} {display_text}")
    #             else:
    #                 raise ValueError("Empty or invalid response")
    #     except Exception as e:
    #         print(f"\r{model:<50} {'✗':<10} {str(e)}")
    from rich import print
    ai = HeckAI(model="openai/gpt-5-nano")
    response = ai.chat("tell me about humans", stream=True, raw=True)
    for chunk in response:
        print(chunk, end='', flush=True)
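
For orientation, here is a minimal usage sketch for the provider listed above. It is not part of the package diff: the import path follows the file location webscout/Provider/HeckAI.py, the model name is one of the AVAILABLE_MODELS entries, and the prompts are illustrative.

from webscout.Provider.HeckAI import HeckAI

# Illustrative sketch (not from the diff): assumes webscout 2025.10.11 is installed
# and the HeckAI endpoint is reachable.
ai = HeckAI(model="openai/gpt-4o-mini", timeout=60)

# Non-streaming: chat() returns the full response text as a string.
print(ai.chat("Say 'Hello' in one word"))

# Streaming: chat(stream=True) yields decoded text chunks as they arrive.
for chunk in ai.chat("Briefly describe HTTP.", stream=True):
    print(chunk, end="", flush=True)

Passing raw=True to chat() yields the unprocessed string chunks instead, as in the __main__ block at the end of the file.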