webscout-8.3.6-py3-none-any.whl → webscout-2025.10.11-py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of webscout might be problematic.

Files changed (304)
  1. webscout/AIauto.py +250 -250
  2. webscout/AIbase.py +379 -379
  3. webscout/AIutel.py +60 -58
  4. webscout/Bard.py +1012 -1012
  5. webscout/Bing_search.py +417 -417
  6. webscout/DWEBS.py +529 -529
  7. webscout/Extra/Act.md +309 -309
  8. webscout/Extra/GitToolkit/__init__.py +10 -10
  9. webscout/Extra/GitToolkit/gitapi/README.md +110 -110
  10. webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
  11. webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
  12. webscout/Extra/GitToolkit/gitapi/user.py +96 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
  14. webscout/Extra/YTToolkit/README.md +375 -375
  15. webscout/Extra/YTToolkit/YTdownloader.py +956 -956
  16. webscout/Extra/YTToolkit/__init__.py +2 -2
  17. webscout/Extra/YTToolkit/transcriber.py +475 -475
  18. webscout/Extra/YTToolkit/ytapi/README.md +44 -44
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
  20. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  21. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  22. webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
  23. webscout/Extra/YTToolkit/ytapi/https.py +88 -88
  24. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  25. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  26. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  27. webscout/Extra/YTToolkit/ytapi/query.py +39 -39
  28. webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
  29. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  30. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  31. webscout/Extra/autocoder/__init__.py +9 -9
  32. webscout/Extra/autocoder/autocoder.py +1105 -1105
  33. webscout/Extra/autocoder/autocoder_utiles.py +332 -332
  34. webscout/Extra/gguf.md +429 -429
  35. webscout/Extra/gguf.py +1213 -1213
  36. webscout/Extra/tempmail/README.md +487 -487
  37. webscout/Extra/tempmail/__init__.py +27 -27
  38. webscout/Extra/tempmail/async_utils.py +140 -140
  39. webscout/Extra/tempmail/base.py +160 -160
  40. webscout/Extra/tempmail/cli.py +186 -186
  41. webscout/Extra/tempmail/emailnator.py +84 -84
  42. webscout/Extra/tempmail/mail_tm.py +360 -360
  43. webscout/Extra/tempmail/temp_mail_io.py +291 -291
  44. webscout/Extra/weather.md +281 -281
  45. webscout/Extra/weather.py +193 -193
  46. webscout/Litlogger/README.md +10 -10
  47. webscout/Litlogger/__init__.py +15 -15
  48. webscout/Litlogger/formats.py +13 -13
  49. webscout/Litlogger/handlers.py +121 -121
  50. webscout/Litlogger/levels.py +13 -13
  51. webscout/Litlogger/logger.py +134 -134
  52. webscout/Provider/AISEARCH/Perplexity.py +332 -332
  53. webscout/Provider/AISEARCH/README.md +279 -279
  54. webscout/Provider/AISEARCH/__init__.py +33 -11
  55. webscout/Provider/AISEARCH/felo_search.py +206 -206
  56. webscout/Provider/AISEARCH/genspark_search.py +323 -323
  57. webscout/Provider/AISEARCH/hika_search.py +185 -185
  58. webscout/Provider/AISEARCH/iask_search.py +410 -410
  59. webscout/Provider/AISEARCH/monica_search.py +219 -219
  60. webscout/Provider/AISEARCH/scira_search.py +316 -314
  61. webscout/Provider/AISEARCH/stellar_search.py +177 -177
  62. webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
  63. webscout/Provider/Aitopia.py +314 -315
  64. webscout/Provider/Andi.py +3 -3
  65. webscout/Provider/Apriel.py +306 -0
  66. webscout/Provider/ChatGPTClone.py +236 -236
  67. webscout/Provider/ChatSandbox.py +343 -342
  68. webscout/Provider/Cloudflare.py +324 -324
  69. webscout/Provider/Cohere.py +208 -207
  70. webscout/Provider/Deepinfra.py +370 -369
  71. webscout/Provider/ExaAI.py +260 -260
  72. webscout/Provider/ExaChat.py +308 -387
  73. webscout/Provider/Flowith.py +221 -221
  74. webscout/Provider/GMI.py +293 -0
  75. webscout/Provider/Gemini.py +164 -162
  76. webscout/Provider/GeminiProxy.py +167 -166
  77. webscout/Provider/GithubChat.py +371 -370
  78. webscout/Provider/Groq.py +800 -800
  79. webscout/Provider/HeckAI.py +383 -379
  80. webscout/Provider/Jadve.py +282 -297
  81. webscout/Provider/K2Think.py +308 -0
  82. webscout/Provider/Koboldai.py +206 -384
  83. webscout/Provider/LambdaChat.py +423 -425
  84. webscout/Provider/Nemotron.py +244 -245
  85. webscout/Provider/Netwrck.py +248 -247
  86. webscout/Provider/OLLAMA.py +395 -394
  87. webscout/Provider/OPENAI/Cloudflare.py +394 -395
  88. webscout/Provider/OPENAI/FalconH1.py +452 -457
  89. webscout/Provider/OPENAI/FreeGemini.py +297 -299
  90. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +432 -329
  91. webscout/Provider/OPENAI/NEMOTRON.py +241 -244
  92. webscout/Provider/OPENAI/PI.py +428 -427
  93. webscout/Provider/OPENAI/README.md +959 -959
  94. webscout/Provider/OPENAI/TogetherAI.py +345 -345
  95. webscout/Provider/OPENAI/TwoAI.py +466 -467
  96. webscout/Provider/OPENAI/__init__.py +33 -59
  97. webscout/Provider/OPENAI/ai4chat.py +313 -303
  98. webscout/Provider/OPENAI/base.py +249 -269
  99. webscout/Provider/OPENAI/chatglm.py +528 -0
  100. webscout/Provider/OPENAI/chatgpt.py +593 -588
  101. webscout/Provider/OPENAI/chatgptclone.py +521 -524
  102. webscout/Provider/OPENAI/chatsandbox.py +202 -177
  103. webscout/Provider/OPENAI/deepinfra.py +319 -315
  104. webscout/Provider/OPENAI/e2b.py +1665 -1665
  105. webscout/Provider/OPENAI/exaai.py +420 -420
  106. webscout/Provider/OPENAI/exachat.py +452 -452
  107. webscout/Provider/OPENAI/friendli.py +232 -232
  108. webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
  109. webscout/Provider/OPENAI/groq.py +364 -364
  110. webscout/Provider/OPENAI/heckai.py +314 -311
  111. webscout/Provider/OPENAI/llmchatco.py +337 -337
  112. webscout/Provider/OPENAI/netwrck.py +355 -354
  113. webscout/Provider/OPENAI/oivscode.py +290 -290
  114. webscout/Provider/OPENAI/opkfc.py +518 -518
  115. webscout/Provider/OPENAI/pydantic_imports.py +1 -1
  116. webscout/Provider/OPENAI/scirachat.py +535 -529
  117. webscout/Provider/OPENAI/sonus.py +308 -308
  118. webscout/Provider/OPENAI/standardinput.py +442 -442
  119. webscout/Provider/OPENAI/textpollinations.py +340 -348
  120. webscout/Provider/OPENAI/toolbaz.py +419 -413
  121. webscout/Provider/OPENAI/typefully.py +362 -362
  122. webscout/Provider/OPENAI/utils.py +295 -295
  123. webscout/Provider/OPENAI/venice.py +436 -436
  124. webscout/Provider/OPENAI/wisecat.py +387 -387
  125. webscout/Provider/OPENAI/writecream.py +166 -166
  126. webscout/Provider/OPENAI/x0gpt.py +378 -378
  127. webscout/Provider/OPENAI/yep.py +389 -389
  128. webscout/Provider/OpenGPT.py +230 -230
  129. webscout/Provider/Openai.py +244 -496
  130. webscout/Provider/PI.py +405 -404
  131. webscout/Provider/Perplexitylabs.py +430 -431
  132. webscout/Provider/QwenLM.py +272 -254
  133. webscout/Provider/STT/__init__.py +32 -2
  134. webscout/Provider/{Llama3.py → Sambanova.py} +257 -258
  135. webscout/Provider/StandardInput.py +309 -309
  136. webscout/Provider/TTI/README.md +82 -82
  137. webscout/Provider/TTI/__init__.py +33 -12
  138. webscout/Provider/TTI/aiarta.py +413 -413
  139. webscout/Provider/TTI/base.py +136 -136
  140. webscout/Provider/TTI/bing.py +243 -243
  141. webscout/Provider/TTI/gpt1image.py +149 -149
  142. webscout/Provider/TTI/imagen.py +196 -196
  143. webscout/Provider/TTI/infip.py +211 -211
  144. webscout/Provider/TTI/magicstudio.py +232 -232
  145. webscout/Provider/TTI/monochat.py +219 -219
  146. webscout/Provider/TTI/piclumen.py +214 -214
  147. webscout/Provider/TTI/pixelmuse.py +232 -232
  148. webscout/Provider/TTI/pollinations.py +232 -232
  149. webscout/Provider/TTI/together.py +288 -288
  150. webscout/Provider/TTI/utils.py +12 -12
  151. webscout/Provider/TTI/venice.py +367 -367
  152. webscout/Provider/TTS/README.md +192 -192
  153. webscout/Provider/TTS/__init__.py +33 -10
  154. webscout/Provider/TTS/parler.py +110 -110
  155. webscout/Provider/TTS/streamElements.py +333 -333
  156. webscout/Provider/TTS/utils.py +280 -280
  157. webscout/Provider/TeachAnything.py +237 -236
  158. webscout/Provider/TextPollinationsAI.py +311 -318
  159. webscout/Provider/TogetherAI.py +356 -357
  160. webscout/Provider/TwoAI.py +313 -569
  161. webscout/Provider/TypliAI.py +312 -311
  162. webscout/Provider/UNFINISHED/ChatHub.py +208 -208
  163. webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
  164. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +294 -294
  165. webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +198 -198
  166. webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +477 -477
  167. webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
  168. webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +324 -324
  169. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  170. webscout/Provider/UNFINISHED/liner.py +334 -0
  171. webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
  172. webscout/Provider/UNFINISHED/puterjs.py +634 -634
  173. webscout/Provider/UNFINISHED/samurai.py +223 -223
  174. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  175. webscout/Provider/Venice.py +251 -250
  176. webscout/Provider/VercelAI.py +256 -255
  177. webscout/Provider/WiseCat.py +232 -231
  178. webscout/Provider/WrDoChat.py +367 -366
  179. webscout/Provider/__init__.py +33 -86
  180. webscout/Provider/ai4chat.py +174 -174
  181. webscout/Provider/akashgpt.py +331 -334
  182. webscout/Provider/cerebras.py +446 -340
  183. webscout/Provider/chatglm.py +394 -214
  184. webscout/Provider/cleeai.py +211 -212
  185. webscout/Provider/deepseek_assistant.py +1 -1
  186. webscout/Provider/elmo.py +282 -282
  187. webscout/Provider/geminiapi.py +208 -208
  188. webscout/Provider/granite.py +261 -261
  189. webscout/Provider/hermes.py +263 -265
  190. webscout/Provider/julius.py +223 -222
  191. webscout/Provider/learnfastai.py +309 -309
  192. webscout/Provider/llama3mitril.py +214 -214
  193. webscout/Provider/llmchat.py +243 -243
  194. webscout/Provider/llmchatco.py +290 -290
  195. webscout/Provider/meta.py +801 -801
  196. webscout/Provider/oivscode.py +309 -309
  197. webscout/Provider/scira_chat.py +384 -457
  198. webscout/Provider/searchchat.py +292 -291
  199. webscout/Provider/sonus.py +258 -258
  200. webscout/Provider/toolbaz.py +370 -364
  201. webscout/Provider/turboseek.py +274 -265
  202. webscout/Provider/typefully.py +208 -207
  203. webscout/Provider/x0gpt.py +1 -0
  204. webscout/Provider/yep.py +372 -371
  205. webscout/__init__.py +30 -31
  206. webscout/__main__.py +5 -5
  207. webscout/auth/api_key_manager.py +189 -189
  208. webscout/auth/config.py +175 -175
  209. webscout/auth/models.py +185 -185
  210. webscout/auth/routes.py +664 -664
  211. webscout/auth/simple_logger.py +236 -236
  212. webscout/cli.py +523 -523
  213. webscout/conversation.py +438 -438
  214. webscout/exceptions.py +361 -361
  215. webscout/litagent/Readme.md +298 -298
  216. webscout/litagent/__init__.py +28 -28
  217. webscout/litagent/agent.py +581 -581
  218. webscout/litagent/constants.py +59 -59
  219. webscout/litprinter/__init__.py +58 -58
  220. webscout/models.py +181 -181
  221. webscout/optimizers.py +419 -419
  222. webscout/prompt_manager.py +288 -288
  223. webscout/sanitize.py +1078 -1078
  224. webscout/scout/README.md +401 -401
  225. webscout/scout/__init__.py +8 -8
  226. webscout/scout/core/__init__.py +6 -6
  227. webscout/scout/core/crawler.py +297 -297
  228. webscout/scout/core/scout.py +706 -706
  229. webscout/scout/core/search_result.py +95 -95
  230. webscout/scout/core/text_analyzer.py +62 -62
  231. webscout/scout/core/text_utils.py +277 -277
  232. webscout/scout/core/web_analyzer.py +51 -51
  233. webscout/scout/element.py +599 -599
  234. webscout/scout/parsers/__init__.py +69 -69
  235. webscout/scout/parsers/html5lib_parser.py +172 -172
  236. webscout/scout/parsers/html_parser.py +236 -236
  237. webscout/scout/parsers/lxml_parser.py +178 -178
  238. webscout/scout/utils.py +37 -37
  239. webscout/swiftcli/Readme.md +323 -323
  240. webscout/swiftcli/__init__.py +95 -95
  241. webscout/swiftcli/core/__init__.py +7 -7
  242. webscout/swiftcli/core/cli.py +308 -308
  243. webscout/swiftcli/core/context.py +104 -104
  244. webscout/swiftcli/core/group.py +241 -241
  245. webscout/swiftcli/decorators/__init__.py +28 -28
  246. webscout/swiftcli/decorators/command.py +221 -221
  247. webscout/swiftcli/decorators/options.py +220 -220
  248. webscout/swiftcli/decorators/output.py +302 -302
  249. webscout/swiftcli/exceptions.py +21 -21
  250. webscout/swiftcli/plugins/__init__.py +9 -9
  251. webscout/swiftcli/plugins/base.py +135 -135
  252. webscout/swiftcli/plugins/manager.py +269 -269
  253. webscout/swiftcli/utils/__init__.py +59 -59
  254. webscout/swiftcli/utils/formatting.py +252 -252
  255. webscout/swiftcli/utils/parsing.py +267 -267
  256. webscout/update_checker.py +117 -117
  257. webscout/version.py +1 -1
  258. webscout/webscout_search.py +1183 -1183
  259. webscout/webscout_search_async.py +649 -649
  260. webscout/yep_search.py +346 -346
  261. webscout/zeroart/README.md +89 -89
  262. webscout/zeroart/__init__.py +134 -134
  263. webscout/zeroart/base.py +66 -66
  264. webscout/zeroart/effects.py +100 -100
  265. webscout/zeroart/fonts.py +1238 -1238
  266. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -936
  267. webscout-2025.10.11.dist-info/RECORD +300 -0
  268. webscout/Provider/AISEARCH/DeepFind.py +0 -254
  269. webscout/Provider/AllenAI.py +0 -440
  270. webscout/Provider/Blackboxai.py +0 -793
  271. webscout/Provider/FreeGemini.py +0 -250
  272. webscout/Provider/GptOss.py +0 -207
  273. webscout/Provider/Hunyuan.py +0 -283
  274. webscout/Provider/Kimi.py +0 -445
  275. webscout/Provider/MCPCore.py +0 -322
  276. webscout/Provider/MiniMax.py +0 -207
  277. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  278. webscout/Provider/OPENAI/MiniMax.py +0 -298
  279. webscout/Provider/OPENAI/Qwen3.py +0 -304
  280. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  281. webscout/Provider/OPENAI/copilot.py +0 -321
  282. webscout/Provider/OPENAI/gptoss.py +0 -288
  283. webscout/Provider/OPENAI/kimi.py +0 -469
  284. webscout/Provider/OPENAI/mcpcore.py +0 -431
  285. webscout/Provider/OPENAI/multichat.py +0 -378
  286. webscout/Provider/OPENAI/qodo.py +0 -630
  287. webscout/Provider/OPENAI/xenai.py +0 -514
  288. webscout/Provider/Reka.py +0 -214
  289. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  290. webscout/Provider/asksteve.py +0 -220
  291. webscout/Provider/copilot.py +0 -441
  292. webscout/Provider/freeaichat.py +0 -294
  293. webscout/Provider/koala.py +0 -182
  294. webscout/Provider/lmarena.py +0 -198
  295. webscout/Provider/monochat.py +0 -275
  296. webscout/Provider/multichat.py +0 -375
  297. webscout/Provider/scnet.py +0 -244
  298. webscout/Provider/talkai.py +0 -194
  299. webscout/tempid.py +0 -128
  300. webscout-8.3.6.dist-info/RECORD +0 -327
  301. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
  302. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
  303. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
  304. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
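
Several provider modules move or disappear in this release: GizAI.py, Marcus.py, Qodo.py, and XenAI.py are relocated under Provider/UNFINISHED/, while modules such as Kimi.py, copilot.py, and Blackboxai.py are deleted outright. A minimal migration sketch in Python, assuming each module exports a class named after the module (a hypothetical convention; verify against the installed package):

# Guarded import that tolerates the module move shown in the file list above.
# The class name GizAI is assumed to match its module name (hypothetical).
try:
    from webscout.Provider.UNFINISHED.GizAI import GizAI  # 2025.10.11 layout
except ImportError:
    from webscout.Provider.GizAI import GizAI  # 8.3.6 layout

The same guard pattern applies to the other relocated or removed modules.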
webscout/Provider/HeckAI.py
@@ -1,380 +1,384 @@
- from curl_cffi.requests import Session
- from curl_cffi import CurlError
- import json
- import uuid
- from typing import Any, Dict, Optional, Generator, Union
-
- from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts, sanitize_stream
- from webscout.AIbase import Provider, AsyncProvider
- from webscout import exceptions
- from webscout.litagent import LitAgent
-
- class HeckAI(Provider):
-     """
-     Provides an interface to interact with the HeckAI API using a LitAgent user-agent.
-
-     This class supports conversational AI interactions with multiple available models,
-     manages session state, handles streaming and non-streaming responses, and integrates
-     with conversation history and prompt optimizers.
-
-     Attributes:
-         AVAILABLE_MODELS (list): List of supported model identifiers.
-         url (str): API endpoint URL.
-         session_id (str): Unique session identifier for the conversation.
-         language (str): Language for the conversation.
-         headers (dict): HTTP headers used for API requests.
-         session (Session): curl_cffi session for HTTP requests.
-         is_conversation (bool): Whether to maintain conversation history.
-         max_tokens_to_sample (int): Maximum tokens to sample (not used by API).
-         timeout (int): Request timeout in seconds.
-         last_response (dict): Stores the last API response.
-         model (str): Model identifier in use.
-         previous_question (str): Last question sent to the API.
-         previous_answer (str): Last answer received from the API.
-         conversation (Conversation): Conversation history manager.
-     """
-
-     AVAILABLE_MODELS = [
-         "google/gemini-2.5-flash-preview",
-         "deepseek/deepseek-chat",
-         "deepseek/deepseek-r1",
-         "openai/gpt-4o-mini",
-         "openai/gpt-4.1-mini",
-         "x-ai/grok-3-mini-beta",
-         "meta-llama/llama-4-scout"
-     ]
-
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 2049,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         model: str = "google/gemini-2.5-flash-preview",
-         language: str = "English"
-     ):
-         """
-         Initializes the HeckAI API client.
-
-         Args:
-             is_conversation (bool): Whether to maintain conversation history.
-             max_tokens (int): Maximum tokens to sample (not used by this API).
-             timeout (int): Timeout for API requests in seconds.
-             intro (str, optional): Introductory prompt for the conversation.
-             filepath (str, optional): File path for storing conversation history.
-             update_file (bool): Whether to update the conversation file.
-             proxies (dict): Proxy settings for HTTP requests.
-             history_offset (int): Offset for conversation history truncation.
-             act (str, optional): Role or act for the conversation.
-             model (str): Model identifier to use.
-             language (str): Language for the conversation.
-
-         Raises:
-             ValueError: If the provided model is not in AVAILABLE_MODELS.
-         """
-         if model not in self.AVAILABLE_MODELS:
-             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-         self.url = "https://api.heckai.weight-wave.com/api/ha/v1/chat"
-         self.session_id = str(uuid.uuid4())
-         self.language = language
-
-         # Use LitAgent (keep if needed for other headers or logic)
-         self.headers = {
-             'Content-Type': 'application/json',
-             'Origin': 'https://heck.ai', # Keep Origin
-             'Referer': 'https://heck.ai/', # Keep Referer
-             'User-Agent': LitAgent().random(), # Use random user agent
-         }
-
-         # Initialize curl_cffi Session
-         self.session = Session()
-         # Update curl_cffi session headers and proxies
-         self.session.headers.update(self.headers)
-         self.session.proxies = proxies # Assign proxies directly
-
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.timeout = timeout
-         self.last_response = {}
-         self.model = model
-         self.previous_question = None
-         self.previous_answer = None
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Union[Dict[str, Any], Generator]:
-         """
-         Sends a prompt to the HeckAI API and returns the response.
-
-         Args:
-             prompt (str): The prompt or question to send to the API.
-             stream (bool): If True, yields streaming responses as they arrive.
-             raw (bool): If True, yields raw string chunks instead of dicts.
-             optimizer (str, optional): Name of the optimizer to apply to the prompt.
-             conversationally (bool): If True, optimizer is applied to the full conversation prompt.
-
-         Returns:
-             Union[Dict[str, Any], Generator]: If stream is False, returns a dict with the response text.
-                 If stream is True, yields response chunks as dicts or strings.
-
-         Raises:
-             Exception: If the optimizer is not available.
-             exceptions.FailedToGenerateResponseError: On API or network errors.
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
-             else:
-                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-         # Payload construction
-         payload = {
-             "model": self.model,
-             "question": conversation_prompt,
-             "language": self.language,
-             "sessionId": self.session_id,
-             "previousQuestion": self.previous_question,
-             "previousAnswer": self.previous_answer,
-             "imgUrls": [],
-             "superSmartMode": False # Added based on API request data
-         }
-
-         # Store this message as previous for next request
-         self.previous_question = conversation_prompt
-
-         def for_stream():
-             streaming_text = "" # Initialize outside try block
-             try:
-                 response = self.session.post(
-                     self.url,
-                     data=json.dumps(payload),
-                     stream=True,
-                     timeout=self.timeout,
-                     impersonate="chrome110"
-                 )
-                 response.raise_for_status()
-
-                 processed_stream = sanitize_stream(
-                     data=response.iter_content(chunk_size=1024),
-                     intro_value="data: ",
-                     to_json=False,
-                     start_marker="data: [ANSWER_START]",
-                     end_marker="data: [ANSWER_DONE]",
-                     skip_markers=["data: [RELATE_Q_START]", "data: [RELATE_Q_DONE]", "data: [REASON_START]", "data: [REASON_DONE]"],
-                     yield_raw_on_error=True,
-                     strip_chars=" \n\r\t",
-                     raw=raw
-                 )
-
-                 for content_chunk in processed_stream:
-                     if content_chunk and isinstance(content_chunk, str):
-                         content_chunk = content_chunk.replace('\\\\', '\\').replace('\\"', '"')
-                     if raw:
-                         if content_chunk and isinstance(content_chunk, str):
-                             streaming_text += content_chunk
-                             yield content_chunk
-                     else:
-                         if content_chunk and isinstance(content_chunk, str):
-                             streaming_text += content_chunk
-                             yield dict(text=content_chunk)
-
-                 # Only update history if we received a valid response
-                 if streaming_text:
-                     self.previous_answer = streaming_text
-                     try:
-                         if streaming_text and isinstance(streaming_text, str):
-                             sanitized_text = streaming_text.strip()
-                             if sanitized_text:
-                                 self.conversation.update_chat_history(prompt, sanitized_text)
-                     except Exception as e:
-                         print(f"Warning: Failed to update conversation history: {str(e)}")
-             except CurlError as e:
-                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
-             except Exception as e:
-                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
-                 raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)} - {err_text}") from e
-
-         def for_non_stream():
-             full_text = ""
-             try:
-                 for chunk_data in for_stream():
-                     if raw:
-                         if isinstance(chunk_data, str):
-                             chunk_data = chunk_data.replace('\\\\', '\\').replace('\\"', '"')
-                             full_text += chunk_data
-                     else:
-                         if isinstance(chunk_data, dict) and "text" in chunk_data:
-                             text = chunk_data["text"].replace('\\\\', '\\').replace('\\"', '"')
-                             full_text += text
-             except Exception as e:
-                 if not full_text:
-                     raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
-             self.last_response = {"text": full_text}
-             return full_text if raw else self.last_response
-
-         return for_stream() if stream else for_non_stream()
-
-     @staticmethod
-     def fix_encoding(text):
-         """
-         Fixes encoding issues in the response text.
-
-         Args:
-             text (Union[str, dict]): The text or response dict to fix encoding for.
-
-         Returns:
-             Union[str, dict]: The text or dict with encoding corrected if possible.
-         """
-         if isinstance(text, dict) and "text" in text:
-             try:
-                 text["text"] = text["text"].encode("latin1").decode("utf-8")
-                 return text.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes
-             except (UnicodeError, AttributeError) as e:
-                 return text.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes
-         elif isinstance(text, str):
-             try:
-                 return text.encode("latin1").decode("utf-8")
-             except (UnicodeError, AttributeError) as e:
-                 return text.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes
-         return text.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-         raw: bool = False,
-     ) -> Union[str, Generator[str, None, None]]:
-         """
-         Sends a prompt to the HeckAI API and returns only the message text.
-
-         Args:
-             prompt (str): The prompt or question to send to the API.
-             stream (bool): If True, yields streaming response text.
-             optimizer (str, optional): Name of the optimizer to apply to the prompt.
-             conversationally (bool): If True, optimizer is applied to the full conversation prompt.
-
-         Returns:
-             Union[str, Generator[str, None, None]]: The response text, or a generator yielding text chunks.
-         """
-         def for_stream_chat():
-             # ask() yields dicts or strings when streaming
-             gen = self.ask(
-                 prompt, stream=True, raw=raw,
-                 optimizer=optimizer, conversationally=conversationally
-             )
-             for response in gen:
-                 if raw:
-                     yield response
-                 else:
-                     yield self.get_message(response)
-
-         def for_non_stream_chat():
-             # ask() returns dict or str when not streaming
-             response_data = self.ask(
-                 prompt, stream=False, raw=raw,
-                 optimizer=optimizer, conversationally=conversationally
-             )
-             if raw:
-                 return response_data if isinstance(response_data, str) else str(response_data)
-             return self.get_message(response_data) # get_message expects dict
-
-         return for_stream_chat() if stream else for_non_stream_chat()
-
-     def get_message(self, response: dict) -> str:
-         """
-         Extracts the message text from the API response.
-
-         Args:
-             response (dict): The API response dictionary.
-
-         Returns:
-             str: The extracted message text. Returns an empty string if not found.
-
-         Raises:
-             TypeError: If the response is not a dictionary.
-         """
-         # Validate response format
-         if not isinstance(response, dict):
-             raise TypeError(f"Expected dict response, got {type(response).__name__}")
-
-         # Handle missing text key gracefully
-         if "text" not in response:
-             return ""
-
-         # Ensure text is a string
-         text = response["text"]
-         if not isinstance(text, str):
-             text = str(text)
-
-         return text.replace('\\\\', '\\').replace('\\"', '"')
-
- if __name__ == "__main__":
-     # # Ensure curl_cffi is installed
-     # print("-" * 80)
-     # print(f"{'Model':<50} {'Status':<10} {'Response'}")
-     # print("-" * 80)
-
-     # for model in HeckAI.AVAILABLE_MODELS:
-     #     try:
-     #         test_ai = HeckAI(model=model, timeout=60)
-     #         # Use non-streaming mode first to avoid potential streaming issues
-     #         try:
-     #             response_text = test_ai.chat("Say 'Hello' in one word", stream=False)
-     #             print(f"\r{model:<50} {'✓':<10} {response_text.strip()[:50]}")
-     #         except Exception as e1:
-     #             # Fall back to streaming if non-streaming fails
-     #             print(f"\r{model:<50} {'Testing stream...':<10}", end="", flush=True)
-     #             response = test_ai.chat("Say 'Hello' in one word", stream=True)
-     #             response_text = ""
-     #             for chunk in response:
-     #                 if chunk and isinstance(chunk, str):
-     #                     response_text += chunk
-
-     #             if response_text and len(response_text.strip()) > 0:
-     #                 status = "✓"
-     #                 # Truncate response if too long
-     #                 display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
-     #                 print(f"\r{model:<50} {status:<10} {display_text}")
-     #             else:
-     #                 raise ValueError("Empty or invalid response")
-     #     except Exception as e:
-     #         print(f"\r{model:<50} {'✗':<10} {str(e)}")
-     from rich import print
-     ai = HeckAI()
-     response = ai.chat("tell me about humans", stream=True, raw=False)
-     for chunk in response:
+ from curl_cffi.requests import Session
+ from curl_cffi import CurlError
+ import json
+ import uuid
+ from typing import Any, Dict, Optional, Generator, Union
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+ class HeckAI(Provider):
+     """
+     Provides an interface to interact with the HeckAI API using a LitAgent user-agent.
+
+     This class supports conversational AI interactions with multiple available models,
+     manages session state, handles streaming and non-streaming responses, and integrates
+     with conversation history and prompt optimizers.
+
+     Attributes:
+         AVAILABLE_MODELS (list): List of supported model identifiers.
+         url (str): API endpoint URL.
+         session_id (str): Unique session identifier for the conversation.
+         language (str): Language for the conversation.
+         headers (dict): HTTP headers used for API requests.
+         session (Session): curl_cffi session for HTTP requests.
+         is_conversation (bool): Whether to maintain conversation history.
+         max_tokens_to_sample (int): Maximum tokens to sample (not used by API).
+         timeout (int): Request timeout in seconds.
+         last_response (dict): Stores the last API response.
+         model (str): Model identifier in use.
+         previous_question (str): Last question sent to the API.
+         previous_answer (str): Last answer received from the API.
+         conversation (Conversation): Conversation history manager.
+     """
+     required_auth = False
+     AVAILABLE_MODELS = [
+         "google/gemini-2.5-flash-preview",
+         "deepseek/deepseek-chat",
+         "deepseek/deepseek-r1",
+         "openai/gpt-4o-mini",
+         "openai/gpt-4.1-mini",
+         "x-ai/grok-3-mini-beta",
+         "meta-llama/llama-4-scout",
+         "openai/gpt-5-mini",
+         "openai/gpt-5-nano"
+
+
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2049,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "google/gemini-2.5-flash-preview",
+         language: str = "English"
+     ):
+         """
+         Initializes the HeckAI API client.
+
+         Args:
+             is_conversation (bool): Whether to maintain conversation history.
+             max_tokens (int): Maximum tokens to sample (not used by this API).
+             timeout (int): Timeout for API requests in seconds.
+             intro (str, optional): Introductory prompt for the conversation.
+             filepath (str, optional): File path for storing conversation history.
+             update_file (bool): Whether to update the conversation file.
+             proxies (dict): Proxy settings for HTTP requests.
+             history_offset (int): Offset for conversation history truncation.
+             act (str, optional): Role or act for the conversation.
+             model (str): Model identifier to use.
+             language (str): Language for the conversation.
+
+         Raises:
+             ValueError: If the provided model is not in AVAILABLE_MODELS.
+         """
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.url = "https://api.heckai.weight-wave.com/api/ha/v1/chat"
+         self.session_id = str(uuid.uuid4())
+         self.language = language
+
+         # Use LitAgent (keep if needed for other headers or logic)
+         self.headers = {
+             'Content-Type': 'application/json',
+             'Origin': 'https://heck.ai', # Keep Origin
+             'Referer': 'https://heck.ai/', # Keep Referer
+             'User-Agent': LitAgent().random(), # Use random user agent
+         }
+
+         # Initialize curl_cffi Session
+         self.session = Session()
+         # Update curl_cffi session headers and proxies
+         self.session.headers.update(self.headers)
+         self.session.proxies = proxies # Assign proxies directly
+
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.previous_question = None
+         self.previous_answer = None
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator]:
+         """
+         Sends a prompt to the HeckAI API and returns the response.
+
+         Args:
+             prompt (str): The prompt or question to send to the API.
+             stream (bool): If True, yields streaming responses as they arrive.
+             raw (bool): If True, yields raw string chunks instead of dicts.
+             optimizer (str, optional): Name of the optimizer to apply to the prompt.
+             conversationally (bool): If True, optimizer is applied to the full conversation prompt.
+
+         Returns:
+             Union[Dict[str, Any], Generator]: If stream is False, returns a dict with the response text.
+                 If stream is True, yields response chunks as dicts or strings.
+
+         Raises:
+             Exception: If the optimizer is not available.
+             exceptions.FailedToGenerateResponseError: On API or network errors.
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         # Payload construction
+         payload = {
+             "model": self.model,
+             "question": conversation_prompt,
+             "language": self.language,
+             "sessionId": self.session_id,
+             "previousQuestion": self.previous_question,
+             "previousAnswer": self.previous_answer,
+             "imgUrls": [],
+             "superSmartMode": False # Added based on API request data
+         }
+
+         # Store this message as previous for next request
+         self.previous_question = conversation_prompt
+
+         def for_stream():
+             streaming_text = "" # Initialize outside try block
+             try:
+                 response = self.session.post(
+                     self.url,
+                     data=json.dumps(payload),
+                     stream=True,
+                     timeout=self.timeout,
+                     impersonate="chrome110"
+                 )
+                 response.raise_for_status()
+
+                 processed_stream = sanitize_stream(
+                     data=response.iter_content(chunk_size=1024),
+                     intro_value="data: ",
+                     to_json=False,
+                     start_marker="data: [ANSWER_START]",
+                     end_marker="data: [ANSWER_DONE]",
+                     skip_markers=["data: [RELATE_Q_START]", "data: [RELATE_Q_DONE]", "data: [REASON_START]", "data: [REASON_DONE]"],
+                     yield_raw_on_error=True,
+                     strip_chars=" \n\r\t",
+                     raw=raw
+                 )
+
+                 for content_chunk in processed_stream:
+                     if content_chunk and isinstance(content_chunk, str):
+                         content_chunk = content_chunk.replace('\\\\', '\\').replace('\\"', '"')
+                     if raw:
+                         if content_chunk and isinstance(content_chunk, str):
+                             streaming_text += content_chunk
+                             yield content_chunk
+                     else:
+                         if content_chunk and isinstance(content_chunk, str):
+                             streaming_text += content_chunk
+                             yield dict(text=content_chunk)
+
+                 # Only update history if we received a valid response
+                 if streaming_text:
+                     self.previous_answer = streaming_text
+                     try:
+                         if streaming_text and isinstance(streaming_text, str):
+                             sanitized_text = streaming_text.strip()
+                             if sanitized_text:
+                                 self.conversation.update_chat_history(prompt, sanitized_text)
+                     except Exception as e:
+                         print(f"Warning: Failed to update conversation history: {str(e)}")
+             except CurlError as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
+             except Exception as e:
+                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)} - {err_text}") from e
+
+         def for_non_stream():
+             full_text = ""
+             try:
+                 for chunk_data in for_stream():
+                     if raw:
+                         if isinstance(chunk_data, str):
+                             chunk_data = chunk_data.replace('\\\\', '\\').replace('\\"', '"')
+                             full_text += chunk_data
+                     else:
+                         if isinstance(chunk_data, dict) and "text" in chunk_data:
+                             text = chunk_data["text"].replace('\\\\', '\\').replace('\\"', '"')
+                             full_text += text
+             except Exception as e:
+                 if not full_text:
+                     raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+             self.last_response = {"text": full_text}
+             return full_text if raw else self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     @staticmethod
+     def fix_encoding(text):
+         """
+         Fixes encoding issues in the response text.
+
+         Args:
+             text (Union[str, dict]): The text or response dict to fix encoding for.
+
+         Returns:
+             Union[str, dict]: The text or dict with encoding corrected if possible.
+         """
+         if isinstance(text, dict) and "text" in text:
+             try:
+                 text["text"] = text["text"].encode("latin1").decode("utf-8")
+                 return text.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes
+             except (UnicodeError, AttributeError) as e:
+                 return text.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes
+         elif isinstance(text, str):
+             try:
+                 return text.encode("latin1").decode("utf-8")
+             except (UnicodeError, AttributeError) as e:
+                 return text.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes
+         return text.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         raw: bool = False,
+     ) -> Union[str, Generator[str, None, None]]:
+         """
+         Sends a prompt to the HeckAI API and returns only the message text.
+
+         Args:
+             prompt (str): The prompt or question to send to the API.
+             stream (bool): If True, yields streaming response text.
+             optimizer (str, optional): Name of the optimizer to apply to the prompt.
+             conversationally (bool): If True, optimizer is applied to the full conversation prompt.
+
+         Returns:
+             Union[str, Generator[str, None, None]]: The response text, or a generator yielding text chunks.
+         """
+         def for_stream_chat():
+             # ask() yields dicts or strings when streaming
+             gen = self.ask(
+                 prompt, stream=True, raw=raw,
+                 optimizer=optimizer, conversationally=conversationally
+             )
+             for response in gen:
+                 if raw:
+                     yield response
+                 else:
+                     yield self.get_message(response)
+
+         def for_non_stream_chat():
+             # ask() returns dict or str when not streaming
+             response_data = self.ask(
+                 prompt, stream=False, raw=raw,
+                 optimizer=optimizer, conversationally=conversationally
+             )
+             if raw:
+                 return response_data if isinstance(response_data, str) else str(response_data)
+             return self.get_message(response_data) # get_message expects dict
+
+         return for_stream_chat() if stream else for_non_stream_chat()
+
+     def get_message(self, response: dict) -> str:
+         """
+         Extracts the message text from the API response.
+
+         Args:
+             response (dict): The API response dictionary.
+
+         Returns:
+             str: The extracted message text. Returns an empty string if not found.
+
+         Raises:
+             TypeError: If the response is not a dictionary.
+         """
+         # Validate response format
+         if not isinstance(response, dict):
+             raise TypeError(f"Expected dict response, got {type(response).__name__}")
+
+         # Handle missing text key gracefully
+         if "text" not in response:
+             return ""
+
+         # Ensure text is a string
+         text = response["text"]
+         if not isinstance(text, str):
+             text = str(text)
+
+         return text.replace('\\\\', '\\').replace('\\"', '"')
+
+ if __name__ == "__main__":
+     # # Ensure curl_cffi is installed
+     # print("-" * 80)
+     # print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     # print("-" * 80)
+
+     # for model in HeckAI.AVAILABLE_MODELS:
+     #     try:
+     #         test_ai = HeckAI(model=model, timeout=60)
+     #         # Use non-streaming mode first to avoid potential streaming issues
+     #         try:
+     #             response_text = test_ai.chat("Say 'Hello' in one word", stream=False)
+     #             print(f"\r{model:<50} {'✓':<10} {response_text.strip()[:50]}")
+     #         except Exception as e1:
+     #             # Fall back to streaming if non-streaming fails
+     #             print(f"\r{model:<50} {'Testing stream...':<10}", end="", flush=True)
+     #             response = test_ai.chat("Say 'Hello' in one word", stream=True)
+     #             response_text = ""
+     #             for chunk in response:
+     #                 if chunk and isinstance(chunk, str):
+     #                     response_text += chunk
+
+     #             if response_text and len(response_text.strip()) > 0:
+     #                 status = "✓"
+     #                 # Truncate response if too long
+     #                 display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+     #                 print(f"\r{model:<50} {status:<10} {display_text}")
+     #             else:
+     #                 raise ValueError("Empty or invalid response")
+     #     except Exception as e:
+     #         print(f"\r{model:<50} {'✗':<10} {str(e)}")
+     from rich import print
+     ai = HeckAI(model="openai/gpt-5-nano")
+     response = ai.chat("tell me about humans", stream=True, raw=True)
+     for chunk in response:
          print(chunk, end='', flush=True)
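
For reference, a minimal usage sketch of the updated HeckAI provider, based only on the class definition in the diff above; the import path follows the file list, the model name comes from the new AVAILABLE_MODELS, and live API behavior is not guaranteed:

from webscout.Provider.HeckAI import HeckAI

# One of the models added in 2025.10.11 (see AVAILABLE_MODELS above).
ai = HeckAI(model="openai/gpt-5-mini", timeout=60)

# Non-streaming: chat() returns the full message text as a string.
print(ai.chat("Say 'Hello' in one word"))

# Streaming: chat(stream=True) yields text chunks as they arrive.
for chunk in ai.chat("tell me about humans", stream=True):
    print(chunk, end="", flush=True)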