webscout-8.3.6-py3-none-any.whl → webscout-2025.10.11-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to their respective public registries. It is provided for informational purposes only.


Files changed (304)
  1. webscout/AIauto.py +250 -250
  2. webscout/AIbase.py +379 -379
  3. webscout/AIutel.py +60 -58
  4. webscout/Bard.py +1012 -1012
  5. webscout/Bing_search.py +417 -417
  6. webscout/DWEBS.py +529 -529
  7. webscout/Extra/Act.md +309 -309
  8. webscout/Extra/GitToolkit/__init__.py +10 -10
  9. webscout/Extra/GitToolkit/gitapi/README.md +110 -110
  10. webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
  11. webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
  12. webscout/Extra/GitToolkit/gitapi/user.py +96 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
  14. webscout/Extra/YTToolkit/README.md +375 -375
  15. webscout/Extra/YTToolkit/YTdownloader.py +956 -956
  16. webscout/Extra/YTToolkit/__init__.py +2 -2
  17. webscout/Extra/YTToolkit/transcriber.py +475 -475
  18. webscout/Extra/YTToolkit/ytapi/README.md +44 -44
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
  20. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  21. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  22. webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
  23. webscout/Extra/YTToolkit/ytapi/https.py +88 -88
  24. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  25. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  26. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  27. webscout/Extra/YTToolkit/ytapi/query.py +39 -39
  28. webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
  29. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  30. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  31. webscout/Extra/autocoder/__init__.py +9 -9
  32. webscout/Extra/autocoder/autocoder.py +1105 -1105
  33. webscout/Extra/autocoder/autocoder_utiles.py +332 -332
  34. webscout/Extra/gguf.md +429 -429
  35. webscout/Extra/gguf.py +1213 -1213
  36. webscout/Extra/tempmail/README.md +487 -487
  37. webscout/Extra/tempmail/__init__.py +27 -27
  38. webscout/Extra/tempmail/async_utils.py +140 -140
  39. webscout/Extra/tempmail/base.py +160 -160
  40. webscout/Extra/tempmail/cli.py +186 -186
  41. webscout/Extra/tempmail/emailnator.py +84 -84
  42. webscout/Extra/tempmail/mail_tm.py +360 -360
  43. webscout/Extra/tempmail/temp_mail_io.py +291 -291
  44. webscout/Extra/weather.md +281 -281
  45. webscout/Extra/weather.py +193 -193
  46. webscout/Litlogger/README.md +10 -10
  47. webscout/Litlogger/__init__.py +15 -15
  48. webscout/Litlogger/formats.py +13 -13
  49. webscout/Litlogger/handlers.py +121 -121
  50. webscout/Litlogger/levels.py +13 -13
  51. webscout/Litlogger/logger.py +134 -134
  52. webscout/Provider/AISEARCH/Perplexity.py +332 -332
  53. webscout/Provider/AISEARCH/README.md +279 -279
  54. webscout/Provider/AISEARCH/__init__.py +33 -11
  55. webscout/Provider/AISEARCH/felo_search.py +206 -206
  56. webscout/Provider/AISEARCH/genspark_search.py +323 -323
  57. webscout/Provider/AISEARCH/hika_search.py +185 -185
  58. webscout/Provider/AISEARCH/iask_search.py +410 -410
  59. webscout/Provider/AISEARCH/monica_search.py +219 -219
  60. webscout/Provider/AISEARCH/scira_search.py +316 -314
  61. webscout/Provider/AISEARCH/stellar_search.py +177 -177
  62. webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
  63. webscout/Provider/Aitopia.py +314 -315
  64. webscout/Provider/Andi.py +3 -3
  65. webscout/Provider/Apriel.py +306 -0
  66. webscout/Provider/ChatGPTClone.py +236 -236
  67. webscout/Provider/ChatSandbox.py +343 -342
  68. webscout/Provider/Cloudflare.py +324 -324
  69. webscout/Provider/Cohere.py +208 -207
  70. webscout/Provider/Deepinfra.py +370 -369
  71. webscout/Provider/ExaAI.py +260 -260
  72. webscout/Provider/ExaChat.py +308 -387
  73. webscout/Provider/Flowith.py +221 -221
  74. webscout/Provider/GMI.py +293 -0
  75. webscout/Provider/Gemini.py +164 -162
  76. webscout/Provider/GeminiProxy.py +167 -166
  77. webscout/Provider/GithubChat.py +371 -370
  78. webscout/Provider/Groq.py +800 -800
  79. webscout/Provider/HeckAI.py +383 -379
  80. webscout/Provider/Jadve.py +282 -297
  81. webscout/Provider/K2Think.py +308 -0
  82. webscout/Provider/Koboldai.py +206 -384
  83. webscout/Provider/LambdaChat.py +423 -425
  84. webscout/Provider/Nemotron.py +244 -245
  85. webscout/Provider/Netwrck.py +248 -247
  86. webscout/Provider/OLLAMA.py +395 -394
  87. webscout/Provider/OPENAI/Cloudflare.py +394 -395
  88. webscout/Provider/OPENAI/FalconH1.py +452 -457
  89. webscout/Provider/OPENAI/FreeGemini.py +297 -299
  90. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +432 -329
  91. webscout/Provider/OPENAI/NEMOTRON.py +241 -244
  92. webscout/Provider/OPENAI/PI.py +428 -427
  93. webscout/Provider/OPENAI/README.md +959 -959
  94. webscout/Provider/OPENAI/TogetherAI.py +345 -345
  95. webscout/Provider/OPENAI/TwoAI.py +466 -467
  96. webscout/Provider/OPENAI/__init__.py +33 -59
  97. webscout/Provider/OPENAI/ai4chat.py +313 -303
  98. webscout/Provider/OPENAI/base.py +249 -269
  99. webscout/Provider/OPENAI/chatglm.py +528 -0
  100. webscout/Provider/OPENAI/chatgpt.py +593 -588
  101. webscout/Provider/OPENAI/chatgptclone.py +521 -524
  102. webscout/Provider/OPENAI/chatsandbox.py +202 -177
  103. webscout/Provider/OPENAI/deepinfra.py +319 -315
  104. webscout/Provider/OPENAI/e2b.py +1665 -1665
  105. webscout/Provider/OPENAI/exaai.py +420 -420
  106. webscout/Provider/OPENAI/exachat.py +452 -452
  107. webscout/Provider/OPENAI/friendli.py +232 -232
  108. webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
  109. webscout/Provider/OPENAI/groq.py +364 -364
  110. webscout/Provider/OPENAI/heckai.py +314 -311
  111. webscout/Provider/OPENAI/llmchatco.py +337 -337
  112. webscout/Provider/OPENAI/netwrck.py +355 -354
  113. webscout/Provider/OPENAI/oivscode.py +290 -290
  114. webscout/Provider/OPENAI/opkfc.py +518 -518
  115. webscout/Provider/OPENAI/pydantic_imports.py +1 -1
  116. webscout/Provider/OPENAI/scirachat.py +535 -529
  117. webscout/Provider/OPENAI/sonus.py +308 -308
  118. webscout/Provider/OPENAI/standardinput.py +442 -442
  119. webscout/Provider/OPENAI/textpollinations.py +340 -348
  120. webscout/Provider/OPENAI/toolbaz.py +419 -413
  121. webscout/Provider/OPENAI/typefully.py +362 -362
  122. webscout/Provider/OPENAI/utils.py +295 -295
  123. webscout/Provider/OPENAI/venice.py +436 -436
  124. webscout/Provider/OPENAI/wisecat.py +387 -387
  125. webscout/Provider/OPENAI/writecream.py +166 -166
  126. webscout/Provider/OPENAI/x0gpt.py +378 -378
  127. webscout/Provider/OPENAI/yep.py +389 -389
  128. webscout/Provider/OpenGPT.py +230 -230
  129. webscout/Provider/Openai.py +244 -496
  130. webscout/Provider/PI.py +405 -404
  131. webscout/Provider/Perplexitylabs.py +430 -431
  132. webscout/Provider/QwenLM.py +272 -254
  133. webscout/Provider/STT/__init__.py +32 -2
  134. webscout/Provider/{Llama3.py → Sambanova.py} +257 -258
  135. webscout/Provider/StandardInput.py +309 -309
  136. webscout/Provider/TTI/README.md +82 -82
  137. webscout/Provider/TTI/__init__.py +33 -12
  138. webscout/Provider/TTI/aiarta.py +413 -413
  139. webscout/Provider/TTI/base.py +136 -136
  140. webscout/Provider/TTI/bing.py +243 -243
  141. webscout/Provider/TTI/gpt1image.py +149 -149
  142. webscout/Provider/TTI/imagen.py +196 -196
  143. webscout/Provider/TTI/infip.py +211 -211
  144. webscout/Provider/TTI/magicstudio.py +232 -232
  145. webscout/Provider/TTI/monochat.py +219 -219
  146. webscout/Provider/TTI/piclumen.py +214 -214
  147. webscout/Provider/TTI/pixelmuse.py +232 -232
  148. webscout/Provider/TTI/pollinations.py +232 -232
  149. webscout/Provider/TTI/together.py +288 -288
  150. webscout/Provider/TTI/utils.py +12 -12
  151. webscout/Provider/TTI/venice.py +367 -367
  152. webscout/Provider/TTS/README.md +192 -192
  153. webscout/Provider/TTS/__init__.py +33 -10
  154. webscout/Provider/TTS/parler.py +110 -110
  155. webscout/Provider/TTS/streamElements.py +333 -333
  156. webscout/Provider/TTS/utils.py +280 -280
  157. webscout/Provider/TeachAnything.py +237 -236
  158. webscout/Provider/TextPollinationsAI.py +311 -318
  159. webscout/Provider/TogetherAI.py +356 -357
  160. webscout/Provider/TwoAI.py +313 -569
  161. webscout/Provider/TypliAI.py +312 -311
  162. webscout/Provider/UNFINISHED/ChatHub.py +208 -208
  163. webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
  164. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +294 -294
  165. webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +198 -198
  166. webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +477 -477
  167. webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
  168. webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +324 -324
  169. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  170. webscout/Provider/UNFINISHED/liner.py +334 -0
  171. webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
  172. webscout/Provider/UNFINISHED/puterjs.py +634 -634
  173. webscout/Provider/UNFINISHED/samurai.py +223 -223
  174. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  175. webscout/Provider/Venice.py +251 -250
  176. webscout/Provider/VercelAI.py +256 -255
  177. webscout/Provider/WiseCat.py +232 -231
  178. webscout/Provider/WrDoChat.py +367 -366
  179. webscout/Provider/__init__.py +33 -86
  180. webscout/Provider/ai4chat.py +174 -174
  181. webscout/Provider/akashgpt.py +331 -334
  182. webscout/Provider/cerebras.py +446 -340
  183. webscout/Provider/chatglm.py +394 -214
  184. webscout/Provider/cleeai.py +211 -212
  185. webscout/Provider/deepseek_assistant.py +1 -1
  186. webscout/Provider/elmo.py +282 -282
  187. webscout/Provider/geminiapi.py +208 -208
  188. webscout/Provider/granite.py +261 -261
  189. webscout/Provider/hermes.py +263 -265
  190. webscout/Provider/julius.py +223 -222
  191. webscout/Provider/learnfastai.py +309 -309
  192. webscout/Provider/llama3mitril.py +214 -214
  193. webscout/Provider/llmchat.py +243 -243
  194. webscout/Provider/llmchatco.py +290 -290
  195. webscout/Provider/meta.py +801 -801
  196. webscout/Provider/oivscode.py +309 -309
  197. webscout/Provider/scira_chat.py +384 -457
  198. webscout/Provider/searchchat.py +292 -291
  199. webscout/Provider/sonus.py +258 -258
  200. webscout/Provider/toolbaz.py +370 -364
  201. webscout/Provider/turboseek.py +274 -265
  202. webscout/Provider/typefully.py +208 -207
  203. webscout/Provider/x0gpt.py +1 -0
  204. webscout/Provider/yep.py +372 -371
  205. webscout/__init__.py +30 -31
  206. webscout/__main__.py +5 -5
  207. webscout/auth/api_key_manager.py +189 -189
  208. webscout/auth/config.py +175 -175
  209. webscout/auth/models.py +185 -185
  210. webscout/auth/routes.py +664 -664
  211. webscout/auth/simple_logger.py +236 -236
  212. webscout/cli.py +523 -523
  213. webscout/conversation.py +438 -438
  214. webscout/exceptions.py +361 -361
  215. webscout/litagent/Readme.md +298 -298
  216. webscout/litagent/__init__.py +28 -28
  217. webscout/litagent/agent.py +581 -581
  218. webscout/litagent/constants.py +59 -59
  219. webscout/litprinter/__init__.py +58 -58
  220. webscout/models.py +181 -181
  221. webscout/optimizers.py +419 -419
  222. webscout/prompt_manager.py +288 -288
  223. webscout/sanitize.py +1078 -1078
  224. webscout/scout/README.md +401 -401
  225. webscout/scout/__init__.py +8 -8
  226. webscout/scout/core/__init__.py +6 -6
  227. webscout/scout/core/crawler.py +297 -297
  228. webscout/scout/core/scout.py +706 -706
  229. webscout/scout/core/search_result.py +95 -95
  230. webscout/scout/core/text_analyzer.py +62 -62
  231. webscout/scout/core/text_utils.py +277 -277
  232. webscout/scout/core/web_analyzer.py +51 -51
  233. webscout/scout/element.py +599 -599
  234. webscout/scout/parsers/__init__.py +69 -69
  235. webscout/scout/parsers/html5lib_parser.py +172 -172
  236. webscout/scout/parsers/html_parser.py +236 -236
  237. webscout/scout/parsers/lxml_parser.py +178 -178
  238. webscout/scout/utils.py +37 -37
  239. webscout/swiftcli/Readme.md +323 -323
  240. webscout/swiftcli/__init__.py +95 -95
  241. webscout/swiftcli/core/__init__.py +7 -7
  242. webscout/swiftcli/core/cli.py +308 -308
  243. webscout/swiftcli/core/context.py +104 -104
  244. webscout/swiftcli/core/group.py +241 -241
  245. webscout/swiftcli/decorators/__init__.py +28 -28
  246. webscout/swiftcli/decorators/command.py +221 -221
  247. webscout/swiftcli/decorators/options.py +220 -220
  248. webscout/swiftcli/decorators/output.py +302 -302
  249. webscout/swiftcli/exceptions.py +21 -21
  250. webscout/swiftcli/plugins/__init__.py +9 -9
  251. webscout/swiftcli/plugins/base.py +135 -135
  252. webscout/swiftcli/plugins/manager.py +269 -269
  253. webscout/swiftcli/utils/__init__.py +59 -59
  254. webscout/swiftcli/utils/formatting.py +252 -252
  255. webscout/swiftcli/utils/parsing.py +267 -267
  256. webscout/update_checker.py +117 -117
  257. webscout/version.py +1 -1
  258. webscout/webscout_search.py +1183 -1183
  259. webscout/webscout_search_async.py +649 -649
  260. webscout/yep_search.py +346 -346
  261. webscout/zeroart/README.md +89 -89
  262. webscout/zeroart/__init__.py +134 -134
  263. webscout/zeroart/base.py +66 -66
  264. webscout/zeroart/effects.py +100 -100
  265. webscout/zeroart/fonts.py +1238 -1238
  266. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -936
  267. webscout-2025.10.11.dist-info/RECORD +300 -0
  268. webscout/Provider/AISEARCH/DeepFind.py +0 -254
  269. webscout/Provider/AllenAI.py +0 -440
  270. webscout/Provider/Blackboxai.py +0 -793
  271. webscout/Provider/FreeGemini.py +0 -250
  272. webscout/Provider/GptOss.py +0 -207
  273. webscout/Provider/Hunyuan.py +0 -283
  274. webscout/Provider/Kimi.py +0 -445
  275. webscout/Provider/MCPCore.py +0 -322
  276. webscout/Provider/MiniMax.py +0 -207
  277. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  278. webscout/Provider/OPENAI/MiniMax.py +0 -298
  279. webscout/Provider/OPENAI/Qwen3.py +0 -304
  280. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  281. webscout/Provider/OPENAI/copilot.py +0 -321
  282. webscout/Provider/OPENAI/gptoss.py +0 -288
  283. webscout/Provider/OPENAI/kimi.py +0 -469
  284. webscout/Provider/OPENAI/mcpcore.py +0 -431
  285. webscout/Provider/OPENAI/multichat.py +0 -378
  286. webscout/Provider/OPENAI/qodo.py +0 -630
  287. webscout/Provider/OPENAI/xenai.py +0 -514
  288. webscout/Provider/Reka.py +0 -214
  289. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  290. webscout/Provider/asksteve.py +0 -220
  291. webscout/Provider/copilot.py +0 -441
  292. webscout/Provider/freeaichat.py +0 -294
  293. webscout/Provider/koala.py +0 -182
  294. webscout/Provider/lmarena.py +0 -198
  295. webscout/Provider/monochat.py +0 -275
  296. webscout/Provider/multichat.py +0 -375
  297. webscout/Provider/scnet.py +0 -244
  298. webscout/Provider/talkai.py +0 -194
  299. webscout/tempid.py +0 -128
  300. webscout-8.3.6.dist-info/RECORD +0 -327
  301. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
  302. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
  303. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
  304. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
webscout/Provider/Openai.py
@@ -1,496 +1,244 @@
- import json
- from ..AIutel import Optimizers
- from ..AIutel import Conversation
- from ..AIutel import AwesomePrompts, sanitize_stream
- from ..AIbase import Provider, AsyncProvider
-
- from webscout import exceptions
- from typing import Any, AsyncGenerator, Dict, List, Optional, Union
- import requests
- import httpx
- #----------------------------------------------------------OpenAI-----------------------------------
- class OPENAI(Provider):
-     def __init__(
-         self,
-         api_key: str,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         temperature: float = 1,
-         presence_penalty: int = 0,
-         frequency_penalty: int = 0,
-         top_p: float = 1,
-         model: str = "gpt-3.5-turbo",
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         base_url: str = "https://api.openai.com/v1/chat/completions",
-     ):
-         """Instantiates OPENAI
-
-         Args:
-             api_key (key): OpenAI's API key.
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             temperature (float, optional): Charge of the generated text's randomness. Defaults to 1.
-             presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
-             frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
-             top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999.
-             model (str, optional): LLM model name. Defaults to "gpt-3.5-turbo".
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-         """
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.api_key = api_key
-         self.model = model
-         self.temperature = temperature
-         self.presence_penalty = presence_penalty
-         self.frequency_penalty = frequency_penalty
-         self.top_p = top_p
-         self.chat_endpoint = base_url
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.headers = {
-             "Content-Type": "application/json",
-             "Authorization": f"Bearer {self.api_key}",
-         }
-         self.session = requests.session()
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         self.session.headers.update(self.headers)
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict:
-         """Chat with AI
-
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict : {}
-         ```json
-         {
-             "id": "chatcmpl-TaREJpBZsRVQFRFic1wIA7Q7XfnaD",
-             "object": "chat.completion",
-             "created": 1704623244,
-             "model": "gpt-3.5-turbo",
-             "usage": {
-                 "prompt_tokens": 0,
-                 "completion_tokens": 0,
-                 "total_tokens": 0
-             },
-             "choices": [
-                 {
-                     "message": {
-                         "role": "assistant",
-                         "content": "Hello! How can I assist you today?"
-                     },
-                     "finish_reason": "stop",
-                     "index": 0
-                 }
-             ]
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-         self.session.headers.update(self.headers)
-         payload = {
-             "frequency_penalty": self.frequency_penalty,
-             "messages": [{"content": conversation_prompt, "role": "user"}],
-             "model": self.model,
-             "presence_penalty": self.presence_penalty,
-             "stream": stream,
-             "temperature": self.temperature,
-             "top_p": self.top_p,
-         }
-
-         def for_stream():
-             response = self.session.post(
-                 self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
-             )
-             if not response.ok:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-
-             message_load = ""
-             for value in response.iter_lines(
-                 decode_unicode=True,
-                 delimiter="" if raw else "data:",
-                 chunk_size=self.stream_chunk_size,
-             ):
-                 try:
-                     resp = json.loads(value)
-                     incomplete_message = self.get_message(resp)
-                     if incomplete_message:
-                         message_load += incomplete_message
-                         resp["choices"][0]["delta"]["content"] = message_load
-                         self.last_response.update(resp)
-                         yield value if raw else resp
-                     elif raw:
-                         yield value
-                 except json.decoder.JSONDecodeError:
-                     pass
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-
-         def for_non_stream():
-             response = self.session.post(
-                 self.chat_endpoint, json=payload, stream=False, timeout=self.timeout
-             )
-             if (
-                 not response.ok
-                 or not response.headers.get("Content-Type", "") == "application/json"
-             ):
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-             resp = response.json()
-             self.last_response.update(resp)
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-             return resp
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         try:
-             if response["choices"][0].get("delta"):
-                 return response["choices"][0]["delta"]["content"]
-             return response["choices"][0]["message"]["content"]
-         except KeyError:
-             return ""
- class AsyncOPENAI(AsyncProvider):
-     def __init__(
-         self,
-         api_key: str,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         temperature: float = 1,
-         presence_penalty: int = 0,
-         frequency_penalty: int = 0,
-         top_p: float = 1,
-         model: str = "gpt-3.5-turbo",
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-     ):
-         """Instantiates OPENAI
-
-         Args:
-             api_key (key): OpenAI's API key.
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             temperature (float, optional): Charge of the generated text's randomness. Defaults to 1.
-             presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
-             frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
-             top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999.
-             model (str, optional): LLM model name. Defaults to "gpt-3.5-turbo".
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-         """
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.api_key = api_key
-         self.model = model
-         self.temperature = temperature
-         self.presence_penalty = presence_penalty
-         self.frequency_penalty = frequency_penalty
-         self.top_p = top_p
-         self.chat_endpoint = "https://api.openai.com/v1/chat/completions"
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.headers = {
-             "Content-Type": "application/json",
-             "Authorization": f"Bearer {self.api_key}",
-         }
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session = httpx.AsyncClient(
-             headers=self.headers,
-             proxies=proxies,
-         )
-
-     async def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-         tools: Optional[List[Dict[str, Any]]] = None,
-     ) -> Union[dict, AsyncGenerator]:
-         """Chat with AI asynchronously.
-
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-             tools (Optional[List[Dict[str, Any]]], optional): List of tools to be used. Defaults to None.
-         Returns:
-             dict|AsyncGenerator : ai content.
-         ```json
-         {
-             "id": "chatcmpl-TaREJpBZsRVQFRFic1wIA7Q7XfnaD",
-             "object": "chat.completion",
-             "created": 1704623244,
-             "model": "gpt-3.5-turbo",
-             "usage": {
-                 "prompt_tokens": 0,
-                 "completion_tokens": 0,
-                 "total_tokens": 0
-             },
-             "choices": [
-                 {
-                     "message": {
-                         "role": "assistant",
-                         "content": "Hello! How can I assist you today?"
-                     },
-                     "finish_reason": "stop",
-                     "index": 0
-                 }
-             ]
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-         payload = {
-             "frequency_penalty": self.frequency_penalty,
-             "messages": [{"content": conversation_prompt, "role": "user"}],
-             "model": self.model,
-             "presence_penalty": self.presence_penalty,
-             "stream": stream,
-             "temperature": self.temperature,
-             "top_p": self.top_p,
-         }
-
-         async def for_stream():
-             async with self.session.stream(
-                 "POST", self.chat_endpoint, json=payload, timeout=self.timeout
-             ) as response:
-                 if not response.is_success:
-                     raise Exception(
-                         f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
-                     )
-
-                 message_load = ""
-                 async for value in response.aiter_lines():
-                     try:
-
-                         resp = sanitize_stream(value)
-                         incomplete_message = await self.get_message(resp)
-                         if incomplete_message:
-                             message_load += incomplete_message
-                             resp["choices"][0]["delta"]["content"] = message_load
-                             self.last_response.update(resp)
-                             yield value if raw else resp
-                         elif raw:
-                             yield value
-                     except json.decoder.JSONDecodeError:
-                         pass
-             self.conversation.update_chat_history(
-                 prompt, await self.get_message(self.last_response)
-             )
-
-         async def for_non_stream():
-             response = httpx.post(
-                 self.chat_endpoint,
-                 json=payload,
-                 timeout=self.timeout,
-                 headers=self.headers,
-             )
-             if (
-                 not response.is_success
-                 or not response.headers.get("Content-Type", "") == "application/json"
-             ):
-                 raise Exception(
-                     f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
-                 )
-             resp = response.json()
-             self.last_response.update(resp)
-             self.conversation.update_chat_history(
-                 prompt, await self.get_message(self.last_response)
-             )
-             return resp
-
-         return for_stream() if stream else await for_non_stream()
-
-     async def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-         tools: Optional[List[Dict[str, Any]]] = None,
-     ) -> Union[str, AsyncGenerator]:
-         """Generate response `str` asynchronously.
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-             tools (Optional[List[Dict[str, Any]]], optional): List of tools to be used. Defaults to None.
-         Returns:
-             str|AsyncGenerator: Response generated
-         """
-
-         async def for_stream():
-             async_ask = await self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             )
-             async for response in async_ask:
-                 yield await self.get_message(response)
-
-         async def for_non_stream():
-             return await self.get_message(
-                 await self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else await for_non_stream()
-
-     async def get_message(self, response: dict) -> str:
-         """Retrieves message only from response asynchronously.
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         try:
-             if response["choices"][0].get("delta"):
-                 return response["choices"][0]["delta"]["content"]
-             return response["choices"][0]["message"]["content"]
-         except KeyError:
-             return ""
+ import json
+ import os
+ from typing import Any, Dict, Optional, Generator, Union, List
+
+ from curl_cffi.requests import Session
+ from curl_cffi import CurlError
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+ class OPENAI(Provider):
+     """
+     A class to interact with the OpenAI API with LitAgent user-agent.
+     """
+     required_auth = True
+     def __init__(
+         self,
+         api_key: str,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         temperature: float = 1,
+         presence_penalty: int = 0,
+         frequency_penalty: int = 0,
+         top_p: float = 1,
+         model: str = "gpt-3.5-turbo",
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         base_url: str = "https://api.openai.com/v1/chat/completions",
+         system_prompt: str = "You are a helpful assistant.",
+         browser: str = "chrome"
+     ):
+         """Initializes the OpenAI API client."""
+         self.url = base_url
+
+         # Initialize LitAgent
+         self.agent = LitAgent()
+         self.fingerprint = self.agent.generate_fingerprint(browser)
+         self.api_key = api_key
+         # Use the fingerprint for headers
+         self.headers = {
+             "Accept": self.fingerprint["accept"],
+             "Accept-Language": self.fingerprint["accept_language"],
+             "User-Agent": self.fingerprint.get("user_agent", ""),
+         }
+         if self.api_key:
+             self.headers["Authorization"] = f"Bearer {self.api_key}"
+
+         # Initialize curl_cffi Session
+         self.session = Session()
+         # Update curl_cffi session headers and proxies
+         self.session.headers.update(self.headers)
+         self.session.proxies = proxies  # Assign proxies directly
+         self.system_prompt = system_prompt
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.temperature = temperature
+         self.presence_penalty = presence_penalty
+         self.frequency_penalty = frequency_penalty
+         self.top_p = top_p
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def refresh_identity(self, browser: str = None):
+         """
+         Refreshes the browser identity fingerprint.
+
+         Args:
+             browser: Specific browser to use for the new fingerprint
+         """
+         browser = browser or self.fingerprint.get("browser_type", "chrome")
+         self.fingerprint = self.agent.generate_fingerprint(browser)
+
+         # Update headers with new fingerprint (only relevant ones)
+         self.headers.update({
+             "Accept": self.fingerprint["accept"],
+             "Accept-Language": self.fingerprint["accept_language"],
+         })
+
+         # Update session headers
+         self.session.headers.update(self.headers)
+
+         return self.fingerprint
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise exceptions.FailedToGenerateResponseError(f"Optimizer is not one of {self.__available_optimizers}")
+
+         # Payload construction
+         payload = {
+             "model": self.model,
+             "messages": [
+                 {"role": "system", "content": self.system_prompt},
+                 {"role": "user", "content": conversation_prompt},
+             ],
+             "stream": stream,
+             "temperature": self.temperature,
+             "top_p": self.top_p,
+             "presence_penalty": self.presence_penalty,
+             "frequency_penalty": self.frequency_penalty,
+         }
+
+         def for_stream():
+             streaming_text = ""
+             try:
+                 # Use curl_cffi session post with impersonate
+                 response = self.session.post(
+                     self.url,
+                     data=json.dumps(payload),
+                     stream=True,
+                     timeout=self.timeout,
+                     impersonate="chrome110"
+                 )
+                 response.raise_for_status()
+
+                 # Use sanitize_stream
+                 processed_stream = sanitize_stream(
+                     data=response.iter_content(chunk_size=None),
+                     intro_value="data:",
+                     to_json=True,
+                     skip_markers=["[DONE]"],
+                     content_extractor=lambda chunk: chunk.get("choices", [{}])[0].get("delta", {}).get("content") if isinstance(chunk, dict) else None,
+                     yield_raw_on_error=False
+                 )
+
+                 for content_chunk in processed_stream:
+                     if content_chunk and isinstance(content_chunk, str):
+                         streaming_text += content_chunk
+                         resp = dict(text=content_chunk)
+                         yield resp if not raw else content_chunk
+
+             except CurlError as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)}") from e
+             finally:
+                 if streaming_text:
+                     self.last_response = {"text": streaming_text}
+                     self.conversation.update_chat_history(prompt, streaming_text)
+
+         def for_non_stream():
+             try:
+                 # Use curl_cffi session post with impersonate for non-streaming
+                 response = self.session.post(
+                     self.url,
+                     data=json.dumps(payload),
+                     timeout=self.timeout,
+                     impersonate="chrome110"
+                 )
+                 response.raise_for_status()
+
+                 response_text = response.text
+
+                 # Use sanitize_stream to parse the non-streaming JSON response
+                 processed_stream = sanitize_stream(
+                     data=response_text,
+                     to_json=True,
+                     intro_value=None,
+                     content_extractor=lambda chunk: chunk.get("choices", [{}])[0].get("message", {}).get("content") if isinstance(chunk, dict) else None,
+                     yield_raw_on_error=False
+                 )
+                 # Extract the single result
+                 content = next(processed_stream, None)
+                 content = content if isinstance(content, str) else ""
+
+                 self.last_response = {"text": content}
+                 self.conversation.update_chat_history(prompt, content)
+                 return self.last_response if not raw else content
+
+             except CurlError as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+             except Exception as e:
+                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e} - {err_text}") from e
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator[str, None, None]]:
+         def for_stream_chat():
+             gen = self.ask(
+                 prompt, stream=True, raw=False,
+                 optimizer=optimizer, conversationally=conversationally
+             )
+             for response_dict in gen:
+                 yield self.get_message(response_dict)
+
+         def for_non_stream_chat():
+             response_data = self.ask(
+                 prompt, stream=False, raw=False,
+                 optimizer=optimizer, conversationally=conversationally
+             )
+             return self.get_message(response_data)
+
+         return for_stream_chat() if stream else for_non_stream_chat()
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+ return response["text"]