webscout 8.3.6-py3-none-any.whl → 2025.10.11-py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of webscout has been flagged as potentially problematic.

Files changed (304)
  1. webscout/AIauto.py +250 -250
  2. webscout/AIbase.py +379 -379
  3. webscout/AIutel.py +60 -58
  4. webscout/Bard.py +1012 -1012
  5. webscout/Bing_search.py +417 -417
  6. webscout/DWEBS.py +529 -529
  7. webscout/Extra/Act.md +309 -309
  8. webscout/Extra/GitToolkit/__init__.py +10 -10
  9. webscout/Extra/GitToolkit/gitapi/README.md +110 -110
  10. webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
  11. webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
  12. webscout/Extra/GitToolkit/gitapi/user.py +96 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
  14. webscout/Extra/YTToolkit/README.md +375 -375
  15. webscout/Extra/YTToolkit/YTdownloader.py +956 -956
  16. webscout/Extra/YTToolkit/__init__.py +2 -2
  17. webscout/Extra/YTToolkit/transcriber.py +475 -475
  18. webscout/Extra/YTToolkit/ytapi/README.md +44 -44
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
  20. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  21. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  22. webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
  23. webscout/Extra/YTToolkit/ytapi/https.py +88 -88
  24. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  25. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  26. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  27. webscout/Extra/YTToolkit/ytapi/query.py +39 -39
  28. webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
  29. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  30. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  31. webscout/Extra/autocoder/__init__.py +9 -9
  32. webscout/Extra/autocoder/autocoder.py +1105 -1105
  33. webscout/Extra/autocoder/autocoder_utiles.py +332 -332
  34. webscout/Extra/gguf.md +429 -429
  35. webscout/Extra/gguf.py +1213 -1213
  36. webscout/Extra/tempmail/README.md +487 -487
  37. webscout/Extra/tempmail/__init__.py +27 -27
  38. webscout/Extra/tempmail/async_utils.py +140 -140
  39. webscout/Extra/tempmail/base.py +160 -160
  40. webscout/Extra/tempmail/cli.py +186 -186
  41. webscout/Extra/tempmail/emailnator.py +84 -84
  42. webscout/Extra/tempmail/mail_tm.py +360 -360
  43. webscout/Extra/tempmail/temp_mail_io.py +291 -291
  44. webscout/Extra/weather.md +281 -281
  45. webscout/Extra/weather.py +193 -193
  46. webscout/Litlogger/README.md +10 -10
  47. webscout/Litlogger/__init__.py +15 -15
  48. webscout/Litlogger/formats.py +13 -13
  49. webscout/Litlogger/handlers.py +121 -121
  50. webscout/Litlogger/levels.py +13 -13
  51. webscout/Litlogger/logger.py +134 -134
  52. webscout/Provider/AISEARCH/Perplexity.py +332 -332
  53. webscout/Provider/AISEARCH/README.md +279 -279
  54. webscout/Provider/AISEARCH/__init__.py +33 -11
  55. webscout/Provider/AISEARCH/felo_search.py +206 -206
  56. webscout/Provider/AISEARCH/genspark_search.py +323 -323
  57. webscout/Provider/AISEARCH/hika_search.py +185 -185
  58. webscout/Provider/AISEARCH/iask_search.py +410 -410
  59. webscout/Provider/AISEARCH/monica_search.py +219 -219
  60. webscout/Provider/AISEARCH/scira_search.py +316 -314
  61. webscout/Provider/AISEARCH/stellar_search.py +177 -177
  62. webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
  63. webscout/Provider/Aitopia.py +314 -315
  64. webscout/Provider/Andi.py +3 -3
  65. webscout/Provider/Apriel.py +306 -0
  66. webscout/Provider/ChatGPTClone.py +236 -236
  67. webscout/Provider/ChatSandbox.py +343 -342
  68. webscout/Provider/Cloudflare.py +324 -324
  69. webscout/Provider/Cohere.py +208 -207
  70. webscout/Provider/Deepinfra.py +370 -369
  71. webscout/Provider/ExaAI.py +260 -260
  72. webscout/Provider/ExaChat.py +308 -387
  73. webscout/Provider/Flowith.py +221 -221
  74. webscout/Provider/GMI.py +293 -0
  75. webscout/Provider/Gemini.py +164 -162
  76. webscout/Provider/GeminiProxy.py +167 -166
  77. webscout/Provider/GithubChat.py +371 -370
  78. webscout/Provider/Groq.py +800 -800
  79. webscout/Provider/HeckAI.py +383 -379
  80. webscout/Provider/Jadve.py +282 -297
  81. webscout/Provider/K2Think.py +308 -0
  82. webscout/Provider/Koboldai.py +206 -384
  83. webscout/Provider/LambdaChat.py +423 -425
  84. webscout/Provider/Nemotron.py +244 -245
  85. webscout/Provider/Netwrck.py +248 -247
  86. webscout/Provider/OLLAMA.py +395 -394
  87. webscout/Provider/OPENAI/Cloudflare.py +394 -395
  88. webscout/Provider/OPENAI/FalconH1.py +452 -457
  89. webscout/Provider/OPENAI/FreeGemini.py +297 -299
  90. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +432 -329
  91. webscout/Provider/OPENAI/NEMOTRON.py +241 -244
  92. webscout/Provider/OPENAI/PI.py +428 -427
  93. webscout/Provider/OPENAI/README.md +959 -959
  94. webscout/Provider/OPENAI/TogetherAI.py +345 -345
  95. webscout/Provider/OPENAI/TwoAI.py +466 -467
  96. webscout/Provider/OPENAI/__init__.py +33 -59
  97. webscout/Provider/OPENAI/ai4chat.py +313 -303
  98. webscout/Provider/OPENAI/base.py +249 -269
  99. webscout/Provider/OPENAI/chatglm.py +528 -0
  100. webscout/Provider/OPENAI/chatgpt.py +593 -588
  101. webscout/Provider/OPENAI/chatgptclone.py +521 -524
  102. webscout/Provider/OPENAI/chatsandbox.py +202 -177
  103. webscout/Provider/OPENAI/deepinfra.py +319 -315
  104. webscout/Provider/OPENAI/e2b.py +1665 -1665
  105. webscout/Provider/OPENAI/exaai.py +420 -420
  106. webscout/Provider/OPENAI/exachat.py +452 -452
  107. webscout/Provider/OPENAI/friendli.py +232 -232
  108. webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
  109. webscout/Provider/OPENAI/groq.py +364 -364
  110. webscout/Provider/OPENAI/heckai.py +314 -311
  111. webscout/Provider/OPENAI/llmchatco.py +337 -337
  112. webscout/Provider/OPENAI/netwrck.py +355 -354
  113. webscout/Provider/OPENAI/oivscode.py +290 -290
  114. webscout/Provider/OPENAI/opkfc.py +518 -518
  115. webscout/Provider/OPENAI/pydantic_imports.py +1 -1
  116. webscout/Provider/OPENAI/scirachat.py +535 -529
  117. webscout/Provider/OPENAI/sonus.py +308 -308
  118. webscout/Provider/OPENAI/standardinput.py +442 -442
  119. webscout/Provider/OPENAI/textpollinations.py +340 -348
  120. webscout/Provider/OPENAI/toolbaz.py +419 -413
  121. webscout/Provider/OPENAI/typefully.py +362 -362
  122. webscout/Provider/OPENAI/utils.py +295 -295
  123. webscout/Provider/OPENAI/venice.py +436 -436
  124. webscout/Provider/OPENAI/wisecat.py +387 -387
  125. webscout/Provider/OPENAI/writecream.py +166 -166
  126. webscout/Provider/OPENAI/x0gpt.py +378 -378
  127. webscout/Provider/OPENAI/yep.py +389 -389
  128. webscout/Provider/OpenGPT.py +230 -230
  129. webscout/Provider/Openai.py +244 -496
  130. webscout/Provider/PI.py +405 -404
  131. webscout/Provider/Perplexitylabs.py +430 -431
  132. webscout/Provider/QwenLM.py +272 -254
  133. webscout/Provider/STT/__init__.py +32 -2
  134. webscout/Provider/{Llama3.py → Sambanova.py} +257 -258
  135. webscout/Provider/StandardInput.py +309 -309
  136. webscout/Provider/TTI/README.md +82 -82
  137. webscout/Provider/TTI/__init__.py +33 -12
  138. webscout/Provider/TTI/aiarta.py +413 -413
  139. webscout/Provider/TTI/base.py +136 -136
  140. webscout/Provider/TTI/bing.py +243 -243
  141. webscout/Provider/TTI/gpt1image.py +149 -149
  142. webscout/Provider/TTI/imagen.py +196 -196
  143. webscout/Provider/TTI/infip.py +211 -211
  144. webscout/Provider/TTI/magicstudio.py +232 -232
  145. webscout/Provider/TTI/monochat.py +219 -219
  146. webscout/Provider/TTI/piclumen.py +214 -214
  147. webscout/Provider/TTI/pixelmuse.py +232 -232
  148. webscout/Provider/TTI/pollinations.py +232 -232
  149. webscout/Provider/TTI/together.py +288 -288
  150. webscout/Provider/TTI/utils.py +12 -12
  151. webscout/Provider/TTI/venice.py +367 -367
  152. webscout/Provider/TTS/README.md +192 -192
  153. webscout/Provider/TTS/__init__.py +33 -10
  154. webscout/Provider/TTS/parler.py +110 -110
  155. webscout/Provider/TTS/streamElements.py +333 -333
  156. webscout/Provider/TTS/utils.py +280 -280
  157. webscout/Provider/TeachAnything.py +237 -236
  158. webscout/Provider/TextPollinationsAI.py +311 -318
  159. webscout/Provider/TogetherAI.py +356 -357
  160. webscout/Provider/TwoAI.py +313 -569
  161. webscout/Provider/TypliAI.py +312 -311
  162. webscout/Provider/UNFINISHED/ChatHub.py +208 -208
  163. webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
  164. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +294 -294
  165. webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +198 -198
  166. webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +477 -477
  167. webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
  168. webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +324 -324
  169. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  170. webscout/Provider/UNFINISHED/liner.py +334 -0
  171. webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
  172. webscout/Provider/UNFINISHED/puterjs.py +634 -634
  173. webscout/Provider/UNFINISHED/samurai.py +223 -223
  174. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  175. webscout/Provider/Venice.py +251 -250
  176. webscout/Provider/VercelAI.py +256 -255
  177. webscout/Provider/WiseCat.py +232 -231
  178. webscout/Provider/WrDoChat.py +367 -366
  179. webscout/Provider/__init__.py +33 -86
  180. webscout/Provider/ai4chat.py +174 -174
  181. webscout/Provider/akashgpt.py +331 -334
  182. webscout/Provider/cerebras.py +446 -340
  183. webscout/Provider/chatglm.py +394 -214
  184. webscout/Provider/cleeai.py +211 -212
  185. webscout/Provider/deepseek_assistant.py +1 -1
  186. webscout/Provider/elmo.py +282 -282
  187. webscout/Provider/geminiapi.py +208 -208
  188. webscout/Provider/granite.py +261 -261
  189. webscout/Provider/hermes.py +263 -265
  190. webscout/Provider/julius.py +223 -222
  191. webscout/Provider/learnfastai.py +309 -309
  192. webscout/Provider/llama3mitril.py +214 -214
  193. webscout/Provider/llmchat.py +243 -243
  194. webscout/Provider/llmchatco.py +290 -290
  195. webscout/Provider/meta.py +801 -801
  196. webscout/Provider/oivscode.py +309 -309
  197. webscout/Provider/scira_chat.py +384 -457
  198. webscout/Provider/searchchat.py +292 -291
  199. webscout/Provider/sonus.py +258 -258
  200. webscout/Provider/toolbaz.py +370 -364
  201. webscout/Provider/turboseek.py +274 -265
  202. webscout/Provider/typefully.py +208 -207
  203. webscout/Provider/x0gpt.py +1 -0
  204. webscout/Provider/yep.py +372 -371
  205. webscout/__init__.py +30 -31
  206. webscout/__main__.py +5 -5
  207. webscout/auth/api_key_manager.py +189 -189
  208. webscout/auth/config.py +175 -175
  209. webscout/auth/models.py +185 -185
  210. webscout/auth/routes.py +664 -664
  211. webscout/auth/simple_logger.py +236 -236
  212. webscout/cli.py +523 -523
  213. webscout/conversation.py +438 -438
  214. webscout/exceptions.py +361 -361
  215. webscout/litagent/Readme.md +298 -298
  216. webscout/litagent/__init__.py +28 -28
  217. webscout/litagent/agent.py +581 -581
  218. webscout/litagent/constants.py +59 -59
  219. webscout/litprinter/__init__.py +58 -58
  220. webscout/models.py +181 -181
  221. webscout/optimizers.py +419 -419
  222. webscout/prompt_manager.py +288 -288
  223. webscout/sanitize.py +1078 -1078
  224. webscout/scout/README.md +401 -401
  225. webscout/scout/__init__.py +8 -8
  226. webscout/scout/core/__init__.py +6 -6
  227. webscout/scout/core/crawler.py +297 -297
  228. webscout/scout/core/scout.py +706 -706
  229. webscout/scout/core/search_result.py +95 -95
  230. webscout/scout/core/text_analyzer.py +62 -62
  231. webscout/scout/core/text_utils.py +277 -277
  232. webscout/scout/core/web_analyzer.py +51 -51
  233. webscout/scout/element.py +599 -599
  234. webscout/scout/parsers/__init__.py +69 -69
  235. webscout/scout/parsers/html5lib_parser.py +172 -172
  236. webscout/scout/parsers/html_parser.py +236 -236
  237. webscout/scout/parsers/lxml_parser.py +178 -178
  238. webscout/scout/utils.py +37 -37
  239. webscout/swiftcli/Readme.md +323 -323
  240. webscout/swiftcli/__init__.py +95 -95
  241. webscout/swiftcli/core/__init__.py +7 -7
  242. webscout/swiftcli/core/cli.py +308 -308
  243. webscout/swiftcli/core/context.py +104 -104
  244. webscout/swiftcli/core/group.py +241 -241
  245. webscout/swiftcli/decorators/__init__.py +28 -28
  246. webscout/swiftcli/decorators/command.py +221 -221
  247. webscout/swiftcli/decorators/options.py +220 -220
  248. webscout/swiftcli/decorators/output.py +302 -302
  249. webscout/swiftcli/exceptions.py +21 -21
  250. webscout/swiftcli/plugins/__init__.py +9 -9
  251. webscout/swiftcli/plugins/base.py +135 -135
  252. webscout/swiftcli/plugins/manager.py +269 -269
  253. webscout/swiftcli/utils/__init__.py +59 -59
  254. webscout/swiftcli/utils/formatting.py +252 -252
  255. webscout/swiftcli/utils/parsing.py +267 -267
  256. webscout/update_checker.py +117 -117
  257. webscout/version.py +1 -1
  258. webscout/webscout_search.py +1183 -1183
  259. webscout/webscout_search_async.py +649 -649
  260. webscout/yep_search.py +346 -346
  261. webscout/zeroart/README.md +89 -89
  262. webscout/zeroart/__init__.py +134 -134
  263. webscout/zeroart/base.py +66 -66
  264. webscout/zeroart/effects.py +100 -100
  265. webscout/zeroart/fonts.py +1238 -1238
  266. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -936
  267. webscout-2025.10.11.dist-info/RECORD +300 -0
  268. webscout/Provider/AISEARCH/DeepFind.py +0 -254
  269. webscout/Provider/AllenAI.py +0 -440
  270. webscout/Provider/Blackboxai.py +0 -793
  271. webscout/Provider/FreeGemini.py +0 -250
  272. webscout/Provider/GptOss.py +0 -207
  273. webscout/Provider/Hunyuan.py +0 -283
  274. webscout/Provider/Kimi.py +0 -445
  275. webscout/Provider/MCPCore.py +0 -322
  276. webscout/Provider/MiniMax.py +0 -207
  277. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  278. webscout/Provider/OPENAI/MiniMax.py +0 -298
  279. webscout/Provider/OPENAI/Qwen3.py +0 -304
  280. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  281. webscout/Provider/OPENAI/copilot.py +0 -321
  282. webscout/Provider/OPENAI/gptoss.py +0 -288
  283. webscout/Provider/OPENAI/kimi.py +0 -469
  284. webscout/Provider/OPENAI/mcpcore.py +0 -431
  285. webscout/Provider/OPENAI/multichat.py +0 -378
  286. webscout/Provider/OPENAI/qodo.py +0 -630
  287. webscout/Provider/OPENAI/xenai.py +0 -514
  288. webscout/Provider/Reka.py +0 -214
  289. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  290. webscout/Provider/asksteve.py +0 -220
  291. webscout/Provider/copilot.py +0 -441
  292. webscout/Provider/freeaichat.py +0 -294
  293. webscout/Provider/koala.py +0 -182
  294. webscout/Provider/lmarena.py +0 -198
  295. webscout/Provider/monochat.py +0 -275
  296. webscout/Provider/multichat.py +0 -375
  297. webscout/Provider/scnet.py +0 -244
  298. webscout/Provider/talkai.py +0 -194
  299. webscout/tempid.py +0 -128
  300. webscout-8.3.6.dist-info/RECORD +0 -327
  301. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
  302. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
  303. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
  304. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
webscout/Provider/Koboldai.py
@@ -1,384 +1,206 @@
- import requests
- import json
- from ..AIutel import Optimizers
- from ..AIutel import Conversation
- from ..AIutel import AwesomePrompts, sanitize_stream
- from ..AIbase import Provider, AsyncProvider
- from webscout import exceptions
- from typing import Union, Any, AsyncGenerator, Dict
- import httpx
- #------------------------------------------------------KOBOLDAI-----------------------------------------------------------
- class KOBOLDAI(Provider):
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         temperature: float = 1,
-         top_p: float = 1,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-     ):
-         """Instantiate TGPT
-
-         Args:
-             is_conversation (str, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.2.
-             top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999.
-             timeout (int, optional): Http requesting timeout. Defaults to 30
-             intro (str, optional): Conversation introductory prompt. Defaults to `Conversation.intro`.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional) : Http reqiuest proxies (socks). Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-         """
-         self.session = requests.Session()
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.temperature = temperature
-         self.top_p = top_p
-         self.chat_endpoint = (
-             "https://koboldai-koboldcpp-tiefighter.hf.space/api/extra/generate/stream"
-         )
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.headers = {
-             "Content-Type": "application/json",
-             "Accept": "application/json",
-         }
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         self.session.headers.update(self.headers)
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict:
-         """Chat with AI
-
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict : {}
-         ```json
-         {
-             "token" : "How may I assist you today?"
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         self.session.headers.update(self.headers)
-         payload = {
-             "prompt": conversation_prompt,
-             "temperature": self.temperature,
-             "top_p": self.top_p,
-         }
-
-         def for_stream():
-             response = self.session.post(
-                 self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
-             )
-             if not response.ok:
-                 raise Exception(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-
-             message_load = ""
-             final_resp = None
-             for value in response.iter_lines(
-                 decode_unicode=True,
-                 delimiter="" if raw else "event: message\ndata:",
-                 chunk_size=self.stream_chunk_size,
-             ):
-                 try:
-                     resp = json.loads(value)
-                     message_load += self.get_message(resp)
-                     resp["token"] = message_load
-                     self.last_response.update(resp)
-                     final_resp = resp # Always keep the latest
-                 except json.decoder.JSONDecodeError:
-                     pass
-             if final_resp:
-                 yield final_resp if not raw else json.dumps(final_resp)
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-
-         def for_non_stream():
-             # let's make use of stream
-             for _ in for_stream():
-                 pass
-             return self.last_response
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response.get("token")
- class AsyncKOBOLDAI(AsyncProvider):
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         temperature: float = 1,
-         top_p: float = 1,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-     ):
-         """Instantiate TGPT
-
-         Args:
-             is_conversation (str, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.2.
-             top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999.
-             timeout (int, optional): Http requesting timeout. Defaults to 30
-             intro (str, optional): Conversation introductory prompt. Defaults to `Conversation.intro`.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional) : Http reqiuest proxies (socks). Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-         """
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.temperature = temperature
-         self.top_p = top_p
-         self.chat_endpoint = (
-             "https://koboldai-koboldcpp-tiefighter.hf.space/api/extra/generate/stream"
-         )
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.headers = {
-             "Content-Type": "application/json",
-             "Accept": "application/json",
-         }
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session = httpx.AsyncClient(headers=self.headers, proxies=proxies)
-
-     async def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Union[dict, AsyncGenerator]:
-         """Chat with AI asynchronously.
-
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict|AsyncGenerator : ai content
-         ```json
-         {
-             "token" : "How may I assist you today?"
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         payload = {
-             "prompt": conversation_prompt,
-             "temperature": self.temperature,
-             "top_p": self.top_p,
-         }
-
-         async def for_stream():
-             async with self.session.stream(
-                 "POST", self.chat_endpoint, json=payload, timeout=self.timeout
-             ) as response:
-                 if not response.is_success:
-                     raise exceptions.FailedToGenerateResponseError(
-                         f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
-                     )
-
-                 message_load = ""
-                 async for value in response.aiter_lines():
-                     try:
-                         resp = sanitize_stream(value)
-                         message_load += await self.get_message(resp)
-                         resp["token"] = message_load
-                         self.last_response.update(resp)
-                         yield value if raw else resp
-                     except json.decoder.JSONDecodeError:
-                         pass
-
-             self.conversation.update_chat_history(
-                 prompt, await self.get_message(self.last_response)
-             )
-
-         async def for_non_stream():
-             # let's make use of stream
-             async for _ in for_stream():
-                 pass
-             return self.last_response
-
-         return for_stream() if stream else await for_non_stream()
-
-     async def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Union[str, AsyncGenerator]:
-         """Generate response `str` asynchronously.
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         async def for_stream():
-             async_ask = await self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             )
-             async for response in async_ask:
-                 yield await self.get_message(response)
-
-         async def for_non_stream():
-             return await self.get_message(
-                 await self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else await for_non_stream()
-
-     async def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response.get("token")
+ import requests
+ import json
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+ from typing import Union, Any, AsyncGenerator, Dict
+ import httpx
+ #------------------------------------------------------KOBOLDAI-----------------------------------------------------------
+ class KOBOLDAI(Provider):
+     required_auth = False
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         temperature: float = 1,
+         top_p: float = 1,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+     ):
+         """Instantiate TGPT
+
+         Args:
+             is_conversation (str, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.2.
+             top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999.
+             timeout (int, optional): Http requesting timeout. Defaults to 30
+             intro (str, optional): Conversation introductory prompt. Defaults to `Conversation.intro`.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional) : Http reqiuest proxies (socks). Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+         """
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.temperature = temperature
+         self.top_p = top_p
+         self.chat_endpoint = (
+             "https://koboldai-koboldcpp-tiefighter.hf.space/api/extra/generate/stream"
+         )
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.headers = {
+             "Content-Type": "application/json",
+             "Accept": "application/json",
+         }
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict:
+         """Chat with AI
+
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict : {}
+         ```json
+         {
+             "token" : "How may I assist you today?"
+         }
+         ```
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         self.session.headers.update(self.headers)
+         payload = {
+             "prompt": conversation_prompt,
+             "temperature": self.temperature,
+             "top_p": self.top_p,
+         }
+
+         def for_stream():
+             response = self.session.post(
+                 self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
+             )
+             if not response.ok:
+                 raise Exception(
+                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                 )
+
+             message_load = ""
+             final_resp = None
+             for value in response.iter_lines(
+                 decode_unicode=True,
+                 delimiter="" if raw else "event: message\ndata:",
+                 chunk_size=self.stream_chunk_size,
+             ):
+                 try:
+                     resp = json.loads(value)
+                     message_load += self.get_message(resp)
+                     resp["token"] = message_load
+                     self.last_response.update(resp)
+                     final_resp = resp # Always keep the latest
+                 except json.decoder.JSONDecodeError:
+                     pass
+             if final_resp:
+                 yield final_resp if not raw else json.dumps(final_resp)
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+
+         def for_non_stream():
+             # let's make use of stream
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response.get("token")
+
+ if __name__ == "__main__":
+     koboldai = KOBOLDAI(is_conversation=True, max_tokens=600, temperature=0.7)
+     print(koboldai.chat("Explain quantum computing in simple terms", stream=False))
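
For reference, a minimal usage sketch of the rewritten provider, based on the `chat()` signature visible in this diff. The import path is assumed from the file list entry `webscout/Provider/Koboldai.py` rather than documented API, and note that because `get_message` returns the accumulated `token` field, streaming yields the full text so far rather than per-token deltas:

```python
# Minimal sketch, not an official example: the module path is assumed from
# the file list entry webscout/Provider/Koboldai.py in this diff.
from webscout.Provider.Koboldai import KOBOLDAI

bot = KOBOLDAI(is_conversation=True, max_tokens=600, temperature=0.7)

# stream=True makes chat() return a generator; in this version each yielded
# item is the accumulated reply text (the "token" field), not an increment.
for message in bot.chat("Explain quantum computing in simple terms", stream=True):
    print(message, end="", flush=True)
print()
```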