webscout 8.2.2__py3-none-any.whl → 2026.1.19__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webscout/AIauto.py +524 -143
- webscout/AIbase.py +247 -123
- webscout/AIutel.py +68 -132
- webscout/Bard.py +1072 -535
- webscout/Extra/GitToolkit/__init__.py +2 -2
- webscout/Extra/GitToolkit/gitapi/__init__.py +20 -12
- webscout/Extra/GitToolkit/gitapi/gist.py +142 -0
- webscout/Extra/GitToolkit/gitapi/organization.py +91 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +308 -195
- webscout/Extra/GitToolkit/gitapi/search.py +162 -0
- webscout/Extra/GitToolkit/gitapi/trending.py +236 -0
- webscout/Extra/GitToolkit/gitapi/user.py +128 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +82 -62
- webscout/Extra/YTToolkit/README.md +443 -0
- webscout/Extra/YTToolkit/YTdownloader.py +953 -957
- webscout/Extra/YTToolkit/__init__.py +3 -3
- webscout/Extra/YTToolkit/transcriber.py +595 -476
- webscout/Extra/YTToolkit/ytapi/README.md +230 -0
- webscout/Extra/YTToolkit/ytapi/__init__.py +22 -6
- webscout/Extra/YTToolkit/ytapi/captions.py +190 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +302 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +178 -45
- webscout/Extra/YTToolkit/ytapi/hashtag.py +120 -0
- webscout/Extra/YTToolkit/ytapi/https.py +89 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -59
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -8
- webscout/Extra/YTToolkit/ytapi/query.py +143 -40
- webscout/Extra/YTToolkit/ytapi/shorts.py +122 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +68 -63
- webscout/Extra/YTToolkit/ytapi/suggestions.py +97 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +66 -62
- webscout/Extra/YTToolkit/ytapi/video.py +189 -18
- webscout/Extra/__init__.py +2 -3
- webscout/Extra/gguf.py +1298 -682
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/tempmail/__init__.py +28 -28
- webscout/Extra/tempmail/async_utils.py +143 -141
- webscout/Extra/tempmail/base.py +172 -161
- webscout/Extra/tempmail/cli.py +191 -187
- webscout/Extra/tempmail/emailnator.py +88 -84
- webscout/Extra/tempmail/mail_tm.py +378 -361
- webscout/Extra/tempmail/temp_mail_io.py +304 -292
- webscout/Extra/weather.py +196 -194
- webscout/Extra/weather_ascii.py +17 -15
- webscout/Provider/AISEARCH/PERPLEXED_search.py +175 -0
- webscout/Provider/AISEARCH/Perplexity.py +237 -304
- webscout/Provider/AISEARCH/README.md +106 -0
- webscout/Provider/AISEARCH/__init__.py +16 -10
- webscout/Provider/AISEARCH/brave_search.py +298 -0
- webscout/Provider/AISEARCH/iask_search.py +130 -209
- webscout/Provider/AISEARCH/monica_search.py +200 -246
- webscout/Provider/AISEARCH/webpilotai_search.py +242 -281
- webscout/Provider/Algion.py +413 -0
- webscout/Provider/Andi.py +74 -69
- webscout/Provider/Apriel.py +313 -0
- webscout/Provider/Ayle.py +323 -0
- webscout/Provider/ChatSandbox.py +329 -0
- webscout/Provider/ClaudeOnline.py +365 -0
- webscout/Provider/Cohere.py +232 -208
- webscout/Provider/DeepAI.py +367 -0
- webscout/Provider/Deepinfra.py +343 -173
- webscout/Provider/EssentialAI.py +217 -0
- webscout/Provider/ExaAI.py +274 -261
- webscout/Provider/Gemini.py +60 -54
- webscout/Provider/GithubChat.py +385 -367
- webscout/Provider/Gradient.py +286 -0
- webscout/Provider/Groq.py +556 -670
- webscout/Provider/HadadXYZ.py +323 -0
- webscout/Provider/HeckAI.py +392 -233
- webscout/Provider/HuggingFace.py +387 -0
- webscout/Provider/IBM.py +340 -0
- webscout/Provider/Jadve.py +317 -266
- webscout/Provider/K2Think.py +306 -0
- webscout/Provider/Koboldai.py +221 -381
- webscout/Provider/Netwrck.py +273 -228
- webscout/Provider/Nvidia.py +310 -0
- webscout/Provider/OPENAI/DeepAI.py +489 -0
- webscout/Provider/OPENAI/K2Think.py +423 -0
- webscout/Provider/OPENAI/PI.py +463 -0
- webscout/Provider/OPENAI/README.md +890 -0
- webscout/Provider/OPENAI/TogetherAI.py +405 -0
- webscout/Provider/OPENAI/TwoAI.py +255 -0
- webscout/Provider/OPENAI/__init__.py +148 -25
- webscout/Provider/OPENAI/ai4chat.py +348 -0
- webscout/Provider/OPENAI/akashgpt.py +436 -0
- webscout/Provider/OPENAI/algion.py +303 -0
- webscout/Provider/OPENAI/ayle.py +365 -0
- webscout/Provider/OPENAI/base.py +253 -46
- webscout/Provider/OPENAI/cerebras.py +296 -0
- webscout/Provider/OPENAI/chatgpt.py +514 -193
- webscout/Provider/OPENAI/chatsandbox.py +233 -0
- webscout/Provider/OPENAI/deepinfra.py +403 -272
- webscout/Provider/OPENAI/e2b.py +2370 -1350
- webscout/Provider/OPENAI/elmo.py +278 -0
- webscout/Provider/OPENAI/exaai.py +186 -138
- webscout/Provider/OPENAI/freeassist.py +446 -0
- webscout/Provider/OPENAI/gradient.py +448 -0
- webscout/Provider/OPENAI/groq.py +380 -0
- webscout/Provider/OPENAI/hadadxyz.py +292 -0
- webscout/Provider/OPENAI/heckai.py +100 -104
- webscout/Provider/OPENAI/huggingface.py +321 -0
- webscout/Provider/OPENAI/ibm.py +425 -0
- webscout/Provider/OPENAI/llmchat.py +253 -0
- webscout/Provider/OPENAI/llmchatco.py +378 -327
- webscout/Provider/OPENAI/meta.py +541 -0
- webscout/Provider/OPENAI/netwrck.py +110 -84
- webscout/Provider/OPENAI/nvidia.py +317 -0
- webscout/Provider/OPENAI/oivscode.py +348 -0
- webscout/Provider/OPENAI/openrouter.py +328 -0
- webscout/Provider/OPENAI/pydantic_imports.py +1 -0
- webscout/Provider/OPENAI/sambanova.py +397 -0
- webscout/Provider/OPENAI/sonus.py +126 -115
- webscout/Provider/OPENAI/textpollinations.py +218 -133
- webscout/Provider/OPENAI/toolbaz.py +136 -166
- webscout/Provider/OPENAI/typefully.py +419 -0
- webscout/Provider/OPENAI/typliai.py +279 -0
- webscout/Provider/OPENAI/utils.py +314 -211
- webscout/Provider/OPENAI/wisecat.py +103 -125
- webscout/Provider/OPENAI/writecream.py +185 -156
- webscout/Provider/OPENAI/x0gpt.py +227 -136
- webscout/Provider/OPENAI/zenmux.py +380 -0
- webscout/Provider/OpenRouter.py +386 -0
- webscout/Provider/Openai.py +337 -496
- webscout/Provider/PI.py +443 -344
- webscout/Provider/QwenLM.py +346 -254
- webscout/Provider/STT/__init__.py +28 -0
- webscout/Provider/STT/base.py +303 -0
- webscout/Provider/STT/elevenlabs.py +264 -0
- webscout/Provider/Sambanova.py +317 -0
- webscout/Provider/TTI/README.md +69 -0
- webscout/Provider/TTI/__init__.py +37 -12
- webscout/Provider/TTI/base.py +147 -0
- webscout/Provider/TTI/claudeonline.py +393 -0
- webscout/Provider/TTI/magicstudio.py +292 -0
- webscout/Provider/TTI/miragic.py +180 -0
- webscout/Provider/TTI/pollinations.py +331 -0
- webscout/Provider/TTI/together.py +334 -0
- webscout/Provider/TTI/utils.py +14 -0
- webscout/Provider/TTS/README.md +186 -0
- webscout/Provider/TTS/__init__.py +43 -7
- webscout/Provider/TTS/base.py +523 -0
- webscout/Provider/TTS/deepgram.py +286 -156
- webscout/Provider/TTS/elevenlabs.py +189 -111
- webscout/Provider/TTS/freetts.py +218 -0
- webscout/Provider/TTS/murfai.py +288 -113
- webscout/Provider/TTS/openai_fm.py +364 -0
- webscout/Provider/TTS/parler.py +203 -111
- webscout/Provider/TTS/qwen.py +334 -0
- webscout/Provider/TTS/sherpa.py +286 -0
- webscout/Provider/TTS/speechma.py +693 -180
- webscout/Provider/TTS/streamElements.py +275 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TextPollinationsAI.py +221 -121
- webscout/Provider/TogetherAI.py +450 -0
- webscout/Provider/TwoAI.py +309 -199
- webscout/Provider/TypliAI.py +311 -0
- webscout/Provider/UNFINISHED/ChatHub.py +219 -0
- webscout/Provider/{OPENAI/glider.py → UNFINISHED/ChutesAI.py} +160 -145
- webscout/Provider/UNFINISHED/GizAI.py +300 -0
- webscout/Provider/UNFINISHED/Marcus.py +218 -0
- webscout/Provider/UNFINISHED/Qodo.py +481 -0
- webscout/Provider/UNFINISHED/XenAI.py +330 -0
- webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +64 -47
- webscout/Provider/UNFINISHED/aihumanizer.py +41 -0
- webscout/Provider/UNFINISHED/grammerchecker.py +37 -0
- webscout/Provider/UNFINISHED/liner.py +342 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +246 -0
- webscout/Provider/UNFINISHED/samurai.py +231 -0
- webscout/Provider/WiseCat.py +256 -196
- webscout/Provider/WrDoChat.py +390 -0
- webscout/Provider/__init__.py +115 -198
- webscout/Provider/ai4chat.py +181 -202
- webscout/Provider/akashgpt.py +330 -342
- webscout/Provider/cerebras.py +397 -242
- webscout/Provider/cleeai.py +236 -213
- webscout/Provider/elmo.py +291 -234
- webscout/Provider/geminiapi.py +343 -208
- webscout/Provider/julius.py +245 -223
- webscout/Provider/learnfastai.py +333 -266
- webscout/Provider/llama3mitril.py +230 -180
- webscout/Provider/llmchat.py +308 -213
- webscout/Provider/llmchatco.py +321 -311
- webscout/Provider/meta.py +996 -794
- webscout/Provider/oivscode.py +332 -0
- webscout/Provider/searchchat.py +316 -293
- webscout/Provider/sonus.py +264 -208
- webscout/Provider/toolbaz.py +359 -320
- webscout/Provider/turboseek.py +332 -219
- webscout/Provider/typefully.py +262 -280
- webscout/Provider/x0gpt.py +332 -256
- webscout/__init__.py +31 -38
- webscout/__main__.py +5 -5
- webscout/cli.py +585 -293
- webscout/client.py +1497 -0
- webscout/conversation.py +140 -565
- webscout/exceptions.py +383 -339
- webscout/litagent/__init__.py +29 -29
- webscout/litagent/agent.py +492 -455
- webscout/litagent/constants.py +60 -60
- webscout/models.py +505 -181
- webscout/optimizers.py +32 -378
- webscout/prompt_manager.py +376 -274
- webscout/sanitize.py +1514 -0
- webscout/scout/README.md +452 -0
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +7 -7
- webscout/scout/core/crawler.py +330 -140
- webscout/scout/core/scout.py +800 -568
- webscout/scout/core/search_result.py +51 -96
- webscout/scout/core/text_analyzer.py +64 -63
- webscout/scout/core/text_utils.py +412 -277
- webscout/scout/core/web_analyzer.py +54 -52
- webscout/scout/element.py +872 -460
- webscout/scout/parsers/__init__.py +70 -69
- webscout/scout/parsers/html5lib_parser.py +182 -172
- webscout/scout/parsers/html_parser.py +238 -236
- webscout/scout/parsers/lxml_parser.py +203 -178
- webscout/scout/utils.py +38 -37
- webscout/search/__init__.py +47 -0
- webscout/search/base.py +201 -0
- webscout/search/bing_main.py +45 -0
- webscout/search/brave_main.py +92 -0
- webscout/search/duckduckgo_main.py +57 -0
- webscout/search/engines/__init__.py +127 -0
- webscout/search/engines/bing/__init__.py +15 -0
- webscout/search/engines/bing/base.py +35 -0
- webscout/search/engines/bing/images.py +114 -0
- webscout/search/engines/bing/news.py +96 -0
- webscout/search/engines/bing/suggestions.py +36 -0
- webscout/search/engines/bing/text.py +109 -0
- webscout/search/engines/brave/__init__.py +19 -0
- webscout/search/engines/brave/base.py +47 -0
- webscout/search/engines/brave/images.py +213 -0
- webscout/search/engines/brave/news.py +353 -0
- webscout/search/engines/brave/suggestions.py +318 -0
- webscout/search/engines/brave/text.py +167 -0
- webscout/search/engines/brave/videos.py +364 -0
- webscout/search/engines/duckduckgo/__init__.py +25 -0
- webscout/search/engines/duckduckgo/answers.py +80 -0
- webscout/search/engines/duckduckgo/base.py +189 -0
- webscout/search/engines/duckduckgo/images.py +100 -0
- webscout/search/engines/duckduckgo/maps.py +183 -0
- webscout/search/engines/duckduckgo/news.py +70 -0
- webscout/search/engines/duckduckgo/suggestions.py +22 -0
- webscout/search/engines/duckduckgo/text.py +221 -0
- webscout/search/engines/duckduckgo/translate.py +48 -0
- webscout/search/engines/duckduckgo/videos.py +80 -0
- webscout/search/engines/duckduckgo/weather.py +84 -0
- webscout/search/engines/mojeek.py +61 -0
- webscout/search/engines/wikipedia.py +77 -0
- webscout/search/engines/yahoo/__init__.py +41 -0
- webscout/search/engines/yahoo/answers.py +19 -0
- webscout/search/engines/yahoo/base.py +34 -0
- webscout/search/engines/yahoo/images.py +323 -0
- webscout/search/engines/yahoo/maps.py +19 -0
- webscout/search/engines/yahoo/news.py +258 -0
- webscout/search/engines/yahoo/suggestions.py +140 -0
- webscout/search/engines/yahoo/text.py +273 -0
- webscout/search/engines/yahoo/translate.py +19 -0
- webscout/search/engines/yahoo/videos.py +302 -0
- webscout/search/engines/yahoo/weather.py +220 -0
- webscout/search/engines/yandex.py +67 -0
- webscout/search/engines/yep/__init__.py +13 -0
- webscout/search/engines/yep/base.py +34 -0
- webscout/search/engines/yep/images.py +101 -0
- webscout/search/engines/yep/suggestions.py +38 -0
- webscout/search/engines/yep/text.py +99 -0
- webscout/search/http_client.py +172 -0
- webscout/search/results.py +141 -0
- webscout/search/yahoo_main.py +57 -0
- webscout/search/yep_main.py +48 -0
- webscout/server/__init__.py +48 -0
- webscout/server/config.py +78 -0
- webscout/server/exceptions.py +69 -0
- webscout/server/providers.py +286 -0
- webscout/server/request_models.py +131 -0
- webscout/server/request_processing.py +404 -0
- webscout/server/routes.py +642 -0
- webscout/server/server.py +351 -0
- webscout/server/ui_templates.py +1171 -0
- webscout/swiftcli/__init__.py +79 -809
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +574 -0
- webscout/swiftcli/core/context.py +98 -0
- webscout/swiftcli/core/group.py +268 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +243 -0
- webscout/swiftcli/decorators/options.py +247 -0
- webscout/swiftcli/decorators/output.py +392 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +134 -0
- webscout/swiftcli/plugins/manager.py +269 -0
- webscout/swiftcli/utils/__init__.py +58 -0
- webscout/swiftcli/utils/formatting.py +251 -0
- webscout/swiftcli/utils/parsing.py +368 -0
- webscout/update_checker.py +280 -136
- webscout/utils.py +28 -14
- webscout/version.py +2 -1
- webscout/version.py.bak +3 -0
- webscout/zeroart/__init__.py +218 -55
- webscout/zeroart/base.py +70 -60
- webscout/zeroart/effects.py +155 -99
- webscout/zeroart/fonts.py +1799 -816
- webscout-2026.1.19.dist-info/METADATA +638 -0
- webscout-2026.1.19.dist-info/RECORD +312 -0
- {webscout-8.2.2.dist-info → webscout-2026.1.19.dist-info}/WHEEL +1 -1
- webscout-2026.1.19.dist-info/entry_points.txt +4 -0
- webscout-2026.1.19.dist-info/top_level.txt +1 -0
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/DWEBS.py +0 -477
- webscout/Extra/autocoder/__init__.py +0 -9
- webscout/Extra/autocoder/autocoder.py +0 -849
- webscout/Extra/autocoder/autocoder_utiles.py +0 -332
- webscout/LLM.py +0 -442
- webscout/Litlogger/__init__.py +0 -67
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/AISEARCH/DeepFind.py +0 -250
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/AISEARCH/felo_search.py +0 -228
- webscout/Provider/AISEARCH/genspark_search.py +0 -208
- webscout/Provider/AISEARCH/hika_search.py +0 -194
- webscout/Provider/AISEARCH/scira_search.py +0 -324
- webscout/Provider/Aitopia.py +0 -292
- webscout/Provider/AllenAI.py +0 -413
- webscout/Provider/Blackboxai.py +0 -229
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTClone.py +0 -226
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/Cloudflare.py +0 -273
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/ElectronHub.py +0 -709
- webscout/Provider/ExaChat.py +0 -342
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -193
- webscout/Provider/Glider.py +0 -211
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/HuggingFaceChat.py +0 -462
- webscout/Provider/Hunyuan.py +0 -272
- webscout/Provider/LambdaChat.py +0 -392
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Llama3.py +0 -204
- webscout/Provider/Marcus.py +0 -148
- webscout/Provider/OLLAMA.py +0 -396
- webscout/Provider/OPENAI/c4ai.py +0 -367
- webscout/Provider/OPENAI/chatgptclone.py +0 -460
- webscout/Provider/OPENAI/exachat.py +0 -433
- webscout/Provider/OPENAI/freeaichat.py +0 -352
- webscout/Provider/OPENAI/opkfc.py +0 -488
- webscout/Provider/OPENAI/scirachat.py +0 -463
- webscout/Provider/OPENAI/standardinput.py +0 -425
- webscout/Provider/OPENAI/typegpt.py +0 -346
- webscout/Provider/OPENAI/uncovrAI.py +0 -455
- webscout/Provider/OPENAI/venice.py +0 -413
- webscout/Provider/OPENAI/yep.py +0 -327
- webscout/Provider/OpenGPT.py +0 -199
- webscout/Provider/Perplexitylabs.py +0 -415
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/PizzaGPT.py +0 -198
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/StandardInput.py +0 -278
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/TTS/gesserit.py +0 -127
- webscout/Provider/TeachAnything.py +0 -187
- webscout/Provider/Venice.py +0 -219
- webscout/Provider/VercelAI.py +0 -234
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/Writecream.py +0 -211
- webscout/Provider/WritingMate.py +0 -197
- webscout/Provider/aimathgpt.py +0 -189
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/asksteve.py +0 -203
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/chatglm.py +0 -205
- webscout/Provider/copilot.py +0 -428
- webscout/Provider/freeaichat.py +0 -271
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/Provider/granite.py +0 -187
- webscout/Provider/hermes.py +0 -219
- webscout/Provider/koala.py +0 -268
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- webscout/Provider/multichat.py +0 -325
- webscout/Provider/promptrefine.py +0 -193
- webscout/Provider/scira_chat.py +0 -277
- webscout/Provider/scnet.py +0 -187
- webscout/Provider/talkai.py +0 -194
- webscout/Provider/tutorai.py +0 -252
- webscout/Provider/typegpt.py +0 -232
- webscout/Provider/uncovr.py +0 -312
- webscout/Provider/yep.py +0 -376
- webscout/litprinter/__init__.py +0 -59
- webscout/scout/core.py +0 -881
- webscout/tempid.py +0 -128
- webscout/webscout_search.py +0 -1346
- webscout/webscout_search_async.py +0 -877
- webscout/yep_search.py +0 -297
- webscout-8.2.2.dist-info/METADATA +0 -734
- webscout-8.2.2.dist-info/RECORD +0 -309
- webscout-8.2.2.dist-info/entry_points.txt +0 -5
- webscout-8.2.2.dist-info/top_level.txt +0 -3
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- {webscout-8.2.2.dist-info → webscout-2026.1.19.dist-info/licenses}/LICENSE.md +0 -0
|
@@ -0,0 +1,330 @@
|
|
|
1
|
+
import random
|
|
2
|
+
import string
|
|
3
|
+
import uuid
|
|
4
|
+
import warnings
|
|
5
|
+
from typing import Any, Dict, Generator, Optional, Union, cast
|
|
6
|
+
|
|
7
|
+
import urllib3
|
|
8
|
+
from curl_cffi.requests import Session
|
|
9
|
+
|
|
10
|
+
from webscout import exceptions
|
|
11
|
+
from webscout.AIbase import Provider, Response
|
|
12
|
+
from webscout.AIutel import AwesomePrompts, Conversation, Optimizers, sanitize_stream
|
|
13
|
+
from webscout.litagent import LitAgent
|
|
14
|
+
|
|
15
|
+
# Suppress only the single InsecureRequestWarning from urllib3 needed for verify=False
|
|
16
|
+
warnings.filterwarnings("ignore", category=urllib3.exceptions.InsecureRequestWarning)
|
|
17
|
+
|
|
18
|
+
class XenAI(Provider):
    """Provider client for the chat.xenai.tech chat-completions endpoint.

    A throwaway account is auto-registered at construction time to obtain a
    bearer token, so callers need no credentials or cookies.
    """

    # Add more models if known, starting with the one from the example.
    # NOTE(review): this list is advisory only — __init__ merely prints a
    # warning (it does not raise) when an unlisted model id is supplied.
    AVAILABLE_MODELS = [
        "gemini-2.5-pro-preview-05-06",
        "gemini-2.5-flash-preview-05-20",
        "o4-mini-high",
        "grok-3-mini-fast-beta",
        "grok-3-fast-beta",
        "gpt-4.1",
        "o3-high",
        "gpt-4o-search-preview",
        "gpt-4o",
        "claude-sonnet-4-20250514",
        "claude-sonnet-4-20250514-thinking",
        "deepseek-ai/DeepSeek-V3-0324",
        "deepseek-ai/DeepSeek-R1-0528",
        "groq/deepseek-r1-distill-llama-70b",
        "deepseek-ai/DeepSeek-Prover-V2-671B",
        "meta-llama/llama-4-maverick-17b-128e-instruct",
        "meta-llama/llama-4-scout-17b-16e-instruct",
        "cognitivecomputations/Dolphin3.0-Mistral-24B",
        "sonar-pro",
        "gpt-4o-mini",
        "gemini-2.0-flash-lite-preview-02-05",
        "claude-3-7-sonnet-20250219",
        "claude-3-7-sonnet-20250219-thinking",
        "claude-opus-4-20250514",
        "claude-opus-4-20250514-thinking",
        "chutesai/Llama-4-Maverick-17B-128E-Instruct-FP8",
        "chutesai/Llama-4-Scout-17B-16E-Instruct",
    ]
|
+
|
|
51
|
+
def __init__(
|
|
52
|
+
self,
|
|
53
|
+
is_conversation: bool = True,
|
|
54
|
+
max_tokens: int = 2048,
|
|
55
|
+
timeout: int = 60,
|
|
56
|
+
intro: Optional[str] = None,
|
|
57
|
+
filepath: Optional[str] = None,
|
|
58
|
+
update_file: bool = True,
|
|
59
|
+
proxies: dict = {},
|
|
60
|
+
history_offset: int = 10250,
|
|
61
|
+
act: Optional[str] = None,
|
|
62
|
+
model: str = "gemini-2.5-pro-preview-05-06",
|
|
63
|
+
system_prompt: str = "You are a helpful assistant.",
|
|
64
|
+
):
|
|
65
|
+
"""Initializes the xenai API client."""
|
|
66
|
+
if model not in self.AVAILABLE_MODELS:
|
|
67
|
+
print(f"Warning: Model '{model}' is not listed in AVAILABLE_MODELS. Proceeding with the provided model.")
|
|
68
|
+
|
|
69
|
+
self.api_endpoint = "https://chat.xenai.tech/api/chat/completions"
|
|
70
|
+
|
|
71
|
+
self.model = model
|
|
72
|
+
self.system_prompt = system_prompt
|
|
73
|
+
|
|
74
|
+
# Initialize curl_cffi Session
|
|
75
|
+
self.session = Session()
|
|
76
|
+
|
|
77
|
+
# Set up headers based on the provided request
|
|
78
|
+
self.headers = {
|
|
79
|
+
**LitAgent().generate_fingerprint(),
|
|
80
|
+
'origin': 'https://chat.xenai.tech',
|
|
81
|
+
'referer': 'https://chat.xenai.tech/',
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
# Apply headers, proxies, and cookies to the session
|
|
85
|
+
self.session.headers.update(self.headers)
|
|
86
|
+
self.session.proxies.update(proxies)
|
|
87
|
+
# Always disable SSL verification for this session
|
|
88
|
+
self.session.verify = False
|
|
89
|
+
|
|
90
|
+
# Provider settings
|
|
91
|
+
self.is_conversation = is_conversation
|
|
92
|
+
self.max_tokens_to_sample = max_tokens
|
|
93
|
+
self.timeout = timeout
|
|
94
|
+
self.last_response = {}
|
|
95
|
+
|
|
96
|
+
# Initialize optimizers
|
|
97
|
+
self.__available_optimizers = (
|
|
98
|
+
method
|
|
99
|
+
for method in dir(Optimizers)
|
|
100
|
+
if callable(getattr(Optimizers, method))
|
|
101
|
+
and not method.startswith("__")
|
|
102
|
+
)
|
|
103
|
+
self.conversation = Conversation(
|
|
104
|
+
is_conversation, self.max_tokens_to_sample, filepath, update_file
|
|
105
|
+
)
|
|
106
|
+
act_prompt = (
|
|
107
|
+
AwesomePrompts().get_act(cast(Union[str, int], act), default=None, case_insensitive=True
|
|
108
|
+
)
|
|
109
|
+
if act
|
|
110
|
+
else intro
|
|
111
|
+
)
|
|
112
|
+
if act_prompt:
|
|
113
|
+
self.conversation.intro = act_prompt
|
|
114
|
+
self.conversation.history_offset = history_offset
|
|
115
|
+
|
|
116
|
+
# Token handling: always auto-fetch token, no cookies logic
|
|
117
|
+
self.token = self._auto_fetch_token()
|
|
118
|
+
|
|
119
|
+
# Set the Authorization header for the session
|
|
120
|
+
self.session.headers.update({
|
|
121
|
+
'authorization': f'Bearer {self.token}',
|
|
122
|
+
})
|
|
123
|
+
|
|
124
|
+
def _auto_fetch_token(self):
|
|
125
|
+
"""Automatically fetch a token from the signup endpoint using requests."""
|
|
126
|
+
session = Session()
|
|
127
|
+
session.verify = False # Always disable SSL verification for this session
|
|
128
|
+
def random_string(length=8):
|
|
129
|
+
return ''.join(random.choices(string.ascii_lowercase, k=length))
|
|
130
|
+
name = random_string(8)
|
|
131
|
+
email = f"{name}@gmail.com"
|
|
132
|
+
password = email
|
|
133
|
+
profile_image_url = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAYAAABw4pVUAAAAAXNSR0IArs4c6QAAAkRJREFUeF7tmDFOw0AUBdcSiIaKM3CKHIQ7UHEISq5AiUTFHYC0XADoTRsJEZFEjhFIaYAim92fjGFS736/zOTZzjavl0d98oMh0CgE4+IriEJYPhQC86EQhdAIwPL4DFEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg2BCfkAIqwAA94KZ/EAAAAASUVORK5CYII="
|
|
134
|
+
payload = {
|
|
135
|
+
"name": name,
|
|
136
|
+
"email": email,
|
|
137
|
+
"password": password,
|
|
138
|
+
"profile_image_url": profile_image_url
|
|
139
|
+
}
|
|
140
|
+
headers = {
|
|
141
|
+
**LitAgent().generate_fingerprint(),
|
|
142
|
+
'origin': 'https://chat.xenai.tech',
|
|
143
|
+
'referer': 'https://chat.xenai.tech/auth',
|
|
144
|
+
}
|
|
145
|
+
try:
|
|
146
|
+
resp = session.post(
|
|
147
|
+
"https://chat.xenai.tech/api/v1/auths/signup",
|
|
148
|
+
headers=headers,
|
|
149
|
+
json=payload,
|
|
150
|
+
timeout=30,
|
|
151
|
+
verify=False # Disable SSL verification for testing
|
|
152
|
+
)
|
|
153
|
+
if resp.ok:
|
|
154
|
+
data = resp.json()
|
|
155
|
+
token = data.get("token")
|
|
156
|
+
if token:
|
|
157
|
+
return token
|
|
158
|
+
set_cookie = resp.headers.get("set-cookie", "")
|
|
159
|
+
if "token=" in set_cookie:
|
|
160
|
+
return set_cookie.split("token=")[1].split(";")[0]
|
|
161
|
+
raise exceptions.FailedToGenerateResponseError(f"Failed to auto-fetch token: {resp.status_code} {resp.text}")
|
|
162
|
+
except Exception as e:
|
|
163
|
+
raise exceptions.FailedToGenerateResponseError(f"Token auto-fetch failed: {e}")
|
|
164
|
+
|
|
165
|
+
def ask(
|
|
166
|
+
self,
|
|
167
|
+
prompt: str,
|
|
168
|
+
stream: bool = False,
|
|
169
|
+
raw: bool = False,
|
|
170
|
+
optimizer: Optional[str] = None,
|
|
171
|
+
conversationally: bool = False,
|
|
172
|
+
**kwargs
|
|
173
|
+
) -> Union[Dict[str, Any], Generator]:
|
|
174
|
+
"""Sends a prompt to the xenai API and returns the response."""
|
|
175
|
+
|
|
176
|
+
conversation_prompt = self.conversation.gen_complete_prompt(prompt)
|
|
177
|
+
|
|
178
|
+
if optimizer:
|
|
179
|
+
if optimizer in self.__available_optimizers:
|
|
180
|
+
conversation_prompt = getattr(Optimizers, optimizer)(
|
|
181
|
+
conversation_prompt if conversationally else prompt
|
|
182
|
+
)
|
|
183
|
+
else:
|
|
184
|
+
raise exceptions.InvalidOptimizerError(
|
|
185
|
+
f"Optimizer is not one of {self.__available_optimizers}"
|
|
186
|
+
)
|
|
187
|
+
|
|
188
|
+
chat_id = kwargs.get("chat_id", str(uuid.uuid4()))
|
|
189
|
+
message_id = str(uuid.uuid4())
|
|
190
|
+
|
|
191
|
+
payload = {
|
|
192
|
+
"stream": stream,
|
|
193
|
+
"model": self.model,
|
|
194
|
+
"messages": [
|
|
195
|
+
{"role": "system", "content": self.system_prompt},
|
|
196
|
+
{"role": "user", "content": conversation_prompt}
|
|
197
|
+
],
|
|
198
|
+
"params": kwargs.get("params", {}),
|
|
199
|
+
"tool_servers": kwargs.get("tool_servers", []),
|
|
200
|
+
"features": kwargs.get("features", {"web_search": False}),
|
|
201
|
+
"chat_id": chat_id,
|
|
202
|
+
"id": message_id,
|
|
203
|
+
"stream_options": kwargs.get("stream_options", {"include_usage": True})
|
|
204
|
+
}
|
|
205
|
+
|
|
206
|
+
def for_stream():
|
|
207
|
+
streaming_text = ""
|
|
208
|
+
try:
|
|
209
|
+
response = self.session.post(
|
|
210
|
+
self.api_endpoint,
|
|
211
|
+
json=payload,
|
|
212
|
+
stream=True,
|
|
213
|
+
timeout=self.timeout,
|
|
214
|
+
verify=False # Always disable SSL verification for this request
|
|
215
|
+
)
|
|
216
|
+
response.raise_for_status()
|
|
217
|
+
|
|
218
|
+
# Use sanitize_stream
|
|
219
|
+
processed_stream = sanitize_stream(
|
|
220
|
+
data=response.iter_content(chunk_size=None), # Pass byte iterator
|
|
221
|
+
intro_value="data:",
|
|
222
|
+
to_json=True, # Stream sends JSON
|
|
223
|
+
skip_markers=["[DONE]"],
|
|
224
|
+
content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None,
|
|
225
|
+
yield_raw_on_error=False # Skip non-JSON or lines where extractor fails
|
|
226
|
+
)
|
|
227
|
+
|
|
228
|
+
for content_chunk in processed_stream:
|
|
229
|
+
# content_chunk is the string extracted by the content_extractor
|
|
230
|
+
if content_chunk and isinstance(content_chunk, str):
|
|
231
|
+
streaming_text += content_chunk
|
|
232
|
+
yield dict(text=content_chunk) if not raw else content_chunk
|
|
233
|
+
|
|
234
|
+
self.last_response = {"text": streaming_text}
|
|
235
|
+
self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
|
|
236
|
+
|
|
237
|
+
except Exception as e:
|
|
238
|
+
raise exceptions.FailedToGenerateResponseError(f"Request failed (requests): {e}") from e
|
|
239
|
+
except Exception as e:
|
|
240
|
+
err_text = ""
|
|
241
|
+
if hasattr(e, 'response'):
|
|
242
|
+
response_obj = getattr(e, 'response')
|
|
243
|
+
if hasattr(response_obj, 'text'):
|
|
244
|
+
err_text = getattr(response_obj, 'text')
|
|
245
|
+
raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e
|
|
246
|
+
|
|
247
|
+
def for_non_stream():
|
|
248
|
+
full_text = ""
|
|
249
|
+
try:
|
|
250
|
+
stream_generator = self.ask(
|
|
251
|
+
prompt, stream=True, raw=False, optimizer=optimizer, conversationally=conversationally, **kwargs
|
|
252
|
+
)
|
|
253
|
+
for chunk_data in stream_generator:
|
|
254
|
+
if isinstance(chunk_data, dict):
|
|
255
|
+
full_text += chunk_data["text"]
|
|
256
|
+
elif isinstance(chunk_data, str):
|
|
257
|
+
full_text += chunk_data
|
|
258
|
+
except Exception as e:
|
|
259
|
+
raise exceptions.FailedToGenerateResponseError(f"Failed to aggregate non-stream response (requests): {str(e)}") from e
|
|
260
|
+
except Exception as e:
|
|
261
|
+
raise exceptions.FailedToGenerateResponseError(f"Failed to aggregate non-stream response: {str(e)}") from e
|
|
262
|
+
|
|
263
|
+
return full_text if raw else self.last_response
|
|
264
|
+
|
|
265
|
+
return for_stream() if stream else for_non_stream()
|
|
266
|
+
|
|
267
|
+
def chat(
|
|
268
|
+
self,
|
|
269
|
+
prompt: str,
|
|
270
|
+
stream: bool = False,
|
|
271
|
+
optimizer: Optional[str] = None,
|
|
272
|
+
conversationally: bool = False,
|
|
273
|
+
**kwargs
|
|
274
|
+
) -> Union[str, Generator[str, None, None]]:
|
|
275
|
+
"""Generates a response from the xenai API."""
|
|
276
|
+
|
|
277
|
+
def for_stream_chat() -> Generator[str, None, None]:
|
|
278
|
+
gen = self.ask(
|
|
279
|
+
prompt, stream=True, raw=False,
|
|
280
|
+
optimizer=optimizer, conversationally=conversationally, **kwargs
|
|
281
|
+
)
|
|
282
|
+
for response_item in gen:
|
|
283
|
+
yield self.get_message(response_item)
|
|
284
|
+
|
|
285
|
+
def for_non_stream_chat() -> str:
|
|
286
|
+
response_data = self.ask(
|
|
287
|
+
prompt, stream=False, raw=False,
|
|
288
|
+
optimizer=optimizer, conversationally=conversationally, **kwargs
|
|
289
|
+
)
|
|
290
|
+
return self.get_message(response_data)
|
|
291
|
+
|
|
292
|
+
return for_stream_chat() if stream else for_non_stream_chat()
|
|
293
|
+
|
|
294
|
+
def get_message(self, response: Response) -> str:
|
|
295
|
+
"""Extracts the message from the API response."""
|
|
296
|
+
if isinstance(response, str):
|
|
297
|
+
return response
|
|
298
|
+
if isinstance(response, dict):
|
|
299
|
+
return dict(response).get("text", "")
|
|
300
|
+
return str(response)
|
|
301
|
+
|
|
302
|
+
# Example usage (no cookies file needed)
|
|
303
|
+
if __name__ == "__main__":
|
|
304
|
+
from rich import print
|
|
305
|
+
|
|
306
|
+
print("-" * 80)
|
|
307
|
+
print(f"{'Model':<50} {'Status':<10} {'Response'}")
|
|
308
|
+
print("-" * 80)
|
|
309
|
+
|
|
310
|
+
for model in XenAI.AVAILABLE_MODELS:
|
|
311
|
+
try:
|
|
312
|
+
test_ai = XenAI(model=model, timeout=60)
|
|
313
|
+
response = test_ai.chat("Say 'Hello' in one word", stream=True)
|
|
314
|
+
response_text = ""
|
|
315
|
+
# Accumulate the response text without printing in the loop
|
|
316
|
+
for chunk in response:
|
|
317
|
+
response_text += chunk
|
|
318
|
+
|
|
319
|
+
if response_text and len(response_text.strip()) > 0:
|
|
320
|
+
status = "✓"
|
|
321
|
+
# Truncate response if too long
|
|
322
|
+
display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
|
|
323
|
+
else:
|
|
324
|
+
status = "✗"
|
|
325
|
+
display_text = "Empty or invalid response"
|
|
326
|
+
# Print the final status and response, overwriting the "Testing..." line
|
|
327
|
+
print(f"\r{model:<50} {status:<10} {display_text}")
|
|
328
|
+
except Exception as e:
|
|
329
|
+
# Print error, overwriting the "Testing..." line
|
|
330
|
+
print(f"\r{model:<50} {'✗':<10} {str(e)}")
|
|
@@ -1,13 +1,14 @@
|
|
|
1
|
-
from uuid import uuid4
|
|
2
|
-
import json
|
|
3
1
|
import datetime
|
|
4
|
-
|
|
5
|
-
from
|
|
6
|
-
from
|
|
7
|
-
|
|
8
|
-
from webscout import exceptions
|
|
2
|
+
import json
|
|
3
|
+
from typing import Any, Generator, Optional, Union, cast
|
|
4
|
+
from uuid import uuid4
|
|
5
|
+
|
|
9
6
|
import cloudscraper
|
|
10
7
|
|
|
8
|
+
from webscout import exceptions
|
|
9
|
+
from webscout.AIbase import Provider, Response
|
|
10
|
+
from webscout.AIutel import AwesomePrompts, Conversation, Optimizers
|
|
11
|
+
|
|
11
12
|
|
|
12
13
|
class YouChat(Provider):
|
|
13
14
|
"""
|
|
@@ -33,7 +34,7 @@ class YouChat(Provider):
|
|
|
33
34
|
# "deepseek_r1", # isProOnly: true
|
|
34
35
|
# "deepseek_v3", # isProOnly: true
|
|
35
36
|
# "gemini_2_5_pro_experimental", # isProOnly: true
|
|
36
|
-
|
|
37
|
+
|
|
37
38
|
# Free models (isProOnly: false)
|
|
38
39
|
"gpt_4o_mini",
|
|
39
40
|
"gpt_4o",
|
|
@@ -51,7 +52,7 @@ class YouChat(Provider):
|
|
|
51
52
|
"llama3_1_405b",
|
|
52
53
|
"mistral_large_2",
|
|
53
54
|
"command_r_plus",
|
|
54
|
-
|
|
55
|
+
|
|
55
56
|
# Free models not enabled for user chat modes
|
|
56
57
|
"llama3_3_70b", # isAllowedForUserChatModes: false
|
|
57
58
|
"llama3_2_90b", # isAllowedForUserChatModes: false
|
|
@@ -65,12 +66,12 @@ class YouChat(Provider):
|
|
|
65
66
|
is_conversation: bool = True,
|
|
66
67
|
max_tokens: int = 600,
|
|
67
68
|
timeout: int = 30,
|
|
68
|
-
intro: str = None,
|
|
69
|
-
filepath: str = None,
|
|
69
|
+
intro: Optional[str] = None,
|
|
70
|
+
filepath: Optional[str] = None,
|
|
70
71
|
update_file: bool = True,
|
|
71
72
|
proxies: dict = {},
|
|
72
73
|
history_offset: int = 10250,
|
|
73
|
-
act: str = None,
|
|
74
|
+
act: Optional[str] = None,
|
|
74
75
|
model: str = "gemini_2_flash",
|
|
75
76
|
):
|
|
76
77
|
"""Instantiates YouChat
|
|
@@ -123,26 +124,29 @@ class YouChat(Provider):
|
|
|
123
124
|
for method in dir(Optimizers)
|
|
124
125
|
if callable(getattr(Optimizers, method)) and not method.startswith("__")
|
|
125
126
|
)
|
|
126
|
-
Conversation.intro = (
|
|
127
|
-
AwesomePrompts().get_act(
|
|
128
|
-
act, raise_not_found=True, default=None, case_insensitive=True
|
|
129
|
-
)
|
|
130
|
-
if act
|
|
131
|
-
else intro or Conversation.intro
|
|
132
|
-
)
|
|
133
127
|
self.conversation = Conversation(
|
|
134
128
|
is_conversation, self.max_tokens_to_sample, filepath, update_file
|
|
135
129
|
)
|
|
130
|
+
act_prompt = (
|
|
131
|
+
AwesomePrompts().get_act(cast(Union[str, int], act), default=None, case_insensitive=True
|
|
132
|
+
)
|
|
133
|
+
if act
|
|
134
|
+
else intro
|
|
135
|
+
)
|
|
136
|
+
if act_prompt:
|
|
137
|
+
self.conversation.intro = act_prompt
|
|
136
138
|
self.conversation.history_offset = history_offset
|
|
137
|
-
|
|
139
|
+
if proxies:
|
|
140
|
+
self.session.proxies.update(proxies)
|
|
138
141
|
|
|
139
142
|
def ask(
|
|
140
143
|
self,
|
|
141
144
|
prompt: str,
|
|
142
145
|
stream: bool = False,
|
|
143
146
|
raw: bool = False,
|
|
144
|
-
optimizer: str = None,
|
|
147
|
+
optimizer: Optional[str] = None,
|
|
145
148
|
conversationally: bool = False,
|
|
149
|
+
**kwargs: Any,
|
|
146
150
|
) -> dict:
|
|
147
151
|
"""Chat with AI
|
|
148
152
|
|
|
@@ -151,7 +155,8 @@ class YouChat(Provider):
|
|
|
151
155
|
stream (bool, optional): Flag for streaming response. Defaults to False.
|
|
152
156
|
raw (bool, optional): Stream back raw response as received. Defaults to False.
|
|
153
157
|
optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
|
|
154
|
-
conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to
|
|
158
|
+
conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to None.
|
|
159
|
+
**kwargs: Additional keyword arguments.
|
|
155
160
|
Returns:
|
|
156
161
|
dict : {}
|
|
157
162
|
```json
|
|
@@ -173,10 +178,10 @@ class YouChat(Provider):
|
|
|
173
178
|
|
|
174
179
|
trace_id = str(uuid4())
|
|
175
180
|
conversation_turn_id = str(uuid4())
|
|
176
|
-
|
|
181
|
+
|
|
177
182
|
# Current timestamp in ISO format for traceId
|
|
178
183
|
current_time = datetime.datetime.now().isoformat()
|
|
179
|
-
|
|
184
|
+
|
|
180
185
|
# Updated query parameters to match the new API format
|
|
181
186
|
params = {
|
|
182
187
|
"page": 1,
|
|
@@ -196,7 +201,7 @@ class YouChat(Provider):
|
|
|
196
201
|
"traceId": f"{trace_id}|{conversation_turn_id}|{current_time}",
|
|
197
202
|
"use_nested_youchat_updates": "true"
|
|
198
203
|
}
|
|
199
|
-
|
|
204
|
+
|
|
200
205
|
# New payload format is JSON
|
|
201
206
|
payload = {
|
|
202
207
|
"query": conversation_prompt,
|
|
@@ -205,12 +210,12 @@ class YouChat(Provider):
|
|
|
205
210
|
|
|
206
211
|
def for_stream():
|
|
207
212
|
response = self.session.post(
|
|
208
|
-
self.chat_endpoint,
|
|
209
|
-
headers=self.headers,
|
|
210
|
-
cookies=self.cookies,
|
|
213
|
+
self.chat_endpoint,
|
|
214
|
+
headers=self.headers,
|
|
215
|
+
cookies=self.cookies,
|
|
211
216
|
params=params,
|
|
212
217
|
data=json.dumps(payload),
|
|
213
|
-
stream=True,
|
|
218
|
+
stream=True,
|
|
214
219
|
timeout=self.timeout
|
|
215
220
|
)
|
|
216
221
|
if not response.ok:
|
|
@@ -253,7 +258,7 @@ class YouChat(Provider):
|
|
|
253
258
|
def for_non_stream():
|
|
254
259
|
for _ in for_stream():
|
|
255
260
|
pass
|
|
256
|
-
return self.last_response
|
|
261
|
+
return self.last_response if not raw else json.dumps(self.last_response)
|
|
257
262
|
|
|
258
263
|
return for_stream() if stream else for_non_stream()
|
|
259
264
|
|
|
@@ -261,54 +266,66 @@ class YouChat(Provider):
|
|
|
261
266
|
self,
|
|
262
267
|
prompt: str,
|
|
263
268
|
stream: bool = False,
|
|
264
|
-
optimizer: str = None,
|
|
269
|
+
optimizer: Optional[str] = None,
|
|
265
270
|
conversationally: bool = False,
|
|
266
|
-
|
|
271
|
+
raw: bool = False,
|
|
272
|
+
**kwargs: Any,
|
|
273
|
+
) -> Union[str, Generator[str, None, None]]:
|
|
267
274
|
"""Generate response `str`
|
|
268
275
|
Args:
|
|
269
276
|
prompt (str): Prompt to be send.
|
|
270
277
|
stream (bool, optional): Flag for streaming response. Defaults to False.
|
|
271
278
|
optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
|
|
272
279
|
conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
|
|
280
|
+
raw (bool, optional): Return raw response chunks. Defaults to False.
|
|
281
|
+
**kwargs: Additional keyword arguments.
|
|
273
282
|
Returns:
|
|
274
283
|
str: Response generated
|
|
275
284
|
"""
|
|
276
285
|
|
|
277
286
|
def for_stream():
|
|
278
287
|
for response in self.ask(
|
|
279
|
-
prompt, True, optimizer=optimizer, conversationally=conversationally
|
|
288
|
+
prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
|
|
280
289
|
):
|
|
281
|
-
|
|
290
|
+
if raw:
|
|
291
|
+
yield response
|
|
292
|
+
else:
|
|
293
|
+
yield self.get_message(response)
|
|
282
294
|
|
|
283
295
|
def for_non_stream():
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
)
|
|
296
|
+
result = self.ask(
|
|
297
|
+
prompt,
|
|
298
|
+
False,
|
|
299
|
+
raw=raw,
|
|
300
|
+
optimizer=optimizer,
|
|
301
|
+
conversationally=conversationally,
|
|
291
302
|
)
|
|
303
|
+
if raw:
|
|
304
|
+
return result
|
|
305
|
+
return self.get_message(result)
|
|
292
306
|
|
|
293
307
|
return for_stream() if stream else for_non_stream()
|
|
294
308
|
|
|
295
|
-
def get_message(self, response:
|
|
309
|
+
def get_message(self, response: Response) -> str:
|
|
296
310
|
"""Retrieves message only from response
|
|
297
311
|
|
|
298
312
|
str: Message extracted
|
|
299
313
|
"""
|
|
300
|
-
|
|
301
|
-
|
|
314
|
+
if isinstance(response, str):
|
|
315
|
+
return response
|
|
316
|
+
if isinstance(response, dict):
|
|
317
|
+
return dict(response)["text"]
|
|
318
|
+
return str(response)
|
|
302
319
|
|
|
303
320
|
if __name__ == '__main__':
|
|
304
321
|
print("-" * 80)
|
|
305
322
|
print(f"{'Model':<50} {'Status':<10} {'Response'}")
|
|
306
323
|
print("-" * 80)
|
|
307
|
-
|
|
324
|
+
|
|
308
325
|
# Test all available models
|
|
309
326
|
working = 0
|
|
310
327
|
total = len(YouChat.AVAILABLE_MODELS)
|
|
311
|
-
|
|
328
|
+
|
|
312
329
|
for model in YouChat.AVAILABLE_MODELS:
|
|
313
330
|
try:
|
|
314
331
|
test_ai = YouChat(model=model, timeout=60)
|
|
@@ -317,7 +334,7 @@ if __name__ == '__main__':
|
|
|
317
334
|
for chunk in response:
|
|
318
335
|
response_text += chunk
|
|
319
336
|
print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
|
|
320
|
-
|
|
337
|
+
|
|
321
338
|
if response_text and len(response_text.strip()) > 0:
|
|
322
339
|
status = "✓"
|
|
323
340
|
# Truncate response if too long
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
import uuid
|
|
2
|
+
|
|
3
|
+
import requests
|
|
4
|
+
from rich import print
|
|
5
|
+
|
|
6
|
+
from webscout.litagent import LitAgent
|
|
7
|
+
|
|
8
|
+
url = 'https://aihumanizer.work/api/v1/text/rewriter'
|
|
9
|
+
|
|
10
|
+
headers = {
|
|
11
|
+
'accept': 'application/json, text/plain, */*',
|
|
12
|
+
'accept-language': 'en-US,en;q=0.9,en-IN;q=0.8',
|
|
13
|
+
'content-type': 'application/json; charset=UTF-8',
|
|
14
|
+
'dnt': '1',
|
|
15
|
+
'origin': 'https://aihumanizer.work',
|
|
16
|
+
'priority': 'u=1, i',
|
|
17
|
+
'referer': 'https://aihumanizer.work/?via=topaitools',
|
|
18
|
+
'sec-ch-ua': '"Not(A:Brand";v="8", "Chromium";v="144", "Microsoft Edge";v="144"',
|
|
19
|
+
'sec-ch-ua-mobile': '?0',
|
|
20
|
+
'sec-ch-ua-platform': '"Windows"',
|
|
21
|
+
'sec-fetch-dest': 'empty',
|
|
22
|
+
'sec-fetch-mode': 'cors',
|
|
23
|
+
'sec-fetch-site': 'same-origin',
|
|
24
|
+
'sec-gpc': '1',
|
|
25
|
+
'user-agent': LitAgent().random(),
|
|
26
|
+
}
|
|
27
|
+
|
|
28
|
+
cookies = {
|
|
29
|
+
'_ga': 'GA1.1.830684681.1766055491',
|
|
30
|
+
'_ga_14V82CGVQ2': 'GS2.1.s1766055490$o1$g0$t1766055490$j90$l0$h0',
|
|
31
|
+
'anonymous_user_id': str(uuid.uuid4()),
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
json_data = {
|
|
35
|
+
'text': 'You are an Large Thinking and Reasoning Model (LTRM) called Dhanishtha-MAX by HelpingAI. Your purpose is to think deeply and reason carefully before answering user questions. You must follow the guidelines below strictly in every response.',
|
|
36
|
+
'tone': 0,
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
response = requests.post(url, headers=headers, cookies=cookies, json=json_data)
|
|
40
|
+
|
|
41
|
+
print(response.json())
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
from curl_cffi.requests import Session
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def create_grammar_check_job(text: str):
|
|
5
|
+
url = 'https://api.aigrammarchecker.io/api/ai-check-grammar/create-job'
|
|
6
|
+
headers = {
|
|
7
|
+
'accept': '*/*',
|
|
8
|
+
'accept-language': 'en-US,en;q=0.9,en-IN;q=0.8',
|
|
9
|
+
'dnt': '1',
|
|
10
|
+
'origin': 'https://aigrammarchecker.io',
|
|
11
|
+
'priority': 'u=1, i',
|
|
12
|
+
'product-code': '067003',
|
|
13
|
+
'product-serial': '6a4836a29e756bd24a74ebed31e405da',
|
|
14
|
+
'referer': 'https://aigrammarchecker.io/',
|
|
15
|
+
'sec-ch-ua': '"Not(A:Brand";v="8", "Chromium";v="120", "Microsoft Edge";v="120"',
|
|
16
|
+
'sec-ch-ua-mobile': '?0',
|
|
17
|
+
'sec-ch-ua-platform': '"Windows"',
|
|
18
|
+
'sec-fetch-dest': 'empty',
|
|
19
|
+
'sec-fetch-mode': 'cors',
|
|
20
|
+
'sec-fetch-site': 'same-site',
|
|
21
|
+
'sec-gpc': '1',
|
|
22
|
+
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0',
|
|
23
|
+
}
|
|
24
|
+
|
|
25
|
+
files = {
|
|
26
|
+
'features': (None, 'check_grammar'),
|
|
27
|
+
'entertext': (None, text),
|
|
28
|
+
'translate_language': (None, 'English'),
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
session = Session()
|
|
32
|
+
response = session.post(url, headers=headers, files=files)
|
|
33
|
+
return response.json()
|
|
34
|
+
|
|
35
|
+
if __name__ == "__main__":
|
|
36
|
+
from rich import print as cprint
|
|
37
|
+
cprint(create_grammar_check_job("she gg to school"))
|