webscout 8.2.9__py3-none-any.whl → 2026.1.19__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webscout/AIauto.py +524 -251
- webscout/AIbase.py +247 -319
- webscout/AIutel.py +68 -703
- webscout/Bard.py +1072 -1026
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/__init__.py +20 -12
- webscout/Extra/GitToolkit/gitapi/gist.py +142 -0
- webscout/Extra/GitToolkit/gitapi/organization.py +91 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +308 -195
- webscout/Extra/GitToolkit/gitapi/search.py +162 -0
- webscout/Extra/GitToolkit/gitapi/trending.py +236 -0
- webscout/Extra/GitToolkit/gitapi/user.py +128 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +82 -62
- webscout/Extra/YTToolkit/README.md +443 -375
- webscout/Extra/YTToolkit/YTdownloader.py +953 -957
- webscout/Extra/YTToolkit/__init__.py +3 -3
- webscout/Extra/YTToolkit/transcriber.py +595 -476
- webscout/Extra/YTToolkit/ytapi/README.md +230 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +22 -6
- webscout/Extra/YTToolkit/ytapi/captions.py +190 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +302 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +178 -118
- webscout/Extra/YTToolkit/ytapi/hashtag.py +120 -0
- webscout/Extra/YTToolkit/ytapi/https.py +89 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -59
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -8
- webscout/Extra/YTToolkit/ytapi/query.py +143 -40
- webscout/Extra/YTToolkit/ytapi/shorts.py +122 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +68 -63
- webscout/Extra/YTToolkit/ytapi/suggestions.py +97 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +66 -62
- webscout/Extra/YTToolkit/ytapi/video.py +403 -232
- webscout/Extra/__init__.py +2 -3
- webscout/Extra/gguf.py +1298 -684
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +28 -28
- webscout/Extra/tempmail/async_utils.py +143 -141
- webscout/Extra/tempmail/base.py +172 -161
- webscout/Extra/tempmail/cli.py +191 -187
- webscout/Extra/tempmail/emailnator.py +88 -84
- webscout/Extra/tempmail/mail_tm.py +378 -361
- webscout/Extra/tempmail/temp_mail_io.py +304 -292
- webscout/Extra/weather.py +196 -194
- webscout/Extra/weather_ascii.py +17 -15
- webscout/Provider/AISEARCH/PERPLEXED_search.py +175 -0
- webscout/Provider/AISEARCH/Perplexity.py +292 -333
- webscout/Provider/AISEARCH/README.md +106 -279
- webscout/Provider/AISEARCH/__init__.py +16 -9
- webscout/Provider/AISEARCH/brave_search.py +298 -0
- webscout/Provider/AISEARCH/iask_search.py +357 -410
- webscout/Provider/AISEARCH/monica_search.py +200 -220
- webscout/Provider/AISEARCH/webpilotai_search.py +242 -255
- webscout/Provider/Algion.py +413 -0
- webscout/Provider/Andi.py +74 -69
- webscout/Provider/Apriel.py +313 -0
- webscout/Provider/Ayle.py +323 -0
- webscout/Provider/ChatSandbox.py +329 -342
- webscout/Provider/ClaudeOnline.py +365 -0
- webscout/Provider/Cohere.py +232 -208
- webscout/Provider/DeepAI.py +367 -0
- webscout/Provider/Deepinfra.py +467 -340
- webscout/Provider/EssentialAI.py +217 -0
- webscout/Provider/ExaAI.py +274 -261
- webscout/Provider/Gemini.py +175 -169
- webscout/Provider/GithubChat.py +385 -369
- webscout/Provider/Gradient.py +286 -0
- webscout/Provider/Groq.py +556 -801
- webscout/Provider/HadadXYZ.py +323 -0
- webscout/Provider/HeckAI.py +392 -375
- webscout/Provider/HuggingFace.py +387 -0
- webscout/Provider/IBM.py +340 -0
- webscout/Provider/Jadve.py +317 -291
- webscout/Provider/K2Think.py +306 -0
- webscout/Provider/Koboldai.py +221 -384
- webscout/Provider/Netwrck.py +273 -270
- webscout/Provider/Nvidia.py +310 -0
- webscout/Provider/OPENAI/DeepAI.py +489 -0
- webscout/Provider/OPENAI/K2Think.py +423 -0
- webscout/Provider/OPENAI/PI.py +463 -0
- webscout/Provider/OPENAI/README.md +890 -952
- webscout/Provider/OPENAI/TogetherAI.py +405 -0
- webscout/Provider/OPENAI/TwoAI.py +255 -357
- webscout/Provider/OPENAI/__init__.py +148 -40
- webscout/Provider/OPENAI/ai4chat.py +348 -293
- webscout/Provider/OPENAI/akashgpt.py +436 -0
- webscout/Provider/OPENAI/algion.py +303 -0
- webscout/Provider/OPENAI/{exachat.py → ayle.py} +365 -444
- webscout/Provider/OPENAI/base.py +253 -249
- webscout/Provider/OPENAI/cerebras.py +296 -0
- webscout/Provider/OPENAI/chatgpt.py +870 -556
- webscout/Provider/OPENAI/chatsandbox.py +233 -173
- webscout/Provider/OPENAI/deepinfra.py +403 -322
- webscout/Provider/OPENAI/e2b.py +2370 -1414
- webscout/Provider/OPENAI/elmo.py +278 -0
- webscout/Provider/OPENAI/exaai.py +452 -417
- webscout/Provider/OPENAI/freeassist.py +446 -0
- webscout/Provider/OPENAI/gradient.py +448 -0
- webscout/Provider/OPENAI/groq.py +380 -364
- webscout/Provider/OPENAI/hadadxyz.py +292 -0
- webscout/Provider/OPENAI/heckai.py +333 -308
- webscout/Provider/OPENAI/huggingface.py +321 -0
- webscout/Provider/OPENAI/ibm.py +425 -0
- webscout/Provider/OPENAI/llmchat.py +253 -0
- webscout/Provider/OPENAI/llmchatco.py +378 -335
- webscout/Provider/OPENAI/meta.py +541 -0
- webscout/Provider/OPENAI/netwrck.py +374 -357
- webscout/Provider/OPENAI/nvidia.py +317 -0
- webscout/Provider/OPENAI/oivscode.py +348 -287
- webscout/Provider/OPENAI/openrouter.py +328 -0
- webscout/Provider/OPENAI/pydantic_imports.py +1 -172
- webscout/Provider/OPENAI/sambanova.py +397 -0
- webscout/Provider/OPENAI/sonus.py +305 -304
- webscout/Provider/OPENAI/textpollinations.py +370 -339
- webscout/Provider/OPENAI/toolbaz.py +375 -413
- webscout/Provider/OPENAI/typefully.py +419 -355
- webscout/Provider/OPENAI/typliai.py +279 -0
- webscout/Provider/OPENAI/utils.py +314 -318
- webscout/Provider/OPENAI/wisecat.py +359 -387
- webscout/Provider/OPENAI/writecream.py +185 -163
- webscout/Provider/OPENAI/x0gpt.py +462 -365
- webscout/Provider/OPENAI/zenmux.py +380 -0
- webscout/Provider/OpenRouter.py +386 -0
- webscout/Provider/Openai.py +337 -496
- webscout/Provider/PI.py +443 -429
- webscout/Provider/QwenLM.py +346 -254
- webscout/Provider/STT/__init__.py +28 -0
- webscout/Provider/STT/base.py +303 -0
- webscout/Provider/STT/elevenlabs.py +264 -0
- webscout/Provider/Sambanova.py +317 -0
- webscout/Provider/TTI/README.md +69 -82
- webscout/Provider/TTI/__init__.py +37 -7
- webscout/Provider/TTI/base.py +147 -64
- webscout/Provider/TTI/claudeonline.py +393 -0
- webscout/Provider/TTI/magicstudio.py +292 -201
- webscout/Provider/TTI/miragic.py +180 -0
- webscout/Provider/TTI/pollinations.py +331 -221
- webscout/Provider/TTI/together.py +334 -0
- webscout/Provider/TTI/utils.py +14 -11
- webscout/Provider/TTS/README.md +186 -192
- webscout/Provider/TTS/__init__.py +43 -10
- webscout/Provider/TTS/base.py +523 -159
- webscout/Provider/TTS/deepgram.py +286 -156
- webscout/Provider/TTS/elevenlabs.py +189 -111
- webscout/Provider/TTS/freetts.py +218 -0
- webscout/Provider/TTS/murfai.py +288 -113
- webscout/Provider/TTS/openai_fm.py +364 -129
- webscout/Provider/TTS/parler.py +203 -111
- webscout/Provider/TTS/qwen.py +334 -0
- webscout/Provider/TTS/sherpa.py +286 -0
- webscout/Provider/TTS/speechma.py +693 -580
- webscout/Provider/TTS/streamElements.py +275 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TextPollinationsAI.py +331 -308
- webscout/Provider/TogetherAI.py +450 -0
- webscout/Provider/TwoAI.py +309 -475
- webscout/Provider/TypliAI.py +311 -305
- webscout/Provider/UNFINISHED/ChatHub.py +219 -209
- webscout/Provider/{OPENAI/glider.py → UNFINISHED/ChutesAI.py} +331 -326
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +300 -295
- webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +218 -198
- webscout/Provider/UNFINISHED/Qodo.py +481 -0
- webscout/Provider/{MCPCore.py → UNFINISHED/XenAI.py} +330 -315
- webscout/Provider/UNFINISHED/Youchat.py +347 -330
- webscout/Provider/UNFINISHED/aihumanizer.py +41 -0
- webscout/Provider/UNFINISHED/grammerchecker.py +37 -0
- webscout/Provider/UNFINISHED/liner.py +342 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +246 -263
- webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +231 -224
- webscout/Provider/WiseCat.py +256 -233
- webscout/Provider/WrDoChat.py +390 -370
- webscout/Provider/__init__.py +115 -174
- webscout/Provider/ai4chat.py +181 -174
- webscout/Provider/akashgpt.py +330 -335
- webscout/Provider/cerebras.py +397 -290
- webscout/Provider/cleeai.py +236 -213
- webscout/Provider/elmo.py +291 -283
- webscout/Provider/geminiapi.py +343 -208
- webscout/Provider/julius.py +245 -223
- webscout/Provider/learnfastai.py +333 -325
- webscout/Provider/llama3mitril.py +230 -215
- webscout/Provider/llmchat.py +308 -258
- webscout/Provider/llmchatco.py +321 -306
- webscout/Provider/meta.py +996 -801
- webscout/Provider/oivscode.py +332 -309
- webscout/Provider/searchchat.py +316 -292
- webscout/Provider/sonus.py +264 -258
- webscout/Provider/toolbaz.py +359 -353
- webscout/Provider/turboseek.py +332 -266
- webscout/Provider/typefully.py +262 -202
- webscout/Provider/x0gpt.py +332 -299
- webscout/__init__.py +31 -39
- webscout/__main__.py +5 -5
- webscout/cli.py +585 -524
- webscout/client.py +1497 -70
- webscout/conversation.py +140 -436
- webscout/exceptions.py +383 -362
- webscout/litagent/__init__.py +29 -29
- webscout/litagent/agent.py +492 -455
- webscout/litagent/constants.py +60 -60
- webscout/models.py +505 -181
- webscout/optimizers.py +74 -420
- webscout/prompt_manager.py +376 -288
- webscout/sanitize.py +1514 -0
- webscout/scout/README.md +452 -404
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +7 -7
- webscout/scout/core/crawler.py +330 -210
- webscout/scout/core/scout.py +800 -607
- webscout/scout/core/search_result.py +51 -96
- webscout/scout/core/text_analyzer.py +64 -63
- webscout/scout/core/text_utils.py +412 -277
- webscout/scout/core/web_analyzer.py +54 -52
- webscout/scout/element.py +872 -478
- webscout/scout/parsers/__init__.py +70 -69
- webscout/scout/parsers/html5lib_parser.py +182 -172
- webscout/scout/parsers/html_parser.py +238 -236
- webscout/scout/parsers/lxml_parser.py +203 -178
- webscout/scout/utils.py +38 -37
- webscout/search/__init__.py +47 -0
- webscout/search/base.py +201 -0
- webscout/search/bing_main.py +45 -0
- webscout/search/brave_main.py +92 -0
- webscout/search/duckduckgo_main.py +57 -0
- webscout/search/engines/__init__.py +127 -0
- webscout/search/engines/bing/__init__.py +15 -0
- webscout/search/engines/bing/base.py +35 -0
- webscout/search/engines/bing/images.py +114 -0
- webscout/search/engines/bing/news.py +96 -0
- webscout/search/engines/bing/suggestions.py +36 -0
- webscout/search/engines/bing/text.py +109 -0
- webscout/search/engines/brave/__init__.py +19 -0
- webscout/search/engines/brave/base.py +47 -0
- webscout/search/engines/brave/images.py +213 -0
- webscout/search/engines/brave/news.py +353 -0
- webscout/search/engines/brave/suggestions.py +318 -0
- webscout/search/engines/brave/text.py +167 -0
- webscout/search/engines/brave/videos.py +364 -0
- webscout/search/engines/duckduckgo/__init__.py +25 -0
- webscout/search/engines/duckduckgo/answers.py +80 -0
- webscout/search/engines/duckduckgo/base.py +189 -0
- webscout/search/engines/duckduckgo/images.py +100 -0
- webscout/search/engines/duckduckgo/maps.py +183 -0
- webscout/search/engines/duckduckgo/news.py +70 -0
- webscout/search/engines/duckduckgo/suggestions.py +22 -0
- webscout/search/engines/duckduckgo/text.py +221 -0
- webscout/search/engines/duckduckgo/translate.py +48 -0
- webscout/search/engines/duckduckgo/videos.py +80 -0
- webscout/search/engines/duckduckgo/weather.py +84 -0
- webscout/search/engines/mojeek.py +61 -0
- webscout/search/engines/wikipedia.py +77 -0
- webscout/search/engines/yahoo/__init__.py +41 -0
- webscout/search/engines/yahoo/answers.py +19 -0
- webscout/search/engines/yahoo/base.py +34 -0
- webscout/search/engines/yahoo/images.py +323 -0
- webscout/search/engines/yahoo/maps.py +19 -0
- webscout/search/engines/yahoo/news.py +258 -0
- webscout/search/engines/yahoo/suggestions.py +140 -0
- webscout/search/engines/yahoo/text.py +273 -0
- webscout/search/engines/yahoo/translate.py +19 -0
- webscout/search/engines/yahoo/videos.py +302 -0
- webscout/search/engines/yahoo/weather.py +220 -0
- webscout/search/engines/yandex.py +67 -0
- webscout/search/engines/yep/__init__.py +13 -0
- webscout/search/engines/yep/base.py +34 -0
- webscout/search/engines/yep/images.py +101 -0
- webscout/search/engines/yep/suggestions.py +38 -0
- webscout/search/engines/yep/text.py +99 -0
- webscout/search/http_client.py +172 -0
- webscout/search/results.py +141 -0
- webscout/search/yahoo_main.py +57 -0
- webscout/search/yep_main.py +48 -0
- webscout/server/__init__.py +48 -0
- webscout/server/config.py +78 -0
- webscout/server/exceptions.py +69 -0
- webscout/server/providers.py +286 -0
- webscout/server/request_models.py +131 -0
- webscout/server/request_processing.py +404 -0
- webscout/server/routes.py +642 -0
- webscout/server/server.py +351 -0
- webscout/server/ui_templates.py +1171 -0
- webscout/swiftcli/__init__.py +79 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +574 -297
- webscout/swiftcli/core/context.py +98 -104
- webscout/swiftcli/core/group.py +268 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +243 -221
- webscout/swiftcli/decorators/options.py +247 -220
- webscout/swiftcli/decorators/output.py +392 -252
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +134 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +58 -59
- webscout/swiftcli/utils/formatting.py +251 -252
- webscout/swiftcli/utils/parsing.py +368 -267
- webscout/update_checker.py +280 -136
- webscout/utils.py +28 -14
- webscout/version.py +2 -1
- webscout/version.py.bak +3 -0
- webscout/zeroart/__init__.py +218 -135
- webscout/zeroart/base.py +70 -66
- webscout/zeroart/effects.py +155 -101
- webscout/zeroart/fonts.py +1799 -1239
- webscout-2026.1.19.dist-info/METADATA +638 -0
- webscout-2026.1.19.dist-info/RECORD +312 -0
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/WHEEL +1 -1
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/entry_points.txt +1 -1
- webscout/DWEBS.py +0 -520
- webscout/Extra/Act.md +0 -309
- webscout/Extra/GitToolkit/gitapi/README.md +0 -110
- webscout/Extra/autocoder/__init__.py +0 -9
- webscout/Extra/autocoder/autocoder.py +0 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +0 -332
- webscout/Extra/gguf.md +0 -430
- webscout/Extra/weather.md +0 -281
- webscout/Litlogger/README.md +0 -10
- webscout/Litlogger/__init__.py +0 -15
- webscout/Litlogger/formats.py +0 -4
- webscout/Litlogger/handlers.py +0 -103
- webscout/Litlogger/levels.py +0 -13
- webscout/Litlogger/logger.py +0 -92
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/AISEARCH/felo_search.py +0 -202
- webscout/Provider/AISEARCH/genspark_search.py +0 -324
- webscout/Provider/AISEARCH/hika_search.py +0 -186
- webscout/Provider/AISEARCH/scira_search.py +0 -298
- webscout/Provider/Aitopia.py +0 -316
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -791
- webscout/Provider/ChatGPTClone.py +0 -237
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/Cloudflare.py +0 -324
- webscout/Provider/ExaChat.py +0 -358
- webscout/Provider/Flowith.py +0 -217
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/Glider.py +0 -225
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/HuggingFaceChat.py +0 -469
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/LambdaChat.py +0 -411
- webscout/Provider/Llama3.py +0 -259
- webscout/Provider/Nemotron.py +0 -218
- webscout/Provider/OLLAMA.py +0 -396
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -766
- webscout/Provider/OPENAI/Cloudflare.py +0 -378
- webscout/Provider/OPENAI/FreeGemini.py +0 -283
- webscout/Provider/OPENAI/NEMOTRON.py +0 -232
- webscout/Provider/OPENAI/Qwen3.py +0 -283
- webscout/Provider/OPENAI/api.py +0 -969
- webscout/Provider/OPENAI/c4ai.py +0 -373
- webscout/Provider/OPENAI/chatgptclone.py +0 -494
- webscout/Provider/OPENAI/copilot.py +0 -242
- webscout/Provider/OPENAI/flowith.py +0 -162
- webscout/Provider/OPENAI/freeaichat.py +0 -359
- webscout/Provider/OPENAI/mcpcore.py +0 -389
- webscout/Provider/OPENAI/multichat.py +0 -376
- webscout/Provider/OPENAI/opkfc.py +0 -496
- webscout/Provider/OPENAI/scirachat.py +0 -477
- webscout/Provider/OPENAI/standardinput.py +0 -433
- webscout/Provider/OPENAI/typegpt.py +0 -364
- webscout/Provider/OPENAI/uncovrAI.py +0 -463
- webscout/Provider/OPENAI/venice.py +0 -431
- webscout/Provider/OPENAI/yep.py +0 -382
- webscout/Provider/OpenGPT.py +0 -209
- webscout/Provider/Perplexitylabs.py +0 -415
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/StandardInput.py +0 -290
- webscout/Provider/TTI/aiarta.py +0 -365
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/fastflux.py +0 -200
- webscout/Provider/TTI/piclumen.py +0 -203
- webscout/Provider/TTI/pixelmuse.py +0 -225
- webscout/Provider/TTS/gesserit.py +0 -128
- webscout/Provider/TTS/sthir.py +0 -94
- webscout/Provider/TeachAnything.py +0 -229
- webscout/Provider/UNFINISHED/puterjs.py +0 -635
- webscout/Provider/UNFINISHED/test_lmarena.py +0 -119
- webscout/Provider/Venice.py +0 -258
- webscout/Provider/VercelAI.py +0 -253
- webscout/Provider/Writecream.py +0 -246
- webscout/Provider/WritingMate.py +0 -269
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/chatglm.py +0 -215
- webscout/Provider/copilot.py +0 -425
- webscout/Provider/freeaichat.py +0 -285
- webscout/Provider/granite.py +0 -235
- webscout/Provider/hermes.py +0 -266
- webscout/Provider/koala.py +0 -170
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/multichat.py +0 -364
- webscout/Provider/scira_chat.py +0 -299
- webscout/Provider/scnet.py +0 -243
- webscout/Provider/talkai.py +0 -194
- webscout/Provider/typegpt.py +0 -289
- webscout/Provider/uncovr.py +0 -368
- webscout/Provider/yep.py +0 -389
- webscout/litagent/Readme.md +0 -276
- webscout/litprinter/__init__.py +0 -59
- webscout/swiftcli/Readme.md +0 -323
- webscout/tempid.py +0 -128
- webscout/webscout_search.py +0 -1184
- webscout/webscout_search_async.py +0 -654
- webscout/yep_search.py +0 -347
- webscout/zeroart/README.md +0 -89
- webscout-8.2.9.dist-info/METADATA +0 -1033
- webscout-8.2.9.dist-info/RECORD +0 -289
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,448 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Gradient Network OpenAI-compatible Provider
|
|
3
|
+
Reverse engineered from https://chat.gradient.network/
|
|
4
|
+
|
|
5
|
+
Provides OpenAI-compatible API interface for Gradient Network's distributed GPU clusters.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import json
|
|
9
|
+
import time
|
|
10
|
+
import uuid
|
|
11
|
+
from typing import Any, Dict, Generator, List, Optional, Union, cast
|
|
12
|
+
|
|
13
|
+
import requests
|
|
14
|
+
|
|
15
|
+
from webscout.Provider.OPENAI.base import (
|
|
16
|
+
BaseChat,
|
|
17
|
+
BaseCompletions,
|
|
18
|
+
OpenAICompatibleProvider,
|
|
19
|
+
SimpleModelList,
|
|
20
|
+
)
|
|
21
|
+
from webscout.Provider.OPENAI.utils import (
|
|
22
|
+
ChatCompletion,
|
|
23
|
+
ChatCompletionChunk,
|
|
24
|
+
ChatCompletionMessage,
|
|
25
|
+
Choice,
|
|
26
|
+
ChoiceDelta,
|
|
27
|
+
CompletionUsage,
|
|
28
|
+
count_tokens,
|
|
29
|
+
)
|
|
30
|
+
|
|
31
|
+
# ANSI escape codes for formatting
|
|
32
|
+
BOLD = "\033[1m"
|
|
33
|
+
RED = "\033[91m"
|
|
34
|
+
RESET = "\033[0m"
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
class Completions(BaseCompletions):
    """Handles chat completions for Gradient Network.

    The Gradient endpoint always streams newline-delimited JSON events;
    this class adapts that wire format to OpenAI-style ``ChatCompletion``
    and ``ChatCompletionChunk`` objects. Parsing of the NDJSON stream is
    shared between the streaming and non-streaming paths via
    ``_extract_reply_content``.
    """

    def __init__(self, client: "Gradient"):
        # Keep a reference to the owning client for session/headers/config.
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = 2048,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        timeout: Optional[int] = None,
        proxies: Optional[Dict[str, str]] = None,
        enable_thinking: Optional[bool] = None,
        cluster_mode: Optional[str] = None,
        **kwargs: Any,
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation.
        Mimics openai.chat.completions.create

        Args:
            model: The model to use for completion
            messages: List of message dicts with 'role' and 'content'
            max_tokens: Maximum tokens to generate (not used by Gradient API)
            stream: Whether to stream the response
            temperature: Sampling temperature (not used by Gradient API)
            top_p: Top-p sampling (not used by Gradient API)
            timeout: Request timeout in seconds
            proxies: Proxy configuration
            enable_thinking: Enable thinking/reasoning mode (default: client setting)
            cluster_mode: GPU cluster mode (auto-detected based on model if None)
            **kwargs: Additional arguments

        Returns:
            ChatCompletion or Generator[ChatCompletionChunk] depending on stream
        """
        # Normalize the model name and pick the cluster that model runs on;
        # an explicit cluster_mode argument always wins.
        converted_model = self._client.convert_model_name(model)
        actual_cluster_mode = cluster_mode or self._client.MODEL_CLUSTERS.get(
            converted_model, self._client.cluster_mode
        )

        # Build the payload - pass messages directly as the API accepts them.
        payload = {
            "model": converted_model,
            "clusterMode": actual_cluster_mode,
            "messages": messages,
            "enableThinking": enable_thinking
            if enable_thinking is not None
            else self._client.enable_thinking,
        }

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if stream:
            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
        return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)

    def _post(
        self,
        payload: Dict[str, Any],
        timeout: Optional[int],
        proxies: Optional[Dict[str, str]],
    ) -> requests.Response:
        """POST *payload* to the Gradient endpoint and return the raw streaming response.

        Raises:
            requests.exceptions.HTTPError: on non-2xx status codes.
        """
        response = self._client.session.post(
            self._client.base_url,
            headers=self._client.headers,
            json=payload,
            stream=True,
            timeout=timeout or self._client.timeout,
            proxies=proxies or self._client.proxies or None,
        )
        response.raise_for_status()
        return response

    @staticmethod
    def _extract_reply_content(line: Union[bytes, str]) -> Optional[str]:
        """Parse one NDJSON line and return its text content, or None.

        Gradient emits JSON objects with a "type" field; only "reply"
        events carry text, in data.content (preferred) or
        data.reasoningContent. Blank, malformed, and non-reply lines
        yield None so callers can simply skip them (best-effort
        streaming, matching the upstream protocol's tolerance).
        """
        decoded = line.decode("utf-8") if isinstance(line, bytes) else line
        decoded = decoded.strip()
        if not decoded:
            return None
        try:
            data = json.loads(decoded)
            if data.get("type") != "reply":
                return None
            reply_data = data.get("data", {})
            # Prefer "content" over "reasoningContent".
            return reply_data.get("content") or reply_data.get("reasoningContent")
        except (json.JSONDecodeError, AttributeError, TypeError):
            # Malformed JSON or an unexpected shape (e.g. a JSON array):
            # skip the line rather than abort the whole stream.
            return None

    def _create_stream(
        self,
        request_id: str,
        created_time: int,
        model: str,
        payload: Dict[str, Any],
        timeout: Optional[int] = None,
        proxies: Optional[Dict[str, str]] = None,
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Handle streaming response from Gradient API.

        Yields one ChatCompletionChunk per reply event, then a final
        chunk with finish_reason="stop". Token counts are estimates via
        count_tokens and attached to every chunk's usage dict.
        """
        try:
            response = self._post(payload, timeout, proxies)

            completion_tokens = 0
            prompt_tokens = count_tokens(str(payload.get("messages", [])))
            first_chunk = True

            for line in response.iter_lines():
                if not line:
                    continue

                content = self._extract_reply_content(line)
                if not content:
                    continue

                completion_tokens += count_tokens(content)

                # Per OpenAI convention, the role is only sent on the
                # first delta of the stream.
                delta = ChoiceDelta(
                    content=content, role="assistant" if first_chunk else None
                )
                first_chunk = False

                choice = Choice(index=0, delta=delta, finish_reason=None, logprobs=None)

                chunk = ChatCompletionChunk(
                    id=request_id,
                    choices=[choice],
                    created=created_time,
                    model=model,
                    system_fingerprint=None,
                )
                chunk.usage = {
                    "prompt_tokens": prompt_tokens,
                    "completion_tokens": completion_tokens,
                    "total_tokens": prompt_tokens + completion_tokens,
                    "estimated_cost": None,
                }
                yield chunk

            # Final chunk with finish_reason="stop".
            delta = ChoiceDelta(content=None, role=None, tool_calls=None)
            choice = Choice(index=0, delta=delta, finish_reason="stop", logprobs=None)
            chunk = ChatCompletionChunk(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                system_fingerprint=None,
            )
            chunk.usage = {
                "prompt_tokens": prompt_tokens,
                "completion_tokens": completion_tokens,
                "total_tokens": prompt_tokens + completion_tokens,
                "estimated_cost": None,
            }
            yield chunk

        except requests.exceptions.RequestException as e:
            print(f"{RED}Error during Gradient stream request: {e}{RESET}")
            raise IOError(f"Gradient request failed: {e}") from e
        except Exception as e:
            print(f"{RED}Error during Gradient stream request: {type(e).__name__}: {e}{RESET}")
            raise IOError(f"Gradient request failed: {e}") from e

    def _create_non_stream(
        self,
        request_id: str,
        created_time: int,
        model: str,
        payload: Dict[str, Any],
        timeout: Optional[int] = None,
        proxies: Optional[Dict[str, str]] = None,
    ) -> ChatCompletion:
        """Handle non-streaming response from Gradient API.

        The upstream API only streams, so this drains the stream,
        concatenates all reply content, and wraps it in a single
        ChatCompletion.
        """
        try:
            full_content = ""
            prompt_tokens = count_tokens(str(payload.get("messages", [])))

            response = self._post(payload, timeout, proxies)

            for line in response.iter_lines():
                if not line:
                    continue
                content = self._extract_reply_content(line)
                if content:
                    full_content += content

            completion_tokens = count_tokens(full_content)
            total_tokens = prompt_tokens + completion_tokens

            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=total_tokens,
            )

            message = ChatCompletionMessage(role="assistant", content=full_content)

            choice = Choice(index=0, message=message, finish_reason="stop")

            completion = ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage,
            )

            return completion

        except requests.exceptions.RequestException as e:
            print(f"{RED}Error during Gradient non-stream request: {e}{RESET}")
            raise IOError(f"Gradient request failed: {e}") from e
        except Exception as e:
            print(f"{RED}Error during Gradient non-stream request: {type(e).__name__}: {e}{RESET}")
            raise IOError(f"Gradient request failed: {e}") from e
+
|
|
292
|
+
class Chat(BaseChat):
    """Namespace object exposing the ``completions`` endpoint for Gradient."""

    def __init__(self, client: "Gradient"):
        # Wire the completions handler to the owning client instance so
        # ``client.chat.completions.create(...)`` mirrors the OpenAI SDK.
        self.completions = Completions(client)
|
+
|
|
299
|
+
class Gradient(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for Gradient Network API.

    Gradient Network provides access to distributed GPU clusters running large language models.
    This provider supports real-time streaming responses.

    Note: GPT OSS 120B works on "nvidia" cluster, Qwen3 235B works on "hybrid" cluster.
    Cluster mode is auto-detected based on model selection.

    Usage:
        client = Gradient()
        response = client.chat.completions.create(
            model="GPT OSS 120B",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)

        # Streaming
        for chunk in client.chat.completions.create(
            model="Qwen3 235B",
            messages=[{"role": "user", "content": "Tell me a story"}],
            stream=True
        ):
            if chunk.choices[0].delta.content:
                print(chunk.choices[0].delta.content, end="", flush=True)
    """

    # The public endpoint needs no API key.
    required_auth = False

    AVAILABLE_MODELS = [
        "GPT OSS 120B",
        "Qwen3 235B",
    ]

    # Each model only runs on a specific GPU cluster; used to auto-pick
    # clusterMode when the caller does not supply one.
    MODEL_CLUSTERS = {
        "GPT OSS 120B": "nvidia",
        "Qwen3 235B": "hybrid",
    }

    def __init__(
        self,
        timeout: int = 60,
        cluster_mode: str = "nvidia",
        enable_thinking: bool = True,
        proxies: Optional[Dict[str, str]] = None,
    ):
        """
        Initialize the Gradient client.

        Args:
            timeout: Request timeout in seconds (default: 60)
            cluster_mode: Default GPU cluster mode (default: "nvidia", auto-detected per model)
            enable_thinking: Enable thinking/reasoning mode (default: True)
            proxies: Optional proxy configuration
        """
        # Per-request defaults; individual create() calls may override.
        self.timeout = timeout
        self.cluster_mode = cluster_mode
        self.enable_thinking = enable_thinking
        self.proxies = proxies or {}

        self.base_url = "https://chat.gradient.network/api/generate"
        self.session = requests.Session()

        # Browser-like headers replicating the site's own fetch request.
        self.headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
            "content-type": "application/json",
            "dnt": "1",
            "origin": "https://chat.gradient.network",
            "priority": "u=1, i",
            "referer": "https://chat.gradient.network/",
            "sec-ch-ua": '"Microsoft Edge";v="143", "Chromium";v="143", "Not A(Brand";v="24"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "sec-gpc": "1",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/143.0.0.0 Safari/537.36 Edg/143.0.0.0",
        }
        self.session.headers.update(self.headers)

        # Expose the OpenAI-style chat.completions interface.
        self.chat = Chat(self)

    def convert_model_name(self, model: str) -> str:
        """
        Ensure the model name is in the correct format.

        Args:
            model: Model name to convert

        Returns:
            Valid model name
        """
        if model in self.AVAILABLE_MODELS:
            return model

        # Fall back to case-insensitive matching, treating dashes as spaces.
        normalized = model.lower().replace("-", " ")
        matched = next(
            (candidate for candidate in self.AVAILABLE_MODELS
             if candidate.lower() == normalized),
            None,
        )
        if matched is not None:
            return matched

        # Unknown model: warn and fall back to the first available one.
        print(
            f"{BOLD}Warning: Model '{model}' not found, using default model '{self.AVAILABLE_MODELS[0]}'{RESET}"
        )
        return self.AVAILABLE_MODELS[0]

    @property
    def models(self) -> SimpleModelList:
        """List of model names this provider supports."""
        return SimpleModelList(type(self).AVAILABLE_MODELS)
+
|
|
418
|
+
if __name__ == "__main__":
    # Smoke test: query every available model once and print a status table.
    print("-" * 80)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 80)

    for model in Gradient.AVAILABLE_MODELS:
        try:
            client = Gradient(timeout=120)
            response = client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "user", "content": "Say 'Hello' in one word"},
                ],
                stream=False,
            )

            # A usable answer must be a ChatCompletion with non-empty content.
            has_content = (
                isinstance(response, ChatCompletion)
                and response.choices
                and response.choices[0].message
                and response.choices[0].message.content
            )
            if has_content:
                status = "✓"
                display_text = response.choices[0].message.content.strip()
                # Truncate long answers so the table stays aligned.
                if len(display_text) > 50:
                    display_text = display_text[:50] + "..."
            else:
                status = "✗"
                display_text = "Empty or invalid response"
            print(f"{model:<50} {status:<10} {display_text}")
        except Exception as e:
            print(f"{model:<50} {'✗':<10} {str(e)[:50]}")