webscout 8.2.9__py3-none-any.whl → 2026.1.19__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package versions as they appear in their respective public registries.
- webscout/AIauto.py +524 -251
- webscout/AIbase.py +247 -319
- webscout/AIutel.py +68 -703
- webscout/Bard.py +1072 -1026
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/__init__.py +20 -12
- webscout/Extra/GitToolkit/gitapi/gist.py +142 -0
- webscout/Extra/GitToolkit/gitapi/organization.py +91 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +308 -195
- webscout/Extra/GitToolkit/gitapi/search.py +162 -0
- webscout/Extra/GitToolkit/gitapi/trending.py +236 -0
- webscout/Extra/GitToolkit/gitapi/user.py +128 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +82 -62
- webscout/Extra/YTToolkit/README.md +443 -375
- webscout/Extra/YTToolkit/YTdownloader.py +953 -957
- webscout/Extra/YTToolkit/__init__.py +3 -3
- webscout/Extra/YTToolkit/transcriber.py +595 -476
- webscout/Extra/YTToolkit/ytapi/README.md +230 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +22 -6
- webscout/Extra/YTToolkit/ytapi/captions.py +190 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +302 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +178 -118
- webscout/Extra/YTToolkit/ytapi/hashtag.py +120 -0
- webscout/Extra/YTToolkit/ytapi/https.py +89 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -59
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -8
- webscout/Extra/YTToolkit/ytapi/query.py +143 -40
- webscout/Extra/YTToolkit/ytapi/shorts.py +122 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +68 -63
- webscout/Extra/YTToolkit/ytapi/suggestions.py +97 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +66 -62
- webscout/Extra/YTToolkit/ytapi/video.py +403 -232
- webscout/Extra/__init__.py +2 -3
- webscout/Extra/gguf.py +1298 -684
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +28 -28
- webscout/Extra/tempmail/async_utils.py +143 -141
- webscout/Extra/tempmail/base.py +172 -161
- webscout/Extra/tempmail/cli.py +191 -187
- webscout/Extra/tempmail/emailnator.py +88 -84
- webscout/Extra/tempmail/mail_tm.py +378 -361
- webscout/Extra/tempmail/temp_mail_io.py +304 -292
- webscout/Extra/weather.py +196 -194
- webscout/Extra/weather_ascii.py +17 -15
- webscout/Provider/AISEARCH/PERPLEXED_search.py +175 -0
- webscout/Provider/AISEARCH/Perplexity.py +292 -333
- webscout/Provider/AISEARCH/README.md +106 -279
- webscout/Provider/AISEARCH/__init__.py +16 -9
- webscout/Provider/AISEARCH/brave_search.py +298 -0
- webscout/Provider/AISEARCH/iask_search.py +357 -410
- webscout/Provider/AISEARCH/monica_search.py +200 -220
- webscout/Provider/AISEARCH/webpilotai_search.py +242 -255
- webscout/Provider/Algion.py +413 -0
- webscout/Provider/Andi.py +74 -69
- webscout/Provider/Apriel.py +313 -0
- webscout/Provider/Ayle.py +323 -0
- webscout/Provider/ChatSandbox.py +329 -342
- webscout/Provider/ClaudeOnline.py +365 -0
- webscout/Provider/Cohere.py +232 -208
- webscout/Provider/DeepAI.py +367 -0
- webscout/Provider/Deepinfra.py +467 -340
- webscout/Provider/EssentialAI.py +217 -0
- webscout/Provider/ExaAI.py +274 -261
- webscout/Provider/Gemini.py +175 -169
- webscout/Provider/GithubChat.py +385 -369
- webscout/Provider/Gradient.py +286 -0
- webscout/Provider/Groq.py +556 -801
- webscout/Provider/HadadXYZ.py +323 -0
- webscout/Provider/HeckAI.py +392 -375
- webscout/Provider/HuggingFace.py +387 -0
- webscout/Provider/IBM.py +340 -0
- webscout/Provider/Jadve.py +317 -291
- webscout/Provider/K2Think.py +306 -0
- webscout/Provider/Koboldai.py +221 -384
- webscout/Provider/Netwrck.py +273 -270
- webscout/Provider/Nvidia.py +310 -0
- webscout/Provider/OPENAI/DeepAI.py +489 -0
- webscout/Provider/OPENAI/K2Think.py +423 -0
- webscout/Provider/OPENAI/PI.py +463 -0
- webscout/Provider/OPENAI/README.md +890 -952
- webscout/Provider/OPENAI/TogetherAI.py +405 -0
- webscout/Provider/OPENAI/TwoAI.py +255 -357
- webscout/Provider/OPENAI/__init__.py +148 -40
- webscout/Provider/OPENAI/ai4chat.py +348 -293
- webscout/Provider/OPENAI/akashgpt.py +436 -0
- webscout/Provider/OPENAI/algion.py +303 -0
- webscout/Provider/OPENAI/{exachat.py → ayle.py} +365 -444
- webscout/Provider/OPENAI/base.py +253 -249
- webscout/Provider/OPENAI/cerebras.py +296 -0
- webscout/Provider/OPENAI/chatgpt.py +870 -556
- webscout/Provider/OPENAI/chatsandbox.py +233 -173
- webscout/Provider/OPENAI/deepinfra.py +403 -322
- webscout/Provider/OPENAI/e2b.py +2370 -1414
- webscout/Provider/OPENAI/elmo.py +278 -0
- webscout/Provider/OPENAI/exaai.py +452 -417
- webscout/Provider/OPENAI/freeassist.py +446 -0
- webscout/Provider/OPENAI/gradient.py +448 -0
- webscout/Provider/OPENAI/groq.py +380 -364
- webscout/Provider/OPENAI/hadadxyz.py +292 -0
- webscout/Provider/OPENAI/heckai.py +333 -308
- webscout/Provider/OPENAI/huggingface.py +321 -0
- webscout/Provider/OPENAI/ibm.py +425 -0
- webscout/Provider/OPENAI/llmchat.py +253 -0
- webscout/Provider/OPENAI/llmchatco.py +378 -335
- webscout/Provider/OPENAI/meta.py +541 -0
- webscout/Provider/OPENAI/netwrck.py +374 -357
- webscout/Provider/OPENAI/nvidia.py +317 -0
- webscout/Provider/OPENAI/oivscode.py +348 -287
- webscout/Provider/OPENAI/openrouter.py +328 -0
- webscout/Provider/OPENAI/pydantic_imports.py +1 -172
- webscout/Provider/OPENAI/sambanova.py +397 -0
- webscout/Provider/OPENAI/sonus.py +305 -304
- webscout/Provider/OPENAI/textpollinations.py +370 -339
- webscout/Provider/OPENAI/toolbaz.py +375 -413
- webscout/Provider/OPENAI/typefully.py +419 -355
- webscout/Provider/OPENAI/typliai.py +279 -0
- webscout/Provider/OPENAI/utils.py +314 -318
- webscout/Provider/OPENAI/wisecat.py +359 -387
- webscout/Provider/OPENAI/writecream.py +185 -163
- webscout/Provider/OPENAI/x0gpt.py +462 -365
- webscout/Provider/OPENAI/zenmux.py +380 -0
- webscout/Provider/OpenRouter.py +386 -0
- webscout/Provider/Openai.py +337 -496
- webscout/Provider/PI.py +443 -429
- webscout/Provider/QwenLM.py +346 -254
- webscout/Provider/STT/__init__.py +28 -0
- webscout/Provider/STT/base.py +303 -0
- webscout/Provider/STT/elevenlabs.py +264 -0
- webscout/Provider/Sambanova.py +317 -0
- webscout/Provider/TTI/README.md +69 -82
- webscout/Provider/TTI/__init__.py +37 -7
- webscout/Provider/TTI/base.py +147 -64
- webscout/Provider/TTI/claudeonline.py +393 -0
- webscout/Provider/TTI/magicstudio.py +292 -201
- webscout/Provider/TTI/miragic.py +180 -0
- webscout/Provider/TTI/pollinations.py +331 -221
- webscout/Provider/TTI/together.py +334 -0
- webscout/Provider/TTI/utils.py +14 -11
- webscout/Provider/TTS/README.md +186 -192
- webscout/Provider/TTS/__init__.py +43 -10
- webscout/Provider/TTS/base.py +523 -159
- webscout/Provider/TTS/deepgram.py +286 -156
- webscout/Provider/TTS/elevenlabs.py +189 -111
- webscout/Provider/TTS/freetts.py +218 -0
- webscout/Provider/TTS/murfai.py +288 -113
- webscout/Provider/TTS/openai_fm.py +364 -129
- webscout/Provider/TTS/parler.py +203 -111
- webscout/Provider/TTS/qwen.py +334 -0
- webscout/Provider/TTS/sherpa.py +286 -0
- webscout/Provider/TTS/speechma.py +693 -580
- webscout/Provider/TTS/streamElements.py +275 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TextPollinationsAI.py +331 -308
- webscout/Provider/TogetherAI.py +450 -0
- webscout/Provider/TwoAI.py +309 -475
- webscout/Provider/TypliAI.py +311 -305
- webscout/Provider/UNFINISHED/ChatHub.py +219 -209
- webscout/Provider/{OPENAI/glider.py → UNFINISHED/ChutesAI.py} +331 -326
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +300 -295
- webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +218 -198
- webscout/Provider/UNFINISHED/Qodo.py +481 -0
- webscout/Provider/{MCPCore.py → UNFINISHED/XenAI.py} +330 -315
- webscout/Provider/UNFINISHED/Youchat.py +347 -330
- webscout/Provider/UNFINISHED/aihumanizer.py +41 -0
- webscout/Provider/UNFINISHED/grammerchecker.py +37 -0
- webscout/Provider/UNFINISHED/liner.py +342 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +246 -263
- webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +231 -224
- webscout/Provider/WiseCat.py +256 -233
- webscout/Provider/WrDoChat.py +390 -370
- webscout/Provider/__init__.py +115 -174
- webscout/Provider/ai4chat.py +181 -174
- webscout/Provider/akashgpt.py +330 -335
- webscout/Provider/cerebras.py +397 -290
- webscout/Provider/cleeai.py +236 -213
- webscout/Provider/elmo.py +291 -283
- webscout/Provider/geminiapi.py +343 -208
- webscout/Provider/julius.py +245 -223
- webscout/Provider/learnfastai.py +333 -325
- webscout/Provider/llama3mitril.py +230 -215
- webscout/Provider/llmchat.py +308 -258
- webscout/Provider/llmchatco.py +321 -306
- webscout/Provider/meta.py +996 -801
- webscout/Provider/oivscode.py +332 -309
- webscout/Provider/searchchat.py +316 -292
- webscout/Provider/sonus.py +264 -258
- webscout/Provider/toolbaz.py +359 -353
- webscout/Provider/turboseek.py +332 -266
- webscout/Provider/typefully.py +262 -202
- webscout/Provider/x0gpt.py +332 -299
- webscout/__init__.py +31 -39
- webscout/__main__.py +5 -5
- webscout/cli.py +585 -524
- webscout/client.py +1497 -70
- webscout/conversation.py +140 -436
- webscout/exceptions.py +383 -362
- webscout/litagent/__init__.py +29 -29
- webscout/litagent/agent.py +492 -455
- webscout/litagent/constants.py +60 -60
- webscout/models.py +505 -181
- webscout/optimizers.py +74 -420
- webscout/prompt_manager.py +376 -288
- webscout/sanitize.py +1514 -0
- webscout/scout/README.md +452 -404
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +7 -7
- webscout/scout/core/crawler.py +330 -210
- webscout/scout/core/scout.py +800 -607
- webscout/scout/core/search_result.py +51 -96
- webscout/scout/core/text_analyzer.py +64 -63
- webscout/scout/core/text_utils.py +412 -277
- webscout/scout/core/web_analyzer.py +54 -52
- webscout/scout/element.py +872 -478
- webscout/scout/parsers/__init__.py +70 -69
- webscout/scout/parsers/html5lib_parser.py +182 -172
- webscout/scout/parsers/html_parser.py +238 -236
- webscout/scout/parsers/lxml_parser.py +203 -178
- webscout/scout/utils.py +38 -37
- webscout/search/__init__.py +47 -0
- webscout/search/base.py +201 -0
- webscout/search/bing_main.py +45 -0
- webscout/search/brave_main.py +92 -0
- webscout/search/duckduckgo_main.py +57 -0
- webscout/search/engines/__init__.py +127 -0
- webscout/search/engines/bing/__init__.py +15 -0
- webscout/search/engines/bing/base.py +35 -0
- webscout/search/engines/bing/images.py +114 -0
- webscout/search/engines/bing/news.py +96 -0
- webscout/search/engines/bing/suggestions.py +36 -0
- webscout/search/engines/bing/text.py +109 -0
- webscout/search/engines/brave/__init__.py +19 -0
- webscout/search/engines/brave/base.py +47 -0
- webscout/search/engines/brave/images.py +213 -0
- webscout/search/engines/brave/news.py +353 -0
- webscout/search/engines/brave/suggestions.py +318 -0
- webscout/search/engines/brave/text.py +167 -0
- webscout/search/engines/brave/videos.py +364 -0
- webscout/search/engines/duckduckgo/__init__.py +25 -0
- webscout/search/engines/duckduckgo/answers.py +80 -0
- webscout/search/engines/duckduckgo/base.py +189 -0
- webscout/search/engines/duckduckgo/images.py +100 -0
- webscout/search/engines/duckduckgo/maps.py +183 -0
- webscout/search/engines/duckduckgo/news.py +70 -0
- webscout/search/engines/duckduckgo/suggestions.py +22 -0
- webscout/search/engines/duckduckgo/text.py +221 -0
- webscout/search/engines/duckduckgo/translate.py +48 -0
- webscout/search/engines/duckduckgo/videos.py +80 -0
- webscout/search/engines/duckduckgo/weather.py +84 -0
- webscout/search/engines/mojeek.py +61 -0
- webscout/search/engines/wikipedia.py +77 -0
- webscout/search/engines/yahoo/__init__.py +41 -0
- webscout/search/engines/yahoo/answers.py +19 -0
- webscout/search/engines/yahoo/base.py +34 -0
- webscout/search/engines/yahoo/images.py +323 -0
- webscout/search/engines/yahoo/maps.py +19 -0
- webscout/search/engines/yahoo/news.py +258 -0
- webscout/search/engines/yahoo/suggestions.py +140 -0
- webscout/search/engines/yahoo/text.py +273 -0
- webscout/search/engines/yahoo/translate.py +19 -0
- webscout/search/engines/yahoo/videos.py +302 -0
- webscout/search/engines/yahoo/weather.py +220 -0
- webscout/search/engines/yandex.py +67 -0
- webscout/search/engines/yep/__init__.py +13 -0
- webscout/search/engines/yep/base.py +34 -0
- webscout/search/engines/yep/images.py +101 -0
- webscout/search/engines/yep/suggestions.py +38 -0
- webscout/search/engines/yep/text.py +99 -0
- webscout/search/http_client.py +172 -0
- webscout/search/results.py +141 -0
- webscout/search/yahoo_main.py +57 -0
- webscout/search/yep_main.py +48 -0
- webscout/server/__init__.py +48 -0
- webscout/server/config.py +78 -0
- webscout/server/exceptions.py +69 -0
- webscout/server/providers.py +286 -0
- webscout/server/request_models.py +131 -0
- webscout/server/request_processing.py +404 -0
- webscout/server/routes.py +642 -0
- webscout/server/server.py +351 -0
- webscout/server/ui_templates.py +1171 -0
- webscout/swiftcli/__init__.py +79 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +574 -297
- webscout/swiftcli/core/context.py +98 -104
- webscout/swiftcli/core/group.py +268 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +243 -221
- webscout/swiftcli/decorators/options.py +247 -220
- webscout/swiftcli/decorators/output.py +392 -252
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +134 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +58 -59
- webscout/swiftcli/utils/formatting.py +251 -252
- webscout/swiftcli/utils/parsing.py +368 -267
- webscout/update_checker.py +280 -136
- webscout/utils.py +28 -14
- webscout/version.py +2 -1
- webscout/version.py.bak +3 -0
- webscout/zeroart/__init__.py +218 -135
- webscout/zeroart/base.py +70 -66
- webscout/zeroart/effects.py +155 -101
- webscout/zeroart/fonts.py +1799 -1239
- webscout-2026.1.19.dist-info/METADATA +638 -0
- webscout-2026.1.19.dist-info/RECORD +312 -0
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/WHEEL +1 -1
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/entry_points.txt +1 -1
- webscout/DWEBS.py +0 -520
- webscout/Extra/Act.md +0 -309
- webscout/Extra/GitToolkit/gitapi/README.md +0 -110
- webscout/Extra/autocoder/__init__.py +0 -9
- webscout/Extra/autocoder/autocoder.py +0 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +0 -332
- webscout/Extra/gguf.md +0 -430
- webscout/Extra/weather.md +0 -281
- webscout/Litlogger/README.md +0 -10
- webscout/Litlogger/__init__.py +0 -15
- webscout/Litlogger/formats.py +0 -4
- webscout/Litlogger/handlers.py +0 -103
- webscout/Litlogger/levels.py +0 -13
- webscout/Litlogger/logger.py +0 -92
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/AISEARCH/felo_search.py +0 -202
- webscout/Provider/AISEARCH/genspark_search.py +0 -324
- webscout/Provider/AISEARCH/hika_search.py +0 -186
- webscout/Provider/AISEARCH/scira_search.py +0 -298
- webscout/Provider/Aitopia.py +0 -316
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -791
- webscout/Provider/ChatGPTClone.py +0 -237
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/Cloudflare.py +0 -324
- webscout/Provider/ExaChat.py +0 -358
- webscout/Provider/Flowith.py +0 -217
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/Glider.py +0 -225
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/HuggingFaceChat.py +0 -469
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/LambdaChat.py +0 -411
- webscout/Provider/Llama3.py +0 -259
- webscout/Provider/Nemotron.py +0 -218
- webscout/Provider/OLLAMA.py +0 -396
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -766
- webscout/Provider/OPENAI/Cloudflare.py +0 -378
- webscout/Provider/OPENAI/FreeGemini.py +0 -283
- webscout/Provider/OPENAI/NEMOTRON.py +0 -232
- webscout/Provider/OPENAI/Qwen3.py +0 -283
- webscout/Provider/OPENAI/api.py +0 -969
- webscout/Provider/OPENAI/c4ai.py +0 -373
- webscout/Provider/OPENAI/chatgptclone.py +0 -494
- webscout/Provider/OPENAI/copilot.py +0 -242
- webscout/Provider/OPENAI/flowith.py +0 -162
- webscout/Provider/OPENAI/freeaichat.py +0 -359
- webscout/Provider/OPENAI/mcpcore.py +0 -389
- webscout/Provider/OPENAI/multichat.py +0 -376
- webscout/Provider/OPENAI/opkfc.py +0 -496
- webscout/Provider/OPENAI/scirachat.py +0 -477
- webscout/Provider/OPENAI/standardinput.py +0 -433
- webscout/Provider/OPENAI/typegpt.py +0 -364
- webscout/Provider/OPENAI/uncovrAI.py +0 -463
- webscout/Provider/OPENAI/venice.py +0 -431
- webscout/Provider/OPENAI/yep.py +0 -382
- webscout/Provider/OpenGPT.py +0 -209
- webscout/Provider/Perplexitylabs.py +0 -415
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/StandardInput.py +0 -290
- webscout/Provider/TTI/aiarta.py +0 -365
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/fastflux.py +0 -200
- webscout/Provider/TTI/piclumen.py +0 -203
- webscout/Provider/TTI/pixelmuse.py +0 -225
- webscout/Provider/TTS/gesserit.py +0 -128
- webscout/Provider/TTS/sthir.py +0 -94
- webscout/Provider/TeachAnything.py +0 -229
- webscout/Provider/UNFINISHED/puterjs.py +0 -635
- webscout/Provider/UNFINISHED/test_lmarena.py +0 -119
- webscout/Provider/Venice.py +0 -258
- webscout/Provider/VercelAI.py +0 -253
- webscout/Provider/Writecream.py +0 -246
- webscout/Provider/WritingMate.py +0 -269
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/chatglm.py +0 -215
- webscout/Provider/copilot.py +0 -425
- webscout/Provider/freeaichat.py +0 -285
- webscout/Provider/granite.py +0 -235
- webscout/Provider/hermes.py +0 -266
- webscout/Provider/koala.py +0 -170
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/multichat.py +0 -364
- webscout/Provider/scira_chat.py +0 -299
- webscout/Provider/scnet.py +0 -243
- webscout/Provider/talkai.py +0 -194
- webscout/Provider/typegpt.py +0 -289
- webscout/Provider/uncovr.py +0 -368
- webscout/Provider/yep.py +0 -389
- webscout/litagent/Readme.md +0 -276
- webscout/litprinter/__init__.py +0 -59
- webscout/swiftcli/Readme.md +0 -323
- webscout/tempid.py +0 -128
- webscout/webscout_search.py +0 -1184
- webscout/webscout_search_async.py +0 -654
- webscout/yep_search.py +0 -347
- webscout/zeroart/README.md +0 -89
- webscout-8.2.9.dist-info/METADATA +0 -1033
- webscout-8.2.9.dist-info/RECORD +0 -289
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/top_level.txt +0 -0
@@ -1,206 +0,0 @@ webscout/Provider/HF_space/qwen_qwen2.py (entire file removed)

from dataclasses import dataclass
from enum import Enum
import requests
import json
import re
import uuid
from typing import Union, List, Dict, Generator, Optional, Any, TypedDict, Final

# Type definitions
class Role(Enum):
    SYSTEM = "system"
    USER = "user"
    ASSISTANT = "assistant"

class Message(TypedDict):
    role: str
    content: str

class APIResponse(TypedDict):
    event_id: str
    fn_index: int
    data: List[Any]

class StreamData(TypedDict):
    msg: str
    output: Dict[str, Any]

@dataclass
class APIConfig:
    url: Final[str] = "https://qwen-qwen2-72b-instruct.hf.space"
    api_endpoint: Final[str] = "https://qwen-qwen2-72b-instruct.hf.space/queue/join?"

@dataclass
class RequestHeaders:
    join: Dict[str, str]
    data: Dict[str, str]

    @classmethod
    def create_default(cls, base_url: str) -> 'RequestHeaders':
        common_headers = {
            'accept-language': 'en-US,en;q=0.9',
            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
        }

        return cls(
            join={
                **common_headers,
                'accept': '*/*',
                'content-type': 'application/json',
                'origin': base_url,
                'referer': f'{base_url}/',
            },
            data={
                **common_headers,
                'accept': 'text/event-stream',
                'referer': f'{base_url}/',
            }
        )

class QwenAPI:
    def __init__(self, config: APIConfig = APIConfig()):
        self.config = config
        self.headers = RequestHeaders.create_default(config.url)

    @staticmethod
    def generate_session_hash() -> str:
        """Generate a unique session hash."""
        return str(uuid.uuid4()).replace('-', '')[:12]

    @staticmethod
    def format_prompt(messages: List[Message]) -> str:
        """
        Formats a list of messages into a single prompt string.

        Args:
            messages: A list of message dictionaries with "role" and "content" keys.

        Returns:
            str: The formatted prompt.
        """
        return "\n".join(f"{message['role']}: {message['content']}" for message in messages)

    def create_sync_generator(
        self,
        model: str,
        messages: List[Message],
        proxy: Optional[str] = None,
        **kwargs: Any
    ) -> Generator[str, None, None]:
        """
        Synchronously streams responses from the Qwen_Qwen2_72B_Instruct API.

        Args:
            model: The model to use for the request.
            messages: A list of message dictionaries with "role" and "content" keys.
            proxy: Optional proxy URL for the request.
            **kwargs: Additional keyword arguments.

        Yields:
            str: Text chunks from the API response.

        Raises:
            requests.exceptions.RequestException: If the API request fails.
            json.JSONDecodeError: If the response cannot be parsed as JSON.
        """
        session_hash: str = self.generate_session_hash()

        # Prepare the prompt
        system_messages: List[str] = [
            message["content"]
            for message in messages
            if message["role"] == Role.SYSTEM.value
        ]
        system_prompt: str = "\n".join(system_messages)

        user_messages: List[Message] = [
            message
            for message in messages
            if message["role"] != Role.SYSTEM.value
        ]
        prompt: str = self.format_prompt(user_messages)

        payload_join: Dict[str, Any] = {
            "data": [prompt, [], system_prompt],
            "event_data": None,
            "fn_index": 0,
            "trigger_id": 11,
            "session_hash": session_hash
        }

        with requests.Session() as session:
            # Send join request
            response = session.post(
                self.config.api_endpoint,
                headers=self.headers.join,
                json=payload_join
            )
            response.raise_for_status()
            event_data: APIResponse = response.json()

            # Prepare data stream request
            url_data: str = f'{self.config.url}/queue/data'
            params_data: Dict[str, str] = {'session_hash': session_hash}

            # Send data stream request
            full_response: str = ""
            final_full_response: str = ""

            with session.get(
                url_data,
                headers=self.headers.data,
                params=params_data,
                stream=True
            ) as response:
                response.raise_for_status()

                for line in response.iter_lines():
                    if line:
                        decoded_line: str = line.decode('utf-8')
                        if decoded_line.startswith('data: '):
                            try:
                                json_data: StreamData = json.loads(decoded_line[6:])

                                if json_data.get('msg') == 'process_generating':
                                    if 'output' in json_data and 'data' in json_data['output']:
                                        output_data: List[Any] = json_data['output']['data']
                                        if len(output_data) > 1 and len(output_data[1]) > 0:
                                            for item in output_data[1]:
                                                if isinstance(item, list) and len(item) > 1:
                                                    fragment: str = str(item[1])
                                                    if not re.match(r'^\[.*\]$', fragment) and not full_response.endswith(fragment):
                                                        full_response += fragment
                                                        yield fragment

                                if json_data.get('msg') == 'process_completed':
                                    if 'output' in json_data and 'data' in json_data['output']:
                                        output_data = json_data['output']['data']
                                        if len(output_data) > 1 and len(output_data[1]) > 0:
                                            final_full_response = output_data[1][0][1]

                                            if final_full_response.startswith(full_response):
                                                final_full_response = final_full_response[len(full_response):]

                                            if final_full_response:
                                                yield final_full_response
                                            break

                            except json.JSONDecodeError as e:
                                print(f"Could not parse JSON: {decoded_line}")
                                raise e


def main() -> None:
    messages: List[Message] = [
        {"role": Role.SYSTEM.value, "content": "You are a helpful assistant."},
        {"role": Role.USER.value, "content": "LOL"}
    ]

    api = QwenAPI()
    for text in api.create_sync_generator("qwen-qwen2-72b-instruct", messages):
        print(text, end="", flush=True)
    print("\n---\n")


if __name__ == "__main__":
    main()
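For reference, the stream parser above keys off two Gradio queue event types, `process_generating` and `process_completed`, delivered as SSE `data:` lines. The payload shape it expects looks roughly like this (illustrative values only; the field layout matches what the code reads, the text is made up):

```python
# Illustrative Gradio queue events as consumed by QwenAPI.create_sync_generator
# (values are invented for illustration; only the field layout matters).
generating_event = {
    "msg": "process_generating",
    # output["data"][1] is a list of [op, text] pairs; item[1] is the streamed fragment
    "output": {"data": [None, [["append", "Hello"]]]},
}
completed_event = {
    "msg": "process_completed",
    # output["data"][1][0][1] is the final full reply
    "output": {"data": [None, [["chat", "Hello! How can I help you today?"]]]},
}
```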
@@ -1,469 +0,0 @@ webscout/Provider/HuggingFaceChat.py (entire file removed)

from curl_cffi.requests import Session
from curl_cffi import CurlError
import uuid
import json
import time
import random
import re
from typing import Any, Dict, List, Optional, Union, Generator

from webscout.AIutel import Conversation
from webscout.AIbase import Provider
from webscout import exceptions
from webscout.litagent import LitAgent

class HuggingFaceChat(Provider):
    """
    A class to interact with the Hugging Face Chat API.
    Uses cookies for authentication and supports streaming responses.
    """

    # Available models (default models - will be updated dynamically)
    AVAILABLE_MODELS = ['meta-llama/Llama-3.3-70B-Instruct', 'Qwen/Qwen3-235B-A22B', 'Qwen/Qwen2.5-72B-Instruct', 'CohereForAI/c4ai-command-r-plus-08-2024', 'deepseek-ai/DeepSeek-R1-Distill-Qwen-32B', 'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF', 'Qwen/QwQ-32B', 'google/gemma-3-27b-it', 'mistralai/Mistral-Small-3.1-24B-Instruct-2503', 'Qwen/Qwen2.5-VL-32B-Instruct', 'microsoft/Phi-4', 'NousResearch/Hermes-3-Llama-3.1-8B', 'internal/task']

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2000,  # Note: max_tokens is not used by this API
        timeout: int = 60,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        model: str = "Qwen/QwQ-32B",
        cookie_path: str = "cookies.json",
        assistantId: str = None,  # Note: assistantId is not used by this API
        system_prompt: str = "You are a helpful assistant. Please answer the following question.",
    ):
        """Initialize the HuggingFaceChat client."""
        self.url = "https://huggingface.co/chat"
        self.cookie_path = cookie_path
        # Initialize curl_cffi Session
        self.session = Session()
        # Set up headers for all requests (remove those handled by impersonate)
        self.headers = {
            "Content-Type": "application/json",  # Keep Content-Type for JSON posts
            "Accept": "*/*",  # Keep Accept
            "Accept-Language": "en-US,en;q=0.9",  # Keep Accept-Language
            "Origin": "https://huggingface.co",  # Keep Origin
            "Referer": "https://huggingface.co/chat",  # Keep Referer (will be updated)
            "Sec-Fetch-Dest": "empty",  # Keep Sec-Fetch-*
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "DNT": "1",  # Keep DNT
            "Priority": "u=1, i"  # Keep Priority
        }

        # Update curl_cffi session headers and proxies
        self.session.headers.update(self.headers)
        self.session.proxies = proxies  # Assign proxies directly
        self.system_prompt = system_prompt
        self.assistantId = assistantId or None  # Generate a new UUID if not provided
        # Load cookies for authentication
        self.cookies = self.load_cookies()
        # Apply cookies to curl_cffi session
        if self.cookies:
            for name, value in self.cookies.items():
                # Set cookies on the session object
                self.session.cookies.set(name, value, domain="huggingface.co")  # Specify domain if needed

        # Update available models
        self.update_available_models()

        # Set default model if none provided
        self.model = model

        # Provider settings
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}

        # Initialize a simplified conversation history for file saving only
        self.conversation = Conversation(is_conversation, max_tokens, filepath, update_file)

        # Store conversation data for different models
        self._conversation_data = {}

    def update_available_models(self):
        """Update the available models list from HuggingFace"""
        try:
            models = self.get_models()
            if models and len(models) > 0:
                self.AVAILABLE_MODELS = models
        except Exception:
            # Fall back to the default models list if fetching fails
            pass

    @classmethod
    def get_models(cls):
        """Fetch available models from HuggingFace."""
        try:
            # Use a temporary curl_cffi session for this class method
            temp_session = Session()
            response = temp_session.get(
                "https://huggingface.co/chat",
                impersonate="chrome110"  # Use impersonate for fetching
            )
            response.raise_for_status()
            text = response.text
            models_match = re.search(r'models:(\[.+?\]),oldModels:', text)

            if not models_match:
                return cls.AVAILABLE_MODELS

            models_text = models_match.group(1)
            models_text = re.sub(r',parameters:{[^}]+?}', '', models_text)
            models_text = models_text.replace('void 0', 'null')

            def add_quotation_mark(match):
                return f'{match.group(1)}"{match.group(2)}":'

            models_text = re.sub(r'([{,])([A-Za-z0-9_]+?):', add_quotation_mark, models_text)

            models_data = json.loads(models_text)
            # print([model["id"] for model in models_data])
            return [model["id"] for model in models_data]
        except (CurlError, Exception):  # Catch CurlError and other exceptions
            return cls.AVAILABLE_MODELS

    def load_cookies(self):
        """Load cookies from a JSON file"""
        try:
            with open(self.cookie_path, 'r') as f:
                cookies_data = json.load(f)

            # Convert the cookie list to a dictionary format for requests
            cookies = {}
            for cookie in cookies_data:
                # Only include cookies that are not expired and have a name and value
                if 'name' in cookie and 'value' in cookie:
                    # Check if the cookie hasn't expired
                    if 'expirationDate' not in cookie or cookie['expirationDate'] > time.time():
                        cookies[cookie['name']] = cookie['value']

            return cookies
        except Exception:
            return {}

    def create_conversation(self, model: str):
        """Create a new conversation with the specified model."""
        url = "https://huggingface.co/chat/conversation"
        payload = {"model": model, "assistantId": self.assistantId, "preprompt": self.system_prompt}

        # Update referer for this specific request
        headers = self.headers.copy()
        headers["Referer"] = f"https://huggingface.co/chat/models/{model}"

        try:
            # Use curl_cffi session post with impersonate
            response = self.session.post(
                url,
                json=payload,
                headers=headers,  # Use updated headers with specific Referer
                impersonate="chrome110"  # Use a common impersonation profile
            )

            if response.status_code == 401:
                raise exceptions.AuthenticationError("Authentication failed. Please check your cookies.")

            # Handle other error codes
            if response.status_code != 200:
                return None

            data = response.json()
            conversation_id = data.get("conversationId")

            # Store conversation data
            if model not in self._conversation_data:
                self._conversation_data[model] = {
                    "conversationId": conversation_id,
                    "messageId": str(uuid.uuid4())  # Initial message ID
                }

            return conversation_id
        except CurlError:  # Catch CurlError
            # Log or handle CurlError specifically if needed
            return None
        except Exception:  # Catch other potential exceptions (like JSONDecodeError, HTTPError)
            return None

    def fetch_message_id(self, conversation_id: str) -> str:
        """Fetch the latest message ID for a conversation."""
        try:
            url = f"https://huggingface.co/chat/conversation/{conversation_id}/__data.json?x-sveltekit-invalidated=11"
            response = self.session.get(
                url,
                headers=self.headers,  # Use base headers
                impersonate="chrome110"  # Use a common impersonation profile
            )
            response.raise_for_status()

            # Parse the JSON data from the response
            json_data = None
            for line in response.text.split('\n'):
                if line.strip():
                    try:
                        parsed = json.loads(line)
                        if isinstance(parsed, dict) and "nodes" in parsed:
                            json_data = parsed
                            break
                    except json.JSONDecodeError:
                        continue

            if not json_data:
                # Fall back to a UUID if we can't parse the response
                return str(uuid.uuid4())

            # Extract message ID using the same pattern as in the example
            if json_data.get("nodes", []) and json_data["nodes"][-1].get("type") == "error":
                return str(uuid.uuid4())

            data = json_data["nodes"][1]["data"]
            keys = data[data[0]["messages"]]
            message_keys = data[keys[-1]]
            message_id = data[message_keys["id"]]

            return message_id

        except CurlError:  # Catch CurlError
            return str(uuid.uuid4())  # Fallback on CurlError
        except Exception:  # Catch other potential exceptions
            # Fall back to a UUID if there's an error
            return str(uuid.uuid4())

    def generate_boundary(self):
        """Generate a random boundary for multipart/form-data requests"""
        boundary_chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
        boundary = "----WebKitFormBoundary"
        boundary += "".join(random.choice(boundary_chars) for _ in range(16))
        return boundary

    def process_response(self, response, prompt: str):
        """Process streaming response and extract content."""
        full_text = ""
        sources = None
        reasoning_text = ""
        has_reasoning = False

        for line in response.iter_lines(decode_unicode=True):
            if not line:
                continue

            try:
                # Parse each line as JSON
                data = json.loads(line)

                # Handle different response types
                if "type" not in data:
                    continue

                if data["type"] == "stream" and "token" in data:
                    token = data["token"].replace("\u0000", "")
                    full_text += token
                    resp = {"text": token}
                    yield resp
                elif data["type"] == "finalAnswer":
                    final_text = data.get("text", "")
                    if final_text and not full_text:
                        full_text = final_text
                        resp = {"text": final_text}
                        yield resp
                elif data["type"] == "webSearch" and "sources" in data:
                    sources = data["sources"]
                elif data["type"] == "reasoning":
                    has_reasoning = True
                    if data.get("subtype") == "stream" and "token" in data:
                        reasoning_text += data["token"]
                    # elif data.get("subtype") == "status":
                    #     # For status updates in reasoning, we can just append them as a comment
                    #     if data.get("status"):
                    #         reasoning_text += f"\n# {data['status']}"

                # If we have reasoning, prepend it to the next text output
                if reasoning_text and not full_text:
                    resp = {"text": f"<think>\n{reasoning_text}\n</think>\n", "is_reasoning": True}
                    yield resp

            except json.JSONDecodeError:
                continue

        # Update conversation history only for saving to file if needed
        if full_text and self.conversation.file:
            if has_reasoning:
                full_text_with_reasoning = f"<think>\n{reasoning_text}\n</think>\n{full_text}"
                self.last_response = {"text": full_text_with_reasoning}
                self.conversation.update_chat_history(prompt, full_text_with_reasoning)
            else:
                self.last_response = {"text": full_text}
                self.conversation.update_chat_history(prompt, full_text)

        return full_text

    def ask(
        self,
        prompt: str,
        stream: bool = False,  # API supports streaming
        raw: bool = False,
        optimizer: str = None,  # Note: optimizer is not used by this API
        conversationally: bool = False,  # Note: conversationally is not used by this API
        web_search: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        """Send a message to the HuggingFace Chat API"""
        model = self.model

        # Check if we have a conversation for this model
        if model not in self._conversation_data:
            conversation_id = self.create_conversation(model)
            if not conversation_id:
                raise exceptions.FailedToGenerateResponseError(f"Failed to create conversation with model {model}")
        else:
            conversation_id = self._conversation_data[model]["conversationId"]
            # Refresh message ID
            self._conversation_data[model]["messageId"] = self.fetch_message_id(conversation_id)

        url = f"https://huggingface.co/chat/conversation/{conversation_id}"
        message_id = self._conversation_data[model]["messageId"]

        # Data to send - use the prompt directly without generating a complete prompt
        # since HuggingFace maintains conversation state internally
        request_data = {
            "inputs": prompt,
            "id": message_id,
            "is_retry": False,
            "is_continue": False,
            "web_search": web_search,
            "tools": ["66e85bb396d054c5771bc6cb", "00000000000000000000000a"]
        }

        # Create multipart form data
        boundary = self.generate_boundary()
        multipart_headers = self.headers.copy()
        multipart_headers["Content-Type"] = f"multipart/form-data; boundary={boundary}"

        # Serialize the data to JSON
        data_json = json.dumps(request_data, separators=(',', ':'))

        # Create the multipart form data body
        body = f"--{boundary}\r\n"
        body += 'Content-Disposition: form-data; name="data"\r\n'
        body += "Content-Type: application/json\r\n\r\n"
        body += f"{data_json}\r\n"
        body += f"--{boundary}--\r\n"

        multipart_headers["Content-Length"] = str(len(body))

        def for_stream():
            try:
                # Try with multipart/form-data first
                response = None
                try:
                    # Use curl_cffi session post with impersonate
                    response = self.session.post(
                        url,
                        data=body,
                        headers=multipart_headers,  # Use multipart headers
                        stream=True,
                        timeout=self.timeout,
                        impersonate="chrome110"  # Use a common impersonation profile
                    )
                    response.raise_for_status()  # Check status after potential error
                except (CurlError, exceptions.FailedToGenerateResponseError, Exception):  # Catch potential errors
                    response = None  # Ensure response is None if multipart fails

                # If multipart fails or returns an error, retry with regular JSON
                if not response or response.status_code != 200:
                    # Use curl_cffi session post with impersonate
                    response = self.session.post(
                        url,
                        json=request_data,  # Use JSON payload
                        headers=self.headers,  # Use class-defined headers
                        stream=True,
                        timeout=self.timeout,
                        impersonate="chrome110"  # Use a common impersonation profile
                    )

                response.raise_for_status()  # Check status after potential fallback

                # Process the streaming response (iter_lines works with curl_cffi)
                yield from self.process_response(response, prompt)

            except (CurlError, exceptions.FailedToGenerateResponseError, Exception) as e:  # Catch errors from both attempts
                # Surface authentication failures immediately
                if hasattr(e, 'response') and e.response is not None:
                    status_code = e.response.status_code
                    if status_code == 401:
                        raise exceptions.AuthenticationError("Authentication failed. Please check your cookies.")

                # Try another model if the current one fails
                if len(self.AVAILABLE_MODELS) > 1:
                    current_model_index = self.AVAILABLE_MODELS.index(self.model) if self.model in self.AVAILABLE_MODELS else 0
                    next_model_index = (current_model_index + 1) % len(self.AVAILABLE_MODELS)
                    self.model = self.AVAILABLE_MODELS[next_model_index]

                    # Create new conversation with the alternate model
                    conversation_id = self.create_conversation(self.model)
                    if conversation_id:
                        # Try again with the new model
                        yield from self.ask(prompt, stream=True, raw=raw, optimizer=optimizer,
                                            conversationally=conversationally, web_search=web_search)
                        return

                # If we get here, all models failed
                raise exceptions.FailedToGenerateResponseError(f"Request failed after trying fallback: {str(e)}") from e

        def for_non_stream():
            # Aggregate the stream using the updated for_stream logic
            response_text = ""
            try:
                # Ensure raw=False so for_stream yields dicts
                for chunk_data in for_stream():
                    if isinstance(chunk_data, dict) and "text" in chunk_data:
                        response_text += chunk_data["text"]
                    # Handle raw string case if raw=True was passed
                    elif raw and isinstance(chunk_data, str):
                        response_text += chunk_data
            except Exception as e:
                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
                if not response_text:
                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e

            # last_response and history are updated within process_response called by for_stream
            # Return the final aggregated response dict or raw string
            return response_text if raw else {"text": response_text}  # Return dict for consistency

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,  # Note: optimizer is not used by this API
        conversationally: bool = False,  # Note: conversationally is not used by this API
        web_search: bool = False
    ) -> Union[str, Generator]:
        """Generate a response to a prompt"""
        def for_stream_chat():
            # ask() yields dicts or strings when streaming
            gen = self.ask(
                prompt, stream=True, raw=False,  # Ensure ask yields dicts
                optimizer=optimizer, conversationally=conversationally, web_search=web_search
            )
            for response_dict in gen:
                yield self.get_message(response_dict)  # get_message expects dict

        def for_non_stream_chat():
            # ask() returns dict or str when not streaming
            response_data = self.ask(
                prompt, stream=False, raw=False,  # Ensure ask returns dict
                optimizer=optimizer, conversationally=conversationally, web_search=web_search
            )
            return self.get_message(response_data)  # get_message expects dict

        return for_stream_chat() if stream else for_non_stream_chat()

    def get_message(self, response: dict) -> str:
        """Extract message text from response"""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response.get("text", "")