webscout 8.2.9__py3-none-any.whl → 2026.1.19__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webscout/AIauto.py +524 -251
- webscout/AIbase.py +247 -319
- webscout/AIutel.py +68 -703
- webscout/Bard.py +1072 -1026
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/__init__.py +20 -12
- webscout/Extra/GitToolkit/gitapi/gist.py +142 -0
- webscout/Extra/GitToolkit/gitapi/organization.py +91 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +308 -195
- webscout/Extra/GitToolkit/gitapi/search.py +162 -0
- webscout/Extra/GitToolkit/gitapi/trending.py +236 -0
- webscout/Extra/GitToolkit/gitapi/user.py +128 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +82 -62
- webscout/Extra/YTToolkit/README.md +443 -375
- webscout/Extra/YTToolkit/YTdownloader.py +953 -957
- webscout/Extra/YTToolkit/__init__.py +3 -3
- webscout/Extra/YTToolkit/transcriber.py +595 -476
- webscout/Extra/YTToolkit/ytapi/README.md +230 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +22 -6
- webscout/Extra/YTToolkit/ytapi/captions.py +190 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +302 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +178 -118
- webscout/Extra/YTToolkit/ytapi/hashtag.py +120 -0
- webscout/Extra/YTToolkit/ytapi/https.py +89 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -59
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -8
- webscout/Extra/YTToolkit/ytapi/query.py +143 -40
- webscout/Extra/YTToolkit/ytapi/shorts.py +122 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +68 -63
- webscout/Extra/YTToolkit/ytapi/suggestions.py +97 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +66 -62
- webscout/Extra/YTToolkit/ytapi/video.py +403 -232
- webscout/Extra/__init__.py +2 -3
- webscout/Extra/gguf.py +1298 -684
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +28 -28
- webscout/Extra/tempmail/async_utils.py +143 -141
- webscout/Extra/tempmail/base.py +172 -161
- webscout/Extra/tempmail/cli.py +191 -187
- webscout/Extra/tempmail/emailnator.py +88 -84
- webscout/Extra/tempmail/mail_tm.py +378 -361
- webscout/Extra/tempmail/temp_mail_io.py +304 -292
- webscout/Extra/weather.py +196 -194
- webscout/Extra/weather_ascii.py +17 -15
- webscout/Provider/AISEARCH/PERPLEXED_search.py +175 -0
- webscout/Provider/AISEARCH/Perplexity.py +292 -333
- webscout/Provider/AISEARCH/README.md +106 -279
- webscout/Provider/AISEARCH/__init__.py +16 -9
- webscout/Provider/AISEARCH/brave_search.py +298 -0
- webscout/Provider/AISEARCH/iask_search.py +357 -410
- webscout/Provider/AISEARCH/monica_search.py +200 -220
- webscout/Provider/AISEARCH/webpilotai_search.py +242 -255
- webscout/Provider/Algion.py +413 -0
- webscout/Provider/Andi.py +74 -69
- webscout/Provider/Apriel.py +313 -0
- webscout/Provider/Ayle.py +323 -0
- webscout/Provider/ChatSandbox.py +329 -342
- webscout/Provider/ClaudeOnline.py +365 -0
- webscout/Provider/Cohere.py +232 -208
- webscout/Provider/DeepAI.py +367 -0
- webscout/Provider/Deepinfra.py +467 -340
- webscout/Provider/EssentialAI.py +217 -0
- webscout/Provider/ExaAI.py +274 -261
- webscout/Provider/Gemini.py +175 -169
- webscout/Provider/GithubChat.py +385 -369
- webscout/Provider/Gradient.py +286 -0
- webscout/Provider/Groq.py +556 -801
- webscout/Provider/HadadXYZ.py +323 -0
- webscout/Provider/HeckAI.py +392 -375
- webscout/Provider/HuggingFace.py +387 -0
- webscout/Provider/IBM.py +340 -0
- webscout/Provider/Jadve.py +317 -291
- webscout/Provider/K2Think.py +306 -0
- webscout/Provider/Koboldai.py +221 -384
- webscout/Provider/Netwrck.py +273 -270
- webscout/Provider/Nvidia.py +310 -0
- webscout/Provider/OPENAI/DeepAI.py +489 -0
- webscout/Provider/OPENAI/K2Think.py +423 -0
- webscout/Provider/OPENAI/PI.py +463 -0
- webscout/Provider/OPENAI/README.md +890 -952
- webscout/Provider/OPENAI/TogetherAI.py +405 -0
- webscout/Provider/OPENAI/TwoAI.py +255 -357
- webscout/Provider/OPENAI/__init__.py +148 -40
- webscout/Provider/OPENAI/ai4chat.py +348 -293
- webscout/Provider/OPENAI/akashgpt.py +436 -0
- webscout/Provider/OPENAI/algion.py +303 -0
- webscout/Provider/OPENAI/{exachat.py → ayle.py} +365 -444
- webscout/Provider/OPENAI/base.py +253 -249
- webscout/Provider/OPENAI/cerebras.py +296 -0
- webscout/Provider/OPENAI/chatgpt.py +870 -556
- webscout/Provider/OPENAI/chatsandbox.py +233 -173
- webscout/Provider/OPENAI/deepinfra.py +403 -322
- webscout/Provider/OPENAI/e2b.py +2370 -1414
- webscout/Provider/OPENAI/elmo.py +278 -0
- webscout/Provider/OPENAI/exaai.py +452 -417
- webscout/Provider/OPENAI/freeassist.py +446 -0
- webscout/Provider/OPENAI/gradient.py +448 -0
- webscout/Provider/OPENAI/groq.py +380 -364
- webscout/Provider/OPENAI/hadadxyz.py +292 -0
- webscout/Provider/OPENAI/heckai.py +333 -308
- webscout/Provider/OPENAI/huggingface.py +321 -0
- webscout/Provider/OPENAI/ibm.py +425 -0
- webscout/Provider/OPENAI/llmchat.py +253 -0
- webscout/Provider/OPENAI/llmchatco.py +378 -335
- webscout/Provider/OPENAI/meta.py +541 -0
- webscout/Provider/OPENAI/netwrck.py +374 -357
- webscout/Provider/OPENAI/nvidia.py +317 -0
- webscout/Provider/OPENAI/oivscode.py +348 -287
- webscout/Provider/OPENAI/openrouter.py +328 -0
- webscout/Provider/OPENAI/pydantic_imports.py +1 -172
- webscout/Provider/OPENAI/sambanova.py +397 -0
- webscout/Provider/OPENAI/sonus.py +305 -304
- webscout/Provider/OPENAI/textpollinations.py +370 -339
- webscout/Provider/OPENAI/toolbaz.py +375 -413
- webscout/Provider/OPENAI/typefully.py +419 -355
- webscout/Provider/OPENAI/typliai.py +279 -0
- webscout/Provider/OPENAI/utils.py +314 -318
- webscout/Provider/OPENAI/wisecat.py +359 -387
- webscout/Provider/OPENAI/writecream.py +185 -163
- webscout/Provider/OPENAI/x0gpt.py +462 -365
- webscout/Provider/OPENAI/zenmux.py +380 -0
- webscout/Provider/OpenRouter.py +386 -0
- webscout/Provider/Openai.py +337 -496
- webscout/Provider/PI.py +443 -429
- webscout/Provider/QwenLM.py +346 -254
- webscout/Provider/STT/__init__.py +28 -0
- webscout/Provider/STT/base.py +303 -0
- webscout/Provider/STT/elevenlabs.py +264 -0
- webscout/Provider/Sambanova.py +317 -0
- webscout/Provider/TTI/README.md +69 -82
- webscout/Provider/TTI/__init__.py +37 -7
- webscout/Provider/TTI/base.py +147 -64
- webscout/Provider/TTI/claudeonline.py +393 -0
- webscout/Provider/TTI/magicstudio.py +292 -201
- webscout/Provider/TTI/miragic.py +180 -0
- webscout/Provider/TTI/pollinations.py +331 -221
- webscout/Provider/TTI/together.py +334 -0
- webscout/Provider/TTI/utils.py +14 -11
- webscout/Provider/TTS/README.md +186 -192
- webscout/Provider/TTS/__init__.py +43 -10
- webscout/Provider/TTS/base.py +523 -159
- webscout/Provider/TTS/deepgram.py +286 -156
- webscout/Provider/TTS/elevenlabs.py +189 -111
- webscout/Provider/TTS/freetts.py +218 -0
- webscout/Provider/TTS/murfai.py +288 -113
- webscout/Provider/TTS/openai_fm.py +364 -129
- webscout/Provider/TTS/parler.py +203 -111
- webscout/Provider/TTS/qwen.py +334 -0
- webscout/Provider/TTS/sherpa.py +286 -0
- webscout/Provider/TTS/speechma.py +693 -580
- webscout/Provider/TTS/streamElements.py +275 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TextPollinationsAI.py +331 -308
- webscout/Provider/TogetherAI.py +450 -0
- webscout/Provider/TwoAI.py +309 -475
- webscout/Provider/TypliAI.py +311 -305
- webscout/Provider/UNFINISHED/ChatHub.py +219 -209
- webscout/Provider/{OPENAI/glider.py → UNFINISHED/ChutesAI.py} +331 -326
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +300 -295
- webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +218 -198
- webscout/Provider/UNFINISHED/Qodo.py +481 -0
- webscout/Provider/{MCPCore.py → UNFINISHED/XenAI.py} +330 -315
- webscout/Provider/UNFINISHED/Youchat.py +347 -330
- webscout/Provider/UNFINISHED/aihumanizer.py +41 -0
- webscout/Provider/UNFINISHED/grammerchecker.py +37 -0
- webscout/Provider/UNFINISHED/liner.py +342 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +246 -263
- webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +231 -224
- webscout/Provider/WiseCat.py +256 -233
- webscout/Provider/WrDoChat.py +390 -370
- webscout/Provider/__init__.py +115 -174
- webscout/Provider/ai4chat.py +181 -174
- webscout/Provider/akashgpt.py +330 -335
- webscout/Provider/cerebras.py +397 -290
- webscout/Provider/cleeai.py +236 -213
- webscout/Provider/elmo.py +291 -283
- webscout/Provider/geminiapi.py +343 -208
- webscout/Provider/julius.py +245 -223
- webscout/Provider/learnfastai.py +333 -325
- webscout/Provider/llama3mitril.py +230 -215
- webscout/Provider/llmchat.py +308 -258
- webscout/Provider/llmchatco.py +321 -306
- webscout/Provider/meta.py +996 -801
- webscout/Provider/oivscode.py +332 -309
- webscout/Provider/searchchat.py +316 -292
- webscout/Provider/sonus.py +264 -258
- webscout/Provider/toolbaz.py +359 -353
- webscout/Provider/turboseek.py +332 -266
- webscout/Provider/typefully.py +262 -202
- webscout/Provider/x0gpt.py +332 -299
- webscout/__init__.py +31 -39
- webscout/__main__.py +5 -5
- webscout/cli.py +585 -524
- webscout/client.py +1497 -70
- webscout/conversation.py +140 -436
- webscout/exceptions.py +383 -362
- webscout/litagent/__init__.py +29 -29
- webscout/litagent/agent.py +492 -455
- webscout/litagent/constants.py +60 -60
- webscout/models.py +505 -181
- webscout/optimizers.py +74 -420
- webscout/prompt_manager.py +376 -288
- webscout/sanitize.py +1514 -0
- webscout/scout/README.md +452 -404
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +7 -7
- webscout/scout/core/crawler.py +330 -210
- webscout/scout/core/scout.py +800 -607
- webscout/scout/core/search_result.py +51 -96
- webscout/scout/core/text_analyzer.py +64 -63
- webscout/scout/core/text_utils.py +412 -277
- webscout/scout/core/web_analyzer.py +54 -52
- webscout/scout/element.py +872 -478
- webscout/scout/parsers/__init__.py +70 -69
- webscout/scout/parsers/html5lib_parser.py +182 -172
- webscout/scout/parsers/html_parser.py +238 -236
- webscout/scout/parsers/lxml_parser.py +203 -178
- webscout/scout/utils.py +38 -37
- webscout/search/__init__.py +47 -0
- webscout/search/base.py +201 -0
- webscout/search/bing_main.py +45 -0
- webscout/search/brave_main.py +92 -0
- webscout/search/duckduckgo_main.py +57 -0
- webscout/search/engines/__init__.py +127 -0
- webscout/search/engines/bing/__init__.py +15 -0
- webscout/search/engines/bing/base.py +35 -0
- webscout/search/engines/bing/images.py +114 -0
- webscout/search/engines/bing/news.py +96 -0
- webscout/search/engines/bing/suggestions.py +36 -0
- webscout/search/engines/bing/text.py +109 -0
- webscout/search/engines/brave/__init__.py +19 -0
- webscout/search/engines/brave/base.py +47 -0
- webscout/search/engines/brave/images.py +213 -0
- webscout/search/engines/brave/news.py +353 -0
- webscout/search/engines/brave/suggestions.py +318 -0
- webscout/search/engines/brave/text.py +167 -0
- webscout/search/engines/brave/videos.py +364 -0
- webscout/search/engines/duckduckgo/__init__.py +25 -0
- webscout/search/engines/duckduckgo/answers.py +80 -0
- webscout/search/engines/duckduckgo/base.py +189 -0
- webscout/search/engines/duckduckgo/images.py +100 -0
- webscout/search/engines/duckduckgo/maps.py +183 -0
- webscout/search/engines/duckduckgo/news.py +70 -0
- webscout/search/engines/duckduckgo/suggestions.py +22 -0
- webscout/search/engines/duckduckgo/text.py +221 -0
- webscout/search/engines/duckduckgo/translate.py +48 -0
- webscout/search/engines/duckduckgo/videos.py +80 -0
- webscout/search/engines/duckduckgo/weather.py +84 -0
- webscout/search/engines/mojeek.py +61 -0
- webscout/search/engines/wikipedia.py +77 -0
- webscout/search/engines/yahoo/__init__.py +41 -0
- webscout/search/engines/yahoo/answers.py +19 -0
- webscout/search/engines/yahoo/base.py +34 -0
- webscout/search/engines/yahoo/images.py +323 -0
- webscout/search/engines/yahoo/maps.py +19 -0
- webscout/search/engines/yahoo/news.py +258 -0
- webscout/search/engines/yahoo/suggestions.py +140 -0
- webscout/search/engines/yahoo/text.py +273 -0
- webscout/search/engines/yahoo/translate.py +19 -0
- webscout/search/engines/yahoo/videos.py +302 -0
- webscout/search/engines/yahoo/weather.py +220 -0
- webscout/search/engines/yandex.py +67 -0
- webscout/search/engines/yep/__init__.py +13 -0
- webscout/search/engines/yep/base.py +34 -0
- webscout/search/engines/yep/images.py +101 -0
- webscout/search/engines/yep/suggestions.py +38 -0
- webscout/search/engines/yep/text.py +99 -0
- webscout/search/http_client.py +172 -0
- webscout/search/results.py +141 -0
- webscout/search/yahoo_main.py +57 -0
- webscout/search/yep_main.py +48 -0
- webscout/server/__init__.py +48 -0
- webscout/server/config.py +78 -0
- webscout/server/exceptions.py +69 -0
- webscout/server/providers.py +286 -0
- webscout/server/request_models.py +131 -0
- webscout/server/request_processing.py +404 -0
- webscout/server/routes.py +642 -0
- webscout/server/server.py +351 -0
- webscout/server/ui_templates.py +1171 -0
- webscout/swiftcli/__init__.py +79 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +574 -297
- webscout/swiftcli/core/context.py +98 -104
- webscout/swiftcli/core/group.py +268 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +243 -221
- webscout/swiftcli/decorators/options.py +247 -220
- webscout/swiftcli/decorators/output.py +392 -252
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +134 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +58 -59
- webscout/swiftcli/utils/formatting.py +251 -252
- webscout/swiftcli/utils/parsing.py +368 -267
- webscout/update_checker.py +280 -136
- webscout/utils.py +28 -14
- webscout/version.py +2 -1
- webscout/version.py.bak +3 -0
- webscout/zeroart/__init__.py +218 -135
- webscout/zeroart/base.py +70 -66
- webscout/zeroart/effects.py +155 -101
- webscout/zeroart/fonts.py +1799 -1239
- webscout-2026.1.19.dist-info/METADATA +638 -0
- webscout-2026.1.19.dist-info/RECORD +312 -0
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/WHEEL +1 -1
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/entry_points.txt +1 -1
- webscout/DWEBS.py +0 -520
- webscout/Extra/Act.md +0 -309
- webscout/Extra/GitToolkit/gitapi/README.md +0 -110
- webscout/Extra/autocoder/__init__.py +0 -9
- webscout/Extra/autocoder/autocoder.py +0 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +0 -332
- webscout/Extra/gguf.md +0 -430
- webscout/Extra/weather.md +0 -281
- webscout/Litlogger/README.md +0 -10
- webscout/Litlogger/__init__.py +0 -15
- webscout/Litlogger/formats.py +0 -4
- webscout/Litlogger/handlers.py +0 -103
- webscout/Litlogger/levels.py +0 -13
- webscout/Litlogger/logger.py +0 -92
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/AISEARCH/felo_search.py +0 -202
- webscout/Provider/AISEARCH/genspark_search.py +0 -324
- webscout/Provider/AISEARCH/hika_search.py +0 -186
- webscout/Provider/AISEARCH/scira_search.py +0 -298
- webscout/Provider/Aitopia.py +0 -316
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -791
- webscout/Provider/ChatGPTClone.py +0 -237
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/Cloudflare.py +0 -324
- webscout/Provider/ExaChat.py +0 -358
- webscout/Provider/Flowith.py +0 -217
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/Glider.py +0 -225
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/HuggingFaceChat.py +0 -469
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/LambdaChat.py +0 -411
- webscout/Provider/Llama3.py +0 -259
- webscout/Provider/Nemotron.py +0 -218
- webscout/Provider/OLLAMA.py +0 -396
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -766
- webscout/Provider/OPENAI/Cloudflare.py +0 -378
- webscout/Provider/OPENAI/FreeGemini.py +0 -283
- webscout/Provider/OPENAI/NEMOTRON.py +0 -232
- webscout/Provider/OPENAI/Qwen3.py +0 -283
- webscout/Provider/OPENAI/api.py +0 -969
- webscout/Provider/OPENAI/c4ai.py +0 -373
- webscout/Provider/OPENAI/chatgptclone.py +0 -494
- webscout/Provider/OPENAI/copilot.py +0 -242
- webscout/Provider/OPENAI/flowith.py +0 -162
- webscout/Provider/OPENAI/freeaichat.py +0 -359
- webscout/Provider/OPENAI/mcpcore.py +0 -389
- webscout/Provider/OPENAI/multichat.py +0 -376
- webscout/Provider/OPENAI/opkfc.py +0 -496
- webscout/Provider/OPENAI/scirachat.py +0 -477
- webscout/Provider/OPENAI/standardinput.py +0 -433
- webscout/Provider/OPENAI/typegpt.py +0 -364
- webscout/Provider/OPENAI/uncovrAI.py +0 -463
- webscout/Provider/OPENAI/venice.py +0 -431
- webscout/Provider/OPENAI/yep.py +0 -382
- webscout/Provider/OpenGPT.py +0 -209
- webscout/Provider/Perplexitylabs.py +0 -415
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/StandardInput.py +0 -290
- webscout/Provider/TTI/aiarta.py +0 -365
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/fastflux.py +0 -200
- webscout/Provider/TTI/piclumen.py +0 -203
- webscout/Provider/TTI/pixelmuse.py +0 -225
- webscout/Provider/TTS/gesserit.py +0 -128
- webscout/Provider/TTS/sthir.py +0 -94
- webscout/Provider/TeachAnything.py +0 -229
- webscout/Provider/UNFINISHED/puterjs.py +0 -635
- webscout/Provider/UNFINISHED/test_lmarena.py +0 -119
- webscout/Provider/Venice.py +0 -258
- webscout/Provider/VercelAI.py +0 -253
- webscout/Provider/Writecream.py +0 -246
- webscout/Provider/WritingMate.py +0 -269
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/chatglm.py +0 -215
- webscout/Provider/copilot.py +0 -425
- webscout/Provider/freeaichat.py +0 -285
- webscout/Provider/granite.py +0 -235
- webscout/Provider/hermes.py +0 -266
- webscout/Provider/koala.py +0 -170
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/multichat.py +0 -364
- webscout/Provider/scira_chat.py +0 -299
- webscout/Provider/scnet.py +0 -243
- webscout/Provider/talkai.py +0 -194
- webscout/Provider/typegpt.py +0 -289
- webscout/Provider/uncovr.py +0 -368
- webscout/Provider/yep.py +0 -389
- webscout/litagent/Readme.md +0 -276
- webscout/litprinter/__init__.py +0 -59
- webscout/swiftcli/Readme.md +0 -323
- webscout/tempid.py +0 -128
- webscout/webscout_search.py +0 -1184
- webscout/webscout_search_async.py +0 -654
- webscout/yep_search.py +0 -347
- webscout/zeroart/README.md +0 -89
- webscout-8.2.9.dist-info/METADATA +0 -1033
- webscout-8.2.9.dist-info/RECORD +0 -289
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/top_level.txt +0 -0
|
@@ -1,378 +0,0 @@
|
|
|
1
|
-
import json
|
|
2
|
-
import time
|
|
3
|
-
import uuid
|
|
4
|
-
import re
|
|
5
|
-
from typing import List, Dict, Optional, Union, Generator, Any
|
|
6
|
-
|
|
7
|
-
from curl_cffi import CurlError
|
|
8
|
-
from curl_cffi.requests import Session
|
|
9
|
-
from uuid import uuid4
|
|
10
|
-
|
|
11
|
-
# Import base classes and utility structures
|
|
12
|
-
from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
|
|
13
|
-
from .utils import (
|
|
14
|
-
ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
|
|
15
|
-
ChatCompletionMessage, CompletionUsage, count_tokens
|
|
16
|
-
)
|
|
17
|
-
|
|
18
|
-
from webscout.AIutel import sanitize_stream
|
|
19
|
-
from webscout.litagent import LitAgent
|
|
20
|
-
|
|
21
|
-
class Completions(BaseCompletions):
|
|
22
|
-
def __init__(self, client: 'Cloudflare'):
|
|
23
|
-
self._client = client
|
|
24
|
-
|
|
25
|
-
def create(
|
|
26
|
-
self,
|
|
27
|
-
*,
|
|
28
|
-
model: str,
|
|
29
|
-
messages: List[Dict[str, str]],
|
|
30
|
-
max_tokens: Optional[int] = None,
|
|
31
|
-
stream: bool = False,
|
|
32
|
-
temperature: Optional[float] = None,
|
|
33
|
-
top_p: Optional[float] = None,
|
|
34
|
-
**kwargs: Any
|
|
35
|
-
) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
|
|
36
|
-
"""
|
|
37
|
-
Create a chat completion with Cloudflare API.
|
|
38
|
-
|
|
39
|
-
Args:
|
|
40
|
-
model: The model to use (from AVAILABLE_MODELS)
|
|
41
|
-
messages: List of message dictionaries with 'role' and 'content'
|
|
42
|
-
max_tokens: Maximum number of tokens to generate
|
|
43
|
-
stream: Whether to stream the response
|
|
44
|
-
temperature: Sampling temperature (0-1)
|
|
45
|
-
top_p: Nucleus sampling parameter (0-1)
|
|
46
|
-
**kwargs: Additional parameters to pass to the API
|
|
47
|
-
|
|
48
|
-
Returns:
|
|
49
|
-
If stream=False, returns a ChatCompletion object
|
|
50
|
-
If stream=True, returns a Generator yielding ChatCompletionChunk objects
|
|
51
|
-
"""
|
|
52
|
-
# Prepare the payload
|
|
53
|
-
payload = {
|
|
54
|
-
"messages": messages,
|
|
55
|
-
"lora": None,
|
|
56
|
-
"model": model,
|
|
57
|
-
"max_tokens": max_tokens or 600,
|
|
58
|
-
"stream": True # Always use streaming API
|
|
59
|
-
}
|
|
60
|
-
|
|
61
|
-
# Generate request ID and timestamp
|
|
62
|
-
request_id = str(uuid.uuid4())
|
|
63
|
-
created_time = int(time.time())
|
|
64
|
-
|
|
65
|
-
# Use streaming implementation if requested
|
|
66
|
-
if stream:
|
|
67
|
-
return self._create_streaming(
|
|
68
|
-
request_id=request_id,
|
|
69
|
-
created_time=created_time,
|
|
70
|
-
model=model,
|
|
71
|
-
payload=payload
|
|
72
|
-
)
|
|
73
|
-
|
|
74
|
-
# Otherwise use non-streaming implementation
|
|
75
|
-
return self._create_non_streaming(
|
|
76
|
-
request_id=request_id,
|
|
77
|
-
created_time=created_time,
|
|
78
|
-
model=model,
|
|
79
|
-
payload=payload
|
|
80
|
-
)
|
|
81
|
-
|
|
82
|
-
def _create_streaming(
|
|
83
|
-
self,
|
|
84
|
-
*,
|
|
85
|
-
request_id: str,
|
|
86
|
-
created_time: int,
|
|
87
|
-
model: str,
|
|
88
|
-
payload: Dict[str, Any]
|
|
89
|
-
) -> Generator[ChatCompletionChunk, None, None]:
|
|
90
|
-
"""Implementation for streaming chat completions."""
|
|
91
|
-
try:
|
|
92
|
-
response = self._client.session.post(
|
|
93
|
-
self._client.chat_endpoint,
|
|
94
|
-
headers=self._client.headers,
|
|
95
|
-
cookies=self._client.cookies,
|
|
96
|
-
data=json.dumps(payload),
|
|
97
|
-
stream=True,
|
|
98
|
-
timeout=self._client.timeout,
|
|
99
|
-
impersonate="chrome120"
|
|
100
|
-
)
|
|
101
|
-
response.raise_for_status()
|
|
102
|
-
|
|
103
|
-
# Process the stream using sanitize_stream
|
|
104
|
-
# This handles the extraction of content from Cloudflare's response format
|
|
105
|
-
processed_stream = sanitize_stream(
|
|
106
|
-
data=response.iter_content(chunk_size=None),
|
|
107
|
-
intro_value=None,
|
|
108
|
-
to_json=False,
|
|
109
|
-
skip_markers=None,
|
|
110
|
-
content_extractor=self._cloudflare_extractor,
|
|
111
|
-
yield_raw_on_error=False
|
|
112
|
-
)
|
|
113
|
-
|
|
114
|
-
# Track accumulated content for token counting
|
|
115
|
-
accumulated_content = ""
|
|
116
|
-
|
|
117
|
-
# Stream the chunks
|
|
118
|
-
for content_chunk in processed_stream:
|
|
119
|
-
if content_chunk and isinstance(content_chunk, str):
|
|
120
|
-
accumulated_content += content_chunk
|
|
121
|
-
|
|
122
|
-
# Create and yield a chunk
|
|
123
|
-
delta = ChoiceDelta(content=content_chunk)
|
|
124
|
-
choice = Choice(index=0, delta=delta, finish_reason=None)
|
|
125
|
-
|
|
126
|
-
# Estimate token usage using count_tokens
|
|
127
|
-
prompt_tokens = count_tokens([msg.get("content", "") for msg in payload["messages"]])
|
|
128
|
-
completion_tokens = count_tokens(accumulated_content)
|
|
129
|
-
|
|
130
|
-
chunk = ChatCompletionChunk(
|
|
131
|
-
id=request_id,
|
|
132
|
-
choices=[choice],
|
|
133
|
-
created=created_time,
|
|
134
|
-
model=model
|
|
135
|
-
)
|
|
136
|
-
|
|
137
|
-
yield chunk
|
|
138
|
-
|
|
139
|
-
# Final chunk with finish_reason
|
|
140
|
-
delta = ChoiceDelta(content=None)
|
|
141
|
-
choice = Choice(index=0, delta=delta, finish_reason="stop")
|
|
142
|
-
chunk = ChatCompletionChunk(
|
|
143
|
-
id=request_id,
|
|
144
|
-
choices=[choice],
|
|
145
|
-
created=created_time,
|
|
146
|
-
model=model
|
|
147
|
-
)
|
|
148
|
-
|
|
149
|
-
yield chunk
|
|
150
|
-
|
|
151
|
-
except CurlError as e:
|
|
152
|
-
raise IOError(f"Cloudflare streaming request failed (CurlError): {e}") from e
|
|
153
|
-
except Exception as e:
|
|
154
|
-
raise IOError(f"Cloudflare streaming request failed: {e}") from e
|
|
155
|
-
|
|
156
|
-
def _create_non_streaming(
|
|
157
|
-
self,
|
|
158
|
-
*,
|
|
159
|
-
request_id: str,
|
|
160
|
-
created_time: int,
|
|
161
|
-
model: str,
|
|
162
|
-
payload: Dict[str, Any]
|
|
163
|
-
) -> ChatCompletion:
|
|
164
|
-
"""Implementation for non-streaming chat completions."""
|
|
165
|
-
try:
|
|
166
|
-
response = self._client.session.post(
|
|
167
|
-
self._client.chat_endpoint,
|
|
168
|
-
headers=self._client.headers,
|
|
169
|
-
cookies=self._client.cookies,
|
|
170
|
-
data=json.dumps(payload),
|
|
171
|
-
stream=True, # Still use streaming API but collect all chunks
|
|
172
|
-
timeout=self._client.timeout,
|
|
173
|
-
impersonate="chrome120"
|
|
174
|
-
)
|
|
175
|
-
response.raise_for_status()
|
|
176
|
-
|
|
177
|
-
# Process the stream and collect all content
|
|
178
|
-
processed_stream = sanitize_stream(
|
|
179
|
-
data=response.iter_content(chunk_size=None),
|
|
180
|
-
intro_value=None,
|
|
181
|
-
to_json=False,
|
|
182
|
-
skip_markers=None,
|
|
183
|
-
content_extractor=self._cloudflare_extractor,
|
|
184
|
-
yield_raw_on_error=False
|
|
185
|
-
)
|
|
186
|
-
|
|
187
|
-
full_content = ""
|
|
188
|
-
for content_chunk in processed_stream:
|
|
189
|
-
if content_chunk and isinstance(content_chunk, str):
|
|
190
|
-
full_content += content_chunk
|
|
191
|
-
|
|
192
|
-
# Create the completion message
|
|
193
|
-
message = ChatCompletionMessage(
|
|
194
|
-
role="assistant",
|
|
195
|
-
content=full_content
|
|
196
|
-
)
|
|
197
|
-
|
|
198
|
-
# Create the choice
|
|
199
|
-
choice = Choice(
|
|
200
|
-
index=0,
|
|
201
|
-
message=message,
|
|
202
|
-
finish_reason="stop"
|
|
203
|
-
)
|
|
204
|
-
|
|
205
|
-
# Estimate token usage using count_tokens
|
|
206
|
-
prompt_tokens = count_tokens([msg.get("content", "") for msg in payload["messages"]])
|
|
207
|
-
completion_tokens = count_tokens(full_content)
|
|
208
|
-
usage = CompletionUsage(
|
|
209
|
-
prompt_tokens=prompt_tokens,
|
|
210
|
-
completion_tokens=completion_tokens,
|
|
211
|
-
total_tokens=prompt_tokens + completion_tokens
|
|
212
|
-
)
|
|
213
|
-
|
|
214
|
-
# Create the completion object
|
|
215
|
-
completion = ChatCompletion(
|
|
216
|
-
id=request_id,
|
|
217
|
-
choices=[choice],
|
|
218
|
-
created=created_time,
|
|
219
|
-
model=model,
|
|
220
|
-
usage=usage,
|
|
221
|
-
)
|
|
222
|
-
|
|
223
|
-
return completion
|
|
224
|
-
|
|
225
|
-
except CurlError as e:
|
|
226
|
-
raise IOError(f"Cloudflare request failed (CurlError): {e}") from e
|
|
227
|
-
except Exception as e:
|
|
228
|
-
raise IOError(f"Cloudflare request failed: {e}") from e
|
|
229
|
-
|
|
230
|
-
@staticmethod
|
|
231
|
-
def _cloudflare_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
|
|
232
|
-
"""
|
|
233
|
-
Extracts content from Cloudflare stream JSON objects.
|
|
234
|
-
|
|
235
|
-
Args:
|
|
236
|
-
chunk: The chunk to extract content from
|
|
237
|
-
|
|
238
|
-
Returns:
|
|
239
|
-
Extracted content or None if extraction failed
|
|
240
|
-
"""
|
|
241
|
-
if isinstance(chunk, str):
|
|
242
|
-
# Use re.search to find the pattern 0:"<content>"
|
|
243
|
-
match = re.search(r'0:"(.*?)"(?=,|$)', chunk)
|
|
244
|
-
if match:
|
|
245
|
-
# Decode potential unicode escapes and handle escaped quotes/backslashes
|
|
246
|
-
content = match.group(1).encode().decode('unicode_escape')
|
|
247
|
-
return content.replace('\\\\', '\\').replace('\\"', '"')
|
|
248
|
-
return None
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
class Chat(BaseChat):
|
|
252
|
-
def __init__(self, client: 'Cloudflare'):
|
|
253
|
-
self.completions = Completions(client)
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
class Cloudflare(OpenAICompatibleProvider):
|
|
257
|
-
"""
|
|
258
|
-
OpenAI-compatible client for Cloudflare API.
|
|
259
|
-
|
|
260
|
-
Usage:
|
|
261
|
-
client = Cloudflare()
|
|
262
|
-
response = client.chat.completions.create(
|
|
263
|
-
model="@cf/meta/llama-3-8b-instruct",
|
|
264
|
-
messages=[{"role": "user", "content": "Hello!"}]
|
|
265
|
-
)
|
|
266
|
-
print(response.choices[0].message.content)
|
|
267
|
-
"""
|
|
268
|
-
|
|
269
|
-
AVAILABLE_MODELS = [
|
|
270
|
-
"@hf/thebloke/deepseek-coder-6.7b-base-awq",
|
|
271
|
-
"@hf/thebloke/deepseek-coder-6.7b-instruct-awq",
|
|
272
|
-
"@cf/deepseek-ai/deepseek-math-7b-instruct",
|
|
273
|
-
"@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
|
|
274
|
-
"@cf/thebloke/discolm-german-7b-v1-awq",
|
|
275
|
-
"@cf/tiiuae/falcon-7b-instruct",
|
|
276
|
-
"@cf/google/gemma-3-12b-it",
|
|
277
|
-
"@hf/google/gemma-7b-it",
|
|
278
|
-
"@hf/nousresearch/hermes-2-pro-mistral-7b",
|
|
279
|
-
"@hf/thebloke/llama-2-13b-chat-awq",
|
|
280
|
-
"@cf/meta/llama-2-7b-chat-fp16",
|
|
281
|
-
"@cf/meta/llama-2-7b-chat-int8",
|
|
282
|
-
"@cf/meta/llama-3-8b-instruct",
|
|
283
|
-
"@cf/meta/llama-3-8b-instruct-awq",
|
|
284
|
-
"@cf/meta/llama-3.1-8b-instruct-awq",
|
|
285
|
-
"@cf/meta/llama-3.1-8b-instruct-fp8",
|
|
286
|
-
"@cf/meta/llama-3.2-11b-vision-instruct",
|
|
287
|
-
"@cf/meta/llama-3.2-1b-instruct",
|
|
288
|
-
"@cf/meta/llama-3.2-3b-instruct",
|
|
289
|
-
"@cf/meta/llama-3.3-70b-instruct-fp8-fast",
|
|
290
|
-
"@cf/meta/llama-4-scout-17b-16e-instruct",
|
|
291
|
-
"@cf/meta/llama-guard-3-8b",
|
|
292
|
-
"@hf/thebloke/llamaguard-7b-awq",
|
|
293
|
-
"@hf/meta-llama/meta-llama-3-8b-instruct",
|
|
294
|
-
"@cf/mistral/mistral-7b-instruct-v0.1",
|
|
295
|
-
"@hf/thebloke/mistral-7b-instruct-v0.1-awq",
|
|
296
|
-
"@hf/mistral/mistral-7b-instruct-v0.2",
|
|
297
|
-
"@cf/mistralai/mistral-small-3.1-24b-instruct",
|
|
298
|
-
"@hf/thebloke/neural-chat-7b-v3-1-awq",
|
|
299
|
-
"@cf/openchat/openchat-3.5-0106",
|
|
300
|
-
"@hf/thebloke/openhermes-2.5-mistral-7b-awq",
|
|
301
|
-
"@cf/microsoft/phi-2",
|
|
302
|
-
"@cf/qwen/qwen1.5-0.5b-chat",
|
|
303
|
-
"@cf/qwen/qwen1.5-1.8b-chat",
|
|
304
|
-
"@cf/qwen/qwen1.5-14b-chat-awq",
|
|
305
|
-
"@cf/qwen/qwen1.5-7b-chat-awq",
|
|
306
|
-
"@cf/qwen/qwen2.5-coder-32b-instruct",
|
|
307
|
-
"@cf/qwen/qwq-32b",
|
|
308
|
-
"@cf/defog/sqlcoder-7b-2",
|
|
309
|
-
"@hf/nexusflow/starling-lm-7b-beta",
|
|
310
|
-
"@cf/tinyllama/tinyllama-1.1b-chat-v1.0",
|
|
311
|
-
"@cf/fblgit/una-cybertron-7b-v2-bf16",
|
|
312
|
-
"@hf/thebloke/zephyr-7b-beta-awq"
|
|
313
|
-
]
|
|
314
|
-
|
|
315
|
-
def __init__(
|
|
316
|
-
self,
|
|
317
|
-
api_key: Optional[str] = None, # Not used but included for compatibility
|
|
318
|
-
timeout: int = 30,
|
|
319
|
-
proxies: dict = {},
|
|
320
|
-
):
|
|
321
|
-
"""
|
|
322
|
-
Initialize the Cloudflare client.
|
|
323
|
-
|
|
324
|
-
Args:
|
|
325
|
-
api_key: Not used but included for compatibility with OpenAI interface
|
|
326
|
-
timeout: Request timeout in seconds
|
|
327
|
-
proxies: Optional proxy configuration
|
|
328
|
-
"""
|
|
329
|
-
self.timeout = timeout
|
|
330
|
-
self.proxies = proxies
|
|
331
|
-
self.chat_endpoint = "https://playground.ai.cloudflare.com/api/inference"
|
|
332
|
-
|
|
333
|
-
# Initialize session
|
|
334
|
-
self.session = Session()
|
|
335
|
-
|
|
336
|
-
# Set headers
|
|
337
|
-
self.headers = {
|
|
338
|
-
'Accept': 'text/event-stream',
|
|
339
|
-
'Accept-Encoding': 'gzip, deflate, br, zstd',
|
|
340
|
-
'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
|
|
341
|
-
'Content-Type': 'application/json',
|
|
342
|
-
'DNT': '1',
|
|
343
|
-
'Origin': 'https://playground.ai.cloudflare.com',
|
|
344
|
-
'Referer': 'https://playground.ai.cloudflare.com/',
|
|
345
|
-
'Sec-CH-UA': '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
|
|
346
|
-
'Sec-CH-UA-Mobile': '?0',
|
|
347
|
-
'Sec-CH-UA-Platform': '"Windows"',
|
|
348
|
-
'Sec-Fetch-Dest': 'empty',
|
|
349
|
-
'Sec-Fetch-Mode': 'cors',
|
|
350
|
-
'Sec-Fetch-Site': 'same-origin',
|
|
351
|
-
'User-Agent': LitAgent().random()
|
|
352
|
-
}
|
|
353
|
-
|
|
354
|
-
# Set cookies
|
|
355
|
-
self.cookies = {
|
|
356
|
-
'cfzs_amplitude': uuid4().hex,
|
|
357
|
-
'cfz_amplitude': uuid4().hex,
|
|
358
|
-
'__cf_bm': uuid4().hex,
|
|
359
|
-
}
|
|
360
|
-
|
|
361
|
-
# Apply headers and proxies to session
|
|
362
|
-
self.session.headers.update(self.headers)
|
|
363
|
-
self.session.proxies = proxies
|
|
364
|
-
|
|
365
|
-
# Initialize chat interface
|
|
366
|
-
self.chat = Chat(self)
|
|
367
|
-
|
|
368
@property
def models(self):
    """Expose the provider's model list via a minimal OpenAI-style object.

    Returns an object with a single ``list()`` method yielding
    ``AVAILABLE_MODELS`` from the concrete class.
    """
    owner_cls = type(self)

    class _ModelList:
        def list(_inner):
            return owner_cls.AVAILABLE_MODELS

    return _ModelList()
|
-
# @classmethod
|
|
376
|
-
# def models(cls):
|
|
377
|
-
# """Return the list of available models for Cloudflare."""
|
|
378
|
-
# return cls.AVAILABLE_MODELS
|
|
@@ -1,283 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env python3
|
|
2
|
-
"""
|
|
3
|
-
OpenAI-compatible client for the FreeGemini provider,
|
|
4
|
-
which uses the free-gemini.vercel.app service.
|
|
5
|
-
"""
|
|
6
|
-
|
|
7
|
-
import time
|
|
8
|
-
import uuid
|
|
9
|
-
import json
|
|
10
|
-
from typing import List, Dict, Optional, Union, Generator, Any
|
|
11
|
-
|
|
12
|
-
from curl_cffi.requests import Session
|
|
13
|
-
|
|
14
|
-
from webscout.litagent import LitAgent
|
|
15
|
-
from webscout.AIutel import sanitize_stream
|
|
16
|
-
from webscout.Provider.OPENAI.base import BaseChat, BaseCompletions, OpenAICompatibleProvider
|
|
17
|
-
from webscout.Provider.OPENAI.utils import (
|
|
18
|
-
ChatCompletion,
|
|
19
|
-
ChatCompletionChunk,
|
|
20
|
-
Choice,
|
|
21
|
-
ChatCompletionMessage,
|
|
22
|
-
ChoiceDelta,
|
|
23
|
-
CompletionUsage,
|
|
24
|
-
format_prompt,
|
|
25
|
-
get_system_prompt,
|
|
26
|
-
count_tokens
|
|
27
|
-
)
|
|
28
|
-
|
|
29
|
-
# ANSI escape codes for formatting
|
|
30
|
-
BOLD = "\033[1m"
|
|
31
|
-
RED = "\033[91m"
|
|
32
|
-
RESET = "\033[0m"
|
|
33
|
-
|
|
34
|
-
class Completions(BaseCompletions):
|
|
35
|
-
def __init__(self, client: 'FreeGemini'):
|
|
36
|
-
self._client = client
|
|
37
|
-
|
|
38
|
-
def create(
|
|
39
|
-
self,
|
|
40
|
-
*,
|
|
41
|
-
model: str,
|
|
42
|
-
messages: List[Dict[str, str]],
|
|
43
|
-
max_tokens: Optional[int] = None,
|
|
44
|
-
stream: bool = False,
|
|
45
|
-
temperature: Optional[float] = None,
|
|
46
|
-
top_p: Optional[float] = None,
|
|
47
|
-
**kwargs: Any
|
|
48
|
-
) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
|
|
49
|
-
"""
|
|
50
|
-
Creates a model response for the given chat conversation.
|
|
51
|
-
Mimics openai.chat.completions.create
|
|
52
|
-
"""
|
|
53
|
-
request_id = f"chatcmpl-{uuid.uuid4()}"
|
|
54
|
-
created_time = int(time.time())
|
|
55
|
-
|
|
56
|
-
api_payload = {
|
|
57
|
-
"contents": messages,
|
|
58
|
-
"generationConfig": {
|
|
59
|
-
"temperature": temperature,
|
|
60
|
-
"maxOutputTokens": max_tokens,
|
|
61
|
-
"topP": top_p
|
|
62
|
-
},
|
|
63
|
-
"safetySettings": [
|
|
64
|
-
{"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_ONLY_HIGH"},
|
|
65
|
-
{"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_ONLY_HIGH"},
|
|
66
|
-
{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_ONLY_HIGH"},
|
|
67
|
-
{"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_ONLY_HIGH"}
|
|
68
|
-
]
|
|
69
|
-
}
|
|
70
|
-
|
|
71
|
-
if stream:
|
|
72
|
-
return self._create_stream(request_id, created_time, model, api_payload)
|
|
73
|
-
else:
|
|
74
|
-
return self._create_non_stream(request_id, created_time, model, api_payload)
|
|
75
|
-
|
|
76
|
-
def _create_stream(
|
|
77
|
-
self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
|
|
78
|
-
) -> Generator[ChatCompletionChunk, None, None]:
|
|
79
|
-
try:
|
|
80
|
-
response = self._client.session.post(
|
|
81
|
-
self._client.api_endpoint,
|
|
82
|
-
json=payload,
|
|
83
|
-
stream=True,
|
|
84
|
-
timeout=self._client.timeout,
|
|
85
|
-
impersonate="chrome120"
|
|
86
|
-
)
|
|
87
|
-
response.raise_for_status()
|
|
88
|
-
|
|
89
|
-
# Track token usage across chunks
|
|
90
|
-
completion_tokens = 0
|
|
91
|
-
streaming_text = ""
|
|
92
|
-
|
|
93
|
-
processed_stream = sanitize_stream(
|
|
94
|
-
data=response.iter_content(chunk_size=None),
|
|
95
|
-
intro_value="data:",
|
|
96
|
-
to_json=True,
|
|
97
|
-
content_extractor=self._gemini_extractor,
|
|
98
|
-
yield_raw_on_error=False
|
|
99
|
-
)
|
|
100
|
-
|
|
101
|
-
for text_chunk in processed_stream:
|
|
102
|
-
if text_chunk and isinstance(text_chunk, str):
|
|
103
|
-
streaming_text += text_chunk
|
|
104
|
-
completion_tokens += count_tokens(text_chunk)
|
|
105
|
-
|
|
106
|
-
delta = ChoiceDelta(content=text_chunk, role="assistant")
|
|
107
|
-
choice = Choice(index=0, delta=delta, finish_reason=None)
|
|
108
|
-
chunk = ChatCompletionChunk(
|
|
109
|
-
id=request_id,
|
|
110
|
-
choices=[choice],
|
|
111
|
-
created=created_time,
|
|
112
|
-
model=model
|
|
113
|
-
)
|
|
114
|
-
yield chunk
|
|
115
|
-
|
|
116
|
-
# Final chunk with finish_reason
|
|
117
|
-
delta = ChoiceDelta(content=None)
|
|
118
|
-
choice = Choice(index=0, delta=delta, finish_reason="stop")
|
|
119
|
-
chunk = ChatCompletionChunk(
|
|
120
|
-
id=request_id,
|
|
121
|
-
choices=[choice],
|
|
122
|
-
created=created_time,
|
|
123
|
-
model=model
|
|
124
|
-
)
|
|
125
|
-
yield chunk
|
|
126
|
-
|
|
127
|
-
except Exception as e:
|
|
128
|
-
print(f"{RED}Error during FreeGemini stream request: {e}{RESET}")
|
|
129
|
-
raise IOError(f"FreeGemini stream request failed: {e}") from e
|
|
130
|
-
|
|
131
|
-
def _create_non_stream(
|
|
132
|
-
self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
|
|
133
|
-
) -> ChatCompletion:
|
|
134
|
-
try:
|
|
135
|
-
# For non-streaming, we'll still use streaming since the API returns data in chunks
|
|
136
|
-
response = self._client.session.post(
|
|
137
|
-
self._client.api_endpoint,
|
|
138
|
-
json=payload,
|
|
139
|
-
stream=True, # API always returns streaming format
|
|
140
|
-
timeout=self._client.timeout,
|
|
141
|
-
impersonate="chrome120"
|
|
142
|
-
)
|
|
143
|
-
response.raise_for_status()
|
|
144
|
-
|
|
145
|
-
# Process the streaming response to get the full text
|
|
146
|
-
full_text_response = ""
|
|
147
|
-
|
|
148
|
-
# Process each chunk using the same method as streaming
|
|
149
|
-
for line in response.iter_lines():
|
|
150
|
-
if line and line.startswith(b"data:"):
|
|
151
|
-
# Extract the JSON part
|
|
152
|
-
json_str = line[5:].strip().decode('utf-8')
|
|
153
|
-
if json_str != "[DONE]":
|
|
154
|
-
try:
|
|
155
|
-
data = json.loads(json_str)
|
|
156
|
-
# Use the existing extractor to get the text
|
|
157
|
-
text_chunk = self._gemini_extractor(data)
|
|
158
|
-
if text_chunk:
|
|
159
|
-
full_text_response += text_chunk
|
|
160
|
-
except json.JSONDecodeError:
|
|
161
|
-
# Skip invalid JSON
|
|
162
|
-
pass
|
|
163
|
-
|
|
164
|
-
# Create usage statistics using count_tokens
|
|
165
|
-
prompt_tokens = count_tokens(str(payload))
|
|
166
|
-
completion_tokens = count_tokens(full_text_response)
|
|
167
|
-
total_tokens = prompt_tokens + completion_tokens
|
|
168
|
-
|
|
169
|
-
usage = CompletionUsage(
|
|
170
|
-
prompt_tokens=prompt_tokens,
|
|
171
|
-
completion_tokens=completion_tokens,
|
|
172
|
-
total_tokens=total_tokens
|
|
173
|
-
)
|
|
174
|
-
|
|
175
|
-
# Create the message and choice objects
|
|
176
|
-
message = ChatCompletionMessage(
|
|
177
|
-
role="assistant",
|
|
178
|
-
content=full_text_response
|
|
179
|
-
)
|
|
180
|
-
choice = Choice(
|
|
181
|
-
index=0,
|
|
182
|
-
message=message,
|
|
183
|
-
finish_reason="stop"
|
|
184
|
-
)
|
|
185
|
-
|
|
186
|
-
# Create the completion object
|
|
187
|
-
completion = ChatCompletion(
|
|
188
|
-
id=request_id,
|
|
189
|
-
choices=[choice],
|
|
190
|
-
created=created_time,
|
|
191
|
-
model=model,
|
|
192
|
-
usage=usage
|
|
193
|
-
)
|
|
194
|
-
|
|
195
|
-
return completion
|
|
196
|
-
|
|
197
|
-
except Exception as e:
|
|
198
|
-
print(f"{RED}Error during FreeGemini non-stream request: {e}{RESET}")
|
|
199
|
-
raise IOError(f"FreeGemini request failed: {e}") from e
|
|
200
|
-
|
|
201
|
-
@staticmethod
|
|
202
|
-
def _gemini_extractor(data: Dict) -> Optional[str]:
|
|
203
|
-
"""Extract text content from Gemini API response stream data."""
|
|
204
|
-
try:
|
|
205
|
-
if "candidates" in data and data["candidates"]:
|
|
206
|
-
candidate = data["candidates"][0]
|
|
207
|
-
if "content" in candidate and "parts" in candidate["content"]:
|
|
208
|
-
parts = candidate["content"]["parts"]
|
|
209
|
-
if parts and "text" in parts[0]:
|
|
210
|
-
return parts[0]["text"]
|
|
211
|
-
except (KeyError, IndexError, TypeError):
|
|
212
|
-
pass
|
|
213
|
-
return None
|
|
214
|
-
|
|
215
|
-
class Chat(BaseChat):
|
|
216
|
-
def __init__(self, client: 'FreeGemini'):
|
|
217
|
-
self.completions = Completions(client)
|
|
218
|
-
|
|
219
|
-
|
|
class FreeGemini(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for the FreeGemini API.

    NOTE(review): the endpoint URL below hard-codes gemini-2.0-flash, so the
    ``model`` argument passed to ``create`` only labels the response objects;
    verify before adding models to AVAILABLE_MODELS.

    Usage:
        client = FreeGemini()
        response = client.chat.completions.create(
            model="gemini-2.0-flash",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """

    AVAILABLE_MODELS = ["gemini-2.0-flash"]

    def __init__(self, timeout: int = 30):
        """
        Initialize the FreeGemini client.

        Args:
            timeout: Request timeout in seconds
        """
        self.timeout = timeout
        # SSE endpoint of the free-gemini.vercel.app proxy.
        self.api_endpoint = "https://free-gemini.vercel.app/api/google/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse"

        # curl_cffi session — copes better with Cloudflare-fronted hosts.
        self.session = Session()

        # LitAgent provides a randomized browser fingerprint.
        self.agent = LitAgent()

        # Request headers sent with every call.
        self.headers = {
            "Content-Type": "application/json",
            "Accept": "application/json, text/event-stream",
            "User-Agent": self.agent.random(),
            "Origin": "https://free-gemini.vercel.app",
            "Referer": "https://free-gemini.vercel.app/",
        }
        self.session.headers.update(self.headers)

        # OpenAI-style entry point: client.chat.completions.create(...)
        self.chat = Chat(self)

    @property
    def models(self):
        """Return a minimal OpenAI-style object whose ``list()`` yields the models."""
        owner_cls = type(self)

        class _ModelList:
            def list(_inner):
                return owner_cls.AVAILABLE_MODELS

        return _ModelList()
|
-
if __name__ == "__main__":
|
|
276
|
-
# Example usage
|
|
277
|
-
client = FreeGemini()
|
|
278
|
-
conversation_prompt = "Hello!"
|
|
279
|
-
response = client.chat.completions.create(
|
|
280
|
-
model="gemini-2.0-flash",
|
|
281
|
-
messages=[{"role": "user", "parts": [{"text": conversation_prompt}]}]
|
|
282
|
-
)
|
|
283
|
-
print(response.choices[0].message.content)
|