webscout 8.2.9__py3-none-any.whl → 2026.1.19__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webscout/AIauto.py +524 -251
- webscout/AIbase.py +247 -319
- webscout/AIutel.py +68 -703
- webscout/Bard.py +1072 -1026
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/__init__.py +20 -12
- webscout/Extra/GitToolkit/gitapi/gist.py +142 -0
- webscout/Extra/GitToolkit/gitapi/organization.py +91 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +308 -195
- webscout/Extra/GitToolkit/gitapi/search.py +162 -0
- webscout/Extra/GitToolkit/gitapi/trending.py +236 -0
- webscout/Extra/GitToolkit/gitapi/user.py +128 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +82 -62
- webscout/Extra/YTToolkit/README.md +443 -375
- webscout/Extra/YTToolkit/YTdownloader.py +953 -957
- webscout/Extra/YTToolkit/__init__.py +3 -3
- webscout/Extra/YTToolkit/transcriber.py +595 -476
- webscout/Extra/YTToolkit/ytapi/README.md +230 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +22 -6
- webscout/Extra/YTToolkit/ytapi/captions.py +190 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +302 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +178 -118
- webscout/Extra/YTToolkit/ytapi/hashtag.py +120 -0
- webscout/Extra/YTToolkit/ytapi/https.py +89 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -59
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -8
- webscout/Extra/YTToolkit/ytapi/query.py +143 -40
- webscout/Extra/YTToolkit/ytapi/shorts.py +122 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +68 -63
- webscout/Extra/YTToolkit/ytapi/suggestions.py +97 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +66 -62
- webscout/Extra/YTToolkit/ytapi/video.py +403 -232
- webscout/Extra/__init__.py +2 -3
- webscout/Extra/gguf.py +1298 -684
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +28 -28
- webscout/Extra/tempmail/async_utils.py +143 -141
- webscout/Extra/tempmail/base.py +172 -161
- webscout/Extra/tempmail/cli.py +191 -187
- webscout/Extra/tempmail/emailnator.py +88 -84
- webscout/Extra/tempmail/mail_tm.py +378 -361
- webscout/Extra/tempmail/temp_mail_io.py +304 -292
- webscout/Extra/weather.py +196 -194
- webscout/Extra/weather_ascii.py +17 -15
- webscout/Provider/AISEARCH/PERPLEXED_search.py +175 -0
- webscout/Provider/AISEARCH/Perplexity.py +292 -333
- webscout/Provider/AISEARCH/README.md +106 -279
- webscout/Provider/AISEARCH/__init__.py +16 -9
- webscout/Provider/AISEARCH/brave_search.py +298 -0
- webscout/Provider/AISEARCH/iask_search.py +357 -410
- webscout/Provider/AISEARCH/monica_search.py +200 -220
- webscout/Provider/AISEARCH/webpilotai_search.py +242 -255
- webscout/Provider/Algion.py +413 -0
- webscout/Provider/Andi.py +74 -69
- webscout/Provider/Apriel.py +313 -0
- webscout/Provider/Ayle.py +323 -0
- webscout/Provider/ChatSandbox.py +329 -342
- webscout/Provider/ClaudeOnline.py +365 -0
- webscout/Provider/Cohere.py +232 -208
- webscout/Provider/DeepAI.py +367 -0
- webscout/Provider/Deepinfra.py +467 -340
- webscout/Provider/EssentialAI.py +217 -0
- webscout/Provider/ExaAI.py +274 -261
- webscout/Provider/Gemini.py +175 -169
- webscout/Provider/GithubChat.py +385 -369
- webscout/Provider/Gradient.py +286 -0
- webscout/Provider/Groq.py +556 -801
- webscout/Provider/HadadXYZ.py +323 -0
- webscout/Provider/HeckAI.py +392 -375
- webscout/Provider/HuggingFace.py +387 -0
- webscout/Provider/IBM.py +340 -0
- webscout/Provider/Jadve.py +317 -291
- webscout/Provider/K2Think.py +306 -0
- webscout/Provider/Koboldai.py +221 -384
- webscout/Provider/Netwrck.py +273 -270
- webscout/Provider/Nvidia.py +310 -0
- webscout/Provider/OPENAI/DeepAI.py +489 -0
- webscout/Provider/OPENAI/K2Think.py +423 -0
- webscout/Provider/OPENAI/PI.py +463 -0
- webscout/Provider/OPENAI/README.md +890 -952
- webscout/Provider/OPENAI/TogetherAI.py +405 -0
- webscout/Provider/OPENAI/TwoAI.py +255 -357
- webscout/Provider/OPENAI/__init__.py +148 -40
- webscout/Provider/OPENAI/ai4chat.py +348 -293
- webscout/Provider/OPENAI/akashgpt.py +436 -0
- webscout/Provider/OPENAI/algion.py +303 -0
- webscout/Provider/OPENAI/{exachat.py → ayle.py} +365 -444
- webscout/Provider/OPENAI/base.py +253 -249
- webscout/Provider/OPENAI/cerebras.py +296 -0
- webscout/Provider/OPENAI/chatgpt.py +870 -556
- webscout/Provider/OPENAI/chatsandbox.py +233 -173
- webscout/Provider/OPENAI/deepinfra.py +403 -322
- webscout/Provider/OPENAI/e2b.py +2370 -1414
- webscout/Provider/OPENAI/elmo.py +278 -0
- webscout/Provider/OPENAI/exaai.py +452 -417
- webscout/Provider/OPENAI/freeassist.py +446 -0
- webscout/Provider/OPENAI/gradient.py +448 -0
- webscout/Provider/OPENAI/groq.py +380 -364
- webscout/Provider/OPENAI/hadadxyz.py +292 -0
- webscout/Provider/OPENAI/heckai.py +333 -308
- webscout/Provider/OPENAI/huggingface.py +321 -0
- webscout/Provider/OPENAI/ibm.py +425 -0
- webscout/Provider/OPENAI/llmchat.py +253 -0
- webscout/Provider/OPENAI/llmchatco.py +378 -335
- webscout/Provider/OPENAI/meta.py +541 -0
- webscout/Provider/OPENAI/netwrck.py +374 -357
- webscout/Provider/OPENAI/nvidia.py +317 -0
- webscout/Provider/OPENAI/oivscode.py +348 -287
- webscout/Provider/OPENAI/openrouter.py +328 -0
- webscout/Provider/OPENAI/pydantic_imports.py +1 -172
- webscout/Provider/OPENAI/sambanova.py +397 -0
- webscout/Provider/OPENAI/sonus.py +305 -304
- webscout/Provider/OPENAI/textpollinations.py +370 -339
- webscout/Provider/OPENAI/toolbaz.py +375 -413
- webscout/Provider/OPENAI/typefully.py +419 -355
- webscout/Provider/OPENAI/typliai.py +279 -0
- webscout/Provider/OPENAI/utils.py +314 -318
- webscout/Provider/OPENAI/wisecat.py +359 -387
- webscout/Provider/OPENAI/writecream.py +185 -163
- webscout/Provider/OPENAI/x0gpt.py +462 -365
- webscout/Provider/OPENAI/zenmux.py +380 -0
- webscout/Provider/OpenRouter.py +386 -0
- webscout/Provider/Openai.py +337 -496
- webscout/Provider/PI.py +443 -429
- webscout/Provider/QwenLM.py +346 -254
- webscout/Provider/STT/__init__.py +28 -0
- webscout/Provider/STT/base.py +303 -0
- webscout/Provider/STT/elevenlabs.py +264 -0
- webscout/Provider/Sambanova.py +317 -0
- webscout/Provider/TTI/README.md +69 -82
- webscout/Provider/TTI/__init__.py +37 -7
- webscout/Provider/TTI/base.py +147 -64
- webscout/Provider/TTI/claudeonline.py +393 -0
- webscout/Provider/TTI/magicstudio.py +292 -201
- webscout/Provider/TTI/miragic.py +180 -0
- webscout/Provider/TTI/pollinations.py +331 -221
- webscout/Provider/TTI/together.py +334 -0
- webscout/Provider/TTI/utils.py +14 -11
- webscout/Provider/TTS/README.md +186 -192
- webscout/Provider/TTS/__init__.py +43 -10
- webscout/Provider/TTS/base.py +523 -159
- webscout/Provider/TTS/deepgram.py +286 -156
- webscout/Provider/TTS/elevenlabs.py +189 -111
- webscout/Provider/TTS/freetts.py +218 -0
- webscout/Provider/TTS/murfai.py +288 -113
- webscout/Provider/TTS/openai_fm.py +364 -129
- webscout/Provider/TTS/parler.py +203 -111
- webscout/Provider/TTS/qwen.py +334 -0
- webscout/Provider/TTS/sherpa.py +286 -0
- webscout/Provider/TTS/speechma.py +693 -580
- webscout/Provider/TTS/streamElements.py +275 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TextPollinationsAI.py +331 -308
- webscout/Provider/TogetherAI.py +450 -0
- webscout/Provider/TwoAI.py +309 -475
- webscout/Provider/TypliAI.py +311 -305
- webscout/Provider/UNFINISHED/ChatHub.py +219 -209
- webscout/Provider/{OPENAI/glider.py → UNFINISHED/ChutesAI.py} +331 -326
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +300 -295
- webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +218 -198
- webscout/Provider/UNFINISHED/Qodo.py +481 -0
- webscout/Provider/{MCPCore.py → UNFINISHED/XenAI.py} +330 -315
- webscout/Provider/UNFINISHED/Youchat.py +347 -330
- webscout/Provider/UNFINISHED/aihumanizer.py +41 -0
- webscout/Provider/UNFINISHED/grammerchecker.py +37 -0
- webscout/Provider/UNFINISHED/liner.py +342 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +246 -263
- webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +231 -224
- webscout/Provider/WiseCat.py +256 -233
- webscout/Provider/WrDoChat.py +390 -370
- webscout/Provider/__init__.py +115 -174
- webscout/Provider/ai4chat.py +181 -174
- webscout/Provider/akashgpt.py +330 -335
- webscout/Provider/cerebras.py +397 -290
- webscout/Provider/cleeai.py +236 -213
- webscout/Provider/elmo.py +291 -283
- webscout/Provider/geminiapi.py +343 -208
- webscout/Provider/julius.py +245 -223
- webscout/Provider/learnfastai.py +333 -325
- webscout/Provider/llama3mitril.py +230 -215
- webscout/Provider/llmchat.py +308 -258
- webscout/Provider/llmchatco.py +321 -306
- webscout/Provider/meta.py +996 -801
- webscout/Provider/oivscode.py +332 -309
- webscout/Provider/searchchat.py +316 -292
- webscout/Provider/sonus.py +264 -258
- webscout/Provider/toolbaz.py +359 -353
- webscout/Provider/turboseek.py +332 -266
- webscout/Provider/typefully.py +262 -202
- webscout/Provider/x0gpt.py +332 -299
- webscout/__init__.py +31 -39
- webscout/__main__.py +5 -5
- webscout/cli.py +585 -524
- webscout/client.py +1497 -70
- webscout/conversation.py +140 -436
- webscout/exceptions.py +383 -362
- webscout/litagent/__init__.py +29 -29
- webscout/litagent/agent.py +492 -455
- webscout/litagent/constants.py +60 -60
- webscout/models.py +505 -181
- webscout/optimizers.py +74 -420
- webscout/prompt_manager.py +376 -288
- webscout/sanitize.py +1514 -0
- webscout/scout/README.md +452 -404
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +7 -7
- webscout/scout/core/crawler.py +330 -210
- webscout/scout/core/scout.py +800 -607
- webscout/scout/core/search_result.py +51 -96
- webscout/scout/core/text_analyzer.py +64 -63
- webscout/scout/core/text_utils.py +412 -277
- webscout/scout/core/web_analyzer.py +54 -52
- webscout/scout/element.py +872 -478
- webscout/scout/parsers/__init__.py +70 -69
- webscout/scout/parsers/html5lib_parser.py +182 -172
- webscout/scout/parsers/html_parser.py +238 -236
- webscout/scout/parsers/lxml_parser.py +203 -178
- webscout/scout/utils.py +38 -37
- webscout/search/__init__.py +47 -0
- webscout/search/base.py +201 -0
- webscout/search/bing_main.py +45 -0
- webscout/search/brave_main.py +92 -0
- webscout/search/duckduckgo_main.py +57 -0
- webscout/search/engines/__init__.py +127 -0
- webscout/search/engines/bing/__init__.py +15 -0
- webscout/search/engines/bing/base.py +35 -0
- webscout/search/engines/bing/images.py +114 -0
- webscout/search/engines/bing/news.py +96 -0
- webscout/search/engines/bing/suggestions.py +36 -0
- webscout/search/engines/bing/text.py +109 -0
- webscout/search/engines/brave/__init__.py +19 -0
- webscout/search/engines/brave/base.py +47 -0
- webscout/search/engines/brave/images.py +213 -0
- webscout/search/engines/brave/news.py +353 -0
- webscout/search/engines/brave/suggestions.py +318 -0
- webscout/search/engines/brave/text.py +167 -0
- webscout/search/engines/brave/videos.py +364 -0
- webscout/search/engines/duckduckgo/__init__.py +25 -0
- webscout/search/engines/duckduckgo/answers.py +80 -0
- webscout/search/engines/duckduckgo/base.py +189 -0
- webscout/search/engines/duckduckgo/images.py +100 -0
- webscout/search/engines/duckduckgo/maps.py +183 -0
- webscout/search/engines/duckduckgo/news.py +70 -0
- webscout/search/engines/duckduckgo/suggestions.py +22 -0
- webscout/search/engines/duckduckgo/text.py +221 -0
- webscout/search/engines/duckduckgo/translate.py +48 -0
- webscout/search/engines/duckduckgo/videos.py +80 -0
- webscout/search/engines/duckduckgo/weather.py +84 -0
- webscout/search/engines/mojeek.py +61 -0
- webscout/search/engines/wikipedia.py +77 -0
- webscout/search/engines/yahoo/__init__.py +41 -0
- webscout/search/engines/yahoo/answers.py +19 -0
- webscout/search/engines/yahoo/base.py +34 -0
- webscout/search/engines/yahoo/images.py +323 -0
- webscout/search/engines/yahoo/maps.py +19 -0
- webscout/search/engines/yahoo/news.py +258 -0
- webscout/search/engines/yahoo/suggestions.py +140 -0
- webscout/search/engines/yahoo/text.py +273 -0
- webscout/search/engines/yahoo/translate.py +19 -0
- webscout/search/engines/yahoo/videos.py +302 -0
- webscout/search/engines/yahoo/weather.py +220 -0
- webscout/search/engines/yandex.py +67 -0
- webscout/search/engines/yep/__init__.py +13 -0
- webscout/search/engines/yep/base.py +34 -0
- webscout/search/engines/yep/images.py +101 -0
- webscout/search/engines/yep/suggestions.py +38 -0
- webscout/search/engines/yep/text.py +99 -0
- webscout/search/http_client.py +172 -0
- webscout/search/results.py +141 -0
- webscout/search/yahoo_main.py +57 -0
- webscout/search/yep_main.py +48 -0
- webscout/server/__init__.py +48 -0
- webscout/server/config.py +78 -0
- webscout/server/exceptions.py +69 -0
- webscout/server/providers.py +286 -0
- webscout/server/request_models.py +131 -0
- webscout/server/request_processing.py +404 -0
- webscout/server/routes.py +642 -0
- webscout/server/server.py +351 -0
- webscout/server/ui_templates.py +1171 -0
- webscout/swiftcli/__init__.py +79 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +574 -297
- webscout/swiftcli/core/context.py +98 -104
- webscout/swiftcli/core/group.py +268 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +243 -221
- webscout/swiftcli/decorators/options.py +247 -220
- webscout/swiftcli/decorators/output.py +392 -252
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +134 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +58 -59
- webscout/swiftcli/utils/formatting.py +251 -252
- webscout/swiftcli/utils/parsing.py +368 -267
- webscout/update_checker.py +280 -136
- webscout/utils.py +28 -14
- webscout/version.py +2 -1
- webscout/version.py.bak +3 -0
- webscout/zeroart/__init__.py +218 -135
- webscout/zeroart/base.py +70 -66
- webscout/zeroart/effects.py +155 -101
- webscout/zeroart/fonts.py +1799 -1239
- webscout-2026.1.19.dist-info/METADATA +638 -0
- webscout-2026.1.19.dist-info/RECORD +312 -0
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/WHEEL +1 -1
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/entry_points.txt +1 -1
- webscout/DWEBS.py +0 -520
- webscout/Extra/Act.md +0 -309
- webscout/Extra/GitToolkit/gitapi/README.md +0 -110
- webscout/Extra/autocoder/__init__.py +0 -9
- webscout/Extra/autocoder/autocoder.py +0 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +0 -332
- webscout/Extra/gguf.md +0 -430
- webscout/Extra/weather.md +0 -281
- webscout/Litlogger/README.md +0 -10
- webscout/Litlogger/__init__.py +0 -15
- webscout/Litlogger/formats.py +0 -4
- webscout/Litlogger/handlers.py +0 -103
- webscout/Litlogger/levels.py +0 -13
- webscout/Litlogger/logger.py +0 -92
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/AISEARCH/felo_search.py +0 -202
- webscout/Provider/AISEARCH/genspark_search.py +0 -324
- webscout/Provider/AISEARCH/hika_search.py +0 -186
- webscout/Provider/AISEARCH/scira_search.py +0 -298
- webscout/Provider/Aitopia.py +0 -316
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -791
- webscout/Provider/ChatGPTClone.py +0 -237
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/Cloudflare.py +0 -324
- webscout/Provider/ExaChat.py +0 -358
- webscout/Provider/Flowith.py +0 -217
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/Glider.py +0 -225
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/HuggingFaceChat.py +0 -469
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/LambdaChat.py +0 -411
- webscout/Provider/Llama3.py +0 -259
- webscout/Provider/Nemotron.py +0 -218
- webscout/Provider/OLLAMA.py +0 -396
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -766
- webscout/Provider/OPENAI/Cloudflare.py +0 -378
- webscout/Provider/OPENAI/FreeGemini.py +0 -283
- webscout/Provider/OPENAI/NEMOTRON.py +0 -232
- webscout/Provider/OPENAI/Qwen3.py +0 -283
- webscout/Provider/OPENAI/api.py +0 -969
- webscout/Provider/OPENAI/c4ai.py +0 -373
- webscout/Provider/OPENAI/chatgptclone.py +0 -494
- webscout/Provider/OPENAI/copilot.py +0 -242
- webscout/Provider/OPENAI/flowith.py +0 -162
- webscout/Provider/OPENAI/freeaichat.py +0 -359
- webscout/Provider/OPENAI/mcpcore.py +0 -389
- webscout/Provider/OPENAI/multichat.py +0 -376
- webscout/Provider/OPENAI/opkfc.py +0 -496
- webscout/Provider/OPENAI/scirachat.py +0 -477
- webscout/Provider/OPENAI/standardinput.py +0 -433
- webscout/Provider/OPENAI/typegpt.py +0 -364
- webscout/Provider/OPENAI/uncovrAI.py +0 -463
- webscout/Provider/OPENAI/venice.py +0 -431
- webscout/Provider/OPENAI/yep.py +0 -382
- webscout/Provider/OpenGPT.py +0 -209
- webscout/Provider/Perplexitylabs.py +0 -415
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/StandardInput.py +0 -290
- webscout/Provider/TTI/aiarta.py +0 -365
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/fastflux.py +0 -200
- webscout/Provider/TTI/piclumen.py +0 -203
- webscout/Provider/TTI/pixelmuse.py +0 -225
- webscout/Provider/TTS/gesserit.py +0 -128
- webscout/Provider/TTS/sthir.py +0 -94
- webscout/Provider/TeachAnything.py +0 -229
- webscout/Provider/UNFINISHED/puterjs.py +0 -635
- webscout/Provider/UNFINISHED/test_lmarena.py +0 -119
- webscout/Provider/Venice.py +0 -258
- webscout/Provider/VercelAI.py +0 -253
- webscout/Provider/Writecream.py +0 -246
- webscout/Provider/WritingMate.py +0 -269
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/chatglm.py +0 -215
- webscout/Provider/copilot.py +0 -425
- webscout/Provider/freeaichat.py +0 -285
- webscout/Provider/granite.py +0 -235
- webscout/Provider/hermes.py +0 -266
- webscout/Provider/koala.py +0 -170
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/multichat.py +0 -364
- webscout/Provider/scira_chat.py +0 -299
- webscout/Provider/scnet.py +0 -243
- webscout/Provider/talkai.py +0 -194
- webscout/Provider/typegpt.py +0 -289
- webscout/Provider/uncovr.py +0 -368
- webscout/Provider/yep.py +0 -389
- webscout/litagent/Readme.md +0 -276
- webscout/litprinter/__init__.py +0 -59
- webscout/swiftcli/Readme.md +0 -323
- webscout/tempid.py +0 -128
- webscout/webscout_search.py +0 -1184
- webscout/webscout_search_async.py +0 -654
- webscout/yep_search.py +0 -347
- webscout/zeroart/README.md +0 -89
- webscout-8.2.9.dist-info/METADATA +0 -1033
- webscout-8.2.9.dist-info/RECORD +0 -289
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/top_level.txt +0 -0
|
@@ -1,335 +1,378 @@
|
|
|
1
|
-
import
|
|
2
|
-
import
|
|
3
|
-
import
|
|
4
|
-
import
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
from .
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
#
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
"
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
|
|
323
|
-
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
|
|
334
|
-
|
|
335
|
-
|
|
1
|
+
import json
|
|
2
|
+
import time
|
|
3
|
+
import uuid
|
|
4
|
+
from typing import Any, Dict, Generator, List, Optional, Union, cast
|
|
5
|
+
|
|
6
|
+
import requests
|
|
7
|
+
|
|
8
|
+
# Import base classes and utility structures
|
|
9
|
+
from webscout.Provider.OPENAI.base import (
|
|
10
|
+
BaseChat,
|
|
11
|
+
BaseCompletions,
|
|
12
|
+
OpenAICompatibleProvider,
|
|
13
|
+
SimpleModelList,
|
|
14
|
+
)
|
|
15
|
+
from webscout.Provider.OPENAI.utils import (
|
|
16
|
+
ChatCompletion,
|
|
17
|
+
ChatCompletionChunk,
|
|
18
|
+
ChatCompletionMessage, # Import format_prompt
|
|
19
|
+
Choice,
|
|
20
|
+
ChoiceDelta,
|
|
21
|
+
CompletionUsage,
|
|
22
|
+
get_last_user_message,
|
|
23
|
+
get_system_prompt,
|
|
24
|
+
)
|
|
25
|
+
|
|
26
|
+
# Attempt to import LitAgent, fallback if not available
|
|
27
|
+
from ...litagent import LitAgent
|
|
28
|
+
|
|
29
|
+
# --- LLMChatCo Client ---
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class Completions(BaseCompletions):
|
|
33
|
+
def __init__(self, client: "LLMChatCo"):
|
|
34
|
+
self._client = client
|
|
35
|
+
|
|
36
|
+
def create(
|
|
37
|
+
self,
|
|
38
|
+
*,
|
|
39
|
+
model: str, # Model is now mandatory per request
|
|
40
|
+
messages: List[Dict[str, str]],
|
|
41
|
+
max_tokens: Optional[
|
|
42
|
+
int
|
|
43
|
+
] = 2048, # Note: LLMChatCo doesn't seem to use max_tokens directly in payload
|
|
44
|
+
stream: bool = False,
|
|
45
|
+
temperature: Optional[
|
|
46
|
+
float
|
|
47
|
+
] = None, # Note: LLMChatCo doesn't seem to use temperature directly in payload
|
|
48
|
+
top_p: Optional[
|
|
49
|
+
float
|
|
50
|
+
] = None, # Note: LLMChatCo doesn't seem to use top_p directly in payload
|
|
51
|
+
web_search: bool = False, # LLMChatCo specific parameter
|
|
52
|
+
system_prompt: Optional[
|
|
53
|
+
str
|
|
54
|
+
] = "You are a helpful assistant.", # Default system prompt if not provided
|
|
55
|
+
timeout: Optional[int] = None,
|
|
56
|
+
proxies: Optional[Dict[str, str]] = None,
|
|
57
|
+
**kwargs: Any,
|
|
58
|
+
) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
|
|
59
|
+
"""
|
|
60
|
+
Creates a model response for the given chat conversation.
|
|
61
|
+
Mimics openai.chat.completions.create
|
|
62
|
+
"""
|
|
63
|
+
if model not in self._client.AVAILABLE_MODELS:
|
|
64
|
+
# Raise error as model is mandatory and must be valid for this provider
|
|
65
|
+
raise ValueError(
|
|
66
|
+
f"Model '{model}' not supported by LLMChatCo. Available: {self._client.AVAILABLE_MODELS}"
|
|
67
|
+
)
|
|
68
|
+
actual_model = model
|
|
69
|
+
|
|
70
|
+
# Determine the effective system prompt
|
|
71
|
+
effective_system_prompt = system_prompt # Use the provided system_prompt or its default
|
|
72
|
+
get_system_prompt(messages)
|
|
73
|
+
# If a system prompt is also in messages, the explicit one takes precedence.
|
|
74
|
+
# We'll use the effective_system_prompt determined above.
|
|
75
|
+
|
|
76
|
+
# Prepare final messages list, ensuring only one system message at the start
|
|
77
|
+
final_messages = []
|
|
78
|
+
if effective_system_prompt:
|
|
79
|
+
final_messages.append({"role": "system", "content": effective_system_prompt})
|
|
80
|
+
final_messages.extend([msg for msg in messages if msg.get("role") != "system"])
|
|
81
|
+
|
|
82
|
+
# Extract the last user prompt using the utility function for the separate 'prompt' field
|
|
83
|
+
last_user_prompt = get_last_user_message(final_messages)
|
|
84
|
+
|
|
85
|
+
# Note: format_prompt is not directly used here as the API requires the structured 'messages' list
|
|
86
|
+
# and a separate 'prompt' field, rather than a single formatted string.
|
|
87
|
+
|
|
88
|
+
# Generate a unique ID for this message
|
|
89
|
+
thread_item_id = "".join(str(uuid.uuid4()).split("-"))[:20]
|
|
90
|
+
|
|
91
|
+
payload = {
|
|
92
|
+
"mode": actual_model,
|
|
93
|
+
"prompt": last_user_prompt, # LLMChatCo seems to require the last prompt separately
|
|
94
|
+
"threadId": self._client.thread_id,
|
|
95
|
+
"messages": final_messages, # Use the reconstructed final_messages list
|
|
96
|
+
"mcpConfig": {}, # Keep structure as observed
|
|
97
|
+
"threadItemId": thread_item_id,
|
|
98
|
+
"parentThreadItemId": "", # Assuming no parent for simplicity
|
|
99
|
+
"webSearch": web_search,
|
|
100
|
+
"showSuggestions": True, # Keep structure as observed
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
# Add any extra kwargs to the payload if needed, though LLMChatCo seems limited
|
|
104
|
+
payload.update(kwargs)
|
|
105
|
+
|
|
106
|
+
request_id = f"chatcmpl-{uuid.uuid4()}"
|
|
107
|
+
created_time = int(time.time())
|
|
108
|
+
|
|
109
|
+
if stream:
|
|
110
|
+
return self._create_stream(
|
|
111
|
+
request_id, created_time, actual_model, payload, timeout, proxies
|
|
112
|
+
)
|
|
113
|
+
else:
|
|
114
|
+
return self._create_non_stream(
|
|
115
|
+
request_id, created_time, actual_model, payload, timeout, proxies
|
|
116
|
+
)
|
|
117
|
+
|
|
118
|
+
def _create_stream(
|
|
119
|
+
self,
|
|
120
|
+
request_id: str,
|
|
121
|
+
created_time: int,
|
|
122
|
+
model: str,
|
|
123
|
+
payload: Dict[str, Any],
|
|
124
|
+
timeout: Optional[int] = None,
|
|
125
|
+
proxies: Optional[Dict[str, str]] = None,
|
|
126
|
+
) -> Generator[ChatCompletionChunk, None, None]:
|
|
127
|
+
try:
|
|
128
|
+
response = self._client.session.post(
|
|
129
|
+
self._client.api_endpoint,
|
|
130
|
+
headers=self._client.headers,
|
|
131
|
+
json=payload,
|
|
132
|
+
stream=True,
|
|
133
|
+
timeout=timeout or self._client.timeout,
|
|
134
|
+
proxies=proxies or getattr(self._client, "proxies", None),
|
|
135
|
+
)
|
|
136
|
+
|
|
137
|
+
if not response.ok:
|
|
138
|
+
raise IOError(
|
|
139
|
+
f"LLMChatCo API Error: {response.status_code} {response.reason} - {response.text}"
|
|
140
|
+
)
|
|
141
|
+
|
|
142
|
+
full_response_text = ""
|
|
143
|
+
current_event = None
|
|
144
|
+
buffer = ""
|
|
145
|
+
|
|
146
|
+
for chunk_bytes in response.iter_content(chunk_size=None, decode_unicode=False):
|
|
147
|
+
if not chunk_bytes:
|
|
148
|
+
continue
|
|
149
|
+
|
|
150
|
+
buffer += chunk_bytes.decode("utf-8", errors="replace")
|
|
151
|
+
|
|
152
|
+
while "\n" in buffer:
|
|
153
|
+
line, buffer = buffer.split("\n", 1)
|
|
154
|
+
line = line.strip()
|
|
155
|
+
|
|
156
|
+
if not line: # End of an event block
|
|
157
|
+
current_event = None
|
|
158
|
+
continue
|
|
159
|
+
|
|
160
|
+
if line.startswith("event:"):
|
|
161
|
+
current_event = line[len("event:") :].strip()
|
|
162
|
+
elif line.startswith("data:"):
|
|
163
|
+
data_content = line[len("data:") :].strip()
|
|
164
|
+
if data_content and current_event == "answer":
|
|
165
|
+
try:
|
|
166
|
+
json_data = json.loads(data_content)
|
|
167
|
+
answer_data = json_data.get("answer", {})
|
|
168
|
+
text_chunk = answer_data.get("text", "")
|
|
169
|
+
full_text = answer_data.get("fullText")
|
|
170
|
+
status = answer_data.get("status")
|
|
171
|
+
|
|
172
|
+
# Prefer fullText if available and status is COMPLETED
|
|
173
|
+
if full_text is not None and status == "COMPLETED":
|
|
174
|
+
delta_content = full_text[len(full_response_text) :]
|
|
175
|
+
full_response_text = full_text # Update full response tracker
|
|
176
|
+
elif text_chunk is not None:
|
|
177
|
+
# Calculate delta based on potentially partial 'text' field
|
|
178
|
+
delta_content = text_chunk[len(full_response_text) :]
|
|
179
|
+
full_response_text = text_chunk # Update full response tracker
|
|
180
|
+
else:
|
|
181
|
+
delta_content = None
|
|
182
|
+
|
|
183
|
+
if delta_content:
|
|
184
|
+
delta = ChoiceDelta(content=delta_content, role="assistant")
|
|
185
|
+
choice = Choice(index=0, delta=delta, finish_reason=None)
|
|
186
|
+
chunk = ChatCompletionChunk(
|
|
187
|
+
id=request_id,
|
|
188
|
+
choices=[choice],
|
|
189
|
+
created=created_time,
|
|
190
|
+
model=model,
|
|
191
|
+
)
|
|
192
|
+
yield chunk
|
|
193
|
+
|
|
194
|
+
except json.JSONDecodeError:
|
|
195
|
+
print(f"Warning: Could not decode JSON data line: {data_content}")
|
|
196
|
+
continue
|
|
197
|
+
elif data_content and current_event == "done":
|
|
198
|
+
# The 'done' event signals the end of the stream
|
|
199
|
+
delta = ChoiceDelta() # Empty delta
|
|
200
|
+
choice = Choice(index=0, delta=delta, finish_reason="stop")
|
|
201
|
+
chunk = ChatCompletionChunk(
|
|
202
|
+
id=request_id,
|
|
203
|
+
choices=[choice],
|
|
204
|
+
created=created_time,
|
|
205
|
+
model=model,
|
|
206
|
+
)
|
|
207
|
+
yield chunk
|
|
208
|
+
return # End the generator
|
|
209
|
+
|
|
210
|
+
except requests.exceptions.RequestException as e:
|
|
211
|
+
print(f"Error during LLMChatCo stream request: {e}")
|
|
212
|
+
raise IOError(f"LLMChatCo request failed: {e}") from e
|
|
213
|
+
except Exception as e:
|
|
214
|
+
print(f"Unexpected error during LLMChatCo stream: {e}")
|
|
215
|
+
raise IOError(f"LLMChatCo stream processing failed: {e}") from e
|
|
216
|
+
|
|
217
|
+
# Fallback final chunk if 'done' event wasn't received properly
|
|
218
|
+
delta = ChoiceDelta()
|
|
219
|
+
choice = Choice(index=0, delta=delta, finish_reason="stop")
|
|
220
|
+
chunk = ChatCompletionChunk(
|
|
221
|
+
id=request_id,
|
|
222
|
+
choices=[choice],
|
|
223
|
+
created=created_time,
|
|
224
|
+
model=model,
|
|
225
|
+
)
|
|
226
|
+
yield chunk
|
|
227
|
+
|
|
228
|
+
def _create_non_stream(
    self,
    request_id: str,
    created_time: int,
    model: str,
    payload: Dict[str, Any],
    timeout: Optional[int] = None,
    proxies: Optional[Dict[str, str]] = None,
) -> ChatCompletion:
    """Drain the SSE stream and fold it into one ChatCompletion.

    The upstream endpoint only streams, so a non-streaming call is
    implemented by consuming ``_create_stream`` chunk by chunk and
    concatenating every delta into a single assistant message.

    Args:
        request_id: Identifier copied onto the resulting completion.
        created_time: Unix timestamp copied onto the resulting completion.
        model: Model name copied onto the resulting completion.
        payload: Request body forwarded to the streaming call.
        timeout: Optional per-request timeout in seconds.
        proxies: Optional proxy mapping forwarded to the streaming call.

    Returns:
        A ChatCompletion with the accumulated text; ``finish_reason`` is
        ``"error"`` if the underlying stream raised IOError.
    """
    text_parts: list = []
    final_reason = "stop"  # assume a clean stop unless told otherwise

    try:
        chunks = self._create_stream(
            request_id, created_time, model, payload, timeout, proxies
        )
        for piece in chunks:
            if not piece.choices:
                continue
            first = piece.choices[0]
            if first.delta and first.delta.content:
                text_parts.append(first.delta.content)
            if first.finish_reason:
                final_reason = first.finish_reason
    except IOError as e:
        print(f"Error obtaining non-stream response from LLMChatCo: {e}")
        # Best-effort: keep whatever text arrived before the failure and
        # flag the problem through the finish reason instead of raising.
        final_reason = "error"

    message = ChatCompletionMessage(role="assistant", content="".join(text_parts))
    choice = Choice(index=0, message=message, finish_reason=final_reason)
    # The API reports no token counts, so usage is always zeroed out.
    usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)

    return ChatCompletion(
        id=request_id,
        choices=[choice],
        created=created_time,
        model=model,
        usage=usage,
    )
|
|
271
|
+
|
|
272
|
+
|
|
273
|
+
class Chat(BaseChat):
    """Namespace exposing the OpenAI-style ``chat.completions`` interface."""

    def __init__(self, client: "LLMChatCo"):
        # Hand the owning client to the completions endpoint object so the
        # familiar client.chat.completions.create(...) call shape works.
        self.completions = Completions(client)
|
|
276
|
+
|
|
277
|
+
|
|
278
|
+
class LLMChatCo(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for LLMChat.co API.

    No API key is required. Each client instance generates its own
    conversation thread id, which is embedded in the Referer header.

    Usage:
        client = LLMChatCo()
        response = client.chat.completions.create(
            model="gemini-flash-2.0", # Model must be specified here
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """

    required_auth = False  # No API key required for LLMChatCo
    AVAILABLE_MODELS = [
        "gemini-flash-2.0",  # Default model
        "llama-4-scout",
        "gpt-4o-mini",
        # "gpt-4.1",
        # "gpt-4.1-mini",
        "gpt-4.1-nano",
    ]

    # Baseline fingerprint. Used verbatim when LitAgent fails, and merged
    # underneath a successful fingerprint so any key LitAgent omits falls
    # back to these values instead of raising KeyError during __init__.
    _FALLBACK_FINGERPRINT = {
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "accept_language": "en-US,en;q=0.9",
        "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
        "platform": "Windows",
        "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
    }

    def __init__(
        self,
        timeout: int = 60,
        browser: str = "chrome",  # For User-Agent generation
    ):
        """
        Initialize the LLMChatCo client.

        Args:
            timeout: Request timeout in seconds.
            browser: Browser name for LitAgent to generate User-Agent.
        """
        self.timeout = timeout
        self.api_endpoint = "https://llmchat.co/api/completion"
        self.session = requests.Session()
        self.thread_id = str(uuid.uuid4())  # Unique thread ID per client instance

        # Initialize LitAgent for user agent generation and fingerprinting.
        try:
            agent = LitAgent()
            fingerprint = agent.generate_fingerprint(browser=browser)
        except Exception as e:
            print(f"Warning: Failed to generate fingerprint with LitAgent: {e}. Using fallback.")
            fingerprint = {}

        # Merge so that a partial fingerprint never crashes header setup:
        # generated values win, missing keys come from the class fallback.
        fp = {**self._FALLBACK_FINGERPRINT, **(fingerprint or {})}

        # Initialize headers using the fingerprint.
        self.headers = {
            "Accept": fp["accept"],
            "Accept-Language": fp["accept_language"],
            "Content-Type": "application/json",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Origin": "https://llmchat.co",  # Specific origin for LLMChatCo
            "Pragma": "no-cache",
            "Referer": f"https://llmchat.co/chat/{self.thread_id}",  # Specific referer for LLMChatCo
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Sec-CH-UA": fp["sec_ch_ua"]
            or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',  # Fallback if empty
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": f'"{fp["platform"]}"',
            "User-Agent": fp["user_agent"],
            "DNT": "1",  # Added back from previous version
        }
        self.session.headers.update(self.headers)

        # Initialize the chat interface.
        self.chat = Chat(self)

    @property
    def models(self) -> SimpleModelList:
        """Expose the static model list through the OpenAI-style interface."""
        return SimpleModelList(type(self).AVAILABLE_MODELS)
|
|
366
|
+
|
|
367
|
+
|
|
368
|
+
if __name__ == "__main__":
|
|
369
|
+
# Example usage
|
|
370
|
+
client = LLMChatCo()
|
|
371
|
+
response = client.chat.completions.create(
|
|
372
|
+
model="gemini-flash-2.0",
|
|
373
|
+
messages=[{"role": "user", "content": "Hello, how are you?"}],
|
|
374
|
+
stream=False,
|
|
375
|
+
)
|
|
376
|
+
if isinstance(response, ChatCompletion):
|
|
377
|
+
if response.choices[0].message and response.choices[0].message.content:
|
|
378
|
+
print(response.choices[0].message.content)
|