webscout 8.2__tar.gz → 8.2.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- {webscout-8.2/webscout.egg-info → webscout-8.2.2}/PKG-INFO +1 -1
- {webscout-8.2 → webscout-8.2.2}/webscout/Bard.py +5 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/tempmail/__init__.py +2 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/tempmail/base.py +6 -1
- webscout-8.2.2/webscout/Extra/tempmail/emailnator.py +84 -0
- webscout-8.2.2/webscout/Local/__init__.py +12 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Local/cli.py +178 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Local/llm.py +104 -5
- {webscout-8.2 → webscout-8.2.2}/webscout/Local/model_manager.py +48 -0
- webscout-8.2.2/webscout/Local/server.py +721 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/Gemini.py +2 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/__init__.py +2 -1
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/c4ai.py +22 -2
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/deepinfra.py +1 -13
- webscout-8.2.2/webscout/Provider/OPENAI/e2b.py +1350 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/exaai.py +1 -16
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/freeaichat.py +1 -4
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/typegpt.py +1 -16
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/venice.py +1 -16
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/writecream.py +2 -4
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/x0gpt.py +2 -20
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/yep.py +2 -4
- webscout-8.2.2/webscout/version.py +2 -0
- {webscout-8.2 → webscout-8.2.2/webscout.egg-info}/PKG-INFO +1 -1
- {webscout-8.2 → webscout-8.2.2}/webscout.egg-info/SOURCES.txt +2 -0
- webscout-8.2/webscout/Local/__init__.py +0 -6
- webscout-8.2/webscout/Local/server.py +0 -187
- webscout-8.2/webscout/version.py +0 -2
- {webscout-8.2 → webscout-8.2.2}/LICENSE.md +0 -0
- {webscout-8.2 → webscout-8.2.2}/README.md +0 -0
- {webscout-8.2 → webscout-8.2.2}/inferno/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/inferno/__main__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/inferno/cli.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/setup.cfg +0 -0
- {webscout-8.2 → webscout-8.2.2}/setup.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/AIauto.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/AIbase.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/AIutel.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/DWEBS.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/GitToolkit/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/GitToolkit/gitapi/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/GitToolkit/gitapi/repository.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/GitToolkit/gitapi/user.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/GitToolkit/gitapi/utils.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/YTToolkit/YTdownloader.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/YTToolkit/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/YTToolkit/transcriber.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/YTToolkit/ytapi/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/YTToolkit/ytapi/channel.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/YTToolkit/ytapi/errors.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/YTToolkit/ytapi/extras.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/YTToolkit/ytapi/https.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/YTToolkit/ytapi/patterns.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/YTToolkit/ytapi/playlist.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/YTToolkit/ytapi/pool.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/YTToolkit/ytapi/query.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/YTToolkit/ytapi/stream.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/YTToolkit/ytapi/utils.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/YTToolkit/ytapi/video.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/autocoder/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/autocoder/autocoder.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/autocoder/autocoder_utiles.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/gguf.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/tempmail/async_utils.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/tempmail/cli.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/tempmail/mail_tm.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/tempmail/temp_mail_io.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/weather.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Extra/weather_ascii.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/LLM.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Litlogger/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Litlogger/core/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Litlogger/core/level.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Litlogger/core/logger.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Litlogger/handlers/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Litlogger/handlers/console.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Litlogger/handlers/file.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Litlogger/handlers/network.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Litlogger/styles/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Litlogger/styles/colors.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Litlogger/styles/formats.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Litlogger/styles/text.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Litlogger/utils/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Litlogger/utils/detectors.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Litlogger/utils/formatters.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Local/__main__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Local/api.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Local/config.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Local/utils.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/AI21.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/AISEARCH/DeepFind.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/AISEARCH/ISou.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/AISEARCH/Perplexity.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/AISEARCH/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/AISEARCH/felo_search.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/AISEARCH/genspark_search.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/AISEARCH/hika_search.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/AISEARCH/iask_search.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/AISEARCH/monica_search.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/AISEARCH/scira_search.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/AISEARCH/webpilotai_search.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/Aitopia.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/AllenAI.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/Andi.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/Blackboxai.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/C4ai.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/ChatGPTClone.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/ChatGPTES.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/ChatGPTGratis.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/Chatify.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/Cloudflare.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/Cohere.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/DeepSeek.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/Deepinfra.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/ElectronHub.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/ExaAI.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/ExaChat.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/Free2GPT.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/GPTWeb.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/GithubChat.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/Glider.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/Groq.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/HF_space/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/HF_space/qwen_qwen2.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/HeckAI.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/HuggingFaceChat.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/Hunyuan.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/Jadve.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/Koboldai.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/LambdaChat.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/Llama.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/Llama3.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/Marcus.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/Netwrck.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OLLAMA.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/base.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/chatgpt.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/chatgptclone.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/exachat.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/glider.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/heckai.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/llmchatco.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/netwrck.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/opkfc.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/scirachat.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/sonus.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/standardinput.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/textpollinations.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/toolbaz.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/uncovrAI.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/utils.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OPENAI/wisecat.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/OpenGPT.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/Openai.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/PI.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/Perplexitylabs.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/Phind.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/PizzaGPT.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/QwenLM.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/Reka.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/StandardInput.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/AiForce/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/AiForce/async_aiforce.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/ImgSys/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/MagicStudio/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/Nexra/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/Nexra/async_nexra.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/Nexra/sync_nexra.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/PollinationsAI/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/aiarta/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/aiarta/async_aiarta.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/artbit/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/artbit/async_artbit.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/artbit/sync_artbit.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/fastflux/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/fastflux/async_fastflux.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/huggingface/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/huggingface/async_huggingface.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/piclumen/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/piclumen/async_piclumen.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/pixelmuse/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/talkai/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/talkai/async_talkai.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTI/talkai/sync_talkai.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTS/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTS/deepgram.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTS/elevenlabs.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTS/gesserit.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTS/murfai.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTS/parler.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTS/speechma.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTS/streamElements.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TTS/utils.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TeachAnything.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TextPollinationsAI.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/TwoAI.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/Venice.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/VercelAI.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/WebSim.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/WiseCat.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/Writecream.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/WritingMate.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/Youchat.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/ai4chat.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/aimathgpt.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/akashgpt.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/askmyai.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/asksteve.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/bagoodex.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/cerebras.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/chatglm.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/cleeai.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/copilot.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/elmo.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/freeaichat.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/gaurish.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/geminiapi.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/geminiprorealtime.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/granite.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/hermes.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/julius.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/koala.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/labyrinth.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/learnfastai.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/lepton.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/llama3mitril.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/llamatutor.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/llmchat.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/llmchatco.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/meta.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/multichat.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/promptrefine.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/scira_chat.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/scnet.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/searchchat.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/sonus.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/talkai.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/toolbaz.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/turboseek.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/tutorai.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/typefully.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/typegpt.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/uncovr.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/x0gpt.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/Provider/yep.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/__main__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/cli.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/conversation.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/exceptions.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/litagent/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/litagent/agent.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/litagent/constants.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/litprinter/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/models.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/optimizers.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/prompt_manager.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/scout/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/scout/core/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/scout/core/crawler.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/scout/core/scout.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/scout/core/search_result.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/scout/core/text_analyzer.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/scout/core/text_utils.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/scout/core/web_analyzer.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/scout/core.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/scout/element.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/scout/parsers/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/scout/parsers/html5lib_parser.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/scout/parsers/html_parser.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/scout/parsers/lxml_parser.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/scout/utils.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/swiftcli/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/tempid.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/update_checker.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/utils.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/webscout_search.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/webscout_search_async.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/yep_search.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/zeroart/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/zeroart/base.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/zeroart/effects.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout/zeroart/fonts.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout.egg-info/dependency_links.txt +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout.egg-info/entry_points.txt +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout.egg-info/requires.txt +0 -0
- {webscout-8.2 → webscout-8.2.2}/webscout.egg-info/top_level.txt +0 -0
- {webscout-8.2 → webscout-8.2.2}/webstoken/__init__.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webstoken/classifier.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webstoken/keywords.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webstoken/language.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webstoken/ner.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webstoken/normalizer.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webstoken/processor.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webstoken/sentiment.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webstoken/stemmer.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webstoken/tagger.py +0 -0
- {webscout-8.2 → webscout-8.2.2}/webstoken/tokenizer.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 8.2
+Version: 8.2.2
 Summary: Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
 Author: OEvortex
 Author-email: helpingai5@gmail.com
webscout/Bard.py
@@ -76,6 +76,11 @@ class Model(Enum):
         {"x-goog-ext-525001261-jspb": '[null,null,null,null,"203e6bb81620bcfe"]'},
         True,
     )
+    G_2_5_FLASH = (
+        "gemini-2.5-flash",
+        {"x-goog-ext-525001261-jspb": '[1,null,null,null,"35609594dbe934d8"]'},
+        False,
+    )

     def __init__(self, name, header, advanced_only):
         self.model_name = name
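For context, a minimal sketch of selecting the newly added enum member; only `Model`, its constructor, and the `model_name` attribute appear in this diff, so the rest of the Bard client API is assumed unchanged:

from webscout.Bard import Model

# New Gemini 2.5 Flash entry in 8.2.2; the tuple maps to
# (model_name, request header, advanced_only) per Model.__init__ above.
model = Model.G_2_5_FLASH
print(model.model_name)   # "gemini-2.5-flash"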
webscout/Extra/tempmail/__init__.py
@@ -12,6 +12,7 @@ from .base import (
 )
 from .mail_tm import MailTM, MailTMAsync
 from .temp_mail_io import TempMailIO, TempMailIOAsync
+from .emailnator import EmailnatorProvider

 __all__ = [
     'TempMailProvider',
@@ -20,6 +21,7 @@ __all__ = [
     'MailTMAsync',
     'TempMailIO',
     'TempMailIOAsync',
+    'EmailnatorProvider',
     'get_random_email',
     'get_disposable_email',
     'get_provider'
webscout/Extra/tempmail/base.py
@@ -99,7 +99,7 @@ def get_provider(provider_name: str = "mailtm", async_provider: bool = False) ->
     Get a temporary email provider instance

     Args:
-        provider_name: Name of the provider to use ("mailtm" or "
+        provider_name: Name of the provider to use ("mailtm", "tempmailio", or "emailnator")
         async_provider: Whether to return an async provider

     Returns:
@@ -109,6 +109,8 @@ def get_provider(provider_name: str = "mailtm", async_provider: bool = False) ->
         if provider_name.lower() == "tempmailio":
             from .temp_mail_io import TempMailIOAsync
             return TempMailIOAsync()
+        elif provider_name.lower() == "emailnator":
+            raise NotImplementedError("Emailnator async provider not implemented.")
         else:
             from .mail_tm import MailTMAsync
             return MailTMAsync()
@@ -116,6 +118,9 @@ def get_provider(provider_name: str = "mailtm", async_provider: bool = False) ->
         if provider_name.lower() == "tempmailio":
             from .temp_mail_io import TempMailIO
             return TempMailIO()
+        elif provider_name.lower() == "emailnator":
+            from .emailnator import EmailnatorProvider
+            return EmailnatorProvider()
+        else:
             from .mail_tm import MailTM
             return MailTM()
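A short sketch of the updated `get_provider` factory, based only on the branches shown above (the sync path now returns `EmailnatorProvider`; the async path raises `NotImplementedError`):

from webscout.Extra.tempmail import get_provider

provider = get_provider("emailnator")   # synchronous EmailnatorProvider
# get_provider("emailnator", async_provider=True) raises NotImplementedError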
webscout/Extra/tempmail/emailnator.py (new file)
@@ -0,0 +1,84 @@
+"""
+Emailnator Provider Implementation
+Synchronous provider for Emailnator.com
+"""
+
+from typing import List, Dict
+from time import sleep
+from requests import Session
+from webscout.litagent import LitAgent
+from .base import TempMailProvider
+from json import loads
+from re import findall
+
+class EmailnatorProvider(TempMailProvider):
+    def __init__(self):
+        self.client = Session()
+        self.client.get("https://www.emailnator.com/", timeout=6)
+        self.cookies = self.client.cookies.get_dict()
+        self.user_agent = LitAgent()
+        self.client.headers = {
+            "authority": "www.emailnator.com",
+            "origin": "https://www.emailnator.com",
+            "referer": "https://www.emailnator.com/",
+            "user-agent": self.user_agent.random(),
+            "x-xsrf-token": self.client.cookies.get("XSRF-TOKEN")[:-3] + "=",
+        }
+        self.email = None
+        self._messages = []
+        self._account_deleted = False
+
+    def create_account(self) -> bool:
+        response = self.client.post(
+            "https://www.emailnator.com/generate-email",
+            json={"email": ["plusGmail", "dotGmail"]},
+        )
+        self.email = loads(response.text)["email"][0]
+        return bool(self.email)
+
+    def get_messages(self) -> List[Dict]:
+        # Wait for at least one message
+        for _ in range(30):  # Wait up to 60 seconds
+            sleep(2)
+            mail_token = self.client.post(
+                "https://www.emailnator.com/message-list", json={"email": self.email}
+            )
+            mail_token = loads(mail_token.text)["messageData"]
+            if len(mail_token) > 1:
+                break
+        else:
+            return []
+        # Get message details
+        messages = []
+        for msg in mail_token[1:]:
+            msg_id = msg["messageID"]
+            mail_context = self.client.post(
+                "https://www.emailnator.com/message-list",
+                json={"email": self.email, "messageID": msg_id},
+            )
+            # The response is HTML, so we just store it as body
+            messages.append({
+                "msg_id": msg_id,
+                "from": msg.get("from", ""),
+                "subject": msg.get("subject", ""),
+                "body": mail_context.text,
+            })
+        self._messages = messages
+        return messages
+
+    def check_new_messages(self) -> List[Dict]:
+        current = self.get_messages()
+        if not self._messages:
+            return current
+        # Return only new messages
+        old_ids = {m["msg_id"] for m in self._messages}
+        new_msgs = [m for m in current if m["msg_id"] not in old_ids]
+        return new_msgs
+
+    def delete_account(self) -> bool:
+        # Emailnator does not support explicit account deletion, so just mark as deleted
+        self._account_deleted = True
+        return True
+
+    def get_account_info(self) -> Dict:
+        return {"email": self.email, "deleted": self._account_deleted}
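Going by the implementation above, a minimal polling example; note that `get_messages()` re-polls Emailnator every 2 seconds for up to 60 seconds, so it can block:

from webscout.Extra.tempmail.emailnator import EmailnatorProvider

mail = EmailnatorProvider()                 # contacts emailnator.com at construction time
if mail.create_account():
    print("Address:", mail.email)
    for message in mail.get_messages():     # blocks until mail arrives or ~60 s pass
        print(message["subject"], message["from"])
    print(mail.get_account_info())          # {"email": ..., "deleted": False}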
webscout/Local/__init__.py (new file)
@@ -0,0 +1,12 @@
+"""
+Webscout.Local - A llama-cpp-python based LLM serving tool with Ollama-compatible API
+"""
+from webscout.version import __version__
+
+# Import main components for easier access
+from .llm import LLMInterface
+from .model_manager import ModelManager
+from .server import start_server
+
+# Define what's available when using `from webscout.Local import *`
+__all__ = ["LLMInterface", "ModelManager", "start_server"]
webscout/Local/cli.py
@@ -17,6 +17,16 @@ console: Console = Console()

 model_manager: ModelManager = ModelManager()

+# RAM requirements for different model sizes
+RAM_REQUIREMENTS = {
+    "1B": "2 GB",
+    "3B": "4 GB",
+    "7B": "8 GB",
+    "13B": "16 GB",
+    "33B": "32 GB",
+    "70B": "64 GB",
+}
+
 @app.command("serve")
 def run_model(
     model_string: str = typer.Argument(..., help="Model to run (format: 'name', 'repo_id' or 'repo_id:filename')"),
@@ -55,6 +65,16 @@ def run_model(
         console.print(f"[bold red]Error downloading model: {str(e)}[/bold red]")
         return

+    # Check RAM requirements
+    ram_requirement = "Unknown"
+    for size, ram in RAM_REQUIREMENTS.items():
+        if size in model_name:
+            ram_requirement = ram
+            break
+
+    if ram_requirement != "Unknown":
+        console.print(f"[yellow]This model requires approximately {ram_requirement} of RAM[/yellow]")
+
     # Try to load the model to verify it works
     try:
         llm = LLMInterface(model_name)
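The RAM hint above is a plain, case-sensitive substring match of each size key against the model name, so it only fires when the name literally contains a key such as "7B"; a standalone illustration (hypothetical model name):

RAM_REQUIREMENTS = {"1B": "2 GB", "3B": "4 GB", "7B": "8 GB", "13B": "16 GB", "33B": "32 GB", "70B": "64 GB"}

model_name = "mistral-7B-instruct.Q4_K_M"   # hypothetical name for illustration
ram = next((ram for size, ram in RAM_REQUIREMENTS.items() if size in model_name), "Unknown")
print(ram)   # "8 GB"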
@@ -96,12 +116,42 @@ def list_models() -> None:
     table.add_column("Name", style="cyan")
     table.add_column("Repository", style="green")
     table.add_column("Filename", style="blue")
+    table.add_column("Size", style="magenta")
+    table.add_column("Downloaded", style="yellow")

     for model in models:
+        # Get file size in human-readable format
+        file_path = model.get("path")
+        file_size = "Unknown"
+        if file_path:
+            try:
+                import os
+                size_bytes = os.path.getsize(file_path)
+                # Convert to human-readable format
+                for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
+                    if size_bytes < 1024.0 or unit == 'TB':
+                        file_size = f"{size_bytes:.2f} {unit}"
+                        break
+                    size_bytes /= 1024.0
+            except Exception:
+                pass
+
+        # Format downloaded date
+        downloaded_at = model.get("downloaded_at", "Unknown")
+        if downloaded_at != "Unknown":
+            try:
+                import datetime
+                dt = datetime.datetime.fromisoformat(downloaded_at)
+                downloaded_at = dt.strftime("%Y-%m-%d %H:%M")
+            except Exception:
+                pass
+
         table.add_row(
             model["name"],
             model.get("repo_id", "Unknown"),
             model.get("filename", "Unknown"),
+            file_size,
+            downloaded_at,
         )

     console.print(table)
@@ -181,6 +231,16 @@ def chat(
         console.print(f"[bold red]Error downloading model: {str(e)}[/bold red]")
         return

+    # Check RAM requirements
+    ram_requirement = "Unknown"
+    for size, ram in RAM_REQUIREMENTS.items():
+        if size in model_name:
+            ram_requirement = ram
+            break
+
+    if ram_requirement != "Unknown":
+        console.print(f"[yellow]This model requires approximately {ram_requirement} of RAM[/yellow]")
+
     # Load the model
     try:
         llm = LLMInterface(model_name)
@@ -325,6 +385,124 @@ def chat(
     # Add extra spacing after the response
     console.print("")

+@app.command("copy")
+def copy_model(
+    source: str = typer.Argument(..., help="Name of the source model"),
+    destination: str = typer.Argument(..., help="Name for the destination model"),
+) -> None:
+    """
+    Copy a model to a new name.
+    """
+    try:
+        if model_manager.copy_model(source, destination):
+            console.print(f"[bold green]Model {source} copied to {destination} successfully[/bold green]")
+        else:
+            console.print(f"[bold red]Failed to copy model {source} to {destination}[/bold red]")
+    except Exception as e:
+        console.print(f"[bold red]Error copying model: {str(e)}[/bold red]")
+
+@app.command("show")
+def show_model(
+    model_name: str = typer.Argument(..., help="Name of the model to show information for"),
+    verbose: bool = typer.Option(False, "--verbose", "-v", help="Show detailed information"),
+) -> None:
+    """
+    Show detailed information about a model.
+    """
+    model_info = model_manager.get_model_info(model_name)
+
+    if not model_info:
+        console.print(f"[yellow]Model {model_name} not found.[/yellow]")
+        return
+
+    # Create a table for basic information
+    table = Table(title=f"Model Information: {model_name}")
+    table.add_column("Property", style="cyan")
+    table.add_column("Value", style="green")
+
+    # Add basic properties
+    table.add_row("Name", model_info["name"])
+    table.add_row("Repository", model_info.get("repo_id", "Unknown"))
+    table.add_row("Filename", model_info.get("filename", "Unknown"))
+
+    # Get file size in human-readable format
+    file_path = model_info.get("path")
+    if file_path:
+        try:
+            import os
+            size_bytes = os.path.getsize(file_path)
+            # Convert to human-readable format
+            for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
+                if size_bytes < 1024.0 or unit == 'TB':
+                    file_size = f"{size_bytes:.2f} {unit}"
+                    break
+                size_bytes /= 1024.0
+            table.add_row("Size", file_size)
+        except Exception:
+            table.add_row("Size", "Unknown")
+
+    # Format downloaded date
+    downloaded_at = model_info.get("downloaded_at", "Unknown")
+    if downloaded_at != "Unknown":
+        try:
+            import datetime
+            dt = datetime.datetime.fromisoformat(downloaded_at)
+            downloaded_at = dt.strftime("%Y-%m-%d %H:%M")
+        except Exception:
+            pass
+    table.add_row("Downloaded", downloaded_at)
+
+    # Add copied information if available
+    if "copied_from" in model_info:
+        table.add_row("Copied From", model_info["copied_from"])
+        copied_at = model_info.get("copied_at", "Unknown")
+        if copied_at != "Unknown":
+            try:
+                import datetime
+                dt = datetime.datetime.fromisoformat(copied_at)
+                copied_at = dt.strftime("%Y-%m-%d %H:%M")
+            except Exception:
+                pass
+        table.add_row("Copied At", copied_at)
+
+    # Estimate RAM requirements based on model name
+    ram_requirement = "Unknown"
+    for size, ram in RAM_REQUIREMENTS.items():
+        if size in model_name:
+            ram_requirement = ram
+            break
+    table.add_row("Estimated RAM", ram_requirement)
+
+    # Print the table
+    console.print(table)
+
+    # If verbose, show all properties
+    if verbose:
+        console.print("\n[bold]Detailed Information:[/bold]")
+        for key, value in model_info.items():
+            if key not in ["name", "repo_id", "filename", "path", "downloaded_at", "copied_from", "copied_at"]:
+                console.print(f"[cyan]{key}:[/cyan] {value}")
+
+@app.command("ps")
+def list_running_models() -> None:
+    """
+    List running models.
+    """
+    from .server import loaded_models
+
+    if not loaded_models:
+        console.print("[yellow]No models currently running.[/yellow]")
+        return
+
+    table = Table(title="Running Models")
+    table.add_column("Name", style="cyan")
+    table.add_column("Status", style="green")
+
+    for name in loaded_models.keys():
+        table.add_row(name, "Running")
+
+    console.print(table)
+
 @app.command("version")
 def version() -> None:
     """
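The new `copy`, `show`, and `ps` commands are plain Typer commands registered on the existing `app`, so they can be exercised programmatically without knowing the installed console-script name; a sketch using Typer's test runner (model names hypothetical):

from typer.testing import CliRunner
from webscout.Local.cli import app

runner = CliRunner()
print(runner.invoke(app, ["show", "my-model", "--verbose"]).stdout)       # model details table
print(runner.invoke(app, ["ps"]).stdout)                                  # currently loaded models
print(runner.invoke(app, ["copy", "my-model", "my-model-backup"]).stdout)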
webscout/Local/llm.py
@@ -37,29 +37,75 @@ class LLMInterface:
             raise ValueError(f"Model {model_name} not found. Please download it first.")
         self.llm = None

-    def load_model(
+    def load_model(
+        self,
+        n_gpu_layers: Optional[int] = None,
+        n_ctx: Optional[int] = None,
+        verbose: bool = False,
+        n_threads: Optional[int] = None,
+        n_batch: Optional[int] = None,
+        use_mlock: bool = False,
+        use_mmap: bool = True,
+        rope_freq_base: Optional[float] = None,
+        rope_freq_scale: Optional[float] = None,
+        low_vram: bool = False,
+    ) -> None:
         """
         Load the model into memory.
         Args:
             n_gpu_layers (Optional[int]): Number of layers to offload to GPU (-1 for all).
             n_ctx (Optional[int]): Context size.
             verbose (bool): Whether to show verbose output.
+            n_threads (Optional[int]): Number of threads to use.
+            n_batch (Optional[int]): Batch size for prompt processing.
+            use_mlock (bool): Whether to use mlock to keep model in memory.
+            use_mmap (bool): Whether to use memory mapping for the model.
+            rope_freq_base (Optional[float]): RoPE base frequency.
+            rope_freq_scale (Optional[float]): RoPE frequency scaling factor.
+            low_vram (bool): Whether to optimize for low VRAM usage.
         Raises:
             ValueError: If model loading fails.
         """
+        # If model is already loaded, check if we need to reload with different parameters
+        if self.llm is not None:
+            if n_ctx is not None and hasattr(self.llm, 'n_ctx') and self.llm.n_ctx != n_ctx:
+                # Need to reload with new context size
+                self.llm = None
+            else:
+                # Model already loaded with compatible parameters
+                return
+
         if n_gpu_layers is None:
             n_gpu_layers = config.get("default_gpu_layers", -1)
         if n_ctx is None:
             n_ctx = config.get("default_context_length", 4096)
+
+        # Determine number of threads if not specified
+        if n_threads is None:
+            import multiprocessing
+            n_threads = max(1, multiprocessing.cpu_count() // 2)
+
         console.print(f"[bold blue]Loading model {self.model_name}...[/bold blue]")
         try:
             self.llm = Llama(
                 model_path=self.model_path,
                 n_gpu_layers=n_gpu_layers,
                 n_ctx=n_ctx,
-                verbose=verbose
+                verbose=verbose,
+                n_threads=n_threads,
+                n_batch=n_batch or 512,
+                use_mlock=use_mlock,
+                use_mmap=use_mmap,
+                rope_freq_base=rope_freq_base,
+                rope_freq_scale=rope_freq_scale,
+                low_vram=low_vram,
             )
+
             console.print(f"[bold green]Model {self.model_name} loaded successfully[/bold green]")
+            if verbose:
+                console.print(f"[dim]Using {n_threads} threads, context size: {n_ctx}[/dim]")
+                if n_gpu_layers and n_gpu_layers > 0:
+                    console.print(f"[dim]GPU acceleration: {n_gpu_layers} layers offloaded to GPU[/dim]")
         except Exception as e:
             raise ValueError(f"Failed to load model from file: {self.model_path}\n{str(e)}")

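A sketch of the expanded `load_model` signature, based on the parameters shown above (model name hypothetical; keywords left out fall back to config defaults or llama-cpp-python defaults):

from webscout.Local.llm import LLMInterface

llm = LLMInterface("my-model")   # hypothetical name, model must already be downloaded
llm.load_model(
    n_gpu_layers=-1,   # offload all layers to GPU
    n_ctx=8192,        # context size
    n_threads=8,       # CPU threads (defaults to half the cores)
    n_batch=512,       # prompt-processing batch size
    use_mlock=True,    # pin model memory
)
llm.load_model(n_ctx=8192)       # per the reload check above, a second call is a no-op unless n_ctx changes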
@@ -71,6 +117,13 @@ class LLMInterface:
         top_p: float = 0.95,
         stream: bool = False,
         stop: Optional[List[str]] = None,
+        suffix: Optional[str] = None,
+        images: Optional[List[str]] = None,
+        system: Optional[str] = None,
+        template: Optional[str] = None,
+        context: Optional[List[int]] = None,
+        raw: bool = False,
+        format: Optional[Union[str, Dict[str, Any]]] = None,
     ) -> Union[Dict[str, Any], Generator[Dict[str, Any], None, None]]:
         """
         Create a completion for the given prompt.
@@ -107,12 +160,14 @@ class LLMInterface:

     def create_chat_completion(
         self,
-        messages: List[Dict[str,
+        messages: List[Dict[str, Any]],
         max_tokens: int = 256,
         temperature: float = 0.7,
         top_p: float = 0.95,
         stream: bool = False,
         stop: Optional[List[str]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+        format: Optional[Union[str, Dict[str, Any]]] = None,
     ) -> Union[Dict[str, Any], Generator[Dict[str, Any], None, None]]:
         """
         Create a chat completion for the given messages.
@@ -156,22 +211,26 @@ class LLMInterface:

     def stream_chat_completion(
         self,
-        messages: List[Dict[str,
+        messages: List[Dict[str, Any]],
         callback: Callable[[str], None],
         max_tokens: int = 256,
         temperature: float = 0.7,
         top_p: float = 0.95,
         stop: Optional[List[str]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+        format: Optional[Union[str, Dict[str, Any]]] = None,
     ) -> None:
         """
         Stream a chat completion with a callback for each token.
         Args:
-            messages (List[Dict[str,
+            messages (List[Dict[str, Any]]): List of chat messages.
             callback (Callable[[str], None]): Function to call with each token.
             max_tokens (int): Maximum number of tokens to generate.
             temperature (float): Sampling temperature.
             top_p (float): Top-p sampling.
             stop (Optional[List[str]]): List of strings to stop generation when encountered.
+            tools (Optional[List[Dict[str, Any]]]): List of tools for function calling.
+            format (Optional[Union[str, Dict[str, Any]]]): Format for structured output.
         """
         stream = self.create_chat_completion(
             messages=messages,
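Based on the signatures above, a minimal chat sketch; `tools` and `format` are newly accepted parameters in 8.2.2, and `stream_chat_completion` drives a per-token callback (model name hypothetical):

from webscout.Local.llm import LLMInterface

llm = LLMInterface("my-model")   # hypothetical model name
messages = [{"role": "user", "content": "Say hello in one sentence."}]

# Blocking call; returns a dict (or a generator when stream=True)
result = llm.create_chat_completion(messages=messages, max_tokens=64)

# Streaming call: the callback receives each content token as it is generated
llm.stream_chat_completion(messages, callback=lambda token: print(token, end=""))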
@@ -186,3 +245,43 @@ class LLMInterface:
             if "delta" in chunk["choices"][0] and "content" in chunk["choices"][0]["delta"]:
                 content = chunk["choices"][0]["delta"]["content"]
                 callback(content)
+
+    def create_embeddings(
+        self,
+        input: Union[str, List[str]],
+        truncate: bool = True,
+    ) -> Dict[str, Any]:
+        """
+        Generate embeddings for the given input.
+        Args:
+            input (Union[str, List[str]]): Text or list of texts to generate embeddings for.
+            truncate (bool): Whether to truncate the input to fit within context length.
+        Returns:
+            Dict[str, Any]: Embeddings response.
+        """
+        if self.llm is None:
+            self.load_model()
+
+        # Convert input to list if it's a string
+        if isinstance(input, str):
+            input_texts = [input]
+        else:
+            input_texts = input
+
+        # Generate embeddings for each input text
+        embeddings = []
+        for text in input_texts:
+            # Use llama-cpp-python's embedding method
+            embedding = self.llm.embed(text)
+            embeddings.append(embedding)
+
+        # Create response
+        response = {
+            "model": self.model_name,
+            "embeddings": embeddings,
+            "total_duration": 0,  # Could be improved with actual timing
+            "load_duration": 0,  # Could be improved with actual timing
+            "prompt_eval_count": len(input_texts)
+        }
+
+        return response
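A usage sketch of the new `create_embeddings` helper, which calls llama-cpp-python's `embed()` once per input string and returns a dict with the embeddings plus some bookkeeping fields (model name hypothetical):

from webscout.Local.llm import LLMInterface

llm = LLMInterface("my-embedding-model")   # hypothetical model name
resp = llm.create_embeddings(["first text", "second text"])
vectors = resp["embeddings"]               # one embedding per input string
print(resp["model"], resp["prompt_eval_count"])   # model name, 2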
webscout/Local/model_manager.py
@@ -203,3 +203,51 @@ class ModelManager:
             return model_info.get("path")
         return None
         return info["path"]
+
+    def copy_model(self, source_model: str, destination_model: str) -> bool:
+        """
+        Copy a model to a new name.
+        Args:
+            source_model (str): Name of the source model.
+            destination_model (str): Name for the destination model.
+        Returns:
+            bool: True if copied successfully, False otherwise.
+        """
+        # Get source model info
+        source_info = self.get_model_info(source_model)
+        if not source_info or "path" not in source_info:
+            console.print(f"[bold red]Source model {source_model} not found[/bold red]")
+            return False
+
+        # Create destination directory
+        dest_dir = config.get_model_path(destination_model)
+        dest_dir.mkdir(exist_ok=True, parents=True)
+
+        # Copy the model file
+        source_path = Path(source_info["path"])
+        dest_path = dest_dir / source_path.name
+
+        try:
+            console.print(f"[bold blue]Copying model from {source_path} to {dest_path}...[/bold blue]")
+            shutil.copy2(source_path, dest_path)
+
+            # Create info file for the destination model
+            dest_info = source_info.copy()
+            dest_info["name"] = destination_model
+            dest_info["path"] = str(dest_path)
+            dest_info["copied_from"] = source_model
+            dest_info["copied_at"] = datetime.datetime.now().isoformat()
+
+            with open(dest_dir / "info.json", "w") as f:
+                json.dump(dest_info, f, indent=2)
+
+            console.print(f"[bold green]Model copied successfully to {dest_path}[/bold green]")
+            return True
+        except Exception as e:
+            console.print(f"[bold red]Error copying model: {str(e)}[/bold red]")
+            # Clean up if there was an error
+            if dest_path.exists():
+                dest_path.unlink()
+            if dest_dir.exists():
+                shutil.rmtree(dest_dir)
+            return False
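A sketch of `ModelManager.copy_model`, which duplicates the model file into a new model directory and writes an `info.json` recording `copied_from`/`copied_at`; this is what the new `copy` CLI command calls under the hood (model names hypothetical):

from webscout.Local.model_manager import ModelManager

manager = ModelManager()
ok = manager.copy_model("my-model", "my-model-backup")   # hypothetical names
print("copied" if ok else "copy failed")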