webscout-8.2.2-py3-none-any.whl → webscout-8.2.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIauto.py +112 -22
- webscout/AIbase.py +144 -7
- webscout/AIutel.py +249 -131
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/__init__.py +0 -1
- webscout/cli.py +256 -0
- webscout/conversation.py +307 -436
- webscout/exceptions.py +23 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info}/METADATA +172 -52
- webscout-8.2.7.dist-info/RECORD +26 -0
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/entry_points.txt +3 -0
- webscout-8.2.7.dist-info/top_level.txt +1 -0
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/Extra/GitToolkit/__init__.py +0 -10
- webscout/Extra/GitToolkit/gitapi/__init__.py +0 -12
- webscout/Extra/GitToolkit/gitapi/repository.py +0 -195
- webscout/Extra/GitToolkit/gitapi/user.py +0 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +0 -62
- webscout/Extra/YTToolkit/YTdownloader.py +0 -957
- webscout/Extra/YTToolkit/__init__.py +0 -3
- webscout/Extra/YTToolkit/transcriber.py +0 -476
- webscout/Extra/YTToolkit/ytapi/__init__.py +0 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +0 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +0 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +0 -45
- webscout/Extra/YTToolkit/ytapi/https.py +0 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +0 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +0 -59
- webscout/Extra/YTToolkit/ytapi/pool.py +0 -8
- webscout/Extra/YTToolkit/ytapi/query.py +0 -40
- webscout/Extra/YTToolkit/ytapi/stream.py +0 -63
- webscout/Extra/YTToolkit/ytapi/utils.py +0 -62
- webscout/Extra/YTToolkit/ytapi/video.py +0 -232
- webscout/Extra/__init__.py +0 -7
- webscout/Extra/autocoder/__init__.py +0 -9
- webscout/Extra/autocoder/autocoder.py +0 -849
- webscout/Extra/autocoder/autocoder_utiles.py +0 -332
- webscout/Extra/gguf.py +0 -682
- webscout/Extra/tempmail/__init__.py +0 -28
- webscout/Extra/tempmail/async_utils.py +0 -141
- webscout/Extra/tempmail/base.py +0 -161
- webscout/Extra/tempmail/cli.py +0 -187
- webscout/Extra/tempmail/emailnator.py +0 -84
- webscout/Extra/tempmail/mail_tm.py +0 -361
- webscout/Extra/tempmail/temp_mail_io.py +0 -292
- webscout/Extra/weather.py +0 -194
- webscout/Extra/weather_ascii.py +0 -76
- webscout/LLM.py +0 -442
- webscout/Litlogger/__init__.py +0 -67
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/AISEARCH/DeepFind.py +0 -250
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/AISEARCH/Perplexity.py +0 -359
- webscout/Provider/AISEARCH/__init__.py +0 -10
- webscout/Provider/AISEARCH/felo_search.py +0 -228
- webscout/Provider/AISEARCH/genspark_search.py +0 -208
- webscout/Provider/AISEARCH/hika_search.py +0 -194
- webscout/Provider/AISEARCH/iask_search.py +0 -436
- webscout/Provider/AISEARCH/monica_search.py +0 -246
- webscout/Provider/AISEARCH/scira_search.py +0 -324
- webscout/Provider/AISEARCH/webpilotai_search.py +0 -281
- webscout/Provider/Aitopia.py +0 -292
- webscout/Provider/AllenAI.py +0 -413
- webscout/Provider/Andi.py +0 -228
- webscout/Provider/Blackboxai.py +0 -229
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTClone.py +0 -226
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/Cloudflare.py +0 -273
- webscout/Provider/Cohere.py +0 -208
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Deepinfra.py +0 -297
- webscout/Provider/ElectronHub.py +0 -709
- webscout/Provider/ExaAI.py +0 -261
- webscout/Provider/ExaChat.py +0 -342
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -193
- webscout/Provider/Gemini.py +0 -169
- webscout/Provider/GithubChat.py +0 -367
- webscout/Provider/Glider.py +0 -211
- webscout/Provider/Groq.py +0 -670
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/HeckAI.py +0 -233
- webscout/Provider/HuggingFaceChat.py +0 -462
- webscout/Provider/Hunyuan.py +0 -272
- webscout/Provider/Jadve.py +0 -266
- webscout/Provider/Koboldai.py +0 -381
- webscout/Provider/LambdaChat.py +0 -392
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Llama3.py +0 -204
- webscout/Provider/Marcus.py +0 -148
- webscout/Provider/Netwrck.py +0 -228
- webscout/Provider/OLLAMA.py +0 -396
- webscout/Provider/OPENAI/__init__.py +0 -25
- webscout/Provider/OPENAI/base.py +0 -46
- webscout/Provider/OPENAI/c4ai.py +0 -367
- webscout/Provider/OPENAI/chatgpt.py +0 -549
- webscout/Provider/OPENAI/chatgptclone.py +0 -460
- webscout/Provider/OPENAI/deepinfra.py +0 -272
- webscout/Provider/OPENAI/e2b.py +0 -1350
- webscout/Provider/OPENAI/exaai.py +0 -404
- webscout/Provider/OPENAI/exachat.py +0 -433
- webscout/Provider/OPENAI/freeaichat.py +0 -352
- webscout/Provider/OPENAI/glider.py +0 -316
- webscout/Provider/OPENAI/heckai.py +0 -337
- webscout/Provider/OPENAI/llmchatco.py +0 -327
- webscout/Provider/OPENAI/netwrck.py +0 -348
- webscout/Provider/OPENAI/opkfc.py +0 -488
- webscout/Provider/OPENAI/scirachat.py +0 -463
- webscout/Provider/OPENAI/sonus.py +0 -294
- webscout/Provider/OPENAI/standardinput.py +0 -425
- webscout/Provider/OPENAI/textpollinations.py +0 -285
- webscout/Provider/OPENAI/toolbaz.py +0 -405
- webscout/Provider/OPENAI/typegpt.py +0 -346
- webscout/Provider/OPENAI/uncovrAI.py +0 -455
- webscout/Provider/OPENAI/utils.py +0 -211
- webscout/Provider/OPENAI/venice.py +0 -413
- webscout/Provider/OPENAI/wisecat.py +0 -381
- webscout/Provider/OPENAI/writecream.py +0 -156
- webscout/Provider/OPENAI/x0gpt.py +0 -371
- webscout/Provider/OPENAI/yep.py +0 -327
- webscout/Provider/OpenGPT.py +0 -199
- webscout/Provider/Openai.py +0 -496
- webscout/Provider/PI.py +0 -344
- webscout/Provider/Perplexitylabs.py +0 -415
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/PizzaGPT.py +0 -198
- webscout/Provider/QwenLM.py +0 -254
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/StandardInput.py +0 -278
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/__init__.py +0 -12
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/TTS/__init__.py +0 -7
- webscout/Provider/TTS/deepgram.py +0 -156
- webscout/Provider/TTS/elevenlabs.py +0 -111
- webscout/Provider/TTS/gesserit.py +0 -127
- webscout/Provider/TTS/murfai.py +0 -113
- webscout/Provider/TTS/parler.py +0 -111
- webscout/Provider/TTS/speechma.py +0 -180
- webscout/Provider/TTS/streamElements.py +0 -333
- webscout/Provider/TTS/utils.py +0 -280
- webscout/Provider/TeachAnything.py +0 -187
- webscout/Provider/TextPollinationsAI.py +0 -231
- webscout/Provider/TwoAI.py +0 -199
- webscout/Provider/Venice.py +0 -219
- webscout/Provider/VercelAI.py +0 -234
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/WiseCat.py +0 -196
- webscout/Provider/Writecream.py +0 -211
- webscout/Provider/WritingMate.py +0 -197
- webscout/Provider/Youchat.py +0 -330
- webscout/Provider/__init__.py +0 -198
- webscout/Provider/ai4chat.py +0 -202
- webscout/Provider/aimathgpt.py +0 -189
- webscout/Provider/akashgpt.py +0 -342
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/asksteve.py +0 -203
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/cerebras.py +0 -242
- webscout/Provider/chatglm.py +0 -205
- webscout/Provider/cleeai.py +0 -213
- webscout/Provider/copilot.py +0 -428
- webscout/Provider/elmo.py +0 -234
- webscout/Provider/freeaichat.py +0 -271
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/geminiapi.py +0 -208
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/Provider/granite.py +0 -187
- webscout/Provider/hermes.py +0 -219
- webscout/Provider/julius.py +0 -223
- webscout/Provider/koala.py +0 -268
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/learnfastai.py +0 -266
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llama3mitril.py +0 -180
- webscout/Provider/llamatutor.py +0 -192
- webscout/Provider/llmchat.py +0 -213
- webscout/Provider/llmchatco.py +0 -311
- webscout/Provider/meta.py +0 -794
- webscout/Provider/multichat.py +0 -325
- webscout/Provider/promptrefine.py +0 -193
- webscout/Provider/scira_chat.py +0 -277
- webscout/Provider/scnet.py +0 -187
- webscout/Provider/searchchat.py +0 -293
- webscout/Provider/sonus.py +0 -208
- webscout/Provider/talkai.py +0 -194
- webscout/Provider/toolbaz.py +0 -320
- webscout/Provider/turboseek.py +0 -219
- webscout/Provider/tutorai.py +0 -252
- webscout/Provider/typefully.py +0 -280
- webscout/Provider/typegpt.py +0 -232
- webscout/Provider/uncovr.py +0 -312
- webscout/Provider/x0gpt.py +0 -256
- webscout/Provider/yep.py +0 -376
- webscout/litagent/__init__.py +0 -29
- webscout/litagent/agent.py +0 -455
- webscout/litagent/constants.py +0 -60
- webscout/litprinter/__init__.py +0 -59
- webscout/scout/__init__.py +0 -8
- webscout/scout/core/__init__.py +0 -7
- webscout/scout/core/crawler.py +0 -140
- webscout/scout/core/scout.py +0 -568
- webscout/scout/core/search_result.py +0 -96
- webscout/scout/core/text_analyzer.py +0 -63
- webscout/scout/core/text_utils.py +0 -277
- webscout/scout/core/web_analyzer.py +0 -52
- webscout/scout/core.py +0 -881
- webscout/scout/element.py +0 -460
- webscout/scout/parsers/__init__.py +0 -69
- webscout/scout/parsers/html5lib_parser.py +0 -172
- webscout/scout/parsers/html_parser.py +0 -236
- webscout/scout/parsers/lxml_parser.py +0 -178
- webscout/scout/utils.py +0 -37
- webscout/swiftcli/__init__.py +0 -809
- webscout/zeroart/__init__.py +0 -55
- webscout/zeroart/base.py +0 -60
- webscout/zeroart/effects.py +0 -99
- webscout/zeroart/fonts.py +0 -816
- webscout-8.2.2.dist-info/RECORD +0 -309
- webscout-8.2.2.dist-info/entry_points.txt +0 -5
- webscout-8.2.2.dist-info/top_level.txt +0 -3
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/multichat.py
DELETED
@@ -1,325 +0,0 @@
-import requests
-import json
-import uuid
-from typing import Any, Dict, Union
-from datetime import datetime
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
-from webscout.AIbase import Provider
-from webscout import exceptions
-from webscout.litagent import LitAgent
-
-# Model configurations
-MODEL_CONFIGS = {
-    "llama": {
-        "endpoint": "https://www.multichatai.com/api/chat/meta",
-        "models": {
-            "llama-3.3-70b-versatile": {"contextLength": 131072},
-            "llama-3.2-11b-vision-preview": {"contextLength": 32768},
-            "deepseek-r1-distill-llama-70b": {"contextLength": 128000},
-        },
-    },
-    "cohere": {
-        "endpoint": "https://www.multichatai.com/api/chat/cohere",
-        "models": {
-            "command-r": {"contextLength": 128000},
-            "command": {"contextLength": 4096},
-        },
-    },
-    "google": {
-        "endpoint": "https://www.multichatai.com/api/chat/google",
-        "models": {
-            "gemini-1.5-flash-002": {"contextLength": 1048576},
-            "gemma2-9b-it": {"contextLength": 8192},
-            "gemini-2.0-flash": {"contextLength": 128000},
-        },
-        "message_format": "parts",
-    },
-    "deepinfra": {
-        "endpoint": "https://www.multichatai.com/api/chat/deepinfra",
-        "models": {
-            "Sao10K/L3.1-70B-Euryale-v2.2": {"contextLength": 8192},
-            "Gryphe/MythoMax-L2-13b": {"contextLength": 8192},
-            "nvidia/Llama-3.1-Nemotron-70B-Instruct": {"contextLength": 131072},
-            "deepseek-ai/DeepSeek-V3": {"contextLength": 32000},
-            "meta-llama/Meta-Llama-3.1-405B-Instruct": {"contextLength": 131072},
-            "NousResearch/Hermes-3-Llama-3.1-405B": {"contextLength": 131072},
-            "gemma-2-27b-it": {"contextLength": 8192},
-        },
-    },
-    "mistral": {
-        "endpoint": "https://www.multichatai.com/api/chat/mistral",
-        "models": {
-            "mistral-small-latest": {"contextLength": 32000},
-            "codestral-latest": {"contextLength": 32000},
-            "open-mistral-7b": {"contextLength": 8000},
-            "open-mixtral-8x7b": {"contextLength": 8000},
-        },
-    },
-    "alibaba": {
-        "endpoint": "https://www.multichatai.com/api/chat/alibaba",
-        "models": {
-            "Qwen/Qwen2.5-72B-Instruct": {"contextLength": 32768},
-            "Qwen/Qwen2.5-Coder-32B-Instruct": {"contextLength": 32768},
-            "Qwen/QwQ-32B-Preview": {"contextLength": 32768},
-        },
-    },
-}
-
-class MultiChatAI(Provider):
-    """
-    A class to interact with the MultiChatAI API.
-    """
-    AVAILABLE_MODELS = [
-        # Llama Models
-        "llama-3.3-70b-versatile",
-        "llama-3.2-11b-vision-preview",
-        "deepseek-r1-distill-llama-70b",
-
-        # Cohere Models
-        # "command-r", # >>>> NOT WORKING
-        # "command", # >>>> NOT WORKING
-
-        # Google Models
-        # "gemini-1.5-flash-002", #>>>> NOT WORKING
-        "gemma2-9b-it",
-        "gemini-2.0-flash",
-
-        # DeepInfra Models
-        "Sao10K/L3.1-70B-Euryale-v2.2",
-        "Gryphe/MythoMax-L2-13b",
-        "nvidia/Llama-3.1-Nemotron-70B-Instruct",
-        "deepseek-ai/DeepSeek-V3",
-        "meta-llama/Meta-Llama-3.1-405B-Instruct",
-        "NousResearch/Hermes-3-Llama-3.1-405B",
-        # "gemma-2-27b-it", # >>>> NOT WORKING
-
-        # Mistral Models
-        # "mistral-small-latest", # >>>> NOT WORKING
-        # "codestral-latest", # >>>> NOT WORKING
-        # "open-mistral-7b", # >>>> NOT WORKING
-        # "open-mixtral-8x7b", # >>>> NOT WORKING
-
-        # Alibaba Models
-        "Qwen/Qwen2.5-72B-Instruct",
-        "Qwen/Qwen2.5-Coder-32B-Instruct",
-        "Qwen/QwQ-32B-Preview"
-    ]
-
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 4000,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        model: str = "llama-3.3-70b-versatile",
-        system_prompt: str = "You are a friendly, helpful AI assistant.",
-        temperature: float = 0.5,
-        presence_penalty: int = 0,
-        frequency_penalty: int = 0,
-        top_p: float = 1
-    ):
-        """Initializes the MultiChatAI API client."""
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-        self.session = requests.Session()
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.timeout = timeout
-        self.last_response = {}
-        self.model = model
-        self.system_prompt = system_prompt
-        self.temperature = temperature
-        self.presence_penalty = presence_penalty
-        self.frequency_penalty = frequency_penalty
-        self.top_p = top_p
-
-        # Initialize LitAgent for user agent generation
-        self.agent = LitAgent()
-
-        self.headers = {
-            "accept": "*/*",
-            "accept-language": "en-US,en;q=0.9",
-            "content-type": "text/plain;charset=UTF-8",
-            "origin": "https://www.multichatai.com",
-            "referer": "https://www.multichatai.com/",
-            "user-agent": self.agent.random(),
-        }
-
-        self.session.headers.update(self.headers)
-        self.session.proxies = proxies
-        self.session.cookies.update({"session": uuid.uuid4().hex})
-
-        self.__available_optimizers = (
-            method for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-
-        self.provider = self._get_provider_from_model(self.model)
-        self.model_name = self.model
-
-    def _get_endpoint(self) -> str:
-        """Get the API endpoint for the current provider."""
-        return MODEL_CONFIGS[self.provider]["endpoint"]
-
-    def _get_chat_settings(self) -> Dict[str, Any]:
-        """Get chat settings for the current model."""
-        base_settings = MODEL_CONFIGS[self.provider]["models"][self.model_name]
-        return {
-            "model": self.model,
-            "prompt": self.system_prompt,
-            "temperature": self.temperature,
-            "contextLength": base_settings["contextLength"],
-            "includeProfileContext": True,
-            "includeWorkspaceInstructions": True,
-            "embeddingsProvider": "openai"
-        }
-
-    def _get_system_message(self) -> str:
-        """Generate system message with current date."""
-        current_date = datetime.now().strftime("%d/%m/%Y")
-        return f"Today is {current_date}.\n\nUser Instructions:\n{self.system_prompt}"
-
-    def _build_messages(self, conversation_prompt: str) -> list:
-        """Build messages array based on provider type."""
-        if self.provider == "google":
-            return [
-                {"role": "user", "parts": self._get_system_message()},
-                {"role": "model", "parts": "I will follow your instructions."},
-                {"role": "user", "parts": conversation_prompt}
-            ]
-        else:
-            return [
-                {"role": "system", "content": self._get_system_message()},
-                {"role": "user", "content": conversation_prompt}
-            ]
-
-    def _get_provider_from_model(self, model: str) -> str:
-        """Determine the provider based on the model name."""
-        for provider, config in MODEL_CONFIGS.items():
-            if model in config["models"]:
-                return provider
-
-        available_models = []
-        for provider, config in MODEL_CONFIGS.items():
-            for model_name in config["models"].keys():
-                available_models.append(f"{provider}/{model_name}")
-
-        error_msg = f"Invalid model: {model}\nAvailable models: {', '.join(available_models)}"
-        raise ValueError(error_msg)
-
-    def _make_request(self, payload: Dict[str, Any]) -> requests.Response:
-        """Make the API request with proper error handling."""
-        try:
-            response = self.session.post(
-                self._get_endpoint(),
-                headers=self.headers,
-                json=payload,
-                timeout=self.timeout,
-            )
-            response.raise_for_status()
-            return response
-        except requests.exceptions.RequestException as e:
-            raise exceptions.FailedToGenerateResponseError(f"API request failed: {e}") from e
-
-    def ask(
-        self,
-        prompt: str,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> Dict[str, Any]:
-        """Sends a prompt to the MultiChatAI API and returns the response."""
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                error_msg = f"Optimizer is not one of {self.__available_optimizers}"
-                raise exceptions.FailedToGenerateResponseError(error_msg)
-
-        payload = {
-            "chatSettings": self._get_chat_settings(),
-            "messages": self._build_messages(conversation_prompt),
-            "customModelId": "",
-        }
-
-        response = self._make_request(payload)
-        try:
-            full_response = response.text.strip()
-            self.last_response = {"text": full_response}
-            self.conversation.update_chat_history(prompt, full_response)
-            return self.last_response
-        except json.JSONDecodeError as e:
-            raise exceptions.FailedToGenerateResponseError(f"Invalid JSON response: {e}") from e
-
-    def chat(
-        self,
-        prompt: str,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> str:
-        """Generate response."""
-        response = self.ask(
-            prompt, optimizer=optimizer, conversationally=conversationally
-        )
-        return self.get_message(response)
-
-    def get_message(self, response: Union[Dict[str, Any], str]) -> str:
-        """
-        Retrieves message from response.
-
-        Args:
-            response (Union[Dict[str, Any], str]): The response to extract the message from
-
-        Returns:
-            str: The extracted message text
-        """
-        if isinstance(response, dict):
-            return response.get("text", "")
-        return str(response)
-
-if __name__ == "__main__":
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    # Test all available models
-    working = 0
-    total = len(MultiChatAI.AVAILABLE_MODELS)
-
-    for model in MultiChatAI.AVAILABLE_MODELS:
-        try:
-            test_ai = MultiChatAI(model=model, timeout=60)
-            response = test_ai.chat("Say 'Hello' in one word")
-            response_text = response
-
-            if response_text and len(response_text.strip()) > 0:
-                status = "✓"
-                # Truncate response if too long
-                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
-            else:
-                status = "✗"
-                display_text = "Empty or invalid response"
-            print(f"{model:<50} {status:<10} {display_text}")
-        except Exception as e:
-            print(f"{model:<50} {'✗':<10} {str(e)}")
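For context, a minimal usage sketch of the removed MultiChatAI provider, using only names visible in the deleted source above. The import path is assumed from the file location, and this only applies to installs older than 8.2.7, since the module is deleted in this release:

    # Hypothetical usage against a pre-8.2.7 install of webscout.
    from webscout.Provider.multichat import MultiChatAI  # import path assumed from file location

    # Model names come from MultiChatAI.AVAILABLE_MODELS; the backend endpoint
    # is resolved internally via _get_provider_from_model().
    ai = MultiChatAI(model="llama-3.3-70b-versatile", timeout=60)
    print(ai.chat("Say 'Hello' in one word"))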
webscout/Provider/promptrefine.py
DELETED
@@ -1,193 +0,0 @@
-import requests
-import uuid
-import json
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import Provider
-from webscout.litagent import LitAgent as UserAgent
-
-class PromptRefine(Provider):
-    """
-    A class to interact with the PromptRefine API.
-    """
-    AVAILABLE_MODELS = ["openai/gpt-4", "openai/gpt-4o", "openai/gpt-4-1106-preview"]
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 600,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        system_prompt: str = "You are a helpful AI assistant.",
-        model: str = "openai/gpt-4o", # Default model
-    ):
-        """
-        Initializes the PromptRefine API with given parameters.
-
-        Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            system_prompt (str, optional): System prompt for PromptRefine. Defaults to "You are a helpful AI assistant.".
-            model (str, optional): Model to use for generation. Defaults to "openai/gpt-4o".
-        """
-        self.session = requests.Session()
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.api_endpoint = 'https://www.promptrefine.com/api/completion'
-        self.stream_chunk_size = 64
-        self.timeout = timeout
-        self.last_response = {}
-        self.system_prompt = system_prompt
-        self.model = model
-        self.headers = {
-            'origin': 'https://www.promptrefine.com',
-            'referer': 'https://www.promptrefine.com/prompt/new',
-            'user-agent': UserAgent().random()
-        }
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        self.session.headers.update(self.headers)
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
-        if self.model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {self.model}. Available models: {', '.join(self.AVAILABLE_MODELS)}")
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> dict:
-        """Chat with PromptRefine
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict : {}
-        ```json
-        {
-            "text" : "How may I assist you today?"
-        }
-        ```
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
-
-        payload = {
-            "messages": [
-                {"role": "system", "content": self.system_prompt},
-                {"role": "user", "content": conversation_prompt}
-            ],
-            "variables": {},
-            "parameters": {},
-            "model": self.model,
-            "userId": str(uuid.uuid4()),
-        }
-
-        def for_stream():
-            response = self.session.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout)
-            if not response.ok:
-                raise Exception(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-
-            full_response = ""
-            for line in response.iter_lines(decode_unicode=True):
-                if line:
-                    full_response += line # No need to decode here
-                    yield full_response if raw else dict(text=line)
-            self.last_response.update(dict(text=full_response))
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
-
-        def for_non_stream():
-            for _ in for_stream():
-                pass
-            return self.last_response
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> str:
-        """Generate response `str`
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str: Response generated
-        """
-        return self.get_message(
-            self.ask(
-                prompt,
-                optimizer=optimizer,
-                conversationally=conversationally,
-            )
-        )
-
-    def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
-
-if __name__ == '__main__':
-    from rich import print
-    ai = PromptRefine(timeout=5000)
-    response = ai.chat("write a poem about AI", stream=True)
-    for chunk in response:
-        print(chunk, end="", flush=True)
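A comparable sketch for the removed PromptRefine provider. Note that in the source above, chat() accepts a stream flag but never forwards it to ask(), so chunked output has to go through ask(stream=True), which yields {"text": ...} dicts line by line (see for_stream). The import path is again assumed from the file location, for installs older than 8.2.7:

    # Hypothetical usage against a pre-8.2.7 install of webscout.
    from webscout.Provider.promptrefine import PromptRefine  # import path assumed from file location

    ai = PromptRefine(model="openai/gpt-4o", timeout=30)
    # ask(stream=True) yields dicts shaped like {"text": "<chunk>"} unless raw=True
    for chunk in ai.ask("write a poem about AI", stream=True):
        print(chunk["text"], end="", flush=True)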