webscout 7.0__py3-none-any.whl → 7.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic. Click here for more details.
- webscout/AIauto.py +191 -191
- webscout/AIbase.py +122 -122
- webscout/AIutel.py +440 -440
- webscout/Bard.py +343 -161
- webscout/DWEBS.py +489 -492
- webscout/Extra/YTToolkit/YTdownloader.py +995 -995
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +476 -479
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +103 -103
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder_utiles.py +199 -199
- webscout/Extra/autocoder/rawdog.py +5 -7
- webscout/Extra/autollama.py +230 -230
- webscout/Extra/gguf.py +3 -3
- webscout/Extra/weather.py +171 -171
- webscout/LLM.py +442 -442
- webscout/Litlogger/__init__.py +67 -681
- webscout/Litlogger/core/__init__.py +6 -0
- webscout/Litlogger/core/level.py +20 -0
- webscout/Litlogger/core/logger.py +123 -0
- webscout/Litlogger/handlers/__init__.py +12 -0
- webscout/Litlogger/handlers/console.py +50 -0
- webscout/Litlogger/handlers/file.py +143 -0
- webscout/Litlogger/handlers/network.py +174 -0
- webscout/Litlogger/styles/__init__.py +7 -0
- webscout/Litlogger/styles/colors.py +231 -0
- webscout/Litlogger/styles/formats.py +377 -0
- webscout/Litlogger/styles/text.py +87 -0
- webscout/Litlogger/utils/__init__.py +6 -0
- webscout/Litlogger/utils/detectors.py +154 -0
- webscout/Litlogger/utils/formatters.py +200 -0
- webscout/Provider/AISEARCH/DeepFind.py +250 -250
- webscout/Provider/Blackboxai.py +136 -137
- webscout/Provider/ChatGPTGratis.py +226 -0
- webscout/Provider/Cloudflare.py +91 -78
- webscout/Provider/DeepSeek.py +218 -0
- webscout/Provider/Deepinfra.py +59 -35
- webscout/Provider/Free2GPT.py +131 -124
- webscout/Provider/Gemini.py +100 -115
- webscout/Provider/Glider.py +74 -59
- webscout/Provider/Groq.py +30 -18
- webscout/Provider/Jadve.py +108 -77
- webscout/Provider/Llama3.py +117 -94
- webscout/Provider/Marcus.py +191 -137
- webscout/Provider/Netwrck.py +62 -50
- webscout/Provider/PI.py +79 -124
- webscout/Provider/PizzaGPT.py +129 -83
- webscout/Provider/QwenLM.py +311 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
- webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
- webscout/Provider/TTI/Nexra/__init__.py +22 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
- webscout/Provider/TTI/artbit/__init__.py +22 -22
- webscout/Provider/TTI/artbit/async_artbit.py +184 -184
- webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
- webscout/Provider/TTI/blackbox/__init__.py +4 -4
- webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
- webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
- webscout/Provider/TTI/deepinfra/__init__.py +4 -4
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
- webscout/Provider/TTI/huggingface/__init__.py +22 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
- webscout/Provider/TTI/imgninza/__init__.py +4 -4
- webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
- webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
- webscout/Provider/TTI/talkai/__init__.py +4 -4
- webscout/Provider/TTI/talkai/async_talkai.py +229 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
- webscout/Provider/TTS/deepgram.py +182 -182
- webscout/Provider/TTS/elevenlabs.py +136 -136
- webscout/Provider/TTS/gesserit.py +150 -150
- webscout/Provider/TTS/murfai.py +138 -138
- webscout/Provider/TTS/parler.py +133 -134
- webscout/Provider/TTS/streamElements.py +360 -360
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TTS/voicepod.py +116 -116
- webscout/Provider/TextPollinationsAI.py +74 -47
- webscout/Provider/WiseCat.py +193 -0
- webscout/Provider/__init__.py +144 -136
- webscout/Provider/cerebras.py +242 -227
- webscout/Provider/chatglm.py +204 -204
- webscout/Provider/dgaf.py +67 -39
- webscout/Provider/gaurish.py +105 -66
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +223 -0
- webscout/Provider/hermes.py +218 -218
- webscout/Provider/llama3mitril.py +179 -179
- webscout/Provider/llamatutor.py +72 -62
- webscout/Provider/llmchat.py +60 -35
- webscout/Provider/meta.py +794 -794
- webscout/Provider/multichat.py +331 -230
- webscout/Provider/typegpt.py +359 -356
- webscout/Provider/yep.py +5 -5
- webscout/__main__.py +5 -5
- webscout/cli.py +319 -319
- webscout/conversation.py +241 -242
- webscout/exceptions.py +328 -328
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +2 -3
- webscout/litprinter/__init__.py +0 -58
- webscout/scout/__init__.py +8 -8
- webscout/scout/core.py +884 -884
- webscout/scout/element.py +459 -459
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +38 -38
- webscout/swiftcli/__init__.py +811 -811
- webscout/update_checker.py +2 -12
- webscout/version.py +1 -1
- webscout/webscout_search.py +1142 -1140
- webscout/webscout_search_async.py +635 -635
- webscout/zeroart/__init__.py +54 -54
- webscout/zeroart/base.py +60 -60
- webscout/zeroart/effects.py +99 -99
- webscout/zeroart/fonts.py +816 -816
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/METADATA +21 -28
- webscout-7.2.dist-info/RECORD +217 -0
- webstoken/__init__.py +30 -30
- webstoken/classifier.py +189 -189
- webstoken/keywords.py +216 -216
- webstoken/language.py +128 -128
- webstoken/ner.py +164 -164
- webstoken/normalizer.py +35 -35
- webstoken/processor.py +77 -77
- webstoken/sentiment.py +206 -206
- webstoken/stemmer.py +73 -73
- webstoken/tagger.py +60 -60
- webstoken/tokenizer.py +158 -158
- webscout/Provider/RUBIKSAI.py +0 -272
- webscout-7.0.dist-info/RECORD +0 -199
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/LICENSE.md +0 -0
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/WHEEL +0 -0
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/entry_points.txt +0 -0
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/top_level.txt +0 -0
webscout/Provider/multichat.py
CHANGED
|
@@ -1,230 +1,331 @@
|
|
|
1
|
-
import requests
|
|
2
|
-
import json
|
|
3
|
-
|
|
4
|
-
|
|
5
|
-
from
|
|
6
|
-
from webscout.AIutel import Conversation
|
|
7
|
-
from webscout.
|
|
8
|
-
from webscout
|
|
9
|
-
from webscout import
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
"
|
|
16
|
-
|
|
17
|
-
"llama-3.
|
|
18
|
-
"llama-3.2-11b-vision-preview": {"contextLength": 32768},
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
"
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
self.
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
self.
|
|
96
|
-
|
|
97
|
-
#
|
|
98
|
-
self.
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
)
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
"
|
|
158
|
-
"
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
)
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
1
|
+
import requests
|
|
2
|
+
import json
|
|
3
|
+
import uuid
|
|
4
|
+
from typing import Any, Dict
|
|
5
|
+
from datetime import datetime
|
|
6
|
+
from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
|
|
7
|
+
from webscout.AIbase import Provider
|
|
8
|
+
from webscout import exceptions
|
|
9
|
+
from webscout.Litlogger import Logger, LogFormat
|
|
10
|
+
from webscout.litagent import LitAgent
|
|
11
|
+
|
|
12
|
+
# Model configurations
|
|
13
|
+
MODEL_CONFIGS = {
|
|
14
|
+
"llama": {
|
|
15
|
+
"endpoint": "https://www.multichatai.com/api/chat/meta",
|
|
16
|
+
"models": {
|
|
17
|
+
"llama-3.3-70b-versatile": {"contextLength": 131072},
|
|
18
|
+
"llama-3.2-11b-vision-preview": {"contextLength": 32768},
|
|
19
|
+
"deepseek-r1-distill-llama-70b": {"contextLength": 128000},
|
|
20
|
+
},
|
|
21
|
+
},
|
|
22
|
+
"cohere": {
|
|
23
|
+
"endpoint": "https://www.multichatai.com/api/chat/cohere",
|
|
24
|
+
"models": {"command-r": {"contextLength": 128000}},
|
|
25
|
+
},
|
|
26
|
+
"google": {
|
|
27
|
+
"endpoint": "https://www.multichatai.com/api/chat/google",
|
|
28
|
+
"models": {
|
|
29
|
+
"gemini-1.5-flash-002": {"contextLength": 1048576},
|
|
30
|
+
"gemma2-9b-it": {"contextLength": 8192},
|
|
31
|
+
},
|
|
32
|
+
"message_format": "parts",
|
|
33
|
+
},
|
|
34
|
+
"deepinfra": {
|
|
35
|
+
"endpoint": "https://www.multichatai.com/api/chat/deepinfra",
|
|
36
|
+
"models": {
|
|
37
|
+
"Sao10K/L3.1-70B-Euryale-v2.2": {"contextLength": 8192},
|
|
38
|
+
"Gryphe/MythoMax-L2-13b": {"contextLength": 8192},
|
|
39
|
+
"nvidia/Llama-3.1-Nemotron-70B-Instruct": {"contextLength": 131072},
|
|
40
|
+
"deepseek-ai/DeepSeek-V3": {"contextLength": 32000},
|
|
41
|
+
},
|
|
42
|
+
},
|
|
43
|
+
"mistral": {
|
|
44
|
+
"endpoint": "https://www.multichatai.com/api/chat/mistral",
|
|
45
|
+
"models": {
|
|
46
|
+
"mistral-small-latest": {"contextLength": 32000},
|
|
47
|
+
"codestral-latest": {"contextLength": 32000},
|
|
48
|
+
"open-mistral-7b": {"contextLength": 8000},
|
|
49
|
+
"open-mixtral-8x7b": {"contextLength": 8000},
|
|
50
|
+
},
|
|
51
|
+
},
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
class MultiChatAI(Provider):
    """Client for the multichatai.com chat API.

    Routes a prompt to one of several upstream model families
    (llama, cohere, google, deepinfra, mistral); the family and endpoint
    are looked up in the module-level ``MODEL_CONFIGS`` table based on the
    chosen model name.
    """

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 4000,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = None,
        history_offset: int = 10250,
        act: str = None,
        model: str = "llama-3.3-70b-versatile",
        system_prompt: str = "You are a friendly, helpful AI assistant.",
        temperature: float = 0.5,
        presence_penalty: int = 0,
        frequency_penalty: int = 0,
        top_p: float = 1,
        logging: bool = False,
    ):
        """Initializes the MultiChatAI API client with logging capabilities.

        Args:
            is_conversation: Keep a running conversation history.
            max_tokens: Maximum tokens to sample in a response.
            timeout: HTTP request timeout in seconds.
            intro: Optional conversation intro text.
            filepath: Optional path for persisting conversation history.
            update_file: Whether to append new turns to ``filepath``.
            proxies: Optional requests-style proxy mapping (``None`` -> no proxies).
            history_offset: Maximum history length kept in the prompt.
            act: Optional AwesomePrompts persona key.
            model: Model name; must exist in ``MODEL_CONFIGS``.
            system_prompt: System instructions sent with each request.
            temperature: Sampling temperature forwarded in chat settings.
            presence_penalty: Presence penalty (stored; not sent in payload).
            frequency_penalty: Frequency penalty (stored; not sent in payload).
            top_p: Nucleus-sampling value (stored; not sent in payload).
            logging: Enable debug logging via Litlogger.

        Raises:
            ValueError: If ``model`` is not present in ``MODEL_CONFIGS``.
        """
        # Initialize logger first so the rest of __init__ can log.
        self.logger = Logger(
            name="MultiChatAI",
            format=LogFormat.MODERN_EMOJI,
        ) if logging else None

        if self.logger:
            self.logger.debug("Initializing MultiChatAI")

        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.system_prompt = system_prompt
        self.temperature = temperature
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        self.top_p = top_p

        # Initialize LitAgent for user agent generation
        self.agent = LitAgent()

        self.headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "text/plain;charset=UTF-8",
            "origin": "https://www.multichatai.com",
            "referer": "https://www.multichatai.com/",
            "user-agent": self.agent.random(),
        }

        if self.logger:
            self.logger.debug(f"Setting up session with headers: {self.headers}")

        self.session.headers.update(self.headers)
        # FIX: the original signature used a mutable default (`proxies: dict = {}`);
        # a None sentinel avoids the shared-default pitfall with identical behavior.
        self.session.proxies = proxies if proxies is not None else {}
        self.session.cookies.update({"session": uuid.uuid4().hex})

        # FIX: this was a generator expression, which is exhausted after the
        # first membership test in ask() — every later `optimizer in ...` check
        # silently failed and the error message printed a generator repr.
        # A tuple supports repeated membership tests and readable formatting.
        self.__available_optimizers = tuple(
            method for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )

        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

        # Get provider after logger initialization (may raise ValueError).
        self.provider = self._get_provider_from_model(self.model)
        self.model_name = self.model

        if self.logger:
            self.logger.info(f"MultiChatAI initialized with model: {self.model}")

    def _get_endpoint(self) -> str:
        """Get the API endpoint for the current provider."""
        endpoint = MODEL_CONFIGS[self.provider]["endpoint"]
        if self.logger:
            self.logger.debug(f"Using endpoint: {endpoint}")
        return endpoint

    def _get_chat_settings(self) -> Dict[str, Any]:
        """Get chat settings for the current model.

        Returns the "chatSettings" object expected by the multichatai API;
        contextLength comes from MODEL_CONFIGS for the active model.
        """
        base_settings = MODEL_CONFIGS[self.provider]["models"][self.model_name]
        settings = {
            "model": self.model,
            "prompt": self.system_prompt,
            "temperature": self.temperature,
            "contextLength": base_settings["contextLength"],
            "includeProfileContext": True,
            "includeWorkspaceInstructions": True,
            "embeddingsProvider": "openai"
        }
        if self.logger:
            self.logger.debug(f"Chat settings: {settings}")
        return settings

    def _get_system_message(self) -> str:
        """Generate system message with current date."""
        current_date = datetime.now().strftime("%d/%m/%Y")
        message = f"Today is {current_date}.\n\nUser Instructions:\n{self.system_prompt}"
        if self.logger:
            self.logger.debug(f"System message: {message}")
        return message

    def _build_messages(self, conversation_prompt: str) -> list:
        """Build messages array based on provider type.

        The google family uses Gemini-style {"role", "parts"} messages;
        all other families use {"role", "content"} messages.
        """
        if self.provider == "google":
            messages = [
                {"role": "user", "parts": self._get_system_message()},
                {"role": "model", "parts": "I will follow your instructions."},
                {"role": "user", "parts": conversation_prompt}
            ]
        else:
            messages = [
                {"role": "system", "content": self._get_system_message()},
                {"role": "user", "content": conversation_prompt}
            ]

        if self.logger:
            self.logger.debug(f"Built messages: {messages}")
        return messages

    def _get_provider_from_model(self, model: str) -> str:
        """Determine the provider family for ``model``.

        Raises:
            ValueError: If the model is not listed in MODEL_CONFIGS; the
                message enumerates every available "provider/model" pair.
        """
        if self.logger:
            self.logger.debug(f"Getting provider for model: {model}")

        for provider, config in MODEL_CONFIGS.items():
            if model in config["models"]:
                if self.logger:
                    self.logger.info(f"Found provider: {provider} for model: {model}")
                return provider

        available_models = []
        for provider, config in MODEL_CONFIGS.items():
            for model_name in config["models"].keys():
                available_models.append(f"{provider}/{model_name}")

        error_msg = f"Invalid model: {model}\nAvailable models: {', '.join(available_models)}"
        if self.logger:
            self.logger.error(error_msg)
        raise ValueError(error_msg)

    def _make_request(self, payload: Dict[str, Any]) -> requests.Response:
        """POST ``payload`` to the provider endpoint.

        Raises:
            exceptions.FailedToGenerateResponseError: On any requests-level
                failure (connection error, timeout, non-2xx status).
        """
        if self.logger:
            self.logger.debug(f"Making request to endpoint: {self._get_endpoint()}")
            self.logger.debug(f"Request payload: {json.dumps(payload, indent=2)}")

        try:
            response = self.session.post(
                self._get_endpoint(),
                headers=self.headers,
                json=payload,
                timeout=self.timeout,
            )
            response.raise_for_status()

            if self.logger:
                self.logger.info(f"Request successful: {response.status_code}")
                self.logger.debug(f"Response content: {response.text[:200]}...")

            return response
        except requests.exceptions.RequestException as e:
            if self.logger:
                self.logger.error(f"Request failed: {str(e)}")
            raise exceptions.FailedToGenerateResponseError(f"API request failed: {e}") from e

    def ask(
        self,
        prompt: str,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Dict[str, Any]:
        """Sends a prompt to the MultiChatAI API and returns the response.

        Args:
            prompt: The user prompt.
            raw: Accepted for Provider interface compatibility; currently unused.
            optimizer: Optional Optimizers method name applied to the prompt.
            conversationally: Apply the optimizer to the full conversation
                prompt instead of the bare prompt.

        Returns:
            ``{"text": <response body>}`` — the API answers with plain text.

        Raises:
            exceptions.FailedToGenerateResponseError: On an unknown optimizer
                or a failed API request.
        """
        if self.logger:
            self.logger.debug(f"ask() called with prompt: {prompt}")

        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                if self.logger:
                    self.logger.info(f"Applying optimizer: {optimizer}")
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                error_msg = f"Optimizer is not one of {self.__available_optimizers}"
                if self.logger:
                    self.logger.error(error_msg)
                raise exceptions.FailedToGenerateResponseError(error_msg)

        payload = {
            "chatSettings": self._get_chat_settings(),
            "messages": self._build_messages(conversation_prompt),
            "customModelId": "",
        }

        response = self._make_request(payload)
        # FIX: the original wrapped this in `try/except json.JSONDecodeError`,
        # but no JSON is ever parsed here (the API returns plain text), so the
        # handler was unreachable dead code and has been removed.
        full_response = response.text.strip()
        self.last_response = {"text": full_response}
        self.conversation.update_chat_history(prompt, full_response)

        if self.logger:
            self.logger.info("Successfully processed response")
            self.logger.debug(f"Final response: {full_response[:200]}...")

        return self.last_response

    def chat(
        self,
        prompt: str,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        """Generate a response and return it as plain text."""
        if self.logger:
            self.logger.debug(f"chat() called with prompt: {prompt}")

        response = self.ask(
            prompt, optimizer=optimizer, conversationally=conversationally
        )

        if self.logger:
            self.logger.info("Chat response generated successfully")

        return self.get_message(response)

    def get_message(self, response: Dict[str, Any] | str) -> str:
        """
        Retrieves message from response.

        Args:
            response (Union[Dict[str, Any], str]): The response to extract the message from

        Returns:
            str: The extracted message text
        """
        if self.logger:
            self.logger.debug(f"Extracting message from response type: {type(response)}")

        if isinstance(response, dict):
            message = response.get("text", "")
            if self.logger:
                self.logger.debug(f"Extracted message from dict: {message[:200]}...")
            return message
        return str(response)
322
|
+
if __name__ == "__main__":
    from rich import print

    # Smoke test: send one question through the provider and show the reply.
    bot = MultiChatAI(model="deepseek-r1-distill-llama-70b", logging=False)
    try:
        print(bot.chat("What is quantum computing?"))
    except Exception as e:
        print(f"Error: {str(e)}")