webscout-7.1-py3-none-any.whl → webscout-7.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of webscout might be problematic.

Files changed (154)
  1. webscout/AIauto.py +191 -191
  2. webscout/AIbase.py +122 -122
  3. webscout/AIutel.py +440 -440
  4. webscout/Bard.py +343 -161
  5. webscout/DWEBS.py +489 -492
  6. webscout/Extra/YTToolkit/YTdownloader.py +995 -995
  7. webscout/Extra/YTToolkit/__init__.py +2 -2
  8. webscout/Extra/YTToolkit/transcriber.py +476 -479
  9. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  10. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  11. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  12. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  13. webscout/Extra/YTToolkit/ytapi/video.py +103 -103
  14. webscout/Extra/autocoder/__init__.py +9 -9
  15. webscout/Extra/autocoder/autocoder_utiles.py +199 -199
  16. webscout/Extra/autocoder/rawdog.py +5 -7
  17. webscout/Extra/autollama.py +230 -230
  18. webscout/Extra/gguf.py +3 -3
  19. webscout/Extra/weather.py +171 -171
  20. webscout/LLM.py +442 -442
  21. webscout/Litlogger/__init__.py +67 -681
  22. webscout/Litlogger/core/__init__.py +6 -0
  23. webscout/Litlogger/core/level.py +23 -0
  24. webscout/Litlogger/core/logger.py +166 -0
  25. webscout/Litlogger/handlers/__init__.py +12 -0
  26. webscout/Litlogger/handlers/console.py +33 -0
  27. webscout/Litlogger/handlers/file.py +143 -0
  28. webscout/Litlogger/handlers/network.py +173 -0
  29. webscout/Litlogger/styles/__init__.py +7 -0
  30. webscout/Litlogger/styles/colors.py +249 -0
  31. webscout/Litlogger/styles/formats.py +460 -0
  32. webscout/Litlogger/styles/text.py +87 -0
  33. webscout/Litlogger/utils/__init__.py +6 -0
  34. webscout/Litlogger/utils/detectors.py +154 -0
  35. webscout/Litlogger/utils/formatters.py +200 -0
  36. webscout/Provider/AISEARCH/DeepFind.py +250 -250
  37. webscout/Provider/AISEARCH/ISou.py +277 -0
  38. webscout/Provider/AISEARCH/__init__.py +2 -1
  39. webscout/Provider/Blackboxai.py +3 -3
  40. webscout/Provider/ChatGPTGratis.py +226 -0
  41. webscout/Provider/Cloudflare.py +3 -4
  42. webscout/Provider/DeepSeek.py +218 -0
  43. webscout/Provider/Deepinfra.py +40 -24
  44. webscout/Provider/Free2GPT.py +131 -124
  45. webscout/Provider/Gemini.py +100 -115
  46. webscout/Provider/Glider.py +3 -3
  47. webscout/Provider/Groq.py +5 -1
  48. webscout/Provider/Jadve.py +3 -3
  49. webscout/Provider/Marcus.py +191 -192
  50. webscout/Provider/Netwrck.py +3 -3
  51. webscout/Provider/PI.py +2 -2
  52. webscout/Provider/PizzaGPT.py +2 -3
  53. webscout/Provider/QwenLM.py +311 -0
  54. webscout/Provider/TTI/AiForce/__init__.py +22 -22
  55. webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
  56. webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
  57. webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
  58. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +206 -0
  59. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +192 -0
  60. webscout/Provider/TTI/Nexra/__init__.py +22 -22
  61. webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
  62. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
  63. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
  64. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
  65. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
  66. webscout/Provider/TTI/__init__.py +2 -1
  67. webscout/Provider/TTI/artbit/__init__.py +22 -22
  68. webscout/Provider/TTI/artbit/async_artbit.py +184 -184
  69. webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
  70. webscout/Provider/TTI/blackbox/__init__.py +4 -4
  71. webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
  72. webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
  73. webscout/Provider/TTI/deepinfra/__init__.py +4 -4
  74. webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
  75. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
  76. webscout/Provider/TTI/huggingface/__init__.py +22 -22
  77. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
  78. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
  79. webscout/Provider/TTI/imgninza/__init__.py +4 -4
  80. webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
  81. webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
  82. webscout/Provider/TTI/talkai/__init__.py +4 -4
  83. webscout/Provider/TTI/talkai/async_talkai.py +229 -229
  84. webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
  85. webscout/Provider/TTS/deepgram.py +182 -182
  86. webscout/Provider/TTS/elevenlabs.py +136 -136
  87. webscout/Provider/TTS/gesserit.py +150 -150
  88. webscout/Provider/TTS/murfai.py +138 -138
  89. webscout/Provider/TTS/parler.py +133 -134
  90. webscout/Provider/TTS/streamElements.py +360 -360
  91. webscout/Provider/TTS/utils.py +280 -280
  92. webscout/Provider/TTS/voicepod.py +116 -116
  93. webscout/Provider/TextPollinationsAI.py +28 -8
  94. webscout/Provider/WiseCat.py +193 -0
  95. webscout/Provider/__init__.py +146 -134
  96. webscout/Provider/cerebras.py +242 -227
  97. webscout/Provider/chatglm.py +204 -204
  98. webscout/Provider/dgaf.py +2 -3
  99. webscout/Provider/freeaichat.py +221 -0
  100. webscout/Provider/gaurish.py +2 -3
  101. webscout/Provider/geminiapi.py +208 -208
  102. webscout/Provider/granite.py +223 -0
  103. webscout/Provider/hermes.py +218 -218
  104. webscout/Provider/llama3mitril.py +179 -179
  105. webscout/Provider/llamatutor.py +3 -3
  106. webscout/Provider/llmchat.py +2 -3
  107. webscout/Provider/meta.py +794 -794
  108. webscout/Provider/multichat.py +331 -331
  109. webscout/Provider/typegpt.py +359 -359
  110. webscout/Provider/yep.py +3 -3
  111. webscout/__init__.py +1 -0
  112. webscout/__main__.py +5 -5
  113. webscout/cli.py +319 -319
  114. webscout/conversation.py +241 -242
  115. webscout/exceptions.py +328 -328
  116. webscout/litagent/__init__.py +28 -28
  117. webscout/litagent/agent.py +2 -3
  118. webscout/litprinter/__init__.py +0 -58
  119. webscout/scout/__init__.py +8 -8
  120. webscout/scout/core.py +884 -884
  121. webscout/scout/element.py +459 -459
  122. webscout/scout/parsers/__init__.py +69 -69
  123. webscout/scout/parsers/html5lib_parser.py +172 -172
  124. webscout/scout/parsers/html_parser.py +236 -236
  125. webscout/scout/parsers/lxml_parser.py +178 -178
  126. webscout/scout/utils.py +38 -38
  127. webscout/swiftcli/__init__.py +811 -811
  128. webscout/update_checker.py +2 -12
  129. webscout/version.py +1 -1
  130. webscout/webscout_search.py +87 -6
  131. webscout/webscout_search_async.py +58 -1
  132. webscout/yep_search.py +297 -0
  133. webscout/zeroart/__init__.py +54 -54
  134. webscout/zeroart/base.py +60 -60
  135. webscout/zeroart/effects.py +99 -99
  136. webscout/zeroart/fonts.py +816 -816
  137. {webscout-7.1.dist-info → webscout-7.3.dist-info}/METADATA +62 -22
  138. webscout-7.3.dist-info/RECORD +223 -0
  139. {webscout-7.1.dist-info → webscout-7.3.dist-info}/WHEEL +1 -1
  140. webstoken/__init__.py +30 -30
  141. webstoken/classifier.py +189 -189
  142. webstoken/keywords.py +216 -216
  143. webstoken/language.py +128 -128
  144. webstoken/ner.py +164 -164
  145. webstoken/normalizer.py +35 -35
  146. webstoken/processor.py +77 -77
  147. webstoken/sentiment.py +206 -206
  148. webstoken/stemmer.py +73 -73
  149. webstoken/tagger.py +60 -60
  150. webstoken/tokenizer.py +158 -158
  151. webscout-7.1.dist-info/RECORD +0 -198
  152. {webscout-7.1.dist-info → webscout-7.3.dist-info}/LICENSE.md +0 -0
  153. {webscout-7.1.dist-info → webscout-7.3.dist-info}/entry_points.txt +0 -0
  154. {webscout-7.1.dist-info → webscout-7.3.dist-info}/top_level.txt +0 -0
webscout/Provider/multichat.py
@@ -1,331 +1,331 @@
 import requests
 import json
 import uuid
 from typing import Any, Dict
 from datetime import datetime
 from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
+from webscout.Litlogger import Logger, LogFormat
 from webscout.litagent import LitAgent
 
 # Model configurations
 MODEL_CONFIGS = {
     "llama": {
         "endpoint": "https://www.multichatai.com/api/chat/meta",
         "models": {
             "llama-3.3-70b-versatile": {"contextLength": 131072},
             "llama-3.2-11b-vision-preview": {"contextLength": 32768},
             "deepseek-r1-distill-llama-70b": {"contextLength": 128000},
         },
     },
     "cohere": {
         "endpoint": "https://www.multichatai.com/api/chat/cohere",
         "models": {"command-r": {"contextLength": 128000}},
     },
     "google": {
         "endpoint": "https://www.multichatai.com/api/chat/google",
         "models": {
             "gemini-1.5-flash-002": {"contextLength": 1048576},
             "gemma2-9b-it": {"contextLength": 8192},
         },
         "message_format": "parts",
     },
     "deepinfra": {
         "endpoint": "https://www.multichatai.com/api/chat/deepinfra",
         "models": {
             "Sao10K/L3.1-70B-Euryale-v2.2": {"contextLength": 8192},
             "Gryphe/MythoMax-L2-13b": {"contextLength": 8192},
             "nvidia/Llama-3.1-Nemotron-70B-Instruct": {"contextLength": 131072},
             "deepseek-ai/DeepSeek-V3": {"contextLength": 32000},
         },
     },
     "mistral": {
         "endpoint": "https://www.multichatai.com/api/chat/mistral",
         "models": {
             "mistral-small-latest": {"contextLength": 32000},
             "codestral-latest": {"contextLength": 32000},
             "open-mistral-7b": {"contextLength": 8000},
             "open-mixtral-8x7b": {"contextLength": 8000},
         },
     },
 }
 
 class MultiChatAI(Provider):
     def __init__(
         self,
         is_conversation: bool = True,
         max_tokens: int = 4000,
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
         update_file: bool = True,
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
         model: str = "llama-3.3-70b-versatile",
         system_prompt: str = "You are a friendly, helpful AI assistant.",
         temperature: float = 0.5,
         presence_penalty: int = 0,
         frequency_penalty: int = 0,
         top_p: float = 1,
         logging: bool = False,
     ):
         """Initializes the MultiChatAI API client with logging capabilities."""
         # Initialize logger first
-        self.logger = LitLogger(
+        self.logger = Logger(
             name="MultiChatAI",
             format=LogFormat.MODERN_EMOJI,
-            color_scheme=ColorScheme.CYBERPUNK
+
         ) if logging else None
 
         if self.logger:
             self.logger.debug("Initializing MultiChatAI")
 
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.timeout = timeout
         self.last_response = {}
         self.model = model
         self.system_prompt = system_prompt
         self.temperature = temperature
         self.presence_penalty = presence_penalty
         self.frequency_penalty = frequency_penalty
         self.top_p = top_p
 
         # Initialize LitAgent for user agent generation
         self.agent = LitAgent()
 
         self.headers = {
             "accept": "*/*",
             "accept-language": "en-US,en;q=0.9",
             "content-type": "text/plain;charset=UTF-8",
             "origin": "https://www.multichatai.com",
             "referer": "https://www.multichatai.com/",
             "user-agent": self.agent.random(),
         }
 
         if self.logger:
             self.logger.debug(f"Setting up session with headers: {self.headers}")
 
         self.session.headers.update(self.headers)
         self.session.proxies = proxies
         self.session.cookies.update({"session": uuid.uuid4().hex})
 
         self.__available_optimizers = (
             method for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
 
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
             )
             if act
             else intro or Conversation.intro
         )
 
         self.conversation = Conversation(
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
 
         # Get provider after logger initialization
         self.provider = self._get_provider_from_model(self.model)
         self.model_name = self.model
 
         if self.logger:
             self.logger.info(f"MultiChatAI initialized with model: {self.model}")
 
     def _get_endpoint(self) -> str:
         """Get the API endpoint for the current provider."""
         endpoint = MODEL_CONFIGS[self.provider]["endpoint"]
         if self.logger:
             self.logger.debug(f"Using endpoint: {endpoint}")
         return endpoint
 
     def _get_chat_settings(self) -> Dict[str, Any]:
         """Get chat settings for the current model."""
         base_settings = MODEL_CONFIGS[self.provider]["models"][self.model_name]
         settings = {
             "model": self.model,
             "prompt": self.system_prompt,
             "temperature": self.temperature,
             "contextLength": base_settings["contextLength"],
             "includeProfileContext": True,
             "includeWorkspaceInstructions": True,
             "embeddingsProvider": "openai"
         }
         if self.logger:
             self.logger.debug(f"Chat settings: {settings}")
         return settings
 
     def _get_system_message(self) -> str:
         """Generate system message with current date."""
         current_date = datetime.now().strftime("%d/%m/%Y")
         message = f"Today is {current_date}.\n\nUser Instructions:\n{self.system_prompt}"
         if self.logger:
             self.logger.debug(f"System message: {message}")
         return message
 
     def _build_messages(self, conversation_prompt: str) -> list:
         """Build messages array based on provider type."""
         if self.provider == "google":
             messages = [
                 {"role": "user", "parts": self._get_system_message()},
                 {"role": "model", "parts": "I will follow your instructions."},
                 {"role": "user", "parts": conversation_prompt}
             ]
         else:
             messages = [
                 {"role": "system", "content": self._get_system_message()},
                 {"role": "user", "content": conversation_prompt}
             ]
 
         if self.logger:
             self.logger.debug(f"Built messages: {messages}")
         return messages
 
     def _get_provider_from_model(self, model: str) -> str:
         """Determine the provider based on the model name."""
         if self.logger:
             self.logger.debug(f"Getting provider for model: {model}")
 
         for provider, config in MODEL_CONFIGS.items():
             if model in config["models"]:
                 if self.logger:
                     self.logger.info(f"Found provider: {provider} for model: {model}")
                 return provider
 
         available_models = []
         for provider, config in MODEL_CONFIGS.items():
             for model_name in config["models"].keys():
                 available_models.append(f"{provider}/{model_name}")
 
         error_msg = f"Invalid model: {model}\nAvailable models: {', '.join(available_models)}"
         if self.logger:
             self.logger.error(error_msg)
         raise ValueError(error_msg)
 
     def _make_request(self, payload: Dict[str, Any]) -> requests.Response:
         """Make the API request with proper error handling and logging."""
         if self.logger:
             self.logger.debug(f"Making request to endpoint: {self._get_endpoint()}")
             self.logger.debug(f"Request payload: {json.dumps(payload, indent=2)}")
 
         try:
             response = self.session.post(
                 self._get_endpoint(),
                 headers=self.headers,
                 json=payload,
                 timeout=self.timeout,
             )
             response.raise_for_status()
 
             if self.logger:
                 self.logger.info(f"Request successful: {response.status_code}")
                 self.logger.debug(f"Response content: {response.text[:200]}...")
 
             return response
         except requests.exceptions.RequestException as e:
             if self.logger:
                 self.logger.error(f"Request failed: {str(e)}")
             raise exceptions.FailedToGenerateResponseError(f"API request failed: {e}") from e
 
     def ask(
         self,
         prompt: str,
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Dict[str, Any]:
         """Sends a prompt to the MultiChatAI API and returns the response."""
         if self.logger:
             self.logger.debug(f"ask() called with prompt: {prompt}")
 
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
                 if self.logger:
                     self.logger.info(f"Applying optimizer: {optimizer}")
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
             else:
                 error_msg = f"Optimizer is not one of {self.__available_optimizers}"
                 if self.logger:
                     self.logger.error(error_msg)
                 raise exceptions.FailedToGenerateResponseError(error_msg)
 
         payload = {
             "chatSettings": self._get_chat_settings(),
             "messages": self._build_messages(conversation_prompt),
             "customModelId": "",
         }
 
         response = self._make_request(payload)
         try:
             full_response = response.text.strip()
             self.last_response = {"text": full_response}
             self.conversation.update_chat_history(prompt, full_response)
 
             if self.logger:
                 self.logger.info("Successfully processed response")
                 self.logger.debug(f"Final response: {full_response[:200]}...")
 
             return self.last_response
         except json.JSONDecodeError as e:
             if self.logger:
                 self.logger.error(f"Failed to decode JSON response: {e}")
             raise exceptions.FailedToGenerateResponseError(f"Invalid JSON response: {e}") from e
 
     def chat(
         self,
         prompt: str,
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str:
         """Generate response with logging."""
         if self.logger:
             self.logger.debug(f"chat() called with prompt: {prompt}")
 
         response = self.ask(
             prompt, optimizer=optimizer, conversationally=conversationally
         )
 
         if self.logger:
             self.logger.info("Chat response generated successfully")
 
         return self.get_message(response)
 
     def get_message(self, response: Dict[str, Any] | str) -> str:
         """
         Retrieves message from response.
 
         Args:
             response (Union[Dict[str, Any], str]): The response to extract the message from
 
         Returns:
             str: The extracted message text
         """
         if self.logger:
             self.logger.debug(f"Extracting message from response type: {type(response)}")
 
         if isinstance(response, dict):
             message = response.get("text", "")
             if self.logger:
                 self.logger.debug(f"Extracted message from dict: {message[:200]}...")
             return message
         return str(response)
 
 if __name__ == "__main__":
     from rich import print
 
     # Example usage with logging enabled
     ai = MultiChatAI(model="deepseek-r1-distill-llama-70b", logging=False)
     try:
         response = ai.chat("What is quantum computing?")
         print(response)
     except Exception as e:
         print(f"Error: {str(e)}")