webscout 8.2.6__py3-none-any.whl → 8.2.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic. Click here for more details.
- webscout/AIutel.py +97 -87
- webscout/version.py +1 -1
- {webscout-8.2.6.dist-info → webscout-8.2.7.dist-info}/METADATA +2 -15
- webscout-8.2.7.dist-info/RECORD +26 -0
- {webscout-8.2.6.dist-info → webscout-8.2.7.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/entry_points.txt +3 -0
- webscout-8.2.7.dist-info/top_level.txt +1 -0
- webscout/Extra/GitToolkit/__init__.py +0 -10
- webscout/Extra/GitToolkit/gitapi/__init__.py +0 -12
- webscout/Extra/GitToolkit/gitapi/repository.py +0 -195
- webscout/Extra/GitToolkit/gitapi/user.py +0 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +0 -62
- webscout/Extra/YTToolkit/YTdownloader.py +0 -957
- webscout/Extra/YTToolkit/__init__.py +0 -3
- webscout/Extra/YTToolkit/transcriber.py +0 -476
- webscout/Extra/YTToolkit/ytapi/__init__.py +0 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +0 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +0 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +0 -45
- webscout/Extra/YTToolkit/ytapi/https.py +0 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +0 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +0 -59
- webscout/Extra/YTToolkit/ytapi/pool.py +0 -8
- webscout/Extra/YTToolkit/ytapi/query.py +0 -40
- webscout/Extra/YTToolkit/ytapi/stream.py +0 -63
- webscout/Extra/YTToolkit/ytapi/utils.py +0 -62
- webscout/Extra/YTToolkit/ytapi/video.py +0 -232
- webscout/Extra/__init__.py +0 -7
- webscout/Extra/autocoder/__init__.py +0 -9
- webscout/Extra/autocoder/autocoder.py +0 -910
- webscout/Extra/autocoder/autocoder_utiles.py +0 -332
- webscout/Extra/gguf.py +0 -684
- webscout/Extra/tempmail/__init__.py +0 -28
- webscout/Extra/tempmail/async_utils.py +0 -141
- webscout/Extra/tempmail/base.py +0 -161
- webscout/Extra/tempmail/cli.py +0 -187
- webscout/Extra/tempmail/emailnator.py +0 -84
- webscout/Extra/tempmail/mail_tm.py +0 -361
- webscout/Extra/tempmail/temp_mail_io.py +0 -292
- webscout/Extra/weather.py +0 -194
- webscout/Extra/weather_ascii.py +0 -76
- webscout/Litlogger/__init__.py +0 -67
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/AISEARCH/DeepFind.py +0 -250
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/AISEARCH/Perplexity.py +0 -359
- webscout/Provider/AISEARCH/__init__.py +0 -10
- webscout/Provider/AISEARCH/felo_search.py +0 -228
- webscout/Provider/AISEARCH/genspark_search.py +0 -208
- webscout/Provider/AISEARCH/hika_search.py +0 -198
- webscout/Provider/AISEARCH/iask_search.py +0 -436
- webscout/Provider/AISEARCH/monica_search.py +0 -246
- webscout/Provider/AISEARCH/scira_search.py +0 -322
- webscout/Provider/AISEARCH/webpilotai_search.py +0 -281
- webscout/Provider/Aitopia.py +0 -316
- webscout/Provider/AllenAI.py +0 -447
- webscout/Provider/Andi.py +0 -228
- webscout/Provider/Blackboxai.py +0 -229
- webscout/Provider/ChatGPTClone.py +0 -237
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/ChatSandbox.py +0 -342
- webscout/Provider/Cloudflare.py +0 -325
- webscout/Provider/Cohere.py +0 -208
- webscout/Provider/Deepinfra.py +0 -338
- webscout/Provider/ElectronHub.py +0 -773
- webscout/Provider/ExaAI.py +0 -261
- webscout/Provider/ExaChat.py +0 -358
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -249
- webscout/Provider/Gemini.py +0 -169
- webscout/Provider/GithubChat.py +0 -370
- webscout/Provider/GizAI.py +0 -285
- webscout/Provider/Glider.py +0 -222
- webscout/Provider/Groq.py +0 -801
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/HeckAI.py +0 -257
- webscout/Provider/HuggingFaceChat.py +0 -469
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/Jadve.py +0 -291
- webscout/Provider/Koboldai.py +0 -381
- webscout/Provider/LambdaChat.py +0 -411
- webscout/Provider/Llama3.py +0 -259
- webscout/Provider/MCPCore.py +0 -315
- webscout/Provider/Marcus.py +0 -206
- webscout/Provider/Nemotron.py +0 -218
- webscout/Provider/Netwrck.py +0 -270
- webscout/Provider/OLLAMA.py +0 -396
- webscout/Provider/OPENAI/__init__.py +0 -28
- webscout/Provider/OPENAI/ai4chat.py +0 -286
- webscout/Provider/OPENAI/base.py +0 -46
- webscout/Provider/OPENAI/c4ai.py +0 -367
- webscout/Provider/OPENAI/chatgpt.py +0 -549
- webscout/Provider/OPENAI/chatgptclone.py +0 -481
- webscout/Provider/OPENAI/deepinfra.py +0 -309
- webscout/Provider/OPENAI/e2b.py +0 -1350
- webscout/Provider/OPENAI/exaai.py +0 -404
- webscout/Provider/OPENAI/exachat.py +0 -437
- webscout/Provider/OPENAI/freeaichat.py +0 -352
- webscout/Provider/OPENAI/glider.py +0 -316
- webscout/Provider/OPENAI/groq.py +0 -354
- webscout/Provider/OPENAI/heckai.py +0 -341
- webscout/Provider/OPENAI/llmchatco.py +0 -327
- webscout/Provider/OPENAI/mcpcore.py +0 -376
- webscout/Provider/OPENAI/multichat.py +0 -368
- webscout/Provider/OPENAI/netwrck.py +0 -350
- webscout/Provider/OPENAI/opkfc.py +0 -488
- webscout/Provider/OPENAI/scirachat.py +0 -462
- webscout/Provider/OPENAI/sonus.py +0 -294
- webscout/Provider/OPENAI/standardinput.py +0 -425
- webscout/Provider/OPENAI/textpollinations.py +0 -329
- webscout/Provider/OPENAI/toolbaz.py +0 -406
- webscout/Provider/OPENAI/typegpt.py +0 -346
- webscout/Provider/OPENAI/uncovrAI.py +0 -455
- webscout/Provider/OPENAI/utils.py +0 -211
- webscout/Provider/OPENAI/venice.py +0 -413
- webscout/Provider/OPENAI/wisecat.py +0 -381
- webscout/Provider/OPENAI/writecream.py +0 -156
- webscout/Provider/OPENAI/x0gpt.py +0 -371
- webscout/Provider/OPENAI/yep.py +0 -327
- webscout/Provider/OpenGPT.py +0 -209
- webscout/Provider/Openai.py +0 -496
- webscout/Provider/PI.py +0 -429
- webscout/Provider/Perplexitylabs.py +0 -415
- webscout/Provider/QwenLM.py +0 -254
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/StandardInput.py +0 -290
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/__init__.py +0 -12
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/TTS/__init__.py +0 -8
- webscout/Provider/TTS/base.py +0 -159
- webscout/Provider/TTS/deepgram.py +0 -156
- webscout/Provider/TTS/elevenlabs.py +0 -111
- webscout/Provider/TTS/gesserit.py +0 -128
- webscout/Provider/TTS/murfai.py +0 -113
- webscout/Provider/TTS/parler.py +0 -111
- webscout/Provider/TTS/speechma.py +0 -180
- webscout/Provider/TTS/streamElements.py +0 -333
- webscout/Provider/TTS/utils.py +0 -280
- webscout/Provider/TeachAnything.py +0 -233
- webscout/Provider/TextPollinationsAI.py +0 -306
- webscout/Provider/TwoAI.py +0 -280
- webscout/Provider/TypliAI.py +0 -305
- webscout/Provider/Venice.py +0 -258
- webscout/Provider/VercelAI.py +0 -253
- webscout/Provider/WiseCat.py +0 -233
- webscout/Provider/WrDoChat.py +0 -370
- webscout/Provider/Writecream.py +0 -237
- webscout/Provider/WritingMate.py +0 -269
- webscout/Provider/Youchat.py +0 -330
- webscout/Provider/__init__.py +0 -178
- webscout/Provider/ai4chat.py +0 -203
- webscout/Provider/aimathgpt.py +0 -189
- webscout/Provider/akashgpt.py +0 -335
- webscout/Provider/asksteve.py +0 -212
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/cerebras.py +0 -288
- webscout/Provider/chatglm.py +0 -215
- webscout/Provider/cleeai.py +0 -213
- webscout/Provider/copilot.py +0 -425
- webscout/Provider/elmo.py +0 -283
- webscout/Provider/freeaichat.py +0 -285
- webscout/Provider/geminiapi.py +0 -208
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/Provider/granite.py +0 -235
- webscout/Provider/hermes.py +0 -266
- webscout/Provider/julius.py +0 -223
- webscout/Provider/koala.py +0 -268
- webscout/Provider/learnfastai.py +0 -325
- webscout/Provider/llama3mitril.py +0 -215
- webscout/Provider/llmchat.py +0 -255
- webscout/Provider/llmchatco.py +0 -306
- webscout/Provider/meta.py +0 -798
- webscout/Provider/multichat.py +0 -364
- webscout/Provider/scira_chat.py +0 -297
- webscout/Provider/scnet.py +0 -243
- webscout/Provider/searchchat.py +0 -292
- webscout/Provider/sonus.py +0 -258
- webscout/Provider/talkai.py +0 -194
- webscout/Provider/toolbaz.py +0 -353
- webscout/Provider/turboseek.py +0 -266
- webscout/Provider/typefully.py +0 -330
- webscout/Provider/typegpt.py +0 -289
- webscout/Provider/uncovr.py +0 -368
- webscout/Provider/x0gpt.py +0 -299
- webscout/Provider/yep.py +0 -389
- webscout/litagent/__init__.py +0 -29
- webscout/litagent/agent.py +0 -455
- webscout/litagent/constants.py +0 -60
- webscout/litprinter/__init__.py +0 -59
- webscout/scout/__init__.py +0 -8
- webscout/scout/core/__init__.py +0 -7
- webscout/scout/core/crawler.py +0 -140
- webscout/scout/core/scout.py +0 -568
- webscout/scout/core/search_result.py +0 -96
- webscout/scout/core/text_analyzer.py +0 -63
- webscout/scout/core/text_utils.py +0 -277
- webscout/scout/core/web_analyzer.py +0 -52
- webscout/scout/core.py +0 -881
- webscout/scout/element.py +0 -460
- webscout/scout/parsers/__init__.py +0 -69
- webscout/scout/parsers/html5lib_parser.py +0 -172
- webscout/scout/parsers/html_parser.py +0 -236
- webscout/scout/parsers/lxml_parser.py +0 -178
- webscout/scout/utils.py +0 -37
- webscout/swiftcli/__init__.py +0 -95
- webscout/swiftcli/core/__init__.py +0 -7
- webscout/swiftcli/core/cli.py +0 -297
- webscout/swiftcli/core/context.py +0 -104
- webscout/swiftcli/core/group.py +0 -241
- webscout/swiftcli/decorators/__init__.py +0 -28
- webscout/swiftcli/decorators/command.py +0 -221
- webscout/swiftcli/decorators/options.py +0 -220
- webscout/swiftcli/decorators/output.py +0 -252
- webscout/swiftcli/exceptions.py +0 -21
- webscout/swiftcli/plugins/__init__.py +0 -9
- webscout/swiftcli/plugins/base.py +0 -135
- webscout/swiftcli/plugins/manager.py +0 -262
- webscout/swiftcli/utils/__init__.py +0 -59
- webscout/swiftcli/utils/formatting.py +0 -252
- webscout/swiftcli/utils/parsing.py +0 -267
- webscout/zeroart/__init__.py +0 -55
- webscout/zeroart/base.py +0 -60
- webscout/zeroart/effects.py +0 -99
- webscout/zeroart/fonts.py +0 -816
- webscout-8.2.6.dist-info/RECORD +0 -307
- webscout-8.2.6.dist-info/entry_points.txt +0 -3
- webscout-8.2.6.dist-info/top_level.txt +0 -2
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- {webscout-8.2.6.dist-info → webscout-8.2.7.dist-info}/licenses/LICENSE.md +0 -0
webscout/Provider/multichat.py
DELETED
|
@@ -1,364 +0,0 @@
|
|
|
1
|
-
from curl_cffi.requests import Session
|
|
2
|
-
from curl_cffi import CurlError
|
|
3
|
-
import json
|
|
4
|
-
import uuid
|
|
5
|
-
from typing import Any, Dict, Union
|
|
6
|
-
from datetime import datetime
|
|
7
|
-
from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
|
|
8
|
-
from webscout.AIbase import Provider
|
|
9
|
-
from webscout import exceptions
|
|
10
|
-
from webscout.litagent import LitAgent
|
|
11
|
-
|
|
12
|
-
# Model configurations
|
|
13
|
-
MODEL_CONFIGS = {
|
|
14
|
-
"llama": {
|
|
15
|
-
"endpoint": "https://www.multichatai.com/api/chat/meta",
|
|
16
|
-
"models": {
|
|
17
|
-
"llama-3.3-70b-versatile": {"contextLength": 131072},
|
|
18
|
-
"llama-3.2-11b-vision-preview": {"contextLength": 32768},
|
|
19
|
-
"deepseek-r1-distill-llama-70b": {"contextLength": 128000},
|
|
20
|
-
},
|
|
21
|
-
},
|
|
22
|
-
"cohere": {
|
|
23
|
-
"endpoint": "https://www.multichatai.com/api/chat/cohere",
|
|
24
|
-
"models": {
|
|
25
|
-
"command-r": {"contextLength": 128000},
|
|
26
|
-
"command": {"contextLength": 4096},
|
|
27
|
-
},
|
|
28
|
-
},
|
|
29
|
-
"google": {
|
|
30
|
-
"endpoint": "https://www.multichatai.com/api/chat/google",
|
|
31
|
-
"models": {
|
|
32
|
-
"gemini-1.5-flash-002": {"contextLength": 1048576},
|
|
33
|
-
"gemma2-9b-it": {"contextLength": 8192},
|
|
34
|
-
"gemini-2.0-flash": {"contextLength": 128000},
|
|
35
|
-
},
|
|
36
|
-
"message_format": "parts",
|
|
37
|
-
},
|
|
38
|
-
"deepinfra": {
|
|
39
|
-
"endpoint": "https://www.multichatai.com/api/chat/deepinfra",
|
|
40
|
-
"models": {
|
|
41
|
-
"Sao10K/L3.1-70B-Euryale-v2.2": {"contextLength": 8192},
|
|
42
|
-
"Gryphe/MythoMax-L2-13b": {"contextLength": 8192},
|
|
43
|
-
"nvidia/Llama-3.1-Nemotron-70B-Instruct": {"contextLength": 131072},
|
|
44
|
-
"deepseek-ai/DeepSeek-V3": {"contextLength": 32000},
|
|
45
|
-
"meta-llama/Meta-Llama-3.1-405B-Instruct": {"contextLength": 131072},
|
|
46
|
-
"NousResearch/Hermes-3-Llama-3.1-405B": {"contextLength": 131072},
|
|
47
|
-
"gemma-2-27b-it": {"contextLength": 8192},
|
|
48
|
-
},
|
|
49
|
-
},
|
|
50
|
-
"mistral": {
|
|
51
|
-
"endpoint": "https://www.multichatai.com/api/chat/mistral",
|
|
52
|
-
"models": {
|
|
53
|
-
"mistral-small-latest": {"contextLength": 32000},
|
|
54
|
-
"codestral-latest": {"contextLength": 32000},
|
|
55
|
-
"open-mistral-7b": {"contextLength": 8000},
|
|
56
|
-
"open-mixtral-8x7b": {"contextLength": 8000},
|
|
57
|
-
},
|
|
58
|
-
},
|
|
59
|
-
"alibaba": {
|
|
60
|
-
"endpoint": "https://www.multichatai.com/api/chat/alibaba",
|
|
61
|
-
"models": {
|
|
62
|
-
"Qwen/Qwen2.5-72B-Instruct": {"contextLength": 32768},
|
|
63
|
-
"Qwen/Qwen2.5-Coder-32B-Instruct": {"contextLength": 32768},
|
|
64
|
-
"Qwen/QwQ-32B-Preview": {"contextLength": 32768},
|
|
65
|
-
},
|
|
66
|
-
},
|
|
67
|
-
}
|
|
68
|
-
|
|
69
|
-
class MultiChatAI(Provider):
|
|
70
|
-
"""
|
|
71
|
-
A class to interact with the MultiChatAI API.
|
|
72
|
-
"""
|
|
73
|
-
AVAILABLE_MODELS = [
|
|
74
|
-
# Llama Models
|
|
75
|
-
"llama-3.3-70b-versatile",
|
|
76
|
-
"llama-3.2-11b-vision-preview",
|
|
77
|
-
"deepseek-r1-distill-llama-70b",
|
|
78
|
-
|
|
79
|
-
# Cohere Models
|
|
80
|
-
# "command-r", # >>>> NOT WORKING
|
|
81
|
-
# "command", # >>>> NOT WORKING
|
|
82
|
-
|
|
83
|
-
# Google Models
|
|
84
|
-
# "gemini-1.5-flash-002", #>>>> NOT WORKING
|
|
85
|
-
"gemma2-9b-it",
|
|
86
|
-
"gemini-2.0-flash",
|
|
87
|
-
|
|
88
|
-
# DeepInfra Models
|
|
89
|
-
"Sao10K/L3.1-70B-Euryale-v2.2",
|
|
90
|
-
"Gryphe/MythoMax-L2-13b",
|
|
91
|
-
"nvidia/Llama-3.1-Nemotron-70B-Instruct",
|
|
92
|
-
"deepseek-ai/DeepSeek-V3",
|
|
93
|
-
"meta-llama/Meta-Llama-3.1-405B-Instruct",
|
|
94
|
-
"NousResearch/Hermes-3-Llama-3.1-405B",
|
|
95
|
-
# "gemma-2-27b-it", # >>>> NOT WORKING
|
|
96
|
-
|
|
97
|
-
# Mistral Models
|
|
98
|
-
# "mistral-small-latest", # >>>> NOT WORKING
|
|
99
|
-
# "codestral-latest", # >>>> NOT WORKING
|
|
100
|
-
# "open-mistral-7b", # >>>> NOT WORKING
|
|
101
|
-
# "open-mixtral-8x7b", # >>>> NOT WORKING
|
|
102
|
-
|
|
103
|
-
# Alibaba Models
|
|
104
|
-
"Qwen/Qwen2.5-72B-Instruct",
|
|
105
|
-
"Qwen/Qwen2.5-Coder-32B-Instruct",
|
|
106
|
-
"Qwen/QwQ-32B-Preview"
|
|
107
|
-
]
|
|
108
|
-
|
|
109
|
-
def __init__(
|
|
110
|
-
self,
|
|
111
|
-
is_conversation: bool = True,
|
|
112
|
-
max_tokens: int = 4000, # Note: max_tokens is not directly used by this API
|
|
113
|
-
timeout: int = 30,
|
|
114
|
-
intro: str = None,
|
|
115
|
-
filepath: str = None,
|
|
116
|
-
update_file: bool = True,
|
|
117
|
-
proxies: dict = {},
|
|
118
|
-
history_offset: int = 10250,
|
|
119
|
-
act: str = None,
|
|
120
|
-
model: str = "llama-3.3-70b-versatile",
|
|
121
|
-
system_prompt: str = "You are a friendly, helpful AI assistant.",
|
|
122
|
-
temperature: float = 0.5,
|
|
123
|
-
presence_penalty: int = 0, # Note: presence_penalty is not used by this API
|
|
124
|
-
frequency_penalty: int = 0, # Note: frequency_penalty is not used by this API
|
|
125
|
-
top_p: float = 1 # Note: top_p is not used by this API
|
|
126
|
-
):
|
|
127
|
-
"""Initializes the MultiChatAI API client."""
|
|
128
|
-
if model not in self.AVAILABLE_MODELS:
|
|
129
|
-
raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
|
|
130
|
-
# Initialize curl_cffi Session
|
|
131
|
-
self.session = Session()
|
|
132
|
-
self.is_conversation = is_conversation
|
|
133
|
-
self.max_tokens_to_sample = max_tokens
|
|
134
|
-
self.timeout = timeout
|
|
135
|
-
self.last_response = {}
|
|
136
|
-
self.model = model
|
|
137
|
-
self.system_prompt = system_prompt
|
|
138
|
-
self.temperature = temperature
|
|
139
|
-
self.presence_penalty = presence_penalty
|
|
140
|
-
self.frequency_penalty = frequency_penalty
|
|
141
|
-
self.top_p = top_p
|
|
142
|
-
|
|
143
|
-
# Initialize LitAgent for user agent generation (keep if needed for other headers)
|
|
144
|
-
self.agent = LitAgent()
|
|
145
|
-
|
|
146
|
-
self.headers = {
|
|
147
|
-
"accept": "*/*",
|
|
148
|
-
"accept-language": "en-US,en;q=0.9",
|
|
149
|
-
"content-type": "text/plain;charset=UTF-8", # Keep content-type
|
|
150
|
-
"origin": "https://www.multichatai.com",
|
|
151
|
-
"referer": "https://www.multichatai.com/",
|
|
152
|
-
"user-agent": self.agent.random(),
|
|
153
|
-
# Add sec-ch-ua headers if needed for impersonation consistency
|
|
154
|
-
}
|
|
155
|
-
|
|
156
|
-
# Update curl_cffi session headers, proxies, and cookies
|
|
157
|
-
self.session.headers.update(self.headers)
|
|
158
|
-
self.session.proxies = proxies # Assign proxies directly
|
|
159
|
-
# Set cookies on the session object for curl_cffi
|
|
160
|
-
self.session.cookies.set("session", uuid.uuid4().hex)
|
|
161
|
-
|
|
162
|
-
self.__available_optimizers = (
|
|
163
|
-
method for method in dir(Optimizers)
|
|
164
|
-
if callable(getattr(Optimizers, method)) and not method.startswith("__")
|
|
165
|
-
)
|
|
166
|
-
|
|
167
|
-
Conversation.intro = (
|
|
168
|
-
AwesomePrompts().get_act(
|
|
169
|
-
act, raise_not_found=True, default=None, case_insensitive=True
|
|
170
|
-
)
|
|
171
|
-
if act
|
|
172
|
-
else intro or Conversation.intro
|
|
173
|
-
)
|
|
174
|
-
|
|
175
|
-
self.conversation = Conversation(
|
|
176
|
-
is_conversation, self.max_tokens_to_sample, filepath, update_file
|
|
177
|
-
)
|
|
178
|
-
self.conversation.history_offset = history_offset
|
|
179
|
-
|
|
180
|
-
self.provider = self._get_provider_from_model(self.model)
|
|
181
|
-
self.model_name = self.model
|
|
182
|
-
|
|
183
|
-
def _get_endpoint(self) -> str:
|
|
184
|
-
"""Get the API endpoint for the current provider."""
|
|
185
|
-
return MODEL_CONFIGS[self.provider]["endpoint"]
|
|
186
|
-
|
|
187
|
-
def _get_chat_settings(self) -> Dict[str, Any]:
|
|
188
|
-
"""Get chat settings for the current model."""
|
|
189
|
-
base_settings = MODEL_CONFIGS[self.provider]["models"][self.model_name]
|
|
190
|
-
return {
|
|
191
|
-
"model": self.model,
|
|
192
|
-
"prompt": self.system_prompt,
|
|
193
|
-
"temperature": self.temperature,
|
|
194
|
-
"contextLength": base_settings["contextLength"],
|
|
195
|
-
"includeProfileContext": True,
|
|
196
|
-
"includeWorkspaceInstructions": True,
|
|
197
|
-
"embeddingsProvider": "openai"
|
|
198
|
-
}
|
|
199
|
-
|
|
200
|
-
def _get_system_message(self) -> str:
|
|
201
|
-
"""Generate system message with current date."""
|
|
202
|
-
current_date = datetime.now().strftime("%d/%m/%Y")
|
|
203
|
-
return f"Today is {current_date}.\n\nUser Instructions:\n{self.system_prompt}"
|
|
204
|
-
|
|
205
|
-
def _build_messages(self, conversation_prompt: str) -> list:
|
|
206
|
-
"""Build messages array based on provider type."""
|
|
207
|
-
if self.provider == "google":
|
|
208
|
-
return [
|
|
209
|
-
{"role": "user", "parts": self._get_system_message()},
|
|
210
|
-
{"role": "model", "parts": "I will follow your instructions."},
|
|
211
|
-
{"role": "user", "parts": conversation_prompt}
|
|
212
|
-
]
|
|
213
|
-
else:
|
|
214
|
-
return [
|
|
215
|
-
{"role": "system", "content": self._get_system_message()},
|
|
216
|
-
{"role": "user", "content": conversation_prompt}
|
|
217
|
-
]
|
|
218
|
-
|
|
219
|
-
def _get_provider_from_model(self, model: str) -> str:
|
|
220
|
-
"""Determine the provider based on the model name."""
|
|
221
|
-
for provider, config in MODEL_CONFIGS.items():
|
|
222
|
-
if model in config["models"]:
|
|
223
|
-
return provider
|
|
224
|
-
|
|
225
|
-
available_models = []
|
|
226
|
-
for provider, config in MODEL_CONFIGS.items():
|
|
227
|
-
for model_name in config["models"].keys():
|
|
228
|
-
available_models.append(f"{provider}/{model_name}")
|
|
229
|
-
|
|
230
|
-
error_msg = f"Invalid model: {model}\nAvailable models: {', '.join(available_models)}"
|
|
231
|
-
raise ValueError(error_msg)
|
|
232
|
-
|
|
233
|
-
def _make_request(self, payload: Dict[str, Any]) -> Any:
|
|
234
|
-
"""Make the API request with proper error handling."""
|
|
235
|
-
try:
|
|
236
|
-
# Use curl_cffi session post with impersonate
|
|
237
|
-
# Cookies are handled by the session
|
|
238
|
-
response = self.session.post(
|
|
239
|
-
self._get_endpoint(),
|
|
240
|
-
# headers are set on the session
|
|
241
|
-
json=payload,
|
|
242
|
-
timeout=self.timeout,
|
|
243
|
-
# proxies are set on the session
|
|
244
|
-
impersonate="chrome110" # Use a common impersonation profile
|
|
245
|
-
)
|
|
246
|
-
response.raise_for_status() # Check for HTTP errors
|
|
247
|
-
return response
|
|
248
|
-
except CurlError as e: # Catch CurlError
|
|
249
|
-
raise exceptions.FailedToGenerateResponseError(f"API request failed (CurlError): {e}") from e
|
|
250
|
-
except Exception as e: # Catch other potential exceptions (like HTTPError)
|
|
251
|
-
err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
|
|
252
|
-
raise exceptions.FailedToGenerateResponseError(f"API request failed ({type(e).__name__}): {e} - {err_text}") from e
|
|
253
|
-
|
|
254
|
-
def ask(
|
|
255
|
-
self,
|
|
256
|
-
prompt: str,
|
|
257
|
-
raw: bool = False, # Keep raw param for interface consistency
|
|
258
|
-
optimizer: str = None,
|
|
259
|
-
conversationally: bool = False,
|
|
260
|
-
# Add stream parameter for consistency, though API doesn't stream
|
|
261
|
-
stream: bool = False
|
|
262
|
-
) -> Dict[str, Any]:
|
|
263
|
-
"""Sends a prompt to the MultiChatAI API and returns the response."""
|
|
264
|
-
conversation_prompt = self.conversation.gen_complete_prompt(prompt)
|
|
265
|
-
if optimizer:
|
|
266
|
-
if optimizer in self.__available_optimizers:
|
|
267
|
-
conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
|
|
268
|
-
else:
|
|
269
|
-
error_msg = f"Optimizer is not one of {self.__available_optimizers}"
|
|
270
|
-
raise exceptions.FailedToGenerateResponseError(error_msg)
|
|
271
|
-
|
|
272
|
-
payload = {
|
|
273
|
-
"chatSettings": self._get_chat_settings(),
|
|
274
|
-
"messages": self._build_messages(conversation_prompt),
|
|
275
|
-
"customModelId": "",
|
|
276
|
-
}
|
|
277
|
-
|
|
278
|
-
# API does not stream, implement non-stream logic directly
|
|
279
|
-
response = self._make_request(payload)
|
|
280
|
-
try:
|
|
281
|
-
# Use response.text which is already decoded
|
|
282
|
-
response_text_raw = response.text # Get raw text
|
|
283
|
-
|
|
284
|
-
# Process the text using sanitize_stream (even though it's not streaming)
|
|
285
|
-
processed_stream = sanitize_stream(
|
|
286
|
-
data=response_text_raw,
|
|
287
|
-
intro_value=None, # No prefix
|
|
288
|
-
to_json=False # It's plain text
|
|
289
|
-
)
|
|
290
|
-
# Aggregate the single result
|
|
291
|
-
full_response = "".join(list(processed_stream)).strip()
|
|
292
|
-
|
|
293
|
-
self.last_response = {"text": full_response} # Store processed text
|
|
294
|
-
self.conversation.update_chat_history(prompt, full_response)
|
|
295
|
-
# Return dict or raw string based on raw flag
|
|
296
|
-
return full_response if raw else self.last_response
|
|
297
|
-
except Exception as e: # Catch potential errors during text processing
|
|
298
|
-
raise exceptions.FailedToGenerateResponseError(f"Failed to process response: {e}") from e
|
|
299
|
-
|
|
300
|
-
def chat(
|
|
301
|
-
self,
|
|
302
|
-
prompt: str,
|
|
303
|
-
optimizer: str = None,
|
|
304
|
-
conversationally: bool = False,
|
|
305
|
-
# Add stream parameter for consistency
|
|
306
|
-
stream: bool = False
|
|
307
|
-
) -> str:
|
|
308
|
-
"""Generate response."""
|
|
309
|
-
# Since ask() now handles both stream=True/False by returning the full response dict/str:
|
|
310
|
-
response_data = self.ask(
|
|
311
|
-
prompt,
|
|
312
|
-
stream=False, # Call ask in non-stream mode internally
|
|
313
|
-
raw=False, # Ensure ask returns dict
|
|
314
|
-
optimizer=optimizer,
|
|
315
|
-
conversationally=conversationally
|
|
316
|
-
)
|
|
317
|
-
# If stream=True was requested, simulate streaming by yielding the full message at once
|
|
318
|
-
if stream:
|
|
319
|
-
def stream_wrapper():
|
|
320
|
-
yield self.get_message(response_data)
|
|
321
|
-
return stream_wrapper()
|
|
322
|
-
else:
|
|
323
|
-
# If stream=False, return the full message directly
|
|
324
|
-
return self.get_message(response_data)
|
|
325
|
-
|
|
326
|
-
def get_message(self, response: Union[Dict[str, Any], str]) -> str:
|
|
327
|
-
"""
|
|
328
|
-
Retrieves message from response.
|
|
329
|
-
|
|
330
|
-
Args:
|
|
331
|
-
response (Union[Dict[str, Any], str]): The response to extract the message from
|
|
332
|
-
|
|
333
|
-
Returns:
|
|
334
|
-
str: The extracted message text
|
|
335
|
-
"""
|
|
336
|
-
if isinstance(response, dict):
|
|
337
|
-
return response.get("text", "")
|
|
338
|
-
return str(response)
|
|
339
|
-
|
|
340
|
-
if __name__ == "__main__":
|
|
341
|
-
print("-" * 80)
|
|
342
|
-
print(f"{'Model':<50} {'Status':<10} {'Response'}")
|
|
343
|
-
print("-" * 80)
|
|
344
|
-
|
|
345
|
-
# Test all available models
|
|
346
|
-
working = 0
|
|
347
|
-
total = len(MultiChatAI.AVAILABLE_MODELS)
|
|
348
|
-
|
|
349
|
-
for model in MultiChatAI.AVAILABLE_MODELS:
|
|
350
|
-
try:
|
|
351
|
-
test_ai = MultiChatAI(model=model, timeout=60)
|
|
352
|
-
response = test_ai.chat("Say 'Hello' in one word")
|
|
353
|
-
response_text = response
|
|
354
|
-
|
|
355
|
-
if response_text and len(response_text.strip()) > 0:
|
|
356
|
-
status = "✓"
|
|
357
|
-
# Truncate response if too long
|
|
358
|
-
display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
|
|
359
|
-
else:
|
|
360
|
-
status = "✗"
|
|
361
|
-
display_text = "Empty or invalid response"
|
|
362
|
-
print(f"{model:<50} {status:<10} {display_text}")
|
|
363
|
-
except Exception as e:
|
|
364
|
-
print(f"{model:<50} {'✗':<10} {str(e)}")
|
webscout/Provider/scira_chat.py
DELETED
|
@@ -1,297 +0,0 @@
|
|
|
1
|
-
from os import system
|
|
2
|
-
from curl_cffi import CurlError
|
|
3
|
-
from curl_cffi.requests import Session
|
|
4
|
-
import json
|
|
5
|
-
import uuid
|
|
6
|
-
import re
|
|
7
|
-
from typing import Any, Dict, Optional, Union, List
|
|
8
|
-
from webscout.AIutel import Optimizers
|
|
9
|
-
from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
|
|
10
|
-
from webscout.AIutel import AwesomePrompts
|
|
11
|
-
from webscout.AIbase import Provider
|
|
12
|
-
from webscout import exceptions
|
|
13
|
-
from webscout.litagent import LitAgent
|
|
14
|
-
|
|
15
|
-
class SciraAI(Provider):
    """
    A class to interact with the Scira AI chat API (https://scira.ai).

    Requests are POSTed to the search endpoint in "chat" mode (no web
    search); the streamed '0:"..."' chunks in the response body are
    aggregated into a single response string.
    """

    # Maps Scira model identifiers (API values) to human-readable names.
    AVAILABLE_MODELS = {
        "scira-default": "Grok3-mini",  # thinking model
        "scira-grok-3": "Grok3",
        "scira-anthropic": "Sonnet 3.7 thinking",
        "scira-vision": "Grok2-Vision",  # vision model
        "scira-4.1-mini": "GPT4.1-mini",
        "scira-qwq": "QWQ-32B",
        "scira-o4-mini": "o4-mini",
        "scira-google": "gemini 2.5 flash"
    }

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2049,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = None,
        history_offset: int = 10250,
        act: str = None,
        model: str = "scira-default",
        chat_id: str = None,
        user_id: str = None,
        browser: str = "chrome",
        system_prompt: str = "You are a helpful assistant.",
    ):
        """Initializes the Scira AI API client.

        Args:
            is_conversation (bool): Whether to maintain conversation history.
            max_tokens (int): Maximum number of tokens to generate.
            timeout (int): Request timeout in seconds.
            intro (str): Introduction text for the conversation.
            filepath (str): Path to save conversation history.
            update_file (bool): Whether to update the conversation history file.
            proxies (dict): Proxy configuration for requests (None means no proxies).
            history_offset (int): Maximum history length in characters.
            act (str): Persona for the AI to adopt.
            model (str): Model to use, must be one of AVAILABLE_MODELS.
            chat_id (str): Unique identifier for the chat session (random UUID if omitted).
            user_id (str): Unique identifier for the user (random if omitted).
            browser (str): Browser to emulate in requests.
            system_prompt (str): System prompt for the AI.

        Raises:
            ValueError: If ``model`` is not a key of AVAILABLE_MODELS.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.url = "https://scira.ai/api/search"

        # Initialize LitAgent for user agent generation
        self.agent = LitAgent()
        # Use fingerprinting to create a consistent browser identity
        self.fingerprint = self.agent.generate_fingerprint(browser)
        self.system_prompt = system_prompt

        # Use the fingerprint for headers
        self.headers = {
            "Accept": self.fingerprint["accept"],
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": self.fingerprint["accept_language"],
            "Content-Type": "application/json",
            "Origin": "https://scira.ai",
            "Referer": "https://scira.ai/",
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin"
        }

        self.session = Session()  # Use curl_cffi Session
        self.session.headers.update(self.headers)
        # FIX: was a mutable default argument (proxies: dict = {}); use a
        # None sentinel so all instances don't share one dict.
        self.session.proxies = proxies if proxies is not None else {}

        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.chat_id = chat_id or str(uuid.uuid4())
        self.user_id = user_id or f"user_{str(uuid.uuid4())[:8].upper()}"

        # Always use chat mode (no web search)
        self.search_mode = "chat"

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def refresh_identity(self, browser: str = None):
        """
        Refreshes the browser identity fingerprint.

        Args:
            browser: Specific browser to use for the new fingerprint
                (defaults to the browser type of the current fingerprint).

        Returns:
            The newly generated fingerprint dict.
        """
        browser = browser or self.fingerprint.get("browser_type", "chrome")
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Update headers with new fingerprint
        self.headers.update({
            "Accept": self.fingerprint["accept"],
            "Accept-Language": self.fingerprint["accept_language"],
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
        })

        # Update session headers
        for header, value in self.headers.items():
            self.session.headers[header] = value

        return self.fingerprint

    @staticmethod
    def _scira_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
        """Extracts content from the Scira stream format '0:"..."'.

        Returns the decoded text of the chunk, or None when the chunk does
        not match the '0:"..."' stream line format.
        """
        if isinstance(chunk, str):
            match = re.search(r'0:"(.*?)"(?=,|$)', chunk)  # Look for 0:"...", possibly followed by comma or end of string
            if match:
                # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes.
                # NOTE(review): encode() produces UTF-8 but 'unicode_escape'
                # decodes bytes as Latin-1, so non-ASCII literals may be
                # mangled — confirm against real API payloads before changing.
                content = match.group(1).encode().decode('unicode_escape')
                return content.replace('\\\\', '\\').replace('\\"', '"')
        return None

    def ask(
        self,
        prompt: str,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Dict[str, Any]:  # Note: Stream parameter removed as API doesn't seem to support it
        """Sends ``prompt`` to Scira and returns ``{"text": full_response}``.

        Args:
            prompt: The user message to send.
            optimizer: Name of an Optimizers method to rewrite the prompt.
            conversationally: Apply the optimizer to the full conversation
                prompt instead of the bare prompt.

        Raises:
            Exception: If ``optimizer`` is not a known optimizer name.
            exceptions.FailedToGenerateResponseError: On transport or HTTP failure.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        messages = [
            {"role": "system", "content": self.system_prompt},
            {"role": "user", "content": conversation_prompt, "parts": [{"type": "text", "text": conversation_prompt}]}
        ]

        # Prepare the request payload
        payload = {
            "id": self.chat_id,
            "messages": messages,
            "model": self.model,
            "group": self.search_mode,
            "user_id": self.user_id,
            "timezone": "Asia/Calcutta"
        }

        try:
            # Use curl_cffi post with impersonate
            response = self.session.post(
                self.url,
                json=payload,
                timeout=self.timeout,
                impersonate="chrome120"  # Add impersonate
            )
            if response.status_code != 200:
                # Try to get response content for better error messages
                try:
                    error_content = response.text
                except Exception:  # FIX: was a bare except (also swallowed KeyboardInterrupt)
                    error_content = "<could not read response content>"

                if response.status_code in [403, 429]:
                    # Likely bot detection / rate limiting: rotate identity and retry once.
                    print(f"Received status code {response.status_code}, refreshing identity...")
                    self.refresh_identity()
                    response = self.session.post(
                        self.url, json=payload, timeout=self.timeout,
                        impersonate="chrome120"  # Add impersonate to retry
                    )
                    if not response.ok:
                        raise exceptions.FailedToGenerateResponseError(
                            f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {error_content}"
                        )
                    print("Identity refreshed successfully.")
                else:
                    raise exceptions.FailedToGenerateResponseError(
                        f"Request failed with status code {response.status_code}. Response: {error_content}"
                    )

            response_text_raw = response.text  # Get raw response text

            # Process the text using sanitize_stream line by line
            processed_stream = sanitize_stream(
                data=response_text_raw.splitlines(),  # Split into lines
                intro_value=None,  # No simple prefix
                to_json=False,  # Content is not JSON
                content_extractor=self._scira_extractor  # Use the specific extractor
            )

            # Aggregate the results from the generator
            full_response = ""
            for content in processed_stream:
                if content and isinstance(content, str):
                    full_response += content

            self.last_response = {"text": full_response}
            self.conversation.update_chat_history(prompt, full_response)
            return {"text": full_response}
        except CurlError as e:  # Catch CurlError
            raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
        except Exception as e:
            raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

    def chat(
        self,
        prompt: str,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        """Convenience wrapper around :meth:`ask` that returns plain text."""
        return self.get_message(
            self.ask(
                prompt, optimizer=optimizer, conversationally=conversationally
            )
        )

    def get_message(self, response: dict) -> str:
        """Extracts the message text from an :meth:`ask` response dict."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        # _scira_extractor already unescaped quotes/backslashes; only literal
        # "\n" escape sequences remain to convert into real newlines.
        # FIX: dropped the chained .replace('\\n\\n', '\n\n') — after the
        # first replace no literal backslash-n pair can remain, so it was dead code.
        return response.get("text", "").replace('\\n', '\n')
|
266
|
-
|
|
267
|
-
if __name__ == "__main__":
    # Smoke-test every advertised model and print a one-line verdict each.
    print("-" * 100)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 100)

    test_prompt = "Say 'Hello' in one word"

    for model in SciraAI.AVAILABLE_MODELS:
        print(f"\rTesting {model}...", end="")

        try:
            client = SciraAI(model=model, timeout=120)  # Increased timeout
            reply = client.chat(test_prompt)

            if reply and reply.strip():
                status = "✓"
                # Strip un-encodable bytes, then cap the preview at 50 chars.
                sanitized = reply.strip().encode('utf-8', errors='ignore').decode('utf-8')
                shown = sanitized if len(sanitized) <= 50 else sanitized[:50] + "..."
            else:
                status = "✗"
                shown = "Empty or invalid response"

            print(f"\r{model:<50} {status:<10} {shown}")
        except Exception as exc:
            reason = str(exc)
            # Keep very long error messages to a single report line.
            if len(reason) > 100:
                reason = reason[:97] + "..."
            print(f"\r{model:<50} {'✗':<10} Error: {reason}")