webscout-8.2.7-py3-none-any.whl → webscout-8.2.9-py3-none-any.whl
This diff compares the contents of two publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registry.
- webscout/AIauto.py +33 -15
- webscout/AIbase.py +96 -37
- webscout/AIutel.py +703 -250
- webscout/Bard.py +441 -323
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/__init__.py +10 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
- webscout/Extra/GitToolkit/gitapi/user.py +96 -0
- webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/YTdownloader.py +957 -0
- webscout/Extra/YTToolkit/__init__.py +3 -0
- webscout/Extra/YTToolkit/transcriber.py +476 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
- webscout/Extra/YTToolkit/ytapi/https.py +88 -0
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
- webscout/Extra/YTToolkit/ytapi/query.py +40 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +232 -0
- webscout/Extra/__init__.py +7 -0
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder.py +1105 -0
- webscout/Extra/autocoder/autocoder_utiles.py +332 -0
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/gguf.py +684 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/tempmail/__init__.py +28 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +161 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/emailnator.py +84 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Extra/weather.md +281 -0
- webscout/Extra/weather.py +194 -0
- webscout/Extra/weather_ascii.py +76 -0
- webscout/Litlogger/README.md +10 -0
- webscout/Litlogger/__init__.py +15 -0
- webscout/Litlogger/formats.py +4 -0
- webscout/Litlogger/handlers.py +103 -0
- webscout/Litlogger/levels.py +13 -0
- webscout/Litlogger/logger.py +92 -0
- webscout/Provider/AI21.py +177 -0
- webscout/Provider/AISEARCH/DeepFind.py +254 -0
- webscout/Provider/AISEARCH/Perplexity.py +333 -0
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +9 -0
- webscout/Provider/AISEARCH/felo_search.py +202 -0
- webscout/Provider/AISEARCH/genspark_search.py +324 -0
- webscout/Provider/AISEARCH/hika_search.py +186 -0
- webscout/Provider/AISEARCH/iask_search.py +410 -0
- webscout/Provider/AISEARCH/monica_search.py +220 -0
- webscout/Provider/AISEARCH/scira_search.py +298 -0
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -0
- webscout/Provider/Aitopia.py +316 -0
- webscout/Provider/AllenAI.py +440 -0
- webscout/Provider/Andi.py +228 -0
- webscout/Provider/Blackboxai.py +791 -0
- webscout/Provider/ChatGPTClone.py +237 -0
- webscout/Provider/ChatGPTGratis.py +194 -0
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +324 -0
- webscout/Provider/Cohere.py +208 -0
- webscout/Provider/Deepinfra.py +340 -0
- webscout/Provider/ExaAI.py +261 -0
- webscout/Provider/ExaChat.py +358 -0
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/Gemini.py +169 -0
- webscout/Provider/GithubChat.py +369 -0
- webscout/Provider/GizAI.py +295 -0
- webscout/Provider/Glider.py +225 -0
- webscout/Provider/Groq.py +801 -0
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +206 -0
- webscout/Provider/HeckAI.py +375 -0
- webscout/Provider/HuggingFaceChat.py +469 -0
- webscout/Provider/Hunyuan.py +283 -0
- webscout/Provider/Jadve.py +291 -0
- webscout/Provider/Koboldai.py +384 -0
- webscout/Provider/LambdaChat.py +411 -0
- webscout/Provider/Llama3.py +259 -0
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +198 -0
- webscout/Provider/Nemotron.py +218 -0
- webscout/Provider/Netwrck.py +270 -0
- webscout/Provider/OLLAMA.py +396 -0
- webscout/Provider/OPENAI/BLACKBOXAI.py +766 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +283 -0
- webscout/Provider/OPENAI/NEMOTRON.py +232 -0
- webscout/Provider/OPENAI/Qwen3.py +283 -0
- webscout/Provider/OPENAI/README.md +952 -0
- webscout/Provider/OPENAI/TwoAI.py +357 -0
- webscout/Provider/OPENAI/__init__.py +40 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -0
- webscout/Provider/OPENAI/api.py +969 -0
- webscout/Provider/OPENAI/base.py +249 -0
- webscout/Provider/OPENAI/c4ai.py +373 -0
- webscout/Provider/OPENAI/chatgpt.py +556 -0
- webscout/Provider/OPENAI/chatgptclone.py +494 -0
- webscout/Provider/OPENAI/chatsandbox.py +173 -0
- webscout/Provider/OPENAI/copilot.py +242 -0
- webscout/Provider/OPENAI/deepinfra.py +322 -0
- webscout/Provider/OPENAI/e2b.py +1414 -0
- webscout/Provider/OPENAI/exaai.py +417 -0
- webscout/Provider/OPENAI/exachat.py +444 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -0
- webscout/Provider/OPENAI/glider.py +326 -0
- webscout/Provider/OPENAI/groq.py +364 -0
- webscout/Provider/OPENAI/heckai.py +308 -0
- webscout/Provider/OPENAI/llmchatco.py +335 -0
- webscout/Provider/OPENAI/mcpcore.py +389 -0
- webscout/Provider/OPENAI/multichat.py +376 -0
- webscout/Provider/OPENAI/netwrck.py +357 -0
- webscout/Provider/OPENAI/oivscode.py +287 -0
- webscout/Provider/OPENAI/opkfc.py +496 -0
- webscout/Provider/OPENAI/pydantic_imports.py +172 -0
- webscout/Provider/OPENAI/scirachat.py +477 -0
- webscout/Provider/OPENAI/sonus.py +304 -0
- webscout/Provider/OPENAI/standardinput.py +433 -0
- webscout/Provider/OPENAI/textpollinations.py +339 -0
- webscout/Provider/OPENAI/toolbaz.py +413 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +364 -0
- webscout/Provider/OPENAI/uncovrAI.py +463 -0
- webscout/Provider/OPENAI/utils.py +318 -0
- webscout/Provider/OPENAI/venice.py +431 -0
- webscout/Provider/OPENAI/wisecat.py +387 -0
- webscout/Provider/OPENAI/writecream.py +163 -0
- webscout/Provider/OPENAI/x0gpt.py +365 -0
- webscout/Provider/OPENAI/yep.py +382 -0
- webscout/Provider/OpenGPT.py +209 -0
- webscout/Provider/Openai.py +496 -0
- webscout/Provider/PI.py +429 -0
- webscout/Provider/Perplexitylabs.py +415 -0
- webscout/Provider/QwenLM.py +254 -0
- webscout/Provider/Reka.py +214 -0
- webscout/Provider/StandardInput.py +290 -0
- webscout/Provider/TTI/README.md +82 -0
- webscout/Provider/TTI/__init__.py +7 -0
- webscout/Provider/TTI/aiarta.py +365 -0
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/base.py +64 -0
- webscout/Provider/TTI/fastflux.py +200 -0
- webscout/Provider/TTI/magicstudio.py +201 -0
- webscout/Provider/TTI/piclumen.py +203 -0
- webscout/Provider/TTI/pixelmuse.py +225 -0
- webscout/Provider/TTI/pollinations.py +221 -0
- webscout/Provider/TTI/utils.py +11 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +10 -0
- webscout/Provider/TTS/base.py +159 -0
- webscout/Provider/TTS/deepgram.py +156 -0
- webscout/Provider/TTS/elevenlabs.py +111 -0
- webscout/Provider/TTS/gesserit.py +128 -0
- webscout/Provider/TTS/murfai.py +113 -0
- webscout/Provider/TTS/openai_fm.py +129 -0
- webscout/Provider/TTS/parler.py +111 -0
- webscout/Provider/TTS/speechma.py +580 -0
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TTS/streamElements.py +333 -0
- webscout/Provider/TTS/utils.py +280 -0
- webscout/Provider/TeachAnything.py +229 -0
- webscout/Provider/TextPollinationsAI.py +308 -0
- webscout/Provider/TwoAI.py +475 -0
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/UNFINISHED/ChatHub.py +209 -0
- webscout/Provider/UNFINISHED/Youchat.py +330 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/puterjs.py +635 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Venice.py +258 -0
- webscout/Provider/VercelAI.py +253 -0
- webscout/Provider/WiseCat.py +233 -0
- webscout/Provider/WrDoChat.py +370 -0
- webscout/Provider/Writecream.py +246 -0
- webscout/Provider/WritingMate.py +269 -0
- webscout/Provider/__init__.py +174 -0
- webscout/Provider/ai4chat.py +174 -0
- webscout/Provider/akashgpt.py +335 -0
- webscout/Provider/asksteve.py +220 -0
- webscout/Provider/cerebras.py +290 -0
- webscout/Provider/chatglm.py +215 -0
- webscout/Provider/cleeai.py +213 -0
- webscout/Provider/copilot.py +425 -0
- webscout/Provider/elmo.py +283 -0
- webscout/Provider/freeaichat.py +285 -0
- webscout/Provider/geminiapi.py +208 -0
- webscout/Provider/granite.py +235 -0
- webscout/Provider/hermes.py +266 -0
- webscout/Provider/julius.py +223 -0
- webscout/Provider/koala.py +170 -0
- webscout/Provider/learnfastai.py +325 -0
- webscout/Provider/llama3mitril.py +215 -0
- webscout/Provider/llmchat.py +258 -0
- webscout/Provider/llmchatco.py +306 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +801 -0
- webscout/Provider/multichat.py +364 -0
- webscout/Provider/oivscode.py +309 -0
- webscout/Provider/samurai.py +224 -0
- webscout/Provider/scira_chat.py +299 -0
- webscout/Provider/scnet.py +243 -0
- webscout/Provider/searchchat.py +292 -0
- webscout/Provider/sonus.py +258 -0
- webscout/Provider/talkai.py +194 -0
- webscout/Provider/toolbaz.py +353 -0
- webscout/Provider/turboseek.py +266 -0
- webscout/Provider/typefully.py +202 -0
- webscout/Provider/typegpt.py +289 -0
- webscout/Provider/uncovr.py +368 -0
- webscout/Provider/x0gpt.py +299 -0
- webscout/Provider/yep.py +389 -0
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/client.py +70 -0
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/litagent/__init__.py +29 -0
- webscout/litagent/agent.py +455 -0
- webscout/litagent/constants.py +60 -0
- webscout/litprinter/__init__.py +59 -0
- webscout/optimizers.py +419 -419
- webscout/scout/README.md +404 -0
- webscout/scout/__init__.py +8 -0
- webscout/scout/core/__init__.py +7 -0
- webscout/scout/core/crawler.py +210 -0
- webscout/scout/core/scout.py +607 -0
- webscout/scout/core/search_result.py +96 -0
- webscout/scout/core/text_analyzer.py +63 -0
- webscout/scout/core/text_utils.py +277 -0
- webscout/scout/core/web_analyzer.py +52 -0
- webscout/scout/element.py +478 -0
- webscout/scout/parsers/__init__.py +69 -0
- webscout/scout/parsers/html5lib_parser.py +172 -0
- webscout/scout/parsers/html_parser.py +236 -0
- webscout/scout/parsers/lxml_parser.py +178 -0
- webscout/scout/utils.py +37 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/swiftcli/__init__.py +95 -0
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +297 -0
- webscout/swiftcli/core/context.py +104 -0
- webscout/swiftcli/core/group.py +241 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +221 -0
- webscout/swiftcli/decorators/options.py +220 -0
- webscout/swiftcli/decorators/output.py +252 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +135 -0
- webscout/swiftcli/plugins/manager.py +269 -0
- webscout/swiftcli/utils/__init__.py +59 -0
- webscout/swiftcli/utils/formatting.py +252 -0
- webscout/swiftcli/utils/parsing.py +267 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +135 -0
- webscout/zeroart/base.py +66 -0
- webscout/zeroart/effects.py +101 -0
- webscout/zeroart/fonts.py +1239 -0
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/METADATA +262 -83
- webscout-8.2.9.dist-info/RECORD +289 -0
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
- webscout-8.2.7.dist-info/RECORD +0 -26
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
webscout/Provider/multichat.py

@@ -0,0 +1,364 @@

```python
from curl_cffi.requests import Session
from curl_cffi import CurlError
import json
import uuid
from typing import Any, Dict, Union
from datetime import datetime
from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream  # Import sanitize_stream
from webscout.AIbase import Provider
from webscout import exceptions
from webscout.litagent import LitAgent

# Model configurations
MODEL_CONFIGS = {
    "llama": {
        "endpoint": "https://www.multichatai.com/api/chat/meta",
        "models": {
            "llama-3.3-70b-versatile": {"contextLength": 131072},
            "llama-3.2-11b-vision-preview": {"contextLength": 32768},
            "deepseek-r1-distill-llama-70b": {"contextLength": 128000},
        },
    },
    "cohere": {
        "endpoint": "https://www.multichatai.com/api/chat/cohere",
        "models": {
            "command-r": {"contextLength": 128000},
            "command": {"contextLength": 4096},
        },
    },
    "google": {
        "endpoint": "https://www.multichatai.com/api/chat/google",
        "models": {
            "gemini-1.5-flash-002": {"contextLength": 1048576},
            "gemma2-9b-it": {"contextLength": 8192},
            "gemini-2.0-flash": {"contextLength": 128000},
        },
        "message_format": "parts",
    },
    "deepinfra": {
        "endpoint": "https://www.multichatai.com/api/chat/deepinfra",
        "models": {
            "Sao10K/L3.1-70B-Euryale-v2.2": {"contextLength": 8192},
            "Gryphe/MythoMax-L2-13b": {"contextLength": 8192},
            "nvidia/Llama-3.1-Nemotron-70B-Instruct": {"contextLength": 131072},
            "deepseek-ai/DeepSeek-V3": {"contextLength": 32000},
            "meta-llama/Meta-Llama-3.1-405B-Instruct": {"contextLength": 131072},
            "NousResearch/Hermes-3-Llama-3.1-405B": {"contextLength": 131072},
            "gemma-2-27b-it": {"contextLength": 8192},
        },
    },
    "mistral": {
        "endpoint": "https://www.multichatai.com/api/chat/mistral",
        "models": {
            "mistral-small-latest": {"contextLength": 32000},
            "codestral-latest": {"contextLength": 32000},
            "open-mistral-7b": {"contextLength": 8000},
            "open-mixtral-8x7b": {"contextLength": 8000},
        },
    },
    "alibaba": {
        "endpoint": "https://www.multichatai.com/api/chat/alibaba",
        "models": {
            "Qwen/Qwen2.5-72B-Instruct": {"contextLength": 32768},
            "Qwen/Qwen2.5-Coder-32B-Instruct": {"contextLength": 32768},
            "Qwen/QwQ-32B-Preview": {"contextLength": 32768},
        },
    },
}

class MultiChatAI(Provider):
    """
    A class to interact with the MultiChatAI API.
    """
    AVAILABLE_MODELS = [
        # Llama Models
        "llama-3.3-70b-versatile",
        "llama-3.2-11b-vision-preview",
        "deepseek-r1-distill-llama-70b",

        # Cohere Models
        # "command-r",  # >>>> NOT WORKING
        # "command",  # >>>> NOT WORKING

        # Google Models
        # "gemini-1.5-flash-002",  # >>>> NOT WORKING
        "gemma2-9b-it",
        "gemini-2.0-flash",

        # DeepInfra Models
        "Sao10K/L3.1-70B-Euryale-v2.2",
        "Gryphe/MythoMax-L2-13b",
        "nvidia/Llama-3.1-Nemotron-70B-Instruct",
        "deepseek-ai/DeepSeek-V3",
        "meta-llama/Meta-Llama-3.1-405B-Instruct",
        "NousResearch/Hermes-3-Llama-3.1-405B",
        # "gemma-2-27b-it",  # >>>> NOT WORKING

        # Mistral Models
        # "mistral-small-latest",  # >>>> NOT WORKING
        # "codestral-latest",  # >>>> NOT WORKING
        # "open-mistral-7b",  # >>>> NOT WORKING
        # "open-mixtral-8x7b",  # >>>> NOT WORKING

        # Alibaba Models
        "Qwen/Qwen2.5-72B-Instruct",
        "Qwen/Qwen2.5-Coder-32B-Instruct",
        "Qwen/QwQ-32B-Preview"
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 4000,  # Note: max_tokens is not directly used by this API
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "llama-3.3-70b-versatile",
        system_prompt: str = "You are a friendly, helpful AI assistant.",
        temperature: float = 0.5,
        presence_penalty: int = 0,  # Note: presence_penalty is not used by this API
        frequency_penalty: int = 0,  # Note: frequency_penalty is not used by this API
        top_p: float = 1  # Note: top_p is not used by this API
    ):
        """Initializes the MultiChatAI API client."""
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
        # Initialize curl_cffi Session
        self.session = Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.system_prompt = system_prompt
        self.temperature = temperature
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        self.top_p = top_p

        # Initialize LitAgent for user agent generation (keep if needed for other headers)
        self.agent = LitAgent()

        self.headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "text/plain;charset=UTF-8",  # Keep content-type
            "origin": "https://www.multichatai.com",
            "referer": "https://www.multichatai.com/",
            "user-agent": self.agent.random(),
            # Add sec-ch-ua headers if needed for impersonation consistency
        }

        # Update curl_cffi session headers, proxies, and cookies
        self.session.headers.update(self.headers)
        self.session.proxies = proxies  # Assign proxies directly
        # Set cookies on the session object for curl_cffi
        self.session.cookies.set("session", uuid.uuid4().hex)

        self.__available_optimizers = (
            method for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )

        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

        self.provider = self._get_provider_from_model(self.model)
        self.model_name = self.model

    def _get_endpoint(self) -> str:
        """Get the API endpoint for the current provider."""
        return MODEL_CONFIGS[self.provider]["endpoint"]

    def _get_chat_settings(self) -> Dict[str, Any]:
        """Get chat settings for the current model."""
        base_settings = MODEL_CONFIGS[self.provider]["models"][self.model_name]
        return {
            "model": self.model,
            "prompt": self.system_prompt,
            "temperature": self.temperature,
            "contextLength": base_settings["contextLength"],
            "includeProfileContext": True,
            "includeWorkspaceInstructions": True,
            "embeddingsProvider": "openai"
        }

    def _get_system_message(self) -> str:
        """Generate system message with current date."""
        current_date = datetime.now().strftime("%d/%m/%Y")
        return f"Today is {current_date}.\n\nUser Instructions:\n{self.system_prompt}"

    def _build_messages(self, conversation_prompt: str) -> list:
        """Build messages array based on provider type."""
        if self.provider == "google":
            return [
                {"role": "user", "parts": self._get_system_message()},
                {"role": "model", "parts": "I will follow your instructions."},
                {"role": "user", "parts": conversation_prompt}
            ]
        else:
            return [
                {"role": "system", "content": self._get_system_message()},
                {"role": "user", "content": conversation_prompt}
            ]

    def _get_provider_from_model(self, model: str) -> str:
        """Determine the provider based on the model name."""
        for provider, config in MODEL_CONFIGS.items():
            if model in config["models"]:
                return provider

        available_models = []
        for provider, config in MODEL_CONFIGS.items():
            for model_name in config["models"].keys():
                available_models.append(f"{provider}/{model_name}")

        error_msg = f"Invalid model: {model}\nAvailable models: {', '.join(available_models)}"
        raise ValueError(error_msg)

    def _make_request(self, payload: Dict[str, Any]) -> Any:
        """Make the API request with proper error handling."""
        try:
            # Use curl_cffi session post with impersonate
            # Cookies are handled by the session
            response = self.session.post(
                self._get_endpoint(),
                # headers are set on the session
                json=payload,
                timeout=self.timeout,
                # proxies are set on the session
                impersonate="chrome110"  # Use a common impersonation profile
            )
            response.raise_for_status()  # Check for HTTP errors
            return response
        except CurlError as e:  # Catch CurlError
            raise exceptions.FailedToGenerateResponseError(f"API request failed (CurlError): {e}") from e
        except Exception as e:  # Catch other potential exceptions (like HTTPError)
            err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
            raise exceptions.FailedToGenerateResponseError(f"API request failed ({type(e).__name__}): {e} - {err_text}") from e

    def ask(
        self,
        prompt: str,
        raw: bool = False,  # Keep raw param for interface consistency
        optimizer: str = None,
        conversationally: bool = False,
        # Add stream parameter for consistency, though API doesn't stream
        stream: bool = False
    ) -> Dict[str, Any]:
        """Sends a prompt to the MultiChatAI API and returns the response."""
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
            else:
                error_msg = f"Optimizer is not one of {self.__available_optimizers}"
                raise exceptions.FailedToGenerateResponseError(error_msg)

        payload = {
            "chatSettings": self._get_chat_settings(),
            "messages": self._build_messages(conversation_prompt),
            "customModelId": "",
        }

        # API does not stream, implement non-stream logic directly
        response = self._make_request(payload)
        try:
            # Use response.text which is already decoded
            response_text_raw = response.text  # Get raw text

            # Process the text using sanitize_stream (even though it's not streaming)
            processed_stream = sanitize_stream(
                data=response_text_raw,
                intro_value=None,  # No prefix
                to_json=False  # It's plain text
            )
            # Aggregate the single result
            full_response = "".join(list(processed_stream)).strip()

            self.last_response = {"text": full_response}  # Store processed text
            self.conversation.update_chat_history(prompt, full_response)
            # Return dict or raw string based on raw flag
            return full_response if raw else self.last_response
        except Exception as e:  # Catch potential errors during text processing
            raise exceptions.FailedToGenerateResponseError(f"Failed to process response: {e}") from e

    def chat(
        self,
        prompt: str,
        optimizer: str = None,
        conversationally: bool = False,
        # Add stream parameter for consistency
        stream: bool = False
    ) -> str:
        """Generate response."""
        # Since ask() now handles both stream=True/False by returning the full response dict/str:
        response_data = self.ask(
            prompt,
            stream=False,  # Call ask in non-stream mode internally
            raw=False,  # Ensure ask returns dict
            optimizer=optimizer,
            conversationally=conversationally
        )
        # If stream=True was requested, simulate streaming by yielding the full message at once
        if stream:
            def stream_wrapper():
                yield self.get_message(response_data)
            return stream_wrapper()
        else:
            # If stream=False, return the full message directly
            return self.get_message(response_data)

    def get_message(self, response: Union[Dict[str, Any], str]) -> str:
        """
        Retrieves message from response.

        Args:
            response (Union[Dict[str, Any], str]): The response to extract the message from

        Returns:
            str: The extracted message text
        """
        if isinstance(response, dict):
            return response.get("text", "")
        return str(response)

if __name__ == "__main__":
    print("-" * 80)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 80)

    # Test all available models
    working = 0
    total = len(MultiChatAI.AVAILABLE_MODELS)

    for model in MultiChatAI.AVAILABLE_MODELS:
        try:
            test_ai = MultiChatAI(model=model, timeout=60)
            response = test_ai.chat("Say 'Hello' in one word")
            response_text = response

            if response_text and len(response_text.strip()) > 0:
                status = "✓"
                # Truncate response if too long
                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
            else:
                status = "✗"
                display_text = "Empty or invalid response"
            print(f"{model:<50} {status:<10} {display_text}")
        except Exception as e:
            print(f"{model:<50} {'✗':<10} {str(e)}")
```
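Note that `chat()` above never actually streams from the server; with `stream=True` it simply wraps the completed response in a one-item generator. A minimal consumption sketch, assuming the import path from the file list above (`webscout/Provider/multichat.py`) and that webscout 8.2.9 is installed:

```python
# Minimal usage sketch for the new MultiChatAI provider (webscout 8.2.9).
# Import path assumed from the file list above; the class may also be
# re-exported from webscout.Provider.
from webscout.Provider.multichat import MultiChatAI

ai = MultiChatAI(model="llama-3.3-70b-versatile", timeout=60)

# Non-streaming: returns the full completion as a str.
print(ai.chat("Summarize this package in one sentence."))

# "Streaming": the backing API is non-streaming, so this yields the
# full text exactly once (see stream_wrapper above).
for chunk in ai.chat("Now in one word.", stream=True):
    print(chunk)
```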
webscout/Provider/oivscode.py

@@ -0,0 +1,309 @@

```python
import secrets
import requests
import json
import random
import string
from typing import Union, Any, Dict, Optional, Generator

from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts, sanitize_stream
from webscout.AIbase import Provider
from webscout import exceptions


class oivscode(Provider):
    """
    A class to interact with a test API.
    """
    AVAILABLE_MODELS = [
        "*",
        "Qwen/Qwen2.5-72B-Instruct-Turbo",
        "Qwen/Qwen2.5-Coder-32B-Instruct",
        "claude-3-5-sonnet-20240620",
        "claude-3-5-sonnet-20241022",
        "claude-3-7-sonnet-20250219",
        "custom/blackbox-base",
        "custom/blackbox-pro",
        "custom/blackbox-pro-designer",
        "custom/blackbox-pro-plus",
        "deepseek-r1",
        "deepseek-v3",
        "deepseek/deepseek-chat",
        "gemini-2.5-pro-preview-03-25",
        "gpt-4o-mini",
        "grok-3-beta",
        "image-gen",
        "llama-4-maverick-17b-128e-instruct-fp8",
        "o1",
        "o3-mini",
        "o4-mini",
        "transcribe",
        "anthropic/claude-sonnet-4"
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 1024,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "claude-3-5-sonnet-20240620",
        system_prompt: str = "You are a helpful AI assistant.",
    ):
        """
        Initializes the oivscode with given parameters.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoints = [
            "https://oi-vscode-server.onrender.com/v1/chat/completions",
            "https://oi-vscode-server-2.onrender.com/v1/chat/completions",
            "https://oi-vscode-server-5.onrender.com/v1/chat/completions",
            "https://oi-vscode-server-0501.onrender.com/v1/chat/completions"
        ]
        self.api_endpoint = random.choice(self.api_endpoints)
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.system_prompt = system_prompt
        self.headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9,en-GB;q=0.8,en-IN;q=0.7",
            "cache-control": "no-cache",
            "content-type": "application/json",
            "pragma": "no-cache",
            "priority": "u=1, i",
            "sec-ch-ua": '"Not A(Brand";v="8", "Chromium";v="132", "Microsoft Edge";v="132"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-site",
        }
        self.userid = ''.join(secrets.choice(string.ascii_letters + string.digits) for _ in range(21))
        self.headers["userid"] = self.userid

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        self.session.headers.update(self.headers)
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies

    def _post_with_failover(self, payload, stream, timeout):
        """Try all endpoints until one succeeds, else raise last error."""
        endpoints = self.api_endpoints.copy()
        random.shuffle(endpoints)
        last_exception = None
        for endpoint in endpoints:
            try:
                response = self.session.post(endpoint, json=payload, stream=stream, timeout=timeout)
                if not response.ok:
                    last_exception = exceptions.FailedToGenerateResponseError(
                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                    )
                    continue
                return response
            except Exception as e:
                last_exception = e
                continue
        if last_exception:
            raise last_exception
        raise exceptions.FailedToGenerateResponseError("All API endpoints failed.")

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
        """Chat with AI (DeepInfra-style streaming and non-streaming)"""
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        payload = {
            "model": self.model,
            "messages": [
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": conversation_prompt},
            ],
            "stream": stream
        }

        def for_stream():
            streaming_text = ""
            try:
                response = self._post_with_failover(payload, stream=True, timeout=self.timeout)
                response.raise_for_status()
                # Use sanitize_stream for robust OpenAI-style streaming
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),
                    intro_value="data:",
                    to_json=True,
                    skip_markers=["[DONE]"],
                    content_extractor=lambda chunk: chunk.get("choices", [{}])[0].get("delta", {}).get("content") if isinstance(chunk, dict) else None,
                    yield_raw_on_error=False
                )
                for content_chunk in processed_stream:
                    if content_chunk and isinstance(content_chunk, str):
                        streaming_text += content_chunk
                        resp = dict(text=content_chunk)
                        yield resp if not raw else content_chunk
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Streaming request failed: {e}") from e
            finally:
                if streaming_text:
                    self.last_response = {"text": streaming_text}
                    self.conversation.update_chat_history(prompt, streaming_text)

        def for_non_stream():
            try:
                response = self._post_with_failover(payload, stream=False, timeout=self.timeout)
                response.raise_for_status()
                response_text = response.text
                processed_stream = sanitize_stream(
                    data=response_text,
                    to_json=True,
                    intro_value=None,
                    content_extractor=lambda chunk: chunk.get("choices", [{}])[0].get("message", {}).get("content") if isinstance(chunk, dict) else None,
                    yield_raw_on_error=False
                )
                content = next(processed_stream, None)
                content = content if isinstance(content, str) else ""
                self.last_response = {"text": content}
                self.conversation.update_chat_history(prompt, content)
                return self.last_response if not raw else content
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Non-streaming request failed: {e}") from e

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """Generate response `str`
        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            str: Response generated
        """
        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                )
            )
        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Retrieves message content from response, handling both streaming and non-streaming formats."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        # Streaming chunk: choices[0]["delta"]["content"]
        if "choices" in response and response["choices"]:
            choice = response["choices"][0]
            if "delta" in choice and "content" in choice["delta"]:
                return choice["delta"]["content"]
            if "message" in choice and "content" in choice["message"]:
                return choice["message"]["content"]
        # Fallback for non-standard or legacy responses
        if "text" in response:
            return response["text"]
        return ""

    # def fetch_available_models(self):
    #     """Fetches available models from the /models endpoint of all API endpoints and prints models per endpoint."""
    #     endpoints = self.api_endpoints.copy()
    #     random.shuffle(endpoints)
    #     results = {}
    #     errors = []
    #     for endpoint in endpoints:
    #         models_url = endpoint.replace('/v1/chat/completions', '/v1/models')
    #         try:
    #             response = self.session.get(models_url, timeout=self.timeout)
    #             if response.ok:
    #                 data = response.json()
    #                 if isinstance(data, dict) and "data" in data:
    #                     models = [m["id"] if isinstance(m, dict) and "id" in m else m for m in data["data"]]
    #                 elif isinstance(data, list):
    #                     models = data
    #                 else:
    #                     models = list(data.keys()) if isinstance(data, dict) else []
    #                 results[models_url] = models
    #             else:
    #                 errors.append(f"Failed to fetch models from {models_url}: {response.status_code} {response.text}")
    #         except Exception as e:
    #             errors.append(f"Error fetching from {models_url}: {e}")
    #     if results:
    #         for url, models in results.items():
    #             print(f"Models from {url}:")
    #             if models:
    #                 for m in sorted(models):
    #                     print(f"  {m}")
    #             else:
    #                 print("  No models found.")
    #         return results
    #     else:
    #         print("No models found from any endpoint.")
    #         for err in errors:
    #             print(err)
    #         return {}

if __name__ == "__main__":
    from rich import print
    chatbot = oivscode()
    # fetch_available_models is commented out above, so calling it here
    # would raise AttributeError; the call is disabled accordingly.
    # print(chatbot.fetch_available_models())
    response = chatbot.chat(input(">>> "), stream=True)
    for chunk in response:
        print(chunk, end="", flush=True)
```
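To close out, a minimal sketch of how the failover and streaming paths above are meant to be consumed, again assuming the import path from the file list (`webscout/Provider/oivscode.py`):

```python
# Minimal usage sketch for the new oivscode provider (webscout 8.2.9).
# Import path assumed from the file list above.
from webscout.Provider.oivscode import oivscode

bot = oivscode(model="claude-3-5-sonnet-20240620", timeout=60)

# Non-streaming: ask() parses one OpenAI-style JSON body; chat() returns
# the extracted message content as a str.
print(bot.chat("Say 'Hello' in one word."))

# Streaming: chunks are parsed from "data:" SSE lines by sanitize_stream,
# and each value yielded here is a plain str fragment.
for chunk in bot.chat("Count from 1 to 5.", stream=True):
    print(chunk, end="", flush=True)
```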