webscout 8.2.6__py3-none-any.whl → 8.2.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic. Click here for more details.
- webscout/AIauto.py +1 -1
- webscout/AIutel.py +298 -239
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +92 -19
- webscout/Extra/autocoder/autocoder.py +309 -114
- webscout/Extra/autocoder/autocoder_utiles.py +15 -15
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/weather.md +281 -0
- webscout/Litlogger/Readme.md +175 -0
- webscout/Provider/AISEARCH/DeepFind.py +41 -37
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +0 -1
- webscout/Provider/AISEARCH/genspark_search.py +228 -86
- webscout/Provider/AISEARCH/hika_search.py +11 -11
- webscout/Provider/AISEARCH/scira_search.py +324 -322
- webscout/Provider/AllenAI.py +7 -14
- webscout/Provider/Blackboxai.py +518 -74
- webscout/Provider/Cloudflare.py +0 -1
- webscout/Provider/Deepinfra.py +23 -21
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/GizAI.py +15 -5
- webscout/Provider/Glider.py +11 -8
- webscout/Provider/HeckAI.py +80 -52
- webscout/Provider/Koboldai.py +7 -4
- webscout/Provider/LambdaChat.py +2 -2
- webscout/Provider/Marcus.py +10 -18
- webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +282 -0
- webscout/Provider/OPENAI/NEMOTRON.py +244 -0
- webscout/Provider/OPENAI/README.md +1253 -0
- webscout/Provider/OPENAI/__init__.py +8 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -286
- webscout/Provider/OPENAI/api.py +810 -0
- webscout/Provider/OPENAI/base.py +217 -14
- webscout/Provider/OPENAI/c4ai.py +373 -367
- webscout/Provider/OPENAI/chatgpt.py +7 -0
- webscout/Provider/OPENAI/chatgptclone.py +7 -0
- webscout/Provider/OPENAI/chatsandbox.py +172 -0
- webscout/Provider/OPENAI/deepinfra.py +30 -20
- webscout/Provider/OPENAI/e2b.py +6 -0
- webscout/Provider/OPENAI/exaai.py +7 -0
- webscout/Provider/OPENAI/exachat.py +6 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -352
- webscout/Provider/OPENAI/glider.py +323 -316
- webscout/Provider/OPENAI/groq.py +361 -354
- webscout/Provider/OPENAI/heckai.py +30 -64
- webscout/Provider/OPENAI/llmchatco.py +8 -0
- webscout/Provider/OPENAI/mcpcore.py +7 -0
- webscout/Provider/OPENAI/multichat.py +8 -0
- webscout/Provider/OPENAI/netwrck.py +356 -350
- webscout/Provider/OPENAI/opkfc.py +8 -0
- webscout/Provider/OPENAI/scirachat.py +471 -462
- webscout/Provider/OPENAI/sonus.py +9 -0
- webscout/Provider/OPENAI/standardinput.py +9 -1
- webscout/Provider/OPENAI/textpollinations.py +339 -329
- webscout/Provider/OPENAI/toolbaz.py +7 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +358 -346
- webscout/Provider/OPENAI/uncovrAI.py +7 -0
- webscout/Provider/OPENAI/utils.py +103 -7
- webscout/Provider/OPENAI/venice.py +12 -0
- webscout/Provider/OPENAI/wisecat.py +19 -19
- webscout/Provider/OPENAI/writecream.py +7 -0
- webscout/Provider/OPENAI/x0gpt.py +7 -0
- webscout/Provider/OPENAI/yep.py +50 -21
- webscout/Provider/OpenGPT.py +1 -1
- webscout/Provider/TTI/AiForce/README.md +159 -0
- webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
- webscout/Provider/TTI/ImgSys/README.md +174 -0
- webscout/Provider/TTI/MagicStudio/README.md +101 -0
- webscout/Provider/TTI/Nexra/README.md +155 -0
- webscout/Provider/TTI/PollinationsAI/README.md +146 -0
- webscout/Provider/TTI/README.md +128 -0
- webscout/Provider/TTI/aiarta/README.md +134 -0
- webscout/Provider/TTI/artbit/README.md +100 -0
- webscout/Provider/TTI/fastflux/README.md +129 -0
- webscout/Provider/TTI/huggingface/README.md +114 -0
- webscout/Provider/TTI/piclumen/README.md +161 -0
- webscout/Provider/TTI/pixelmuse/README.md +79 -0
- webscout/Provider/TTI/talkai/README.md +139 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/speechma.py +500 -100
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TeachAnything.py +3 -7
- webscout/Provider/TextPollinationsAI.py +4 -2
- webscout/Provider/{aimathgpt.py → UNFINISHED/ChatHub.py} +88 -68
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/oivscode.py +351 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Writecream.py +11 -2
- webscout/Provider/__init__.py +8 -14
- webscout/Provider/ai4chat.py +4 -58
- webscout/Provider/asksteve.py +17 -9
- webscout/Provider/cerebras.py +3 -1
- webscout/Provider/koala.py +170 -268
- webscout/Provider/llmchat.py +3 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +7 -4
- webscout/Provider/samurai.py +223 -0
- webscout/Provider/scira_chat.py +4 -2
- webscout/Provider/typefully.py +23 -151
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/scout/README.md +402 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +134 -54
- webscout/zeroart/base.py +19 -13
- webscout/zeroart/effects.py +101 -99
- webscout/zeroart/fonts.py +1239 -816
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/METADATA +116 -74
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/RECORD +130 -103
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
- webscout-8.2.8.dist-info/entry_points.txt +3 -0
- webscout-8.2.8.dist-info/top_level.txt +1 -0
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/ElectronHub.py +0 -773
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -249
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/scout/core.py +0 -881
- webscout-8.2.6.dist-info/entry_points.txt +0 -3
- webscout-8.2.6.dist-info/top_level.txt +0 -2
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- /webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +0 -0
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
webscout/Provider/Blackboxai.py
CHANGED
|
@@ -1,33 +1,277 @@
|
|
|
1
1
|
import requests
|
|
2
|
-
import
|
|
3
|
-
|
|
2
|
+
import random
|
|
3
|
+
import string
|
|
4
|
+
import base64
|
|
5
|
+
from datetime import datetime, timedelta
|
|
6
|
+
from typing import Any, Dict, Union, Generator, List
|
|
4
7
|
from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
|
|
5
8
|
from webscout.AIbase import Provider
|
|
6
9
|
from webscout import exceptions
|
|
7
|
-
from webscout.
|
|
10
|
+
from webscout.litagent import LitAgent
|
|
11
|
+
def to_data_uri(image_data):
|
|
12
|
+
"""Convert image data to a data URI format"""
|
|
13
|
+
if isinstance(image_data, str):
|
|
14
|
+
# Assume it's already a data URI
|
|
15
|
+
return image_data
|
|
16
|
+
|
|
17
|
+
# Encode binary data to base64
|
|
18
|
+
encoded = base64.b64encode(image_data).decode('utf-8')
|
|
19
|
+
|
|
20
|
+
# Determine MIME type (simplified)
|
|
21
|
+
mime_type = "image/jpeg" # Default
|
|
22
|
+
if image_data.startswith(b'\x89PNG'):
|
|
23
|
+
mime_type = "image/png"
|
|
24
|
+
elif image_data.startswith(b'\xff\xd8'):
|
|
25
|
+
mime_type = "image/jpeg"
|
|
26
|
+
elif image_data.startswith(b'GIF'):
|
|
27
|
+
mime_type = "image/gif"
|
|
28
|
+
|
|
29
|
+
return f"data:{mime_type};base64,{encoded}"
|
|
30
|
+
|
|
8
31
|
|
|
9
32
|
class BLACKBOXAI(Provider):
|
|
10
33
|
"""
|
|
11
34
|
BlackboxAI provider for interacting with the Blackbox API.
|
|
12
35
|
Supports synchronous operations with multiple models.
|
|
13
36
|
"""
|
|
14
|
-
url = "https://
|
|
15
|
-
api_endpoint = "https://
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
"
|
|
25
|
-
"
|
|
26
|
-
"
|
|
27
|
-
"
|
|
28
|
-
"
|
|
29
|
-
"
|
|
30
|
-
"
|
|
37
|
+
url = "https://www.blackbox.ai"
|
|
38
|
+
api_endpoint = "https://www.blackbox.ai/api/chat"
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
# Default model (remains the same as per original class)
|
|
42
|
+
default_model = "GPT-4.1"
|
|
43
|
+
default_vision_model = default_model
|
|
44
|
+
|
|
45
|
+
# New OpenRouter models list
|
|
46
|
+
openrouter_models = [
|
|
47
|
+
"Deepcoder 14B Preview",
|
|
48
|
+
"DeepHermes 3 Llama 3 8B Preview",
|
|
49
|
+
"DeepSeek R1 Zero",
|
|
50
|
+
"Dolphin3.0 Mistral 24B",
|
|
51
|
+
"Dolphin3.0 R1 Mistral 24B",
|
|
52
|
+
"Flash 3",
|
|
53
|
+
"Gemini 2.0 Flash Experimental",
|
|
54
|
+
"Gemma 2 9B",
|
|
55
|
+
"Gemma 3 12B",
|
|
56
|
+
"Gemma 3 1B",
|
|
57
|
+
"Gemma 3 27B",
|
|
58
|
+
"Gemma 3 4B",
|
|
59
|
+
"Kimi VL A3B Thinking",
|
|
60
|
+
"Llama 3.1 8B Instruct",
|
|
61
|
+
"Llama 3.1 Nemotron Ultra 253B v1",
|
|
62
|
+
"Llama 3.2 11B Vision Instruct",
|
|
63
|
+
"Llama 3.2 1B Instruct",
|
|
64
|
+
"Llama 3.2 3B Instruct",
|
|
65
|
+
"Llama 3.3 70B Instruct",
|
|
66
|
+
"Llama 3.3 Nemotron Super 49B v1",
|
|
67
|
+
"Llama 4 Maverick",
|
|
68
|
+
"Llama 4 Scout",
|
|
69
|
+
"Mistral 7B Instruct",
|
|
70
|
+
"Mistral Nemo",
|
|
71
|
+
"Mistral Small 3",
|
|
72
|
+
"Mistral Small 3.1 24B",
|
|
73
|
+
"Molmo 7B D",
|
|
74
|
+
"Moonlight 16B A3B Instruct",
|
|
75
|
+
"Qwen2.5 72B Instruct",
|
|
76
|
+
"Qwen2.5 7B Instruct",
|
|
77
|
+
"Qwen2.5 Coder 32B Instruct",
|
|
78
|
+
"Qwen2.5 VL 32B Instruct",
|
|
79
|
+
"Qwen2.5 VL 3B Instruct",
|
|
80
|
+
"Qwen2.5 VL 72B Instruct",
|
|
81
|
+
"Qwen2.5-VL 7B Instruct",
|
|
82
|
+
"Qwerky 72B",
|
|
83
|
+
"QwQ 32B",
|
|
84
|
+
"QwQ 32B Preview",
|
|
85
|
+
"QwQ 32B RpR v1",
|
|
86
|
+
"R1",
|
|
87
|
+
"R1 Distill Llama 70B",
|
|
88
|
+
"R1 Distill Qwen 14B",
|
|
89
|
+
"R1 Distill Qwen 32B",
|
|
90
|
+
]
|
|
91
|
+
|
|
92
|
+
# New base models list
|
|
93
|
+
models = [
|
|
94
|
+
default_model,
|
|
95
|
+
"o3-mini",
|
|
96
|
+
"gpt-4.1-nano",
|
|
97
|
+
"Claude-sonnet-3.7",
|
|
98
|
+
"Claude-sonnet-3.5",
|
|
99
|
+
"DeepSeek-R1",
|
|
100
|
+
"Mistral-Small-24B-Instruct-2501",
|
|
101
|
+
*openrouter_models,
|
|
102
|
+
# Trending agent modes (names)
|
|
103
|
+
'Python Agent', 'HTML Agent', 'Builder Agent', 'Java Agent', 'JavaScript Agent',
|
|
104
|
+
'React Agent', 'Android Agent', 'Flutter Agent', 'Next.js Agent', 'AngularJS Agent',
|
|
105
|
+
'Swift Agent', 'MongoDB Agent', 'PyTorch Agent', 'Xcode Agent', 'Azure Agent',
|
|
106
|
+
'Bitbucket Agent', 'DigitalOcean Agent', 'Docker Agent', 'Electron Agent',
|
|
107
|
+
'Erlang Agent', 'FastAPI Agent', 'Firebase Agent', 'Flask Agent', 'Git Agent',
|
|
108
|
+
'Gitlab Agent', 'Go Agent', 'Godot Agent', 'Google Cloud Agent', 'Heroku Agent'
|
|
109
|
+
]
|
|
110
|
+
|
|
111
|
+
# Models that support vision capabilities
|
|
112
|
+
vision_models = [default_vision_model, 'o3-mini', "Llama 3.2 11B Vision Instruct"] # Added Llama vision
|
|
113
|
+
|
|
114
|
+
# Models that can be directly selected by users
|
|
115
|
+
userSelectedModel = ['o3-mini','Claude-sonnet-3.7', 'Claude-sonnet-3.5', 'DeepSeek-R1', 'Mistral-Small-24B-Instruct-2501'] + openrouter_models
|
|
116
|
+
|
|
117
|
+
# Agent mode configurations
|
|
118
|
+
agentMode = {
|
|
119
|
+
# OpenRouter Free
|
|
120
|
+
'Deepcoder 14B Preview': {'mode': True, 'id': "agentica-org/deepcoder-14b-preview:free", 'name': "Deepcoder 14B Preview"},
|
|
121
|
+
'DeepHermes 3 Llama 3 8B Preview': {'mode': True, 'id': "nousresearch/deephermes-3-llama-3-8b-preview:free", 'name': "DeepHermes 3 Llama 3 8B Preview"},
|
|
122
|
+
'DeepSeek R1 Zero': {'mode': True, 'id': "deepseek/deepseek-r1-zero:free", 'name': "DeepSeek R1 Zero"},
|
|
123
|
+
'Dolphin3.0 Mistral 24B': {'mode': True, 'id': "cognitivecomputations/dolphin3.0-mistral-24b:free", 'name': "Dolphin3.0 Mistral 24B"},
|
|
124
|
+
'Dolphin3.0 R1 Mistral 24B': {'mode': True, 'id': "cognitivecomputations/dolphin3.0-r1-mistral-24b:free", 'name': "Dolphin3.0 R1 Mistral 24B"},
|
|
125
|
+
'Flash 3': {'mode': True, 'id': "rekaai/reka-flash-3:free", 'name': "Flash 3"},
|
|
126
|
+
'Gemini 2.0 Flash Experimental': {'mode': True, 'id': "google/gemini-2.0-flash-exp:free", 'name': "Gemini 2.0 Flash Experimental"},
|
|
127
|
+
'Gemma 2 9B': {'mode': True, 'id': "google/gemma-2-9b-it:free", 'name': "Gemma 2 9B"},
|
|
128
|
+
'Gemma 3 12B': {'mode': True, 'id': "google/gemma-3-12b-it:free", 'name': "Gemma 3 12B"},
|
|
129
|
+
'Gemma 3 1B': {'mode': True, 'id': "google/gemma-3-1b-it:free", 'name': "Gemma 3 1B"},
|
|
130
|
+
'Gemma 3 27B': {'mode': True, 'id': "google/gemma-3-27b-it:free", 'name': "Gemma 3 27B"},
|
|
131
|
+
'Gemma 3 4B': {'mode': True, 'id': "google/gemma-3-4b-it:free", 'name': "Gemma 3 4B"},
|
|
132
|
+
'Kimi VL A3B Thinking': {'mode': True, 'id': "moonshotai/kimi-vl-a3b-thinking:free", 'name': "Kimi VL A3B Thinking"},
|
|
133
|
+
'Llama 3.1 8B Instruct': {'mode': True, 'id': "meta-llama/llama-3.1-8b-instruct:free", 'name': "Llama 3.1 8B Instruct"},
|
|
134
|
+
'Llama 3.1 Nemotron Ultra 253B v1': {'mode': True, 'id': "nvidia/llama-3.1-nemotron-ultra-253b-v1:free", 'name': "Llama 3.1 Nemotron Ultra 253B v1"},
|
|
135
|
+
'Llama 3.2 11B Vision Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-11b-vision-instruct:free", 'name': "Llama 3.2 11B Vision Instruct"},
|
|
136
|
+
'Llama 3.2 1B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-1b-instruct:free", 'name': "Llama 3.2 1B Instruct"},
|
|
137
|
+
'Llama 3.2 3B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-3b-instruct:free", 'name': "Llama 3.2 3B Instruct"},
|
|
138
|
+
'Llama 3.3 70B Instruct': {'mode': True, 'id': "meta-llama/llama-3.3-70b-instruct:free", 'name': "Llama 3.3 70B Instruct"},
|
|
139
|
+
'Llama 3.3 Nemotron Super 49B v1': {'mode': True, 'id': "nvidia/llama-3.3-nemotron-super-49b-v1:free", 'name': "Llama 3.3 Nemotron Super 49B v1"},
|
|
140
|
+
'Llama 4 Maverick': {'mode': True, 'id': "meta-llama/llama-4-maverick:free", 'name': "Llama 4 Maverick"},
|
|
141
|
+
'Llama 4 Scout': {'mode': True, 'id': "meta-llama/llama-4-scout:free", 'name': "Llama 4 Scout"},
|
|
142
|
+
'Mistral 7B Instruct': {'mode': True, 'id': "mistralai/mistral-7b-instruct:free", 'name': "Mistral 7B Instruct"},
|
|
143
|
+
'Mistral Nemo': {'mode': True, 'id': "mistralai/mistral-nemo:free", 'name': "Mistral Nemo"},
|
|
144
|
+
'Mistral Small 3': {'mode': True, 'id': "mistralai/mistral-small-24b-instruct-2501:free", 'name': "Mistral Small 3"}, # Matches Mistral-Small-24B-Instruct-2501
|
|
145
|
+
'Mistral Small 3.1 24B': {'mode': True, 'id': "mistralai/mistral-small-3.1-24b-instruct:free", 'name': "Mistral Small 3.1 24B"},
|
|
146
|
+
'Molmo 7B D': {'mode': True, 'id': "allenai/molmo-7b-d:free", 'name': "Molmo 7B D"},
|
|
147
|
+
'Moonlight 16B A3B Instruct': {'mode': True, 'id': "moonshotai/moonlight-16b-a3b-instruct:free", 'name': "Moonlight 16B A3B Instruct"},
|
|
148
|
+
'Qwen2.5 72B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-72b-instruct:free", 'name': "Qwen2.5 72B Instruct"},
|
|
149
|
+
'Qwen2.5 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-7b-instruct:free", 'name': "Qwen2.5 7B Instruct"},
|
|
150
|
+
'Qwen2.5 Coder 32B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-coder-32b-instruct:free", 'name': "Qwen2.5 Coder 32B Instruct"},
|
|
151
|
+
'Qwen2.5 VL 32B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-32b-instruct:free", 'name': "Qwen2.5 VL 32B Instruct"},
|
|
152
|
+
'Qwen2.5 VL 3B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-3b-instruct:free", 'name': "Qwen2.5 VL 3B Instruct"},
|
|
153
|
+
'Qwen2.5 VL 72B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-72b-instruct:free", 'name': "Qwen2.5 VL 72B Instruct"},
|
|
154
|
+
'Qwen2.5-VL 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-vl-7b-instruct:free", 'name': "Qwen2.5-VL 7B Instruct"},
|
|
155
|
+
'Qwerky 72B': {'mode': True, 'id': "featherless/qwerky-72b:free", 'name': "Qwerky 72B"},
|
|
156
|
+
'QwQ 32B': {'mode': True, 'id': "qwen/qwq-32b:free", 'name': "QwQ 32B"},
|
|
157
|
+
'QwQ 32B Preview': {'mode': True, 'id': "qwen/qwq-32b-preview:free", 'name': "QwQ 32B Preview"},
|
|
158
|
+
'QwQ 32B RpR v1': {'mode': True, 'id': "arliai/qwq-32b-arliai-rpr-v1:free", 'name': "QwQ 32B RpR v1"},
|
|
159
|
+
'R1': {'mode': True, 'id': "deepseek/deepseek-r1:free", 'name': "R1"}, # Matches DeepSeek-R1
|
|
160
|
+
'R1 Distill Llama 70B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-llama-70b:free", 'name': "R1 Distill Llama 70B"},
|
|
161
|
+
'R1 Distill Qwen 14B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-14b:free", 'name': "R1 Distill Qwen 14B"},
|
|
162
|
+
'R1 Distill Qwen 32B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-32b:free", 'name': "R1 Distill Qwen 32B"},
|
|
163
|
+
# Default models from the new list
|
|
164
|
+
'Claude-sonnet-3.7': {'mode': True, 'id': "Claude-sonnet-3.7", 'name': "Claude-sonnet-3.7"},
|
|
165
|
+
'Claude-sonnet-3.5': {'mode': True, 'id': "Claude-sonnet-3.5", 'name': "Claude-sonnet-3.5"},
|
|
166
|
+
'DeepSeek-R1': {'mode': True, 'id': "deepseek-reasoner", 'name': "DeepSeek-R1"}, # This is 'R1' in openrouter, but 'DeepSeek-R1' in base models
|
|
167
|
+
'Mistral-Small-24B-Instruct-2501': {'mode': True, 'id': "mistralai/Mistral-Small-24B-Instruct-2501", 'name': "Mistral-Small-24B-Instruct-2501"},
|
|
168
|
+
# Add default_model if it's not covered and has an agent mode
|
|
169
|
+
default_model: {'mode': True, 'id': "openai/gpt-4.1", 'name': default_model}, # Assuming GPT-4.1 is agent-compatible
|
|
170
|
+
'o3-mini': {'mode': True, 'id': "o3-mini", 'name': "o3-mini"}, # Assuming o3-mini is agent-compatible
|
|
171
|
+
'gpt-4.1-nano': {'mode': True, 'id': "gpt-4.1-nano", 'name': "gpt-4.1-nano"}, # Assuming gpt-4.1-nano is agent-compatible
|
|
172
|
+
}
|
|
173
|
+
|
|
174
|
+
# Trending agent modes
|
|
175
|
+
trendingAgentMode = {
|
|
176
|
+
'Python Agent': {'mode': True, 'id': "python"},
|
|
177
|
+
'HTML Agent': {'mode': True, 'id': "html"},
|
|
178
|
+
'Builder Agent': {'mode': True, 'id': "builder"},
|
|
179
|
+
'Java Agent': {'mode': True, 'id': "java"},
|
|
180
|
+
'JavaScript Agent': {'mode': True, 'id': "javascript"},
|
|
181
|
+
'React Agent': {'mode': True, 'id': "react"},
|
|
182
|
+
'Android Agent': {'mode': True, 'id': "android"},
|
|
183
|
+
'Flutter Agent': {'mode': True, 'id': "flutter"},
|
|
184
|
+
'Next.js Agent': {'mode': True, 'id': "next.js"},
|
|
185
|
+
'AngularJS Agent': {'mode': True, 'id': "angularjs"},
|
|
186
|
+
'Swift Agent': {'mode': True, 'id': "swift"},
|
|
187
|
+
'MongoDB Agent': {'mode': True, 'id': "mongodb"},
|
|
188
|
+
'PyTorch Agent': {'mode': True, 'id': "pytorch"},
|
|
189
|
+
'Xcode Agent': {'mode': True, 'id': "xcode"},
|
|
190
|
+
'Azure Agent': {'mode': True, 'id': "azure"},
|
|
191
|
+
'Bitbucket Agent': {'mode': True, 'id': "bitbucket"},
|
|
192
|
+
'DigitalOcean Agent': {'mode': True, 'id': "digitalocean"},
|
|
193
|
+
'Docker Agent': {'mode': True, 'id': "docker"},
|
|
194
|
+
'Electron Agent': {'mode': True, 'id': "electron"},
|
|
195
|
+
'Erlang Agent': {'mode': True, 'id': "erlang"},
|
|
196
|
+
'FastAPI Agent': {'mode': True, 'id': "fastapi"},
|
|
197
|
+
'Firebase Agent': {'mode': True, 'id': "firebase"},
|
|
198
|
+
'Flask Agent': {'mode': True, 'id': "flask"},
|
|
199
|
+
'Git Agent': {'mode': True, 'id': "git"},
|
|
200
|
+
'Gitlab Agent': {'mode': True, 'id': "gitlab"},
|
|
201
|
+
'Go Agent': {'mode': True, 'id': "go"},
|
|
202
|
+
'Godot Agent': {'mode': True, 'id': "godot"},
|
|
203
|
+
'Google Cloud Agent': {'mode': True, 'id': "googlecloud"},
|
|
204
|
+
'Heroku Agent': {'mode': True, 'id': "heroku"},
|
|
205
|
+
}
|
|
206
|
+
|
|
207
|
+
# Complete list of all models (for authorized users) - used for AVAILABLE_MODELS
|
|
208
|
+
_all_models = list(dict.fromkeys([
|
|
209
|
+
*models, # Includes default_model, o3-mini, etc., and openrouter_models and agent names
|
|
210
|
+
*list(agentMode.keys()), # Ensure all agentMode keys are included
|
|
211
|
+
*list(trendingAgentMode.keys()) # Ensure all trendingAgentMode keys are included
|
|
212
|
+
]))
|
|
213
|
+
|
|
214
|
+
AVAILABLE_MODELS = {name: name for name in _all_models}
|
|
215
|
+
# Update AVAILABLE_MODELS to use names from agentMode if available
|
|
216
|
+
for model_name_key in agentMode:
|
|
217
|
+
if model_name_key in AVAILABLE_MODELS: # Check if the key from agentMode is in _all_models
|
|
218
|
+
AVAILABLE_MODELS[model_name_key] = agentMode[model_name_key].get('name', model_name_key)
|
|
219
|
+
|
|
220
|
+
|
|
221
|
+
# Model aliases for easier reference
|
|
222
|
+
model_aliases = {
|
|
223
|
+
"gpt-4": default_model, # default_model is "GPT-4.1"
|
|
224
|
+
"gpt-4.1": default_model,
|
|
225
|
+
"gpt-4o": default_model, # Defaulting to GPT-4.1 as per previous logic if specific GPT-4o handling isn't defined elsewhere
|
|
226
|
+
"gpt-4o-mini": default_model, # Defaulting
|
|
227
|
+
"claude-3.7-sonnet": "Claude-sonnet-3.7",
|
|
228
|
+
"claude-3.5-sonnet": "Claude-sonnet-3.5",
|
|
229
|
+
# "deepseek-r1": "DeepSeek-R1", # This is in base models, maps to R1 or DeepSeek R1 Zero in agentMode
|
|
230
|
+
#
|
|
231
|
+
"deepcoder-14b": "Deepcoder 14B Preview",
|
|
232
|
+
"deephermes-3-8b": "DeepHermes 3 Llama 3 8B Preview",
|
|
233
|
+
"deepseek-r1-zero": "DeepSeek R1 Zero",
|
|
234
|
+
"deepseek-r1": "R1", # Alias for R1 (which is deepseek/deepseek-r1:free)
|
|
235
|
+
"dolphin-3.0-24b": "Dolphin3.0 Mistral 24B",
|
|
236
|
+
"dolphin-3.0-r1-24b": "Dolphin3.0 R1 Mistral 24B",
|
|
237
|
+
"reka-flash": "Flash 3",
|
|
238
|
+
"gemini-2.0-flash": "Gemini 2.0 Flash Experimental",
|
|
239
|
+
"gemma-2-9b": "Gemma 2 9B",
|
|
240
|
+
"gemma-3-12b": "Gemma 3 12B",
|
|
241
|
+
"gemma-3-1b": "Gemma 3 1B",
|
|
242
|
+
"gemma-3-27b": "Gemma 3 27B",
|
|
243
|
+
"gemma-3-4b": "Gemma 3 4B",
|
|
244
|
+
"kimi-vl-a3b-thinking": "Kimi VL A3B Thinking",
|
|
245
|
+
"llama-3.1-8b": "Llama 3.1 8B Instruct",
|
|
246
|
+
"nemotron-253b": "Llama 3.1 Nemotron Ultra 253B v1",
|
|
247
|
+
"llama-3.2-11b": "Llama 3.2 11B Vision Instruct",
|
|
248
|
+
"llama-3.2-1b": "Llama 3.2 1B Instruct",
|
|
249
|
+
"llama-3.2-3b": "Llama 3.2 3B Instruct",
|
|
250
|
+
"llama-3.3-70b": "Llama 3.3 70B Instruct",
|
|
251
|
+
"nemotron-49b": "Llama 3.3 Nemotron Super 49B v1",
|
|
252
|
+
"llama-4-maverick": "Llama 4 Maverick",
|
|
253
|
+
"llama-4-scout": "Llama 4 Scout",
|
|
254
|
+
"mistral-7b": "Mistral 7B Instruct",
|
|
255
|
+
"mistral-nemo": "Mistral Nemo",
|
|
256
|
+
"mistral-small-24b": "Mistral Small 3", # Alias for "Mistral Small 3"
|
|
257
|
+
"mistral-small-24b-instruct-2501": "Mistral-Small-24B-Instruct-2501", # Specific name
|
|
258
|
+
"mistral-small-3.1-24b": "Mistral Small 3.1 24B",
|
|
259
|
+
"molmo-7b": "Molmo 7B D",
|
|
260
|
+
"moonlight-16b": "Moonlight 16B A3B Instruct",
|
|
261
|
+
"qwen-2.5-72b": "Qwen2.5 72B Instruct",
|
|
262
|
+
"qwen-2.5-7b": "Qwen2.5 7B Instruct",
|
|
263
|
+
"qwen-2.5-coder-32b": "Qwen2.5 Coder 32B Instruct",
|
|
264
|
+
"qwen-2.5-vl-32b": "Qwen2.5 VL 32B Instruct",
|
|
265
|
+
"qwen-2.5-vl-3b": "Qwen2.5 VL 3B Instruct",
|
|
266
|
+
"qwen-2.5-vl-72b": "Qwen2.5 VL 72B Instruct",
|
|
267
|
+
"qwen-2.5-vl-7b": "Qwen2.5-VL 7B Instruct",
|
|
268
|
+
"qwerky-72b": "Qwerky 72B",
|
|
269
|
+
"qwq-32b": "QwQ 32B",
|
|
270
|
+
"qwq-32b-preview": "QwQ 32B Preview",
|
|
271
|
+
"qwq-32b-arliai": "QwQ 32B RpR v1",
|
|
272
|
+
"deepseek-r1-distill-llama-70b": "R1 Distill Llama 70B",
|
|
273
|
+
"deepseek-r1-distill-qwen-14b": "R1 Distill Qwen 14B",
|
|
274
|
+
"deepseek-r1-distill-qwen-32b": "R1 Distill Qwen 32B",
|
|
31
275
|
}
|
|
32
276
|
|
|
33
277
|
def __init__(
|
|
@@ -41,17 +285,10 @@ class BLACKBOXAI(Provider):
|
|
|
41
285
|
proxies: dict = {},
|
|
42
286
|
history_offset: int = 10250,
|
|
43
287
|
act: str = None,
|
|
44
|
-
model: str = "
|
|
45
|
-
logging: bool = False,
|
|
288
|
+
model: str = "gpt-4.1",
|
|
46
289
|
system_message: str = "You are a helpful AI assistant."
|
|
47
290
|
):
|
|
48
291
|
"""Initialize BlackboxAI with enhanced configuration options."""
|
|
49
|
-
self.logger = Logger(
|
|
50
|
-
name="BlackboxAI",
|
|
51
|
-
format=LogFormat.MODERN_EMOJI,
|
|
52
|
-
|
|
53
|
-
) if logging else None
|
|
54
|
-
|
|
55
292
|
self.session = requests.Session()
|
|
56
293
|
self.max_tokens_to_sample = max_tokens
|
|
57
294
|
self.is_conversation = is_conversation
|
|
@@ -64,9 +301,11 @@ class BLACKBOXAI(Provider):
|
|
|
64
301
|
"Content-Type": "application/json",
|
|
65
302
|
"Accept": "*/*",
|
|
66
303
|
}
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
304
|
+
self.cookies = {
|
|
305
|
+
'cfzs_amplitude': self.generate_id(32),
|
|
306
|
+
'cfz_amplitude': self.generate_id(32),
|
|
307
|
+
'__cf_bm': self.generate_id(32),
|
|
308
|
+
}
|
|
70
309
|
|
|
71
310
|
self.__available_optimizers = (
|
|
72
311
|
method for method in dir(Optimizers)
|
|
@@ -80,7 +319,7 @@ class BLACKBOXAI(Provider):
|
|
|
80
319
|
if act
|
|
81
320
|
else intro or Conversation.intro
|
|
82
321
|
)
|
|
83
|
-
|
|
322
|
+
|
|
84
323
|
self.conversation = Conversation(
|
|
85
324
|
is_conversation, self.max_tokens_to_sample, filepath, update_file
|
|
86
325
|
)
|
|
@@ -88,77 +327,250 @@ class BLACKBOXAI(Provider):
|
|
|
88
327
|
self.session.proxies = proxies
|
|
89
328
|
|
|
90
329
|
@classmethod
|
|
91
|
-
def get_model(
|
|
330
|
+
def get_model(cls, model: str) -> str:
|
|
92
331
|
"""Resolve model name from alias"""
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
332
|
+
# Convert to lowercase for case-insensitive matching
|
|
333
|
+
model_lower = model.lower()
|
|
334
|
+
|
|
335
|
+
# Check aliases (case-insensitive)
|
|
336
|
+
for alias, target in cls.model_aliases.items():
|
|
337
|
+
if model_lower == alias.lower():
|
|
338
|
+
model = target
|
|
339
|
+
break
|
|
340
|
+
|
|
341
|
+
# Check available models (case-insensitive)
|
|
342
|
+
for available_model, target in cls.AVAILABLE_MODELS.items():
|
|
343
|
+
if model_lower == available_model.lower() or model == target:
|
|
344
|
+
return target
|
|
345
|
+
|
|
346
|
+
# If we get here, the model wasn't found
|
|
347
|
+
raise ValueError(f"Unknown model: {model}. Available models: {', '.join(cls.AVAILABLE_MODELS)}")
|
|
348
|
+
|
|
349
|
+
@classmethod
|
|
350
|
+
def generate_session(cls, email: str, id_length: int = 21, days_ahead: int = 30) -> dict:
|
|
351
|
+
"""
|
|
352
|
+
Generate a dynamic session with proper ID and expiry format using a specific email.
|
|
353
|
+
|
|
354
|
+
Args:
|
|
355
|
+
email: The email to use for this session
|
|
356
|
+
id_length: Length of the numeric ID (default: 21)
|
|
357
|
+
days_ahead: Number of days ahead for expiry (default: 30)
|
|
358
|
+
|
|
359
|
+
Returns:
|
|
360
|
+
dict: A session dictionary with user information and expiry
|
|
361
|
+
"""
|
|
362
|
+
# Generate a random name
|
|
363
|
+
first_names = ["Alex", "Jordan", "Taylor", "Morgan", "Casey", "Riley", "Avery", "Quinn", "Skyler", "Dakota"]
|
|
364
|
+
last_names = ["Smith", "Johnson", "Williams", "Brown", "Jones", "Miller", "Davis", "Garcia", "Rodriguez", "Wilson"]
|
|
365
|
+
name = f"{random.choice(first_names)} {random.choice(last_names)}"
|
|
366
|
+
|
|
367
|
+
# Generate numeric ID - using Google-like ID format
|
|
368
|
+
numeric_id = ''.join(random.choice('0123456789') for _ in range(id_length))
|
|
369
|
+
|
|
370
|
+
# Generate future expiry date
|
|
371
|
+
future_date = datetime.now() + timedelta(days=days_ahead)
|
|
372
|
+
expiry = future_date.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
|
|
373
|
+
|
|
374
|
+
# Generate random image ID for the new URL format
|
|
375
|
+
chars = string.ascii_letters + string.digits + "-"
|
|
376
|
+
random_img_id = ''.join(random.choice(chars) for _ in range(48))
|
|
377
|
+
image_url = f"https://lh3.googleusercontent.com/a/ACg8oc{random_img_id}=s96-c"
|
|
378
|
+
|
|
379
|
+
return {
|
|
380
|
+
"user": {
|
|
381
|
+
"name": name,
|
|
382
|
+
"email": email,
|
|
383
|
+
"image": image_url,
|
|
384
|
+
"id": numeric_id
|
|
385
|
+
},
|
|
386
|
+
"expires": expiry,
|
|
387
|
+
"isNewUser": False
|
|
388
|
+
}
|
|
389
|
+
|
|
390
|
+
@classmethod
|
|
391
|
+
def generate_id(cls, length: int = 7) -> str:
|
|
392
|
+
"""Generate a random ID of specified length"""
|
|
393
|
+
chars = string.ascii_letters + string.digits
|
|
394
|
+
return ''.join(random.choice(chars) for _ in range(length))
|
|
96
395
|
|
|
97
396
|
def _make_request(
    self,
    messages: List[Dict[str, str]],
    stream: bool = False,
    temperature: float = None,
    top_p: float = None,
    max_tokens: int = None,
    media: List = None
) -> Generator[str, None, None]:
    """Make a synchronous request to the BlackboxAI API.

    Args:
        messages: Conversation history as dicts with "role" and "content" keys.
        stream: When True, yield response lines as they arrive; otherwise
            yield the whole response body once.
        temperature: Sampling temperature forwarded as-is (None = server default).
        top_p: Nucleus-sampling value forwarded as-is (None = server default).
        max_tokens: Response token cap; falls back to ``self.max_tokens_to_sample``.
        media: Optional list of ``(image, image_name)`` pairs attached to the
            last message as data URIs.

    Yields:
        str: Response text — one line at a time when streaming, the full body otherwise.

    Raises:
        exceptions.RateLimitError: When the hourly request limit message is detected.
        exceptions.FailedToGenerateResponseError: On a non-OK HTTP response.
        exceptions.ProviderConnectionError: On a network-level failure.
    """
    # Each conversation gets a fresh chat id; the first user message reuses it
    # so the server ties the message to the conversation.
    chat_id = self.generate_id()

    # Re-key the incoming messages into the API's wire format.
    current_messages = []
    for i, msg in enumerate(messages):
        msg_id = chat_id if i == 0 and msg["role"] == "user" else self.generate_id()
        current_messages.append({
            "id": msg_id,
            "content": msg["content"],
            "role": msg["role"]
        })

    # Attach any images to the most recent message as data URIs.
    if media:
        current_messages[-1]['data'] = {
            "imagesData": [
                {
                    "filePath": f"/{image_name}",
                    "contents": to_data_uri(image)
                } for image, image_name in media
            ],
            "fileText": "",
            "title": ""
        }

    # Fabricate a throwaway @blackbox.ai identity for the session payload.
    chars = string.ascii_lowercase + string.digits
    random_team = ''.join(random.choice(chars) for _ in range(8))
    request_email = f"{random_team}@blackbox.ai"
    session_data = self.generate_session(request_email)

    # Request payload mirroring a captured working browser session.
    data = {
        "messages": current_messages,
        # dict.get already returns the default for unknown models, so the
        # previous `if self.model in self.agentMode` pre-check was redundant.
        "agentMode": self.agentMode.get(self.model, {}),
        "id": chat_id,
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "trendingAgentMode": {},
        "isMicMode": False,
        "userSystemPrompt": self.system_message,
        "maxTokens": max_tokens or self.max_tokens_to_sample,
        "playgroundTopP": top_p,
        "playgroundTemperature": temperature,
        "isChromeExt": False,
        "githubToken": "",
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "isMemoryEnabled": False,
        "mobileClient": False,
        "userSelectedModel": self.model if self.model in self.userSelectedModel else None,
        "validated": "00f37b34-a166-4efb-bce5-1312d87f2f94",  # Using a fixed validated value from the example
        "imageGenerationMode": False,
        "webSearchModePrompt": False,
        "deepSearchMode": False,
        # NOTE(review): "designerMode" was listed twice in the original literal;
        # Python keeps only the last duplicate, so a single entry is equivalent.
        "designerMode": False,
        "domains": None,
        "vscodeClient": False,
        "codeInterpreterMode": False,
        "customProfile": {
            "name": "",
            "occupation": "",
            "traits": [],
            "additionalInfo": "",
            "enableNewChats": False
        },
        "webSearchModeOption": {
            "autoMode": True,
            "webMode": False,
            "offlineMode": False
        },
        "session": session_data,
        "isPremium": True,
        "subscriptionCache": {
            "status": "PREMIUM",
            "customerId": "cus_" + ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(14)),
            "expiryTimestamp": int((datetime.now() + timedelta(days=30)).timestamp()),
            "lastChecked": int(datetime.now().timestamp() * 1000),
            "isTrialSubscription": True
        },
        "beastMode": False,
        "reasoningMode": False,
        "workspaceId": ""
    }

    # Use LitAgent to generate a realistic browser fingerprint for headers.
    agent = LitAgent()
    fingerprint = agent.generate_fingerprint("chrome")
    headers = {
        'accept': fingerprint['accept'],
        'accept-encoding': 'gzip, deflate, br, zstd',
        'accept-language': fingerprint['accept_language'],
        'content-type': 'application/json',
        'origin': 'https://www.blackbox.ai',
        'referer': 'https://www.blackbox.ai/',
        'sec-ch-ua': fingerprint['sec_ch_ua'],
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': f'"{fingerprint["platform"]}"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': fingerprint['user_agent']
    }

    try:
        response = self.session.post(
            self.api_endpoint,
            json=data,
            headers=headers,
            stream=stream,
            timeout=self.timeout
        )

        if not response.ok:
            error_msg = f"API request failed: {response.status_code} - {response.text}"

            # Map well-known failure modes to clearer messages.
            if response.status_code == 503 and "service has been suspended" in response.text.lower():
                error_msg = "BlackboxAI service has been suspended by its owner. Please try again later or use a different provider."

            if response.status_code == 403 and "replace" in response.text.lower() and "api.blackbox.ai" in response.text:
                error_msg = "BlackboxAI API endpoint issue. Please check the API endpoint configuration."

            raise exceptions.FailedToGenerateResponseError(error_msg)

        if stream:
            for line in response.iter_lines(decode_unicode=True):
                if line:
                    if "You have reached your request limit for the hour" in line:
                        raise exceptions.RateLimitError("Rate limit exceeded")
                    yield line
        else:
            response_text = response.text
            if "You have reached your request limit for the hour" in response_text:
                raise exceptions.RateLimitError("Rate limit exceeded")
            yield response_text

    except requests.exceptions.RequestException as e:
        # Chain the original exception so network tracebacks stay diagnosable.
        raise exceptions.ProviderConnectionError(f"Connection error: {str(e)}") from e
|
|
138
554
|
|
|
139
555
|
def ask(
|
|
140
556
|
self,
|
|
141
557
|
prompt: str,
|
|
142
558
|
stream: bool = False,
|
|
143
|
-
|
|
559
|
+
temperature: float = None,
|
|
560
|
+
top_p: float = None,
|
|
561
|
+
max_tokens: int = None,
|
|
144
562
|
optimizer: str = None,
|
|
145
563
|
conversationally: bool = False,
|
|
564
|
+
media: List = None
|
|
146
565
|
) -> Union[Dict[str, str], Generator[Dict[str, str], None, None]]:
|
|
147
566
|
"""Send a prompt to BlackboxAI API and return the response."""
|
|
148
|
-
if self.logger:
|
|
149
|
-
self.logger.debug(f"Processing request [stream={stream}]")
|
|
150
|
-
|
|
151
567
|
conversation_prompt = self.conversation.gen_complete_prompt(prompt)
|
|
152
568
|
if optimizer:
|
|
153
569
|
if optimizer in self.__available_optimizers:
|
|
154
570
|
conversation_prompt = getattr(Optimizers, optimizer)(
|
|
155
571
|
conversation_prompt if conversationally else prompt
|
|
156
572
|
)
|
|
157
|
-
if self.logger:
|
|
158
|
-
self.logger.debug(f"Applied optimizer: {optimizer}")
|
|
159
573
|
else:
|
|
160
|
-
if self.logger:
|
|
161
|
-
self.logger.error(f"Invalid optimizer: {optimizer}")
|
|
162
574
|
raise ValueError(f"Optimizer is not one of {self.__available_optimizers}")
|
|
163
575
|
|
|
164
576
|
messages = [
|
|
@@ -167,11 +579,25 @@ class BLACKBOXAI(Provider):
|
|
|
167
579
|
]
|
|
168
580
|
|
|
169
581
|
def for_stream():
|
|
170
|
-
for text in self._make_request(
|
|
582
|
+
for text in self._make_request(
|
|
583
|
+
messages,
|
|
584
|
+
stream=True,
|
|
585
|
+
temperature=temperature,
|
|
586
|
+
top_p=top_p,
|
|
587
|
+
max_tokens=max_tokens,
|
|
588
|
+
media=media
|
|
589
|
+
):
|
|
171
590
|
yield {"text": text}
|
|
172
591
|
|
|
173
592
|
def for_non_stream():
|
|
174
|
-
response_text = next(self._make_request(
|
|
593
|
+
response_text = next(self._make_request(
|
|
594
|
+
messages,
|
|
595
|
+
stream=False,
|
|
596
|
+
temperature=temperature,
|
|
597
|
+
top_p=top_p,
|
|
598
|
+
max_tokens=max_tokens,
|
|
599
|
+
media=media
|
|
600
|
+
))
|
|
175
601
|
self.last_response = {"text": response_text}
|
|
176
602
|
return self.last_response
|
|
177
603
|
|
|
@@ -181,19 +607,25 @@ class BLACKBOXAI(Provider):
|
|
|
181
607
|
self,
|
|
182
608
|
prompt: str,
|
|
183
609
|
stream: bool = False,
|
|
610
|
+
temperature: float = None,
|
|
611
|
+
top_p: float = None,
|
|
612
|
+
max_tokens: int = None,
|
|
184
613
|
optimizer: str = None,
|
|
185
614
|
conversationally: bool = False,
|
|
615
|
+
media: List = None
|
|
186
616
|
) -> Union[str, Generator[str, None, None]]:
|
|
187
617
|
"""Generate response as string."""
|
|
188
|
-
if self.logger:
|
|
189
|
-
self.logger.debug(f"Chat request initiated [stream={stream}]")
|
|
190
618
|
|
|
191
619
|
def for_stream():
|
|
192
620
|
for response in self.ask(
|
|
193
621
|
prompt,
|
|
194
622
|
stream=True,
|
|
623
|
+
temperature=temperature,
|
|
624
|
+
top_p=top_p,
|
|
625
|
+
max_tokens=max_tokens,
|
|
195
626
|
optimizer=optimizer,
|
|
196
|
-
conversationally=conversationally
|
|
627
|
+
conversationally=conversationally,
|
|
628
|
+
media=media
|
|
197
629
|
):
|
|
198
630
|
yield self.get_message(response)
|
|
199
631
|
|
|
@@ -202,8 +634,12 @@ class BLACKBOXAI(Provider):
|
|
|
202
634
|
self.ask(
|
|
203
635
|
prompt,
|
|
204
636
|
stream=False,
|
|
637
|
+
temperature=temperature,
|
|
638
|
+
top_p=top_p,
|
|
639
|
+
max_tokens=max_tokens,
|
|
205
640
|
optimizer=optimizer,
|
|
206
641
|
conversationally=conversationally,
|
|
642
|
+
media=media
|
|
207
643
|
)
|
|
208
644
|
)
|
|
209
645
|
|
|
@@ -215,15 +651,23 @@ class BLACKBOXAI(Provider):
|
|
|
215
651
|
return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
|
|
216
652
|
|
|
217
653
|
if __name__ == "__main__":
    # Smoke-test every available model and print a one-line status per model.
    print("-" * 80)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 80)

    for model in BLACKBOXAI.AVAILABLE_MODELS:
        try:
            test_ai = BLACKBOXAI(model=model, timeout=60)
            # chat() returns the full response text in non-stream mode;
            # the original `response_text = response` alias added nothing.
            response_text = test_ai.chat("Say 'Hello' in one word")
            # Strip once instead of re-stripping in every comparison below.
            trimmed = response_text.strip() if response_text else ""

            if trimmed:
                status = "✓"
                # Truncate long responses so the table stays readable.
                display_text = trimmed[:50] + "..." if len(trimmed) > 50 else trimmed
            else:
                status = "✗"
                display_text = "Empty or invalid response"
            print(f"{model:<50} {status:<10} {display_text}")
        except Exception as e:
            print(f"{model:<50} {'✗':<10} {str(e)}")