webscout-8.3.2-py3-none-any.whl → webscout-8.3.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +146 -37
- webscout/Bing_search.py +1 -2
- webscout/Provider/AISEARCH/__init__.py +1 -0
- webscout/Provider/AISEARCH/stellar_search.py +132 -0
- webscout/Provider/ExaChat.py +84 -58
- webscout/Provider/HeckAI.py +85 -80
- webscout/Provider/Jadve.py +56 -50
- webscout/Provider/MiniMax.py +207 -0
- webscout/Provider/Nemotron.py +41 -13
- webscout/Provider/Netwrck.py +34 -51
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
- webscout/Provider/OPENAI/MiniMax.py +298 -0
- webscout/Provider/OPENAI/README.md +30 -29
- webscout/Provider/OPENAI/TogetherAI.py +4 -17
- webscout/Provider/OPENAI/__init__.py +3 -1
- webscout/Provider/OPENAI/autoproxy.py +752 -17
- webscout/Provider/OPENAI/base.py +7 -76
- webscout/Provider/OPENAI/deepinfra.py +42 -108
- webscout/Provider/OPENAI/flowith.py +179 -166
- webscout/Provider/OPENAI/friendli.py +233 -0
- webscout/Provider/OPENAI/monochat.py +329 -0
- webscout/Provider/OPENAI/pydantic_imports.py +1 -172
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OPENAI/typegpt.py +1 -1
- webscout/Provider/OPENAI/utils.py +19 -42
- webscout/Provider/OPENAI/x0gpt.py +14 -2
- webscout/Provider/OpenGPT.py +54 -32
- webscout/Provider/PI.py +58 -84
- webscout/Provider/StandardInput.py +32 -13
- webscout/Provider/TTI/README.md +9 -9
- webscout/Provider/TTI/__init__.py +2 -1
- webscout/Provider/TTI/aiarta.py +92 -78
- webscout/Provider/TTI/infip.py +212 -0
- webscout/Provider/TTI/monochat.py +220 -0
- webscout/Provider/TeachAnything.py +11 -3
- webscout/Provider/TextPollinationsAI.py +78 -70
- webscout/Provider/TogetherAI.py +32 -48
- webscout/Provider/Venice.py +37 -46
- webscout/Provider/VercelAI.py +27 -24
- webscout/Provider/WiseCat.py +35 -35
- webscout/Provider/WrDoChat.py +22 -26
- webscout/Provider/WritingMate.py +26 -22
- webscout/Provider/__init__.py +2 -2
- webscout/Provider/granite.py +48 -57
- webscout/Provider/koala.py +51 -39
- webscout/Provider/learnfastai.py +49 -64
- webscout/Provider/llmchat.py +79 -93
- webscout/Provider/llmchatco.py +63 -78
- webscout/Provider/multichat.py +51 -40
- webscout/Provider/oivscode.py +1 -1
- webscout/Provider/scira_chat.py +159 -96
- webscout/Provider/scnet.py +13 -13
- webscout/Provider/searchchat.py +13 -13
- webscout/Provider/sonus.py +12 -11
- webscout/Provider/toolbaz.py +25 -8
- webscout/Provider/turboseek.py +41 -42
- webscout/Provider/typefully.py +27 -12
- webscout/Provider/typegpt.py +41 -46
- webscout/Provider/uncovr.py +55 -90
- webscout/Provider/x0gpt.py +33 -17
- webscout/Provider/yep.py +79 -96
- webscout/auth/__init__.py +12 -1
- webscout/auth/providers.py +27 -5
- webscout/auth/routes.py +128 -104
- webscout/auth/server.py +367 -312
- webscout/client.py +121 -116
- webscout/litagent/Readme.md +68 -55
- webscout/litagent/agent.py +99 -9
- webscout/version.py +1 -1
- {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/METADATA +102 -90
- {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/RECORD +75 -87
- webscout/Provider/TTI/fastflux.py +0 -233
- webscout/Provider/Writecream.py +0 -246
- webscout/auth/static/favicon.svg +0 -11
- webscout/auth/swagger_ui.py +0 -203
- webscout/auth/templates/components/authentication.html +0 -237
- webscout/auth/templates/components/base.html +0 -103
- webscout/auth/templates/components/endpoints.html +0 -750
- webscout/auth/templates/components/examples.html +0 -491
- webscout/auth/templates/components/footer.html +0 -75
- webscout/auth/templates/components/header.html +0 -27
- webscout/auth/templates/components/models.html +0 -286
- webscout/auth/templates/components/navigation.html +0 -70
- webscout/auth/templates/static/api.js +0 -455
- webscout/auth/templates/static/icons.js +0 -168
- webscout/auth/templates/static/main.js +0 -784
- webscout/auth/templates/static/particles.js +0 -201
- webscout/auth/templates/static/styles.css +0 -3353
- webscout/auth/templates/static/ui.js +0 -374
- webscout/auth/templates/swagger_ui.html +0 -170
- {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
webscout/Provider/Writecream.py
DELETED
@@ -1,246 +0,0 @@
-from curl_cffi import CurlError
-from curl_cffi.requests import Session # Keep Session import
-import json
-from typing import Any, Dict, Optional, Generator, Union
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
-from webscout.AIbase import Provider
-from webscout import exceptions
-from webscout.litagent import LitAgent
-
-class Writecream(Provider):
-    """
-    A class to interact with the Writecream API.
-    """
-
-    AVAILABLE_MODELS = ["writecream-gpt"]
-
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 600,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        system_prompt: str = "You are a helpful and informative AI assistant.",
-        base_url: str = "https://8pe3nv3qha.execute-api.us-east-1.amazonaws.com/default/llm_chat",
-        referer: str = "https://www.writecream.com/chatgpt-chat/",
-        link: str = "writecream.com",
-        model: str = "writecream-gpt"
-    ):
-        """
-        Initializes the Writecream API with given parameters.
-        """
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-        # Initialize curl_cffi Session
-        self.session = Session()
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.base_url = base_url
-        self.timeout = timeout
-        self.last_response = {}
-        self.system_prompt = system_prompt
-        self.model = model
-        # Initialize LitAgent
-        self.agent = LitAgent()
-        self.referer = referer
-        self.link = link
-
-        self.headers = {
-            # Use LitAgent for User-Agent
-            "User-Agent": self.agent.random(),
-            "Referer": self.referer
-            # Add other headers if needed by curl_cffi impersonation or API
-        }
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-
-        # Update curl_cffi session headers and proxies
-        self.session.headers.update(self.headers)
-        self.session.proxies.update(proxies)
-
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> Union[Dict[str, Any], Generator]:
-        """
-        Sends a message to the Writecream API and returns the response.
-
-        Args:
-            prompt (str): Prompt to be sent.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-
-        Returns:
-            Union[Dict[str, Any], Generator]: Response from the API.
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
-
-        final_query = [
-            {"role": "system", "content": self.system_prompt},
-            {"role": "user", "content": conversation_prompt}
-        ]
-
-        params = {
-            "query": json.dumps(final_query),
-            "link": self.link
-        }
-
-        def for_non_stream():
-            try:
-                # Use curl_cffi session.get with impersonate
-                response = self.session.get(
-                    self.base_url,
-                    params=params,
-                    timeout=self.timeout,
-                    impersonate="chrome120" # Add impersonate
-                )
-                response.raise_for_status()
-                response_text = response.text # Get the raw text
-
-                # Use sanitize_stream to process the non-streaming text
-                # It will try to parse the whole text as JSON because to_json=True
-                processed_stream = sanitize_stream(
-                    data=response_text,
-                    to_json=True, # Attempt to parse the whole response text as JSON
-                    intro_value=None, # No prefix expected on the full response
-                    content_extractor=lambda chunk: chunk.get("response", chunk.get("response_content", "")) if isinstance(chunk, dict) else None
-                )
-
-                # Extract the single result from the generator
-                response_content = ""
-                for content in processed_stream:
-                    response_content = content if isinstance(content, str) else ""
-
-                # Update conversation history
-                self.last_response = {"text": response_content}
-                self.conversation.update_chat_history(prompt, response_content)
-
-                return {"text": response_content}
-            except CurlError as e: # Catch CurlError
-                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
-            except Exception as e:
-                # Include original exception type
-                raise exceptions.FailedToGenerateResponseError(f"Failed to get response ({type(e).__name__}): {e}")
-
-        # Currently, Writecream API doesn't support streaming, so we always return non-streaming response
-        return for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> Union[str, Generator[str, None, None]]:
-        """
-        Generates a response from the Writecream API.
-
-        Args:
-            prompt (str): Prompt to be sent.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-
-        Returns:
-            Union[str, Generator[str, None, None]]: Response from the API.
-        """
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    stream=False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
-            )
-
-        if stream:
-            # For compatibility with AUTO streaming interface, yield a dict
-            response_dict = self.ask(
-                prompt,
-                stream=False,
-                optimizer=optimizer,
-                conversationally=conversationally,
-            )
-            yield response_dict
-        else:
-            return for_non_stream()
-
-    def get_message(self, response: dict) -> str:
-        """
-        Retrieves message only from response.
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
-
-
-if __name__ == "__main__":
-    # Ensure curl_cffi is installed
-    print("-" * 80)
-    print(f"{'Model':<30} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    try:
-        test_api = Writecream(timeout=60)
-        prompt = "Say 'Hello' in one word"
-        response = test_api.chat(prompt)
-
-        if response and len(response.strip()) > 0:
-            status = "✓"
-            # Clean and truncate response
-            clean_text = response.strip().encode('utf-8', errors='ignore').decode('utf-8')
-            display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
-        else:
-            status = "✗"
-            display_text = "Empty or invalid response"

-        print(f"{test_api.model:<30} {status:<10} {display_text}")
-    except Exception as e:
-        print(f"{Writecream.AVAILABLE_MODELS[0]:<30} {'✗':<10} {str(e)}")
webscout/auth/static/favicon.svg
DELETED
@@ -1,11 +0,0 @@
-/* Custom Swagger UI Favicon - SVG format */
-<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32" width="32" height="32">
-  <defs>
-    <linearGradient id="grad1" x1="0%" y1="0%" x2="100%" y2="100%">
-      <stop offset="0%" style="stop-color:#6366f1;stop-opacity:1" />
-      <stop offset="100%" style="stop-color:#8b5cf6;stop-opacity:1" />
-    </linearGradient>
-  </defs>
-  <rect width="32" height="32" rx="8" fill="url(#grad1)"/>
-  <path d="M8 12h16M8 16h12M8 20h8" stroke="white" stroke-width="2" stroke-linecap="round"/>
-</svg>
webscout/auth/swagger_ui.py
DELETED
@@ -1,203 +0,0 @@
-"""
-Custom Swagger UI implementation for Webscout FastAPI server.
-Provides a modern, beautiful, and fully functional API documentation interface.
-"""
-
-import os
-import logging
-from fastapi import FastAPI, Request
-from fastapi.responses import HTMLResponse
-from fastapi.staticfiles import StaticFiles
-from jinja2 import Environment, FileSystemLoader, TemplateNotFound
-
-class CustomSwaggerUI:
-    """Custom Swagger UI handler for FastAPI applications."""
-
-    def __init__(self, app: FastAPI):
-        self.app = app
-        self.template_dir = os.path.join(os.path.dirname(__file__), "templates")
-        self.static_dir = os.path.join(os.path.dirname(__file__), "static")
-        self.template_static_dir = os.path.join(self.template_dir, "static")
-
-        # Initialize Jinja2 environment
-        self.jinja_env = Environment(
-            loader=FileSystemLoader(self.template_dir),
-            autoescape=True
-        )
-
-        # Mount static files - prioritize template static files
-        static_mounted = False
-
-        # First try to mount template static files (higher priority)
-        if os.path.exists(self.template_static_dir):
-            try:
-                app.mount("/static", StaticFiles(directory=self.template_static_dir), name="template_static")
-                static_mounted = True
-                logging.info(f"Mounted template static files from: {self.template_static_dir}")
-            except Exception as e:
-                logging.warning(f"Failed to mount template static files: {e}")
-
-        # Fallback to regular static directory if template static not available
-        if not static_mounted and os.path.exists(self.static_dir):
-            try:
-                app.mount("/static", StaticFiles(directory=self.static_dir), name="static")
-                logging.info(f"Mounted static files from: {self.static_dir}")
-            except Exception as e:
-                logging.warning(f"Failed to mount static files: {e}")
-
-        # Log static file status
-        if not static_mounted:
-            logging.warning("No static files mounted - CSS and JS may not load correctly")
-
-        self._setup_routes()
-
-    def _setup_routes(self):
-        """Setup custom Swagger UI routes."""
-
-        @self.app.get("/docs", response_class=HTMLResponse, include_in_schema=False)
-        async def custom_swagger_ui(request: Request):
-            """Serve the custom Swagger UI."""
-            return await self._render_swagger_ui(request)
-
-        @self.app.get("/swagger-ui", response_class=HTMLResponse, include_in_schema=False)
-        async def custom_swagger_ui_alt(request: Request):
-            """Alternative endpoint for custom Swagger UI."""
-            return await self._render_swagger_ui(request)
-
-    async def _render_swagger_ui(self, request: Request) -> HTMLResponse:
-        """Render the custom Swagger UI template."""
-        try:
-            # Get app metadata
-            title = getattr(self.app, 'title', 'Webscout OpenAI API')
-            description = getattr(self.app, 'description', 'OpenAI API compatible interface')
-            version = getattr(self.app, 'version', '0.2.0')
-
-            # Get base URL
-            base_url = str(request.base_url).rstrip('/')
-
-            # Load and count models
-            model_count = await self._get_model_count()
-            provider_count = await self._get_provider_count()
-
-            # Load the main template using Jinja2 environment
-            template = self.jinja_env.get_template("swagger_ui.html")
-
-            # Render with context
-            rendered_html = template.render(
-                title=title,
-                description=description,
-                version=version,
-                base_url=base_url,
-                model_count=model_count,
-                provider_count=provider_count
-            )
-
-            return HTMLResponse(content=rendered_html, status_code=200)
-
-        except TemplateNotFound:
-            # Template file doesn't exist, use fallback
-            logging.warning("Template file 'swagger_ui.html' not found, using fallback HTML")
-            return HTMLResponse(content=self._get_fallback_html(), status_code=200)
-
-        except Exception as e:
-            # Other errors, log and use fallback
-            logging.error(f"Error rendering Swagger UI template: {e}")
-            return HTMLResponse(content=self._get_fallback_html(), status_code=200)
-
-    async def _get_model_count(self) -> int:
-        """Get the number of available models."""
-        try:
-            # Try to get from auth config
-            from .config import AppConfig
-            if hasattr(AppConfig, 'provider_map') and AppConfig.provider_map:
-                # Count models (keys with "/" are model names)
-                model_count = len([model for model in AppConfig.provider_map.keys() if "/" in model])
-                return model_count if model_count > 0 else 589
-            return 589 # Default fallback
-        except Exception as e:
-            logging.debug(f"Could not get model count: {e}")
-            return 589
-
-    async def _get_provider_count(self) -> int:
-        """Get the number of available providers."""
-        try:
-            # Try to get from auth config
-            from .config import AppConfig
-            if hasattr(AppConfig, 'provider_map') and AppConfig.provider_map:
-                # Count unique providers
-                providers = set()
-                for model_key in AppConfig.provider_map.keys():
-                    if "/" in model_key:
-                        provider_name = model_key.split("/")[0]
-                        providers.add(provider_name)
-                return len(providers) if len(providers) > 0 else 42
-            return 42 # Default fallback
-        except Exception as e:
-            logging.debug(f"Could not get provider count: {e}")
-            return 42
-
-    def _get_fallback_html(self) -> str:
-        """Fallback HTML if template loading fails."""
-        return """
-        <!DOCTYPE html>
-        <html lang="en">
-        <head>
-            <meta charset="UTF-8">
-            <meta name="viewport" content="width=device-width, initial-scale=1.0">
-            <title>Webscout API Documentation</title>
-            <style>
-                body {
-                    font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
-                    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
-                    color: white;
-                    margin: 0;
-                    padding: 2rem;
-                    min-height: 100vh;
-                    display: flex;
-                    align-items: center;
-                    justify-content: center;
-                    flex-direction: column;
-                }
-                .container {
-                    text-align: center;
-                    background: rgba(255, 255, 255, 0.1);
-                    backdrop-filter: blur(10px);
-                    padding: 3rem;
-                    border-radius: 1rem;
-                    border: 1px solid rgba(255, 255, 255, 0.2);
-                }
-                h1 { font-size: 3rem; margin-bottom: 1rem; }
-                p { font-size: 1.2rem; margin-bottom: 2rem; opacity: 0.9; }
-                .btn {
-                    background: rgba(255, 255, 255, 0.2);
-                    color: white;
-                    border: 1px solid rgba(255, 255, 255, 0.3);
-                    padding: 1rem 2rem;
-                    border-radius: 0.5rem;
-                    text-decoration: none;
-                    font-weight: 600;
-                    transition: all 0.3s ease;
-                    display: inline-block;
-                    margin: 0.5rem;
-                }
-                .btn:hover {
-                    background: rgba(255, 255, 255, 0.3);
-                    transform: translateY(-2px);
-                }
-            </style>
-        </head>
-        <body>
-            <div class="container">
-                <h1>🚀 Webscout API</h1>
-                <p>OpenAI-Compatible API Documentation</p>
-                <a href="/redoc" class="btn">📖 ReDoc Documentation</a>
-                <a href="/openapi.json" class="btn">📋 OpenAPI Schema</a>
-            </div>
-        </body>
-        </html>
-        """
-
-
-def setup_custom_swagger_ui(app: FastAPI) -> CustomSwaggerUI:
-    """Setup custom Swagger UI for the FastAPI app."""
-    return CustomSwaggerUI(app)