webscout 8.2.6__py3-none-any.whl → 8.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (150)
  1. webscout/AIauto.py +1 -1
  2. webscout/AIutel.py +298 -239
  3. webscout/Extra/Act.md +309 -0
  4. webscout/Extra/GitToolkit/gitapi/README.md +110 -0
  5. webscout/Extra/YTToolkit/README.md +375 -0
  6. webscout/Extra/YTToolkit/ytapi/README.md +44 -0
  7. webscout/Extra/YTToolkit/ytapi/extras.py +92 -19
  8. webscout/Extra/autocoder/autocoder.py +309 -114
  9. webscout/Extra/autocoder/autocoder_utiles.py +15 -15
  10. webscout/Extra/gguf.md +430 -0
  11. webscout/Extra/tempmail/README.md +488 -0
  12. webscout/Extra/weather.md +281 -0
  13. webscout/Litlogger/Readme.md +175 -0
  14. webscout/Provider/AISEARCH/DeepFind.py +41 -37
  15. webscout/Provider/AISEARCH/README.md +279 -0
  16. webscout/Provider/AISEARCH/__init__.py +0 -1
  17. webscout/Provider/AISEARCH/genspark_search.py +228 -86
  18. webscout/Provider/AISEARCH/hika_search.py +11 -11
  19. webscout/Provider/AISEARCH/scira_search.py +324 -322
  20. webscout/Provider/AllenAI.py +7 -14
  21. webscout/Provider/Blackboxai.py +518 -74
  22. webscout/Provider/Cloudflare.py +0 -1
  23. webscout/Provider/Deepinfra.py +23 -21
  24. webscout/Provider/Flowith.py +217 -0
  25. webscout/Provider/FreeGemini.py +250 -0
  26. webscout/Provider/GizAI.py +15 -5
  27. webscout/Provider/Glider.py +11 -8
  28. webscout/Provider/HeckAI.py +80 -52
  29. webscout/Provider/Koboldai.py +7 -4
  30. webscout/Provider/LambdaChat.py +2 -2
  31. webscout/Provider/Marcus.py +10 -18
  32. webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
  33. webscout/Provider/OPENAI/Cloudflare.py +378 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +282 -0
  35. webscout/Provider/OPENAI/NEMOTRON.py +244 -0
  36. webscout/Provider/OPENAI/README.md +1253 -0
  37. webscout/Provider/OPENAI/__init__.py +8 -0
  38. webscout/Provider/OPENAI/ai4chat.py +293 -286
  39. webscout/Provider/OPENAI/api.py +810 -0
  40. webscout/Provider/OPENAI/base.py +217 -14
  41. webscout/Provider/OPENAI/c4ai.py +373 -367
  42. webscout/Provider/OPENAI/chatgpt.py +7 -0
  43. webscout/Provider/OPENAI/chatgptclone.py +7 -0
  44. webscout/Provider/OPENAI/chatsandbox.py +172 -0
  45. webscout/Provider/OPENAI/deepinfra.py +30 -20
  46. webscout/Provider/OPENAI/e2b.py +6 -0
  47. webscout/Provider/OPENAI/exaai.py +7 -0
  48. webscout/Provider/OPENAI/exachat.py +6 -0
  49. webscout/Provider/OPENAI/flowith.py +162 -0
  50. webscout/Provider/OPENAI/freeaichat.py +359 -352
  51. webscout/Provider/OPENAI/glider.py +323 -316
  52. webscout/Provider/OPENAI/groq.py +361 -354
  53. webscout/Provider/OPENAI/heckai.py +30 -64
  54. webscout/Provider/OPENAI/llmchatco.py +8 -0
  55. webscout/Provider/OPENAI/mcpcore.py +7 -0
  56. webscout/Provider/OPENAI/multichat.py +8 -0
  57. webscout/Provider/OPENAI/netwrck.py +356 -350
  58. webscout/Provider/OPENAI/opkfc.py +8 -0
  59. webscout/Provider/OPENAI/scirachat.py +471 -462
  60. webscout/Provider/OPENAI/sonus.py +9 -0
  61. webscout/Provider/OPENAI/standardinput.py +9 -1
  62. webscout/Provider/OPENAI/textpollinations.py +339 -329
  63. webscout/Provider/OPENAI/toolbaz.py +7 -0
  64. webscout/Provider/OPENAI/typefully.py +355 -0
  65. webscout/Provider/OPENAI/typegpt.py +358 -346
  66. webscout/Provider/OPENAI/uncovrAI.py +7 -0
  67. webscout/Provider/OPENAI/utils.py +103 -7
  68. webscout/Provider/OPENAI/venice.py +12 -0
  69. webscout/Provider/OPENAI/wisecat.py +19 -19
  70. webscout/Provider/OPENAI/writecream.py +7 -0
  71. webscout/Provider/OPENAI/x0gpt.py +7 -0
  72. webscout/Provider/OPENAI/yep.py +50 -21
  73. webscout/Provider/OpenGPT.py +1 -1
  74. webscout/Provider/TTI/AiForce/README.md +159 -0
  75. webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
  76. webscout/Provider/TTI/ImgSys/README.md +174 -0
  77. webscout/Provider/TTI/MagicStudio/README.md +101 -0
  78. webscout/Provider/TTI/Nexra/README.md +155 -0
  79. webscout/Provider/TTI/PollinationsAI/README.md +146 -0
  80. webscout/Provider/TTI/README.md +128 -0
  81. webscout/Provider/TTI/aiarta/README.md +134 -0
  82. webscout/Provider/TTI/artbit/README.md +100 -0
  83. webscout/Provider/TTI/fastflux/README.md +129 -0
  84. webscout/Provider/TTI/huggingface/README.md +114 -0
  85. webscout/Provider/TTI/piclumen/README.md +161 -0
  86. webscout/Provider/TTI/pixelmuse/README.md +79 -0
  87. webscout/Provider/TTI/talkai/README.md +139 -0
  88. webscout/Provider/TTS/README.md +192 -0
  89. webscout/Provider/TTS/__init__.py +2 -1
  90. webscout/Provider/TTS/speechma.py +500 -100
  91. webscout/Provider/TTS/sthir.py +94 -0
  92. webscout/Provider/TeachAnything.py +3 -7
  93. webscout/Provider/TextPollinationsAI.py +4 -2
  94. webscout/Provider/{aimathgpt.py → UNFINISHED/ChatHub.py} +88 -68
  95. webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
  96. webscout/Provider/UNFINISHED/oivscode.py +351 -0
  97. webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
  98. webscout/Provider/Writecream.py +11 -2
  99. webscout/Provider/__init__.py +8 -14
  100. webscout/Provider/ai4chat.py +4 -58
  101. webscout/Provider/asksteve.py +17 -9
  102. webscout/Provider/cerebras.py +3 -1
  103. webscout/Provider/koala.py +170 -268
  104. webscout/Provider/llmchat.py +3 -0
  105. webscout/Provider/lmarena.py +198 -0
  106. webscout/Provider/meta.py +7 -4
  107. webscout/Provider/samurai.py +223 -0
  108. webscout/Provider/scira_chat.py +4 -2
  109. webscout/Provider/typefully.py +23 -151
  110. webscout/__init__.py +4 -2
  111. webscout/cli.py +3 -28
  112. webscout/conversation.py +35 -35
  113. webscout/litagent/Readme.md +276 -0
  114. webscout/scout/README.md +402 -0
  115. webscout/swiftcli/Readme.md +323 -0
  116. webscout/version.py +1 -1
  117. webscout/webscout_search.py +2 -182
  118. webscout/webscout_search_async.py +1 -179
  119. webscout/zeroart/README.md +89 -0
  120. webscout/zeroart/__init__.py +134 -54
  121. webscout/zeroart/base.py +19 -13
  122. webscout/zeroart/effects.py +101 -99
  123. webscout/zeroart/fonts.py +1239 -816
  124. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/METADATA +116 -74
  125. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/RECORD +130 -103
  126. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
  127. webscout-8.2.8.dist-info/entry_points.txt +3 -0
  128. webscout-8.2.8.dist-info/top_level.txt +1 -0
  129. webscout/Provider/AISEARCH/ISou.py +0 -256
  130. webscout/Provider/ElectronHub.py +0 -773
  131. webscout/Provider/Free2GPT.py +0 -241
  132. webscout/Provider/GPTWeb.py +0 -249
  133. webscout/Provider/bagoodex.py +0 -145
  134. webscout/Provider/geminiprorealtime.py +0 -160
  135. webscout/scout/core.py +0 -881
  136. webscout-8.2.6.dist-info/entry_points.txt +0 -3
  137. webscout-8.2.6.dist-info/top_level.txt +0 -2
  138. webstoken/__init__.py +0 -30
  139. webstoken/classifier.py +0 -189
  140. webstoken/keywords.py +0 -216
  141. webstoken/language.py +0 -128
  142. webstoken/ner.py +0 -164
  143. webstoken/normalizer.py +0 -35
  144. webstoken/processor.py +0 -77
  145. webstoken/sentiment.py +0 -206
  146. webstoken/stemmer.py +0 -73
  147. webstoken/tagger.py +0 -60
  148. webstoken/tokenizer.py +0 -158
  149. /webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +0 -0
  150. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
webscout/Provider/OPENAI/api.py (new file)
@@ -0,0 +1,810 @@
+ """
+ OpenAI-Compatible API Server for Webscout
+
+ This module provides an OpenAI-compatible API server that allows using
+ various AI providers through a standardized interface compatible with
+ OpenAI's API. This enables using Webscout providers with any tool or
+ application designed to work with OpenAI's API.
+
+ Usage:
+     # From command line:
+     python -m webscout.Provider.OPENAI.api --port 8080 --api-key "your-key"
+
+     # From Python code:
+     from webscout.Provider.OPENAI.api import start_server
+     start_server(port=8080, api_key="your-key")
+ """
+
+ from __future__ import annotations
+
+ import logging
+ import json
+ import uvicorn
+ import secrets
+ import os
+ import uuid
+ import time
+ from pathlib import Path
+ from typing import List, Dict, Optional, Union, Any, Generator
+ from fastapi import FastAPI, Response, Request, Depends
+ from fastapi.middleware.cors import CORSMiddleware
+ from fastapi.responses import StreamingResponse, RedirectResponse, HTMLResponse, JSONResponse, FileResponse
+ from fastapi.staticfiles import StaticFiles
+
+ from fastapi.exceptions import RequestValidationError
+ from fastapi.security import APIKeyHeader
+ from starlette.exceptions import HTTPException
+ from starlette.status import (
+     HTTP_200_OK,
+     HTTP_422_UNPROCESSABLE_ENTITY,
+     HTTP_404_NOT_FOUND,
+     HTTP_401_UNAUTHORIZED,
+     HTTP_403_FORBIDDEN,
+     HTTP_500_INTERNAL_SERVER_ERROR,
+ )
+ from fastapi.encoders import jsonable_encoder
+ from pydantic import BaseModel, Field
+ from typing import List, Optional, Literal, Union
+
+ # Import provider classes from the OPENAI directory
+ from webscout.Provider.OPENAI import *
+ from webscout.Provider.OPENAI.utils import (
+     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+     ChatCompletionMessage, CompletionUsage
+ )
+
+ logger = logging.getLogger(__name__)
+
+ DEFAULT_PORT = 8000
+
+ class Message(BaseModel):
+     role: Literal["system", "user", "assistant", "function", "tool"]
+     content: str
+     name: Optional[str] = None
+
+ class ChatCompletionRequest(BaseModel):
+     model: str
+     messages: List[Message]
+     temperature: Optional[float] = None
+     top_p: Optional[float] = None
+     n: Optional[int] = 1
+     stream: Optional[bool] = False
+     max_tokens: Optional[int] = None
+     presence_penalty: Optional[float] = None
+     frequency_penalty: Optional[float] = None
+     logit_bias: Optional[Dict[str, float]] = None
+     user: Optional[str] = None
+     stop: Optional[Union[str, List[str]]] = None
+
+     class Config:
+         extra = "ignore" # Ignore extra fields that aren't in the model
+
+ class ModelListResponse(BaseModel):
+     object: str = "list"
+     data: List[Dict[str, Any]]
+
+ class ErrorResponse(Response):
+     media_type = "application/json"
+
+     @classmethod
+     def from_exception(cls, exception: Exception, status_code: int = HTTP_500_INTERNAL_SERVER_ERROR):
+         return cls(format_exception(exception), status_code)
+
+     @classmethod
+     def from_message(cls, message: str, status_code: int = HTTP_500_INTERNAL_SERVER_ERROR, headers: dict = None):
+         return cls(format_exception(message), status_code, headers=headers)
+
+     def render(self, content) -> bytes:
+         return str(content).encode(errors="ignore")
+
+ class AppConfig:
+     api_key: Optional[str] = None
+     provider_map = {}
+     default_provider = "ChatGPT"
+
+     @classmethod
+     def set_config(cls, **data):
+         for key, value in data.items():
+             setattr(cls, key, value)
+
+ def create_app():
+     app = FastAPI(
+         title="Webscout OpenAI API",
+         description="OpenAI API compatible interface for various LLM providers",
+         version="0.1.0",
+         docs_url=None,
+     )
+
+     # Add CORS middleware to allow cross-origin requests
+     app.add_middleware(
+         CORSMiddleware,
+         allow_origins=["*"],
+         allow_credentials=True,
+         allow_methods=["*"],
+         allow_headers=["*"],
+     )
+
+     api = Api(app)
+     api.register_authorization()
+     api.register_json_middleware() # Add custom JSON middleware
+     api.register_validation_exception_handler()
+     api.register_routes()
+
+     # Initialize provider map
+     initialize_provider_map()
+
+     return app
+
+ def create_app_debug():
+     logging.basicConfig(level=logging.DEBUG)
+     return create_app()
+
+ def initialize_provider_map():
+     """Initialize the provider map with available provider classes"""
+     import sys
+     import inspect
+     from webscout.Provider.OPENAI.base import OpenAICompatibleProvider
+
+     # Get all imported modules from OPENAI package
+     module = sys.modules["webscout.Provider.OPENAI"]
+
+     # Find all provider classes (subclasses of OpenAICompatibleProvider)
+     for name, obj in inspect.getmembers(module):
+         if inspect.isclass(obj) and issubclass(obj, OpenAICompatibleProvider) and obj.__name__ != "OpenAICompatibleProvider":
+             # Register the provider class by its name
+             AppConfig.provider_map[obj.__name__] = obj
+             logger.info(f"Registered provider: {obj.__name__}")
+
+             # Also add additional mappings for model names
+             if hasattr(obj, "AVAILABLE_MODELS") and isinstance(obj.AVAILABLE_MODELS, (list, tuple, set)):
+                 for model in obj.AVAILABLE_MODELS:
+                     if model and isinstance(model, str) and model != obj.__name__:
+                         AppConfig.provider_map[model] = obj
+                         logger.info(f"Mapped model {model} to provider {obj.__name__}")
+
+     # If no providers were found, add a fallback for testing
+     if not AppConfig.provider_map:
+         logger.warning("No providers found, using ChatGPT as fallback")
+         from webscout.Provider.OPENAI.chatgpt import ChatGPT
+         AppConfig.provider_map["ChatGPT"] = ChatGPT
+         AppConfig.provider_map["gpt-4"] = ChatGPT
+         AppConfig.provider_map["gpt-4o"] = ChatGPT
+         AppConfig.provider_map["gpt-4o-mini"] = ChatGPT
+         AppConfig.default_provider = "ChatGPT"
+
+     # Get distinct provider names
+     provider_names = list(set(v.__name__ for v in AppConfig.provider_map.values()))
+
+     # Get model names (excluding provider class names)
+     provider_class_names = set(v.__name__ for v in AppConfig.provider_map.values())
+     model_names = [model for model in AppConfig.provider_map.keys() if model not in provider_class_names]
+
+     logger.info(f"Available providers ({len(provider_names)}): {provider_names}")
+     logger.info(f"Available models ({len(model_names)}): {sorted(model_names)}")
+     logger.info(f"Default provider: {AppConfig.default_provider}")
+
+ class Api:
+     def __init__(self, app: FastAPI) -> None:
+         self.app = app
+         self.get_api_key = APIKeyHeader(name="authorization", auto_error=False)
+
+     def register_authorization(self):
+         @self.app.middleware("http")
+         async def authorization(request: Request, call_next):
+             if AppConfig.api_key is not None:
+                 auth_header = await self.get_api_key(request)
+                 path = request.url.path
+                 if path.startswith("/v1"):
+                     if auth_header is None:
+                         return ErrorResponse.from_message("API key required", HTTP_401_UNAUTHORIZED)
+                     # Strip "Bearer " prefix if present
+                     if auth_header.startswith("Bearer "):
+                         auth_header = auth_header[7:]
+                     if AppConfig.api_key is None or not secrets.compare_digest(AppConfig.api_key, auth_header):
+                         return ErrorResponse.from_message("Invalid API key", HTTP_403_FORBIDDEN)
+             return await call_next(request)
+
+     def register_json_middleware(self):
+         @self.app.middleware("http")
+         async def parse_json_middleware(request: Request, call_next):
+             if request.method == "POST" and "/v1/chat/completions" in request.url.path:
+                 try:
+                     # Try parsing the JSON body manually first to catch JSON errors early
+                     body = await request.body()
+                     if body:
+                         body_str = body.decode('utf-8', errors='ignore')
+                         original_body = body_str
+                         logger.debug(f"Original request body: {body_str}")
+
+                         # PowerShell with curl often has formatting issues with JSON
+                         try:
+                             # First try normal JSON parsing
+                             json.loads(body_str)
+                             logger.debug("JSON parsed successfully")
+                         except json.JSONDecodeError as e:
+                             logger.warning(f"JSON parse error, attempting fixes: {str(e)}")
+
+                             # Series of fixes to try for common PowerShell JSON issues
+                             try:
+                                 # Fix 1: Try to clean up the JSON string
+                                 # Replace literal backslash+quote with just quote
+                                 body_str = body_str.replace("\\\"", "\"")
+                                 # Add double quotes to unquoted property names and string values
+                                 # This is a common issue with PowerShell's curl
+                                 import re
+
+                                 # Try a full JSON correction - replace single quotes with double quotes
+                                 # This is a more aggressive fix that might work in simple cases
+                                 fixed_body = body_str.replace("'", "\"")
+                                 try:
+                                     json.loads(fixed_body)
+                                     body_str = fixed_body
+                                     logger.info("Fixed JSON by replacing single quotes with double quotes")
+                                 except json.JSONDecodeError:
+                                     # If that didn't work, try more sophisticated fixes
+                                     pass
+
+                                 # Check for missing quotes around property names
+                                 # Look for patterns like {model: instead of {"model":
+                                 body_str = re.sub(r'\{([^"\s][^:\s]*)(\s*:)', r'{"\1"\2', body_str)
+                                 body_str = re.sub(r',\s*([^"\s][^:\s]*)(\s*:)', r', "\1"\2', body_str)
+
+                                 # Try to parse with the fixed body
+                                 json.loads(body_str)
+                                 # If successful, modify the request._body for downstream processing
+                                 logger.info(f"Successfully fixed JSON format\nOriginal: {original_body}\nFixed: {body_str}")
+                                 request._body = body_str.encode('utf-8')
+                             except Exception as fix_error:
+                                 logger.error(f"Failed to fix JSON: {str(fix_error)}")
+
+                                 # Let's return a helpful error message with the proper format example
+                                 example = json.dumps({
+                                     "model": "gpt-4",
+                                     "messages": [{"role": "user", "content": "Hello"}]
+                                 })
+                                 return JSONResponse(
+                                     status_code=HTTP_422_UNPROCESSABLE_ENTITY,
+                                     content=jsonable_encoder({
+                                         "detail": [
+                                             {
+                                                 "loc": ["body", 0],
+                                                 "message": f"Invalid JSON format: {str(e)}. Make sure to use double quotes for both keys and values. Example: {example}",
+                                                 "type": "json_invalid"
+                                             }
+                                         ]
+                                     }),
+                                 )
+                 except Exception as e:
+                     error_detail = str(e)
+                     logger.error(f"Request processing error: {error_detail}")
+                     return JSONResponse(
+                         status_code=HTTP_422_UNPROCESSABLE_ENTITY,
+                         content=jsonable_encoder({
+                             "detail": [
+                                 {
+                                     "loc": ["body", 0],
+                                     "message": f"Request processing error: {error_detail}",
+                                     "type": "request_invalid"
+                                 }
+                             ]
+                         }),
+                     )
+             return await call_next(request)
+
+     def register_validation_exception_handler(self):
+         @self.app.exception_handler(RequestValidationError)
+         async def validation_exception_handler(request: Request, exc: RequestValidationError):
+             details = exc.errors()
+             modified_details = []
+             for error in details:
+                 modified_details.append({
+                     "loc": error["loc"],
+                     "message": error["msg"],
+                     "type": error["type"],
+                 })
+             return JSONResponse(
+                 status_code=HTTP_422_UNPROCESSABLE_ENTITY,
+                 content=jsonable_encoder({"detail": modified_details}),
+             )
+
+         @self.app.exception_handler(HTTPException)
+         async def http_exception_handler(request: Request, exc: HTTPException):
+             return JSONResponse(
+                 status_code=exc.status_code,
+                 content=jsonable_encoder({"detail": exc.detail}),
+             )
+
+         @self.app.exception_handler(json.JSONDecodeError)
+         async def json_decode_error_handler(request: Request, exc: json.JSONDecodeError):
+             return JSONResponse(
+                 status_code=HTTP_422_UNPROCESSABLE_ENTITY,
+                 content=jsonable_encoder({
+                     "detail": [
+                         {
+                             "loc": ["body", 0],
+                             "message": f"Invalid JSON format: {str(exc)}",
+                             "type": "json_invalid"
+                         }
+                     ]
+                 }),
+             )
+
+     def register_routes(self):
+         @self.app.get("/")
+         async def read_root(request: Request):
+             return RedirectResponse(url="/docs")
+
+         @self.app.get("/v1")
+         async def read_root_v1(request: Request):
+             return RedirectResponse(url="/docs")
+
+         @self.app.get("/docs", include_in_schema=False)
+         async def custom_swagger_ui(request: Request):
+             from fastapi.openapi.docs import get_swagger_ui_html
+             return get_swagger_ui_html(
+                 openapi_url=self.app.openapi_url,
+                 title=f"{self.app.title} - Swagger UI"
+             )
+
+         @self.app.get("/v1//models", include_in_schema=False) # Handle double slash case
+         async def list_models_double_slash():
+             """Redirect double slash models endpoint to the correct one"""
+             return RedirectResponse(url="/v1/models")
+
+         @self.app.get("/v1/models")
+         async def list_models():
+             """List available models"""
+             from webscout.Provider.OPENAI.utils import ModelData, ModelList
+             models_data = []
+
+             # Get current timestamp
+             created_time = int(time.time())
+
+             for model_name, provider_class in AppConfig.provider_map.items():
+                 if not hasattr(provider_class, "AVAILABLE_MODELS") or model_name in provider_class.AVAILABLE_MODELS:
+                     # Create a more detailed model data object with proper fields
+                     model = ModelData(
+                         id=model_name,
+                         created=created_time,
+                         owned_by=getattr(provider_class, "__name__", "webscout"),
+                         permission=[{
+                             "id": f"modelperm-{model_name}",
+                             "object": "model_permission",
+                             "created": created_time,
+                             "allow_create_engine": False,
+                             "allow_sampling": True,
+                             "allow_logprobs": True,
+                             "allow_search_indices": hasattr(provider_class, "supports_embeddings") and provider_class.supports_embeddings,
+                             "allow_view": True,
+                             "allow_fine_tuning": False,
+                             "organization": "*",
+                             "group": None,
+                             "is_blocking": False
+                         }]
+                     )
+                     models_data.append(model)
+
+             # Return as ModelList for proper formatting
+             response = ModelList(data=models_data)
+             return response.to_dict()
+
+         @self.app.get("/v1/models/{model_name}")
+         async def get_model(model_name: str):
+             """Get information about a specific model"""
+             from webscout.Provider.OPENAI.utils import ModelData
+             created_time = int(time.time())
+
+             # Check if the model exists in our provider map
+             if model_name in AppConfig.provider_map:
+                 provider_class = AppConfig.provider_map[model_name]
+
+                 # Create a proper OpenAI-compatible model response
+                 model = ModelData(
+                     id=model_name,
+                     created=created_time,
+                     owned_by=getattr(provider_class, "__name__", "webscout"),
+                     permission=[{
+                         "id": f"modelperm-{model_name}",
+                         "object": "model_permission",
+                         "created": created_time,
+                         "allow_create_engine": False,
+                         "allow_sampling": True,
+                         "allow_logprobs": True,
+                         "allow_search_indices": hasattr(provider_class, "supports_embeddings") and provider_class.supports_embeddings,
+                         "allow_view": True,
+                         "allow_fine_tuning": False,
+                         "organization": "*",
+                         "group": None,
+                         "is_blocking": False
+                     }]
+                 )
+                 return model.to_dict()
+
+             # If we reached here, the model was not found
+             return ErrorResponse.from_message(f"Model '{model_name}' not found", HTTP_404_NOT_FOUND)
+
+         @self.app.post("/v1/chat/completions")
+         async def chat_completions(request: Request):
+             """Create a chat completion"""
+             # First manually extract the request body to better handle parsing errors
+             try:
+                 # Note: We don't need to parse JSON here as our middleware already handles that
+                 # and fixes PowerShell JSON issues
+                 body = await request.json()
+                 logger.debug(f"Request body parsed successfully: {body}")
+
+                 # Check for required fields
+                 if "model" not in body:
+                     return JSONResponse(
+                         status_code=HTTP_422_UNPROCESSABLE_ENTITY,
+                         content=jsonable_encoder({
+                             "detail": [
+                                 {
+                                     "loc": ["body", "model"],
+                                     "message": "Field 'model' is required",
+                                     "type": "missing"
+                                 }
+                             ]
+                         }),
+                     )
+
+                 if "messages" not in body or not isinstance(body["messages"], list) or len(body["messages"]) == 0:
+                     return JSONResponse(
+                         status_code=HTTP_422_UNPROCESSABLE_ENTITY,
+                         content=jsonable_encoder({
+                             "detail": [
+                                 {
+                                     "loc": ["body", "messages"],
+                                     "message": "Field 'messages' must be a non-empty array",
+                                     "type": "missing"
+                                 }
+                             ]
+                         }),
+                     )
+
+                 # Now parse it through Pydantic model
+                 try:
+                     chat_request = ChatCompletionRequest(**body)
+                 except Exception as validation_error:
+                     logger.warning(f"Validation error: {validation_error}")
+                     # Try to provide helpful error messages for common validation issues
+                     error_msg = str(validation_error)
+                     if "role" in error_msg:
+                         return JSONResponse(
+                             status_code=HTTP_422_UNPROCESSABLE_ENTITY,
+                             content=jsonable_encoder({
+                                 "detail": [
+                                     {
+                                         "loc": ["body", "messages", 0, "role"],
+                                         "message": "Each message must have a 'role' field with one of these values: 'system', 'user', 'assistant'",
+                                         "type": "value_error"
+                                     }
+                                 ]
+                             }),
+                         )
+                     elif "content" in error_msg:
+                         return JSONResponse(
+                             status_code=HTTP_422_UNPROCESSABLE_ENTITY,
+                             content=jsonable_encoder({
+                                 "detail": [
+                                     {
+                                         "loc": ["body", "messages", 0, "content"],
+                                         "message": "Each message must have a 'content' field with string value",
+                                         "type": "value_error"
+                                     }
+                                 ]
+                             }),
+                         )
+                     else:
+                         return JSONResponse(
+                             status_code=HTTP_422_UNPROCESSABLE_ENTITY,
+                             content=jsonable_encoder({
+                                 "detail": [
+                                     {
+                                         "loc": ["body"],
+                                         "message": f"Validation error: {error_msg}",
+                                         "type": "value_error"
+                                     }
+                                 ]
+                             }),
+                         )
+
+             except json.JSONDecodeError as e:
+                 logger.error(f"JSON decode error in chat_completions: {e}")
+                 example = json.dumps({
+                     "model": "gpt-4",
+                     "messages": [{"role": "user", "content": "Hello"}]
+                 })
+                 return JSONResponse(
+                     status_code=HTTP_422_UNPROCESSABLE_ENTITY,
+                     content=jsonable_encoder({
+                         "detail": [
+                             {
+                                 "loc": ["body", 0],
+                                 "message": f"Invalid JSON format: {str(e)}. Example of correct format: {example}",
+                                 "type": "json_invalid"
+                             }
+                         ]
+                     }),
+                 )
+             except Exception as e:
+                 logger.exception(f"Unexpected error in chat_completions: {e}")
+                 return ErrorResponse.from_message(
+                     f"Invalid request parameters: {str(e)}",
+                     HTTP_422_UNPROCESSABLE_ENTITY
+                 )
+             """Create a chat completion"""
+             try:
+                 # Determine which provider to use based on the model
+                 provider_class = None
+                 model = chat_request.model
+                 logger.info(f"Chat completion request for model: {model}")
+
+                 if model in AppConfig.provider_map:
+                     provider_class = AppConfig.provider_map[model]
+                     logger.info(f"Found provider class for model {model}: {provider_class.__name__}")
+                 else:
+                     # Use default provider if specific provider not found
+                     provider_class = AppConfig.provider_map.get(AppConfig.default_provider)
+                     logger.info(f"Using default provider {AppConfig.default_provider} for model {model}")
+
+                 if not provider_class:
+                     logger.error(f"No provider available for model {model}. Available models: {list(AppConfig.provider_map.keys())}")
+                     return ErrorResponse.from_message(
+                         f"Model '{model}' not supported. Available models: {list(AppConfig.provider_map.keys())}",
+                         HTTP_404_NOT_FOUND
+                     )
+
+                 # Initialize provider
+                 logger.info(f"Initializing provider {provider_class.__name__}")
+                 try:
+                     provider = provider_class()
+                 except Exception as e:
+                     logger.exception(f"Failed to initialize provider {provider_class.__name__}: {e}")
+                     return ErrorResponse.from_message(
+                         f"Failed to initialize provider {provider_class.__name__}: {e}",
+                         HTTP_500_INTERNAL_SERVER_ERROR
+                     )
+
+                 # Prepare completion parameters
+                 # Convert Message objects to dictionaries for the provider
+                 messages = []
+                 for msg in chat_request.messages:
+                     message_dict = {
+                         "role": msg.role,
+                         "content": msg.content
+                     }
+                     # Add name field if present
+                     if msg.name:
+                         message_dict["name"] = msg.name
+                     messages.append(message_dict)
+
+                 params = {
+                     "model": model,
+                     "messages": messages,
+                     "stream": chat_request.stream,
+                 }
+
+                 # Add optional parameters if provided
+                 if chat_request.temperature is not None:
+                     params["temperature"] = chat_request.temperature
+                 if chat_request.max_tokens is not None:
+                     params["max_tokens"] = chat_request.max_tokens
+                 if chat_request.top_p is not None:
+                     params["top_p"] = chat_request.top_p
+
+                 # Create completion
+                 if chat_request.stream:
+                     async def streaming():
+                         try:
+                             logger.info(f"Creating streaming completion with {provider_class.__name__}")
+                             completion_stream = provider.chat.completions.create(**params)
+                             logger.info(f"Got streaming response: {type(completion_stream)}")
+
+                             if isinstance(completion_stream, Generator):
+                                 for chunk in completion_stream:
+                                     logger.debug(f"Streaming chunk: {type(chunk)}")
+                                     if hasattr(chunk, 'to_dict'):
+                                         # Use to_dict() for our custom dataclasses
+                                         yield f"data: {json.dumps(chunk.to_dict())}\n\n"
+                                     elif hasattr(chunk, 'model_dump'):
+                                         # For Pydantic models
+                                         yield f"data: {json.dumps(chunk.model_dump())}\n\n"
+                                     else:
+                                         # For dictionaries or other JSON-serializable objects
+                                         yield f"data: {json.dumps(chunk)}\n\n"
+                             else:
+                                 # If the provider doesn't implement streaming but stream=True,
+                                 # simulate streaming with a single chunk
+                                 logger.info(f"Provider returned non-streaming response, simulating stream")
+                                 yield f"data: {json.dumps(completion_stream)}\n\n"
+                         except Exception as e:
+                             logger.exception(f"Error in streaming: {e}")
+                             yield f"data: {format_exception(e)}\n\n"
+                         yield "data: [DONE]\n\n"
+
+                     return StreamingResponse(streaming(), media_type="text/event-stream")
+                 else:
+                     logger.info(f"Creating non-streaming completion with {provider_class.__name__}")
+                     try:
+                         completion = provider.chat.completions.create(**params)
+                         logger.info(f"Got completion response: {type(completion)}")
+
+                         # If the response is empty or None, create a default response
+                         if completion is None:
+                             logger.warning(f"Provider {provider_class.__name__} returned None for completion")
+                             return {
+                                 "id": f"chatcmpl-{uuid.uuid4()}",
+                                 "created": int(time.time()),
+                                 "model": model,
+                                 "choices": [
+                                     {
+                                         "index": 0,
+                                         "message": {
+                                             "role": "assistant",
+                                             "content": "I apologize, but I couldn't generate a response. Please try again or try a different model.",
+                                         },
+                                         "finish_reason": "stop",
+                                     }
+                                 ],
+                                 "usage": {
+                                     "prompt_tokens": 0,
+                                     "completion_tokens": 0,
+                                     "total_tokens": 0,
+                                 },
+                             }
+
+                         # Return the response in the appropriate format
+                         if isinstance(completion, dict):
+                             return completion
+                         elif hasattr(completion, "model_dump"):
+                             return completion.model_dump()
+                         else:
+                             return completion
+                     except Exception as e:
+                         logger.exception(f"Error in completion: {e}")
+                         return ErrorResponse.from_exception(e, HTTP_500_INTERNAL_SERVER_ERROR)
+
+             except Exception as e:
+                 logger.exception(e)
+                 return ErrorResponse.from_exception(e, HTTP_500_INTERNAL_SERVER_ERROR)
+
+ def format_exception(e: Union[Exception, str]) -> str:
+     """Format exception into a JSON string"""
+     if isinstance(e, str):
+         message = e
+     else:
+         message = f"{e.__class__.__name__}: {e}"
+     return json.dumps({
+         "error": {
+             "message": message,
+             "type": "server_error",
+             "param": None,
+             "code": "internal_server_error"
+         }
+     })
+
+ def start_server(port: int = DEFAULT_PORT, api_key: str = None, default_provider: str = None):
+     """
+     Simple helper function to start the OpenAI-compatible API server.
+
+     Args:
+         port: Port to run the server on (default: 8000)
+         api_key: Optional API key for authentication
+         default_provider: Default provider to use (e.g., "ChatGPT", "Claude", etc.)
+
+     Example:
+         ```python
+         from webscout.Provider.OPENAI.api import start_server
+
+         # Start server with default settings
+         start_server()
+
+         # Start server with custom settings
+         start_server(port=8080, api_key="your-api-key", default_provider="Claude")
+         ```
+     """
+     run_api(
+         host="0.0.0.0",
+         port=port,
+         api_key=api_key,
+         default_provider=default_provider,
+         debug=False,
+     )
+
+ def run_api(
+     host: str = '0.0.0.0',
+     port: int = None,
+     api_key: str = None,
+     default_provider: str = None,
+     debug: bool = False,
+     show_available_providers: bool = True,
+ ) -> None:
+     """Run the API server
+
+     Args:
+         host: Host to bind the server to
+         port: Port to bind the server to
+         api_key: API key for authentication (optional)
+         default_provider: Default provider to use if no provider is specified
+         debug: Whether to run in debug mode
+         show_available_providers: Whether to display available providers on startup
+     """
+     print(f"Starting Webscout OpenAI API server...")
+
+     if port is None:
+         port = DEFAULT_PORT
+
+     # Set configuration
+     AppConfig.set_config(
+         api_key=api_key,
+         default_provider=default_provider or AppConfig.default_provider
+     )
+
+     # Initialize provider map early to show available providers
+     initialize_provider_map()
+
+     if show_available_providers:
+         print("\n=== Available Providers ===")
+         providers = list(set(v.__name__ for v in AppConfig.provider_map.values()))
+         for i, provider in enumerate(providers, 1):
+             print(f"{i}. {provider}")
+
+         print("\n=== Available Models ===")
+         # Filter out provider class names from the model list
+         provider_class_names = set(v.__name__ for v in AppConfig.provider_map.values())
+         models = [model for model in AppConfig.provider_map.keys() if model not in provider_class_names]
+
+         # Display models in a more organized way
+         if models:
+             for i, model in enumerate(sorted(models), 1):
+                 print(f"{i}. {model}")
+         else:
+             print("No specific models registered. Use provider names as models.")
+
+         print(f"\nDefault provider: {AppConfig.default_provider}")
+         print(f"API Authentication: {'Enabled' if api_key else 'Disabled'}")
+         print(f"Server URL: http://{host if host != '0.0.0.0' else 'localhost'}:{port}")
+         print(f"API Endpoint: http://{host if host != '0.0.0.0' else 'localhost'}:{port}/v1/chat/completions")
+         print(f"Documentation: http://{host if host != '0.0.0.0' else 'localhost'}:{port}/docs")
+         print("\nUse Ctrl+C to stop the server")
+         print("=" * 30 + "\n")
+
+     # Run the server
+     uvicorn.run(
+         "webscout.Provider.OPENAI.api:create_app_debug" if debug else "webscout.Provider.OPENAI.api:create_app",
+         host=host,
+         port=int(port),
+         factory=True,
+     )
+
+ # Command line interface
+ if __name__ == "__main__":
+     import argparse
+
+     parser = argparse.ArgumentParser(description="Webscout OpenAI-compatible API server")
+     parser.add_argument("--host", default="0.0.0.0", help="Host to bind the server to")
+     parser.add_argument("--port", type=int, default=DEFAULT_PORT, help="Port to bind the server to")
+     parser.add_argument("--api-key", help="API key for authentication (optional)")
+     parser.add_argument("--default-provider", help="Default provider to use if no provider is specified")
+     parser.add_argument("--debug", action="store_true", help="Run in debug mode")
+     parser.add_argument("--quiet", action="store_true", help="Don't show available providers on startup")
+
+     args = parser.parse_args()
+
+     try:
+         run_api(
+             host=args.host,
+             port=args.port,
+             api_key=args.api_key,
+             default_provider=args.default_provider,
+             debug=args.debug,
+             show_available_providers=not args.quiet,
+         )
+     except KeyboardInterrupt:
+         print("\nServer stopped by user")
+     except Exception as e:
+         print(f"\nError: {e}")
+         if args.debug:
+             import traceback
+             traceback.print_exc()
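
Because the new api.py module exposes an OpenAI-compatible surface (GET /v1/models, POST /v1/chat/completions, optional Bearer API key), a standard OpenAI client pointed at the local server should be able to use it. The snippet below is a minimal illustration, not part of the diff: it assumes the openai Python package (v1+) is installed, the server was started on the default port 8000 with api_key="your-key", and "gpt-4" is one of the model ids reported by /v1/models.

    from openai import OpenAI

    # Point the client at the local Webscout server instead of api.openai.com.
    client = OpenAI(base_url="http://localhost:8000/v1", api_key="your-key")

    # Model id must match an entry from GET /v1/models ("gpt-4" is assumed here).
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response.choices[0].message.content)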