webscout 8.2.9__py3-none-any.whl → 8.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIauto.py +6 -6
- webscout/AIbase.py +61 -1
- webscout/Extra/YTToolkit/ytapi/patterns.py +45 -45
- webscout/Extra/YTToolkit/ytapi/stream.py +1 -1
- webscout/Extra/YTToolkit/ytapi/video.py +10 -10
- webscout/Extra/autocoder/autocoder_utiles.py +1 -1
- webscout/Litlogger/formats.py +9 -0
- webscout/Litlogger/handlers.py +18 -0
- webscout/Litlogger/logger.py +43 -1
- webscout/Provider/AISEARCH/scira_search.py +3 -2
- webscout/Provider/Blackboxai.py +2 -0
- webscout/Provider/ChatSandbox.py +2 -1
- webscout/Provider/Deepinfra.py +1 -1
- webscout/Provider/HeckAI.py +1 -1
- webscout/Provider/LambdaChat.py +8 -1
- webscout/Provider/MCPCore.py +7 -3
- webscout/Provider/OPENAI/BLACKBOXAI.py +396 -113
- webscout/Provider/OPENAI/Cloudflare.py +31 -14
- webscout/Provider/OPENAI/FalconH1.py +457 -0
- webscout/Provider/OPENAI/FreeGemini.py +29 -13
- webscout/Provider/OPENAI/NEMOTRON.py +26 -14
- webscout/Provider/OPENAI/PI.py +427 -0
- webscout/Provider/OPENAI/Qwen3.py +161 -140
- webscout/Provider/OPENAI/README.md +3 -0
- webscout/Provider/OPENAI/TogetherAI.py +355 -0
- webscout/Provider/OPENAI/TwoAI.py +29 -12
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +33 -23
- webscout/Provider/OPENAI/api.py +375 -24
- webscout/Provider/OPENAI/autoproxy.py +39 -0
- webscout/Provider/OPENAI/base.py +91 -12
- webscout/Provider/OPENAI/c4ai.py +31 -10
- webscout/Provider/OPENAI/chatgpt.py +56 -24
- webscout/Provider/OPENAI/chatgptclone.py +46 -16
- webscout/Provider/OPENAI/chatsandbox.py +7 -3
- webscout/Provider/OPENAI/copilot.py +26 -10
- webscout/Provider/OPENAI/deepinfra.py +29 -12
- webscout/Provider/OPENAI/e2b.py +358 -158
- webscout/Provider/OPENAI/exaai.py +13 -10
- webscout/Provider/OPENAI/exachat.py +10 -6
- webscout/Provider/OPENAI/flowith.py +7 -3
- webscout/Provider/OPENAI/freeaichat.py +10 -6
- webscout/Provider/OPENAI/glider.py +10 -6
- webscout/Provider/OPENAI/heckai.py +11 -8
- webscout/Provider/OPENAI/llmchatco.py +9 -7
- webscout/Provider/OPENAI/mcpcore.py +10 -7
- webscout/Provider/OPENAI/multichat.py +3 -1
- webscout/Provider/OPENAI/netwrck.py +10 -6
- webscout/Provider/OPENAI/oivscode.py +12 -9
- webscout/Provider/OPENAI/opkfc.py +31 -8
- webscout/Provider/OPENAI/scirachat.py +17 -10
- webscout/Provider/OPENAI/sonus.py +10 -6
- webscout/Provider/OPENAI/standardinput.py +18 -9
- webscout/Provider/OPENAI/textpollinations.py +14 -7
- webscout/Provider/OPENAI/toolbaz.py +16 -11
- webscout/Provider/OPENAI/typefully.py +14 -7
- webscout/Provider/OPENAI/typegpt.py +10 -6
- webscout/Provider/OPENAI/uncovrAI.py +22 -8
- webscout/Provider/OPENAI/venice.py +10 -6
- webscout/Provider/OPENAI/writecream.py +13 -10
- webscout/Provider/OPENAI/x0gpt.py +11 -9
- webscout/Provider/OPENAI/yep.py +12 -10
- webscout/Provider/PI.py +2 -1
- webscout/Provider/STT/__init__.py +3 -0
- webscout/Provider/STT/base.py +281 -0
- webscout/Provider/STT/elevenlabs.py +265 -0
- webscout/Provider/TTI/__init__.py +3 -1
- webscout/Provider/TTI/aiarta.py +399 -365
- webscout/Provider/TTI/base.py +74 -2
- webscout/Provider/TTI/fastflux.py +63 -30
- webscout/Provider/TTI/gpt1image.py +149 -0
- webscout/Provider/TTI/imagen.py +196 -0
- webscout/Provider/TTI/magicstudio.py +60 -29
- webscout/Provider/TTI/piclumen.py +43 -32
- webscout/Provider/TTI/pixelmuse.py +232 -225
- webscout/Provider/TTI/pollinations.py +43 -32
- webscout/Provider/TTI/together.py +287 -0
- webscout/Provider/TTI/utils.py +2 -1
- webscout/Provider/TTS/README.md +1 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/freetts.py +140 -0
- webscout/Provider/UNFINISHED/ChutesAI.py +314 -0
- webscout/Provider/UNFINISHED/fetch_together_models.py +95 -0
- webscout/Provider/__init__.py +3 -2
- webscout/Provider/granite.py +41 -6
- webscout/Provider/oivscode.py +37 -37
- webscout/Provider/scira_chat.py +3 -2
- webscout/Provider/scnet.py +1 -0
- webscout/Provider/toolbaz.py +0 -1
- webscout/litagent/Readme.md +12 -3
- webscout/litagent/agent.py +99 -62
- webscout/version.py +1 -1
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/METADATA +2 -1
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/RECORD +98 -87
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/WHEEL +1 -1
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/TTI/artbit.py +0 -0
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/api.py
CHANGED
@@ -9,16 +9,18 @@ authentication, and provider management.
 from __future__ import annotations
 
 import json
-import logging
 import os
 import secrets
 import sys
 import time
 import uuid
 import inspect
+import re
+import codecs
 from typing import List, Dict, Optional, Union, Any, Generator, Callable
 import types
 
+from webscout.Litlogger import Logger, LogLevel, LogFormat, ConsoleHandler
 import uvicorn
 from fastapi import FastAPI, Response, Request, Body
 from fastapi.middleware.cors import CORSMiddleware
@@ -28,6 +30,18 @@ from fastapi.routing import APIRoute
 from fastapi.exceptions import RequestValidationError
 from fastapi.security import APIKeyHeader
 from starlette.exceptions import HTTPException as StarletteHTTPException
+
+def clean_text(text):
+    """Clean text by removing null bytes and control characters except newlines and tabs."""
+    if not isinstance(text, str):
+        return text
+
+    # Remove null bytes
+    text = text.replace('\x00', '')
+
+    # Keep newlines, tabs, and other printable characters, remove other control chars
+    # This regex matches control characters except \n, \r, \t
+    return re.sub(r'[\x01-\x08\x0b\x0c\x0e-\x1f\x7f]', '', text)
 from starlette.status import (
     HTTP_422_UNPROCESSABLE_ENTITY,
     HTTP_404_NOT_FOUND,
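The new clean_text helper underpins the sanitization applied throughout this release: it drops null bytes and C0/DEL control characters while preserving newlines, carriage returns, and tabs. A minimal behavior check, restating the function from the hunk above so it runs standalone (the assertions are illustrative, not from the source):

    import re

    def clean_text(text):
        """Strip NUL and control characters, keeping newlines, carriage returns, and tabs."""
        if not isinstance(text, str):
            return text
        text = text.replace('\x00', '')
        return re.sub(r'[\x01-\x08\x0b\x0c\x0e-\x1f\x7f]', '', text)

    assert clean_text("a\x00b\x07c") == "abc"                        # NUL and BEL removed
    assert clean_text("line1\nline2\tend") == "line1\nline2\tend"    # \n and \t survive
    assert clean_text(123) == 123                                    # non-strings pass through unchanged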
@@ -44,6 +58,9 @@ from webscout.Provider.OPENAI import *
 from webscout.Provider.OPENAI.utils import (
     ChatCompletion, Choice, ChatCompletionMessage, CompletionUsage
 )
+from webscout.Provider.TTI import *
+from webscout.Provider.TTI.utils import ImageData, ImageResponse
+from webscout.Provider.TTI.base import TTICompatibleProvider
 
 
 # Configuration constants
@@ -51,15 +68,13 @@ DEFAULT_PORT = 8000
 DEFAULT_HOST = "0.0.0.0"
 API_VERSION = "v1"
 
-# Setup
-
-
-
-    handlers=[
-
-    ]
+# Setup Litlogger
+logger = Logger(
+    name="webscout.api",
+    level=LogLevel.INFO,
+    handlers=[ConsoleHandler(stream=sys.stdout)],
+    fmt=LogFormat.DEFAULT
 )
-logger = logging.getLogger("webscout.api")
 
 
 class ServerConfig:
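The stdlib logging setup is replaced by webscout's own Litlogger. A minimal sketch of the new setup, using only the constructor arguments and logger methods visible in this diff; anything else about Litlogger's API is an assumption:

    import sys
    from webscout.Litlogger import Logger, LogLevel, LogFormat, ConsoleHandler

    # Mirrors the api.py setup above; .info/.debug/.error are the only methods
    # the rest of the module calls on the logger.
    logger = Logger(
        name="webscout.api",
        level=LogLevel.INFO,
        handlers=[ConsoleHandler(stream=sys.stdout)],
        fmt=LogFormat.DEFAULT,
    )
    logger.info("server starting")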
@@ -97,6 +112,10 @@ class ServerConfig:
 # Global configuration instance
 config = ServerConfig()
 
+# Cache for provider instances to avoid reinitialization on every request
+provider_instances: Dict[str, Any] = {}
+tti_provider_instances: Dict[str, Any] = {}
+
 
 # Define Pydantic models for multimodal content parts, aligning with OpenAI's API
 class TextPart(BaseModel):
@@ -153,7 +172,7 @@ class ChatCompletionRequest(BaseModel):
         extra = "ignore"  # Ignore extra fields that aren't in the model
         schema_extra = {
             "example": {
-                "model": "
+                "model": "Cloudflare/@cf/meta/llama-4-scout-17b-16e-instruct",
                 "messages": [
                     {"role": "system", "content": "You are a helpful assistant."},
                     {"role": "user", "content": "Hello, how are you?"}
@@ -164,6 +183,33 @@ class ChatCompletionRequest(BaseModel):
             }
         }
 
+class ImageGenerationRequest(BaseModel):
+    """Request model for OpenAI-compatible image generation endpoint."""
+    prompt: str = Field(..., description="A text description of the desired image(s). The maximum length is 1000 characters.")
+    model: str = Field(..., description="The model to use for image generation.")
+    n: Optional[int] = Field(1, description="The number of images to generate. Must be between 1 and 10.")
+    size: Optional[str] = Field("1024x1024", description="The size of the generated images. Must be one of: '256x256', '512x512', or '1024x1024'.")
+    response_format: Optional[Literal["url", "b64_json"]] = Field("url", description="The format in which the generated images are returned. Must be either 'url' or 'b64_json'.")
+    user: Optional[str] = Field(None, description="A unique identifier representing your end-user, which can help to monitor and detect abuse.")
+    style: Optional[str] = Field(None, description="Optional style for the image (provider/model-specific).")
+    aspect_ratio: Optional[str] = Field(None, description="Optional aspect ratio for the image (provider/model-specific).")
+    timeout: Optional[int] = Field(None, description="Optional timeout for the image generation request in seconds.")
+    image_format: Optional[str] = Field(None, description="Optional image format (e.g., 'png', 'jpeg').")
+    seed: Optional[int] = Field(None, description="Optional random seed for reproducibility.")
+
+    class Config:
+        extra = "ignore"
+        schema_extra = {
+            "example": {
+                "prompt": "A futuristic cityscape at sunset, digital art",
+                "model": "PollinationsAI/turbo",
+                "n": 1,
+                "size": "1024x1024",
+                "response_format": "url",
+                "user": "user-1234"
+            }
+        }
+
 class ModelInfo(BaseModel):
     """Model information for the models endpoint."""
     id: str
@@ -223,7 +269,9 @@ class AppConfig:
     """Legacy configuration class for backward compatibility."""
     api_key: Optional[str] = None
     provider_map = {}
+    tti_provider_map = {}  # Add TTI provider map
     default_provider = "ChatGPT"
+    default_tti_provider = "PollinationsAI"  # Add default TTI provider
     base_url: Optional[str] = None
 
     @classmethod
@@ -275,6 +323,7 @@ def create_app():
     api.register_validation_exception_handler()
     api.register_routes()
     initialize_provider_map()
+    initialize_tti_provider_map()  # Initialize TTI providers
 
     def custom_openapi():
         if app.openapi_schema:
@@ -303,6 +352,7 @@ def create_app():
         "ImagePart": ImagePart,
         "Message": Message,
         "ChatCompletionRequest": ChatCompletionRequest,
+        "ImageGenerationRequest": ImageGenerationRequest,
     }
 
     for name, model_cls in pydantic_models_to_register.items():
@@ -386,6 +436,63 @@ def initialize_provider_map() -> None:
         logger.error(f"Failed to initialize provider map: {e}")
         raise APIError(f"Provider initialization failed: {e}", HTTP_500_INTERNAL_SERVER_ERROR)
 
+def initialize_tti_provider_map() -> None:
+    """Initialize the TTI provider map by discovering available TTI providers."""
+    logger.info("Initializing TTI provider map...")
+
+    try:
+        import webscout.Provider.TTI as tti_module
+
+        provider_count = 0
+        model_count = 0
+
+        for name, obj in inspect.getmembers(tti_module):
+            if (
+                inspect.isclass(obj)
+                and issubclass(obj, TTICompatibleProvider)
+                and obj.__name__ != "TTICompatibleProvider"
+                and obj.__name__ != "BaseImages"
+            ):
+                provider_name = obj.__name__
+                AppConfig.tti_provider_map[provider_name] = obj
+                provider_count += 1
+
+                # Register available models for this TTI provider
+                if hasattr(obj, "AVAILABLE_MODELS") and isinstance(
+                    obj.AVAILABLE_MODELS, (list, tuple, set)
+                ):
+                    for model in obj.AVAILABLE_MODELS:
+                        if model and isinstance(model, str):
+                            model_key = f"{provider_name}/{model}"
+                            AppConfig.tti_provider_map[model_key] = obj
+                            model_count += 1
+
+        # Fallback to PollinationsAI if no TTI providers found
+        if not AppConfig.tti_provider_map:
+            logger.warning("No TTI providers found, using PollinationsAI fallback")
+            try:
+                from webscout.Provider.TTI.pollinations import PollinationsAI
+                fallback_models = ["flux", "turbo", "gptimage"]
+
+                AppConfig.tti_provider_map["PollinationsAI"] = PollinationsAI
+
+                for model in fallback_models:
+                    model_key = f"PollinationsAI/{model}"
+                    AppConfig.tti_provider_map[model_key] = PollinationsAI
+
+                AppConfig.default_tti_provider = "PollinationsAI"
+                provider_count = 1
+                model_count = len(fallback_models)
+            except ImportError as e:
+                logger.error(f"Failed to import PollinationsAI fallback: {e}")
+                raise APIError("No TTI providers available", HTTP_500_INTERNAL_SERVER_ERROR)
+
+        logger.info(f"Initialized {provider_count} TTI providers with {model_count} models")
+
+    except Exception as e:
+        logger.error(f"Failed to initialize TTI provider map: {e}")
+        raise APIError(f"TTI Provider initialization failed: {e}", HTTP_500_INTERNAL_SERVER_ERROR)
+
 class Api:
     def __init__(self, app: FastAPI) -> None:
         self.app = app
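initialize_tti_provider_map is a reflection-based registry: it scans the TTI package for concrete subclasses of TTICompatibleProvider and indexes each one under both its bare class name and a "Provider/model" key per entry in AVAILABLE_MODELS. The same pattern reduced to its core; discover and its parameter names are illustrative, not part of webscout:

    import inspect

    def discover(module, base_cls, skip=("TTICompatibleProvider", "BaseImages")):
        """Map class names and 'Provider/model' keys to provider classes."""
        registry = {}
        for _, obj in inspect.getmembers(module):
            if inspect.isclass(obj) and issubclass(obj, base_cls) and obj.__name__ not in skip:
                registry[obj.__name__] = obj
                models = getattr(obj, "AVAILABLE_MODELS", ())
                if isinstance(models, (list, tuple, set)):
                    for model in models:
                        if model and isinstance(model, str):
                            registry[f"{obj.__name__}/{model}"] = obj
        return registry

Registering both key shapes is what lets resolve_tti_provider_and_model (further down) accept either "PollinationsAI" or "PollinationsAI/turbo" as a model identifier.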
@@ -536,6 +643,29 @@ class Api:
                     "created": int(time.time()),
                     "owned_by": provider_class.__name__
                 })
+            # Sort models alphabetically by the part after the first '/'
+            models = sorted(models, key=lambda m: m["id"].split("/", 1)[1].lower())
+            return {
+                "object": "list",
+                "data": models
+            }
+
+        @self.app.get("/v1/TTI/models", response_model=ModelListResponse)
+        async def list_tti_models():
+            models = []
+            for model_name, provider_class in AppConfig.tti_provider_map.items():
+                if "/" not in model_name:
+                    continue  # Skip provider names
+                if any(m["id"] == model_name for m in models):
+                    continue
+                models.append({
+                    "id": model_name,
+                    "object": "model",
+                    "created": int(time.time()),
+                    "owned_by": provider_class.__name__
+                })
+            # Sort models alphabetically by the part after the first '/'
+            models = sorted(models, key=lambda m: m["id"].split("/", 1)[1].lower())
             return {
                 "object": "list",
                 "data": models
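A hypothetical client call against the new /v1/TTI/models route, assuming a server running locally on the default host and port (DEFAULT_HOST/DEFAULT_PORT above):

    import requests

    resp = requests.get("http://localhost:8000/v1/TTI/models")
    resp.raise_for_status()
    for model in resp.json()["data"]:
        print(model["id"], "->", model["owned_by"])   # e.g. PollinationsAI/turbo -> PollinationsAI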
@@ -571,10 +701,10 @@ class Api:
             # Resolve provider and model
             provider_class, model_name = resolve_provider_and_model(chat_request.model)
 
-            # Initialize provider with error handling
+            # Initialize provider with caching and error handling
             try:
-                provider = provider_class
-                logger.debug(f"
+                provider = get_provider_instance(provider_class)
+                logger.debug(f"Using provider instance: {provider_class.__name__}")
             except Exception as e:
                 logger.error(f"Failed to initialize provider {provider_class.__name__}: {e}")
                 raise APIError(
@@ -606,6 +736,92 @@
                     "internal_error"
                 )
 
+        @self.app.post(
+            "/v1/images/generations",
+            response_model_exclude_none=True,
+            response_model_exclude_unset=True,
+            openapi_extra={
+                "requestBody": {
+                    "content": {
+                        "application/json": {
+                            "schema": {
+                                "$ref": "#/components/schemas/ImageGenerationRequest"
+                            },
+                            "example": ImageGenerationRequest.Config.schema_extra["example"]
+                        }
+                    }
+                } }
+        )
+        async def image_generations(
+            image_request: ImageGenerationRequest = Body(...)
+        ):
+            """Handle image generation requests (OpenAI-compatible)."""
+            request_id = f"imggen-{uuid.uuid4()}"
+            try:
+                logger.info(f"Processing image generation request {request_id} for model: {image_request.model}")
+                # Provider/model resolution using TTI providers
+                provider_class, model_name = resolve_tti_provider_and_model(image_request.model)
+                # Initialize provider with caching
+                try:
+                    provider = get_tti_provider_instance(provider_class)
+                    logger.debug(f"Using TTI provider instance: {provider_class.__name__}")
+                except Exception as e:
+                    logger.error(f"Failed to initialize provider {provider_class.__name__}: {e}")
+                    raise APIError(
+                        f"Failed to initialize provider {provider_class.__name__}: {e}",
+                        HTTP_500_INTERNAL_SERVER_ERROR,
+                        "provider_error"
+                    )
+                # Prepare parameters for provider
+                params = {
+                    "model": model_name,
+                    "prompt": image_request.prompt,
+                    "n": image_request.n,
+                    "size": image_request.size,
+                    "response_format": image_request.response_format,
+                    "user": image_request.user,
+                    "style": image_request.style,
+                    "aspect_ratio": image_request.aspect_ratio,
+                    "timeout": image_request.timeout,
+                    "image_format": image_request.image_format,
+                    "seed": image_request.seed,
+                }
+                # Remove None values
+                params = {k: v for k, v in params.items() if v is not None}
+                # Call provider
+                try:
+                    result = provider.images.create(**params)
+                except Exception as e:
+                    logger.error(f"Error in image generation for request {request_id}: {e}")
+                    raise APIError(
+                        f"Provider error: {str(e)}",
+                        HTTP_500_INTERNAL_SERVER_ERROR,
+                        "provider_error"
+                    )
+                # Standardize response
+                if hasattr(result, "model_dump"):
+                    response_data = result.model_dump(exclude_none=True)
+                elif hasattr(result, "dict"):
+                    response_data = result.dict(exclude_none=True)
+                elif isinstance(result, dict):
+                    response_data = result
+                else:
+                    raise APIError(
+                        "Invalid response format from provider",
+                        HTTP_500_INTERNAL_SERVER_ERROR,
+                        "provider_error"
+                    )
+                return response_data
+            except APIError:
+                raise
+            except Exception as e:
+                logger.error(f"Unexpected error in image generation {request_id}: {e}")
+                raise APIError(
+                    f"Internal server error: {str(e)}",
+                    HTTP_500_INTERNAL_SERVER_ERROR,
+                    "internal_error"
+                )
+
 
 def resolve_provider_and_model(model_identifier: str) -> tuple[Any, str]:
     """Resolve provider class and model name from model identifier."""
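With the endpoint wired up, images can be requested in the same shape OpenAI clients use. A hypothetical call against a local server, reusing the example payload from ImageGenerationRequest; the URL and port are assumptions based on the defaults above:

    import requests

    payload = {
        "prompt": "A futuristic cityscape at sunset, digital art",
        "model": "PollinationsAI/turbo",
        "n": 1,
        "size": "1024x1024",
        "response_format": "url",
    }
    resp = requests.post("http://localhost:8000/v1/images/generations", json=payload)
    resp.raise_for_status()
    print(resp.json())   # the provider's image response, with None fields stripped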
@@ -634,7 +850,16 @@ def resolve_provider_and_model(model_identifier: str) -> tuple[Any, str]:
 
     # Validate model availability
     if hasattr(provider_class, "AVAILABLE_MODELS") and model_name is not None:
-        available = getattr(provider_class, "AVAILABLE_MODELS",
+        available = getattr(provider_class, "AVAILABLE_MODELS", None)
+        # If it's a property, get from instance
+        if isinstance(available, property):
+            try:
+                available = getattr(provider_class(), "AVAILABLE_MODELS", [])
+            except Exception:
+                available = []
+        # If still not iterable, fallback to empty list
+        if not isinstance(available, (list, tuple, set)):
+            available = list(available) if hasattr(available, "__iter__") and not isinstance(available, str) else []
         if available and model_name not in available:
             raise APIError(
                 f"Model '{model_name}' not supported by provider '{provider_class.__name__}'. Available models: {available}",
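The rewritten validation now copes with providers that expose AVAILABLE_MODELS as a property rather than a plain class attribute: accessed on the class itself, a property is just a descriptor object, not a list, so the code instantiates the provider to read it. A small standalone illustration (DemoProvider is hypothetical):

    class DemoProvider:
        @property
        def AVAILABLE_MODELS(self):
            return ["model-a", "model-b"]

    print(type(getattr(DemoProvider, "AVAILABLE_MODELS")))   # <class 'property'>
    print(getattr(DemoProvider(), "AVAILABLE_MODELS"))       # ['model-a', 'model-b']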
@@ -645,6 +870,73 @@ def resolve_provider_and_model(model_identifier: str) -> tuple[Any, str]:
 
     return provider_class, model_name
 
+def resolve_tti_provider_and_model(model_identifier: str) -> tuple[Any, str]:
+    """Resolve TTI provider class and model name from model identifier."""
+    provider_class = None
+    model_name = None
+
+    # Check for explicit provider/model syntax
+    if model_identifier in AppConfig.tti_provider_map and "/" in model_identifier:
+        provider_class = AppConfig.tti_provider_map[model_identifier]
+        _, model_name = model_identifier.split("/", 1)
+    elif "/" in model_identifier:
+        provider_name, model_name = model_identifier.split("/", 1)
+        provider_class = AppConfig.tti_provider_map.get(provider_name)
+    else:
+        provider_class = AppConfig.tti_provider_map.get(AppConfig.default_tti_provider)
+        model_name = model_identifier
+
+    if not provider_class:
+        available_providers = list(set(v.__name__ for v in AppConfig.tti_provider_map.values()))
+        raise APIError(
+            f"TTI Provider for model '{model_identifier}' not found. Available TTI providers: {available_providers}",
+            HTTP_404_NOT_FOUND,
+            "model_not_found",
+            param="model"
+        )
+
+    # Validate model availability
+    if hasattr(provider_class, "AVAILABLE_MODELS") and model_name is not None:
+        available = getattr(provider_class, "AVAILABLE_MODELS", None)
+        # If it's a property, get from instance
+        if isinstance(available, property):
+            try:
+                available = getattr(provider_class(), "AVAILABLE_MODELS", [])
+            except Exception:
+                available = []
+        # If still not iterable, fallback to empty list
+        if not isinstance(available, (list, tuple, set)):
+            available = list(available) if hasattr(available, "__iter__") and not isinstance(available, str) else []
+        if available and model_name not in available:
+            raise APIError(
+                f"Model '{model_name}' not supported by TTI provider '{provider_class.__name__}'. Available models: {available}",
+                HTTP_404_NOT_FOUND,
+                "model_not_found",
+                param="model"
+            )
+
+    return provider_class, model_name
+
+
+def get_provider_instance(provider_class: Any):
+    """Return a cached instance of the provider, creating it if necessary."""
+    key = provider_class.__name__
+    instance = provider_instances.get(key)
+    if instance is None:
+        instance = provider_class()
+        provider_instances[key] = instance
+    return instance
+
+
+def get_tti_provider_instance(provider_class: Any):
+    """Return a cached instance of the TTI provider, creating it if needed."""
+    key = provider_class.__name__
+    instance = tti_provider_instances.get(key)
+    if instance is None:
+        instance = provider_class()
+        tti_provider_instances[key] = instance
+    return instance
+
 
 def process_messages(messages: List[Message]) -> List[Dict[str, Any]]:
     """Process and validate chat messages."""
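get_provider_instance and get_tti_provider_instance are plain memoization keyed by class name, so each provider is constructed once per process instead of once per request. The core of the pattern in isolation (get_instance and _instances are illustrative names):

    from typing import Any, Dict

    _instances: Dict[str, Any] = {}

    def get_instance(cls: type) -> Any:
        """Construct cls on first use, then return the same object on later calls."""
        key = cls.__name__           # note: two classes sharing a __name__ would collide
        if key not in _instances:
            _instances[key] = cls()  # first request pays the construction cost
        return _instances[key]

A consequence worth noting: cached instances are shared across concurrent requests, so providers have to be safe to reuse.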
@@ -722,34 +1014,69 @@ async def handle_streaming_response(provider: Any, params: Dict[str, Any], request_id: str):
                             chunk_data = chunk
                         else:  # Fallback for unknown chunk types
                             chunk_data = chunk
-
+
+                        # Clean text content in the chunk to remove control characters
+                        if isinstance(chunk_data, dict) and 'choices' in chunk_data:
+                            for choice in chunk_data.get('choices', []):
+                                if isinstance(choice, dict):
+                                    # Handle delta for streaming
+                                    if 'delta' in choice and isinstance(choice['delta'], dict) and 'content' in choice['delta']:
+                                        choice['delta']['content'] = clean_text(choice['delta']['content'])
+                                    # Handle message for non-streaming
+                                    elif 'message' in choice and isinstance(choice['message'], dict) and 'content' in choice['message']:
+                                        choice['message']['content'] = clean_text(choice['message']['content'])
+
+                        yield f"data: {json.dumps(chunk_data, ensure_ascii=False)}\n\n"
                 except TypeError as te:
                     logger.error(f"Error iterating over completion_stream: {te}")
                     # Fall back to treating as non-generator response
                     if hasattr(completion_stream, 'model_dump'):
-
+                        response_data = completion_stream.model_dump(exclude_none=True)
                     elif hasattr(completion_stream, 'dict'):
-
+                        response_data = completion_stream.dict(exclude_none=True)
                     else:
-
+                        response_data = completion_stream
+
+                    # Clean text content in the response
+                    if isinstance(response_data, dict) and 'choices' in response_data:
+                        for choice in response_data.get('choices', []):
+                            if isinstance(choice, dict):
+                                if 'delta' in choice and isinstance(choice['delta'], dict) and 'content' in choice['delta']:
+                                    choice['delta']['content'] = clean_text(choice['delta']['content'])
+                                elif 'message' in choice and isinstance(choice['message'], dict) and 'content' in choice['message']:
+                                    choice['message']['content'] = clean_text(choice['message']['content'])
+
+                    yield f"data: {json.dumps(response_data, ensure_ascii=False)}\n\n"
             else:  # Non-generator response
                 if hasattr(completion_stream, 'model_dump'):
-
+                    response_data = completion_stream.model_dump(exclude_none=True)
                 elif hasattr(completion_stream, 'dict'):
-
+                    response_data = completion_stream.dict(exclude_none=True)
                 else:
-
+                    response_data = completion_stream
+
+                # Clean text content in the response
+                if isinstance(response_data, dict) and 'choices' in response_data:
+                    for choice in response_data.get('choices', []):
+                        if isinstance(choice, dict):
+                            if 'delta' in choice and isinstance(choice['delta'], dict) and 'content' in choice['delta']:
+                                choice['delta']['content'] = clean_text(choice['delta']['content'])
+                            elif 'message' in choice and isinstance(choice['message'], dict) and 'content' in choice['message']:
+                                choice['message']['content'] = clean_text(choice['message']['content'])
+
+                yield f"data: {json.dumps(response_data, ensure_ascii=False)}\n\n"
 
         except Exception as e:
             logger.error(f"Error in streaming response for request {request_id}: {e}")
+            error_message = clean_text(str(e))
             error_data = {
                 "error": {
-                    "message":
+                    "message": error_message,
                     "type": "server_error",
                     "code": "streaming_error"
                 }
             }
-            yield f"data: {json.dumps(error_data)}\n\n"
+            yield f"data: {json.dumps(error_data, ensure_ascii=False)}\n\n"
         finally:
             yield "data: [DONE]\n\n"
     return StreamingResponse(streaming(), media_type="text/event-stream")
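Every event is now emitted as data: <json> with ensure_ascii=False, and the stream always terminates with data: [DONE]. A hypothetical consumer, assuming a local server on the default port and that the request body carries an OpenAI-style stream flag:

    import json
    import requests

    body = {
        "model": "ChatGPT",   # the default provider per AppConfig
        "messages": [{"role": "user", "content": "Hello"}],
        "stream": True,
    }
    with requests.post("http://localhost:8000/v1/chat/completions", json=body, stream=True) as resp:
        for line in resp.iter_lines():
            if not line or not line.startswith(b"data: "):
                continue
            data = line[len(b"data: "):]
            if data == b"[DONE]":
                break
            delta = json.loads(data)["choices"][0].get("delta", {})
            print(delta.get("content", ""), end="", flush=True)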
@@ -789,6 +1116,13 @@ async def handle_non_streaming_response(provider: Any, params: Dict[str, Any], request_id: str):
                 HTTP_500_INTERNAL_SERVER_ERROR,
                 "provider_error"
             )
+
+        # Clean text content in the response to remove control characters
+        if isinstance(response_data, dict) and 'choices' in response_data:
+            for choice in response_data.get('choices', []):
+                if isinstance(choice, dict) and 'message' in choice:
+                    if isinstance(choice['message'], dict) and 'content' in choice['message']:
+                        choice['message']['content'] = clean_text(choice['message']['content'])
 
         elapsed = time.time() - start_time
         logger.info(f"Completed non-streaming request {request_id} in {elapsed:.2f}s")
@@ -797,8 +1131,9 @@
 
     except Exception as e:
         logger.error(f"Error in non-streaming response for request {request_id}: {e}")
+        error_message = clean_text(str(e))
         raise APIError(
-            f"Provider error: {
+            f"Provider error: {error_message}",
             HTTP_500_INTERNAL_SERVER_ERROR,
             "provider_error"
         )
@@ -864,6 +1199,8 @@ def run_api(
     if show_available_providers:  # Initialize map if needed for display before app creation
         if not AppConfig.provider_map:  # Avoid re-initializing if already done by app creation logic path
             initialize_provider_map()
+        if not AppConfig.tti_provider_map:
+            initialize_tti_provider_map()  # Ensure TTI providers are initialized for display
 
         print("\n=== Webscout OpenAI API Server ===")
         print(f"Server URL: http://{host if host != '0.0.0.0' else 'localhost'}:{port}")
@@ -895,6 +1232,19 @@
         else:
             print("\nNo specific models registered. Use provider names as models.")
 
+        tti_providers = list(set(v.__name__ for v in AppConfig.tti_provider_map.values()))
+        print(f"\n--- Available TTI Providers ({len(tti_providers)}) ---")
+        for i, provider_name in enumerate(sorted(tti_providers), 1):
+            print(f"{i}. {provider_name}")
+
+        tti_models = sorted([model for model in AppConfig.tti_provider_map.keys() if model not in tti_providers])
+        if tti_models:
+            print(f"\n--- Available TTI Models ({len(tti_models)}) ---")
+            for i, model_name in enumerate(tti_models, 1):
+                print(f"{i}. {model_name} (via {AppConfig.tti_provider_map[model_name].__name__})")
+        else:
+            print("\nNo specific TTI models registered. Use TTI provider names as models.")
+
         print("\nUse Ctrl+C to stop the server.")
         print("=" * 40 + "\n")
@@ -967,3 +1317,4 @@ if __name__ == "__main__":
         base_url=args.base_url,
         debug=args.debug
     )
+
webscout/Provider/OPENAI/autoproxy.py
ADDED
@@ -0,0 +1,39 @@
+# # ProxyFox integration for OpenAI-compatible providers
+# # This module provides a singleton proxy pool for all providers
+
+# import proxyfox
+
+# def get_auto_proxy(protocol='https', country=None, max_speed_ms=1000):
+#     """
+#     Returns a single proxy string (e.g. '11.22.33.44:8080') using proxyfox.
+#     You can specify protocol, country, and max_speed_ms for filtering.
+#     """
+#     kwargs = {'protocol': protocol, 'max_speed_ms': max_speed_ms}
+#     if country:
+#         kwargs['country'] = country
+#     return proxyfox.get_one(**kwargs)
+
+# # Optionally: pool support for advanced usage
+# _pool = None
+
+# def get_proxy_pool(size=10, refresh_interval=300, protocol='https', max_speed_ms=1000):
+#     global _pool
+#     if _pool is None:
+#         _pool = proxyfox.create_pool(
+#             size=size,
+#             refresh_interval=refresh_interval,
+#             protocol=protocol,
+#             max_speed_ms=max_speed_ms
+#         )
+#     return _pool
+
+# def get_pool_proxy():
+#     pool = get_proxy_pool()
+#     return pool.get()
+
+# def get_all_pool_proxies():
+#     pool = get_proxy_pool()
+#     return pool.all()
+
+# if __name__ == "__main__":
+#     print(get_auto_proxy())