webscout-8.3-py3-none-any.whl → webscout-8.3.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +4 -4
- webscout/AIbase.py +61 -1
- webscout/Extra/YTToolkit/ytapi/patterns.py +45 -45
- webscout/Extra/YTToolkit/ytapi/stream.py +1 -1
- webscout/Extra/YTToolkit/ytapi/video.py +10 -10
- webscout/Extra/autocoder/autocoder_utiles.py +1 -1
- webscout/Litlogger/formats.py +9 -0
- webscout/Litlogger/handlers.py +18 -0
- webscout/Litlogger/logger.py +43 -1
- webscout/Provider/AISEARCH/scira_search.py +3 -2
- webscout/Provider/LambdaChat.py +7 -1
- webscout/Provider/OPENAI/BLACKBOXAI.py +1049 -1017
- webscout/Provider/OPENAI/Qwen3.py +303 -303
- webscout/Provider/OPENAI/README.md +3 -0
- webscout/Provider/OPENAI/TogetherAI.py +355 -0
- webscout/Provider/OPENAI/__init__.py +2 -1
- webscout/Provider/OPENAI/api.py +298 -13
- webscout/Provider/OPENAI/autoproxy.py +39 -0
- webscout/Provider/OPENAI/base.py +89 -12
- webscout/Provider/OPENAI/chatgpt.py +15 -2
- webscout/Provider/OPENAI/chatgptclone.py +14 -3
- webscout/Provider/OPENAI/deepinfra.py +339 -328
- webscout/Provider/OPENAI/e2b.py +295 -73
- webscout/Provider/OPENAI/opkfc.py +18 -6
- webscout/Provider/OPENAI/scirachat.py +3 -2
- webscout/Provider/OPENAI/toolbaz.py +0 -1
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +367 -367
- webscout/Provider/OPENAI/yep.py +383 -383
- webscout/Provider/STT/__init__.py +3 -0
- webscout/Provider/STT/base.py +281 -0
- webscout/Provider/STT/elevenlabs.py +265 -0
- webscout/Provider/TTI/__init__.py +3 -1
- webscout/Provider/TTI/aiarta.py +399 -365
- webscout/Provider/TTI/base.py +74 -2
- webscout/Provider/TTI/fastflux.py +63 -30
- webscout/Provider/TTI/gpt1image.py +149 -0
- webscout/Provider/TTI/imagen.py +196 -0
- webscout/Provider/TTI/magicstudio.py +60 -29
- webscout/Provider/TTI/piclumen.py +43 -32
- webscout/Provider/TTI/pixelmuse.py +232 -225
- webscout/Provider/TTI/pollinations.py +43 -32
- webscout/Provider/TTI/together.py +287 -0
- webscout/Provider/TTI/utils.py +2 -1
- webscout/Provider/TTS/README.md +1 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/freetts.py +140 -0
- webscout/Provider/UNFINISHED/ChutesAI.py +314 -0
- webscout/Provider/UNFINISHED/fetch_together_models.py +95 -0
- webscout/Provider/__init__.py +3 -0
- webscout/Provider/scira_chat.py +3 -2
- webscout/Provider/toolbaz.py +0 -1
- webscout/litagent/Readme.md +12 -3
- webscout/litagent/agent.py +99 -62
- webscout/version.py +1 -1
- {webscout-8.3.dist-info → webscout-8.3.1.dist-info}/METADATA +1 -1
- {webscout-8.3.dist-info → webscout-8.3.1.dist-info}/RECORD +61 -51
- webscout/Provider/TTI/artbit.py +0 -0
- {webscout-8.3.dist-info → webscout-8.3.1.dist-info}/WHEEL +0 -0
- {webscout-8.3.dist-info → webscout-8.3.1.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.dist-info → webscout-8.3.1.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.dist-info → webscout-8.3.1.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/api.py
CHANGED
@@ -9,7 +9,6 @@ authentication, and provider management.
 from __future__ import annotations

 import json
-import logging
 import os
 import secrets
 import sys
@@ -21,6 +20,7 @@ import codecs
 from typing import List, Dict, Optional, Union, Any, Generator, Callable
 import types

+from webscout.Litlogger import Logger, LogLevel, LogFormat, ConsoleHandler
 import uvicorn
 from fastapi import FastAPI, Response, Request, Body
 from fastapi.middleware.cors import CORSMiddleware
@@ -58,6 +58,9 @@ from webscout.Provider.OPENAI import *
 from webscout.Provider.OPENAI.utils import (
     ChatCompletion, Choice, ChatCompletionMessage, CompletionUsage
 )
+from webscout.Provider.TTI import *
+from webscout.Provider.TTI.utils import ImageData, ImageResponse
+from webscout.Provider.TTI.base import TTICompatibleProvider


 # Configuration constants
@@ -65,15 +68,13 @@ DEFAULT_PORT = 8000
 DEFAULT_HOST = "0.0.0.0"
 API_VERSION = "v1"

-# Setup
-
-
-
-    handlers=[
-
-    ]
+# Setup Litlogger
+logger = Logger(
+    name="webscout.api",
+    level=LogLevel.INFO,
+    handlers=[ConsoleHandler(stream=sys.stdout)],
+    fmt=LogFormat.DEFAULT
 )
-logger = logging.getLogger("webscout.api")


 class ServerConfig:
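The stdlib logging setup above is replaced wholesale by webscout's own Litlogger. As a minimal standalone sketch of the new pattern (the constructor arguments mirror the hunk above; the info() call is assumed to exist because later hunks in this same file invoke logger.info, logger.debug, and logger.error):

import sys
from webscout.Litlogger import Logger, LogLevel, LogFormat, ConsoleHandler

# Mirrors the setup in the hunk above; the logger name is illustrative.
logger = Logger(
    name="example.app",
    level=LogLevel.INFO,
    handlers=[ConsoleHandler(stream=sys.stdout)],
    fmt=LogFormat.DEFAULT
)
logger.info("server starting")  # replaces logging.getLogger("...").info(...)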
@@ -111,6 +112,10 @@ class ServerConfig:
 # Global configuration instance
 config = ServerConfig()

+# Cache for provider instances to avoid reinitialization on every request
+provider_instances: Dict[str, Any] = {}
+tti_provider_instances: Dict[str, Any] = {}
+

 # Define Pydantic models for multimodal content parts, aligning with OpenAI's API
 class TextPart(BaseModel):
@@ -167,7 +172,7 @@ class ChatCompletionRequest(BaseModel):
         extra = "ignore"  # Ignore extra fields that aren't in the model
         schema_extra = {
             "example": {
-                "model": "
+                "model": "Cloudflare/@cf/meta/llama-4-scout-17b-16e-instruct",
                 "messages": [
                     {"role": "system", "content": "You are a helpful assistant."},
                     {"role": "user", "content": "Hello, how are you?"}
@@ -178,6 +183,33 @@ class ChatCompletionRequest(BaseModel):
             }
         }

+class ImageGenerationRequest(BaseModel):
+    """Request model for OpenAI-compatible image generation endpoint."""
+    prompt: str = Field(..., description="A text description of the desired image(s). The maximum length is 1000 characters.")
+    model: str = Field(..., description="The model to use for image generation.")
+    n: Optional[int] = Field(1, description="The number of images to generate. Must be between 1 and 10.")
+    size: Optional[str] = Field("1024x1024", description="The size of the generated images. Must be one of: '256x256', '512x512', or '1024x1024'.")
+    response_format: Optional[Literal["url", "b64_json"]] = Field("url", description="The format in which the generated images are returned. Must be either 'url' or 'b64_json'.")
+    user: Optional[str] = Field(None, description="A unique identifier representing your end-user, which can help to monitor and detect abuse.")
+    style: Optional[str] = Field(None, description="Optional style for the image (provider/model-specific).")
+    aspect_ratio: Optional[str] = Field(None, description="Optional aspect ratio for the image (provider/model-specific).")
+    timeout: Optional[int] = Field(None, description="Optional timeout for the image generation request in seconds.")
+    image_format: Optional[str] = Field(None, description="Optional image format (e.g., 'png', 'jpeg').")
+    seed: Optional[int] = Field(None, description="Optional random seed for reproducibility.")
+
+    class Config:
+        extra = "ignore"
+        schema_extra = {
+            "example": {
+                "prompt": "A futuristic cityscape at sunset, digital art",
+                "model": "PollinationsAI/turbo",
+                "n": 1,
+                "size": "1024x1024",
+                "response_format": "url",
+                "user": "user-1234"
+            }
+        }
+
 class ModelInfo(BaseModel):
     """Model information for the models endpoint."""
     id: str
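The ImageGenerationRequest model added above mirrors OpenAI's images API, so a plain HTTP client can exercise the new endpoint. A minimal sketch of a client call (localhost:8000 matches the DEFAULT_HOST/DEFAULT_PORT constants earlier in this file; the absence of auth and the OpenAI-style "data"/"url" response shape are assumptions):

import requests

resp = requests.post(
    "http://localhost:8000/v1/images/generations",
    json={
        "prompt": "A futuristic cityscape at sunset, digital art",
        "model": "PollinationsAI/turbo",  # "Provider/model" syntax, as in the example above
        "n": 1,
        "size": "1024x1024",
        "response_format": "url",
    },
    timeout=120,
)
resp.raise_for_status()
for image in resp.json().get("data", []):  # assumes an OpenAI-style response body
    print(image.get("url"))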
@@ -237,7 +269,9 @@ class AppConfig:
     """Legacy configuration class for backward compatibility."""
     api_key: Optional[str] = None
     provider_map = {}
+    tti_provider_map = {}  # Add TTI provider map
     default_provider = "ChatGPT"
+    default_tti_provider = "PollinationsAI"  # Add default TTI provider
     base_url: Optional[str] = None

     @classmethod
@@ -289,6 +323,7 @@ def create_app():
     api.register_validation_exception_handler()
     api.register_routes()
     initialize_provider_map()
+    initialize_tti_provider_map()  # Initialize TTI providers

     def custom_openapi():
         if app.openapi_schema:
@@ -317,6 +352,7 @@ def create_app():
         "ImagePart": ImagePart,
         "Message": Message,
         "ChatCompletionRequest": ChatCompletionRequest,
+        "ImageGenerationRequest": ImageGenerationRequest,
     }

     for name, model_cls in pydantic_models_to_register.items():
@@ -400,6 +436,63 @@ def initialize_provider_map() -> None:
         logger.error(f"Failed to initialize provider map: {e}")
         raise APIError(f"Provider initialization failed: {e}", HTTP_500_INTERNAL_SERVER_ERROR)

+def initialize_tti_provider_map() -> None:
+    """Initialize the TTI provider map by discovering available TTI providers."""
+    logger.info("Initializing TTI provider map...")
+
+    try:
+        import webscout.Provider.TTI as tti_module
+
+        provider_count = 0
+        model_count = 0
+
+        for name, obj in inspect.getmembers(tti_module):
+            if (
+                inspect.isclass(obj)
+                and issubclass(obj, TTICompatibleProvider)
+                and obj.__name__ != "TTICompatibleProvider"
+                and obj.__name__ != "BaseImages"
+            ):
+                provider_name = obj.__name__
+                AppConfig.tti_provider_map[provider_name] = obj
+                provider_count += 1
+
+                # Register available models for this TTI provider
+                if hasattr(obj, "AVAILABLE_MODELS") and isinstance(
+                    obj.AVAILABLE_MODELS, (list, tuple, set)
+                ):
+                    for model in obj.AVAILABLE_MODELS:
+                        if model and isinstance(model, str):
+                            model_key = f"{provider_name}/{model}"
+                            AppConfig.tti_provider_map[model_key] = obj
+                            model_count += 1
+
+        # Fallback to PollinationsAI if no TTI providers found
+        if not AppConfig.tti_provider_map:
+            logger.warning("No TTI providers found, using PollinationsAI fallback")
+            try:
+                from webscout.Provider.TTI.pollinations import PollinationsAI
+                fallback_models = ["flux", "turbo", "gptimage"]
+
+                AppConfig.tti_provider_map["PollinationsAI"] = PollinationsAI
+
+                for model in fallback_models:
+                    model_key = f"PollinationsAI/{model}"
+                    AppConfig.tti_provider_map[model_key] = PollinationsAI
+
+                AppConfig.default_tti_provider = "PollinationsAI"
+                provider_count = 1
+                model_count = len(fallback_models)
+            except ImportError as e:
+                logger.error(f"Failed to import PollinationsAI fallback: {e}")
+                raise APIError("No TTI providers available", HTTP_500_INTERNAL_SERVER_ERROR)
+
+        logger.info(f"Initialized {provider_count} TTI providers with {model_count} models")
+
+    except Exception as e:
+        logger.error(f"Failed to initialize TTI provider map: {e}")
+        raise APIError(f"TTI Provider initialization failed: {e}", HTTP_500_INTERNAL_SERVER_ERROR)
+
 class Api:
     def __init__(self, app: FastAPI) -> None:
         self.app = app
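The discovery loop above keys tti_provider_map twice: once by bare provider class name and once per model as "Provider/model". A small runnable sketch of the resulting shape (FakeTTIProvider is illustrative, not a webscout class):

class FakeTTIProvider:
    AVAILABLE_MODELS = ["flux", "turbo"]

tti_provider_map = {}
name = FakeTTIProvider.__name__
tti_provider_map[name] = FakeTTIProvider          # bare provider name
for model in FakeTTIProvider.AVAILABLE_MODELS:    # one extra key per model
    tti_provider_map[f"{name}/{model}"] = FakeTTIProvider

print(sorted(tti_provider_map))
# ['FakeTTIProvider', 'FakeTTIProvider/flux', 'FakeTTIProvider/turbo']

Both key styles resolve to the same class; resolve_tti_provider_and_model, later in this diff, splits on the first "/" to recover the model name.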
@@ -550,6 +643,29 @@ class Api:
                     "created": int(time.time()),
                     "owned_by": provider_class.__name__
                 })
+            # Sort models alphabetically by the part after the first '/'
+            models = sorted(models, key=lambda m: m["id"].split("/", 1)[1].lower())
+            return {
+                "object": "list",
+                "data": models
+            }
+
+        @self.app.get("/v1/TTI/models", response_model=ModelListResponse)
+        async def list_tti_models():
+            models = []
+            for model_name, provider_class in AppConfig.tti_provider_map.items():
+                if "/" not in model_name:
+                    continue  # Skip provider names
+                if any(m["id"] == model_name for m in models):
+                    continue
+                models.append({
+                    "id": model_name,
+                    "object": "model",
+                    "created": int(time.time()),
+                    "owned_by": provider_class.__name__
+                })
+            # Sort models alphabetically by the part after the first '/'
+            models = sorted(models, key=lambda m: m["id"].split("/", 1)[1].lower())
             return {
                 "object": "list",
                 "data": models
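The new /v1/TTI/models route returns the same {"object": "list", "data": [...]} envelope as the chat models listing above it. A quick sketch of querying it (the base URL is an assumption; the path comes from the decorator above):

import requests

BASE = "http://localhost:8000"  # assumed local server
tti_models = requests.get(f"{BASE}/v1/TTI/models", timeout=30).json()
for m in tti_models["data"][:5]:
    print(m["id"], "owned by", m["owned_by"])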
@@ -585,10 +701,10 @@ class Api:
             # Resolve provider and model
             provider_class, model_name = resolve_provider_and_model(chat_request.model)

-            # Initialize provider with error handling
+            # Initialize provider with caching and error handling
             try:
-                provider = provider_class
-                logger.debug(f"
+                provider = get_provider_instance(provider_class)
+                logger.debug(f"Using provider instance: {provider_class.__name__}")
             except Exception as e:
                 logger.error(f"Failed to initialize provider {provider_class.__name__}: {e}")
                 raise APIError(
@@ -620,6 +736,92 @@ class Api:
                     "internal_error"
                 )

+        @self.app.post(
+            "/v1/images/generations",
+            response_model_exclude_none=True,
+            response_model_exclude_unset=True,
+            openapi_extra={
+                "requestBody": {
+                    "content": {
+                        "application/json": {
+                            "schema": {
+                                "$ref": "#/components/schemas/ImageGenerationRequest"
+                            },
+                            "example": ImageGenerationRequest.Config.schema_extra["example"]
+                        }
+                    }
+                } }
+        )
+        async def image_generations(
+            image_request: ImageGenerationRequest = Body(...)
+        ):
+            """Handle image generation requests (OpenAI-compatible)."""
+            request_id = f"imggen-{uuid.uuid4()}"
+            try:
+                logger.info(f"Processing image generation request {request_id} for model: {image_request.model}")
+                # Provider/model resolution using TTI providers
+                provider_class, model_name = resolve_tti_provider_and_model(image_request.model)
+                # Initialize provider with caching
+                try:
+                    provider = get_tti_provider_instance(provider_class)
+                    logger.debug(f"Using TTI provider instance: {provider_class.__name__}")
+                except Exception as e:
+                    logger.error(f"Failed to initialize provider {provider_class.__name__}: {e}")
+                    raise APIError(
+                        f"Failed to initialize provider {provider_class.__name__}: {e}",
+                        HTTP_500_INTERNAL_SERVER_ERROR,
+                        "provider_error"
+                    )
+                # Prepare parameters for provider
+                params = {
+                    "model": model_name,
+                    "prompt": image_request.prompt,
+                    "n": image_request.n,
+                    "size": image_request.size,
+                    "response_format": image_request.response_format,
+                    "user": image_request.user,
+                    "style": image_request.style,
+                    "aspect_ratio": image_request.aspect_ratio,
+                    "timeout": image_request.timeout,
+                    "image_format": image_request.image_format,
+                    "seed": image_request.seed,
+                }
+                # Remove None values
+                params = {k: v for k, v in params.items() if v is not None}
+                # Call provider
+                try:
+                    result = provider.images.create(**params)
+                except Exception as e:
+                    logger.error(f"Error in image generation for request {request_id}: {e}")
+                    raise APIError(
+                        f"Provider error: {str(e)}",
+                        HTTP_500_INTERNAL_SERVER_ERROR,
+                        "provider_error"
+                    )
+                # Standardize response
+                if hasattr(result, "model_dump"):
+                    response_data = result.model_dump(exclude_none=True)
+                elif hasattr(result, "dict"):
+                    response_data = result.dict(exclude_none=True)
+                elif isinstance(result, dict):
+                    response_data = result
+                else:
+                    raise APIError(
+                        "Invalid response format from provider",
+                        HTTP_500_INTERNAL_SERVER_ERROR,
+                        "provider_error"
+                    )
+                return response_data
+            except APIError:
+                raise
+            except Exception as e:
+                logger.error(f"Unexpected error in image generation {request_id}: {e}")
+                raise APIError(
+                    f"Internal server error: {str(e)}",
+                    HTTP_500_INTERNAL_SERVER_ERROR,
+                    "internal_error"
+                )
+

 def resolve_provider_and_model(model_identifier: str) -> tuple[Any, str]:
     """Resolve provider class and model name from model identifier."""
@@ -668,6 +870,73 @@ def resolve_provider_and_model(model_identifier: str) -> tuple[Any, str]:

     return provider_class, model_name

+def resolve_tti_provider_and_model(model_identifier: str) -> tuple[Any, str]:
+    """Resolve TTI provider class and model name from model identifier."""
+    provider_class = None
+    model_name = None
+
+    # Check for explicit provider/model syntax
+    if model_identifier in AppConfig.tti_provider_map and "/" in model_identifier:
+        provider_class = AppConfig.tti_provider_map[model_identifier]
+        _, model_name = model_identifier.split("/", 1)
+    elif "/" in model_identifier:
+        provider_name, model_name = model_identifier.split("/", 1)
+        provider_class = AppConfig.tti_provider_map.get(provider_name)
+    else:
+        provider_class = AppConfig.tti_provider_map.get(AppConfig.default_tti_provider)
+        model_name = model_identifier
+
+    if not provider_class:
+        available_providers = list(set(v.__name__ for v in AppConfig.tti_provider_map.values()))
+        raise APIError(
+            f"TTI Provider for model '{model_identifier}' not found. Available TTI providers: {available_providers}",
+            HTTP_404_NOT_FOUND,
+            "model_not_found",
+            param="model"
+        )
+
+    # Validate model availability
+    if hasattr(provider_class, "AVAILABLE_MODELS") and model_name is not None:
+        available = getattr(provider_class, "AVAILABLE_MODELS", None)
+        # If it's a property, get from instance
+        if isinstance(available, property):
+            try:
+                available = getattr(provider_class(), "AVAILABLE_MODELS", [])
+            except Exception:
+                available = []
+        # If still not iterable, fallback to empty list
+        if not isinstance(available, (list, tuple, set)):
+            available = list(available) if hasattr(available, "__iter__") and not isinstance(available, str) else []
+        if available and model_name not in available:
+            raise APIError(
+                f"Model '{model_name}' not supported by TTI provider '{provider_class.__name__}'. Available models: {available}",
+                HTTP_404_NOT_FOUND,
+                "model_not_found",
+                param="model"
+            )
+
+    return provider_class, model_name
+
+
+def get_provider_instance(provider_class: Any):
+    """Return a cached instance of the provider, creating it if necessary."""
+    key = provider_class.__name__
+    instance = provider_instances.get(key)
+    if instance is None:
+        instance = provider_class()
+        provider_instances[key] = instance
+    return instance
+
+
+def get_tti_provider_instance(provider_class: Any):
+    """Return a cached instance of the TTI provider, creating it if needed."""
+    key = provider_class.__name__
+    instance = tti_provider_instances.get(key)
+    if instance is None:
+        instance = provider_class()
+        tti_provider_instances[key] = instance
+    return instance
+

 def process_messages(messages: List[Message]) -> List[Dict[str, Any]]:
     """Process and validate chat messages."""
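Both helpers above memoize one instance per provider class name, which is what lets the chat and image endpoints skip re-initialization on every request. A self-contained sketch of the observable effect (FakeProvider is illustrative):

provider_instances = {}

def get_provider_instance(provider_class):
    key = provider_class.__name__
    instance = provider_instances.get(key)
    if instance is None:
        instance = provider_class()       # constructed once...
        provider_instances[key] = instance
    return instance

class FakeProvider:
    pass

a = get_provider_instance(FakeProvider)
b = get_provider_instance(FakeProvider)
assert a is b  # ...then reused across requests

One consequence of this design: arguments such as api_key or proxies are not part of the cache key, and the cached instance is built with no arguments, so every request shares that first no-arg instance.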
@@ -930,6 +1199,8 @@ def run_api(
     if show_available_providers:  # Initialize map if needed for display before app creation
         if not AppConfig.provider_map:  # Avoid re-initializing if already done by app creation logic path
             initialize_provider_map()
+        if not AppConfig.tti_provider_map:
+            initialize_tti_provider_map()  # Ensure TTI providers are initialized for display

         print("\n=== Webscout OpenAI API Server ===")
         print(f"Server URL: http://{host if host != '0.0.0.0' else 'localhost'}:{port}")
@@ -961,6 +1232,19 @@ def run_api(
         else:
             print("\nNo specific models registered. Use provider names as models.")

+        tti_providers = list(set(v.__name__ for v in AppConfig.tti_provider_map.values()))
+        print(f"\n--- Available TTI Providers ({len(tti_providers)}) ---")
+        for i, provider_name in enumerate(sorted(tti_providers), 1):
+            print(f"{i}. {provider_name}")
+
+        tti_models = sorted([model for model in AppConfig.tti_provider_map.keys() if model not in tti_providers])
+        if tti_models:
+            print(f"\n--- Available TTI Models ({len(tti_models)}) ---")
+            for i, model_name in enumerate(tti_models, 1):
+                print(f"{i}. {model_name} (via {AppConfig.tti_provider_map[model_name].__name__})")
+        else:
+            print("\nNo specific TTI models registered. Use TTI provider names as models.")
+
         print("\nUse Ctrl+C to stop the server.")
         print("=" * 40 + "\n")
@@ -1033,3 +1317,4 @@ if __name__ == "__main__":
         base_url=args.base_url,
         debug=args.debug
     )
+
webscout/Provider/OPENAI/autoproxy.py
ADDED
@@ -0,0 +1,39 @@
+# # ProxyFox integration for OpenAI-compatible providers
+# # This module provides a singleton proxy pool for all providers
+
+# import proxyfox
+
+# def get_auto_proxy(protocol='https', country=None, max_speed_ms=1000):
+#     """
+#     Returns a single proxy string (e.g. '11.22.33.44:8080') using proxyfox.
+#     You can specify protocol, country, and max_speed_ms for filtering.
+#     """
+#     kwargs = {'protocol': protocol, 'max_speed_ms': max_speed_ms}
+#     if country:
+#         kwargs['country'] = country
+#     return proxyfox.get_one(**kwargs)
+
+# # Optionally: pool support for advanced usage
+# _pool = None
+
+# def get_proxy_pool(size=10, refresh_interval=300, protocol='https', max_speed_ms=1000):
+#     global _pool
+#     if _pool is None:
+#         _pool = proxyfox.create_pool(
+#             size=size,
+#             refresh_interval=refresh_interval,
+#             protocol=protocol,
+#             max_speed_ms=max_speed_ms
+#         )
+#     return _pool
+
+# def get_pool_proxy():
+#     pool = get_proxy_pool()
+#     return pool.get()
+
+# def get_all_pool_proxies():
+#     pool = get_proxy_pool()
+#     return pool.all()
+
+# if __name__ == "__main__":
+#     print(get_auto_proxy())
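Every line of the new autoproxy.py is commented out, so the module is inert in 8.3.1 despite its +39 line count. If it were enabled, usage per its own docstrings might look like the following sketch, kept as comments because proxyfox's get_one API is taken from the text above and not independently verified:

# from webscout.Provider.OPENAI.autoproxy import get_auto_proxy
#
# proxy = get_auto_proxy(protocol="https", max_speed_ms=1000)  # e.g. "11.22.33.44:8080"
# proxies = {"http": f"http://{proxy}", "https": f"http://{proxy}"}
#
# import requests
# requests.get("https://example.com", proxies=proxies, timeout=10)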
webscout/Provider/OPENAI/base.py
CHANGED
@@ -3,12 +3,11 @@ from typing import List, Dict, Optional, Union, Generator, Any, TypedDict, Calla
 import json
 import logging
 from dataclasses import dataclass
-
 logger = logging.getLogger(__name__)


 # Import the utils for response structures
-from webscout.Provider.OPENAI.utils import ChatCompletion, ChatCompletionChunk
+from webscout.Provider.OPENAI.utils import ChatCompletion, ChatCompletionChunk

 # Define tool-related structures
 class ToolDefinition(TypedDict):
@@ -174,10 +173,93 @@ class BaseChat(ABC):
     completions: BaseCompletions


+# class ProxyAutoMeta(ABCMeta):
+#     """
+#     Metaclass to ensure all OpenAICompatibleProvider subclasses automatically get proxy support.
+#     This will inject proxies into any requests.Session, httpx.Client, or curl_cffi session attributes found on the instance.
+
+#     To disable automatic proxy injection, set disable_auto_proxy=True in the constructor or
+#     set the class attribute DISABLE_AUTO_PROXY = True.
+#     """
+#     def __call__(cls, *args, **kwargs):
+#         instance = super().__call__(*args, **kwargs)
+
+#         # Check if auto proxy is disabled
+#         disable_auto_proxy = kwargs.get('disable_auto_proxy', False) or getattr(cls, 'DISABLE_AUTO_PROXY', False)
+
+#         proxies = getattr(instance, 'proxies', None) or kwargs.get('proxies', None)
+#         if proxies is None and not disable_auto_proxy:
+#             try:
+#                 proxies = {"http": get_auto_proxy(), "https": get_auto_proxy()}
+#             except Exception as e:
+#                 logger.warning(f"Failed to get auto proxy, disabling proxy support: {e}")
+#                 proxies = {}
+#         elif proxies is None:
+#             proxies = {}
+#         instance.proxies = proxies
+#         # Patch sessions if we have valid proxies
+#         if proxies:
+#             for attr in dir(instance):
+#                 obj = getattr(instance, attr)
+#                 if isinstance(obj, requests.Session):
+#                     obj.proxies.update(proxies)
+#                 if httpx and isinstance(obj, httpx.Client):
+#                     try:
+#                         obj._proxies = proxies
+#                     except Exception:
+#                         pass
+#                 # Patch curl_cffi sessions if present
+#                 if CurlSession and isinstance(obj, CurlSession):
+#                     try:
+#                         obj.proxies.update(proxies)
+#                     except Exception:
+#                         pass
+#                 if CurlAsyncSession and isinstance(obj, CurlAsyncSession):
+#                     try:
+#                         obj.proxies.update(proxies)
+#                     except Exception:
+#                         pass
+#         # Provide helpers for proxied sessions
+#         def get_proxied_session():
+#             s = requests.Session()
+#             s.proxies.update(proxies)
+#             return s
+#         instance.get_proxied_session = get_proxied_session
+
+#         def get_proxied_curl_session(impersonate="chrome120", **kwargs):
+#             """Get a curl_cffi Session with proxies configured"""
+#             if CurlSession:
+#                 return CurlSession(proxies=proxies, impersonate=impersonate, **kwargs)
+#             else:
+#                 raise ImportError("curl_cffi is not installed")
+#         instance.get_proxied_curl_session = get_proxied_curl_session
+
+#         def get_proxied_curl_async_session(impersonate="chrome120", **kwargs):
+#             """Get a curl_cffi AsyncSession with proxies configured"""
+#             if CurlAsyncSession:
+#                 return CurlAsyncSession(proxies=proxies, impersonate=impersonate, **kwargs)
+#             else:
+#                 raise ImportError("curl_cffi is not installed")
+#         instance.get_proxied_curl_async_session = get_proxied_curl_async_session
+
+#         return instance
+# class OPENAICompatibleMeta(ABC, metaclass=ProxyAutoMeta):
 class OpenAICompatibleProvider(ABC):
     """
     Abstract Base Class for providers mimicking the OpenAI Python client structure.
     Requires a nested 'chat.completions' structure with tool support.
+    All subclasses automatically get proxy support via ProxyAutoMeta.
+
+    # Available proxy helpers:
+    # - self.get_proxied_session() - returns a requests.Session with proxies
+    # - self.get_proxied_curl_session() - returns a curl_cffi.Session with proxies
+    # - self.get_proxied_curl_async_session() - returns a curl_cffi.AsyncSession with proxies
+
+    # Proxy support is automatically injected into:
+    # - requests.Session objects
+    # - httpx.Client objects
+    # - curl_cffi.requests.Session objects
+    # - curl_cffi.requests.AsyncSession objects
     """
     chat: BaseChat
     available_tools: Dict[str, Tool] = {}  # Dictionary of available tools
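The ProxyAutoMeta block ships commented out, so no metaclass is actually active in 8.3.1 and the docstring's claim that subclasses "automatically get proxy support" is aspirational. For readers unfamiliar with the pattern, a minimal self-contained sketch of post-construction injection via a metaclass __call__ (names are illustrative, not webscout's):

from abc import ABCMeta

import requests

class ProxyInjectingMeta(ABCMeta):
    """After normal construction, push the instance's proxies into any
    requests.Session attributes found on it."""
    def __call__(cls, *args, **kwargs):
        instance = super().__call__(*args, **kwargs)  # runs __init__ first
        proxies = getattr(instance, "proxies", None) or {}
        for attr in dir(instance):
            obj = getattr(instance, attr, None)
            if isinstance(obj, requests.Session):
                obj.proxies.update(proxies)
        return instance

class Client(metaclass=ProxyInjectingMeta):
    def __init__(self):
        self.proxies = {"https": "http://127.0.0.1:8080"}  # illustrative proxy
        self.session = requests.Session()

client = Client()
print(client.session.proxies)  # the proxies were injected after __init__ ran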
@@ -185,19 +267,14 @@ class OpenAICompatibleProvider(ABC):
     supports_tool_choice: bool = False  # Whether the provider supports tool_choice

     @abstractmethod
-    def __init__(self, api_key: Optional[str] = None, tools: Optional[List[Tool]] = None, **kwargs: Any):
-        """
-        Initialize the provider, potentially with an API key and tools.
-
-        Args:
-            api_key: Optional API key for the provider
-            tools: Optional list of tools to make available to the provider
-            **kwargs: Additional provider-specific parameters
-        """
+    def __init__(self, api_key: Optional[str] = None, tools: Optional[List[Tool]] = None, proxies: Optional[dict] = None, **kwargs: Any):
         self.available_tools = {}
         if tools:
             self.register_tools(tools)
-
+        # self.proxies is set by ProxyAutoMeta
+        # Subclasses should use self.proxies for all network requests
+        # Optionally, use self.get_proxied_session() for a requests.Session with proxies
+        # raise NotImplementedError  # <-- Commented out for metaclass test

     @property
     @abstractmethod
webscout/Provider/OPENAI/chatgpt.py
CHANGED

@@ -37,9 +37,21 @@ class ChatGPTReversed:
     csrf_token = None
     initialized = False

+    _instance = None
+
+    def __new__(cls, model="auto"):
+        if cls._instance is None:
+            cls._instance = super(ChatGPTReversed, cls).__new__(cls)
+            cls._instance.initialized = False
+        return cls._instance
+
     def __init__(self, model="auto"):
-        if
-
+        if self.initialized:
+            # Already initialized, just update model if needed
+            if model not in self.AVAILABLE_MODELS:
+                raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+            self.model = model
+            return

         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
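The __new__ override above turns ChatGPTReversed into a process-wide singleton: repeat constructions return the same object, and __init__ short-circuits after the first full initialization, only revalidating and updating model. A standalone sketch of the same pattern (model names are illustrative):

class Singleton:
    _instance = None

    def __new__(cls, model="auto"):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance.initialized = False  # set before __init__ ever runs
        return cls._instance

    def __init__(self, model="auto"):
        if self.initialized:
            self.model = model   # later calls only update the model
            return
        self.model = model
        self.initialized = True  # expensive one-time setup would go here

a = Singleton("auto")
b = Singleton("gpt-4o")
assert a is b and a.model == "gpt-4o"  # one shared instance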
@@ -573,3 +585,4 @@ if __name__ == "__main__":
         messages=[{"role": "user", "content": "How manr r in strawberry"}]
     )
     print(response.choices[0].message.content)
+    print()
webscout/Provider/OPENAI/chatgptclone.py
CHANGED

@@ -7,8 +7,8 @@ import re
 from typing import List, Dict, Optional, Union, Generator, Any

 # Import base classes and utility structures
-from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
-from .utils import (
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
     ChatCompletionMessage, CompletionUsage, count_tokens
 )

@@ -510,4 +510,15 @@ class ChatGPTClone(OpenAICompatibleProvider):
         class _ModelList:
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
-        return _ModelList()
+        return _ModelList()
+if __name__ == "__main__":
+    # Example usage
+    client = ChatGPTClone()
+    response = client.chat.completions.create(
+        model="gpt-4",
+        messages=[{"role": "user", "content": "Hello!"}]
+    )
+    print(response.choices[0].message.content)
+    print()
+    print("Proxies on instance:", client.proxies)
+    print("Proxies on session:", client.session.proxies)