isa-model 0.3.3__py3-none-any.whl → 0.3.5__py3-none-any.whl
This diff compares the publicly released contents of the two package versions as they appear in their public registry. It is provided for informational purposes only.
- isa_model/config/__init__.py +9 -0
- isa_model/config/config_manager.py +213 -0
- isa_model/core/model_manager.py +5 -0
- isa_model/core/model_registry.py +39 -6
- isa_model/core/storage/supabase_storage.py +344 -0
- isa_model/core/vision_models_init.py +116 -0
- isa_model/deployment/cloud/__init__.py +9 -0
- isa_model/deployment/cloud/modal/__init__.py +10 -0
- isa_model/deployment/cloud/modal/isa_vision_doc_service.py +612 -0
- isa_model/deployment/cloud/modal/isa_vision_ui_service.py +305 -0
- isa_model/inference/ai_factory.py +238 -14
- isa_model/inference/providers/modal_provider.py +109 -0
- isa_model/inference/providers/yyds_provider.py +108 -0
- isa_model/inference/services/__init__.py +2 -1
- isa_model/inference/services/base_service.py +0 -38
- isa_model/inference/services/llm/base_llm_service.py +32 -0
- isa_model/inference/services/llm/llm_adapter.py +73 -3
- isa_model/inference/services/llm/ollama_llm_service.py +104 -3
- isa_model/inference/services/llm/openai_llm_service.py +67 -15
- isa_model/inference/services/llm/yyds_llm_service.py +254 -0
- isa_model/inference/services/stacked/__init__.py +26 -0
- isa_model/inference/services/stacked/base_stacked_service.py +269 -0
- isa_model/inference/services/stacked/config.py +426 -0
- isa_model/inference/services/stacked/doc_analysis_service.py +640 -0
- isa_model/inference/services/stacked/flux_professional_service.py +579 -0
- isa_model/inference/services/stacked/ui_analysis_service.py +1319 -0
- isa_model/inference/services/vision/base_image_gen_service.py +0 -34
- isa_model/inference/services/vision/base_vision_service.py +46 -2
- isa_model/inference/services/vision/isA_vision_service.py +402 -0
- isa_model/inference/services/vision/openai_vision_service.py +151 -9
- isa_model/inference/services/vision/replicate_image_gen_service.py +166 -38
- isa_model/inference/services/vision/replicate_vision_service.py +693 -0
- isa_model/serving/__init__.py +19 -0
- isa_model/serving/api/__init__.py +10 -0
- isa_model/serving/api/fastapi_server.py +84 -0
- isa_model/serving/api/middleware/__init__.py +9 -0
- isa_model/serving/api/middleware/request_logger.py +88 -0
- isa_model/serving/api/routes/__init__.py +5 -0
- isa_model/serving/api/routes/health.py +82 -0
- isa_model/serving/api/routes/llm.py +19 -0
- isa_model/serving/api/routes/ui_analysis.py +223 -0
- isa_model/serving/api/routes/vision.py +19 -0
- isa_model/serving/api/schemas/__init__.py +17 -0
- isa_model/serving/api/schemas/common.py +33 -0
- isa_model/serving/api/schemas/ui_analysis.py +78 -0
- {isa_model-0.3.3.dist-info → isa_model-0.3.5.dist-info}/METADATA +1 -1
- {isa_model-0.3.3.dist-info → isa_model-0.3.5.dist-info}/RECORD +49 -17
- {isa_model-0.3.3.dist-info → isa_model-0.3.5.dist-info}/WHEEL +0 -0
- {isa_model-0.3.3.dist-info → isa_model-0.3.5.dist-info}/top_level.txt +0 -0
isa_model/serving/__init__.py
@@ -0,0 +1,19 @@
+"""
+ISA Model Serving Module
+
+Core module for model inference services, including:
+- API service framework
+- Model worker processes
+- Caching layer
+- Performance optimization
+
+Difference from inference module:
+- inference: Client-side inference, calling third-party APIs
+- serving: Self-hosted model services, providing API services
+"""
+
+__version__ = "0.1.0"
+
+from .api.fastapi_server import create_app
+
+__all__ = ["create_app"]
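Example (not part of the diff): based on the exports above, a minimal sketch of standing up the service. The import path comes straight from the diff; running under uvicorn mirrors the __main__ block in fastapi_server.py below and is one option among many.

import uvicorn
from isa_model.serving import create_app

app = create_app()  # configured FastAPI instance (see fastapi_server.py below)
uvicorn.run(app, host="0.0.0.0", port=8000)  # host/port are illustrative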
isa_model/serving/api/fastapi_server.py
@@ -0,0 +1,84 @@
+"""
+FastAPI Server for ISA Model Serving
+
+Main FastAPI application that serves model inference endpoints
+"""
+
+from fastapi import FastAPI, Request
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.responses import JSONResponse
+import time
+import logging
+from typing import Dict, Any
+
+from .routes import ui_analysis, vision, llm, health
+from .middleware.request_logger import RequestLoggerMiddleware
+
+logger = logging.getLogger(__name__)
+
+def create_app(config: Dict[str, Any] = None) -> FastAPI:
+    """
+    Create and configure FastAPI application
+
+    Args:
+        config: Optional configuration dictionary
+
+    Returns:
+        Configured FastAPI application
+    """
+    app = FastAPI(
+        title="ISA Model Serving API",
+        description="High-performance model inference API",
+        version="1.0.0",
+        docs_url="/docs",
+        redoc_url="/redoc"
+    )
+
+    # Configure CORS
+    app.add_middleware(
+        CORSMiddleware,
+        allow_origins=["*"],  # Configure appropriately for production
+        allow_credentials=True,
+        allow_methods=["*"],
+        allow_headers=["*"],
+    )
+
+    # Add custom middleware
+    app.add_middleware(RequestLoggerMiddleware)
+
+    # Exception handlers
+    @app.exception_handler(Exception)
+    async def global_exception_handler(request: Request, exc: Exception):
+        logger.error(f"Global exception: {exc}", exc_info=True)
+        return JSONResponse(
+            status_code=500,
+            content={
+                "error": "Internal server error",
+                "detail": str(exc) if config and config.get("debug") else "An error occurred"
+            }
+        )
+
+    # Include routers
+    app.include_router(health.router, prefix="/health", tags=["health"])
+    app.include_router(ui_analysis.router, prefix="/ui-analysis", tags=["ui-analysis"])
+    app.include_router(vision.router, prefix="/vision", tags=["vision"])
+    app.include_router(llm.router, prefix="/llm", tags=["llm"])
+
+    # Root endpoint
+    @app.get("/")
+    async def root():
+        return {
+            "service": "isa-model-serving",
+            "version": "1.0.0",
+            "status": "running",
+            "timestamp": time.time()
+        }
+
+    return app
+
+# Create default app instance
+app = create_app()
+
+if __name__ == "__main__":
+    import uvicorn
+    uvicorn.run(app, host="0.0.0.0", port=8000)
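Example (not part of the diff): a sketch of how the optional config dict feeds the global exception handler. "debug" is the only key the code above reads; everything else would be illustrative.

from isa_model.serving.api.fastapi_server import create_app

# With debug on, unhandled exceptions surface str(exc) in the "detail" field;
# without it, clients only ever see the generic "An error occurred".
debug_app = create_app(config={"debug": True})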
isa_model/serving/api/middleware/request_logger.py
@@ -0,0 +1,88 @@
+"""
+Request Logger Middleware
+
+Logs all incoming requests and responses for monitoring
+"""
+
+from fastapi import Request, Response
+from starlette.middleware.base import BaseHTTPMiddleware
+import time
+import logging
+import json
+from typing import Callable
+
+logger = logging.getLogger(__name__)
+
+class RequestLoggerMiddleware(BaseHTTPMiddleware):
+    """
+    Middleware to log HTTP requests and responses
+    """
+
+    def __init__(self, app, log_body: bool = False):
+        super().__init__(app)
+        self.log_body = log_body
+
+    async def dispatch(self, request: Request, call_next: Callable) -> Response:
+        """
+        Process request and log details
+        """
+        start_time = time.time()
+
+        # Log request
+        request_info = {
+            "method": request.method,
+            "url": str(request.url),
+            "headers": dict(request.headers),
+            "client": request.client.host if request.client else None,
+            "timestamp": start_time
+        }
+
+        # Optionally log request body (be careful with large images)
+        if self.log_body and request.method in ["POST", "PUT", "PATCH"]:
+            try:
+                body = await request.body()
+                if len(body) < 1024:  # Only log small bodies
+                    request_info["body_size"] = len(body)
+                else:
+                    request_info["body_size"] = len(body)
+                    request_info["body_preview"] = "Large body truncated"
+            except Exception as e:
+                request_info["body_error"] = str(e)
+
+        logger.info(f"Request: {json.dumps(request_info, default=str)}")
+
+        # Process request
+        try:
+            response = await call_next(request)
+
+            # Calculate processing time
+            process_time = time.time() - start_time
+
+            # Log response
+            response_info = {
+                "status_code": response.status_code,
+                "processing_time": process_time,
+                "url": str(request.url),
+                "method": request.method
+            }
+
+            # Add processing time header
+            response.headers["X-Process-Time"] = str(process_time)
+
+            if response.status_code >= 400:
+                logger.warning(f"Response: {json.dumps(response_info, default=str)}")
+            else:
+                logger.info(f"Response: {json.dumps(response_info, default=str)}")
+
+            return response
+
+        except Exception as e:
+            process_time = time.time() - start_time
+            error_info = {
+                "error": str(e),
+                "processing_time": process_time,
+                "url": str(request.url),
+                "method": request.method
+            }
+            logger.error(f"Request error: {json.dumps(error_info, default=str)}")
+            raise
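Example (not part of the diff): body logging is off by default; a sketch of opting in when wiring the middleware yourself.

from fastapi import FastAPI
from isa_model.serving.api.middleware.request_logger import RequestLoggerMiddleware

app = FastAPI()
app.add_middleware(RequestLoggerMiddleware, log_body=True)
# Both branches above record body_size; bodies of 1024 bytes or more additionally
# get the "Large body truncated" marker so base64 images don't flood the log.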
isa_model/serving/api/routes/health.py
@@ -0,0 +1,82 @@
+"""
+Health Check Routes
+
+System health and status endpoints
+"""
+
+from fastapi import APIRouter, HTTPException
+from pydantic import BaseModel
+import time
+import psutil
+import torch
+from typing import Dict, Any
+
+router = APIRouter()
+
+class HealthResponse(BaseModel):
+    status: str
+    timestamp: float
+    version: str
+    uptime: float
+    system: Dict[str, Any]
+
+@router.get("/", response_model=HealthResponse)
+async def health_check():
+    """
+    Basic health check endpoint
+    """
+    return HealthResponse(
+        status="healthy",
+        timestamp=time.time(),
+        version="1.0.0",
+        uptime=time.time(),  # Simplified uptime
+        system={
+            "cpu_percent": psutil.cpu_percent(),
+            "memory_percent": psutil.virtual_memory().percent,
+            "gpu_available": torch.cuda.is_available(),
+            "gpu_count": torch.cuda.device_count() if torch.cuda.is_available() else 0
+        }
+    )
+
+@router.get("/detailed")
+async def detailed_health():
+    """
+    Detailed health check with system information
+    """
+    gpu_info = []
+    if torch.cuda.is_available():
+        for i in range(torch.cuda.device_count()):
+            gpu_info.append({
+                "device": i,
+                "name": torch.cuda.get_device_name(i),
+                "memory_allocated": torch.cuda.memory_allocated(i),
+                "memory_cached": torch.cuda.memory_reserved(i)
+            })
+
+    return {
+        "status": "healthy",
+        "timestamp": time.time(),
+        "system": {
+            "cpu": {
+                "percent": psutil.cpu_percent(),
+                "count": psutil.cpu_count()
+            },
+            "memory": {
+                "percent": psutil.virtual_memory().percent,
+                "available": psutil.virtual_memory().available,
+                "total": psutil.virtual_memory().total
+            },
+            "gpu": {
+                "available": torch.cuda.is_available(),
+                "devices": gpu_info
+            }
+        }
+    }
+
+@router.get("/ready")
+async def readiness_probe():
+    """
+    Kubernetes readiness probe endpoint
+    """
+    # Add model loading checks here
+    return {"status": "ready", "timestamp": time.time()}
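Example (not part of the diff): exercising the health routes in-process with FastAPI's TestClient; psutil and torch must be importable, per the imports above. Note that uptime is populated with time.time() (the inline comment flags it as simplified), so it reports the current epoch time rather than time since startup.

from fastapi.testclient import TestClient
from isa_model.serving import create_app

client = TestClient(create_app())
print(client.get("/health/").json())          # HealthResponse fields
print(client.get("/health/detailed").json())  # cpu/memory/gpu breakdown
print(client.get("/health/ready").json())     # Kubernetes readiness probe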
isa_model/serving/api/routes/llm.py
@@ -0,0 +1,19 @@
+"""
+LLM API Routes
+
+Endpoints for language model tasks (placeholder)
+"""
+
+from fastapi import APIRouter, HTTPException
+from pydantic import BaseModel
+
+router = APIRouter()
+
+@router.get("/")
+async def llm_info():
+    """LLM service information"""
+    return {
+        "service": "llm",
+        "status": "placeholder",
+        "description": "Language model processing endpoints"
+    }
isa_model/serving/api/routes/ui_analysis.py
@@ -0,0 +1,223 @@
+"""
+UI Analysis API Routes
+
+Endpoints for UI element detection and analysis
+"""
+
+from fastapi import APIRouter, HTTPException, UploadFile, File, Form
+from pydantic import BaseModel
+from typing import List, Dict, Any, Optional
+import base64
+import time
+import logging
+
+from ..schemas.ui_analysis import (
+    UIAnalysisRequest,
+    UIAnalysisResponse,
+    UIElement,
+    ActionPlan
+)
+
+router = APIRouter()
+logger = logging.getLogger(__name__)
+
+class UIAnalysisService:
+    """
+    Placeholder for UI Analysis Service
+    Will be replaced with actual Modal deployment integration
+    """
+
+    @staticmethod
+    async def analyze_ui(image_b64: str, task_type: str = "search") -> Dict[str, Any]:
+        """
+        Placeholder method for UI analysis
+        """
+        # TODO: Replace with actual Modal service call
+        return {
+            "success": True,
+            "service": "ui_analysis",
+            "total_execution_time": 2.5,
+            "final_output": {
+                "ui_elements": {
+                    "interactive_elements": [
+                        {
+                            "id": "ui_0",
+                            "type": "textbox",
+                            "content": "Search",
+                            "center": [400, 200],
+                            "bbox": [300, 180, 500, 220],
+                            "confidence": 0.95,
+                            "interactable": True
+                        }
+                    ],
+                    "summary": {
+                        "interactive_count": 1,
+                        "detection_confidence": 0.95
+                    }
+                },
+                "action_plan": {
+                    "action_plan": [
+                        {
+                            "step": 1,
+                            "action": "click",
+                            "target_coordinates": [400, 200],
+                            "actual_coordinates": [400, 200],
+                            "description": "Click search box",
+                            "confidence": 0.95
+                        }
+                    ]
+                },
+                "automation_ready": {
+                    "ready": True,
+                    "confidence": 0.95,
+                    "page_type": task_type,
+                    "steps_count": 1
+                }
+            }
+        }
+
+@router.post("/analyze", response_model=UIAnalysisResponse)
+async def analyze_ui_elements(request: UIAnalysisRequest):
+    """
+    Analyze UI elements in an image
+
+    Args:
+        request: UI analysis request with image and task type
+
+    Returns:
+        UI analysis results with detected elements and action plan
+    """
+    try:
+        start_time = time.time()
+
+        # Validate task type
+        valid_task_types = ["login", "search", "content", "navigation"]
+        if request.task_type not in valid_task_types:
+            raise HTTPException(
+                status_code=400,
+                detail=f"Invalid task_type. Must be one of: {valid_task_types}"
+            )
+
+        # Call UI analysis service
+        result = await UIAnalysisService.analyze_ui(
+            request.image_b64,
+            request.task_type
+        )
+
+        if not result.get("success"):
+            raise HTTPException(
+                status_code=500,
+                detail=f"UI analysis failed: {result.get('error', 'Unknown error')}"
+            )
+
+        # Convert to response model
+        final_output = result["final_output"]
+
+        return UIAnalysisResponse(
+            success=True,
+            service="ui_analysis",
+            total_execution_time=result["total_execution_time"],
+            ui_elements=[
+                UIElement(**elem)
+                for elem in final_output["ui_elements"]["interactive_elements"]
+            ],
+            action_plan=ActionPlan(
+                steps=final_output["action_plan"]["action_plan"]
+            ),
+            automation_ready=final_output["automation_ready"],
+            metadata={
+                "detection_method": "modal_omniparser",
+                "request_time": start_time,
+                "task_type": request.task_type
+            }
+        )
+
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.error(f"UI analysis error: {e}", exc_info=True)
+        raise HTTPException(status_code=500, detail=str(e))
+
+@router.post("/upload")
+async def upload_and_analyze(
+    file: UploadFile = File(...),
+    task_type: str = Form("search")
+):
+    """
+    Upload image file and analyze UI elements
+
+    Args:
+        file: Image file upload
+        task_type: Type of UI analysis task
+
+    Returns:
+        UI analysis results
+    """
+    try:
+        # Validate file type
+        if not file.content_type.startswith('image/'):
+            raise HTTPException(
+                status_code=400,
+                detail="File must be an image"
+            )
+
+        # Read and encode image
+        image_data = await file.read()
+        image_b64 = base64.b64encode(image_data).decode()
+
+        # Create request
+        request = UIAnalysisRequest(
+            image_b64=image_b64,
+            task_type=task_type
+        )
+
+        # Analyze
+        return await analyze_ui_elements(request)
+
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.error(f"Upload and analyze error: {e}", exc_info=True)
+        raise HTTPException(status_code=500, detail=str(e))
+
+@router.post("/detect")
+async def detect_elements_only(request: UIAnalysisRequest):
+    """
+    Detect UI elements only (without action planning)
+
+    Args:
+        request: UI analysis request
+
+    Returns:
+        UI elements detection results
+    """
+    try:
+        # Call UI analysis service for detection only
+        result = await UIAnalysisService.analyze_ui(
+            request.image_b64,
+            request.task_type
+        )
+
+        if not result.get("success"):
+            raise HTTPException(
+                status_code=500,
+                detail=f"UI detection failed: {result.get('error', 'Unknown error')}"
+            )
+
+        # Return only UI elements
+        final_output = result["final_output"]
+        ui_elements = final_output["ui_elements"]["interactive_elements"]
+
+        return {
+            "success": True,
+            "processing_time": result["total_execution_time"],
+            "ui_elements": ui_elements,
+            "element_count": len(ui_elements),
+            "task_type": request.task_type
+        }
+
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.error(f"UI detection error: {e}", exc_info=True)
+        raise HTTPException(status_code=500, detail=str(e))
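Example (not part of the diff): calling the /ui-analysis endpoints above in-process. The payload fields mirror UIAnalysisRequest as constructed in the upload handler; screenshot.png is an illustrative path, and "search" is one of the four accepted task types.

import base64
from fastapi.testclient import TestClient
from isa_model.serving import create_app

client = TestClient(create_app())

with open("screenshot.png", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode()

# Full pipeline: element detection plus action plan
resp = client.post("/ui-analysis/analyze",
                   json={"image_b64": image_b64, "task_type": "search"})
print(resp.json())

# Detection only, no action planning
resp = client.post("/ui-analysis/detect",
                   json={"image_b64": image_b64, "task_type": "search"})
print(resp.json()["element_count"])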
isa_model/serving/api/routes/vision.py
@@ -0,0 +1,19 @@
+"""
+Vision API Routes
+
+Endpoints for general vision tasks (placeholder)
+"""
+
+from fastapi import APIRouter, HTTPException
+from pydantic import BaseModel
+
+router = APIRouter()
+
+@router.get("/")
+async def vision_info():
+    """Vision service information"""
+    return {
+        "service": "vision",
+        "status": "placeholder",
+        "description": "General vision processing endpoints"
+    }
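Example (not part of the diff): the llm and vision routers above currently expose only their placeholder info endpoints, mounted at /llm and /vision by fastapi_server.py.

from fastapi.testclient import TestClient
from isa_model.serving import create_app

client = TestClient(create_app())
print(client.get("/llm/").json())     # {"service": "llm", "status": "placeholder", ...}
print(client.get("/vision/").json())  # {"service": "vision", "status": "placeholder", ...}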
isa_model/serving/api/schemas/__init__.py
@@ -0,0 +1,17 @@
+"""
+API Schemas Module
+
+Pydantic models for API request and response validation
+"""
+
+from .ui_analysis import *
+from .common import *
+
+__all__ = [
+    "UIAnalysisRequest",
+    "UIAnalysisResponse",
+    "UIElement",
+    "ActionPlan",
+    "BaseResponse",
+    "ErrorResponse"
+]
isa_model/serving/api/schemas/common.py
@@ -0,0 +1,33 @@
+"""
+Common API Schemas
+
+Base schemas used across different endpoints
+"""
+
+from pydantic import BaseModel
+from typing import Dict, Any, Optional
+import time
+
+class BaseResponse(BaseModel):
+    """Base response model"""
+    success: bool
+    timestamp: float = time.time()
+
+class ErrorResponse(BaseResponse):
+    """Error response model"""
+    success: bool = False
+    error: str
+    detail: Optional[str] = None
+
+class HealthStatus(BaseModel):
+    """Health status model"""
+    status: str
+    timestamp: float
+    version: str
+
+class SystemInfo(BaseModel):
+    """System information model"""
+    cpu_percent: float
+    memory_percent: float
+    gpu_available: bool
+    gpu_count: int