webscout-8.1-py3-none-any.whl → webscout-8.2-py3-none-any.whl
This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- inferno/__init__.py +6 -0
- inferno/__main__.py +9 -0
- inferno/cli.py +6 -0
- webscout/Local/__init__.py +6 -0
- webscout/Local/__main__.py +9 -0
- webscout/Local/api.py +576 -0
- webscout/Local/cli.py +338 -0
- webscout/Local/config.py +75 -0
- webscout/Local/llm.py +188 -0
- webscout/Local/model_manager.py +205 -0
- webscout/Local/server.py +187 -0
- webscout/Local/utils.py +93 -0
- webscout/Provider/AISEARCH/Perplexity.py +359 -0
- webscout/Provider/AISEARCH/__init__.py +2 -1
- webscout/Provider/AISEARCH/scira_search.py +8 -4
- webscout/Provider/ExaChat.py +18 -8
- webscout/Provider/GithubChat.py +5 -1
- webscout/Provider/Glider.py +4 -2
- webscout/Provider/OPENAI/__init__.py +8 -1
- webscout/Provider/OPENAI/chatgpt.py +549 -0
- webscout/Provider/OPENAI/exachat.py +20 -8
- webscout/Provider/OPENAI/glider.py +3 -1
- webscout/Provider/OPENAI/llmchatco.py +3 -1
- webscout/Provider/OPENAI/opkfc.py +488 -0
- webscout/Provider/OPENAI/scirachat.py +11 -7
- webscout/Provider/OPENAI/standardinput.py +425 -0
- webscout/Provider/OPENAI/textpollinations.py +285 -0
- webscout/Provider/OPENAI/toolbaz.py +405 -0
- webscout/Provider/OPENAI/uncovrAI.py +455 -0
- webscout/Provider/OPENAI/writecream.py +158 -0
- webscout/Provider/StandardInput.py +278 -0
- webscout/Provider/TextPollinationsAI.py +27 -28
- webscout/Provider/Writecream.py +211 -0
- webscout/Provider/WritingMate.py +197 -0
- webscout/Provider/Youchat.py +30 -26
- webscout/Provider/__init__.py +10 -2
- webscout/Provider/koala.py +2 -2
- webscout/Provider/llmchatco.py +5 -0
- webscout/Provider/scira_chat.py +5 -2
- webscout/Provider/scnet.py +187 -0
- webscout/Provider/toolbaz.py +320 -0
- webscout/Provider/uncovr.py +3 -3
- webscout/conversation.py +32 -32
- webscout/version.py +1 -1
- {webscout-8.1.dist-info → webscout-8.2.dist-info}/METADATA +54 -3
- {webscout-8.1.dist-info → webscout-8.2.dist-info}/RECORD +50 -25
- webscout-8.2.dist-info/entry_points.txt +5 -0
- {webscout-8.1.dist-info → webscout-8.2.dist-info}/top_level.txt +1 -0
- webscout-8.1.dist-info/entry_points.txt +0 -3
- {webscout-8.1.dist-info → webscout-8.2.dist-info}/LICENSE.md +0 -0
- {webscout-8.1.dist-info → webscout-8.2.dist-info}/WHEEL +0 -0
webscout/Local/model_manager.py
ADDED
@@ -0,0 +1,205 @@
+"""
+Model management for webscout.local
+"""
+
+import os
+import json
+import datetime
+from pathlib import Path
+from typing import Dict, Any, Optional, List, Tuple
+import shutil
+
+from rich.console import Console
+from rich.prompt import Prompt
+from huggingface_hub import hf_hub_download, HfFileSystem
+
+from .config import config
+
+console = Console()
+
+class ModelManager:
+    """
+    Manager for downloading and managing models.
+    Handles model download, listing, removal, and path resolution.
+    """
+    models_dir: Path
+
+    def __init__(self) -> None:
+        self.models_dir = config.models_dir
+
+    def parse_model_string(self, model_string: str) -> Tuple[str, Optional[str]]:
+        """
+        Parse a model string in the format 'repo_id:filename' or just 'repo_id'.
+        Args:
+            model_string (str): The model string to parse.
+        Returns:
+            Tuple[str, Optional[str]]: (repo_id, filename)
+        """
+        if ":" in model_string:
+            repo_id, filename = model_string.split(":", 1)
+            return repo_id, filename
+        else:
+            return model_string, None
+
+    def list_repo_gguf_files(self, repo_id: str) -> List[str]:
+        """
+        List all GGUF files in a repository.
+        Args:
+            repo_id (str): The Hugging Face repository ID.
+        Returns:
+            List[str]: List of filenames.
+        """
+        fs = HfFileSystem()
+        try:
+            files = fs.ls(repo_id, detail=False)
+            gguf_files = [os.path.basename(f) for f in files if f.endswith(".gguf")]
+            return gguf_files
+        except Exception as e:
+            console.print(f"[bold red]Error listing files in repository {repo_id}: {str(e)}[/bold red]")
+            return []
+
+    def select_file_interactive(self, repo_id: str) -> Optional[str]:
+        """
+        Interactively select a file from a repository.
+        Args:
+            repo_id (str): The Hugging Face repository ID.
+        Returns:
+            Optional[str]: Selected filename or None if cancelled.
+        """
+        gguf_files = self.list_repo_gguf_files(repo_id)
+        if not gguf_files:
+            console.print(f"[bold red]No GGUF files found in repository {repo_id}[/bold red]")
+            return None
+        console.print(f"[bold blue]Available GGUF files in {repo_id}:[/bold blue]")
+        for i, filename in enumerate(gguf_files):
+            console.print(f" [{i+1}] {filename}")
+        choice = Prompt.ask(
+            "Select a file to download (number or filename)",
+            default="1"
+        )
+        try:
+            idx = int(choice) - 1
+            if 0 <= idx < len(gguf_files):
+                return gguf_files[idx]
+        except ValueError:
+            if choice in gguf_files:
+                return choice
+        console.print(f"[bold red]Invalid selection: {choice}[/bold red]")
+        return None
+
+    def download_model(self, model_string: str, filename: Optional[str] = None) -> Tuple[str, Path]:
+        """
+        Download a model from Hugging Face Hub.
+        Args:
+            model_string (str): The model string in format 'repo_id' or 'repo_id:filename'.
+            filename (Optional[str]): Specific filename to download, overrides filename in model_string.
+        Returns:
+            Tuple[str, Path]: (model_name, model_path)
+        """
+        repo_id, file_from_string = self.parse_model_string(model_string)
+        filename = filename or file_from_string
+        model_name = repo_id.split("/")[-1] if "/" in repo_id else repo_id
+        model_dir = config.get_model_path(model_name)
+        model_dir.mkdir(exist_ok=True, parents=True)
+        model_info: Dict[str, Any] = {
+            "repo_id": repo_id,
+            "name": model_name,
+            "downloaded_at": datetime.datetime.now().isoformat(),
+        }
+        with open(model_dir / "info.json", "w") as f:
+            json.dump(model_info, f, indent=2)
+        if not filename:
+            console.print(f"[yellow]No filename provided, searching for GGUF files in {repo_id}...[/yellow]")
+            filename = self.select_file_interactive(repo_id)
+            if not filename:
+                raise ValueError(f"No GGUF file selected from repository {repo_id}")
+            console.print(f"[green]Selected GGUF file: {filename}[/green]")
+        console.print(f"[bold blue]Downloading {filename} from {repo_id}...[/bold blue]")
+        try:
+            model_path = hf_hub_download(
+                repo_id=repo_id,
+                filename=filename,
+                local_dir=model_dir,
+            )
+        except Exception as e:
+            console.print(f"[bold red]Error downloading file: {str(e)}[/bold red]")
+            raise
+        console.print(f"[bold green]Model downloaded to {model_path}[/bold green]")
+        model_info["filename"] = filename
+        model_info["path"] = str(model_path)
+        with open(model_dir / "info.json", "w") as f:
+            json.dump(model_info, f, indent=2)
+        return model_name, Path(model_path)
+
+    def get_model_info(self, model_name: str) -> Optional[Dict[str, Any]]:
+        """
+        Get information about a downloaded model.
+        Args:
+            model_name (str): Name of the model.
+        Returns:
+            Optional[Dict[str, Any]]: Model info dict or None if not found.
+        """
+        model_dir = config.get_model_path(model_name)
+        info_file = model_dir / "info.json"
+        if not info_file.exists():
+            return None
+        with open(info_file, "r") as f:
+            return json.load(f)
+
+    def list_models(self) -> List[Dict[str, Any]]:
+        """
+        List all downloaded models with their information.
+        Returns:
+            List[Dict[str, Any]]: List of model info dicts.
+        """
+        models: List[Dict[str, Any]] = []
+        seen_paths: set = set()
+        if not config.models_dir.exists():
+            return []
+        model_dirs = [d for d in config.models_dir.iterdir() if d.is_dir()]
+        for model_dir in model_dirs:
+            if ":" in model_dir.name:
+                continue
+            info_file = model_dir / "info.json"
+            if info_file.exists():
+                try:
+                    with open(info_file, "r") as f:
+                        info = json.load(f)
+                    if "path" in info and info["path"] in seen_paths:
+                        continue
+                    if "path" in info:
+                        seen_paths.add(info["path"])
+                    models.append(info)
+                except Exception:
+                    pass
+        return models
+
+    def remove_model(self, model_name: str) -> bool:
+        """
+        Remove a downloaded model.
+        Args:
+            model_name (str): Name of the model to remove.
+        Returns:
+            bool: True if removed, False if not found.
+        """
+        model_dir = config.get_model_path(model_name)
+        if not model_dir.exists():
+            return False
+        shutil.rmtree(model_dir)
+        return True
+
+    def get_model_path(self, model_name: str) -> Optional[str]:
+        """
+        Get the path to a model file.
+        Args:
+            model_name (str): Name or filename of the model.
+        Returns:
+            Optional[str]: Path to the model file or None if not found.
+        """
+        info = self.get_model_info(model_name)
+        if not info or "path" not in info:
+            for model_info in self.list_models():
+                if model_info.get("filename") == model_name:
+                    return model_info.get("path")
+            return None
+        return info["path"]
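
For orientation, a minimal usage sketch of the ModelManager added above. This is an illustration based only on the methods in this diff; the Hugging Face repo id and GGUF filename are example values, not anything shipped by webscout.

# Hypothetical driver for ModelManager; the repo id and filename are examples.
from webscout.Local.model_manager import ModelManager

manager = ModelManager()

# 'repo_id:filename' pins a specific GGUF; a bare repo id triggers the
# interactive picker in select_file_interactive().
name, path = manager.download_model("TheBloke/Llama-2-7B-GGUF:llama-2-7b.Q4_K_M.gguf")
print(name, path)

# list_models() reads each model directory's info.json, skipping entries
# that point at an already-seen file path.
for info in manager.list_models():
    print(info["name"], info.get("filename"), info.get("path"))
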
webscout/Local/server.py
ADDED
@@ -0,0 +1,187 @@
+"""
+API server with OpenAI compatibility
+"""
+
+import json
+import time
+from typing import Dict, Any, List, Optional, AsyncGenerator
+
+import uvicorn
+from fastapi import FastAPI, HTTPException
+from fastapi.responses import StreamingResponse
+from pydantic import BaseModel
+
+from .config import config
+from .model_manager import ModelManager
+from .llm import LLMInterface
+
+app = FastAPI(title="webscout.local API", description="OpenAI-compatible API for webscout.local")
+
+# Models
+model_manager: ModelManager = ModelManager()
+loaded_models: Dict[str, LLMInterface] = {}  # Cache for loaded models
+
+class ChatMessage(BaseModel):
+    """
+    Represents a single chat message for the chat completion endpoint.
+    """
+    role: str
+    content: str
+
+class ChatCompletionRequest(BaseModel):
+    """
+    Request model for chat completions.
+    """
+    model: str
+    messages: List[ChatMessage]
+    temperature: float = 0.7
+    top_p: float = 0.95
+    max_tokens: int = 256
+    stream: bool = False
+    stop: Optional[List[str]] = None
+
+class CompletionRequest(BaseModel):
+    """
+    Request model for text completions.
+    """
+    model: str
+    prompt: str
+    temperature: float = 0.7
+    top_p: float = 0.95
+    max_tokens: int = 256
+    stream: bool = False
+    stop: Optional[List[str]] = None
+
+class ModelInfo(BaseModel):
+    """
+    Model information for listing available models.
+    """
+    id: str
+    object: str = "model"
+    created: int
+    owned_by: str = "webscout.local"
+
+class ModelList(BaseModel):
+    """
+    List of available models.
+    """
+    object: str = "list"
+    data: List[ModelInfo]
+
+def get_model(model_name: str) -> LLMInterface:
+    """
+    Get or load a model by name, using a cache for efficiency.
+    Args:
+        model_name (str): Name of the model to load.
+    Returns:
+        LLMInterface: Loaded model interface.
+    Raises:
+        HTTPException: If the model cannot be loaded.
+    """
+    if model_name not in loaded_models:
+        try:
+            loaded_models[model_name] = LLMInterface(model_name)
+            loaded_models[model_name].load_model()
+        except Exception as e:
+            raise HTTPException(status_code=404, detail=f"Model {model_name} not found: {str(e)}")
+    return loaded_models[model_name]
+
+@app.get("/v1/models", response_model=ModelList)
+async def list_models() -> ModelList:
+    """
+    List available models.
+    Returns:
+        ModelList: List of available models.
+    """
+    models = model_manager.list_models()
+    model_list: List[ModelInfo] = []
+    for model in models:
+        model_list.append(
+            ModelInfo(
+                id=model["name"],
+                created=int(time.time()),
+            )
+        )
+    return ModelList(object="list", data=model_list)
+
+@app.post("/v1/chat/completions")
+async def create_chat_completion(request: ChatCompletionRequest) -> Any:
+    """
+    Create a chat completion.
+    Args:
+        request (ChatCompletionRequest): Chat completion request.
+    Returns:
+        StreamingResponse or dict: Streaming or regular response.
+    """
+    model = get_model(request.model)
+    messages = [{"role": msg.role, "content": msg.content} for msg in request.messages]
+    if request.stream:
+        async def generate() -> AsyncGenerator[str, None]:
+            stream = model.create_chat_completion(
+                messages=messages,
+                max_tokens=request.max_tokens,
+                temperature=request.temperature,
+                top_p=request.top_p,
+                stream=True,
+                stop=request.stop,
+            )
+            for chunk in stream:
+                yield f"data: {json.dumps(chunk)}\n\n"
+            yield "data: [DONE]\n\n"
+        return StreamingResponse(generate(), media_type="text/event-stream")
+    else:
+        response = model.create_chat_completion(
+            messages=messages,
+            max_tokens=request.max_tokens,
+            temperature=request.temperature,
+            top_p=request.top_p,
+            stream=False,
+            stop=request.stop,
+        )
+        return response
+
+@app.post("/v1/completions")
+async def create_completion(request: CompletionRequest) -> Any:
+    """
+    Create a text completion.
+    Args:
+        request (CompletionRequest): Completion request.
+    Returns:
+        StreamingResponse or dict: Streaming or regular response.
+    """
+    model = get_model(request.model)
+    if request.stream:
+        async def generate() -> AsyncGenerator[str, None]:
+            stream = model.create_completion(
+                prompt=request.prompt,
+                max_tokens=request.max_tokens,
+                temperature=request.temperature,
+                top_p=request.top_p,
+                stream=True,
+                stop=request.stop,
+            )
+            for chunk in stream:
+                yield f"data: {json.dumps(chunk)}\n\n"
+            yield "data: [DONE]\n\n"
+        return StreamingResponse(generate(), media_type="text/event-stream")
+    else:
+        response = model.create_completion(
+            prompt=request.prompt,
+            max_tokens=request.max_tokens,
+            temperature=request.temperature,
+            top_p=request.top_p,
+            stream=False,
+            stop=request.stop,
+        )
+        return response
+
+def start_server(host: Optional[str] = None, port: Optional[int] = None) -> None:
+    """
+    Start the API server.
+    Args:
+        host (Optional[str]): Host to bind the server to.
+        port (Optional[int]): Port to bind the server to.
+    """
+    host = host or config.get("api_host", "127.0.0.1")
+    port = port or config.get("api_port", 8000)
+    uvicorn.run(app, host=host, port=port)
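
Because these endpoints mirror OpenAI's request/response schema, any OpenAI-style HTTP client should be able to talk to the server. A minimal client sketch follows; the host and port are the defaults used by start_server(), and "my-model" is a placeholder for an id returned by GET /v1/models.

# Hypothetical client call against the OpenAI-compatible endpoint above.
import requests

resp = requests.post(
    "http://127.0.0.1:8000/v1/chat/completions",
    json={
        "model": "my-model",  # placeholder; use an id from GET /v1/models
        "messages": [{"role": "user", "content": "Hello!"}],
        "max_tokens": 64,
        "stream": False,  # set True to consume text/event-stream chunks instead
    },
)
print(resp.json())
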
webscout/Local/utils.py
ADDED
@@ -0,0 +1,93 @@
+"""
+Utility functions for webscout.local
+"""
+
+import base64
+import logging
+
+logger = logging.getLogger(__name__)
+
+def parse_duration(duration_str: str) -> float:
+    """
+    Parse a duration string into seconds.
+
+    Args:
+        duration_str (str): Duration string (e.g., '5m', '1h', '30s', '500ms', '0').
+    Returns:
+        float: Duration in seconds.
+    """
+    if not duration_str:
+        return 300.0  # Default 5 minutes
+    if duration_str.endswith("ms"):
+        return int(duration_str[:-2]) / 1000.0
+    elif duration_str.endswith("s"):
+        return int(duration_str[:-1])
+    elif duration_str.endswith("m"):
+        return int(duration_str[:-1]) * 60
+    elif duration_str.endswith("h"):
+        return int(duration_str[:-1]) * 3600
+    elif duration_str == "0":
+        return 0.0
+    else:
+        try:
+            return float(duration_str)
+        except ValueError:
+            return 300.0  # Default 5 minutes
+
+def format_duration(seconds: float) -> str:
+    """
+    Format seconds into a human-readable duration string.
+    Args:
+        seconds (float): Duration in seconds.
+    Returns:
+        str: Human-readable duration string.
+    """
+    if seconds < 1:
+        return f"{int(seconds * 1000)}ms"
+    elif seconds < 60:
+        return f"{int(seconds)}s"
+    elif seconds < 3600:
+        return f"{int(seconds / 60)}m"
+    else:
+        return f"{int(seconds / 3600)}h"
+
+def decode_image(image_str: str) -> bytes:
+    """
+    Decode a base64 image string to bytes.
+    Args:
+        image_str (str): Base64-encoded image string (optionally with data URI prefix).
+    Returns:
+        bytes: Decoded image bytes.
+    """
+    if image_str.startswith("data:"):
+        image_str = image_str.split(",", 1)[1]
+    return base64.b64decode(image_str)
+
+def encode_image(image_bytes: bytes, mime_type: str = "image/png") -> str:
+    """
+    Encode image bytes to a base64 data URI.
+    Args:
+        image_bytes (bytes): Image data.
+        mime_type (str): MIME type for the image.
+    Returns:
+        str: Base64-encoded data URI string.
+    """
+    encoded = base64.b64encode(image_bytes).decode("utf-8")
+    return f"data:{mime_type};base64,{encoded}"
+
+def get_file_size_str(size_bytes: int) -> str:
+    """
+    Convert file size in bytes to a human-readable string.
+    Args:
+        size_bytes (int): File size in bytes.
+    Returns:
+        str: Human-readable file size string.
+    """
+    if size_bytes < 1024:
+        return f"{size_bytes} B"
+    elif size_bytes < 1024 * 1024:
+        return f"{size_bytes / 1024:.2f} KB"
+    elif size_bytes < 1024 * 1024 * 1024:
+        return f"{size_bytes / (1024 * 1024):.2f} MB"
+    else:
+        return f"{size_bytes / (1024 * 1024 * 1024):.2f} GB"
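
These helpers are self-contained, so their behavior is easy to illustrate. A few example calls (input values chosen for illustration, not taken from the package):

from webscout.Local.utils import parse_duration, format_duration, get_file_size_str

parse_duration("5m")     # 300 seconds
parse_duration("500ms")  # 0.5
parse_duration("oops")   # 300.0, the 5-minute fallback for unparseable input
format_duration(4000)    # '1h' (integer division drops the sub-hour remainder)
get_file_size_str(4831838208)  # '4.50 GB'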