@1mancompany/onemancompany 0.7.68 → 0.7.70
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/pyproject.toml +1 -1
- package/src/onemancompany/agents/base.py +6 -0
- package/src/onemancompany/core/default_vessel.yaml +0 -1
- package/src/onemancompany/core/standalone_runner.py +12 -1
- package/src/onemancompany/core/vessel_config.py +0 -4
- package/src/onemancompany/onboard.py +5 -64
- package/company/assets/tools/background_removal/background_removal.py +0 -158
- package/company/assets/tools/background_removal/tool.yaml +0 -9
- package/company/assets/tools/image_generation/image_generation.py +0 -281
- package/company/assets/tools/image_generation/tool.yaml +0 -26
- package/company/assets/tools/video_generation/tool.yaml +0 -15
- package/company/assets/tools/video_generation/video_generation.py +0 -267
package/package.json
CHANGED
package/pyproject.toml
CHANGED
|
@@ -221,6 +221,11 @@ def make_llm(employee_id: str = "", temperature: float | None = None) -> BaseCha
|
|
|
221
221
|
base_url = settings.openrouter_base_url
|
|
222
222
|
elif api_provider == "custom" or (settings.default_api_base_url and api_provider == settings.default_api_provider):
|
|
223
223
|
base_url = settings.default_api_base_url
|
|
224
|
+
extra_body = None
|
|
225
|
+
if (api_provider or "").lower() == "deepseek":
|
|
226
|
+
# DeepSeek V4 thinking mode currently requires reasoning_content
|
|
227
|
+
# replay across tool calls, which LangChain does not preserve.
|
|
228
|
+
extra_body = {"thinking": {"type": "disabled"}}
|
|
224
229
|
return ChatOpenAI(
|
|
225
230
|
model=model,
|
|
226
231
|
api_key=effective_key,
|
|
@@ -229,6 +234,7 @@ def make_llm(employee_id: str = "", temperature: float | None = None) -> BaseCha
|
|
|
229
234
|
max_retries=3,
|
|
230
235
|
request_timeout=300.0,
|
|
231
236
|
stream_usage=True,
|
|
237
|
+
extra_body=extra_body,
|
|
232
238
|
)
|
|
233
239
|
|
|
234
240
|
# --- Fallback: unknown provider or no key → fall back to openrouter with default model ---
|
|
@@ -96,7 +96,18 @@ def _load_llm(profile: dict):
|
|
|
96
96
|
base_url = prov[0]
|
|
97
97
|
if provider_name == "openrouter":
|
|
98
98
|
base_url = os.environ.get("OPENROUTER_BASE_URL", base_url)
|
|
99
|
-
|
|
99
|
+
extra_body = None
|
|
100
|
+
if (provider_name or "").lower() == "deepseek":
|
|
101
|
+
# DeepSeek V4 thinking mode requires reasoning_content replay
|
|
102
|
+
# across tool calls; LangChain does not preserve it yet.
|
|
103
|
+
extra_body = {{"thinking": {{"type": "disabled"}}}}
|
|
104
|
+
return ChatOpenAI(
|
|
105
|
+
model=model,
|
|
106
|
+
api_key=key,
|
|
107
|
+
base_url=base_url,
|
|
108
|
+
temperature=temperature,
|
|
109
|
+
extra_body=extra_body,
|
|
110
|
+
)
|
|
100
111
|
|
|
101
112
|
# Unknown provider — fall back to OpenRouter
|
|
102
113
|
key = api_key or os.environ.get("OPENROUTER_API_KEY", "")
|
|
@@ -72,7 +72,6 @@ class CapabilitiesConfig:
|
|
|
72
72
|
file_upload: bool = False
|
|
73
73
|
websocket: bool = False
|
|
74
74
|
sandbox: bool = False
|
|
75
|
-
image_generation: bool = False
|
|
76
75
|
|
|
77
76
|
|
|
78
77
|
@dataclass
|
|
@@ -138,7 +137,6 @@ def _parse_vessel_dict(raw: dict) -> VesselConfig:
|
|
|
138
137
|
file_upload=caps_raw.get("file_upload", False),
|
|
139
138
|
websocket=caps_raw.get("websocket", False),
|
|
140
139
|
sandbox=caps_raw.get("sandbox", False),
|
|
141
|
-
image_generation=caps_raw.get("image_generation", False),
|
|
142
140
|
),
|
|
143
141
|
)
|
|
144
142
|
|
|
@@ -207,11 +205,9 @@ def save_vessel_config(emp_dir: Path, config: VesselConfig) -> None:
|
|
|
207
205
|
"file_upload": config.capabilities.file_upload,
|
|
208
206
|
"websocket": config.capabilities.websocket,
|
|
209
207
|
"sandbox": config.capabilities.sandbox,
|
|
210
|
-
"image_generation": config.capabilities.image_generation,
|
|
211
208
|
},
|
|
212
209
|
}
|
|
213
210
|
|
|
214
211
|
with open_utf(vessel_dir / VESSEL_YAML_FILENAME, "w") as f:
|
|
215
212
|
yaml.dump(data, f, default_flow_style=False, allow_unicode=True)
|
|
216
213
|
|
|
217
|
-
|
|
@@ -83,7 +83,7 @@ LOGO = r"""
|
|
|
83
83
|
░▒▓ [ NEURAL BOOTSTRAP SEQUENCE ]
|
|
84
84
|
"""
|
|
85
85
|
|
|
86
|
-
TOTAL_STEPS =
|
|
86
|
+
TOTAL_STEPS = 5
|
|
87
87
|
|
|
88
88
|
HOSTING_LABELS = {"company": "LangChain", "self": "Claude Code", "openclaw": "OpenClaw"}
|
|
89
89
|
|
|
@@ -418,7 +418,7 @@ def _step_llm(console: Console) -> tuple[str, str, str, str]:
|
|
|
418
418
|
|
|
419
419
|
def _step_server(console: Console) -> tuple[str, int]:
|
|
420
420
|
console.print()
|
|
421
|
-
_print_step(console,
|
|
421
|
+
_print_step(console, 4, "NETWORK NODE", "Server Configuration")
|
|
422
422
|
console.print(
|
|
423
423
|
"\n [dim]Deploy your company node on the local network.[/dim]\n"
|
|
424
424
|
" [dim]After genesis, open the URL to enter your office.[/dim]\n"
|
|
@@ -544,64 +544,6 @@ def _step_agent_family(console: Console) -> dict[str, str]:
|
|
|
544
544
|
return founders
|
|
545
545
|
|
|
546
546
|
|
|
547
|
-
def _step_sandbox(console: Console) -> bool:
|
|
548
|
-
"""Ask whether to install sandbox tools (Docker-based code execution)."""
|
|
549
|
-
console.print()
|
|
550
|
-
_print_step(console, 4, "SANDBOX MESH", "Isolated Execution")
|
|
551
|
-
console.print(
|
|
552
|
-
"\n [dim]Sandbox gives your AI employees a safe place to run code.\n"
|
|
553
|
-
" Without it, code execution happens directly on your machine.\n"
|
|
554
|
-
" With it, each task runs in an isolated Docker container.[/dim]\n"
|
|
555
|
-
)
|
|
556
|
-
console.print(
|
|
557
|
-
" [bold]Requirements:[/bold]\n"
|
|
558
|
-
" • [cyan]Docker[/cyan] — must be installed and running\n"
|
|
559
|
-
" • Python packages will be installed automatically\n"
|
|
560
|
-
" [dim]This is optional. You can always enable it later.[/dim]\n"
|
|
561
|
-
)
|
|
562
|
-
from InquirerPy import inquirer as _inq
|
|
563
|
-
install = _inq.confirm(
|
|
564
|
-
message="Install sandbox tools?",
|
|
565
|
-
default=False,
|
|
566
|
-
style=INQ_STYLE,
|
|
567
|
-
).execute()
|
|
568
|
-
if install:
|
|
569
|
-
console.print()
|
|
570
|
-
_install_sandbox_deps(console)
|
|
571
|
-
return install
|
|
572
|
-
|
|
573
|
-
|
|
574
|
-
def _install_sandbox_deps(console: Console) -> None:
|
|
575
|
-
"""Attempt to install sandbox optional dependencies via uv/pip."""
|
|
576
|
-
import subprocess
|
|
577
|
-
import sys
|
|
578
|
-
|
|
579
|
-
# Try uv first, fall back to pip
|
|
580
|
-
venv_python = sys.executable
|
|
581
|
-
cmds = [
|
|
582
|
-
[venv_python, "-m", "uv", "pip", "install", "onemancompany[sandbox]"],
|
|
583
|
-
[venv_python, "-m", "pip", "install", "onemancompany[sandbox]"],
|
|
584
|
-
]
|
|
585
|
-
for cmd in cmds:
|
|
586
|
-
try:
|
|
587
|
-
with console.status(" Installing sandbox dependencies..."):
|
|
588
|
-
result = subprocess.run(
|
|
589
|
-
cmd, capture_output=True, text=True, timeout=120,
|
|
590
|
-
)
|
|
591
|
-
if result.returncode == 0:
|
|
592
|
-
console.print(" [green]✔[/green] Sandbox dependencies installed")
|
|
593
|
-
return
|
|
594
|
-
except FileNotFoundError:
|
|
595
|
-
console.print(f" [dim]{cmd[2]} not available, trying fallback...[/dim]")
|
|
596
|
-
except subprocess.TimeoutExpired:
|
|
597
|
-
console.print(" [yellow]⚠[/yellow] Installation timed out")
|
|
598
|
-
|
|
599
|
-
console.print(
|
|
600
|
-
" [yellow]⚠[/yellow] Auto-install failed. Install manually:\n"
|
|
601
|
-
" [dim]uv pip install 'onemancompany[sandbox]'[/dim]"
|
|
602
|
-
)
|
|
603
|
-
|
|
604
|
-
|
|
605
547
|
def _step_optional(console: Console) -> dict[str, str]:
|
|
606
548
|
console.print()
|
|
607
549
|
_print_step(console, 3, "UPLINK ARRAY", "External Integrations")
|
|
@@ -694,7 +636,7 @@ def _step_execute(
|
|
|
694
636
|
custom_chat_class: str = "",
|
|
695
637
|
) -> None:
|
|
696
638
|
console.print()
|
|
697
|
-
_print_step(console,
|
|
639
|
+
_print_step(console, 5, "GENESIS", "Company Initialization")
|
|
698
640
|
console.print(
|
|
699
641
|
"\n [dim]Deploying company infrastructure and founding team...[/dim]\n"
|
|
700
642
|
)
|
|
@@ -1010,10 +952,9 @@ def run_wizard() -> None:
|
|
|
1010
952
|
founder_families = _step_agent_family(console) # Step 1: Agent Family
|
|
1011
953
|
provider, api_key, model, base_url, custom_chat_class = _step_llm(console) # Step 2: LLM Provider & Key
|
|
1012
954
|
extras = _step_optional(console) # Step 3: External Integrations
|
|
1013
|
-
|
|
1014
|
-
host, port = _step_server(console) # Step 5: Server
|
|
955
|
+
host, port = _step_server(console) # Step 4: Server
|
|
1015
956
|
_step_execute(console, provider, api_key, model, host, port, extras,
|
|
1016
|
-
|
|
957
|
+
founder_families=founder_families,
|
|
1017
958
|
base_url=base_url, custom_chat_class=custom_chat_class)
|
|
1018
959
|
_step_done(console, host, port)
|
|
1019
960
|
|
|
@@ -1,158 +0,0 @@
|
|
|
1
|
-
"""Background removal tool for generated images.
|
|
2
|
-
|
|
3
|
-
Provides one LangChain @tool:
|
|
4
|
-
- remove_image_background(input_path, output_path, tolerance=28)
|
|
5
|
-
"""
|
|
6
|
-
|
|
7
|
-
from __future__ import annotations
|
|
8
|
-
|
|
9
|
-
from collections import deque
|
|
10
|
-
from pathlib import Path
|
|
11
|
-
|
|
12
|
-
from langchain_core.tools import tool
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
def _dominant_corner_color(px, width: int, height: int) -> tuple[int, int, int]:
|
|
16
|
-
"""Estimate background color from the most similar corner cluster."""
|
|
17
|
-
corners = [
|
|
18
|
-
px[0, 0][:3],
|
|
19
|
-
px[max(width - 1, 0), 0][:3],
|
|
20
|
-
px[0, max(height - 1, 0)][:3],
|
|
21
|
-
px[max(width - 1, 0), max(height - 1, 0)][:3],
|
|
22
|
-
]
|
|
23
|
-
# 16-level quantization keeps similar corner colors in one bucket.
|
|
24
|
-
buckets: dict[tuple[int, int, int], list[tuple[int, int, int]]] = {}
|
|
25
|
-
for c in corners:
|
|
26
|
-
key = (c[0] // 16, c[1] // 16, c[2] // 16)
|
|
27
|
-
buckets.setdefault(key, []).append(c)
|
|
28
|
-
dominant = max(buckets.values(), key=len)
|
|
29
|
-
r = sum(c[0] for c in dominant) // len(dominant)
|
|
30
|
-
g = sum(c[1] for c in dominant) // len(dominant)
|
|
31
|
-
b = sum(c[2] for c in dominant) // len(dominant)
|
|
32
|
-
return r, g, b
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
def _is_close_to_bg(rgb: tuple[int, int, int], bg: tuple[int, int, int], tolerance: int) -> bool:
|
|
36
|
-
"""Fast color similarity check (Manhattan distance)."""
|
|
37
|
-
return (
|
|
38
|
-
abs(rgb[0] - bg[0]) +
|
|
39
|
-
abs(rgb[1] - bg[1]) +
|
|
40
|
-
abs(rgb[2] - bg[2])
|
|
41
|
-
) <= tolerance * 3
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
@tool
|
|
45
|
-
def remove_image_background(input_path: str, output_path: str, tolerance: int = 28) -> dict:
|
|
46
|
-
"""Remove background from an image and save as transparent PNG. USE THIS after image_generation when transparency is needed.
|
|
47
|
-
|
|
48
|
-
Best for: logos, stickers, product images, overlays — any image that needs a transparent background.
|
|
49
|
-
|
|
50
|
-
Args:
|
|
51
|
-
input_path: Source image path (output from image_generation).
|
|
52
|
-
output_path: Output path (will be saved as .png with transparency).
|
|
53
|
-
tolerance: Color tolerance for background matching (0-255, default 28). Increase for noisy backgrounds.
|
|
54
|
-
"""
|
|
55
|
-
input_path = (input_path or "").strip()
|
|
56
|
-
output_path = (output_path or "").strip()
|
|
57
|
-
if not input_path:
|
|
58
|
-
return {"status": "error", "message": "input_path is empty"}
|
|
59
|
-
if not output_path:
|
|
60
|
-
return {"status": "error", "message": "output_path is empty"}
|
|
61
|
-
|
|
62
|
-
tolerance = max(0, min(int(tolerance), 255))
|
|
63
|
-
|
|
64
|
-
src = Path(input_path).expanduser()
|
|
65
|
-
if not src.exists() or not src.is_file():
|
|
66
|
-
return {"status": "error", "message": f"input file not found: {src}"}
|
|
67
|
-
|
|
68
|
-
try:
|
|
69
|
-
from PIL import Image
|
|
70
|
-
except ImportError:
|
|
71
|
-
return {"status": "error", "message": "Pillow is required (pip install pillow)"}
|
|
72
|
-
|
|
73
|
-
try:
|
|
74
|
-
img = Image.open(src).convert("RGBA")
|
|
75
|
-
except Exception as e:
|
|
76
|
-
return {"status": "error", "message": f"failed to read image: {e}"}
|
|
77
|
-
|
|
78
|
-
width, height = img.size
|
|
79
|
-
if width == 0 or height == 0:
|
|
80
|
-
return {"status": "error", "message": "invalid image size"}
|
|
81
|
-
|
|
82
|
-
px = img.load()
|
|
83
|
-
bg_color = _dominant_corner_color(px, width, height)
|
|
84
|
-
visited = bytearray(width * height)
|
|
85
|
-
q: deque[tuple[int, int]] = deque()
|
|
86
|
-
|
|
87
|
-
def idx(x: int, y: int) -> int:
|
|
88
|
-
return y * width + x
|
|
89
|
-
|
|
90
|
-
def maybe_enqueue(x: int, y: int) -> None:
|
|
91
|
-
if x < 0 or y < 0 or x >= width or y >= height:
|
|
92
|
-
return
|
|
93
|
-
p = px[x, y]
|
|
94
|
-
rgb = p[:3]
|
|
95
|
-
if p[3] == 0 or _is_close_to_bg(rgb, bg_color, tolerance):
|
|
96
|
-
q.append((x, y))
|
|
97
|
-
|
|
98
|
-
# Seed flood fill from image borders only, so subject interior isn't removed.
|
|
99
|
-
for x in range(width):
|
|
100
|
-
maybe_enqueue(x, 0)
|
|
101
|
-
if height > 1:
|
|
102
|
-
maybe_enqueue(x, height - 1)
|
|
103
|
-
for y in range(1, height - 1):
|
|
104
|
-
maybe_enqueue(0, y)
|
|
105
|
-
if width > 1:
|
|
106
|
-
maybe_enqueue(width - 1, y)
|
|
107
|
-
|
|
108
|
-
while q:
|
|
109
|
-
x, y = q.popleft()
|
|
110
|
-
if x < 0 or y < 0 or x >= width or y >= height:
|
|
111
|
-
continue
|
|
112
|
-
i = idx(x, y)
|
|
113
|
-
if visited[i]:
|
|
114
|
-
continue
|
|
115
|
-
|
|
116
|
-
p = px[x, y]
|
|
117
|
-
rgb = p[:3]
|
|
118
|
-
if p[3] != 0 and not _is_close_to_bg(rgb, bg_color, tolerance):
|
|
119
|
-
continue
|
|
120
|
-
|
|
121
|
-
visited[i] = 1
|
|
122
|
-
q.append((x + 1, y))
|
|
123
|
-
q.append((x - 1, y))
|
|
124
|
-
q.append((x, y + 1))
|
|
125
|
-
q.append((x, y - 1))
|
|
126
|
-
|
|
127
|
-
removed_pixels = 0
|
|
128
|
-
for y in range(height):
|
|
129
|
-
for x in range(width):
|
|
130
|
-
i = idx(x, y)
|
|
131
|
-
if not visited[i]:
|
|
132
|
-
continue
|
|
133
|
-
r, g, b, a = px[x, y]
|
|
134
|
-
if a != 0:
|
|
135
|
-
px[x, y] = (r, g, b, 0)
|
|
136
|
-
removed_pixels += 1
|
|
137
|
-
|
|
138
|
-
out = Path(output_path).expanduser()
|
|
139
|
-
if out.suffix.lower() != ".png":
|
|
140
|
-
out = out.with_suffix(".png")
|
|
141
|
-
out.parent.mkdir(parents=True, exist_ok=True)
|
|
142
|
-
|
|
143
|
-
try:
|
|
144
|
-
img.save(out, format="PNG")
|
|
145
|
-
except Exception as e:
|
|
146
|
-
return {"status": "error", "message": f"failed to save output: {e}"}
|
|
147
|
-
|
|
148
|
-
total_pixels = width * height
|
|
149
|
-
return {
|
|
150
|
-
"status": "ok",
|
|
151
|
-
"input_path": str(src),
|
|
152
|
-
"output_path": str(out),
|
|
153
|
-
"size": {"width": width, "height": height},
|
|
154
|
-
"bg_color_rgb": bg_color,
|
|
155
|
-
"tolerance": tolerance,
|
|
156
|
-
"removed_pixels": removed_pixels,
|
|
157
|
-
"removed_ratio": round(removed_pixels / max(total_pixels, 1), 4),
|
|
158
|
-
}
|
|
@@ -1,9 +0,0 @@
|
|
|
1
|
-
id: background_removal
|
|
2
|
-
name: Background Removal
|
|
3
|
-
description: >
|
|
4
|
-
Remove image backgrounds and export transparent PNGs.
|
|
5
|
-
USE THIS after generating an image when you need a transparent version
|
|
6
|
-
(e.g., for logos, stickers, overlays). Call remove_image_background(input_path, output_path).
|
|
7
|
-
added_by: CEO
|
|
8
|
-
type: langchain_module
|
|
9
|
-
sprite: desk_equipment
|
|
@@ -1,281 +0,0 @@
|
|
|
1
|
-
"""Image generation tool via OpenRouter + Gemini image model.
|
|
2
|
-
|
|
3
|
-
Provides one LangChain @tool:
|
|
4
|
-
- image_generation(requirement, save_path)
|
|
5
|
-
"""
|
|
6
|
-
|
|
7
|
-
from __future__ import annotations
|
|
8
|
-
|
|
9
|
-
import base64
|
|
10
|
-
import json
|
|
11
|
-
import os
|
|
12
|
-
import re
|
|
13
|
-
import urllib.error
|
|
14
|
-
import urllib.request
|
|
15
|
-
from pathlib import Path
|
|
16
|
-
|
|
17
|
-
from langchain_core.tools import tool
|
|
18
|
-
|
|
19
|
-
_MODEL = "google/gemini-3-pro-image-preview"
|
|
20
|
-
_DEFAULT_BASE_URL = "https://openrouter.ai/api/v1"
|
|
21
|
-
_USER_AGENT = "OneManCompany-ImageGeneration/1.0"
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
def _post_json(url: str, headers: dict, payload: dict, timeout: int = 60) -> tuple[dict | None, str | None]:
|
|
25
|
-
"""POST JSON and return (json_body, error)."""
|
|
26
|
-
req = urllib.request.Request(
|
|
27
|
-
url,
|
|
28
|
-
data=json.dumps(payload).encode("utf-8"),
|
|
29
|
-
headers=headers,
|
|
30
|
-
method="POST",
|
|
31
|
-
)
|
|
32
|
-
try:
|
|
33
|
-
with urllib.request.urlopen(req, timeout=timeout) as resp:
|
|
34
|
-
raw = resp.read().decode("utf-8", errors="replace")
|
|
35
|
-
return json.loads(raw), None
|
|
36
|
-
except urllib.error.HTTPError as e:
|
|
37
|
-
body_text = e.read().decode("utf-8", errors="replace") if e.fp else ""
|
|
38
|
-
return None, f"HTTP {e.code}: {body_text[:800]}"
|
|
39
|
-
except json.JSONDecodeError as e:
|
|
40
|
-
return None, f"Invalid JSON response: {e}"
|
|
41
|
-
except Exception as e:
|
|
42
|
-
return None, str(e)
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
def _mime_to_ext(mime: str) -> str:
|
|
46
|
-
mapping = {
|
|
47
|
-
"image/png": ".png",
|
|
48
|
-
"image/jpeg": ".jpg",
|
|
49
|
-
"image/jpg": ".jpg",
|
|
50
|
-
"image/webp": ".webp",
|
|
51
|
-
"image/gif": ".gif",
|
|
52
|
-
}
|
|
53
|
-
return mapping.get(mime.lower(), ".png")
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
def _decode_data_url(data_url: str) -> tuple[bytes | None, str]:
|
|
57
|
-
"""Decode data:image/...;base64,... into bytes."""
|
|
58
|
-
if not data_url.startswith("data:image/") or "," not in data_url:
|
|
59
|
-
return None, ""
|
|
60
|
-
header, payload = data_url.split(",", 1)
|
|
61
|
-
mime = header.split(";")[0][5:].strip() or "image/png"
|
|
62
|
-
try:
|
|
63
|
-
return base64.b64decode(payload), mime
|
|
64
|
-
except Exception:
|
|
65
|
-
return None, ""
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
def _decode_base64(payload: str) -> bytes | None:
|
|
69
|
-
"""Decode base64 payload (supports missing padding)."""
|
|
70
|
-
text = payload.strip()
|
|
71
|
-
if not text:
|
|
72
|
-
return None
|
|
73
|
-
try:
|
|
74
|
-
return base64.b64decode(text, validate=True)
|
|
75
|
-
except Exception:
|
|
76
|
-
pad = "=" * (-len(text) % 4)
|
|
77
|
-
try:
|
|
78
|
-
return base64.b64decode(text + pad)
|
|
79
|
-
except Exception:
|
|
80
|
-
try:
|
|
81
|
-
return base64.urlsafe_b64decode(text + pad)
|
|
82
|
-
except Exception:
|
|
83
|
-
return None
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
def _download_image(url: str) -> tuple[bytes | None, str]:
|
|
87
|
-
req = urllib.request.Request(url, headers={"User-Agent": _USER_AGENT})
|
|
88
|
-
try:
|
|
89
|
-
with urllib.request.urlopen(req, timeout=60) as resp:
|
|
90
|
-
data = resp.read()
|
|
91
|
-
mime = resp.headers.get_content_type() or "image/png"
|
|
92
|
-
return data, mime
|
|
93
|
-
except Exception:
|
|
94
|
-
return None, ""
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
def _extract_data_url_from_text(text: str) -> tuple[bytes | None, str]:
|
|
98
|
-
"""Extract first data:image URL from free text."""
|
|
99
|
-
m = re.search(r"(data:image/[a-zA-Z0-9.+-]+;base64,[A-Za-z0-9+/=_-]+)", text)
|
|
100
|
-
if not m:
|
|
101
|
-
return None, ""
|
|
102
|
-
return _decode_data_url(m.group(1))
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
def _iter_values(obj):
|
|
106
|
-
"""Yield (key, value) pairs recursively for dict/list trees."""
|
|
107
|
-
if isinstance(obj, dict):
|
|
108
|
-
for k, v in obj.items():
|
|
109
|
-
yield k, v
|
|
110
|
-
yield from _iter_values(v)
|
|
111
|
-
elif isinstance(obj, list):
|
|
112
|
-
for item in obj:
|
|
113
|
-
yield from _iter_values(item)
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
def _extract_image_bytes(response_json: dict) -> tuple[bytes | None, str]:
|
|
117
|
-
"""Try to extract generated image bytes from multiple response layouts."""
|
|
118
|
-
# 1) OpenAI Images style: {"data":[{"b64_json":"..."}]}
|
|
119
|
-
data = response_json.get("data")
|
|
120
|
-
if isinstance(data, list):
|
|
121
|
-
for item in data:
|
|
122
|
-
if not isinstance(item, dict):
|
|
123
|
-
continue
|
|
124
|
-
b64_payload = item.get("b64_json") or item.get("image_base64")
|
|
125
|
-
if isinstance(b64_payload, str):
|
|
126
|
-
decoded = _decode_base64(b64_payload)
|
|
127
|
-
if decoded:
|
|
128
|
-
return decoded, "image/png"
|
|
129
|
-
url = item.get("url") or item.get("image_url")
|
|
130
|
-
if isinstance(url, str):
|
|
131
|
-
if url.startswith("data:image/"):
|
|
132
|
-
decoded, mime = _decode_data_url(url)
|
|
133
|
-
if decoded:
|
|
134
|
-
return decoded, mime
|
|
135
|
-
elif url.startswith("http://") or url.startswith("https://"):
|
|
136
|
-
downloaded, mime = _download_image(url)
|
|
137
|
-
if downloaded:
|
|
138
|
-
return downloaded, mime
|
|
139
|
-
|
|
140
|
-
# 2) Chat/Responses style variants — recursive scan for common fields
|
|
141
|
-
base64_keys = {"b64_json", "image_base64", "base64", "b64"}
|
|
142
|
-
url_keys = {"image_url", "url"}
|
|
143
|
-
for key, value in _iter_values(response_json):
|
|
144
|
-
if key in base64_keys and isinstance(value, str):
|
|
145
|
-
decoded = _decode_base64(value)
|
|
146
|
-
if decoded:
|
|
147
|
-
return decoded, "image/png"
|
|
148
|
-
|
|
149
|
-
if key in url_keys:
|
|
150
|
-
url_value = ""
|
|
151
|
-
if isinstance(value, str):
|
|
152
|
-
url_value = value
|
|
153
|
-
elif isinstance(value, dict) and isinstance(value.get("url"), str):
|
|
154
|
-
url_value = value["url"]
|
|
155
|
-
|
|
156
|
-
if url_value.startswith("data:image/"):
|
|
157
|
-
decoded, mime = _decode_data_url(url_value)
|
|
158
|
-
if decoded:
|
|
159
|
-
return decoded, mime
|
|
160
|
-
elif url_value.startswith("http://") or url_value.startswith("https://"):
|
|
161
|
-
downloaded, mime = _download_image(url_value)
|
|
162
|
-
if downloaded:
|
|
163
|
-
return downloaded, mime
|
|
164
|
-
|
|
165
|
-
if isinstance(value, str) and "data:image/" in value:
|
|
166
|
-
decoded, mime = _extract_data_url_from_text(value)
|
|
167
|
-
if decoded:
|
|
168
|
-
return decoded, mime
|
|
169
|
-
|
|
170
|
-
return None, ""
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
def _build_headers(api_key: str) -> dict:
|
|
174
|
-
headers = {
|
|
175
|
-
"Authorization": f"Bearer {api_key}",
|
|
176
|
-
"Content-Type": "application/json",
|
|
177
|
-
"User-Agent": _USER_AGENT,
|
|
178
|
-
}
|
|
179
|
-
app_name = os.environ.get("OPENROUTER_APP_NAME", "OneManCompany")
|
|
180
|
-
http_referer = os.environ.get("OPENROUTER_HTTP_REFERER", "http://localhost:8000")
|
|
181
|
-
if app_name:
|
|
182
|
-
headers["X-Title"] = app_name
|
|
183
|
-
if http_referer:
|
|
184
|
-
headers["HTTP-Referer"] = http_referer
|
|
185
|
-
return headers
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
@tool
|
|
189
|
-
def image_generation(requirement: str, save_path: str) -> dict:
|
|
190
|
-
"""Generate an image from a text prompt and save it to disk. USE THIS for any image creation task.
|
|
191
|
-
|
|
192
|
-
Suitable for: illustrations, logos, banners, icons, marketing visuals, concept art, diagrams.
|
|
193
|
-
After generating, use remove_image_background if you need a transparent version.
|
|
194
|
-
|
|
195
|
-
Args:
|
|
196
|
-
requirement: Detailed text prompt describing the image to generate. Be specific about style, colors, composition.
|
|
197
|
-
save_path: Output file path (e.g. /tmp/banner.png).
|
|
198
|
-
"""
|
|
199
|
-
requirement = (requirement or "").strip()
|
|
200
|
-
save_path = (save_path or "").strip()
|
|
201
|
-
if not requirement:
|
|
202
|
-
return {"status": "error", "message": "requirement is empty"}
|
|
203
|
-
if not save_path:
|
|
204
|
-
return {"status": "error", "message": "save_path is empty"}
|
|
205
|
-
|
|
206
|
-
api_key = os.environ.get("OPENROUTER_API_KEY", "").strip()
|
|
207
|
-
if not api_key:
|
|
208
|
-
return {"status": "error", "message": "OPENROUTER_API_KEY not configured"}
|
|
209
|
-
|
|
210
|
-
base_url = os.environ.get("OPENROUTER_BASE_URL", _DEFAULT_BASE_URL).rstrip("/")
|
|
211
|
-
headers = _build_headers(api_key)
|
|
212
|
-
|
|
213
|
-
attempts = [
|
|
214
|
-
(
|
|
215
|
-
"chat.completions.text",
|
|
216
|
-
f"{base_url}/chat/completions",
|
|
217
|
-
{
|
|
218
|
-
"model": _MODEL,
|
|
219
|
-
"messages": [{"role": "user", "content": requirement}],
|
|
220
|
-
},
|
|
221
|
-
),
|
|
222
|
-
(
|
|
223
|
-
"chat.completions.multimodal",
|
|
224
|
-
f"{base_url}/chat/completions",
|
|
225
|
-
{
|
|
226
|
-
"model": _MODEL,
|
|
227
|
-
"messages": [
|
|
228
|
-
{
|
|
229
|
-
"role": "user",
|
|
230
|
-
"content": [{"type": "text", "text": requirement}],
|
|
231
|
-
}
|
|
232
|
-
],
|
|
233
|
-
},
|
|
234
|
-
),
|
|
235
|
-
(
|
|
236
|
-
"responses",
|
|
237
|
-
f"{base_url}/responses",
|
|
238
|
-
{
|
|
239
|
-
"model": _MODEL,
|
|
240
|
-
"input": requirement,
|
|
241
|
-
},
|
|
242
|
-
),
|
|
243
|
-
]
|
|
244
|
-
|
|
245
|
-
image_bytes: bytes | None = None
|
|
246
|
-
image_mime = "image/png"
|
|
247
|
-
errors: list[str] = []
|
|
248
|
-
|
|
249
|
-
for attempt_name, url, payload in attempts:
|
|
250
|
-
resp_json, err = _post_json(url, headers, payload)
|
|
251
|
-
if err:
|
|
252
|
-
errors.append(f"{attempt_name}: {err}")
|
|
253
|
-
continue
|
|
254
|
-
assert resp_json is not None
|
|
255
|
-
image_bytes, image_mime = _extract_image_bytes(resp_json)
|
|
256
|
-
if image_bytes:
|
|
257
|
-
break
|
|
258
|
-
snippet = json.dumps(resp_json, ensure_ascii=False)[:400]
|
|
259
|
-
errors.append(f"{attempt_name}: no image found, response={snippet}")
|
|
260
|
-
|
|
261
|
-
if not image_bytes:
|
|
262
|
-
return {
|
|
263
|
-
"status": "error",
|
|
264
|
-
"message": "Image generation failed on all OpenRouter attempts.",
|
|
265
|
-
"model": _MODEL,
|
|
266
|
-
"errors": errors,
|
|
267
|
-
}
|
|
268
|
-
|
|
269
|
-
out = Path(save_path).expanduser()
|
|
270
|
-
if not out.suffix:
|
|
271
|
-
out = out.with_suffix(_mime_to_ext(image_mime))
|
|
272
|
-
out.parent.mkdir(parents=True, exist_ok=True)
|
|
273
|
-
out.write_bytes(image_bytes)
|
|
274
|
-
|
|
275
|
-
return {
|
|
276
|
-
"status": "ok",
|
|
277
|
-
"model": _MODEL,
|
|
278
|
-
"saved_to": str(out),
|
|
279
|
-
"bytes": len(image_bytes),
|
|
280
|
-
"mime": image_mime,
|
|
281
|
-
}
|
|
@@ -1,26 +0,0 @@
|
|
|
1
|
-
id: image_generation
|
|
2
|
-
name: Image Generation
|
|
3
|
-
description: >
|
|
4
|
-
Generate images from text prompts (Gemini via OpenRouter).
|
|
5
|
-
USE THIS whenever a task involves creating images, illustrations, logos, banners,
|
|
6
|
-
icons, diagrams, or any visual content. Call image_generation(requirement, save_path).
|
|
7
|
-
added_by: CEO
|
|
8
|
-
type: langchain_module
|
|
9
|
-
sprite: desk_equipment
|
|
10
|
-
env_vars:
|
|
11
|
-
- name: OPENROUTER_API_KEY
|
|
12
|
-
label: OpenRouter API Key
|
|
13
|
-
secret: true
|
|
14
|
-
placeholder: sk-or-v1-...
|
|
15
|
-
- name: OPENROUTER_BASE_URL
|
|
16
|
-
label: OpenRouter Base URL
|
|
17
|
-
secret: false
|
|
18
|
-
placeholder: https://openrouter.ai/api/v1
|
|
19
|
-
- name: OPENROUTER_APP_NAME
|
|
20
|
-
label: OpenRouter App Name
|
|
21
|
-
secret: false
|
|
22
|
-
placeholder: OneManCompany
|
|
23
|
-
- name: OPENROUTER_HTTP_REFERER
|
|
24
|
-
label: OpenRouter HTTP Referer
|
|
25
|
-
secret: false
|
|
26
|
-
placeholder: http://localhost:8000
|
|
@@ -1,15 +0,0 @@
|
|
|
1
|
-
id: video_generation
|
|
2
|
-
name: Video Generation
|
|
3
|
-
description: >
|
|
4
|
-
Generate videos from text prompts (Bytedance Seedance 1.5 Pro via fal.ai).
|
|
5
|
-
USE THIS whenever a task involves creating video content, promotional videos, demos, or animations.
|
|
6
|
-
Workflow: call video_submit → create a cron task to call video_check_status every 30s →
|
|
7
|
-
when COMPLETED, call video_download to save the file. Do NOT block-wait.
|
|
8
|
-
added_by: CEO
|
|
9
|
-
type: langchain_module
|
|
10
|
-
sprite: desk_equipment
|
|
11
|
-
env_vars:
|
|
12
|
-
- name: FAL_API_KEY
|
|
13
|
-
label: fal.ai API Key
|
|
14
|
-
secret: true
|
|
15
|
-
placeholder: "key_id:key_secret"
|
|
@@ -1,267 +0,0 @@
|
|
|
1
|
-
"""Video generation tool via fal.ai + Bytedance Seedance 1.5 Pro.
|
|
2
|
-
|
|
3
|
-
Three-step workflow:
|
|
4
|
-
1. video_submit(prompt) → returns status_url + response_url
|
|
5
|
-
2. video_check_status(status_url) → poll via cron task every 30s until COMPLETED
|
|
6
|
-
3. video_download(response_url, save_path) → save video to disk
|
|
7
|
-
"""
|
|
8
|
-
|
|
9
|
-
from __future__ import annotations
|
|
10
|
-
|
|
11
|
-
import json
|
|
12
|
-
import os
|
|
13
|
-
import urllib.error
|
|
14
|
-
import urllib.request
|
|
15
|
-
from pathlib import Path
|
|
16
|
-
|
|
17
|
-
from langchain_core.tools import tool
|
|
18
|
-
|
|
19
|
-
_MODEL_ID = "fal-ai/bytedance/seedance/v1.5/pro/text-to-video"
|
|
20
|
-
_QUEUE_BASE = "https://queue.fal.run"
|
|
21
|
-
_USER_AGENT = "OneManCompany-VideoGeneration/1.0"
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
# ── HTTP helpers ──────────────────────────────────────────
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
def _get_api_key() -> str | None:
|
|
28
|
-
key = os.environ.get("FAL_KEY", "") or os.environ.get("FAL_API_KEY", "")
|
|
29
|
-
return key.strip() or None
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
def _build_headers(api_key: str) -> dict:
|
|
33
|
-
return {
|
|
34
|
-
"Authorization": f"Key {api_key}",
|
|
35
|
-
"Content-Type": "application/json",
|
|
36
|
-
"User-Agent": _USER_AGENT,
|
|
37
|
-
}
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
def _request_json(
|
|
41
|
-
method: str,
|
|
42
|
-
url: str,
|
|
43
|
-
headers: dict,
|
|
44
|
-
payload: dict | None = None,
|
|
45
|
-
timeout: int = 120,
|
|
46
|
-
) -> tuple[dict | None, str | None]:
|
|
47
|
-
"""Make an HTTP request and return (json_body, error)."""
|
|
48
|
-
data = json.dumps(payload).encode("utf-8") if payload else None
|
|
49
|
-
req = urllib.request.Request(url, data=data, headers=headers, method=method)
|
|
50
|
-
try:
|
|
51
|
-
with urllib.request.urlopen(req, timeout=timeout) as resp:
|
|
52
|
-
raw = resp.read().decode("utf-8", errors="replace")
|
|
53
|
-
return json.loads(raw), None
|
|
54
|
-
except urllib.error.HTTPError as e:
|
|
55
|
-
body_text = e.read().decode("utf-8", errors="replace") if e.fp else ""
|
|
56
|
-
return None, f"HTTP {e.code}: {body_text[:800]}"
|
|
57
|
-
except json.JSONDecodeError as e:
|
|
58
|
-
return None, f"Invalid JSON response: {e}"
|
|
59
|
-
except Exception as e:
|
|
60
|
-
return None, str(e)
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
def _download_file(url: str, timeout: int = 300) -> tuple[bytes | None, str]:
|
|
64
|
-
"""Download a file from URL, return (bytes, error_or_content_type)."""
|
|
65
|
-
req = urllib.request.Request(url, headers={"User-Agent": _USER_AGENT})
|
|
66
|
-
try:
|
|
67
|
-
with urllib.request.urlopen(req, timeout=timeout) as resp:
|
|
68
|
-
data = resp.read()
|
|
69
|
-
ct = resp.headers.get_content_type() or "video/mp4"
|
|
70
|
-
return data, ct
|
|
71
|
-
except Exception as e:
|
|
72
|
-
return None, str(e)
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
# ── Tool 1: Submit ────────────────────────────────────────
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
@tool
def video_submit(
    prompt: str,
    duration: str = "5",
    aspect_ratio: str = "16:9",
    resolution: str = "720p",
    generate_audio: bool = True,
) -> dict:
    """Submit a text-to-video generation request. USE THIS for any video creation task.

    This is Step 1 of 3 for video generation:
    1. Call video_submit(prompt) → get status_url and response_url
    2. Create a cron task to call video_check_status(status_url) every 30 seconds
    3. When status is COMPLETED, call video_download(response_url, save_path)

    Args:
        prompt: Detailed description of the video to generate. Be specific about scenes, motion, style.
        duration: Video length: "auto", or "4" through "15" seconds (default: "5").
        aspect_ratio: "auto", "21:9", "16:9", "4:3", "1:1", "3:4", "9:16" (default: "16:9").
        resolution: "480p" or "720p" (default: "720p").
        generate_audio: Whether to generate audio (default: True, doubles cost).
    """
    prompt = (prompt or "").strip()
    if not prompt:
        return {"status": "error", "message": "prompt is empty"}

    api_key = _get_api_key()
    if not api_key:
        return {"status": "error", "message": "FAL_KEY or FAL_API_KEY not configured"}

    headers = _build_headers(api_key)
    payload = {
        "prompt": prompt,
        "duration": str(duration),  # queue API expects duration as a string
        "aspect_ratio": aspect_ratio,
        "resolution": resolution,
        "generate_audio": generate_audio,
    }

    url = f"{_QUEUE_BASE}/{_MODEL_ID}"
    resp, err = _request_json("POST", url, headers, payload, timeout=30)
    if err:
        return {"status": "error", "message": f"Failed to submit: {err}"}

    # FIX: was `assert resp is not None` — stripped under `python -O`, and
    # it did not cover a non-dict JSON body (e.g. the literal `null` or a
    # list), which would crash on .get() below. Return an error dict in the
    # same shape callers already handle.
    if not isinstance(resp, dict):
        return {"status": "error", "message": f"Unexpected response: {json.dumps(resp)[:400]}"}

    request_id = resp.get("request_id")
    if not request_id:
        return {"status": "error", "message": f"No request_id: {json.dumps(resp)[:400]}"}

    return {
        "status": "ok",
        "message": (
            f"Video generation submitted! request_id: {request_id}. "
            "Generation typically takes 1-5 minutes. "
            "Create a cron task to call video_check_status every 30-60 seconds "
            "with the status_url below. Once COMPLETED, call video_download "
            "with the response_url below and a save_path."
        ),
        "request_id": request_id,
        "status_url": resp.get("status_url"),
        "response_url": resp.get("response_url"),
        "cancel_url": resp.get("cancel_url"),
        "queue_position": resp.get("queue_position"),
    }
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
# ── Tool 2: Check Status ─────────────────────────────────
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
@tool
def video_check_status(status_url: str) -> dict:
    """Check if a submitted video generation is done (Step 2 of 3).

    Call this via cron task every 30 seconds after video_submit.
    When generation_status is COMPLETED → call video_download.
    When FAILED → stop the cron task and report the error.

    Args:
        status_url: The status_url returned by video_submit.
    """
    status_url = (status_url or "").strip()
    if not status_url:
        return {"status": "error", "message": "status_url is empty"}

    api_key = _get_api_key()
    if not api_key:
        return {"status": "error", "message": "FAL_KEY or FAL_API_KEY not configured"}

    headers = _build_headers(api_key)
    resp, err = _request_json("GET", status_url, headers, timeout=30)
    if err:
        return {"status": "error", "message": f"Failed to check status: {err}"}

    # FIX: was `assert resp is not None` — stripped under `python -O`, and
    # a non-dict JSON body (e.g. `null`) would crash on .get() below.
    if not isinstance(resp, dict):
        return {"status": "error", "message": f"Unexpected response: {json.dumps(resp)[:400]}"}

    gen_status = resp.get("status", "UNKNOWN")

    result = {
        "status": "ok",
        "generation_status": gen_status,
        "request_id": resp.get("request_id"),
    }

    if gen_status == "IN_QUEUE":
        result["queue_position"] = resp.get("queue_position")
        result["message"] = "Still in queue. Check again in 30-60 seconds."
    elif gen_status == "IN_PROGRESS":
        result["message"] = "Video is being generated. Check again in 30-60 seconds."
    elif gen_status == "COMPLETED":
        result["message"] = (
            "Video generation is complete! "
            "Call video_download with the response_url and a save_path to download it. "
            "You can cancel the cron task now."
        )
        result["inference_time"] = (resp.get("metrics") or {}).get("inference_time")
    elif gen_status in ("FAILED", "ERROR", "CANCELLED", "CANCELED"):
        result["message"] = f"Generation failed: {resp.get('error', 'unknown error')}. You can cancel the cron task now."
    else:
        result["message"] = f"Unknown status: {gen_status}. Check again in 30 seconds."

    return result
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
# ── Tool 3: Download ──────────────────────────────────────
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
@tool
def video_download(response_url: str, save_path: str) -> dict:
    """Download a completed video to disk (Step 3 of 3).

    Call this after video_check_status returns COMPLETED. Stop the cron task after downloading.

    Args:
        response_url: The response_url returned by video_submit.
        save_path: Output file path (e.g. /tmp/my_video.mp4).
    """
    response_url = (response_url or "").strip()
    save_path = (save_path or "").strip()
    if not response_url:
        return {"status": "error", "message": "response_url is empty"}
    if not save_path:
        return {"status": "error", "message": "save_path is empty"}

    api_key = _get_api_key()
    if not api_key:
        return {"status": "error", "message": "FAL_KEY or FAL_API_KEY not configured"}

    headers = _build_headers(api_key)

    # Fetch the result
    resp, err = _request_json("GET", response_url, headers, timeout=60)
    if err:
        return {"status": "error", "message": f"Failed to fetch result: {err}"}

    # FIX: was `assert resp is not None` — stripped under `python -O`, and
    # a non-dict JSON body (e.g. `null`) would crash on .get() below.
    if not isinstance(resp, dict):
        return {"status": "error", "message": f"Unexpected response: {json.dumps(resp)[:400]}"}

    # Extract video URL — fal.ai format: {"video": {"url": "...", ...}}
    video_info = resp.get("video", {})
    video_url = video_info.get("url") if isinstance(video_info, dict) else None
    if not video_url:
        snippet = json.dumps(resp, ensure_ascii=False)[:400]
        return {
            "status": "error",
            "message": f"No video URL in result: {snippet}",
        }

    # Download
    video_bytes, content_type = _download_file(video_url)
    if not video_bytes:
        # On failure _download_file returns the error text in content_type's slot.
        return {
            "status": "error",
            "message": f"Failed to download video: {content_type}",
            "video_url": video_url,
        }

    # Save to disk; default to .mp4 when the caller gave a bare path.
    out = Path(save_path).expanduser()
    if not out.suffix:
        out = out.with_suffix(".mp4")
    out.parent.mkdir(parents=True, exist_ok=True)
    out.write_bytes(video_bytes)

    return {
        "status": "ok",
        "model": _MODEL_ID,
        "saved_to": str(out),
        "bytes": len(video_bytes),
        "content_type": video_info.get("content_type", content_type),
        "video_url": video_url,
        "seed": resp.get("seed"),
    }
|