nous-genai 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nous/__init__.py +3 -0
- nous/genai/__init__.py +56 -0
- nous/genai/__main__.py +3 -0
- nous/genai/_internal/__init__.py +1 -0
- nous/genai/_internal/capability_rules.py +476 -0
- nous/genai/_internal/config.py +102 -0
- nous/genai/_internal/errors.py +63 -0
- nous/genai/_internal/http.py +951 -0
- nous/genai/_internal/json_schema.py +54 -0
- nous/genai/cli.py +1316 -0
- nous/genai/client.py +719 -0
- nous/genai/mcp_cli.py +275 -0
- nous/genai/mcp_server.py +1080 -0
- nous/genai/providers/__init__.py +15 -0
- nous/genai/providers/aliyun.py +535 -0
- nous/genai/providers/anthropic.py +483 -0
- nous/genai/providers/gemini.py +1606 -0
- nous/genai/providers/openai.py +1909 -0
- nous/genai/providers/tuzi.py +1158 -0
- nous/genai/providers/volcengine.py +273 -0
- nous/genai/reference/__init__.py +17 -0
- nous/genai/reference/catalog.py +206 -0
- nous/genai/reference/mappings.py +467 -0
- nous/genai/reference/mode_overrides.py +26 -0
- nous/genai/reference/model_catalog.py +82 -0
- nous/genai/reference/model_catalog_data/__init__.py +1 -0
- nous/genai/reference/model_catalog_data/aliyun.py +98 -0
- nous/genai/reference/model_catalog_data/anthropic.py +10 -0
- nous/genai/reference/model_catalog_data/google.py +45 -0
- nous/genai/reference/model_catalog_data/openai.py +44 -0
- nous/genai/reference/model_catalog_data/tuzi_anthropic.py +21 -0
- nous/genai/reference/model_catalog_data/tuzi_google.py +19 -0
- nous/genai/reference/model_catalog_data/tuzi_openai.py +75 -0
- nous/genai/reference/model_catalog_data/tuzi_web.py +136 -0
- nous/genai/reference/model_catalog_data/volcengine.py +107 -0
- nous/genai/tools/__init__.py +13 -0
- nous/genai/tools/output_parser.py +119 -0
- nous/genai/types.py +416 -0
- nous/py.typed +1 -0
- nous_genai-0.1.0.dist-info/METADATA +200 -0
- nous_genai-0.1.0.dist-info/RECORD +45 -0
- nous_genai-0.1.0.dist-info/WHEEL +5 -0
- nous_genai-0.1.0.dist-info/entry_points.txt +4 -0
- nous_genai-0.1.0.dist-info/licenses/LICENSE +190 -0
- nous_genai-0.1.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,1158 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import re
|
|
5
|
+
import time
|
|
6
|
+
from dataclasses import dataclass, replace
|
|
7
|
+
from typing import Any, Iterator
|
|
8
|
+
from uuid import uuid4
|
|
9
|
+
|
|
10
|
+
from .._internal.errors import (
|
|
11
|
+
GenAIError,
|
|
12
|
+
invalid_request_error,
|
|
13
|
+
not_supported_error,
|
|
14
|
+
provider_error,
|
|
15
|
+
)
|
|
16
|
+
from .._internal.http import request_json
|
|
17
|
+
from ..types import (
|
|
18
|
+
Capability,
|
|
19
|
+
GenerateEvent,
|
|
20
|
+
GenerateRequest,
|
|
21
|
+
GenerateResponse,
|
|
22
|
+
JobInfo,
|
|
23
|
+
Message,
|
|
24
|
+
Part,
|
|
25
|
+
PartSourceUrl,
|
|
26
|
+
)
|
|
27
|
+
from .anthropic import AnthropicAdapter
|
|
28
|
+
from .gemini import GeminiAdapter
|
|
29
|
+
from .openai import OpenAIAdapter
|
|
30
|
+
|
|
31
|
+
# Public and "pro" hosts of the asyncdata.net async-task API, used by the
# deepsearch models and by sora-style async chat video generation.
_ASYNCDATA_BASE_URL = "https://asyncdata.net"
_ASYNCDATA_PRO_BASE_URL = "https://pro.asyncdata.net"

# Model ids served through the asyncdata.net job API rather than a direct
# chat endpoint (matched case-insensitively by _is_deepsearch_model).
_DEEPSEARCH_MODELS = frozenset(
    {
        "gemini-2.5-flash-deepsearch",
        "gemini-2.5-flash-deepsearch-async",
        "gemini-2.5-pro-deepsearch",
        "gemini-2.5-pro-deepsearch-async",
        "gemini-3-pro-deepsearch",
        "gemini-3-pro-deepsearch-async",
    }
)
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def _is_deepsearch_model(model_id: str) -> bool:
    """Return True when *model_id* names one of the asyncdata deepsearch models."""
    normalized = model_id.strip().lower()
    return normalized in _DEEPSEARCH_MODELS
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
# First .mp4 URL in a blob of text (optional query string); used to pull the
# video link out of async task result content.
_MP4_URL_RE = re.compile(r"https?://[^\s\"'<>]+?\.mp4(?:\?[^\s\"'<>]*)?", re.IGNORECASE)
# Same shape for common audio file extensions.
_AUDIO_URL_RE = re.compile(
    r"https?://[^\s\"'<>]+?\.(?:mp3|wav|m4a|aac|flac|ogg|opus)(?:\?[^\s\"'<>]*)?",
    re.IGNORECASE,
)
|
|
55
|
+
|
|
56
|
+
# Suno workflow model ids handled by TuziAdapter._suno_workflow; each maps to
# a /suno/submit/... endpoint via TuziAdapter._suno_workflow_endpoint.
_SUNO_WORKFLOW_MODELS = frozenset(
    {
        "suno-all-stems",
        "suno-continue",
        "suno-continue-uploaded",
        "suno-infill",
        "suno-infill-uploaded",
        "suno-midi",
        "suno-overpainting",
        "suno-remix",
        "suno-remix-uploaded",
        "suno-rewrite",
        "suno-tags",
        "suno-vocal-stems",
        "suno_act_midi",
        "suno_act_mp4",
        "suno_act_stems",
        "suno_act_tags",
        "suno_act_timing",
        "suno_act_wav",
        "suno_concat",
        "suno_persona_create",
        "suno_uploads",
    }
)
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
def _extract_first_url(pattern: re.Pattern[str], text: str) -> str | None:
|
|
84
|
+
m = pattern.search(text)
|
|
85
|
+
if m is None:
|
|
86
|
+
return None
|
|
87
|
+
return m.group(0)
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
def _closest_kling_duration(duration_sec: int | None) -> str:
|
|
91
|
+
if duration_sec is None:
|
|
92
|
+
return "5"
|
|
93
|
+
try:
|
|
94
|
+
sec = int(duration_sec)
|
|
95
|
+
except Exception:
|
|
96
|
+
return "5"
|
|
97
|
+
return "5" if sec <= 5 else "10"
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
def _sora_api_model_and_prompt_suffix(model_id: str) -> tuple[str, str | None]:
|
|
101
|
+
mid = model_id.strip()
|
|
102
|
+
mid_l = mid.lower()
|
|
103
|
+
if mid_l in {"sora-2", "sora-2-pro", "sora-2-character", "sora-2-pro-character"}:
|
|
104
|
+
return mid, None
|
|
105
|
+
if mid_l.startswith("sora-") and ":" in mid_l:
|
|
106
|
+
parts = mid_l.split("-")
|
|
107
|
+
ratio = parts[1] if len(parts) > 1 else ""
|
|
108
|
+
res = parts[2] if len(parts) > 2 else ""
|
|
109
|
+
dur = parts[3] if len(parts) > 3 else ""
|
|
110
|
+
api_model = "sora-2-pro" if "720p" in res else "sora-2"
|
|
111
|
+
suffix_parts: list[str] = []
|
|
112
|
+
if ratio:
|
|
113
|
+
suffix_parts.append(ratio)
|
|
114
|
+
if res:
|
|
115
|
+
suffix_parts.append(res)
|
|
116
|
+
if dur:
|
|
117
|
+
suffix_parts.append(dur if dur.endswith("s") else f"{dur}s")
|
|
118
|
+
return api_model, " ".join(suffix_parts) if suffix_parts else None
|
|
119
|
+
return mid, None
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
@dataclass(frozen=True, slots=True)
class TuziAdapter:
    """
    Tuzi exposes multiple protocols (OpenAI-compatible, Gemini v1beta, Anthropic /v1/messages)
    under a single API key. Route by model_id.

    For deepsearch models, uses the asyncdata.net async API.
    """

    # Protocol-specific sub-adapters; any of them may be None depending on
    # which credentials/base urls were configured.
    openai: OpenAIAdapter | None
    gemini: GeminiAdapter | None
    anthropic: AnthropicAdapter | None
    # Optional proxy forwarded to every request_json call made here.
    proxy_url: str | None = None
|
|
135
|
+
|
|
136
|
+
    def capabilities(self, model_id: str) -> Capability:
        """Return the Capability descriptor for *model_id*.

        Special-cases tuzi-web image/audio/deepsearch models; anything else
        is delegated to the protocol adapter chosen by self._route.
        """
        mid_l = model_id.lower().strip()
        if mid_l in {"kling_image", "seededit"}:
            # Image models; only kling_image supports async jobs.
            return Capability(
                input_modalities={"text", "image"},
                output_modalities={"image"},
                supports_stream=False,
                supports_job=(mid_l == "kling_image"),
                supports_tools=False,
                supports_json_schema=False,
            )
        if mid_l in _SUNO_WORKFLOW_MODELS:
            return Capability(
                input_modalities={"text"},
                output_modalities={"audio"},
                supports_stream=False,
                supports_job=True,
                supports_tools=False,
                supports_json_schema=False,
            )
        if mid_l == "suno_lyrics":
            # Lyrics generation is text-in/text-out but still job-based.
            return Capability(
                input_modalities={"text"},
                output_modalities={"text"},
                supports_stream=False,
                supports_job=True,
                supports_tools=False,
                supports_json_schema=False,
            )
        if mid_l == "suno_music" or (
            mid_l.startswith("chirp-") and mid_l != "chirp-v3"
        ):
            return Capability(
                input_modalities={"text"},
                output_modalities={"audio"},
                supports_stream=False,
                supports_job=True,
                supports_tools=False,
                supports_json_schema=False,
            )
        if _is_deepsearch_model(model_id):
            # Deepsearch runs through the asyncdata.net job API; no streaming.
            return Capability(
                input_modalities={"text"},
                output_modalities={"text"},
                supports_stream=False,
                supports_job=True,
                supports_tools=False,
                supports_json_schema=False,
            )
        return self._route(model_id).capabilities(model_id)
|
|
186
|
+
|
|
187
|
+
    def generate(
        self, request: GenerateRequest, *, stream: bool
    ) -> GenerateResponse | Iterator[GenerateEvent]:
        """Route a generate request to the matching tuzi backend.

        Branch order matters: special-cased video/image/text/audio/deepsearch
        model families are checked first; anything unmatched falls through to
        the protocol adapter selected by self._route. Streaming is rejected
        for every special-cased family.
        """
        model_id = request.model_id()
        mid_l = model_id.lower().strip()
        modalities = set(request.output.modalities)

        if modalities == {"video"} and mid_l.startswith("pika-"):
            raise not_supported_error(
                "tuzi pika endpoints are not available on api.tu-zi.com (returns HTML)"
            )

        if modalities == {"video"} and "seedance" in mid_l:
            raise not_supported_error(
                "doubao seedance video is not supported on tuzi-web (upstream returns multipart: NextPart: EOF)"
            )

        if modalities == {"video"} and mid_l.startswith("kling"):
            if stream:
                raise invalid_request_error(
                    "kling video generation does not support streaming"
                )
            return self._kling_text2video(request, model_id=model_id)

        if modalities == {"video"} and mid_l.startswith("sora-"):
            if stream:
                raise invalid_request_error(
                    "sora video generation does not support streaming"
                )
            return self._async_chat_video(request, model_id=model_id)

        if modalities == {"video"} and mid_l.startswith("runway-"):
            raise not_supported_error(
                "tuzi runway endpoints are not available on api.tu-zi.com (returns HTML)"
            )

        if modalities == {"image"} and mid_l in {"kling_image", "seededit"}:
            if stream:
                raise invalid_request_error(
                    f"{mid_l} image generation does not support streaming"
                )
            # kling_image with an image input goes through the routed protocol
            # adapter; text-only input goes to the kling REST endpoint.
            if mid_l == "kling_image" and self._has_image_input(request):
                return self._route(model_id).generate(request, stream=False)
            if mid_l == "kling_image":
                return self._kling_text2image(request, model_id=model_id)
            return self._seededit(request, model_id=model_id)

        if modalities == {"text"} and mid_l == "suno_lyrics":
            if stream:
                raise invalid_request_error(
                    "suno lyrics generation does not support streaming"
                )
            return self._suno_lyrics(request)

        if modalities == {"audio"} and mid_l in _SUNO_WORKFLOW_MODELS:
            if stream:
                raise invalid_request_error(
                    "suno workflow endpoints do not support streaming"
                )
            return self._suno_workflow(request, model_id=model_id)

        if modalities == {"audio"} and (
            mid_l == "suno_music"
            or (mid_l.startswith("chirp-") and mid_l != "chirp-v3")
        ):
            if stream:
                raise invalid_request_error(
                    "suno music generation does not support streaming"
                )
            return self._suno_music(request, model_id=model_id)

        if _is_deepsearch_model(model_id):
            if stream:
                raise invalid_request_error(
                    "deepsearch models do not support streaming; use stream=False"
                )
            return self._deepsearch(request, model_id=model_id)
        return self._route(model_id).generate(request, stream=stream)
|
|
265
|
+
|
|
266
|
+
def _has_image_input(self, request: GenerateRequest) -> bool:
|
|
267
|
+
for msg in request.input:
|
|
268
|
+
for part in msg.content:
|
|
269
|
+
if part.type == "image":
|
|
270
|
+
return True
|
|
271
|
+
return False
|
|
272
|
+
|
|
273
|
+
def _base_host(self) -> str:
|
|
274
|
+
if self.gemini is not None and self.gemini.base_url:
|
|
275
|
+
return self.gemini.base_url.rstrip("/")
|
|
276
|
+
if self.anthropic is not None and self.anthropic.base_url:
|
|
277
|
+
return self.anthropic.base_url.rstrip("/")
|
|
278
|
+
if self.openai is not None and self.openai.base_url:
|
|
279
|
+
base = self.openai.base_url.rstrip("/")
|
|
280
|
+
if base.endswith("/v1"):
|
|
281
|
+
return base[:-3]
|
|
282
|
+
return base
|
|
283
|
+
raise invalid_request_error("tuzi base url not configured")
|
|
284
|
+
|
|
285
|
+
def _bearer_headers(self) -> dict[str, str]:
|
|
286
|
+
if self.openai is not None and self.openai.api_key:
|
|
287
|
+
return {"Authorization": f"Bearer {self.openai.api_key}"}
|
|
288
|
+
if self.gemini is not None and self.gemini.api_key:
|
|
289
|
+
return {"Authorization": f"Bearer {self.gemini.api_key}"}
|
|
290
|
+
if self.anthropic is not None and self.anthropic.api_key:
|
|
291
|
+
return {"Authorization": f"Bearer {self.anthropic.api_key}"}
|
|
292
|
+
raise invalid_request_error("tuzi api key not configured")
|
|
293
|
+
|
|
294
|
+
def _single_text_prompt(self, request: GenerateRequest) -> str:
|
|
295
|
+
texts: list[str] = []
|
|
296
|
+
for msg in request.input:
|
|
297
|
+
for part in msg.content:
|
|
298
|
+
if part.type != "text":
|
|
299
|
+
continue
|
|
300
|
+
t = part.require_text().strip()
|
|
301
|
+
if t:
|
|
302
|
+
texts.append(t)
|
|
303
|
+
if len(texts) != 1:
|
|
304
|
+
raise invalid_request_error("this operation requires exactly one text part")
|
|
305
|
+
return texts[0]
|
|
306
|
+
|
|
307
|
+
def _text_prompt_or_none(self, request: GenerateRequest) -> str | None:
|
|
308
|
+
chunks: list[str] = []
|
|
309
|
+
for msg in request.input:
|
|
310
|
+
for part in msg.content:
|
|
311
|
+
if part.type != "text":
|
|
312
|
+
continue
|
|
313
|
+
t = part.require_text().strip()
|
|
314
|
+
if t:
|
|
315
|
+
chunks.append(t)
|
|
316
|
+
if not chunks:
|
|
317
|
+
return None
|
|
318
|
+
return "\n".join(chunks).strip()
|
|
319
|
+
|
|
320
|
+
    def _seededit(self, request: GenerateRequest, *, model_id: str) -> GenerateResponse:
        """Run seededit image editing through the OpenAI-compatible adapter.

        Rewrites the request's model to the tuzi-web seededit endpoint alias,
        delegates to the openai adapter, then restores the caller-facing
        model name on the response. Raises when the openai adapter is missing
        or the request has no image input (seededit is edit-only).
        """
        if self.openai is None:
            raise invalid_request_error("tuzi openai adapter not configured")
        if not self._has_image_input(request):
            raise invalid_request_error("seededit requires image input")
        req = replace(request, model="tuzi-web:api-images-seededit")
        resp = self.openai.generate(req, stream=False)
        # Non-streaming call must yield a single response, not an iterator.
        assert isinstance(resp, GenerateResponse)
        return replace(resp, model=f"tuzi-web:{model_id}")
|
|
329
|
+
|
|
330
|
+
    def _kling_text2image(
        self, request: GenerateRequest, *, model_id: str
    ) -> GenerateResponse:
        """Submit a kling text-to-image task and optionally poll to completion.

        Submits to /kling/v1/images/text2image. When request.wait is false a
        running response carrying a JobInfo is returned immediately; otherwise
        the task is polled until it succeeds, fails, or the time budget
        (request.params.timeout_ms, default 120s) runs out — in which case a
        pollable running response is returned.
        """
        prompt = self._single_text_prompt(request)
        host = self._base_host()
        body: dict[str, object] = {
            "prompt": prompt,
            "negative_prompt": "",
            "aspect_ratio": "1:1",
            "callback_url": "",
        }
        # provider_options may extend the payload but never replace defaults.
        opts = request.provider_options.get("tuzi-web")
        if isinstance(opts, dict):
            for k, v in opts.items():
                if k in body:
                    raise invalid_request_error(f"provider_options cannot override {k}")
                body[k] = v

        obj = request_json(
            method="POST",
            url=f"{host}/kling/v1/images/text2image",
            headers=self._bearer_headers(),
            json_body=body,
            # Enforce at least a 60s submit timeout.
            timeout_ms=max(request.params.timeout_ms or 60_000, 60_000),
            proxy_url=self.proxy_url,
        )
        data = obj.get("data")
        task_id = data.get("task_id") if isinstance(data, dict) else None
        if not isinstance(task_id, str) or not task_id:
            raise provider_error("kling submit missing task_id")

        if not request.wait:
            # Fire-and-forget: caller polls via the returned job id.
            return GenerateResponse(
                id=f"sdk_{uuid4().hex}",
                provider="tuzi-web",
                model=f"tuzi-web:{model_id}",
                status="running",
                job=JobInfo(job_id=task_id, poll_after_ms=1_000),
            )

        poll_url = f"{host}/kling/v1/images/text2image/{task_id}"
        budget_ms = (
            120_000 if request.params.timeout_ms is None else request.params.timeout_ms
        )
        deadline = time.time() + max(1, budget_ms) / 1000.0
        while True:
            remaining_ms = int((deadline - time.time()) * 1000)
            if remaining_ms <= 0:
                break
            obj = request_json(
                method="GET",
                url=poll_url,
                headers=self._bearer_headers(),
                json_body=None,
                timeout_ms=min(30_000, remaining_ms),
                proxy_url=self.proxy_url,
            )
            data = obj.get("data")
            if not isinstance(data, dict):
                time.sleep(1.0)
                continue
            status = data.get("task_status")
            if status == "failed":
                raise provider_error(
                    f"kling task failed: {data.get('task_status_msg')}"
                )
            if status == "succeed":
                task_result = data.get("task_result")
                if isinstance(task_result, dict):
                    images = task_result.get("images")
                    if isinstance(images, list) and images:
                        first = images[0]
                        # "images" entries may be bare url strings or
                        # {"url": ...} dicts; handle both shapes.
                        if isinstance(first, str) and first:
                            part = Part(type="image", source=PartSourceUrl(url=first))
                            return GenerateResponse(
                                id=f"sdk_{uuid4().hex}",
                                provider="tuzi-web",
                                model=f"tuzi-web:{model_id}",
                                status="completed",
                                output=[Message(role="assistant", content=[part])],
                            )
                        if isinstance(first, dict):
                            u = first.get("url")
                            if isinstance(u, str) and u:
                                part = Part(type="image", source=PartSourceUrl(url=u))
                                return GenerateResponse(
                                    id=f"sdk_{uuid4().hex}",
                                    provider="tuzi-web",
                                    model=f"tuzi-web:{model_id}",
                                    status="completed",
                                    output=[Message(role="assistant", content=[part])],
                                )
                raise provider_error("kling task succeeded but missing image url")
            time.sleep(min(1.0, max(0.0, deadline - time.time())))

        # Budget exhausted while still pending: hand back a pollable job.
        return GenerateResponse(
            id=f"sdk_{uuid4().hex}",
            provider="tuzi-web",
            model=f"tuzi-web:{model_id}",
            status="running",
            job=JobInfo(job_id=task_id, poll_after_ms=1_000),
        )
|
|
432
|
+
|
|
433
|
+
    def _async_chat_video(
        self, request: GenerateRequest, *, model_id: str
    ) -> GenerateResponse:
        """Generate video via the asyncdata.net chat relay (sora models).

        The chat/completions call is wrapped by asyncdata's /tran/ relay,
        which answers with a task id; the result content is then polled and
        the first .mp4 url is extracted from it.
        """
        if self.openai is None:
            raise invalid_request_error(
                "NOUS_GENAI_TUZI_OPENAI_API_KEY required for async chat video models"
            )

        api_model, suffix = _sora_api_model_and_prompt_suffix(model_id)
        messages = []
        for msg in request.input:
            # Collapse unknown roles to "user"; keep system/assistant as-is.
            role = msg.role if msg.role in {"system", "assistant"} else "user"
            text = "".join(
                p.require_text() for p in msg.content if p.type == "text"
            ).strip()
            if not text:
                continue
            if suffix and role == "user":
                # Append the ratio/resolution/duration hint to the first
                # non-empty user message only, then stop.
                text = f"{text} {suffix}".strip()
                suffix = None
            messages.append({"role": role, "content": text})
        if not messages:
            raise invalid_request_error(
                "video generation requires at least one text message"
            )

        original_url = f"{self.openai.base_url}/chat/completions"
        submit_url = f"{_ASYNCDATA_BASE_URL}/tran/{original_url}"
        obj = request_json(
            method="POST",
            url=submit_url,
            headers=self._bearer_headers(),
            json_body={"model": api_model, "messages": messages},
            # Enforce at least a 120s submit timeout.
            timeout_ms=max(request.params.timeout_ms or 120_000, 120_000),
            proxy_url=self.proxy_url,
        )

        task_id = obj.get("id")
        if not isinstance(task_id, str) or not task_id:
            raise provider_error("async video submit missing id")
        # Optional direct poll url for the task; validated downstream.
        source_url = obj.get("source_url")

        if not request.wait:
            return GenerateResponse(
                id=f"sdk_{uuid4().hex}",
                provider="tuzi-web",
                model=f"tuzi-web:{model_id}",
                status="running",
                job=JobInfo(job_id=task_id, poll_after_ms=2_000),
            )

        content = self._poll_asyncdata_content(
            task_id=task_id, source_url=source_url, timeout_ms=request.params.timeout_ms
        )
        if content is None:
            # Timed out while pending; caller can keep polling via job id.
            return GenerateResponse(
                id=f"sdk_{uuid4().hex}",
                provider="tuzi-web",
                model=f"tuzi-web:{model_id}",
                status="running",
                job=JobInfo(job_id=task_id, poll_after_ms=2_000),
            )

        mp4 = _extract_first_url(_MP4_URL_RE, content)
        if not mp4:
            raise provider_error("async video completed but no mp4 url found")
        part = Part(type="video", mime_type="video/mp4", source=PartSourceUrl(url=mp4))
        return GenerateResponse(
            id=f"sdk_{uuid4().hex}",
            provider="tuzi-web",
            model=f"tuzi-web:{model_id}",
            status="completed",
            output=[Message(role="assistant", content=[part])],
        )
|
|
507
|
+
|
|
508
|
+
    def _poll_asyncdata_content(
        self, *, task_id: str, source_url: object, timeout_ms: int | None
    ) -> str | None:
        """Poll asyncdata /source endpoints until a task exposes its content.

        Tries the submit-provided source_url first (when it is a non-empty
        string), then the public and pro hosts in turn. Returns the content
        string, or None when the time budget (default 120s) is exhausted.
        """
        poll_urls: list[str] = []
        if isinstance(source_url, str) and source_url.strip():
            poll_urls.append(source_url.strip())
        poll_urls.extend(
            [
                f"{_ASYNCDATA_BASE_URL}/source/{task_id}",
                f"{_ASYNCDATA_PRO_BASE_URL}/source/{task_id}",
            ]
        )

        budget_ms = 120_000 if timeout_ms is None else timeout_ms
        deadline = time.time() + max(1, budget_ms) / 1000.0
        while True:
            remaining_ms = int((deadline - time.time()) * 1000)
            if remaining_ms <= 0:
                return None
            for url in poll_urls:
                obj = request_json(
                    method="GET",
                    url=url,
                    headers=None,  # no auth headers are sent to /source
                    json_body=None,
                    timeout_ms=min(30_000, remaining_ms),
                    proxy_url=self.proxy_url,
                )
                content = obj.get("content")
                if isinstance(content, str) and content:
                    return content
            time.sleep(min(2.0, max(0.0, deadline - time.time())))
|
|
540
|
+
|
|
541
|
+
    def _kling_text2video(
        self, request: GenerateRequest, *, model_id: str
    ) -> GenerateResponse:
        """Submit a kling text-to-video task and optionally poll to completion.

        Mirrors _kling_text2image but targets /kling/v1/videos/text2video and
        maps request.output.video (aspect ratio, duration) into the payload.
        When wait is false, or the budget expires while pending, a running
        response with a JobInfo is returned.
        """
        prompt = self._single_text_prompt(request)
        host = self._base_host()
        video = request.output.video
        body: dict[str, object] = {
            "prompt": prompt,
            "negative_prompt": "",
            "aspect_ratio": (
                video.aspect_ratio if video and video.aspect_ratio else "16:9"
            ),
            # kling only takes "5" or "10" second durations.
            "duration": _closest_kling_duration(video.duration_sec if video else None),
            "callback_url": "",
        }
        obj = request_json(
            method="POST",
            url=f"{host}/kling/v1/videos/text2video",
            headers=self._bearer_headers(),
            json_body=body,
            timeout_ms=max(request.params.timeout_ms or 60_000, 60_000),
            proxy_url=self.proxy_url,
        )
        data = obj.get("data")
        task_id = data.get("task_id") if isinstance(data, dict) else None
        if not isinstance(task_id, str) or not task_id:
            raise provider_error("kling submit missing task_id")

        if not request.wait:
            return GenerateResponse(
                id=f"sdk_{uuid4().hex}",
                provider="tuzi-web",
                model=f"tuzi-web:{model_id}",
                status="running",
                job=JobInfo(job_id=task_id, poll_after_ms=1_000),
            )

        poll_url = f"{host}/kling/v1/videos/text2video/{task_id}"
        budget_ms = (
            120_000 if request.params.timeout_ms is None else request.params.timeout_ms
        )
        deadline = time.time() + max(1, budget_ms) / 1000.0
        while True:
            remaining_ms = int((deadline - time.time()) * 1000)
            if remaining_ms <= 0:
                break
            obj = request_json(
                method="GET",
                url=poll_url,
                headers=self._bearer_headers(),
                json_body=None,
                timeout_ms=min(30_000, remaining_ms),
                proxy_url=self.proxy_url,
            )
            data = obj.get("data")
            if not isinstance(data, dict):
                time.sleep(1.0)
                continue
            status = data.get("task_status")
            if status == "failed":
                raise provider_error(
                    f"kling task failed: {data.get('task_status_msg')}"
                )
            if status == "succeed":
                task_result = data.get("task_result")
                if isinstance(task_result, dict):
                    videos = task_result.get("videos")
                    if isinstance(videos, list) and videos:
                        first = videos[0]
                        if isinstance(first, dict):
                            u = first.get("url")
                            if isinstance(u, str) and u:
                                part = Part(
                                    type="video",
                                    mime_type="video/mp4",
                                    source=PartSourceUrl(url=u),
                                )
                                return GenerateResponse(
                                    id=f"sdk_{uuid4().hex}",
                                    provider="tuzi-web",
                                    model=f"tuzi-web:{model_id}",
                                    status="completed",
                                    output=[Message(role="assistant", content=[part])],
                                )
                raise provider_error("kling task succeeded but missing video url")
            time.sleep(min(1.0, max(0.0, deadline - time.time())))

        # Budget exhausted while still pending: hand back a pollable job.
        return GenerateResponse(
            id=f"sdk_{uuid4().hex}",
            provider="tuzi-web",
            model=f"tuzi-web:{model_id}",
            status="running",
            job=JobInfo(job_id=task_id, poll_after_ms=1_000),
        )
|
|
635
|
+
|
|
636
|
+
    def _suno_lyrics(self, request: GenerateRequest) -> GenerateResponse:
        """Submit a suno lyrics task; waiting/fetching of the text is delegated."""
        prompt = self._single_text_prompt(request)
        host = self._base_host()
        obj = request_json(
            method="POST",
            url=f"{host}/suno/submit/lyrics",
            headers=self._bearer_headers(),
            json_body={"prompt": prompt},
            timeout_ms=max(request.params.timeout_ms or 60_000, 60_000),
            proxy_url=self.proxy_url,
        )
        # suno submit endpoints return the task id directly in "data".
        task_id = obj.get("data")
        if not isinstance(task_id, str) or not task_id:
            raise provider_error("suno lyrics submit missing task id")
        return self._suno_wait_fetch_text(
            task_id=task_id,
            model_id="suno_lyrics",
            timeout_ms=request.params.timeout_ms,
            wait=request.wait,
        )
|
|
656
|
+
|
|
657
|
+
    def _suno_music(
        self, request: GenerateRequest, *, model_id: str
    ) -> GenerateResponse:
        """Submit a suno music task; chirp-* model ids select the engine version."""
        prompt = self._single_text_prompt(request)
        host = self._base_host()
        # "mv" is the suno engine id; default to chirp-v3-5 for generic ids.
        mv = model_id if model_id.lower().startswith("chirp-") else "chirp-v3-5"
        body: dict[str, object] = {
            "prompt": prompt,
            "tags": "",
            "mv": mv,
            "title": "suno",
            "infill_start_s": None,
            "infill_end_s": None,
        }
        obj = request_json(
            method="POST",
            url=f"{host}/suno/submit/music",
            headers=self._bearer_headers(),
            json_body=body,
            timeout_ms=max(request.params.timeout_ms or 60_000, 60_000),
            proxy_url=self.proxy_url,
        )
        task_id = obj.get("data")
        if not isinstance(task_id, str) or not task_id:
            raise provider_error("suno music submit missing task id")
        return self._suno_wait_fetch_audio(
            task_id=task_id,
            model_id=model_id,
            timeout_ms=request.params.timeout_ms,
            wait=request.wait,
        )
|
|
688
|
+
|
|
689
|
+
def _suno_workflow_endpoint(self, model_id: str) -> str:
|
|
690
|
+
mid_l = model_id.lower().strip()
|
|
691
|
+
if mid_l == "suno_concat":
|
|
692
|
+
return "/suno/submit/concat"
|
|
693
|
+
if mid_l == "suno_uploads":
|
|
694
|
+
return "/suno/submit/upload"
|
|
695
|
+
if mid_l == "suno_persona_create":
|
|
696
|
+
return "/suno/submit/persona-create"
|
|
697
|
+
if mid_l.startswith("suno_act_"):
|
|
698
|
+
suffix = mid_l[len("suno_act_") :]
|
|
699
|
+
if not suffix:
|
|
700
|
+
raise invalid_request_error("invalid suno_act model id")
|
|
701
|
+
return f"/suno/submit/act-{suffix}"
|
|
702
|
+
if mid_l.startswith("suno-"):
|
|
703
|
+
suffix = mid_l[len("suno-") :]
|
|
704
|
+
if not suffix:
|
|
705
|
+
raise invalid_request_error("invalid suno model id")
|
|
706
|
+
return f"/suno/submit/{suffix}"
|
|
707
|
+
raise invalid_request_error(f"unsupported suno workflow model: {model_id}")
|
|
708
|
+
|
|
709
|
+
    def _suno_workflow(
        self, request: GenerateRequest, *, model_id: str
    ) -> GenerateResponse:
        """Submit a suno workflow task built mostly from provider_options.

        The payload is the caller-supplied provider_options["tuzi-web"] dict;
        a text prompt (if any) fills "prompt" only when the options did not
        already supply one.
        """
        host = self._base_host()
        endpoint = self._suno_workflow_endpoint(model_id)

        body: dict[str, object] = {}
        opts = request.provider_options.get("tuzi-web")
        if isinstance(opts, dict):
            body.update(opts)

        prompt = self._text_prompt_or_none(request)
        if prompt and "prompt" not in body:
            body["prompt"] = prompt

        obj = request_json(
            method="POST",
            url=f"{host}{endpoint}",
            headers=self._bearer_headers(),
            json_body=body,
            timeout_ms=max(request.params.timeout_ms or 60_000, 60_000),
            proxy_url=self.proxy_url,
        )
        task_id = obj.get("data")
        if not isinstance(task_id, str) or not task_id:
            raise provider_error("suno submit missing task id")
        return self._suno_wait_fetch_any(
            task_id=task_id,
            model_id=model_id,
            timeout_ms=request.params.timeout_ms,
            wait=request.wait,
        )
|
|
741
|
+
|
|
742
|
+
    def _suno_fetch(
        self, *, host: str, task_id: str, timeout_ms: int | None
    ) -> dict[str, object]:
        """GET /suno/fetch/{task_id} and return its "data" dict.

        Raises via provider_error when the response has no dict "data".
        """
        obj = request_json(
            method="GET",
            url=f"{host}/suno/fetch/{task_id}",
            headers=self._bearer_headers(),
            json_body=None,
            timeout_ms=timeout_ms,
            proxy_url=self.proxy_url,
        )
        data = obj.get("data")
        if not isinstance(data, dict):
            raise provider_error("suno fetch missing data")
        return data
|
|
757
|
+
|
|
758
|
+
def _suno_wait_fetch_text(
    self, *, task_id: str, model_id: str, timeout_ms: int | None, wait: bool
) -> GenerateResponse:
    """Poll a Suno lyrics task until it yields text, fails, or the budget ends.

    With wait=False, returns immediately with a "running" response carrying
    the task id. Otherwise polls until SUCCESS (text part returned), FAIL
    (provider_error raised), or the time budget (default 60s) is exhausted,
    in which case a "running" response lets the caller resume polling.
    """

    def pending() -> GenerateResponse:
        # Shared "still running" reply for the no-wait and timeout paths.
        return GenerateResponse(
            id=f"sdk_{uuid4().hex}",
            provider="tuzi-web",
            model=f"tuzi-web:{model_id}",
            status="running",
            job=JobInfo(job_id=task_id, poll_after_ms=2_000),
        )

    if not wait:
        return pending()

    host = self._base_host()
    budget_ms = timeout_ms if timeout_ms is not None else 60_000
    deadline = time.time() + max(1, budget_ms) / 1000.0
    while True:
        left_ms = int((deadline - time.time()) * 1000)
        if left_ms <= 0:
            return pending()
        # Cap each fetch at 30s so one slow request cannot eat the budget.
        snapshot = self._suno_fetch(
            host=host, task_id=task_id, timeout_ms=min(30_000, left_ms)
        )
        state = snapshot.get("status")
        if state == "SUCCESS":
            payload = snapshot.get("data")
            if isinstance(payload, dict):
                lyrics = payload.get("text")
                if isinstance(lyrics, str):
                    return GenerateResponse(
                        id=f"sdk_{uuid4().hex}",
                        provider="tuzi-web",
                        model=f"tuzi-web:{model_id}",
                        status="completed",
                        output=[
                            Message(
                                role="assistant",
                                content=[Part.from_text(lyrics)],
                            )
                        ],
                    )
            raise provider_error("suno lyrics succeeded but missing text")
        if state == "FAIL":
            raise provider_error(f"suno task failed: {snapshot.get('fail_reason')}")
        # Back off up to 2s between polls, never sleeping past the deadline.
        time.sleep(min(2.0, max(0.0, deadline - time.time())))
|
|
808
|
+
|
|
809
|
+
def _suno_wait_fetch_audio(
    self, *, task_id: str, model_id: str, timeout_ms: int | None, wait: bool
) -> GenerateResponse:
    """Poll a Suno music task and surface its first audio URL as an audio part.

    With wait=False, returns immediately with a "running" response carrying
    the task id. Otherwise polls ``/suno/fetch`` until SUCCESS or FAIL, or
    until the budget (timeout_ms, default 120s) is spent — then a "running"
    response is returned so the caller can resume polling later.
    """
    if not wait:
        # Non-blocking mode: hand back a job handle for later polling.
        return GenerateResponse(
            id=f"sdk_{uuid4().hex}",
            provider="tuzi-web",
            model=f"tuzi-web:{model_id}",
            status="running",
            job=JobInfo(job_id=task_id, poll_after_ms=2_000),
        )
    host = self._base_host()
    budget_ms = 120_000 if timeout_ms is None else timeout_ms
    deadline = time.time() + max(1, budget_ms) / 1000.0
    while True:
        remaining_ms = int((deadline - time.time()) * 1000)
        if remaining_ms <= 0:
            break
        # Cap each fetch at 30s so one slow request cannot eat the budget.
        data = self._suno_fetch(
            host=host, task_id=task_id, timeout_ms=min(30_000, remaining_ms)
        )
        status = data.get("status")
        if status == "SUCCESS":
            inner = data.get("data")
            urls: list[str] = []
            # Collect candidate URLs in priority order: a top-level
            # "audio_url" first, then each "clips[*].audio_url" — or the
            # clip entries directly when the payload itself is a list.
            if isinstance(inner, dict):
                u = inner.get("audio_url")
                if isinstance(u, str) and u:
                    urls.append(u)
                clips = inner.get("clips")
                if isinstance(clips, list):
                    for clip in clips:
                        if not isinstance(clip, dict):
                            continue
                        u = clip.get("audio_url")
                        if isinstance(u, str) and u:
                            urls.append(u)
            elif isinstance(inner, list):
                for clip in inner:
                    if not isinstance(clip, dict):
                        continue
                    u = clip.get("audio_url")
                    if isinstance(u, str) and u:
                        urls.append(u)
            if not urls:
                # Last resort: regex-scan the serialized payload for an
                # audio-looking URL.
                blob = json.dumps(inner, ensure_ascii=False)
                u = _extract_first_url(_AUDIO_URL_RE, blob)
                if u:
                    urls.append(u)
            if urls:
                # Only the first URL is surfaced; mime type is assumed to
                # be MP3 (audio/mpeg) — TODO confirm against the API.
                part = Part(
                    type="audio",
                    mime_type="audio/mpeg",
                    source=PartSourceUrl(url=urls[0]),
                )
                return GenerateResponse(
                    id=f"sdk_{uuid4().hex}",
                    provider="tuzi-web",
                    model=f"tuzi-web:{model_id}",
                    status="completed",
                    output=[Message(role="assistant", content=[part])],
                )
            raise provider_error("suno music succeeded but missing audio url")
        if status == "FAIL":
            raise provider_error(f"suno task failed: {data.get('fail_reason')}")
        # Back off up to 2s between polls, never sleeping past the deadline.
        time.sleep(min(2.0, max(0.0, deadline - time.time())))

    # Budget exhausted without a terminal status: report still running.
    return GenerateResponse(
        id=f"sdk_{uuid4().hex}",
        provider="tuzi-web",
        model=f"tuzi-web:{model_id}",
        status="running",
        job=JobInfo(job_id=task_id, poll_after_ms=2_000),
    )
|
|
883
|
+
|
|
884
|
+
def _suno_wait_fetch_any(
    self, *, task_id: str, model_id: str, timeout_ms: int | None, wait: bool
) -> GenerateResponse:
    """Poll a Suno task and return whatever it produced: text, audio, video.

    Unlike the text/audio-specific waiters, a SUCCESS here always completes
    with best-effort extraction — parts are appended in the order text,
    audio (one per URL), video (first mp4), falling back to the raw JSON
    blob as text when nothing recognizable was found. With wait=False or on
    budget exhaustion (timeout_ms, default 120s) a "running" response is
    returned instead.
    """
    if not wait:
        # Non-blocking mode: hand back a job handle for later polling.
        return GenerateResponse(
            id=f"sdk_{uuid4().hex}",
            provider="tuzi-web",
            model=f"tuzi-web:{model_id}",
            status="running",
            job=JobInfo(job_id=task_id, poll_after_ms=2_000),
        )
    host = self._base_host()
    budget_ms = 120_000 if timeout_ms is None else timeout_ms
    deadline = time.time() + max(1, budget_ms) / 1000.0
    while True:
        remaining_ms = int((deadline - time.time()) * 1000)
        if remaining_ms <= 0:
            break
        # Cap each fetch at 30s so one slow request cannot eat the budget.
        data = self._suno_fetch(
            host=host, task_id=task_id, timeout_ms=min(30_000, remaining_ms)
        )
        status = data.get("status")
        if status == "SUCCESS":
            inner = data.get("data")
            parts: list[Part] = []
            # Serialized payload doubles as a regex haystack for URL
            # fallbacks below.
            blob = json.dumps(inner, ensure_ascii=False)

            audio_urls: list[str] = []
            if isinstance(inner, dict):
                clips = inner.get("clips")
                if isinstance(clips, list):
                    for clip in clips:
                        if not isinstance(clip, dict):
                            continue
                        u = clip.get("audio_url")
                        if isinstance(u, str) and u:
                            audio_urls.append(u)

                # A lyrics-style "text" field becomes the leading text part.
                text = inner.get("text")
                if isinstance(text, str) and text:
                    parts.append(Part.from_text(text))

            if not audio_urls:
                # No structured clip URLs: regex-scan the blob instead.
                u = _extract_first_url(_AUDIO_URL_RE, blob)
                if u:
                    audio_urls.append(u)
            for u in audio_urls:
                parts.append(
                    Part(
                        type="audio",
                        mime_type="audio/mpeg",
                        source=PartSourceUrl(url=u),
                    )
                )

            # First mp4 URL anywhere in the payload becomes a video part.
            mp4 = _extract_first_url(_MP4_URL_RE, blob)
            if mp4:
                parts.append(
                    Part(
                        type="video",
                        mime_type="video/mp4",
                        source=PartSourceUrl(url=mp4),
                    )
                )

            if not parts:
                # Nothing recognizable: return the raw JSON so the caller
                # still sees what the provider sent.
                parts.append(
                    Part.from_text(blob if blob and blob != "null" else "{}")
                )

            return GenerateResponse(
                id=f"sdk_{uuid4().hex}",
                provider="tuzi-web",
                model=f"tuzi-web:{model_id}",
                status="completed",
                output=[Message(role="assistant", content=parts)],
            )
        if status == "FAIL":
            raise provider_error(f"suno task failed: {data.get('fail_reason')}")
        # Back off up to 2s between polls, never sleeping past the deadline.
        time.sleep(min(2.0, max(0.0, deadline - time.time())))

    # Budget exhausted without a terminal status: report still running.
    return GenerateResponse(
        id=f"sdk_{uuid4().hex}",
        provider="tuzi-web",
        model=f"tuzi-web:{model_id}",
        status="running",
        job=JobInfo(job_id=task_id, poll_after_ms=2_000),
    )
|
|
972
|
+
|
|
973
|
+
def _deepsearch(
    self, request: GenerateRequest, *, model_id: str
) -> GenerateResponse:
    """Handle deepsearch models via asyncdata.net async API.

    Submits a chat-completions payload through the asyncdata relay (with up
    to three attempts), then either returns a running job handle
    (wait=False) or polls until the task completes or the timeout budget is
    exhausted.

    Raises invalid_request_error when no OpenAI adapter/key is configured
    or no text message could be built, and provider_error when submission
    never yields a task id.
    """
    if self.openai is None:
        raise invalid_request_error(
            "NOUS_GENAI_TUZI_OPENAI_API_KEY required for deepsearch models"
        )

    # Build chat completions body from the text parts of the request.
    # Any role other than system/assistant is normalized to "user".
    # (The original if/elif chain reassigned "system" -> "system" and
    # "assistant" -> "assistant"; only the normalization to "user" matters.)
    messages = []
    for msg in request.input:
        role = msg.role if msg.role in ("system", "assistant") else "user"
        text = "".join(p.require_text() for p in msg.content if p.type == "text")
        if text:
            messages.append({"role": role, "content": text})

    if not messages:
        raise invalid_request_error("deepsearch requires at least one message")

    # asyncdata.net requires -async suffix for deepsearch models
    api_model_id = model_id if model_id.endswith("-async") else f"{model_id}-async"
    body: dict[str, Any] = {"model": api_model_id, "messages": messages}
    if request.params.temperature is not None:
        body["temperature"] = request.params.temperature

    # Submit async task
    # Note: URL is NOT encoded per official API docs
    original_url = f"{self.openai.base_url}/chat/completions"
    submit_url = f"{_ASYNCDATA_BASE_URL}/tran/{original_url}"

    # asyncdata.net may take a long time to return task_id; retry on transient errors
    submit_timeout_ms = max(request.params.timeout_ms or 300_000, 300_000)
    last_error: str | None = None
    task_id: str | None = None
    for attempt in range(3):
        if attempt:
            # Brief delay between retries only — the original slept even
            # after the final failed attempt, delaying the error raise.
            time.sleep(1.0)
        obj = request_json(
            method="POST",
            url=submit_url,
            headers={"Authorization": f"Bearer {self.openai.api_key}"},
            json_body=body,
            timeout_ms=submit_timeout_ms,
            proxy_url=self.proxy_url,
        )
        candidate = obj.get("id")
        if isinstance(candidate, str) and candidate:
            task_id = candidate
            break
        last_error = obj.get("error", "missing task id")
    else:
        raise provider_error(f"deepsearch submit failed: {last_error}")

    # Non-blocking mode: return a handle the caller can poll.
    if not request.wait:
        return GenerateResponse(
            id=f"sdk_{uuid4().hex}",
            provider="tuzi-web",
            model=f"tuzi-web:{model_id}",
            status="running",
            job=JobInfo(job_id=task_id, poll_after_ms=2_000),
        )

    # Blocking mode: poll until complete
    budget_ms = request.params.timeout_ms or 300_000  # 5 min default for deepsearch
    deadline = time.time() + max(1, budget_ms) / 1000.0
    content = self._poll_deepsearch(task_id=task_id, deadline=deadline)

    if content is None:
        # Budget exhausted before completion: report as still running.
        return GenerateResponse(
            id=f"sdk_{uuid4().hex}",
            provider="tuzi-web",
            model=f"tuzi-web:{model_id}",
            status="running",
            job=JobInfo(job_id=task_id, poll_after_ms=2_000),
        )

    return GenerateResponse(
        id=f"sdk_{uuid4().hex}",
        provider="tuzi-web",
        model=f"tuzi-web:{model_id}",
        status="completed",
        output=[Message(role="assistant", content=[Part.from_text(content)])],
    )
|
|
1061
|
+
|
|
1062
|
+
def _poll_deepsearch(self, *, task_id: str, deadline: float) -> str | None:
    """Repeatedly query asyncdata.net for *task_id*.

    Returns the task's "content" string once it is present and non-empty,
    or None if *deadline* passes first.
    """
    url = f"{_ASYNCDATA_BASE_URL}/source/{task_id}"
    while (left_ms := int((deadline - time.time()) * 1000)) > 0:
        result = request_json(
            method="GET",
            url=url,
            headers=None,
            json_body=None,
            timeout_ms=min(30_000, left_ms),
            proxy_url=self.proxy_url,
        )
        payload = result.get("content")
        if isinstance(payload, str) and payload:
            return payload
        # Not ready yet; back off up to 2s, never sleeping past the deadline.
        time.sleep(min(2.0, max(0.0, deadline - time.time())))
    return None
|
|
1085
|
+
|
|
1086
|
+
def list_models(self, *, timeout_ms: int | None = None) -> list[str]:
|
|
1087
|
+
"""
|
|
1088
|
+
Fetch remote model ids by querying each underlying protocol adapter (when configured).
|
|
1089
|
+
"""
|
|
1090
|
+
out: set[str] = set()
|
|
1091
|
+
if self.openai is not None:
|
|
1092
|
+
try:
|
|
1093
|
+
openai_models = self.openai.list_models(timeout_ms=timeout_ms)
|
|
1094
|
+
except GenAIError:
|
|
1095
|
+
openai_models = []
|
|
1096
|
+
if openai_models:
|
|
1097
|
+
return openai_models
|
|
1098
|
+
if self.gemini is not None:
|
|
1099
|
+
try:
|
|
1100
|
+
out.update(self.gemini.list_models(timeout_ms=timeout_ms))
|
|
1101
|
+
except GenAIError:
|
|
1102
|
+
pass
|
|
1103
|
+
if self.anthropic is not None:
|
|
1104
|
+
try:
|
|
1105
|
+
out.update(self.anthropic.list_models(timeout_ms=timeout_ms))
|
|
1106
|
+
except GenAIError:
|
|
1107
|
+
pass
|
|
1108
|
+
return sorted(out)
|
|
1109
|
+
|
|
1110
|
+
def _route(self, model_id: str):
|
|
1111
|
+
mid = model_id.strip()
|
|
1112
|
+
if not mid:
|
|
1113
|
+
raise invalid_request_error("model_id must not be empty")
|
|
1114
|
+
mid_l = mid.lower()
|
|
1115
|
+
|
|
1116
|
+
if mid_l.startswith("claude-"):
|
|
1117
|
+
if self.anthropic is None:
|
|
1118
|
+
raise invalid_request_error(
|
|
1119
|
+
"NOUS_GENAI_TUZI_ANTHROPIC_API_KEY/TUZI_ANTHROPIC_API_KEY "
|
|
1120
|
+
"(or NOUS_GENAI_TUZI_WEB_API_KEY/TUZI_WEB_API_KEY) not configured"
|
|
1121
|
+
)
|
|
1122
|
+
return self.anthropic
|
|
1123
|
+
|
|
1124
|
+
if mid_l.startswith(("models/", "gemini-", "gemma-", "veo-")):
|
|
1125
|
+
if self.gemini is None:
|
|
1126
|
+
raise invalid_request_error(
|
|
1127
|
+
"NOUS_GENAI_TUZI_GOOGLE_API_KEY/TUZI_GOOGLE_API_KEY "
|
|
1128
|
+
"(or NOUS_GENAI_TUZI_WEB_API_KEY/TUZI_WEB_API_KEY) not configured"
|
|
1129
|
+
)
|
|
1130
|
+
return self.gemini
|
|
1131
|
+
if mid_l.startswith("veo2"):
|
|
1132
|
+
if self.gemini is None:
|
|
1133
|
+
raise invalid_request_error(
|
|
1134
|
+
"NOUS_GENAI_TUZI_GOOGLE_API_KEY/TUZI_GOOGLE_API_KEY "
|
|
1135
|
+
"(or NOUS_GENAI_TUZI_WEB_API_KEY/TUZI_WEB_API_KEY) not configured"
|
|
1136
|
+
)
|
|
1137
|
+
return self.gemini
|
|
1138
|
+
|
|
1139
|
+
if mid_l in {
|
|
1140
|
+
"text-embedding-004",
|
|
1141
|
+
"embedding-001",
|
|
1142
|
+
"embedding-gecko-001",
|
|
1143
|
+
"gemini-embedding-001",
|
|
1144
|
+
"gemini-embedding-exp-03-07",
|
|
1145
|
+
}:
|
|
1146
|
+
if self.gemini is None:
|
|
1147
|
+
raise invalid_request_error(
|
|
1148
|
+
"NOUS_GENAI_TUZI_GOOGLE_API_KEY/TUZI_GOOGLE_API_KEY "
|
|
1149
|
+
"(or NOUS_GENAI_TUZI_WEB_API_KEY/TUZI_WEB_API_KEY) not configured"
|
|
1150
|
+
)
|
|
1151
|
+
return self.gemini
|
|
1152
|
+
|
|
1153
|
+
if self.openai is None:
|
|
1154
|
+
raise invalid_request_error(
|
|
1155
|
+
"NOUS_GENAI_TUZI_OPENAI_API_KEY/TUZI_OPENAI_API_KEY "
|
|
1156
|
+
"(or NOUS_GENAI_TUZI_WEB_API_KEY/TUZI_WEB_API_KEY) not configured"
|
|
1157
|
+
)
|
|
1158
|
+
return self.openai
|