nous-genai 0.1.0__py3-none-any.whl → 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
nous/genai/_internal/capability_rules.py CHANGED
@@ -63,7 +63,6 @@ _TTS_PREFIX: Final[str] = "tts-"
  _TTS_SUFFIX: Final[str] = "-tts"
  _VOICE_SUFFIXES: Final[tuple[str, ...]] = ("-voice", "_voice")
  _ADVANCED_VOICE_MODEL: Final[str] = "advanced-voice"
- _SUNO_PREFIX: Final[str] = "suno-"
  _CHIRP_PREFIX: Final[str] = "chirp-"

  _WHISPER_PREFIX: Final[str] = "whisper-"
@@ -223,7 +222,6 @@ def is_tts_model(model_id: str) -> bool:
      mid_l = _norm(model_id)
      return (
          mid_l.startswith(_TTS_PREFIX)
-         or mid_l.startswith(_SUNO_PREFIX)
          or mid_l.startswith(_CHIRP_PREFIX)
          or mid_l.endswith(_TTS_SUFFIX)
          or mid_l.endswith(_VOICE_SUFFIXES)
nous/genai/cli.py CHANGED
@@ -109,6 +109,15 @@ def main(argv: list[str] | None = None) -> None:
      parser.add_argument("--video-path", help="Input video file path")
      parser.add_argument("--output-path", help="Write output to file (text/json/binary)")
      parser.add_argument("--ouput-path", dest="output_path", help=argparse.SUPPRESS)
+     parser.add_argument(
+         "--job-id",
+         help="Resume/poll a provider job id (tuzi-web only for now); ignores --prompt/--*-path",
+     )
+     parser.add_argument(
+         "--no-wait",
+         action="store_true",
+         help="Do not wait for job completion (returns job_id if supported)",
+     )
      parser.add_argument(
          "--timeout-ms",
          type=int,
@@ -184,7 +193,25 @@ def main(argv: list[str] | None = None) -> None:
      except BrokenPipeError:
          return

+     client = Client()
      provider, model_id = _split_model(args.model)
+     _apply_protocol_override(client, provider=provider, protocol=args.protocol)
+
+     cap = client.capabilities(args.model)
+     output = _infer_output_spec(provider=provider, model_id=model_id, cap=cap)
+
+     if args.job_id:
+         _run_job(
+             client,
+             provider=provider,
+             model_id=model_id,
+             job_id=str(args.job_id),
+             output=output,
+             output_path=args.output_path,
+             timeout_ms=timeout_ms,
+         )
+         return
+
      prompt = args.prompt
      if prompt is None and args.prompt_path:
          try:
@@ -192,11 +219,6 @@ def main(argv: list[str] | None = None) -> None:
                  prompt = f.read()
          except OSError as e:
              raise SystemExit(f"cannot read --prompt-path: {e}") from None
-     client = Client()
-     _apply_protocol_override(client, provider=provider, protocol=args.protocol)
-
-     cap = client.capabilities(args.model)
-     output = _infer_output_spec(provider=provider, model_id=model_id, cap=cap)

      parts = _build_input_parts(
          prompt=prompt,
@@ -212,7 +234,7 @@ def main(argv: list[str] | None = None) -> None:
          model=args.model,
          input=[Message(role="user", content=parts)],
          output=output,
-         wait=True,
+         wait=not bool(getattr(args, "no_wait", False)),
      )
      if timeout_ms is not None:
          req = replace(req, params=replace(req.params, timeout_ms=timeout_ms))
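Note: the request-level `wait` flag now mirrors the new `--no-wait` switch, and `replace` here is presumably `dataclasses.replace`, so the frozen request is rebuilt with the timeout rather than mutated. A minimal stdlib illustration (the `Params` class below is a stand-in, not the package's type):

    from dataclasses import dataclass, replace

    @dataclass(frozen=True)
    class Params:
        timeout_ms: int | None = None

    p = replace(Params(), timeout_ms=600_000)  # new frozen instance; the original is untouched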
@@ -229,21 +251,37 @@ def main(argv: list[str] | None = None) -> None:
          if resp.job and resp.job.job_id:
              print(resp.job.job_id)
          if resp.status == "running":
-             effective_timeout_ms = timeout_ms
-             if effective_timeout_ms is None:
-                 effective_timeout_ms = getattr(
-                     client, "_default_timeout_ms", None
+             status_note = ""
+             if resp.job.last_status:
+                 status_note += f" upstream_status={resp.job.last_status}"
+             if resp.job.last_detail:
+                 d = resp.job.last_detail
+                 if len(d) > 200:
+                     d = d[:200] + "..."
+                 status_note += f" fail_reason={d}"
+             if not req.wait:
+                 print(
+                     "[INFO] 已提交任务(未等待完成);已返回 job_id。"
+                     f"可用 --job-id {resp.job.job_id} 继续轮询/下载。",
+                     file=sys.stderr,
+                 )
+             else:
+                 effective_timeout_ms = timeout_ms
+                 if effective_timeout_ms is None:
+                     effective_timeout_ms = getattr(
+                         client, "_default_timeout_ms", None
+                     )
+                 timeout_note = (
+                     f"{effective_timeout_ms}ms"
+                     if isinstance(effective_timeout_ms, int)
+                     else "timeout"
+                 )
+                 print(
+                     f"[INFO] 任务仍在运行(等待 {elapsed_s:.1f}s,可能已超时 {timeout_note});已返回 job_id。"
+                     f"可用 --job-id {resp.job.job_id} 继续轮询/下载,或增大 --timeout-ms 重试。"
+                     f"{status_note}",
+                     file=sys.stderr,
                  )
-             timeout_note = (
-                 f"{effective_timeout_ms}ms"
-                 if isinstance(effective_timeout_ms, int)
-                 else "timeout"
-             )
-             print(
-                 f"[INFO] 任务仍在运行(等待 {elapsed_s:.1f}s,可能已超时 {timeout_note});已返回 job_id。"
-                 "可增大 --timeout-ms 或设置 NOUS_GENAI_TIMEOUT_MS 后重试。",
-                 file=sys.stderr,
-             )
          if args.output_path:
              print(
                  f"[INFO] 未写入输出文件:{args.output_path}",
@@ -275,6 +313,122 @@ _DEFAULT_VIDEO_URL = (
  )


+ def _run_job(
+     client: Client,
+     *,
+     provider: str,
+     model_id: str,
+     job_id: str,
+     output: OutputSpec,
+     output_path: str | None,
+     timeout_ms: int | None,
+ ) -> None:
+     provider = provider.strip().lower()
+     if provider != "tuzi-web":
+         raise SystemExit("--job-id only supported for provider=tuzi-web for now")
+
+     job_id = job_id.strip()
+     if not job_id:
+         raise SystemExit("--job-id must be non-empty")
+
+     adapter = client._adapter(provider)
+     from .providers import TuziAdapter
+
+     if not isinstance(adapter, TuziAdapter):
+         raise SystemExit("tuzi-web adapter not configured")
+
+     effective_timeout_ms = timeout_ms
+     if effective_timeout_ms is None:
+         effective_timeout_ms = getattr(client, "_default_timeout_ms", None)
+     if effective_timeout_ms is None:
+         effective_timeout_ms = 120_000
+
+     modalities = set(output.modalities)
+     mid_l = model_id.lower().strip()
+     is_chirp_music = (
+         modalities == {"audio"} and mid_l.startswith("chirp-") and mid_l != "chirp-v3"
+     )
+     if not is_chirp_music:
+         raise SystemExit("--job-id only supports tuzi-web chirp-* audio tasks for now")
+
+     def fn():
+         try:
+             host = adapter._base_host()
+             probe = adapter._suno_feed(
+                 host=host,
+                 ids=job_id,
+                 timeout_ms=min(10_000, int(effective_timeout_ms)),
+             )
+             clips = probe.get("clips")
+             clip_found = bool(
+                 isinstance(clips, list)
+                 and any(
+                     isinstance(c, dict)
+                     and isinstance(c.get("id"), str)
+                     and c.get("id") == job_id
+                     for c in clips
+                 )
+             )
+             if clip_found:
+                 return adapter._suno_wait_feed_audio(
+                     clip_id=job_id,
+                     model_id=model_id,
+                     timeout_ms=effective_timeout_ms,
+                     wait=True,
+                 )
+         except Exception:
+             pass
+         return adapter._suno_wait_fetch_audio(
+             task_id=job_id,
+             model_id=model_id,
+             timeout_ms=effective_timeout_ms,
+             wait=True,
+         )
+
+     show_progress = sys.stderr.isatty()
+     resp, elapsed_s = _run_with_spinner(fn, enabled=show_progress, label="等待任务完成")
+
+     if resp.status != "completed":
+         if resp.job and resp.job.job_id:
+             print(resp.job.job_id)
+         if resp.status == "running":
+             status_note = ""
+             if resp.job.last_status:
+                 status_note += f" upstream_status={resp.job.last_status}"
+             if resp.job.last_detail:
+                 d = resp.job.last_detail
+                 if len(d) > 200:
+                     d = d[:200] + "..."
+                 status_note += f" fail_reason={d}"
+             timeout_note = (
+                 f"{effective_timeout_ms}ms"
+                 if isinstance(effective_timeout_ms, int)
+                 else "timeout"
+             )
+             print(
+                 f"[INFO] 任务仍在运行(等待 {elapsed_s:.1f}s,可能已超时 {timeout_note});已返回 job_id。"
+                 f"可稍后重试 --job-id。{status_note}",
+                 file=sys.stderr,
+             )
+             if output_path:
+                 print(f"[INFO] 未写入输出文件:{output_path}", file=sys.stderr)
+         else:
+             raise SystemExit(f"[FAIL]: request status={resp.status}")
+         return
+
+     if not resp.output:
+         raise SystemExit("[FAIL]: missing output")
+     _write_response(
+         resp.output[0].content,
+         output=output,
+         output_path=output_path,
+         timeout_ms=timeout_ms,
+         download_auth=_download_auth(client, provider=provider),
+     )
+     if show_progress:
+         print(f"[INFO] 完成,用时 {elapsed_s:.1f}s", file=sys.stderr)
+
+
  def _run_probe(args: argparse.Namespace, *, timeout_ms: int | None) -> int:
      from .client import _normalize_provider
      from .reference import get_sdk_supported_models_for_provider
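Note: `_run_job` resumes a previously submitted tuzi-web chirp task: it first probes the suno feed for a clip whose id matches `--job-id` and polls the feed if one is found, otherwise it falls back to treating the id as a `/suno/fetch/{task_id}` task id. A condensed sketch of that decision, using the same adapter calls as the hunk above (the model id and timeouts below are placeholders):

    def resume_chirp_job(adapter, job_id: str, model_id: str = "chirp-v3-5"):
        # adapter is assumed to be a configured TuziAdapter (see client._adapter above)
        probe = adapter._suno_feed(host=adapter._base_host(), ids=job_id, timeout_ms=10_000)
        clips = probe.get("clips")
        clip_found = isinstance(clips, list) and any(
            isinstance(c, dict) and c.get("id") == job_id for c in clips
        )
        if clip_found:
            return adapter._suno_wait_feed_audio(
                clip_id=job_id, model_id=model_id, timeout_ms=120_000, wait=True
            )
        return adapter._suno_wait_fetch_audio(
            task_id=job_id, model_id=model_id, timeout_ms=120_000, wait=True
        )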
nous/genai/providers/tuzi.py CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
  import json
  import re
  import time
+ import urllib.parse
  from dataclasses import dataclass, replace
  from typing import Any, Iterator
  from uuid import uuid4
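Note: `urllib.parse` is imported for the new `_suno_feed` helper further down, which passes the clip id as a query string instead of a path segment:

    import urllib.parse

    qs = urllib.parse.urlencode({"ids": "clip-123"})  # "ids=clip-123"
    url = f"https://api.tu-zi.com/suno/feed?{qs}"     # host shown for illustration; the adapter uses _base_host()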
@@ -53,32 +54,6 @@ _AUDIO_URL_RE = re.compile(
      re.IGNORECASE,
  )

- _SUNO_WORKFLOW_MODELS = frozenset(
-     {
-         "suno-all-stems",
-         "suno-continue",
-         "suno-continue-uploaded",
-         "suno-infill",
-         "suno-infill-uploaded",
-         "suno-midi",
-         "suno-overpainting",
-         "suno-remix",
-         "suno-remix-uploaded",
-         "suno-rewrite",
-         "suno-tags",
-         "suno-vocal-stems",
-         "suno_act_midi",
-         "suno_act_mp4",
-         "suno_act_stems",
-         "suno_act_tags",
-         "suno_act_timing",
-         "suno_act_wav",
-         "suno_concat",
-         "suno_persona_create",
-         "suno_uploads",
-     }
- )
-

  def _extract_first_url(pattern: re.Pattern[str], text: str) -> str | None:
      m = pattern.search(text)
@@ -135,6 +110,10 @@ class TuziAdapter:

      def capabilities(self, model_id: str) -> Capability:
          mid_l = model_id.lower().strip()
+         if mid_l.startswith(("suno-", "suno_")):
+             raise invalid_request_error(
+                 "suno model ids are not supported; use chirp-* (e.g. chirp-v3-5)"
+             )
          if mid_l in {"kling_image", "seededit"}:
              return Capability(
                  input_modalities={"text", "image"},
@@ -144,27 +123,7 @@ class TuziAdapter:
              supports_tools=False,
              supports_json_schema=False,
          )
-         if mid_l in _SUNO_WORKFLOW_MODELS:
-             return Capability(
-                 input_modalities={"text"},
-                 output_modalities={"audio"},
-                 supports_stream=False,
-                 supports_job=True,
-                 supports_tools=False,
-                 supports_json_schema=False,
-             )
-         if mid_l == "suno_lyrics":
-             return Capability(
-                 input_modalities={"text"},
-                 output_modalities={"text"},
-                 supports_stream=False,
-                 supports_job=True,
-                 supports_tools=False,
-                 supports_json_schema=False,
-             )
-         if mid_l == "suno_music" or (
-             mid_l.startswith("chirp-") and mid_l != "chirp-v3"
-         ):
+         if mid_l.startswith("chirp-") and mid_l != "chirp-v3":
              return Capability(
                  input_modalities={"text"},
                  output_modalities={"audio"},
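Note: `suno-*`/`suno_*` model ids are now rejected up front (here and again in the generate path below); only `chirp-*` ids other than bare `chirp-v3` keep the text-in/audio-out capability. Expected behavior, sketched against the Client calls used in cli.py (the import path is assumed):

    from nous.genai import Client  # assumed export

    client = Client()
    client.capabilities("tuzi-web:chirp-v3-5")  # Capability with audio output, as above
    client.capabilities("tuzi-web:suno_music")  # expected to raise an invalid-request error pointing at chirp-*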
@@ -191,6 +150,11 @@ class TuziAdapter:
          mid_l = model_id.lower().strip()
          modalities = set(request.output.modalities)

+         if mid_l.startswith(("suno-", "suno_")):
+             raise invalid_request_error(
+                 "suno model ids are not supported; use chirp-* (e.g. chirp-v3-5)"
+             )
+
          if modalities == {"video"} and mid_l.startswith("pika-"):
              raise not_supported_error(
                  "tuzi pika endpoints are not available on api.tu-zi.com (returns HTML)"
@@ -231,27 +195,14 @@ class TuziAdapter:
              return self._kling_text2image(request, model_id=model_id)
          return self._seededit(request, model_id=model_id)

-         if modalities == {"text"} and mid_l == "suno_lyrics":
-             if stream:
-                 raise invalid_request_error(
-                     "suno lyrics generation does not support streaming"
-                 )
-             return self._suno_lyrics(request)
-
-         if modalities == {"audio"} and mid_l in _SUNO_WORKFLOW_MODELS:
-             if stream:
-                 raise invalid_request_error(
-                     "suno workflow endpoints do not support streaming"
-                 )
-             return self._suno_workflow(request, model_id=model_id)
-
-         if modalities == {"audio"} and (
-             mid_l == "suno_music"
-             or (mid_l.startswith("chirp-") and mid_l != "chirp-v3")
+         if (
+             modalities == {"audio"}
+             and mid_l.startswith("chirp-")
+             and mid_l != "chirp-v3"
          ):
              if stream:
                  raise invalid_request_error(
-                     "suno music generation does not support streaming"
+                     "chirp music generation does not support streaming"
                  )
              return self._suno_music(request, model_id=model_id)

@@ -633,41 +584,30 @@ class TuziAdapter:
              job=JobInfo(job_id=task_id, poll_after_ms=1_000),
          )

-     def _suno_lyrics(self, request: GenerateRequest) -> GenerateResponse:
-         prompt = self._single_text_prompt(request)
-         host = self._base_host()
-         obj = request_json(
-             method="POST",
-             url=f"{host}/suno/submit/lyrics",
-             headers=self._bearer_headers(),
-             json_body={"prompt": prompt},
-             timeout_ms=max(request.params.timeout_ms or 60_000, 60_000),
-             proxy_url=self.proxy_url,
-         )
-         task_id = obj.get("data")
-         if not isinstance(task_id, str) or not task_id:
-             raise provider_error("suno lyrics submit missing task id")
-         return self._suno_wait_fetch_text(
-             task_id=task_id,
-             model_id="suno_lyrics",
-             timeout_ms=request.params.timeout_ms,
-             wait=request.wait,
-         )
-
      def _suno_music(
          self, request: GenerateRequest, *, model_id: str
      ) -> GenerateResponse:
          prompt = self._single_text_prompt(request)
          host = self._base_host()
-         mv = model_id if model_id.lower().startswith("chirp-") else "chirp-v3-5"
-         body: dict[str, object] = {
-             "prompt": prompt,
-             "tags": "",
-             "mv": mv,
-             "title": "suno",
-             "infill_start_s": None,
-             "infill_end_s": None,
-         }
+         mid_l = model_id.lower().strip()
+         if not (mid_l.startswith("chirp-") and mid_l != "chirp-v3"):
+             raise invalid_request_error(
+                 f"unsupported music model_id: {model_id} (use chirp-*, e.g. chirp-v3-5)"
+             )
+         body: dict[str, object] = {"prompt": prompt, "mv": model_id}
+         opts = request.provider_options.get("tuzi-web")
+         if isinstance(opts, dict):
+             for k, v in opts.items():
+                 if k in body:
+                     raise invalid_request_error(f"provider_options cannot override {k}")
+                 if (
+                     k == "tags"
+                     and isinstance(v, list)
+                     and all(isinstance(x, str) and x.strip() for x in v)
+                 ):
+                     body["tags"] = ",".join([x.strip() for x in v])
+                     continue
+                 body[k] = v
          obj = request_json(
              method="POST",
              url=f"{host}/suno/submit/music",
@@ -676,87 +616,97 @@ class TuziAdapter:
              timeout_ms=max(request.params.timeout_ms or 60_000, 60_000),
              proxy_url=self.proxy_url,
          )
-         task_id = obj.get("data")
-         if not isinstance(task_id, str) or not task_id:
-             raise provider_error("suno music submit missing task id")
-         return self._suno_wait_fetch_audio(
-             task_id=task_id,
-             model_id=model_id,
-             timeout_ms=request.params.timeout_ms,
-             wait=request.wait,
-         )

-     def _suno_workflow_endpoint(self, model_id: str) -> str:
-         mid_l = model_id.lower().strip()
-         if mid_l == "suno_concat":
-             return "/suno/submit/concat"
-         if mid_l == "suno_uploads":
-             return "/suno/submit/upload"
-         if mid_l == "suno_persona_create":
-             return "/suno/submit/persona-create"
-         if mid_l.startswith("suno_act_"):
-             suffix = mid_l[len("suno_act_") :]
-             if not suffix:
-                 raise invalid_request_error("invalid suno_act model id")
-             return f"/suno/submit/act-{suffix}"
-         if mid_l.startswith("suno-"):
-             suffix = mid_l[len("suno-") :]
-             if not suffix:
-                 raise invalid_request_error("invalid suno model id")
-             return f"/suno/submit/{suffix}"
-         raise invalid_request_error(f"unsupported suno workflow model: {model_id}")
-
-     def _suno_workflow(
-         self, request: GenerateRequest, *, model_id: str
-     ) -> GenerateResponse:
-         host = self._base_host()
-         endpoint = self._suno_workflow_endpoint(model_id)
+         clip_id: str | None = None
+         audio_url: str | None = None
+         data = obj.get("data")
+         sources: list[dict[str, object]] = [obj]
+         if isinstance(data, dict):
+             sources.append(data)
+         for src in sources:
+             clips = src.get("clips")
+             if not isinstance(clips, list):
+                 continue
+             for clip in clips:
+                 if not isinstance(clip, dict):
+                     continue
+                 if clip_id is None:
+                     cid = clip.get("id")
+                     if isinstance(cid, str) and cid.strip():
+                         clip_id = cid.strip()
+                 if audio_url is None:
+                     au = clip.get("audio_url")
+                     if isinstance(au, str) and au.strip():
+                         audio_url = au.strip()
+                 if clip_id is not None and audio_url is not None:
+                     break
+             if clip_id is not None:
+                 break

-         body: dict[str, object] = {}
-         opts = request.provider_options.get("tuzi-web")
-         if isinstance(opts, dict):
-             body.update(opts)
+         if request.wait and audio_url:
+             part = Part(
+                 type="audio",
+                 mime_type="audio/mpeg",
+                 source=PartSourceUrl(url=audio_url),
+             )
+             return GenerateResponse(
+                 id=f"sdk_{uuid4().hex}",
+                 provider="tuzi-web",
+                 model=f"tuzi-web:{model_id}",
+                 status="completed",
+                 output=[Message(role="assistant", content=[part])],
+             )

-         prompt = self._text_prompt_or_none(request)
-         if prompt and "prompt" not in body:
-             body["prompt"] = prompt
+         task_id: str | None = None
+         if isinstance(data, str) and data.strip():
+             task_id = data.strip()
+         for src in sources:
+             if task_id is not None:
+                 break
+             for key in ("task_id", "id"):
+                 v = src.get(key)
+                 if isinstance(v, str) and v.strip():
+                     task_id = v.strip()
+                     break
+         if task_id is not None:
+             return self._suno_wait_fetch_audio(
+                 task_id=task_id,
+                 model_id=model_id,
+                 timeout_ms=request.params.timeout_ms,
+                 wait=request.wait,
+             )

-         obj = request_json(
-             method="POST",
-             url=f"{host}{endpoint}",
-             headers=self._bearer_headers(),
-             json_body=body,
-             timeout_ms=max(request.params.timeout_ms or 60_000, 60_000),
-             proxy_url=self.proxy_url,
-         )
-         task_id = obj.get("data")
-         if not isinstance(task_id, str) or not task_id:
-             raise provider_error("suno submit missing task id")
-         return self._suno_wait_fetch_any(
-             task_id=task_id,
-             model_id=model_id,
-             timeout_ms=request.params.timeout_ms,
-             wait=request.wait,
+         if clip_id is not None:
+             return self._suno_wait_feed_audio(
+                 clip_id=clip_id,
+                 model_id=model_id,
+                 timeout_ms=request.params.timeout_ms,
+                 wait=request.wait,
+             )
+
+         keys = (
+             ",".join(sorted([k for k in obj.keys() if isinstance(k, str)])) or "<none>"
          )
+         raise provider_error(f"suno music submit missing clip id (keys={keys})")

-     def _suno_fetch(
-         self, *, host: str, task_id: str, timeout_ms: int | None
+     def _suno_feed(
+         self, *, host: str, ids: str, timeout_ms: int | None
      ) -> dict[str, object]:
+         qs = urllib.parse.urlencode({"ids": ids})
          obj = request_json(
              method="GET",
-             url=f"{host}/suno/fetch/{task_id}",
+             url=f"{host}/suno/feed?{qs}",
              headers=self._bearer_headers(),
              json_body=None,
              timeout_ms=timeout_ms,
              proxy_url=self.proxy_url,
          )
-         data = obj.get("data")
-         if not isinstance(data, dict):
-             raise provider_error("suno fetch missing data")
-         return data
+         if not isinstance(obj, dict):
+             raise provider_error("suno feed invalid response", retryable=True)
+         return obj

-     def _suno_wait_fetch_text(
-         self, *, task_id: str, model_id: str, timeout_ms: int | None, wait: bool
+     def _suno_wait_feed_audio(
+         self, *, clip_id: str, model_id: str, timeout_ms: int | None, wait: bool
      ) -> GenerateResponse:
          if not wait:
              return GenerateResponse(
@@ -764,102 +714,51 @@ class TuziAdapter:
                  provider="tuzi-web",
                  model=f"tuzi-web:{model_id}",
                  status="running",
-                 job=JobInfo(job_id=task_id, poll_after_ms=2_000),
-             )
-         host = self._base_host()
-         budget_ms = 60_000 if timeout_ms is None else timeout_ms
-         deadline = time.time() + max(1, budget_ms) / 1000.0
-         while True:
-             remaining_ms = int((deadline - time.time()) * 1000)
-             if remaining_ms <= 0:
-                 break
-             data = self._suno_fetch(
-                 host=host, task_id=task_id, timeout_ms=min(30_000, remaining_ms)
+                 job=JobInfo(job_id=clip_id, poll_after_ms=2_000),
              )
-             status = data.get("status")
-             if status == "SUCCESS":
-                 inner = data.get("data")
-                 if isinstance(inner, dict):
-                     text = inner.get("text")
-                     if isinstance(text, str):
-                         return GenerateResponse(
-                             id=f"sdk_{uuid4().hex}",
-                             provider="tuzi-web",
-                             model=f"tuzi-web:{model_id}",
-                             status="completed",
-                             output=[
-                                 Message(
-                                     role="assistant", content=[Part.from_text(text)]
-                                 )
-                             ],
-                         )
-                 raise provider_error("suno lyrics succeeded but missing text")
-             if status == "FAIL":
-                 raise provider_error(f"suno task failed: {data.get('fail_reason')}")
-             time.sleep(min(2.0, max(0.0, deadline - time.time())))

-         return GenerateResponse(
-             id=f"sdk_{uuid4().hex}",
-             provider="tuzi-web",
-             model=f"tuzi-web:{model_id}",
-             status="running",
-             job=JobInfo(job_id=task_id, poll_after_ms=2_000),
-         )
-
-     def _suno_wait_fetch_audio(
-         self, *, task_id: str, model_id: str, timeout_ms: int | None, wait: bool
-     ) -> GenerateResponse:
-         if not wait:
-             return GenerateResponse(
-                 id=f"sdk_{uuid4().hex}",
-                 provider="tuzi-web",
-                 model=f"tuzi-web:{model_id}",
-                 status="running",
-                 job=JobInfo(job_id=task_id, poll_after_ms=2_000),
-             )
          host = self._base_host()
          budget_ms = 120_000 if timeout_ms is None else timeout_ms
          deadline = time.time() + max(1, budget_ms) / 1000.0
+         last_status: str | None = None
+         last_detail: str | None = None
         while True:
             remaining_ms = int((deadline - time.time()) * 1000)
             if remaining_ms <= 0:
                 break
-             data = self._suno_fetch(
-                 host=host, task_id=task_id, timeout_ms=min(30_000, remaining_ms)
+
+             data = self._suno_feed(
+                 host=host, ids=clip_id, timeout_ms=min(30_000, remaining_ms)
              )
-             status = data.get("status")
-             if status == "SUCCESS":
-                 inner = data.get("data")
-                 urls: list[str] = []
-                 if isinstance(inner, dict):
-                     u = inner.get("audio_url")
-                     if isinstance(u, str) and u:
-                         urls.append(u)
-                     clips = inner.get("clips")
-                     if isinstance(clips, list):
-                         for clip in clips:
-                             if not isinstance(clip, dict):
-                                 continue
-                             u = clip.get("audio_url")
-                             if isinstance(u, str) and u:
-                                 urls.append(u)
-                 elif isinstance(inner, list):
-                     for clip in inner:
-                         if not isinstance(clip, dict):
-                             continue
-                         u = clip.get("audio_url")
-                         if isinstance(u, str) and u:
-                             urls.append(u)
-                 if not urls:
-                     blob = json.dumps(inner, ensure_ascii=False)
-                     u = _extract_first_url(_AUDIO_URL_RE, blob)
-                     if u:
-                         urls.append(u)
-                 if urls:
+             clips = data.get("clips")
+             clip: dict[str, object] | None = None
+             if isinstance(clips, list):
+                 for c in clips:
+                     if not isinstance(c, dict):
+                         continue
+                     cid = c.get("id")
+                     if isinstance(cid, str) and cid.strip() == clip_id:
+                         clip = c
+                         break
+                 if clip is None and clips and isinstance(clips[0], dict):
+                     clip = clips[0]
+
+             if clip is not None:
+                 raw_status = clip.get("status")
+                 status = (
+                     raw_status.strip().upper() if isinstance(raw_status, str) else ""
+                 )
+                 if status:
+                     last_status = status
+                 elif raw_status is not None:
+                     last_status = str(raw_status)
+
+                 au = clip.get("audio_url")
+                 if isinstance(au, str) and au.strip():
                     part = Part(
                         type="audio",
                         mime_type="audio/mpeg",
-                         source=PartSourceUrl(url=urls[0]),
+                         source=PartSourceUrl(url=au.strip()),
                     )
                     return GenerateResponse(
                         id=f"sdk_{uuid4().hex}",
@@ -868,9 +767,23 @@ class TuziAdapter:
                         status="completed",
                         output=[Message(role="assistant", content=[part])],
                     )
-                 raise provider_error("suno music succeeded but missing audio url")
-             if status == "FAIL":
-                 raise provider_error(f"suno task failed: {data.get('fail_reason')}")
+
+                 if status in {"FAIL", "FAILED", "ERROR"}:
+                     meta = clip.get("metadata")
+                     if isinstance(meta, dict):
+                         err = meta.get("error_message")
+                         if isinstance(err, str) and err:
+                             last_detail = err
+                     raise provider_error(
+                         f"suno feed task failed: {last_detail or ''}".strip()
+                     )
+
+                 meta = clip.get("metadata")
+                 if isinstance(meta, dict):
+                     err = meta.get("error_message")
+                     if isinstance(err, str) and err:
+                         last_detail = err
+
             time.sleep(min(2.0, max(0.0, deadline - time.time())))

         return GenerateResponse(
@@ -878,10 +791,31 @@ class TuziAdapter:
             provider="tuzi-web",
             model=f"tuzi-web:{model_id}",
             status="running",
-             job=JobInfo(job_id=task_id, poll_after_ms=2_000),
+             job=JobInfo(
+                 job_id=clip_id,
+                 poll_after_ms=2_000,
+                 last_status=last_status,
+                 last_detail=last_detail,
+             ),
         )

-     def _suno_wait_fetch_any(
+     def _suno_fetch(
+         self, *, host: str, task_id: str, timeout_ms: int | None
+     ) -> dict[str, object]:
+         obj = request_json(
+             method="GET",
+             url=f"{host}/suno/fetch/{task_id}",
+             headers=self._bearer_headers(),
+             json_body=None,
+             timeout_ms=timeout_ms,
+             proxy_url=self.proxy_url,
+         )
+         data = obj.get("data")
+         if not isinstance(data, dict):
+             raise provider_error("suno fetch missing data")
+         return data
+
+     def _suno_wait_fetch_audio(
         self, *, task_id: str, model_id: str, timeout_ms: int | None, wait: bool
     ) -> GenerateResponse:
         if not wait:
@@ -895,6 +829,8 @@ class TuziAdapter:
         host = self._base_host()
         budget_ms = 120_000 if timeout_ms is None else timeout_ms
         deadline = time.time() + max(1, budget_ms) / 1000.0
+         last_status: str | None = None
+         last_detail: str | None = None
         while True:
             remaining_ms = int((deadline - time.time()) * 1000)
             if remaining_ms <= 0:
@@ -902,63 +838,57 @@ class TuziAdapter:
             data = self._suno_fetch(
                 host=host, task_id=task_id, timeout_ms=min(30_000, remaining_ms)
             )
-             status = data.get("status")
-             if status == "SUCCESS":
-                 inner = data.get("data")
-                 parts: list[Part] = []
-                 blob = json.dumps(inner, ensure_ascii=False)
-
-                 audio_urls: list[str] = []
-                 if isinstance(inner, dict):
-                     clips = inner.get("clips")
+             raw_status = data.get("status")
+             status = raw_status.strip().upper() if isinstance(raw_status, str) else ""
+             if status:
+                 last_status = status
+             elif raw_status is not None:
+                 last_status = str(raw_status)
+             fail_reason = data.get("fail_reason")
+             if isinstance(fail_reason, str) and fail_reason:
+                 last_detail = fail_reason
+
+             def _collect_urls(obj: object) -> list[str]:
+                 urls: list[str] = []
+                 if isinstance(obj, dict):
+                     u = obj.get("audio_url")
+                     if isinstance(u, str) and u.strip():
+                         urls.append(u.strip())
+                     clips = obj.get("clips")
                     if isinstance(clips, list):
                         for clip in clips:
-                             if not isinstance(clip, dict):
-                                 continue
-                             u = clip.get("audio_url")
-                             if isinstance(u, str) and u:
-                                 audio_urls.append(u)
-
-                 text = inner.get("text")
-                 if isinstance(text, str) and text:
-                     parts.append(Part.from_text(text))
-
-                 if not audio_urls:
-                     u = _extract_first_url(_AUDIO_URL_RE, blob)
-                     if u:
-                         audio_urls.append(u)
-                 for u in audio_urls:
-                     parts.append(
-                         Part(
-                             type="audio",
-                             mime_type="audio/mpeg",
-                             source=PartSourceUrl(url=u),
-                         )
-                     )
-
-                 mp4 = _extract_first_url(_MP4_URL_RE, blob)
-                 if mp4:
-                     parts.append(
-                         Part(
-                             type="video",
-                             mime_type="video/mp4",
-                             source=PartSourceUrl(url=mp4),
-                         )
-                     )
-
-                 if not parts:
-                     parts.append(
-                         Part.from_text(blob if blob and blob != "null" else "{}")
-                     )
-
+                             urls.extend(_collect_urls(clip))
+                 elif isinstance(obj, list):
+                     for item in obj:
+                         urls.extend(_collect_urls(item))
+                 return urls
+
+             inner = data.get("data")
+             urls = _collect_urls(data) + _collect_urls(inner)
+             if not urls:
+                 blob = json.dumps(
+                     inner if inner is not None else data, ensure_ascii=False
+                 )
+                 u = _extract_first_url(_AUDIO_URL_RE, blob)
+                 if u:
+                     urls.append(u)
+             if urls:
+                 part = Part(
+                     type="audio",
+                     mime_type="audio/mpeg",
+                     source=PartSourceUrl(url=urls[0]),
+                 )
                 return GenerateResponse(
                     id=f"sdk_{uuid4().hex}",
                     provider="tuzi-web",
                     model=f"tuzi-web:{model_id}",
                     status="completed",
-                     output=[Message(role="assistant", content=parts)],
+                     output=[Message(role="assistant", content=[part])],
                 )
-             if status == "FAIL":
+
+             if status in {"SUCCESS", "SUCCEEDED", "COMPLETE", "COMPLETED", "DONE"}:
+                 raise provider_error("suno music succeeded but missing audio url")
+             if status in {"FAIL", "FAILED", "ERROR"}:
                 raise provider_error(f"suno task failed: {data.get('fail_reason')}")
             time.sleep(min(2.0, max(0.0, deadline - time.time())))

@@ -967,7 +897,12 @@ class TuziAdapter:
             provider="tuzi-web",
             model=f"tuzi-web:{model_id}",
             status="running",
-             job=JobInfo(job_id=task_id, poll_after_ms=2_000),
+             job=JobInfo(
+                 job_id=task_id,
+                 poll_after_ms=2_000,
+                 last_status=last_status,
+                 last_detail=last_detail,
+             ),
         )

     def _deepsearch(
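Note: URL scraping from the fetch payload is now funnelled through the local `_collect_urls` closure, which walks dicts and lists for `audio_url` values (including nested `clips`). For reference, the same logic as a standalone function with a worked example:

    def collect_urls(obj: object) -> list[str]:
        # mirrors the _collect_urls closure added in the hunk above
        urls: list[str] = []
        if isinstance(obj, dict):
            u = obj.get("audio_url")
            if isinstance(u, str) and u.strip():
                urls.append(u.strip())
            clips = obj.get("clips")
            if isinstance(clips, list):
                for clip in clips:
                    urls.extend(collect_urls(clip))
        elif isinstance(obj, list):
            for item in obj:
                urls.extend(collect_urls(item))
        return urls

    collect_urls({"clips": [{"id": "c1", "audio_url": "https://example.com/a.mp3"}]})
    # -> ["https://example.com/a.mp3"]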
nous/genai/reference/model_catalog_data/tuzi_web.py CHANGED
@@ -126,9 +126,9 @@ MODELS: list[str] = [
      "chirp-auk",
      "chirp-bluejay",
      "chirp-crow",
+     "chirp-v3-0",
+     "chirp-v3-5",
      "chirp-v4",
-     "suno-v3",
-     "suno_lyrics",
      "sonar-medium-chat",
      "sonar-medium-online",
      "sonar-small-chat",
nous/genai/types.py CHANGED
@@ -339,6 +339,8 @@ class JobInfo:
      job_id: str
      poll_after_ms: int = 1_000
      expires_at: str | None = None
+     last_status: str | None = None
+     last_detail: str | None = None


  @dataclass(frozen=True, slots=True)
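Note: the two new optional fields let a still-running job report the upstream status and a failure detail alongside the id (the CLI surfaces them as `upstream_status=` / `fail_reason=`). Reconstructed shape of the dataclass after this change; the fields are the ones visible in the hunk, and the decorator is assumed to match the `@dataclass(frozen=True, slots=True)` convention shown below it:

    from dataclasses import dataclass

    @dataclass(frozen=True, slots=True)
    class JobInfo:
        job_id: str
        poll_after_ms: int = 1_000
        expires_at: str | None = None
        last_status: str | None = None  # added in 0.1.2
        last_detail: str | None = None  # added in 0.1.2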
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: nous-genai
- Version: 0.1.0
+ Version: 0.1.2
  Summary: Single-endpoint GenAI SDK (multi-provider, multimodal)
  License-Expression: Apache-2.0
  Project-URL: Homepage, https://github.com/gravtice/nous-genai
@@ -168,6 +168,11 @@ If you need to write to file, see `examples/demo.py` (`_write_binary()`), or reu
  uv run genai --model openai:gpt-4o-mini --prompt "Hello"
  uv run genai model available --all

+ # Tuzi Chirp music
+ uv run genai --model tuzi-web:chirp-v3-5 --prompt "Lo-fi hiphop beat, 30s" --no-wait
+ # ...later
+ uv run genai --model tuzi-web:chirp-v3-5 --job-id "<job_id>" --output-path demo_suno.mp3 --timeout-ms 600000
+
  # MCP Server
  uv run genai-mcp-server # Streamable HTTP: /mcp, SSE: /sse
  uv run genai-mcp-cli tools # Debug CLI
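Note: in the two-step flow above, the first command prints the provider job id on stdout (informational text goes to stderr), so it can be captured and fed back to `--job-id`. A hedged scripting sketch, assuming the `genai` entry point is on PATH (the subprocess call is illustrative, not part of the package):

    import subprocess

    proc = subprocess.run(
        ["genai", "--model", "tuzi-web:chirp-v3-5", "--prompt", "Lo-fi hiphop beat, 30s", "--no-wait"],
        capture_output=True, text=True, check=True,
    )
    job_id = proc.stdout.strip()  # pass this to: genai --model tuzi-web:chirp-v3-5 --job-id <job_id> ...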
@@ -2,13 +2,13 @@ nous/__init__.py,sha256=Hh6QnLL0rRIVOTu33-Yt8GxHSNGfckYHTIsNP5prvEE,32
  nous/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
  nous/genai/__init__.py,sha256=VY43n2cxXUP8ozW5vkW3P_WyEkGWYQY17NVHJHdRY24,1037
  nous/genai/__main__.py,sha256=bYt9eEaoRQWdejEHFD8REx9jxVEdZptECFsV7F49Ink,30
- nous/genai/cli.py,sha256=f_e1Ss1ehVw-lfVaF9wEuquP5e0Fib2FfSnr0zmnTrQ,42733
+ nous/genai/cli.py,sha256=H6cIhkMzaxnT7GeA9zWBpk8ePzhPwQupyxh5YnrIdZY,48227
  nous/genai/client.py,sha256=uKwttxMSkXCa1w8i7g3Mqs0L6ZCZZvMk7HdTXPfzoV0,26249
  nous/genai/mcp_cli.py,sha256=bhAhL4r9x3Xkzb_7NypfUELAHqdIiC23LQbSAGZIPAE,10356
  nous/genai/mcp_server.py,sha256=1NlJnAIV4LhMUDJ4BmH60KU7Jrx-bfivgIbcIaofPVU,40507
- nous/genai/types.py,sha256=g589VUakFeMEIN3-AwHHKP6PCcMDlD6svKZcrBeu0Ts,13011
+ nous/genai/types.py,sha256=ndAO2SsVTyoQEKt06A7UGt4fixeY1hw7nS30nIqdTh0,13081
  nous/genai/_internal/__init__.py,sha256=U4S_2y3zgLZVfMenHRaJFBW8yqh2mUBuI291LGQVOJ8,35
- nous/genai/_internal/capability_rules.py,sha256=uCYeBDbJZx-S4Bib66oi_v3nZXS7dG84X0eEDifMjbY,13299
+ nous/genai/_internal/capability_rules.py,sha256=aYoVSRVP4FK32Pgi7Qrs86J2Kj4g_GTPSgI9dqsSlts,13222
  nous/genai/_internal/config.py,sha256=JfT8uhU_QSNfPEKPU614pB_jRJbMBnVkbHnOm7UeYoc,3243
  nous/genai/_internal/errors.py,sha256=JZ2GIdFjae1I9ar7BCXmy01VenTrgvn4WfhbFNVF4i4,1639
  nous/genai/_internal/http.py,sha256=XcO13YOs28N-Tq8WKi0a4REIHgWQxXBgzPxDM_MBSo0,30236
@@ -18,7 +18,7 @@ nous/genai/providers/aliyun.py,sha256=FOAhQOlHmJC3Nm5cagPbf6JQ94XtM-TKFVCH51PPSL
  nous/genai/providers/anthropic.py,sha256=MShEm1lAw3dpbCPcsRz9eqWHt_x20EBswe4cJ4-ZgHo,17920
  nous/genai/providers/gemini.py,sha256=Kb-oe7zVYlm--EGla1yZ0BavUx11GkAO1d0dtyLSC3A,58320
  nous/genai/providers/openai.py,sha256=HxhR3pABZMLPtECAq4rUwuDSsBM4bb7AF66AVYLjUIU,75303
- nous/genai/providers/tuzi.py,sha256=arkU5UBtcwb4bNOEEMcNW9hww8NNzXxc1-d7TAv_2Hg,44761
+ nous/genai/providers/tuzi.py,sha256=TVZpppT-MWcsmq5sCW6WDMF-usaKYkTAos7UVWakllM,42229
  nous/genai/providers/volcengine.py,sha256=hZVO9jIBLqKSqwZsysOoO_FtrLGGUvJVbfLDKzXITQc,9962
  nous/genai/reference/__init__.py,sha256=nfMsgNMUF1CHw3qslPA5Zniy8rBds9eISBpkCshbbJE,409
  nous/genai/reference/catalog.py,sha256=FeF3iabPNgNdWFR40ffAYTPHXfuEY4zT8G9asrlBD14,6633
@@ -33,13 +33,13 @@ nous/genai/reference/model_catalog_data/openai.py,sha256=siwx-htP7kPtlcDPzl6ommo
  nous/genai/reference/model_catalog_data/tuzi_anthropic.py,sha256=8jLckkoU0zwjoaTsAcdK03yY0u6iikK9p32HnL55mOk,519
  nous/genai/reference/model_catalog_data/tuzi_google.py,sha256=3S4mJfN1VtsNkTxbb42AeKc-oF84KRPTILA39QOpCew,540
  nous/genai/reference/model_catalog_data/tuzi_openai.py,sha256=K_w802dimRfQmmlgc4P2eI6ohvqAsQt2GoWb0K57BX0,1632
- nous/genai/reference/model_catalog_data/tuzi_web.py,sha256=wKYo9yndsi5yQA44HB2omQKz6esHvOqZKjMizOvFdbc,3081
+ nous/genai/reference/model_catalog_data/tuzi_web.py,sha256=BrTBuKGt6aEPtgSRl-82xJcGes7Z8onPLHe2bdrZh4A,3083
  nous/genai/reference/model_catalog_data/volcengine.py,sha256=GxDELF9z8sbYyOEu-SpYluCal0tTAfFFLkS9-vXt1hg,3657
  nous/genai/tools/__init__.py,sha256=YWNhBKZ4t6diXI5SlCHA5OAIhLLOOA3vwQ6jClJsO2o,292
  nous/genai/tools/output_parser.py,sha256=epHW-fLBatrjMTl2nMvh_vFWFEmP8Xj2pZ5fosaZxTc,3968
- nous_genai-0.1.0.dist-info/licenses/LICENSE,sha256=yMLznCFyvxXAx7UUyAtq1gYFyzRsqGngYiaRF9R33Lg,10757
- nous_genai-0.1.0.dist-info/METADATA,sha256=UmxjLMmPE1GA4FpMFbo2HrjdThAB_BDMg4fMnhr1AT4,5474
- nous_genai-0.1.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
- nous_genai-0.1.0.dist-info/entry_points.txt,sha256=j_cgAhxjPu7wtlo_zcdFLmz58dSxvnmSFM4f0A-ozEc,132
- nous_genai-0.1.0.dist-info/top_level.txt,sha256=yUcst4OAspsyKhX0y5ENzFkJKzR_gislA5MykV1pVbk,5
- nous_genai-0.1.0.dist-info/RECORD,,
+ nous_genai-0.1.2.dist-info/licenses/LICENSE,sha256=yMLznCFyvxXAx7UUyAtq1gYFyzRsqGngYiaRF9R33Lg,10757
+ nous_genai-0.1.2.dist-info/METADATA,sha256=umYJBfPzynwOUGrq5Gdf3qn8bJ6vtooqmO2lwCe08mE,5699
+ nous_genai-0.1.2.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ nous_genai-0.1.2.dist-info/entry_points.txt,sha256=j_cgAhxjPu7wtlo_zcdFLmz58dSxvnmSFM4f0A-ozEc,132
+ nous_genai-0.1.2.dist-info/top_level.txt,sha256=yUcst4OAspsyKhX0y5ENzFkJKzR_gislA5MykV1pVbk,5
+ nous_genai-0.1.2.dist-info/RECORD,,