ponyflash 0.1.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ponyflash-0.1.1/.gitignore +40 -0
- ponyflash-0.1.1/CHANGELOG.md +24 -0
- ponyflash-0.1.1/LICENSE +21 -0
- ponyflash-0.1.1/PKG-INFO +314 -0
- ponyflash-0.1.1/README.md +271 -0
- ponyflash-0.1.1/pyproject.toml +88 -0
- ponyflash-0.1.1/src/ponyflash/__init__.py +80 -0
- ponyflash-0.1.1/src/ponyflash/_base_client.py +447 -0
- ponyflash-0.1.1/src/ponyflash/_client.py +207 -0
- ponyflash-0.1.1/src/ponyflash/_compat.py +33 -0
- ponyflash-0.1.1/src/ponyflash/_constants.py +11 -0
- ponyflash-0.1.1/src/ponyflash/_exceptions.py +239 -0
- ponyflash-0.1.1/src/ponyflash/_files.py +98 -0
- ponyflash-0.1.1/src/ponyflash/_models.py +26 -0
- ponyflash-0.1.1/src/ponyflash/_resource.py +39 -0
- ponyflash-0.1.1/src/ponyflash/_types.py +107 -0
- ponyflash-0.1.1/src/ponyflash/_utils.py +18 -0
- ponyflash-0.1.1/src/ponyflash/_version.py +1 -0
- ponyflash-0.1.1/src/ponyflash/pagination.py +140 -0
- ponyflash-0.1.1/src/ponyflash/py.typed +0 -0
- ponyflash-0.1.1/src/ponyflash/resources/__init__.py +27 -0
- ponyflash-0.1.1/src/ponyflash/resources/account.py +35 -0
- ponyflash-0.1.1/src/ponyflash/resources/files.py +185 -0
- ponyflash-0.1.1/src/ponyflash/resources/generations.py +62 -0
- ponyflash-0.1.1/src/ponyflash/resources/images.py +170 -0
- ponyflash-0.1.1/src/ponyflash/resources/models.py +57 -0
- ponyflash-0.1.1/src/ponyflash/resources/music.py +154 -0
- ponyflash-0.1.1/src/ponyflash/resources/speech.py +144 -0
- ponyflash-0.1.1/src/ponyflash/resources/video.py +212 -0
- ponyflash-0.1.1/src/ponyflash/types/__init__.py +24 -0
- ponyflash-0.1.1/src/ponyflash/types/create_response.py +12 -0
- ponyflash-0.1.1/src/ponyflash/types/credit_balance.py +11 -0
- ponyflash-0.1.1/src/ponyflash/types/file_object.py +19 -0
- ponyflash-0.1.1/src/ponyflash/types/file_presign.py +15 -0
- ponyflash-0.1.1/src/ponyflash/types/generation.py +48 -0
- ponyflash-0.1.1/src/ponyflash/types/image_generate_params.py +21 -0
- ponyflash-0.1.1/src/ponyflash/types/model_detail.py +41 -0
- ponyflash-0.1.1/src/ponyflash/types/model_info.py +24 -0
- ponyflash-0.1.1/src/ponyflash/types/music_generate_params.py +19 -0
- ponyflash-0.1.1/src/ponyflash/types/recharge_response.py +12 -0
- ponyflash-0.1.1/src/ponyflash/types/speech_generate_params.py +26 -0
- ponyflash-0.1.1/src/ponyflash/types/video_generate_params.py +25 -0
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
# Byte-compiled / optimized
|
|
2
|
+
__pycache__/
|
|
3
|
+
*.py[cod]
|
|
4
|
+
*$py.class
|
|
5
|
+
|
|
6
|
+
# Distribution / packaging
|
|
7
|
+
build/
|
|
8
|
+
dist/
|
|
9
|
+
*.egg-info/
|
|
10
|
+
*.egg
|
|
11
|
+
.eggs/
|
|
12
|
+
|
|
13
|
+
# Virtual environments
|
|
14
|
+
.venv/
|
|
15
|
+
venv/
|
|
16
|
+
env/
|
|
17
|
+
|
|
18
|
+
# IDE
|
|
19
|
+
.idea/
|
|
20
|
+
.vscode/
|
|
21
|
+
*.swp
|
|
22
|
+
*.swo
|
|
23
|
+
*~
|
|
24
|
+
|
|
25
|
+
# Testing
|
|
26
|
+
.pytest_cache/
|
|
27
|
+
htmlcov/
|
|
28
|
+
.coverage
|
|
29
|
+
.coverage.*
|
|
30
|
+
coverage.xml
|
|
31
|
+
|
|
32
|
+
# mypy
|
|
33
|
+
.mypy_cache/
|
|
34
|
+
|
|
35
|
+
# ruff
|
|
36
|
+
.ruff_cache/
|
|
37
|
+
|
|
38
|
+
# OS
|
|
39
|
+
.DS_Store
|
|
40
|
+
Thumbs.db
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
# Changelog
|
|
2
|
+
|
|
3
|
+
All notable changes to this project will be documented in this file.
|
|
4
|
+
|
|
5
|
+
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
|
|
6
|
+
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
|
7
|
+
|
|
8
|
+
## [0.1.0] - 2026-03-09
|
|
9
|
+
|
|
10
|
+
### Added
|
|
11
|
+
|
|
12
|
+
- Initial release of PonyFlash Python SDK
|
|
13
|
+
- Sync (`PonyFlash`) and async (`AsyncPonyFlash`) client
|
|
14
|
+
- Image generation: text-to-image, image-to-image, multi-image, inpainting with mask
|
|
15
|
+
- Video generation: text-to-video, first-frame, first+last-frame, OmniHuman (portrait+audio), Motion Transfer (portrait+motion_video), multi-image reference, video remix
|
|
16
|
+
- Audio generation: text-to-audio
|
|
17
|
+
- Unified polling with `generate()` (blocking) and `submit()` (non-blocking)
|
|
18
|
+
- File uploads via presigned URL (Tencent Cloud COS)
|
|
19
|
+
- Unified `FileInput` supporting local paths, bytes, URLs, and file_ids
|
|
20
|
+
- Cursor-based pagination for model listing
|
|
21
|
+
- Model detail retrieval with supported modes and limits
|
|
22
|
+
- Account credits query and recharge link
|
|
23
|
+
- Custom exception hierarchy with automatic retry on transient errors
|
|
24
|
+
- PEP 561 `py.typed` marker for type checker support
|
ponyflash-0.1.1/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 PonyFlash Team
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
ponyflash-0.1.1/PKG-INFO
ADDED
|
@@ -0,0 +1,314 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: ponyflash
|
|
3
|
+
Version: 0.1.1
|
|
4
|
+
Summary: PonyFlash Python SDK — Image, Video, Speech & Music generation
|
|
5
|
+
Project-URL: Homepage, https://github.com/ponyflash/ponyflash-python
|
|
6
|
+
Project-URL: Documentation, https://docs.ponyflash.com
|
|
7
|
+
Project-URL: Repository, https://github.com/ponyflash/ponyflash-python
|
|
8
|
+
Project-URL: Changelog, https://github.com/ponyflash/ponyflash-python/blob/main/CHANGELOG.md
|
|
9
|
+
Project-URL: Issues, https://github.com/ponyflash/ponyflash-python/issues
|
|
10
|
+
Author-email: PonyFlash Team <dev@ponyflash.com>
|
|
11
|
+
License-Expression: MIT
|
|
12
|
+
License-File: LICENSE
|
|
13
|
+
Keywords: ai,audio-generation,image-generation,ponyflash,sdk,video-generation
|
|
14
|
+
Classifier: Development Status :: 4 - Beta
|
|
15
|
+
Classifier: Intended Audience :: Developers
|
|
16
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
17
|
+
Classifier: Programming Language :: Python :: 3
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
20
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
21
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
22
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
23
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
24
|
+
Classifier: Topic :: Multimedia :: Graphics
|
|
25
|
+
Classifier: Topic :: Multimedia :: Sound/Audio
|
|
26
|
+
Classifier: Topic :: Multimedia :: Video
|
|
27
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
28
|
+
Classifier: Typing :: Typed
|
|
29
|
+
Requires-Python: >=3.8
|
|
30
|
+
Requires-Dist: anyio<5,>=3.0
|
|
31
|
+
Requires-Dist: httpx<1,>=0.25.0
|
|
32
|
+
Requires-Dist: pydantic<3,>=2.0
|
|
33
|
+
Requires-Dist: typing-extensions>=4.7
|
|
34
|
+
Provides-Extra: dev
|
|
35
|
+
Requires-Dist: build>=1.0; extra == 'dev'
|
|
36
|
+
Requires-Dist: mypy>=1.9; extra == 'dev'
|
|
37
|
+
Requires-Dist: pytest-asyncio>=0.21; extra == 'dev'
|
|
38
|
+
Requires-Dist: pytest>=7; extra == 'dev'
|
|
39
|
+
Requires-Dist: respx>=0.20; extra == 'dev'
|
|
40
|
+
Requires-Dist: ruff>=0.4; extra == 'dev'
|
|
41
|
+
Requires-Dist: twine>=5.0; extra == 'dev'
|
|
42
|
+
Description-Content-Type: text/markdown
|
|
43
|
+
|
|
44
|
+
# PonyFlash Python SDK
|
|
45
|
+
|
|
46
|
+
AI-native image, video, speech, and music generation SDK.
|
|
47
|
+
|
|
48
|
+
**Zero friction file handling** — pass `open()` file objects, `Path` objects, URLs, `bytes`, or `file_id` strings. The SDK auto-uploads via presigned URLs and cleans up temp files when the task completes.
|
|
49
|
+
|
|
50
|
+
## Installation
|
|
51
|
+
|
|
52
|
+
```bash
|
|
53
|
+
pip install ponyflash
|
|
54
|
+
```
|
|
55
|
+
|
|
56
|
+
## Quick Start
|
|
57
|
+
|
|
58
|
+
```python
|
|
59
|
+
from ponyflash import PonyFlash
|
|
60
|
+
|
|
61
|
+
client = PonyFlash(api_key="pf_xxx")
|
|
62
|
+
|
|
63
|
+
# Text-to-image
|
|
64
|
+
gen = client.images.generate(
|
|
65
|
+
model="nano-banana-pro",
|
|
66
|
+
prompt="A sunset over mountains",
|
|
67
|
+
resolution="2K",
|
|
68
|
+
)
|
|
69
|
+
print(gen.url) # first output URL
|
|
70
|
+
print(f"Credits used: {gen.credits}") # credits consumed
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
## Video Generation
|
|
74
|
+
|
|
75
|
+
```python
|
|
76
|
+
from pathlib import Path
|
|
77
|
+
|
|
78
|
+
# Text-to-video
|
|
79
|
+
gen = client.video.generate(
|
|
80
|
+
model="video-gen-1",
|
|
81
|
+
prompt="A timelapse of a city at night",
|
|
82
|
+
size="1920x1080",
|
|
83
|
+
duration=8,
|
|
84
|
+
)
|
|
85
|
+
print(gen.url)
|
|
86
|
+
|
|
87
|
+
# First-frame to video (local file)
|
|
88
|
+
with open("my_photo.jpg", "rb") as f:
|
|
89
|
+
gen = client.video.generate(
|
|
90
|
+
model="video-gen-1",
|
|
91
|
+
first_frame=f,
|
|
92
|
+
prompt="Camera slowly zooms in",
|
|
93
|
+
)
|
|
94
|
+
|
|
95
|
+
# First-frame to video (public URL)
|
|
96
|
+
gen = client.video.generate(
|
|
97
|
+
model="video-gen-1",
|
|
98
|
+
first_frame="https://example.com/photo.jpg",
|
|
99
|
+
prompt="Camera slowly zooms in",
|
|
100
|
+
)
|
|
101
|
+
|
|
102
|
+
# OmniHuman: portrait + audio → talking video
|
|
103
|
+
with open("portrait.jpg", "rb") as img, open("speech.wav", "rb") as audio:
|
|
104
|
+
gen = client.video.generate(
|
|
105
|
+
model="omnihuman-1.5",
|
|
106
|
+
first_frame=img,
|
|
107
|
+
audio=audio,
|
|
108
|
+
prompt="Natural speaking with subtle hand gestures",
|
|
109
|
+
size="1280x720",
|
|
110
|
+
)
|
|
111
|
+
|
|
112
|
+
# OmniHuman with fast mode and seed
|
|
113
|
+
with open("portrait.jpg", "rb") as img, open("speech.wav", "rb") as audio:
|
|
114
|
+
gen = client.video.generate(
|
|
115
|
+
model="omnihuman-1.5",
|
|
116
|
+
first_frame=img,
|
|
117
|
+
audio=audio,
|
|
118
|
+
seed=42,
|
|
119
|
+
fast_mode=True,
|
|
120
|
+
)
|
|
121
|
+
|
|
122
|
+
# Motion Transfer: person image + dance video → person performs the dance
|
|
123
|
+
with open("my_avatar.jpg", "rb") as img, open("dance_clip.mp4", "rb") as vid:
|
|
124
|
+
gen = client.video.generate(
|
|
125
|
+
model="motion-transfer-1",
|
|
126
|
+
first_frame=img,
|
|
127
|
+
motion_video=vid,
|
|
128
|
+
size="1280x720",
|
|
129
|
+
)
|
|
130
|
+
```
|
|
131
|
+
|
|
132
|
+
## Image Generation
|
|
133
|
+
|
|
134
|
+
```python
|
|
135
|
+
# Text-to-image
|
|
136
|
+
gen = client.images.generate(
|
|
137
|
+
model="nano-banana-pro",
|
|
138
|
+
prompt="A sunset",
|
|
139
|
+
resolution="2K",
|
|
140
|
+
aspect_ratio="16:9",
|
|
141
|
+
)
|
|
142
|
+
|
|
143
|
+
# Image-to-image (local file)
|
|
144
|
+
with open("source.png", "rb") as f:
|
|
145
|
+
gen = client.images.generate(
|
|
146
|
+
model="nano-banana-pro",
|
|
147
|
+
prompt="Make it look like a watercolor painting",
|
|
148
|
+
reference_images=[f],
|
|
149
|
+
)
|
|
150
|
+
|
|
151
|
+
# Image-to-image (public URL)
|
|
152
|
+
gen = client.images.generate(
|
|
153
|
+
model="nano-banana-pro",
|
|
154
|
+
prompt="Make it look like a watercolor painting",
|
|
155
|
+
reference_images=["https://example.com/source.png"],
|
|
156
|
+
)
|
|
157
|
+
|
|
158
|
+
# Inpainting with mask
|
|
159
|
+
with open("photo.jpg", "rb") as img, open("mask.png", "rb") as mask:
|
|
160
|
+
gen = client.images.generate(
|
|
161
|
+
model="nano-banana-pro",
|
|
162
|
+
prompt="Replace the sky with aurora borealis",
|
|
163
|
+
reference_images=[img],
|
|
164
|
+
mask=mask,
|
|
165
|
+
)
|
|
166
|
+
```
|
|
167
|
+
|
|
168
|
+
## Speech Synthesis (TTS)
|
|
169
|
+
|
|
170
|
+
```python
|
|
171
|
+
gen = client.speech.generate(
|
|
172
|
+
model="speech-2.8-hd",
|
|
173
|
+
input="欢迎使用 PonyFlash,这是一段语音合成示例。",
|
|
174
|
+
voice="English_Graceful_Lady",
|
|
175
|
+
language="zh-CN",
|
|
176
|
+
)
|
|
177
|
+
print(gen.url)
|
|
178
|
+
|
|
179
|
+
# With emotion and pitch control
|
|
180
|
+
gen = client.speech.generate(
|
|
181
|
+
model="speech-2.8-hd",
|
|
182
|
+
input="今天天气真好,我好开心!",
|
|
183
|
+
voice="English_Insightful_Speaker",
|
|
184
|
+
emotion="happy",
|
|
185
|
+
pitch=2,
|
|
186
|
+
speed=1.1,
|
|
187
|
+
)
|
|
188
|
+
```
|
|
189
|
+
|
|
190
|
+
## Music Generation
|
|
191
|
+
|
|
192
|
+
```python
|
|
193
|
+
gen = client.music.generate(
|
|
194
|
+
model="suno-v4.5",
|
|
195
|
+
prompt="A melancholic indie folk ballad with acoustic guitar",
|
|
196
|
+
title="Autumn Leaves",
|
|
197
|
+
duration=180,
|
|
198
|
+
)
|
|
199
|
+
|
|
200
|
+
# Extend from reference audio
|
|
201
|
+
with open("my_song_clip.mp3", "rb") as f:
|
|
202
|
+
gen = client.music.generate(
|
|
203
|
+
model="suno-v4.5",
|
|
204
|
+
prompt="Continue with an energetic chorus",
|
|
205
|
+
reference_audio=f,
|
|
206
|
+
continue_at=60.0,
|
|
207
|
+
)
|
|
208
|
+
```
|
|
209
|
+
|
|
210
|
+
## Downloading Results
|
|
211
|
+
|
|
212
|
+
```python
|
|
213
|
+
import httpx
|
|
214
|
+
|
|
215
|
+
gen = client.images.generate(
|
|
216
|
+
model="nano-banana-pro",
|
|
217
|
+
prompt="A cat wearing sunglasses",
|
|
218
|
+
resolution="2K",
|
|
219
|
+
)
|
|
220
|
+
|
|
221
|
+
# Download the generated image
|
|
222
|
+
resp = httpx.get(gen.url)
|
|
223
|
+
with open("output.png", "wb") as f:
|
|
224
|
+
f.write(resp.content)
|
|
225
|
+
|
|
226
|
+
# Multiple outputs
|
|
227
|
+
for i, url in enumerate(gen.urls):
|
|
228
|
+
resp = httpx.get(url)
|
|
229
|
+
with open(f"output_{i}.png", "wb") as f:
|
|
230
|
+
f.write(resp.content)
|
|
231
|
+
```
|
|
232
|
+
|
|
233
|
+
## File Input Types
|
|
234
|
+
|
|
235
|
+
Every file parameter (`reference_images`, `mask`, `first_frame`, `audio`, `motion_video`, `reference_audio`, ...) accepts:
|
|
236
|
+
|
|
237
|
+
| Input | Example | Behavior |
|
|
238
|
+
|-------|---------|----------|
|
|
239
|
+
| Open file object | `open("photo.jpg", "rb")` | **Recommended.** Auto-uploaded, auto-cleaned. |
|
|
240
|
+
| `Path` object | `Path("photo.jpg")` | Same as above. |
|
|
241
|
+
| `bytes` | `image_bytes` | Same as above. |
|
|
242
|
+
| `(filename, bytes)` tuple | `("photo.jpg", data)` | Same as above. |
|
|
243
|
+
| URL string | `"https://example.com/photo.jpg"` | Passed directly to backend. No upload. |
|
|
244
|
+
| `file_id` string | `"file_abc123"` | Reuses a previously uploaded file. |
|
|
245
|
+
|
|
246
|
+
> `generate()` auto-cleans temp files after the task completes. `submit()` does not — use it when you need `request_id` for manual polling.
|
|
247
|
+
|
|
248
|
+
## Non-blocking: `submit()` + `generations.wait()`
|
|
249
|
+
|
|
250
|
+
```python
|
|
251
|
+
task = client.images.submit(model="nano-banana-pro", prompt="A sunset")
|
|
252
|
+
print(task.request_id) # "req_img_001"
|
|
253
|
+
print(task.estimated_credits) # 20
|
|
254
|
+
|
|
255
|
+
# ... do other work ...
|
|
256
|
+
|
|
257
|
+
gen = client.generations.wait(task.request_id)
|
|
258
|
+
print(gen.url)
|
|
259
|
+
```
|
|
260
|
+
|
|
261
|
+
## Async
|
|
262
|
+
|
|
263
|
+
```python
|
|
264
|
+
from ponyflash import AsyncPonyFlash
|
|
265
|
+
|
|
266
|
+
client = AsyncPonyFlash(api_key="pf_xxx")
|
|
267
|
+
|
|
268
|
+
gen = await client.images.generate(model="nano-banana-pro", prompt="A sunset")
|
|
269
|
+
print(gen.url)
|
|
270
|
+
```
|
|
271
|
+
|
|
272
|
+
## Configuration
|
|
273
|
+
|
|
274
|
+
```python
|
|
275
|
+
client = PonyFlash(
|
|
276
|
+
api_key="pf_xxx", # or PONYFLASH_API_KEY env var
|
|
277
|
+
base_url="https://custom.example.com/v1", # or PONYFLASH_BASE_URL env var
|
|
278
|
+
max_retries=3,
|
|
279
|
+
)
|
|
280
|
+
|
|
281
|
+
# Polling timeout is per-resource, not per-client:
|
|
282
|
+
gen = client.video.generate(
|
|
283
|
+
model="video-gen-1",
|
|
284
|
+
prompt="...",
|
|
285
|
+
timeout=900.0, # wait up to 15 min for the task to complete (default: 600s)
|
|
286
|
+
)
|
|
287
|
+
```
|
|
288
|
+
|
|
289
|
+
> **Two kinds of timeout:**
|
|
290
|
+
> - `PonyFlash(timeout=...)` — per-HTTP-request timeout (default 300s). Only affects individual API calls.
|
|
291
|
+
> - `generate(timeout=...)` — polling timeout, how long to wait for the task to finish. Defaults vary by resource: images 120s, video/music 600s, speech 300s.
|
|
292
|
+
|
|
293
|
+
## Advanced: Manual File Management
|
|
294
|
+
|
|
295
|
+
The file API is available for advanced use cases:
|
|
296
|
+
|
|
297
|
+
```python
|
|
298
|
+
from pathlib import Path
|
|
299
|
+
|
|
300
|
+
# Upload explicitly (useful when reusing across multiple requests)
|
|
301
|
+
file_id = client.files.upload(Path("large_video.mp4"))
|
|
302
|
+
|
|
303
|
+
# Use the file_id in multiple requests without re-uploading
|
|
304
|
+
gen1 = client.video.generate(model="video-gen-1", video=file_id, prompt="Style A")
|
|
305
|
+
gen2 = client.video.generate(model="video-gen-1", video=file_id, prompt="Style B")
|
|
306
|
+
|
|
307
|
+
# Clean up when done
|
|
308
|
+
client.files.delete(file_id)
|
|
309
|
+
|
|
310
|
+
# List and inspect files
|
|
311
|
+
files = client.files.list()
|
|
312
|
+
info = client.files.get(file_id)
|
|
313
|
+
print(info.status, info.expires_at)
|
|
314
|
+
```
|
|
@@ -0,0 +1,271 @@
|
|
|
1
|
+
# PonyFlash Python SDK
|
|
2
|
+
|
|
3
|
+
AI-native image, video, speech, and music generation SDK.
|
|
4
|
+
|
|
5
|
+
**Zero friction file handling** — pass `open()` file objects, `Path` objects, URLs, `bytes`, or `file_id` strings. The SDK auto-uploads via presigned URLs and cleans up temp files when the task completes.
|
|
6
|
+
|
|
7
|
+
## Installation
|
|
8
|
+
|
|
9
|
+
```bash
|
|
10
|
+
pip install ponyflash
|
|
11
|
+
```
|
|
12
|
+
|
|
13
|
+
## Quick Start
|
|
14
|
+
|
|
15
|
+
```python
|
|
16
|
+
from ponyflash import PonyFlash
|
|
17
|
+
|
|
18
|
+
client = PonyFlash(api_key="pf_xxx")
|
|
19
|
+
|
|
20
|
+
# Text-to-image
|
|
21
|
+
gen = client.images.generate(
|
|
22
|
+
model="nano-banana-pro",
|
|
23
|
+
prompt="A sunset over mountains",
|
|
24
|
+
resolution="2K",
|
|
25
|
+
)
|
|
26
|
+
print(gen.url) # first output URL
|
|
27
|
+
print(f"Credits used: {gen.credits}") # credits consumed
|
|
28
|
+
```
|
|
29
|
+
|
|
30
|
+
## Video Generation
|
|
31
|
+
|
|
32
|
+
```python
|
|
33
|
+
from pathlib import Path
|
|
34
|
+
|
|
35
|
+
# Text-to-video
|
|
36
|
+
gen = client.video.generate(
|
|
37
|
+
model="video-gen-1",
|
|
38
|
+
prompt="A timelapse of a city at night",
|
|
39
|
+
size="1920x1080",
|
|
40
|
+
duration=8,
|
|
41
|
+
)
|
|
42
|
+
print(gen.url)
|
|
43
|
+
|
|
44
|
+
# First-frame to video (local file)
|
|
45
|
+
with open("my_photo.jpg", "rb") as f:
|
|
46
|
+
gen = client.video.generate(
|
|
47
|
+
model="video-gen-1",
|
|
48
|
+
first_frame=f,
|
|
49
|
+
prompt="Camera slowly zooms in",
|
|
50
|
+
)
|
|
51
|
+
|
|
52
|
+
# First-frame to video (public URL)
|
|
53
|
+
gen = client.video.generate(
|
|
54
|
+
model="video-gen-1",
|
|
55
|
+
first_frame="https://example.com/photo.jpg",
|
|
56
|
+
prompt="Camera slowly zooms in",
|
|
57
|
+
)
|
|
58
|
+
|
|
59
|
+
# OmniHuman: portrait + audio → talking video
|
|
60
|
+
with open("portrait.jpg", "rb") as img, open("speech.wav", "rb") as audio:
|
|
61
|
+
gen = client.video.generate(
|
|
62
|
+
model="omnihuman-1.5",
|
|
63
|
+
first_frame=img,
|
|
64
|
+
audio=audio,
|
|
65
|
+
prompt="Natural speaking with subtle hand gestures",
|
|
66
|
+
size="1280x720",
|
|
67
|
+
)
|
|
68
|
+
|
|
69
|
+
# OmniHuman with fast mode and seed
|
|
70
|
+
with open("portrait.jpg", "rb") as img, open("speech.wav", "rb") as audio:
|
|
71
|
+
gen = client.video.generate(
|
|
72
|
+
model="omnihuman-1.5",
|
|
73
|
+
first_frame=img,
|
|
74
|
+
audio=audio,
|
|
75
|
+
seed=42,
|
|
76
|
+
fast_mode=True,
|
|
77
|
+
)
|
|
78
|
+
|
|
79
|
+
# Motion Transfer: person image + dance video → person performs the dance
|
|
80
|
+
with open("my_avatar.jpg", "rb") as img, open("dance_clip.mp4", "rb") as vid:
|
|
81
|
+
gen = client.video.generate(
|
|
82
|
+
model="motion-transfer-1",
|
|
83
|
+
first_frame=img,
|
|
84
|
+
motion_video=vid,
|
|
85
|
+
size="1280x720",
|
|
86
|
+
)
|
|
87
|
+
```
|
|
88
|
+
|
|
89
|
+
## Image Generation
|
|
90
|
+
|
|
91
|
+
```python
|
|
92
|
+
# Text-to-image
|
|
93
|
+
gen = client.images.generate(
|
|
94
|
+
model="nano-banana-pro",
|
|
95
|
+
prompt="A sunset",
|
|
96
|
+
resolution="2K",
|
|
97
|
+
aspect_ratio="16:9",
|
|
98
|
+
)
|
|
99
|
+
|
|
100
|
+
# Image-to-image (local file)
|
|
101
|
+
with open("source.png", "rb") as f:
|
|
102
|
+
gen = client.images.generate(
|
|
103
|
+
model="nano-banana-pro",
|
|
104
|
+
prompt="Make it look like a watercolor painting",
|
|
105
|
+
reference_images=[f],
|
|
106
|
+
)
|
|
107
|
+
|
|
108
|
+
# Image-to-image (public URL)
|
|
109
|
+
gen = client.images.generate(
|
|
110
|
+
model="nano-banana-pro",
|
|
111
|
+
prompt="Make it look like a watercolor painting",
|
|
112
|
+
reference_images=["https://example.com/source.png"],
|
|
113
|
+
)
|
|
114
|
+
|
|
115
|
+
# Inpainting with mask
|
|
116
|
+
with open("photo.jpg", "rb") as img, open("mask.png", "rb") as mask:
|
|
117
|
+
gen = client.images.generate(
|
|
118
|
+
model="nano-banana-pro",
|
|
119
|
+
prompt="Replace the sky with aurora borealis",
|
|
120
|
+
reference_images=[img],
|
|
121
|
+
mask=mask,
|
|
122
|
+
)
|
|
123
|
+
```
|
|
124
|
+
|
|
125
|
+
## Speech Synthesis (TTS)
|
|
126
|
+
|
|
127
|
+
```python
|
|
128
|
+
gen = client.speech.generate(
|
|
129
|
+
model="speech-2.8-hd",
|
|
130
|
+
input="欢迎使用 PonyFlash,这是一段语音合成示例。",
|
|
131
|
+
voice="English_Graceful_Lady",
|
|
132
|
+
language="zh-CN",
|
|
133
|
+
)
|
|
134
|
+
print(gen.url)
|
|
135
|
+
|
|
136
|
+
# With emotion and pitch control
|
|
137
|
+
gen = client.speech.generate(
|
|
138
|
+
model="speech-2.8-hd",
|
|
139
|
+
input="今天天气真好,我好开心!",
|
|
140
|
+
voice="English_Insightful_Speaker",
|
|
141
|
+
emotion="happy",
|
|
142
|
+
pitch=2,
|
|
143
|
+
speed=1.1,
|
|
144
|
+
)
|
|
145
|
+
```
|
|
146
|
+
|
|
147
|
+
## Music Generation
|
|
148
|
+
|
|
149
|
+
```python
|
|
150
|
+
gen = client.music.generate(
|
|
151
|
+
model="suno-v4.5",
|
|
152
|
+
prompt="A melancholic indie folk ballad with acoustic guitar",
|
|
153
|
+
title="Autumn Leaves",
|
|
154
|
+
duration=180,
|
|
155
|
+
)
|
|
156
|
+
|
|
157
|
+
# Extend from reference audio
|
|
158
|
+
with open("my_song_clip.mp3", "rb") as f:
|
|
159
|
+
gen = client.music.generate(
|
|
160
|
+
model="suno-v4.5",
|
|
161
|
+
prompt="Continue with an energetic chorus",
|
|
162
|
+
reference_audio=f,
|
|
163
|
+
continue_at=60.0,
|
|
164
|
+
)
|
|
165
|
+
```
|
|
166
|
+
|
|
167
|
+
## Downloading Results
|
|
168
|
+
|
|
169
|
+
```python
|
|
170
|
+
import httpx
|
|
171
|
+
|
|
172
|
+
gen = client.images.generate(
|
|
173
|
+
model="nano-banana-pro",
|
|
174
|
+
prompt="A cat wearing sunglasses",
|
|
175
|
+
resolution="2K",
|
|
176
|
+
)
|
|
177
|
+
|
|
178
|
+
# Download the generated image
|
|
179
|
+
resp = httpx.get(gen.url)
|
|
180
|
+
with open("output.png", "wb") as f:
|
|
181
|
+
f.write(resp.content)
|
|
182
|
+
|
|
183
|
+
# Multiple outputs
|
|
184
|
+
for i, url in enumerate(gen.urls):
|
|
185
|
+
resp = httpx.get(url)
|
|
186
|
+
with open(f"output_{i}.png", "wb") as f:
|
|
187
|
+
f.write(resp.content)
|
|
188
|
+
```
|
|
189
|
+
|
|
190
|
+
## File Input Types
|
|
191
|
+
|
|
192
|
+
Every file parameter (`reference_images`, `mask`, `first_frame`, `audio`, `motion_video`, `reference_audio`, ...) accepts:
|
|
193
|
+
|
|
194
|
+
| Input | Example | Behavior |
|
|
195
|
+
|-------|---------|----------|
|
|
196
|
+
| Open file object | `open("photo.jpg", "rb")` | **Recommended.** Auto-uploaded, auto-cleaned. |
|
|
197
|
+
| `Path` object | `Path("photo.jpg")` | Same as above. |
|
|
198
|
+
| `bytes` | `image_bytes` | Same as above. |
|
|
199
|
+
| `(filename, bytes)` tuple | `("photo.jpg", data)` | Same as above. |
|
|
200
|
+
| URL string | `"https://example.com/photo.jpg"` | Passed directly to backend. No upload. |
|
|
201
|
+
| `file_id` string | `"file_abc123"` | Reuses a previously uploaded file. |
|
|
202
|
+
|
|
203
|
+
> `generate()` auto-cleans temp files after the task completes. `submit()` does not — use it when you need `request_id` for manual polling.
|
|
204
|
+
|
|
205
|
+
## Non-blocking: `submit()` + `generations.wait()`
|
|
206
|
+
|
|
207
|
+
```python
|
|
208
|
+
task = client.images.submit(model="nano-banana-pro", prompt="A sunset")
|
|
209
|
+
print(task.request_id) # "req_img_001"
|
|
210
|
+
print(task.estimated_credits) # 20
|
|
211
|
+
|
|
212
|
+
# ... do other work ...
|
|
213
|
+
|
|
214
|
+
gen = client.generations.wait(task.request_id)
|
|
215
|
+
print(gen.url)
|
|
216
|
+
```
|
|
217
|
+
|
|
218
|
+
## Async
|
|
219
|
+
|
|
220
|
+
```python
|
|
221
|
+
from ponyflash import AsyncPonyFlash
|
|
222
|
+
|
|
223
|
+
client = AsyncPonyFlash(api_key="pf_xxx")
|
|
224
|
+
|
|
225
|
+
gen = await client.images.generate(model="nano-banana-pro", prompt="A sunset")
|
|
226
|
+
print(gen.url)
|
|
227
|
+
```
|
|
228
|
+
|
|
229
|
+
## Configuration
|
|
230
|
+
|
|
231
|
+
```python
|
|
232
|
+
client = PonyFlash(
|
|
233
|
+
api_key="pf_xxx", # or PONYFLASH_API_KEY env var
|
|
234
|
+
base_url="https://custom.example.com/v1", # or PONYFLASH_BASE_URL env var
|
|
235
|
+
max_retries=3,
|
|
236
|
+
)
|
|
237
|
+
|
|
238
|
+
# Polling timeout is per-resource, not per-client:
|
|
239
|
+
gen = client.video.generate(
|
|
240
|
+
model="video-gen-1",
|
|
241
|
+
prompt="...",
|
|
242
|
+
timeout=900.0, # wait up to 15 min for the task to complete (default: 600s)
|
|
243
|
+
)
|
|
244
|
+
```
|
|
245
|
+
|
|
246
|
+
> **Two kinds of timeout:**
|
|
247
|
+
> - `PonyFlash(timeout=...)` — per-HTTP-request timeout (default 300s). Only affects individual API calls.
|
|
248
|
+
> - `generate(timeout=...)` — polling timeout, how long to wait for the task to finish. Defaults vary by resource: images 120s, video/music 600s, speech 300s.
|
|
249
|
+
|
|
250
|
+
## Advanced: Manual File Management
|
|
251
|
+
|
|
252
|
+
The file API is available for advanced use cases:
|
|
253
|
+
|
|
254
|
+
```python
|
|
255
|
+
from pathlib import Path
|
|
256
|
+
|
|
257
|
+
# Upload explicitly (useful when reusing across multiple requests)
|
|
258
|
+
file_id = client.files.upload(Path("large_video.mp4"))
|
|
259
|
+
|
|
260
|
+
# Use the file_id in multiple requests without re-uploading
|
|
261
|
+
gen1 = client.video.generate(model="video-gen-1", video=file_id, prompt="Style A")
|
|
262
|
+
gen2 = client.video.generate(model="video-gen-1", video=file_id, prompt="Style B")
|
|
263
|
+
|
|
264
|
+
# Clean up when done
|
|
265
|
+
client.files.delete(file_id)
|
|
266
|
+
|
|
267
|
+
# List and inspect files
|
|
268
|
+
files = client.files.list()
|
|
269
|
+
info = client.files.get(file_id)
|
|
270
|
+
print(info.status, info.expires_at)
|
|
271
|
+
```
|