runwayml 3.12.1__tar.gz → 3.14.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- runwayml-3.14.0/.release-please-manifest.json +3 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/CHANGELOG.md +18 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/PKG-INFO +1 -1
- {runwayml-3.12.1 → runwayml-3.14.0}/api.md +12 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/pyproject.toml +1 -1
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/_client.py +9 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/_version.py +1 -1
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/resources/__init__.py +14 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/resources/image_to_video.py +24 -6
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/resources/text_to_image.py +76 -4
- runwayml-3.14.0/src/runwayml/resources/text_to_video.py +223 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/types/__init__.py +2 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/types/image_to_video_create_params.py +17 -4
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/types/organization_retrieve_response.py +42 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/types/organization_retrieve_usage_response.py +22 -2
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/types/text_to_image_create_params.py +41 -3
- runwayml-3.14.0/src/runwayml/types/text_to_video_create_params.py +34 -0
- runwayml-3.14.0/src/runwayml/types/text_to_video_create_response.py +10 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/tests/api_resources/test_image_to_video.py +8 -8
- {runwayml-3.12.1 → runwayml-3.14.0}/tests/api_resources/test_text_to_image.py +8 -8
- runwayml-3.14.0/tests/api_resources/test_text_to_video.py +126 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/tests/test_client.py +10 -10
- runwayml-3.12.1/.release-please-manifest.json +0 -3
- {runwayml-3.12.1 → runwayml-3.14.0}/.gitignore +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/CONTRIBUTING.md +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/LICENSE +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/README.md +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/SECURITY.md +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/bin/check-release-environment +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/bin/publish-pypi +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/examples/.keep +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/examples/generate_image.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/noxfile.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/release-please-config.json +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/requirements-dev.lock +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/requirements.lock +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/__init__.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/_base_client.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/_compat.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/_constants.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/_exceptions.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/_files.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/_models.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/_qs.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/_resource.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/_response.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/_streaming.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/_types.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/_utils/__init__.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/_utils/_compat.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/_utils/_datetime_parse.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/_utils/_logs.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/_utils/_proxy.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/_utils/_reflection.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/_utils/_resources_proxy.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/_utils/_streams.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/_utils/_sync.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/_utils/_transform.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/_utils/_typing.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/_utils/_utils.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/lib/.keep +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/lib/polling.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/py.typed +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/resources/character_performance.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/resources/organization.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/resources/tasks.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/resources/video_to_video.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/resources/video_upscale.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/types/character_performance_create_params.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/types/character_performance_create_response.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/types/image_to_video_create_response.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/types/organization_retrieve_usage_params.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/types/task_retrieve_response.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/types/text_to_image_create_response.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/types/video_to_video_create_params.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/types/video_to_video_create_response.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/types/video_upscale_create_params.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/src/runwayml/types/video_upscale_create_response.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/tests/__init__.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/tests/api_resources/__init__.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/tests/api_resources/test_character_performance.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/tests/api_resources/test_organization.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/tests/api_resources/test_tasks.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/tests/api_resources/test_video_to_video.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/tests/api_resources/test_video_upscale.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/tests/conftest.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/tests/sample_file.txt +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/tests/test_deepcopy.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/tests/test_extract_files.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/tests/test_files.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/tests/test_models.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/tests/test_qs.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/tests/test_required_args.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/tests/test_response.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/tests/test_streaming.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/tests/test_transform.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/tests/test_utils/test_datetime_parse.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/tests/test_utils/test_proxy.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/tests/test_utils/test_typing.py +0 -0
- {runwayml-3.12.1 → runwayml-3.14.0}/tests/utils.py +0 -0
@@ -1,5 +1,23 @@
|
|
1
1
|
# Changelog
|
2
2
|
|
3
|
+
## 3.14.0 (2025-09-11)
|
4
|
+
|
5
|
+
Full Changelog: [v3.13.0...v3.14.0](https://github.com/runwayml/sdk-python/compare/v3.13.0...v3.14.0)
|
6
|
+
|
7
|
+
### Features
|
8
|
+
|
9
|
+
* **api:** Add Gemini 2.5 Flash Image to t2i ([3cb044a](https://github.com/runwayml/sdk-python/commit/3cb044af287c1da1b0a648fdf91074f9a34abd6a))
|
10
|
+
|
11
|
+
## 3.13.0 (2025-09-10)
|
12
|
+
|
13
|
+
Full Changelog: [v3.12.1...v3.13.0](https://github.com/runwayml/sdk-python/compare/v3.12.1...v3.13.0)
|
14
|
+
|
15
|
+
### Features
|
16
|
+
|
17
|
+
* **api:** Update t2v parameters ([7163db8](https://github.com/runwayml/sdk-python/commit/7163db8841a5dca89b546e8d445105aeffe07d10))
|
18
|
+
* **api:** Veo3 integration ([37e7224](https://github.com/runwayml/sdk-python/commit/37e7224b1e0de5450b69bb42a1dc6a9039e435af))
|
19
|
+
* **client:** Make t2v waitable ([d21c4e4](https://github.com/runwayml/sdk-python/commit/d21c4e4da2c8760aceaf07579051ab1e486c08b2))
|
20
|
+
|
3
21
|
## 3.12.1 (2025-09-06)
|
4
22
|
|
5
23
|
Full Changelog: [v3.12.0...v3.12.1](https://github.com/runwayml/sdk-python/compare/v3.12.0...v3.12.1)
|
@@ -35,6 +35,18 @@ Methods:
|
|
35
35
|
|
36
36
|
- <code title="post /v1/video_to_video">client.video_to_video.<a href="./src/runwayml/resources/video_to_video.py">create</a>(\*\*<a href="src/runwayml/types/video_to_video_create_params.py">params</a>) -> <a href="./src/runwayml/types/video_to_video_create_response.py">VideoToVideoCreateResponse</a></code>
|
37
37
|
|
38
|
+
# TextToVideo
|
39
|
+
|
40
|
+
Types:
|
41
|
+
|
42
|
+
```python
|
43
|
+
from runwayml.types import TextToVideoCreateResponse
|
44
|
+
```
|
45
|
+
|
46
|
+
Methods:
|
47
|
+
|
48
|
+
- <code title="post /v1/text_to_video">client.text_to_video.<a href="./src/runwayml/resources/text_to_video.py">create</a>(\*\*<a href="src/runwayml/types/text_to_video_create_params.py">params</a>) -> <a href="./src/runwayml/types/text_to_video_create_response.py">TextToVideoCreateResponse</a></code>
|
49
|
+
|
38
50
|
# TextToImage
|
39
51
|
|
40
52
|
Types:
|
@@ -25,6 +25,7 @@ from .resources import (
|
|
25
25
|
tasks,
|
26
26
|
organization,
|
27
27
|
text_to_image,
|
28
|
+
text_to_video,
|
28
29
|
video_upscale,
|
29
30
|
image_to_video,
|
30
31
|
video_to_video,
|
@@ -54,6 +55,7 @@ class RunwayML(SyncAPIClient):
|
|
54
55
|
tasks: tasks.TasksResource
|
55
56
|
image_to_video: image_to_video.ImageToVideoResource
|
56
57
|
video_to_video: video_to_video.VideoToVideoResource
|
58
|
+
text_to_video: text_to_video.TextToVideoResource
|
57
59
|
text_to_image: text_to_image.TextToImageResource
|
58
60
|
video_upscale: video_upscale.VideoUpscaleResource
|
59
61
|
character_performance: character_performance.CharacterPerformanceResource
|
@@ -124,6 +126,7 @@ class RunwayML(SyncAPIClient):
|
|
124
126
|
self.tasks = tasks.TasksResource(self)
|
125
127
|
self.image_to_video = image_to_video.ImageToVideoResource(self)
|
126
128
|
self.video_to_video = video_to_video.VideoToVideoResource(self)
|
129
|
+
self.text_to_video = text_to_video.TextToVideoResource(self)
|
127
130
|
self.text_to_image = text_to_image.TextToImageResource(self)
|
128
131
|
self.video_upscale = video_upscale.VideoUpscaleResource(self)
|
129
132
|
self.character_performance = character_performance.CharacterPerformanceResource(self)
|
@@ -243,6 +246,7 @@ class AsyncRunwayML(AsyncAPIClient):
|
|
243
246
|
tasks: tasks.AsyncTasksResource
|
244
247
|
image_to_video: image_to_video.AsyncImageToVideoResource
|
245
248
|
video_to_video: video_to_video.AsyncVideoToVideoResource
|
249
|
+
text_to_video: text_to_video.AsyncTextToVideoResource
|
246
250
|
text_to_image: text_to_image.AsyncTextToImageResource
|
247
251
|
video_upscale: video_upscale.AsyncVideoUpscaleResource
|
248
252
|
character_performance: character_performance.AsyncCharacterPerformanceResource
|
@@ -313,6 +317,7 @@ class AsyncRunwayML(AsyncAPIClient):
|
|
313
317
|
self.tasks = tasks.AsyncTasksResource(self)
|
314
318
|
self.image_to_video = image_to_video.AsyncImageToVideoResource(self)
|
315
319
|
self.video_to_video = video_to_video.AsyncVideoToVideoResource(self)
|
320
|
+
self.text_to_video = text_to_video.AsyncTextToVideoResource(self)
|
316
321
|
self.text_to_image = text_to_image.AsyncTextToImageResource(self)
|
317
322
|
self.video_upscale = video_upscale.AsyncVideoUpscaleResource(self)
|
318
323
|
self.character_performance = character_performance.AsyncCharacterPerformanceResource(self)
|
@@ -433,6 +438,7 @@ class RunwayMLWithRawResponse:
|
|
433
438
|
self.tasks = tasks.TasksResourceWithRawResponse(client.tasks)
|
434
439
|
self.image_to_video = image_to_video.ImageToVideoResourceWithRawResponse(client.image_to_video)
|
435
440
|
self.video_to_video = video_to_video.VideoToVideoResourceWithRawResponse(client.video_to_video)
|
441
|
+
self.text_to_video = text_to_video.TextToVideoResourceWithRawResponse(client.text_to_video)
|
436
442
|
self.text_to_image = text_to_image.TextToImageResourceWithRawResponse(client.text_to_image)
|
437
443
|
self.video_upscale = video_upscale.VideoUpscaleResourceWithRawResponse(client.video_upscale)
|
438
444
|
self.character_performance = character_performance.CharacterPerformanceResourceWithRawResponse(
|
@@ -446,6 +452,7 @@ class AsyncRunwayMLWithRawResponse:
|
|
446
452
|
self.tasks = tasks.AsyncTasksResourceWithRawResponse(client.tasks)
|
447
453
|
self.image_to_video = image_to_video.AsyncImageToVideoResourceWithRawResponse(client.image_to_video)
|
448
454
|
self.video_to_video = video_to_video.AsyncVideoToVideoResourceWithRawResponse(client.video_to_video)
|
455
|
+
self.text_to_video = text_to_video.AsyncTextToVideoResourceWithRawResponse(client.text_to_video)
|
449
456
|
self.text_to_image = text_to_image.AsyncTextToImageResourceWithRawResponse(client.text_to_image)
|
450
457
|
self.video_upscale = video_upscale.AsyncVideoUpscaleResourceWithRawResponse(client.video_upscale)
|
451
458
|
self.character_performance = character_performance.AsyncCharacterPerformanceResourceWithRawResponse(
|
@@ -459,6 +466,7 @@ class RunwayMLWithStreamedResponse:
|
|
459
466
|
self.tasks = tasks.TasksResourceWithStreamingResponse(client.tasks)
|
460
467
|
self.image_to_video = image_to_video.ImageToVideoResourceWithStreamingResponse(client.image_to_video)
|
461
468
|
self.video_to_video = video_to_video.VideoToVideoResourceWithStreamingResponse(client.video_to_video)
|
469
|
+
self.text_to_video = text_to_video.TextToVideoResourceWithStreamingResponse(client.text_to_video)
|
462
470
|
self.text_to_image = text_to_image.TextToImageResourceWithStreamingResponse(client.text_to_image)
|
463
471
|
self.video_upscale = video_upscale.VideoUpscaleResourceWithStreamingResponse(client.video_upscale)
|
464
472
|
self.character_performance = character_performance.CharacterPerformanceResourceWithStreamingResponse(
|
@@ -472,6 +480,7 @@ class AsyncRunwayMLWithStreamedResponse:
|
|
472
480
|
self.tasks = tasks.AsyncTasksResourceWithStreamingResponse(client.tasks)
|
473
481
|
self.image_to_video = image_to_video.AsyncImageToVideoResourceWithStreamingResponse(client.image_to_video)
|
474
482
|
self.video_to_video = video_to_video.AsyncVideoToVideoResourceWithStreamingResponse(client.video_to_video)
|
483
|
+
self.text_to_video = text_to_video.AsyncTextToVideoResourceWithStreamingResponse(client.text_to_video)
|
475
484
|
self.text_to_image = text_to_image.AsyncTextToImageResourceWithStreamingResponse(client.text_to_image)
|
476
485
|
self.video_upscale = video_upscale.AsyncVideoUpscaleResourceWithStreamingResponse(client.video_upscale)
|
477
486
|
self.character_performance = character_performance.AsyncCharacterPerformanceResourceWithStreamingResponse(
|
@@ -24,6 +24,14 @@ from .text_to_image import (
|
|
24
24
|
TextToImageResourceWithStreamingResponse,
|
25
25
|
AsyncTextToImageResourceWithStreamingResponse,
|
26
26
|
)
|
27
|
+
from .text_to_video import (
|
28
|
+
TextToVideoResource,
|
29
|
+
AsyncTextToVideoResource,
|
30
|
+
TextToVideoResourceWithRawResponse,
|
31
|
+
AsyncTextToVideoResourceWithRawResponse,
|
32
|
+
TextToVideoResourceWithStreamingResponse,
|
33
|
+
AsyncTextToVideoResourceWithStreamingResponse,
|
34
|
+
)
|
27
35
|
from .video_upscale import (
|
28
36
|
VideoUpscaleResource,
|
29
37
|
AsyncVideoUpscaleResource,
|
@@ -76,6 +84,12 @@ __all__ = [
|
|
76
84
|
"AsyncVideoToVideoResourceWithRawResponse",
|
77
85
|
"VideoToVideoResourceWithStreamingResponse",
|
78
86
|
"AsyncVideoToVideoResourceWithStreamingResponse",
|
87
|
+
"TextToVideoResource",
|
88
|
+
"AsyncTextToVideoResource",
|
89
|
+
"TextToVideoResourceWithRawResponse",
|
90
|
+
"AsyncTextToVideoResourceWithRawResponse",
|
91
|
+
"TextToVideoResourceWithStreamingResponse",
|
92
|
+
"AsyncTextToVideoResourceWithStreamingResponse",
|
79
93
|
"TextToImageResource",
|
80
94
|
"AsyncTextToImageResource",
|
81
95
|
"TextToImageResourceWithRawResponse",
|
@@ -53,11 +53,11 @@ class ImageToVideoResource(SyncAPIResource):
|
|
53
53
|
def create(
|
54
54
|
self,
|
55
55
|
*,
|
56
|
-
model: Literal["gen3a_turbo", "
|
56
|
+
model: Literal["gen4_turbo", "gen3a_turbo", "veo3"],
|
57
57
|
prompt_image: Union[str, Iterable[image_to_video_create_params.PromptImagePromptImage]],
|
58
58
|
ratio: Literal["1280:720", "720:1280", "1104:832", "832:1104", "960:960", "1584:672", "1280:768", "768:1280"],
|
59
59
|
content_moderation: image_to_video_create_params.ContentModeration | NotGiven = NOT_GIVEN,
|
60
|
-
duration: Literal[5, 10] | NotGiven = NOT_GIVEN,
|
60
|
+
duration: Literal[5, 8, 10] | NotGiven = NOT_GIVEN,
|
61
61
|
prompt_text: str | NotGiven = NOT_GIVEN,
|
62
62
|
seed: int | NotGiven = NOT_GIVEN,
|
63
63
|
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
@@ -93,9 +93,18 @@ class ImageToVideoResource(SyncAPIResource):
|
|
93
93
|
- `1280:768`
|
94
94
|
- `768:1280`
|
95
95
|
|
96
|
+
`veo3` supports the following values:
|
97
|
+
|
98
|
+
- `1280:720`
|
99
|
+
- `720:1280`
|
100
|
+
|
96
101
|
content_moderation: Settings that affect the behavior of the content moderation system.
|
97
102
|
|
98
|
-
|
103
|
+
This field is allowed only for the following model variants: `gen4_turbo`,
|
104
|
+
`gen3a_turbo`
|
105
|
+
|
106
|
+
duration: The number of seconds of duration for the output video. `veo3` requires a
|
107
|
+
duration of 8. gen4_turbo, gen3a_turbo must specify a duration of 5 or 10.
|
99
108
|
|
100
109
|
prompt_text: A non-empty string up to 1000 characters (measured in UTF-16 code units). This
|
101
110
|
should describe in detail what should appear in the output.
|
@@ -156,11 +165,11 @@ class AsyncImageToVideoResource(AsyncAPIResource):
|
|
156
165
|
async def create(
|
157
166
|
self,
|
158
167
|
*,
|
159
|
-
model: Literal["gen3a_turbo", "
|
168
|
+
model: Literal["gen4_turbo", "gen3a_turbo", "veo3"],
|
160
169
|
prompt_image: Union[str, Iterable[image_to_video_create_params.PromptImagePromptImage]],
|
161
170
|
ratio: Literal["1280:720", "720:1280", "1104:832", "832:1104", "960:960", "1584:672", "1280:768", "768:1280"],
|
162
171
|
content_moderation: image_to_video_create_params.ContentModeration | NotGiven = NOT_GIVEN,
|
163
|
-
duration: Literal[5, 10] | NotGiven = NOT_GIVEN,
|
172
|
+
duration: Literal[5, 8, 10] | NotGiven = NOT_GIVEN,
|
164
173
|
prompt_text: str | NotGiven = NOT_GIVEN,
|
165
174
|
seed: int | NotGiven = NOT_GIVEN,
|
166
175
|
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
@@ -196,9 +205,18 @@ class AsyncImageToVideoResource(AsyncAPIResource):
|
|
196
205
|
- `1280:768`
|
197
206
|
- `768:1280`
|
198
207
|
|
208
|
+
`veo3` supports the following values:
|
209
|
+
|
210
|
+
- `1280:720`
|
211
|
+
- `720:1280`
|
212
|
+
|
199
213
|
content_moderation: Settings that affect the behavior of the content moderation system.
|
200
214
|
|
201
|
-
|
215
|
+
This field is allowed only for the following model variants: `gen4_turbo`,
|
216
|
+
`gen3a_turbo`
|
217
|
+
|
218
|
+
duration: The number of seconds of duration for the output video. `veo3` requires a
|
219
|
+
duration of 8. gen4_turbo, gen3a_turbo must specify a duration of 5 or 10.
|
202
220
|
|
203
221
|
prompt_text: A non-empty string up to 1000 characters (measured in UTF-16 code units). This
|
204
222
|
should describe in detail what should appear in the output.
|
@@ -53,7 +53,7 @@ class TextToImageResource(SyncAPIResource):
|
|
53
53
|
def create(
|
54
54
|
self,
|
55
55
|
*,
|
56
|
-
model: Literal["gen4_image", "
|
56
|
+
model: Literal["gen4_image_turbo", "gen4_image", "gemini_2.5_flash"],
|
57
57
|
prompt_text: str,
|
58
58
|
ratio: Literal[
|
59
59
|
"1920:1080",
|
@@ -72,6 +72,11 @@ class TextToImageResource(SyncAPIResource):
|
|
72
72
|
"960:720",
|
73
73
|
"720:960",
|
74
74
|
"1680:720",
|
75
|
+
"1344:768",
|
76
|
+
"768:1344",
|
77
|
+
"1184:864",
|
78
|
+
"864:1184",
|
79
|
+
"1536:672",
|
75
80
|
],
|
76
81
|
content_moderation: text_to_image_create_params.ContentModeration | NotGiven = NOT_GIVEN,
|
77
82
|
reference_images: Iterable[text_to_image_create_params.ReferenceImage] | NotGiven = NOT_GIVEN,
|
@@ -92,10 +97,41 @@ class TextToImageResource(SyncAPIResource):
|
|
92
97
|
prompt_text: A non-empty string up to 1000 characters (measured in UTF-16 code units). This
|
93
98
|
should describe in detail what should appear in the output.
|
94
99
|
|
95
|
-
ratio: The resolution of the output image
|
100
|
+
ratio: The resolution of the output image.
|
101
|
+
|
102
|
+
`gen4_image_turbo`, `gen4_image` support the following values:
|
103
|
+
|
104
|
+
- `1920:1080`
|
105
|
+
- `1080:1920`
|
106
|
+
- `1024:1024`
|
107
|
+
- `1360:768`
|
108
|
+
- `1080:1080`
|
109
|
+
- `1168:880`
|
110
|
+
- `1440:1080`
|
111
|
+
- `1080:1440`
|
112
|
+
- `1808:768`
|
113
|
+
- `2112:912`
|
114
|
+
- `1280:720`
|
115
|
+
- `720:1280`
|
116
|
+
- `720:720`
|
117
|
+
- `960:720`
|
118
|
+
- `720:960`
|
119
|
+
- `1680:720`
|
120
|
+
|
121
|
+
`gemini_2.5_flash` supports the following values:
|
122
|
+
|
123
|
+
- `1344:768`
|
124
|
+
- `768:1344`
|
125
|
+
- `1024:1024`
|
126
|
+
- `1184:864`
|
127
|
+
- `864:1184`
|
128
|
+
- `1536:672`
|
96
129
|
|
97
130
|
content_moderation: Settings that affect the behavior of the content moderation system.
|
98
131
|
|
132
|
+
This field is allowed only for the following model variants: `gen4_image_turbo`,
|
133
|
+
`gen4_image`
|
134
|
+
|
99
135
|
reference_images: An array of up to three images to be used as references for the generated image
|
100
136
|
output.
|
101
137
|
|
@@ -156,7 +192,7 @@ class AsyncTextToImageResource(AsyncAPIResource):
|
|
156
192
|
async def create(
|
157
193
|
self,
|
158
194
|
*,
|
159
|
-
model: Literal["gen4_image", "
|
195
|
+
model: Literal["gen4_image_turbo", "gen4_image", "gemini_2.5_flash"],
|
160
196
|
prompt_text: str,
|
161
197
|
ratio: Literal[
|
162
198
|
"1920:1080",
|
@@ -175,6 +211,11 @@ class AsyncTextToImageResource(AsyncAPIResource):
|
|
175
211
|
"960:720",
|
176
212
|
"720:960",
|
177
213
|
"1680:720",
|
214
|
+
"1344:768",
|
215
|
+
"768:1344",
|
216
|
+
"1184:864",
|
217
|
+
"864:1184",
|
218
|
+
"1536:672",
|
178
219
|
],
|
179
220
|
content_moderation: text_to_image_create_params.ContentModeration | NotGiven = NOT_GIVEN,
|
180
221
|
reference_images: Iterable[text_to_image_create_params.ReferenceImage] | NotGiven = NOT_GIVEN,
|
@@ -195,10 +236,41 @@ class AsyncTextToImageResource(AsyncAPIResource):
|
|
195
236
|
prompt_text: A non-empty string up to 1000 characters (measured in UTF-16 code units). This
|
196
237
|
should describe in detail what should appear in the output.
|
197
238
|
|
198
|
-
ratio: The resolution of the output image
|
239
|
+
ratio: The resolution of the output image.
|
240
|
+
|
241
|
+
`gen4_image_turbo`, `gen4_image` support the following values:
|
242
|
+
|
243
|
+
- `1920:1080`
|
244
|
+
- `1080:1920`
|
245
|
+
- `1024:1024`
|
246
|
+
- `1360:768`
|
247
|
+
- `1080:1080`
|
248
|
+
- `1168:880`
|
249
|
+
- `1440:1080`
|
250
|
+
- `1080:1440`
|
251
|
+
- `1808:768`
|
252
|
+
- `2112:912`
|
253
|
+
- `1280:720`
|
254
|
+
- `720:1280`
|
255
|
+
- `720:720`
|
256
|
+
- `960:720`
|
257
|
+
- `720:960`
|
258
|
+
- `1680:720`
|
259
|
+
|
260
|
+
`gemini_2.5_flash` supports the following values:
|
261
|
+
|
262
|
+
- `1344:768`
|
263
|
+
- `768:1344`
|
264
|
+
- `1024:1024`
|
265
|
+
- `1184:864`
|
266
|
+
- `864:1184`
|
267
|
+
- `1536:672`
|
199
268
|
|
200
269
|
content_moderation: Settings that affect the behavior of the content moderation system.
|
201
270
|
|
271
|
+
This field is allowed only for the following model variants: `gen4_image_turbo`,
|
272
|
+
`gen4_image`
|
273
|
+
|
202
274
|
reference_images: An array of up to three images to be used as references for the generated image
|
203
275
|
output.
|
204
276
|
|
@@ -0,0 +1,223 @@
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
2
|
+
|
3
|
+
from __future__ import annotations
|
4
|
+
|
5
|
+
from typing_extensions import Literal
|
6
|
+
|
7
|
+
import httpx
|
8
|
+
|
9
|
+
from ..types import text_to_video_create_params
|
10
|
+
from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
|
11
|
+
from .._utils import maybe_transform, async_maybe_transform
|
12
|
+
from .._compat import cached_property
|
13
|
+
from .._resource import SyncAPIResource, AsyncAPIResource
|
14
|
+
from .._response import (
|
15
|
+
to_raw_response_wrapper,
|
16
|
+
to_streamed_response_wrapper,
|
17
|
+
async_to_raw_response_wrapper,
|
18
|
+
async_to_streamed_response_wrapper,
|
19
|
+
)
|
20
|
+
from ..lib.polling import (
|
21
|
+
NewTaskCreatedResponse,
|
22
|
+
AsyncNewTaskCreatedResponse,
|
23
|
+
create_waitable_resource,
|
24
|
+
create_async_waitable_resource,
|
25
|
+
)
|
26
|
+
from .._base_client import make_request_options
|
27
|
+
from ..types.text_to_video_create_response import TextToVideoCreateResponse
|
28
|
+
|
29
|
+
__all__ = ["TextToVideoResource", "AsyncTextToVideoResource"]
|
30
|
+
|
31
|
+
|
32
|
+
class TextToVideoResource(SyncAPIResource):
|
33
|
+
@cached_property
|
34
|
+
def with_raw_response(self) -> TextToVideoResourceWithRawResponse:
|
35
|
+
"""
|
36
|
+
This property can be used as a prefix for any HTTP method call to return
|
37
|
+
the raw response object instead of the parsed content.
|
38
|
+
|
39
|
+
For more information, see https://www.github.com/runwayml/sdk-python#accessing-raw-response-data-eg-headers
|
40
|
+
"""
|
41
|
+
return TextToVideoResourceWithRawResponse(self)
|
42
|
+
|
43
|
+
@cached_property
|
44
|
+
def with_streaming_response(self) -> TextToVideoResourceWithStreamingResponse:
|
45
|
+
"""
|
46
|
+
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
|
47
|
+
|
48
|
+
For more information, see https://www.github.com/runwayml/sdk-python#with_streaming_response
|
49
|
+
"""
|
50
|
+
return TextToVideoResourceWithStreamingResponse(self)
|
51
|
+
|
52
|
+
def create(
|
53
|
+
self,
|
54
|
+
*,
|
55
|
+
duration: Literal[8],
|
56
|
+
model: Literal["veo3"],
|
57
|
+
prompt_text: str,
|
58
|
+
ratio: Literal["1280:720", "720:1280"],
|
59
|
+
seed: int | NotGiven = NOT_GIVEN,
|
60
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
61
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
62
|
+
extra_headers: Headers | None = None,
|
63
|
+
extra_query: Query | None = None,
|
64
|
+
extra_body: Body | None = None,
|
65
|
+
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
|
66
|
+
) -> NewTaskCreatedResponse:
|
67
|
+
"""
|
68
|
+
This endpoint will start a new task to generate a video from a text prompt.
|
69
|
+
|
70
|
+
Args:
|
71
|
+
duration: Veo 3 videos must be 8 seconds long.
|
72
|
+
|
73
|
+
model: The model variant to use.
|
74
|
+
|
75
|
+
prompt_text: A non-empty string up to 1000 characters (measured in UTF-16 code units). This
|
76
|
+
should describe in detail what should appear in the output.
|
77
|
+
|
78
|
+
ratio: A string representing the aspect ratio of the output video.
|
79
|
+
|
80
|
+
seed: If unspecified, a random number is chosen. Varying the seed integer is a way to
|
81
|
+
get different results for the same other request parameters. Using the same seed
|
82
|
+
integer for an identical request will produce similar results.
|
83
|
+
|
84
|
+
extra_headers: Send extra headers
|
85
|
+
|
86
|
+
extra_query: Add additional query parameters to the request
|
87
|
+
|
88
|
+
extra_body: Add additional JSON properties to the request
|
89
|
+
|
90
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
91
|
+
"""
|
92
|
+
return self._post(
|
93
|
+
"/v1/text_to_video",
|
94
|
+
body=maybe_transform(
|
95
|
+
{
|
96
|
+
"duration": duration,
|
97
|
+
"model": model,
|
98
|
+
"prompt_text": prompt_text,
|
99
|
+
"ratio": ratio,
|
100
|
+
"seed": seed,
|
101
|
+
},
|
102
|
+
text_to_video_create_params.TextToVideoCreateParams,
|
103
|
+
),
|
104
|
+
options=make_request_options(
|
105
|
+
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
106
|
+
),
|
107
|
+
cast_to=create_waitable_resource(TextToVideoCreateResponse, self._client),
|
108
|
+
)
|
109
|
+
|
110
|
+
|
111
|
+
class AsyncTextToVideoResource(AsyncAPIResource):
|
112
|
+
@cached_property
|
113
|
+
def with_raw_response(self) -> AsyncTextToVideoResourceWithRawResponse:
|
114
|
+
"""
|
115
|
+
This property can be used as a prefix for any HTTP method call to return
|
116
|
+
the raw response object instead of the parsed content.
|
117
|
+
|
118
|
+
For more information, see https://www.github.com/runwayml/sdk-python#accessing-raw-response-data-eg-headers
|
119
|
+
"""
|
120
|
+
return AsyncTextToVideoResourceWithRawResponse(self)
|
121
|
+
|
122
|
+
@cached_property
|
123
|
+
def with_streaming_response(self) -> AsyncTextToVideoResourceWithStreamingResponse:
|
124
|
+
"""
|
125
|
+
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
|
126
|
+
|
127
|
+
For more information, see https://www.github.com/runwayml/sdk-python#with_streaming_response
|
128
|
+
"""
|
129
|
+
return AsyncTextToVideoResourceWithStreamingResponse(self)
|
130
|
+
|
131
|
+
async def create(
|
132
|
+
self,
|
133
|
+
*,
|
134
|
+
duration: Literal[8],
|
135
|
+
model: Literal["veo3"],
|
136
|
+
prompt_text: str,
|
137
|
+
ratio: Literal["1280:720", "720:1280"],
|
138
|
+
seed: int | NotGiven = NOT_GIVEN,
|
139
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
140
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
141
|
+
extra_headers: Headers | None = None,
|
142
|
+
extra_query: Query | None = None,
|
143
|
+
extra_body: Body | None = None,
|
144
|
+
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
|
145
|
+
) -> AsyncNewTaskCreatedResponse:
|
146
|
+
"""
|
147
|
+
This endpoint will start a new task to generate a video from a text prompt.
|
148
|
+
|
149
|
+
Args:
|
150
|
+
duration: Veo 3 videos must be 8 seconds long.
|
151
|
+
|
152
|
+
model: The model variant to use.
|
153
|
+
|
154
|
+
prompt_text: A non-empty string up to 1000 characters (measured in UTF-16 code units). This
|
155
|
+
should describe in detail what should appear in the output.
|
156
|
+
|
157
|
+
ratio: A string representing the aspect ratio of the output video.
|
158
|
+
|
159
|
+
seed: If unspecified, a random number is chosen. Varying the seed integer is a way to
|
160
|
+
get different results for the same other request parameters. Using the same seed
|
161
|
+
integer for an identical request will produce similar results.
|
162
|
+
|
163
|
+
extra_headers: Send extra headers
|
164
|
+
|
165
|
+
extra_query: Add additional query parameters to the request
|
166
|
+
|
167
|
+
extra_body: Add additional JSON properties to the request
|
168
|
+
|
169
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
170
|
+
"""
|
171
|
+
return await self._post(
|
172
|
+
"/v1/text_to_video",
|
173
|
+
body=await async_maybe_transform(
|
174
|
+
{
|
175
|
+
"duration": duration,
|
176
|
+
"model": model,
|
177
|
+
"prompt_text": prompt_text,
|
178
|
+
"ratio": ratio,
|
179
|
+
"seed": seed,
|
180
|
+
},
|
181
|
+
text_to_video_create_params.TextToVideoCreateParams,
|
182
|
+
),
|
183
|
+
options=make_request_options(
|
184
|
+
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
185
|
+
),
|
186
|
+
cast_to=create_async_waitable_resource(TextToVideoCreateResponse, self._client),
|
187
|
+
)
|
188
|
+
|
189
|
+
|
190
|
+
class TextToVideoResourceWithRawResponse:
    """Sync wrapper exposing `text_to_video` endpoints as raw-`Response` calls."""

    def __init__(self, text_to_video: TextToVideoResource) -> None:
        # Keep a handle on the wrapped resource, then re-expose each endpoint
        # through the raw-response adapter.
        self._text_to_video = text_to_video
        self.create = to_raw_response_wrapper(text_to_video.create)
|
197
|
+
|
198
|
+
|
199
|
+
class AsyncTextToVideoResourceWithRawResponse:
    """Async wrapper exposing `text_to_video` endpoints as raw-`Response` calls."""

    def __init__(self, text_to_video: AsyncTextToVideoResource) -> None:
        # Keep a handle on the wrapped resource, then re-expose each endpoint
        # through the async raw-response adapter.
        self._text_to_video = text_to_video
        self.create = async_to_raw_response_wrapper(text_to_video.create)
|
206
|
+
|
207
|
+
|
208
|
+
class TextToVideoResourceWithStreamingResponse:
    """Sync wrapper exposing `text_to_video` endpoints as streamed-response calls."""

    def __init__(self, text_to_video: TextToVideoResource) -> None:
        # Keep a handle on the wrapped resource, then re-expose each endpoint
        # through the streaming-response adapter.
        self._text_to_video = text_to_video
        self.create = to_streamed_response_wrapper(text_to_video.create)
|
215
|
+
|
216
|
+
|
217
|
+
class AsyncTextToVideoResourceWithStreamingResponse:
    """Async wrapper exposing `text_to_video` endpoints as streamed-response calls."""

    def __init__(self, text_to_video: AsyncTextToVideoResource) -> None:
        # Keep a handle on the wrapped resource, then re-expose each endpoint
        # through the async streaming-response adapter.
        self._text_to_video = text_to_video
        self.create = async_to_streamed_response_wrapper(text_to_video.create)
|
@@ -4,10 +4,12 @@ from __future__ import annotations
|
|
4
4
|
|
5
5
|
from .task_retrieve_response import TaskRetrieveResponse as TaskRetrieveResponse
|
6
6
|
from .text_to_image_create_params import TextToImageCreateParams as TextToImageCreateParams
|
7
|
+
from .text_to_video_create_params import TextToVideoCreateParams as TextToVideoCreateParams
|
7
8
|
from .video_upscale_create_params import VideoUpscaleCreateParams as VideoUpscaleCreateParams
|
8
9
|
from .image_to_video_create_params import ImageToVideoCreateParams as ImageToVideoCreateParams
|
9
10
|
from .video_to_video_create_params import VideoToVideoCreateParams as VideoToVideoCreateParams
|
10
11
|
from .text_to_image_create_response import TextToImageCreateResponse as TextToImageCreateResponse
|
12
|
+
from .text_to_video_create_response import TextToVideoCreateResponse as TextToVideoCreateResponse
|
11
13
|
from .video_upscale_create_response import VideoUpscaleCreateResponse as VideoUpscaleCreateResponse
|
12
14
|
from .image_to_video_create_response import ImageToVideoCreateResponse as ImageToVideoCreateResponse
|
13
15
|
from .organization_retrieve_response import OrganizationRetrieveResponse as OrganizationRetrieveResponse
|
@@ -11,7 +11,7 @@ __all__ = ["ImageToVideoCreateParams", "PromptImagePromptImage", "ContentModerat
|
|
11
11
|
|
12
12
|
|
13
13
|
class ImageToVideoCreateParams(TypedDict, total=False):
|
14
|
-
model: Required[Literal["gen3a_turbo", "
|
14
|
+
model: Required[Literal["gen4_turbo", "gen3a_turbo", "veo3"]]
|
15
15
|
"""The model variant to use."""
|
16
16
|
|
17
17
|
prompt_image: Required[Annotated[Union[str, Iterable[PromptImagePromptImage]], PropertyInfo(alias="promptImage")]]
|
@@ -39,13 +39,26 @@ class ImageToVideoCreateParams(TypedDict, total=False):
|
|
39
39
|
|
40
40
|
- `1280:768`
|
41
41
|
- `768:1280`
|
42
|
+
|
43
|
+
`veo3` supports the following values:
|
44
|
+
|
45
|
+
- `1280:720`
|
46
|
+
- `720:1280`
|
42
47
|
"""
|
43
48
|
|
44
49
|
content_moderation: Annotated[ContentModeration, PropertyInfo(alias="contentModeration")]
|
45
|
-
"""Settings that affect the behavior of the content moderation system.
|
50
|
+
"""Settings that affect the behavior of the content moderation system.
|
46
51
|
|
47
|
-
|
48
|
-
|
52
|
+
This field is allowed only for the following model variants: `gen4_turbo`,
|
53
|
+
`gen3a_turbo`
|
54
|
+
"""
|
55
|
+
|
56
|
+
duration: Literal[5, 8, 10]
|
57
|
+
"""The number of seconds of duration for the output video.
|
58
|
+
|
59
|
+
`veo3` requires a duration of 8. gen4_turbo, gen3a_turbo must specify a duration
|
60
|
+
of 5 or 10.
|
61
|
+
"""
|
49
62
|
|
50
63
|
prompt_text: Annotated[str, PropertyInfo(alias="promptText")]
|
51
64
|
"""A non-empty string up to 1000 characters (measured in UTF-16 code units).
|