runwayml 3.0.6__tar.gz → 3.2.0__tar.gz
This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- runwayml-3.2.0/.release-please-manifest.json +3 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/CHANGELOG.md +26 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/PKG-INFO +19 -1
- {runwayml-3.0.6 → runwayml-3.2.0}/README.md +18 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/SECURITY.md +2 -2
- {runwayml-3.0.6 → runwayml-3.2.0}/api.md +12 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/pyproject.toml +1 -1
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/_client.py +9 -1
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/_version.py +1 -1
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/resources/__init__.py +14 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/resources/image_to_video.py +4 -6
- runwayml-3.2.0/src/runwayml/resources/text_to_image.py +262 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/types/__init__.py +2 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/types/image_to_video_create_params.py +3 -4
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/types/organization_retrieve_response.py +21 -0
- runwayml-3.2.0/src/runwayml/types/text_to_image_create_params.py +85 -0
- runwayml-3.2.0/src/runwayml/types/text_to_image_create_response.py +10 -0
- runwayml-3.2.0/tests/api_resources/test_text_to_image.py +130 -0
- runwayml-3.0.6/.release-please-manifest.json +0 -3
- {runwayml-3.0.6 → runwayml-3.2.0}/.gitignore +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/CONTRIBUTING.md +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/LICENSE +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/bin/check-release-environment +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/bin/publish-pypi +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/examples/.keep +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/mypy.ini +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/noxfile.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/release-please-config.json +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/requirements-dev.lock +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/requirements.lock +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/__init__.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/_base_client.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/_compat.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/_constants.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/_exceptions.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/_files.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/_models.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/_qs.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/_resource.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/_response.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/_streaming.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/_types.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/_utils/__init__.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/_utils/_logs.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/_utils/_proxy.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/_utils/_reflection.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/_utils/_resources_proxy.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/_utils/_streams.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/_utils/_sync.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/_utils/_transform.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/_utils/_typing.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/_utils/_utils.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/lib/.keep +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/py.typed +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/resources/organization.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/resources/tasks.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/types/image_to_video_create_response.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/types/task_retrieve_response.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/tests/__init__.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/tests/api_resources/__init__.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/tests/api_resources/test_image_to_video.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/tests/api_resources/test_organization.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/tests/api_resources/test_tasks.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/tests/conftest.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/tests/sample_file.txt +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/tests/test_client.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/tests/test_deepcopy.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/tests/test_extract_files.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/tests/test_files.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/tests/test_models.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/tests/test_qs.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/tests/test_required_args.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/tests/test_response.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/tests/test_streaming.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/tests/test_transform.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/tests/test_utils/test_proxy.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/tests/test_utils/test_typing.py +0 -0
- {runwayml-3.0.6 → runwayml-3.2.0}/tests/utils.py +0 -0
{runwayml-3.0.6 → runwayml-3.2.0}/CHANGELOG.md

```diff
@@ -1,5 +1,31 @@
 # Changelog
 
+## 3.2.0 (2025-05-29)
+
+Full Changelog: [v3.1.0...v3.2.0](https://github.com/runwayml/sdk-python/compare/v3.1.0...v3.2.0)
+
+### Features
+
+* **api:** Add 720p t2i ratios ([b722686](https://github.com/runwayml/sdk-python/commit/b722686c0e4fa03768ce61380c77fe04c93cfc90))
+
+
+### Chores
+
+* **docs:** grammar improvements ([fbf2481](https://github.com/runwayml/sdk-python/commit/fbf2481cd6da85fb5ce2e6fea66cf8f1233a8e2e))
+
+## 3.1.0 (2025-05-16)
+
+Full Changelog: [v3.0.6...v3.1.0](https://github.com/runwayml/sdk-python/compare/v3.0.6...v3.1.0)
+
+### Features
+
+* **api:** Gen-4 Image (text-to-image) support ([8adcf96](https://github.com/runwayml/sdk-python/commit/8adcf96fa228c8694272175317587bf3c7b9e2fe))
+
+
+### Chores
+
+* **ci:** fix installation instructions ([a04350d](https://github.com/runwayml/sdk-python/commit/a04350dd31c9551695c1856a7f6d700d69960807))
+
 ## 3.0.6 (2025-05-15)
 
 Full Changelog: [v3.0.5...v3.0.6](https://github.com/runwayml/sdk-python/compare/v3.0.5...v3.0.6)
```
{runwayml-3.0.6 → runwayml-3.2.0}/PKG-INFO

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: runwayml
-Version: 3.0.6
+Version: 3.2.0
 Summary: The official Python library for the runwayml API
 Project-URL: Homepage, https://github.com/runwayml/sdk-python
 Project-URL: Repository, https://github.com/runwayml/sdk-python
@@ -114,6 +114,24 @@ Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typ
 
 Typed requests and responses provide autocomplete and documentation within your editor. If you would like to see type errors in VS Code to help catch bugs earlier, set `python.analysis.typeCheckingMode` to `basic`.
 
+## Nested params
+
+Nested parameters are dictionaries, typed using `TypedDict`, for example:
+
+```python
+from runwayml import RunwayML
+
+client = RunwayML()
+
+text_to_image = client.text_to_image.create(
+    model="gen4_image",
+    prompt_text="promptText",
+    ratio="1920:1080",
+    content_moderation={"public_figure_threshold": "auto"},
+)
+print(text_to_image.content_moderation)
+```
+
 ## Handling errors
 
 When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `runwayml.APIConnectionError` is raised.
````
{runwayml-3.0.6 → runwayml-3.2.0}/README.md

````diff
@@ -83,6 +83,24 @@ Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typ
 
 Typed requests and responses provide autocomplete and documentation within your editor. If you would like to see type errors in VS Code to help catch bugs earlier, set `python.analysis.typeCheckingMode` to `basic`.
 
+## Nested params
+
+Nested parameters are dictionaries, typed using `TypedDict`, for example:
+
+```python
+from runwayml import RunwayML
+
+client = RunwayML()
+
+text_to_image = client.text_to_image.create(
+    model="gen4_image",
+    prompt_text="promptText",
+    ratio="1920:1080",
+    content_moderation={"public_figure_threshold": "auto"},
+)
+print(text_to_image.content_moderation)
+```
+
 ## Handling errors
 
 When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `runwayml.APIConnectionError` is raised.
````
{runwayml-3.0.6 → runwayml-3.2.0}/SECURITY.md

```diff
@@ -16,11 +16,11 @@ before making any information public.
 ## Reporting Non-SDK Related Security Issues
 
 If you encounter security issues that are not directly related to SDKs but pertain to the services
-or products provided by RunwayML please follow the respective company's security reporting guidelines.
+or products provided by RunwayML, please follow the respective company's security reporting guidelines.
 
 ### RunwayML Terms and Policies
 
-Please contact dev-feedback@runwayml.com for any questions or concerns regarding security of our services.
+Please contact dev-feedback@runwayml.com for any questions or concerns regarding the security of our services.
 
 ---
 
```
{runwayml-3.0.6 → runwayml-3.2.0}/api.md

````diff
@@ -23,6 +23,18 @@ Methods:
 
 - <code title="post /v1/image_to_video">client.image_to_video.<a href="./src/runwayml/resources/image_to_video.py">create</a>(\*\*<a href="src/runwayml/types/image_to_video_create_params.py">params</a>) -> <a href="./src/runwayml/types/image_to_video_create_response.py">ImageToVideoCreateResponse</a></code>
 
+# TextToImage
+
+Types:
+
+```python
+from runwayml.types import TextToImageCreateResponse
+```
+
+Methods:
+
+- <code title="post /v1/text_to_image">client.text_to_image.<a href="./src/runwayml/resources/text_to_image.py">create</a>(\*\*<a href="src/runwayml/types/text_to_image_create_params.py">params</a>) -> <a href="./src/runwayml/types/text_to_image_create_response.py">TextToImageCreateResponse</a></code>
+
 # Organization
 
 Types:
````
{runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/_client.py

```diff
@@ -21,7 +21,7 @@ from ._types import (
 )
 from ._utils import is_given, get_async_library
 from ._version import __version__
-from .resources import tasks, organization, image_to_video
+from .resources import tasks, organization, text_to_image, image_to_video
 from ._streaming import Stream as Stream, AsyncStream as AsyncStream
 from ._exceptions import RunwayMLError, APIStatusError
 from ._base_client import (
@@ -45,6 +45,7 @@ __all__ = [
 class RunwayML(SyncAPIClient):
     tasks: tasks.TasksResource
     image_to_video: image_to_video.ImageToVideoResource
+    text_to_image: text_to_image.TextToImageResource
     organization: organization.OrganizationResource
     with_raw_response: RunwayMLWithRawResponse
     with_streaming_response: RunwayMLWithStreamedResponse
@@ -111,6 +112,7 @@ class RunwayML(SyncAPIClient):
 
         self.tasks = tasks.TasksResource(self)
         self.image_to_video = image_to_video.ImageToVideoResource(self)
+        self.text_to_image = text_to_image.TextToImageResource(self)
         self.organization = organization.OrganizationResource(self)
         self.with_raw_response = RunwayMLWithRawResponse(self)
         self.with_streaming_response = RunwayMLWithStreamedResponse(self)
@@ -226,6 +228,7 @@ class RunwayML(SyncAPIClient):
 class AsyncRunwayML(AsyncAPIClient):
     tasks: tasks.AsyncTasksResource
     image_to_video: image_to_video.AsyncImageToVideoResource
+    text_to_image: text_to_image.AsyncTextToImageResource
     organization: organization.AsyncOrganizationResource
     with_raw_response: AsyncRunwayMLWithRawResponse
     with_streaming_response: AsyncRunwayMLWithStreamedResponse
@@ -292,6 +295,7 @@ class AsyncRunwayML(AsyncAPIClient):
 
         self.tasks = tasks.AsyncTasksResource(self)
         self.image_to_video = image_to_video.AsyncImageToVideoResource(self)
+        self.text_to_image = text_to_image.AsyncTextToImageResource(self)
         self.organization = organization.AsyncOrganizationResource(self)
         self.with_raw_response = AsyncRunwayMLWithRawResponse(self)
         self.with_streaming_response = AsyncRunwayMLWithStreamedResponse(self)
@@ -408,6 +412,7 @@ class RunwayMLWithRawResponse:
     def __init__(self, client: RunwayML) -> None:
         self.tasks = tasks.TasksResourceWithRawResponse(client.tasks)
         self.image_to_video = image_to_video.ImageToVideoResourceWithRawResponse(client.image_to_video)
+        self.text_to_image = text_to_image.TextToImageResourceWithRawResponse(client.text_to_image)
         self.organization = organization.OrganizationResourceWithRawResponse(client.organization)
 
 
@@ -415,6 +420,7 @@ class AsyncRunwayMLWithRawResponse:
     def __init__(self, client: AsyncRunwayML) -> None:
         self.tasks = tasks.AsyncTasksResourceWithRawResponse(client.tasks)
         self.image_to_video = image_to_video.AsyncImageToVideoResourceWithRawResponse(client.image_to_video)
+        self.text_to_image = text_to_image.AsyncTextToImageResourceWithRawResponse(client.text_to_image)
         self.organization = organization.AsyncOrganizationResourceWithRawResponse(client.organization)
 
 
@@ -422,6 +428,7 @@ class RunwayMLWithStreamedResponse:
     def __init__(self, client: RunwayML) -> None:
         self.tasks = tasks.TasksResourceWithStreamingResponse(client.tasks)
         self.image_to_video = image_to_video.ImageToVideoResourceWithStreamingResponse(client.image_to_video)
+        self.text_to_image = text_to_image.TextToImageResourceWithStreamingResponse(client.text_to_image)
         self.organization = organization.OrganizationResourceWithStreamingResponse(client.organization)
 
 
@@ -429,6 +436,7 @@ class AsyncRunwayMLWithStreamedResponse:
     def __init__(self, client: AsyncRunwayML) -> None:
         self.tasks = tasks.AsyncTasksResourceWithStreamingResponse(client.tasks)
         self.image_to_video = image_to_video.AsyncImageToVideoResourceWithStreamingResponse(client.image_to_video)
+        self.text_to_image = text_to_image.AsyncTextToImageResourceWithStreamingResponse(client.text_to_image)
         self.organization = organization.AsyncOrganizationResourceWithStreamingResponse(client.organization)
 
 
```
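For orientation, a minimal sketch of how the newly wired `text_to_image` attribute might be used from the async client (not shown in the diff; it assumes API credentials are configured in the environment, and the prompt is a placeholder):

```python
import asyncio

from runwayml import AsyncRunwayML


async def main() -> None:
    # Assumes API credentials are configured in the environment.
    client = AsyncRunwayML()
    task = await client.text_to_image.create(
        model="gen4_image",
        prompt_text="A lighthouse on a cliff at dusk",
        ratio="1920:1080",
    )
    # The create call only enqueues a task; the response carries its id.
    print(task.id)


asyncio.run(main())
```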
{runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/resources/__init__.py

```diff
@@ -16,6 +16,14 @@ from .organization import (
     OrganizationResourceWithStreamingResponse,
     AsyncOrganizationResourceWithStreamingResponse,
 )
+from .text_to_image import (
+    TextToImageResource,
+    AsyncTextToImageResource,
+    TextToImageResourceWithRawResponse,
+    AsyncTextToImageResourceWithRawResponse,
+    TextToImageResourceWithStreamingResponse,
+    AsyncTextToImageResourceWithStreamingResponse,
+)
 from .image_to_video import (
     ImageToVideoResource,
     AsyncImageToVideoResource,
@@ -38,6 +46,12 @@ __all__ = [
     "AsyncImageToVideoResourceWithRawResponse",
     "ImageToVideoResourceWithStreamingResponse",
     "AsyncImageToVideoResourceWithStreamingResponse",
+    "TextToImageResource",
+    "AsyncTextToImageResource",
+    "TextToImageResourceWithRawResponse",
+    "AsyncTextToImageResourceWithRawResponse",
+    "TextToImageResourceWithStreamingResponse",
+    "AsyncTextToImageResourceWithStreamingResponse",
     "OrganizationResource",
     "AsyncOrganizationResource",
     "OrganizationResourceWithRawResponse",
```
{runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/resources/image_to_video.py

```diff
@@ -88,9 +88,8 @@ class ImageToVideoResource(SyncAPIResource):
 
           duration: The number of seconds of duration for the output video.
 
-          prompt_text: A non-empty string up to 1000 UTF-16 code
-
-              should appear in the output.
+          prompt_text: A non-empty string up to 1000 characters (measured in UTF-16 code units). This
+              should describe in detail what should appear in the output.
 
           seed: If unspecified, a random number is chosen. Varying the seed integer is a way to
               get different results for the same other request parameters. Using the same seed
@@ -188,9 +187,8 @@ class AsyncImageToVideoResource(AsyncAPIResource):
 
           duration: The number of seconds of duration for the output video.
 
-          prompt_text: A non-empty string up to 1000 UTF-16 code
-
-              should appear in the output.
+          prompt_text: A non-empty string up to 1000 characters (measured in UTF-16 code units). This
+              should describe in detail what should appear in the output.
 
           seed: If unspecified, a random number is chosen. Varying the seed integer is a way to
               get different results for the same other request parameters. Using the same seed
```
runwayml-3.2.0/src/runwayml/resources/text_to_image.py (new file)

```python
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Iterable
from typing_extensions import Literal

import httpx

from ..types import text_to_image_create_params
from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from .._utils import maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
    to_raw_response_wrapper,
    to_streamed_response_wrapper,
    async_to_raw_response_wrapper,
    async_to_streamed_response_wrapper,
)
from .._base_client import make_request_options
from ..types.text_to_image_create_response import TextToImageCreateResponse

__all__ = ["TextToImageResource", "AsyncTextToImageResource"]


class TextToImageResource(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> TextToImageResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/runwayml/sdk-python#accessing-raw-response-data-eg-headers
        """
        return TextToImageResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> TextToImageResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/runwayml/sdk-python#with_streaming_response
        """
        return TextToImageResourceWithStreamingResponse(self)

    def create(
        self,
        *,
        model: Literal["gen4_image"],
        prompt_text: str,
        ratio: Literal[
            "1920:1080",
            "1080:1920",
            "1024:1024",
            "1360:768",
            "1080:1080",
            "1168:880",
            "1440:1080",
            "1080:1440",
            "1808:768",
            "2112:912",
            "1280:720",
            "720:1280",
            "720:720",
            "960:720",
            "720:960",
            "1680:720",
        ],
        content_moderation: text_to_image_create_params.ContentModeration | NotGiven = NOT_GIVEN,
        reference_images: Iterable[text_to_image_create_params.ReferenceImage] | NotGiven = NOT_GIVEN,
        seed: int | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> TextToImageCreateResponse:
        """
        This endpoint will start a new task to generate images from text.

        Args:
          model: The model variant to use.

          prompt_text: A non-empty string up to 1000 characters (measured in UTF-16 code units). This
              should describe in detail what should appear in the output.

          ratio: The resolution of the output image(s).

          content_moderation: Settings that affect the behavior of the content moderation system.

          reference_images: An array of images to be used as references for the generated image output. Up
              to three reference images can be provided.

          seed: If unspecified, a random number is chosen. Varying the seed integer is a way to
              get different results for the same other request parameters. Using the same seed
              integer for an identical request will produce similar results.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._post(
            "/v1/text_to_image",
            body=maybe_transform(
                {
                    "model": model,
                    "prompt_text": prompt_text,
                    "ratio": ratio,
                    "content_moderation": content_moderation,
                    "reference_images": reference_images,
                    "seed": seed,
                },
                text_to_image_create_params.TextToImageCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=TextToImageCreateResponse,
        )


class AsyncTextToImageResource(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncTextToImageResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/runwayml/sdk-python#accessing-raw-response-data-eg-headers
        """
        return AsyncTextToImageResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncTextToImageResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/runwayml/sdk-python#with_streaming_response
        """
        return AsyncTextToImageResourceWithStreamingResponse(self)

    async def create(
        self,
        *,
        model: Literal["gen4_image"],
        prompt_text: str,
        ratio: Literal[
            "1920:1080",
            "1080:1920",
            "1024:1024",
            "1360:768",
            "1080:1080",
            "1168:880",
            "1440:1080",
            "1080:1440",
            "1808:768",
            "2112:912",
            "1280:720",
            "720:1280",
            "720:720",
            "960:720",
            "720:960",
            "1680:720",
        ],
        content_moderation: text_to_image_create_params.ContentModeration | NotGiven = NOT_GIVEN,
        reference_images: Iterable[text_to_image_create_params.ReferenceImage] | NotGiven = NOT_GIVEN,
        seed: int | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> TextToImageCreateResponse:
        """
        This endpoint will start a new task to generate images from text.

        Args:
          model: The model variant to use.

          prompt_text: A non-empty string up to 1000 characters (measured in UTF-16 code units). This
              should describe in detail what should appear in the output.

          ratio: The resolution of the output image(s).

          content_moderation: Settings that affect the behavior of the content moderation system.

          reference_images: An array of images to be used as references for the generated image output. Up
              to three reference images can be provided.

          seed: If unspecified, a random number is chosen. Varying the seed integer is a way to
              get different results for the same other request parameters. Using the same seed
              integer for an identical request will produce similar results.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return await self._post(
            "/v1/text_to_image",
            body=await async_maybe_transform(
                {
                    "model": model,
                    "prompt_text": prompt_text,
                    "ratio": ratio,
                    "content_moderation": content_moderation,
                    "reference_images": reference_images,
                    "seed": seed,
                },
                text_to_image_create_params.TextToImageCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=TextToImageCreateResponse,
        )


class TextToImageResourceWithRawResponse:
    def __init__(self, text_to_image: TextToImageResource) -> None:
        self._text_to_image = text_to_image

        self.create = to_raw_response_wrapper(
            text_to_image.create,
        )


class AsyncTextToImageResourceWithRawResponse:
    def __init__(self, text_to_image: AsyncTextToImageResource) -> None:
        self._text_to_image = text_to_image

        self.create = async_to_raw_response_wrapper(
            text_to_image.create,
        )


class TextToImageResourceWithStreamingResponse:
    def __init__(self, text_to_image: TextToImageResource) -> None:
        self._text_to_image = text_to_image

        self.create = to_streamed_response_wrapper(
            text_to_image.create,
        )


class AsyncTextToImageResourceWithStreamingResponse:
    def __init__(self, text_to_image: AsyncTextToImageResource) -> None:
        self._text_to_image = text_to_image

        self.create = async_to_streamed_response_wrapper(
            text_to_image.create,
        )
```
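The generated resource follows the same raw- and streaming-response pattern as the existing image_to_video resource. An illustrative sketch of those accessors (not part of the diff; prompt and ratio are placeholder values):

```python
from runwayml import RunwayML

client = RunwayML()

# .with_raw_response wraps the parsed model together with the underlying HTTP response.
raw = client.text_to_image.with_raw_response.create(
    model="gen4_image",
    prompt_text="A watercolor map of an imaginary city",
    ratio="1024:1024",
)
print(raw.http_request.headers.get("X-Stainless-Lang"))
print(raw.parse().id)

# .with_streaming_response defers reading the body until the context is entered.
with client.text_to_image.with_streaming_response.create(
    model="gen4_image",
    prompt_text="A watercolor map of an imaginary city",
    ratio="1024:1024",
) as response:
    print(response.parse().id)
```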
{runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/types/__init__.py

```diff
@@ -3,6 +3,8 @@
 from __future__ import annotations
 
 from .task_retrieve_response import TaskRetrieveResponse as TaskRetrieveResponse
+from .text_to_image_create_params import TextToImageCreateParams as TextToImageCreateParams
 from .image_to_video_create_params import ImageToVideoCreateParams as ImageToVideoCreateParams
+from .text_to_image_create_response import TextToImageCreateResponse as TextToImageCreateResponse
 from .image_to_video_create_response import ImageToVideoCreateResponse as ImageToVideoCreateResponse
 from .organization_retrieve_response import OrganizationRetrieveResponse as OrganizationRetrieveResponse
```
{runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/types/image_to_video_create_params.py

```diff
@@ -45,10 +45,9 @@ class ImageToVideoCreateParams(TypedDict, total=False):
     """The number of seconds of duration for the output video."""
 
     prompt_text: Annotated[str, PropertyInfo(alias="promptText")]
-    """
-
-
-    should appear in the output.
+    """A non-empty string up to 1000 characters (measured in UTF-16 code units).
+
+    This should describe in detail what should appear in the output.
     """
 
     seed: int
```
{runwayml-3.0.6 → runwayml-3.2.0}/src/runwayml/types/organization_retrieve_response.py

```diff
@@ -11,10 +11,12 @@ __all__ = [
     "Tier",
     "TierModels",
     "TierModelsGen3aTurbo",
+    "TierModelsGen4Image",
     "TierModelsGen4Turbo",
     "Usage",
     "UsageModels",
     "UsageModelsGen3aTurbo",
+    "UsageModelsGen4Image",
     "UsageModelsGen4Turbo",
 ]
 
@@ -27,6 +29,14 @@ class TierModelsGen3aTurbo(BaseModel):
     """The maximum number of generations that can be created each day for this model."""
 
 
+class TierModelsGen4Image(BaseModel):
+    max_concurrent_generations: int = FieldInfo(alias="maxConcurrentGenerations")
+    """The maximum number of generations that can be run concurrently for this model."""
+
+    max_daily_generations: int = FieldInfo(alias="maxDailyGenerations")
+    """The maximum number of generations that can be created each day for this model."""
+
+
 class TierModelsGen4Turbo(BaseModel):
     max_concurrent_generations: int = FieldInfo(alias="maxConcurrentGenerations")
     """The maximum number of generations that can be run concurrently for this model."""
@@ -39,6 +49,9 @@ class TierModels(BaseModel):
     gen3a_turbo: Optional[TierModelsGen3aTurbo] = None
     """Limits associated with the gen3a_turbo model."""
 
+    gen4_image: Optional[TierModelsGen4Image] = None
+    """Limits associated with the gen4_image model."""
+
     gen4_turbo: Optional[TierModelsGen4Turbo] = None
     """Limits associated with the gen4_turbo model."""
 
@@ -56,6 +69,11 @@ class UsageModelsGen3aTurbo(BaseModel):
     """The number of generations that have been run for this model in the past day."""
 
 
+class UsageModelsGen4Image(BaseModel):
+    daily_generations: int = FieldInfo(alias="dailyGenerations")
+    """The number of generations that have been run for this model in the past day."""
+
+
 class UsageModelsGen4Turbo(BaseModel):
     daily_generations: int = FieldInfo(alias="dailyGenerations")
     """The number of generations that have been run for this model in the past day."""
@@ -65,6 +83,9 @@ class UsageModels(BaseModel):
     gen3a_turbo: Optional[UsageModelsGen3aTurbo] = None
     """Usage data for the gen3a_turbo model."""
 
+    gen4_image: Optional[UsageModelsGen4Image] = None
+    """Usage data for the gen4_image model."""
+
     gen4_turbo: Optional[UsageModelsGen4Turbo] = None
     """Usage data for the gen4_turbo model."""
 
```
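For context, a sketch of how the new gen4_image limits and usage might be read (not part of the diff; the `tier.models` / `usage.models` attribute paths are assumptions inferred from the model names in this file):

```python
from runwayml import RunwayML

client = RunwayML()
org = client.organization.retrieve()

# Attribute paths below are assumptions based on the type names above.
gen4_image_tier = org.tier.models.gen4_image
if gen4_image_tier is not None:
    print("gen4_image daily limit:", gen4_image_tier.max_daily_generations)

gen4_image_usage = org.usage.models.gen4_image
if gen4_image_usage is not None:
    print("gen4_image generations today:", gen4_image_usage.daily_generations)
```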
runwayml-3.2.0/src/runwayml/types/text_to_image_create_params.py (new file)

```python
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Iterable
from typing_extensions import Literal, Required, Annotated, TypedDict

from .._utils import PropertyInfo

__all__ = ["TextToImageCreateParams", "ContentModeration", "ReferenceImage"]


class TextToImageCreateParams(TypedDict, total=False):
    model: Required[Literal["gen4_image"]]
    """The model variant to use."""

    prompt_text: Required[Annotated[str, PropertyInfo(alias="promptText")]]
    """A non-empty string up to 1000 characters (measured in UTF-16 code units).

    This should describe in detail what should appear in the output.
    """

    ratio: Required[
        Literal[
            "1920:1080",
            "1080:1920",
            "1024:1024",
            "1360:768",
            "1080:1080",
            "1168:880",
            "1440:1080",
            "1080:1440",
            "1808:768",
            "2112:912",
            "1280:720",
            "720:1280",
            "720:720",
            "960:720",
            "720:960",
            "1680:720",
        ]
    ]
    """The resolution of the output image(s)."""

    content_moderation: Annotated[ContentModeration, PropertyInfo(alias="contentModeration")]
    """Settings that affect the behavior of the content moderation system."""

    reference_images: Annotated[Iterable[ReferenceImage], PropertyInfo(alias="referenceImages")]
    """An array of images to be used as references for the generated image output.

    Up to three reference images can be provided.
    """

    seed: int
    """If unspecified, a random number is chosen.

    Varying the seed integer is a way to get different results for the same other
    request parameters. Using the same seed integer for an identical request will
    produce similar results.
    """


class ContentModeration(TypedDict, total=False):
    public_figure_threshold: Annotated[Literal["auto", "low"], PropertyInfo(alias="publicFigureThreshold")]
    """
    When set to `low`, the content moderation system will be less strict about
    preventing generations that include recognizable public figures.
    """


class ReferenceImage(TypedDict, total=False):
    uri: Required[str]
    """
    A HTTPS URL or data URI containing an encoded image to be used as reference for
    the generated output image. See [our docs](/assets/inputs#images) on image
    inputs for more information.
    """

    tag: str
    """A name used to refer to the image reference, from 3 to 16 characters in length.

    Tags must be alphanumeric (plus underscores) and start with a letter. You can
    refer to the reference image's tag in the prompt text with at-mention syntax:
    `@tag`. Tags are case-sensitive.
    """
```
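A hypothetical request exercising the `reference_images` and `@tag` mention described above (not part of the diff; the URL, tag, and prompt are invented for illustration):

```python
from runwayml import RunwayML

client = RunwayML()

task = client.text_to_image.create(
    model="gen4_image",
    # The @style_ref mention refers to the tagged reference image below.
    prompt_text="A portrait in the style of @style_ref, rendered as a 1970s film still",
    ratio="1168:880",
    reference_images=[
        {
            # Placeholder URL; a data URI would also be accepted per the docstring.
            "uri": "https://example.com/style-reference.png",
            "tag": "style_ref",  # 3-16 chars, alphanumeric/underscores, starts with a letter
        }
    ],
    content_moderation={"public_figure_threshold": "auto"},
)
print(task.id)
```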
runwayml-3.2.0/src/runwayml/types/text_to_image_create_response.py (new file)

```python
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from .._models import BaseModel

__all__ = ["TextToImageCreateResponse"]


class TextToImageCreateResponse(BaseModel):
    id: str
    """The ID of the newly created task."""
```
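The create response only carries the id of the queued task, so results would be fetched by polling the tasks resource, as with image_to_video. A rough sketch (not part of the diff; the `status` values and `output` field on the retrieved task are assumptions carried over from the existing task flow):

```python
import time

from runwayml import RunwayML

client = RunwayML()

task = client.text_to_image.create(
    model="gen4_image",
    prompt_text="A lighthouse on a cliff at dusk",
    ratio="1920:1080",
)

# Poll until the task reaches a terminal state (status names assumed).
while True:
    detail = client.tasks.retrieve(task.id)
    if detail.status in ("SUCCEEDED", "FAILED"):
        break
    time.sleep(5)

print(detail.status, getattr(detail, "output", None))
```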
runwayml-3.2.0/tests/api_resources/test_text_to_image.py (new file)

```python
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

import os
from typing import Any, cast

import pytest

from runwayml import RunwayML, AsyncRunwayML
from tests.utils import assert_matches_type
from runwayml.types import TextToImageCreateResponse

base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")


class TestTextToImage:
    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

    @parametrize
    def test_method_create(self, client: RunwayML) -> None:
        text_to_image = client.text_to_image.create(
            model="gen4_image",
            prompt_text="promptText",
            ratio="1920:1080",
        )
        assert_matches_type(TextToImageCreateResponse, text_to_image, path=["response"])

    @parametrize
    def test_method_create_with_all_params(self, client: RunwayML) -> None:
        text_to_image = client.text_to_image.create(
            model="gen4_image",
            prompt_text="promptText",
            ratio="1920:1080",
            content_moderation={"public_figure_threshold": "auto"},
            reference_images=[
                {
                    "uri": "https://example.com",
                    "tag": "tag",
                }
            ],
            seed=0,
        )
        assert_matches_type(TextToImageCreateResponse, text_to_image, path=["response"])

    @parametrize
    def test_raw_response_create(self, client: RunwayML) -> None:
        response = client.text_to_image.with_raw_response.create(
            model="gen4_image",
            prompt_text="promptText",
            ratio="1920:1080",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        text_to_image = response.parse()
        assert_matches_type(TextToImageCreateResponse, text_to_image, path=["response"])

    @parametrize
    def test_streaming_response_create(self, client: RunwayML) -> None:
        with client.text_to_image.with_streaming_response.create(
            model="gen4_image",
            prompt_text="promptText",
            ratio="1920:1080",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            text_to_image = response.parse()
            assert_matches_type(TextToImageCreateResponse, text_to_image, path=["response"])

        assert cast(Any, response.is_closed) is True


class TestAsyncTextToImage:
    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])

    @parametrize
    async def test_method_create(self, async_client: AsyncRunwayML) -> None:
        text_to_image = await async_client.text_to_image.create(
            model="gen4_image",
            prompt_text="promptText",
            ratio="1920:1080",
        )
        assert_matches_type(TextToImageCreateResponse, text_to_image, path=["response"])

    @parametrize
    async def test_method_create_with_all_params(self, async_client: AsyncRunwayML) -> None:
        text_to_image = await async_client.text_to_image.create(
            model="gen4_image",
            prompt_text="promptText",
            ratio="1920:1080",
            content_moderation={"public_figure_threshold": "auto"},
            reference_images=[
                {
                    "uri": "https://example.com",
                    "tag": "tag",
                }
            ],
            seed=0,
        )
        assert_matches_type(TextToImageCreateResponse, text_to_image, path=["response"])

    @parametrize
    async def test_raw_response_create(self, async_client: AsyncRunwayML) -> None:
        response = await async_client.text_to_image.with_raw_response.create(
            model="gen4_image",
            prompt_text="promptText",
            ratio="1920:1080",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        text_to_image = await response.parse()
        assert_matches_type(TextToImageCreateResponse, text_to_image, path=["response"])

    @parametrize
    async def test_streaming_response_create(self, async_client: AsyncRunwayML) -> None:
        async with async_client.text_to_image.with_streaming_response.create(
            model="gen4_image",
            prompt_text="promptText",
            ratio="1920:1080",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            text_to_image = await response.parse()
            assert_matches_type(TextToImageCreateResponse, text_to_image, path=["response"])

        assert cast(Any, response.is_closed) is True
```
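The new tests reuse the existing pattern of running against a local mock of the API (note the `TEST_API_BASE_URL` default of `http://127.0.0.1:4010`). A sketch of invoking just this module, assuming such a mock server is already running:

```python
import os

import pytest

# Point the suite at a locally running mock of the API (assumed to be started separately).
os.environ.setdefault("TEST_API_BASE_URL", "http://127.0.0.1:4010")

# Run only the new text_to_image tests.
raise SystemExit(pytest.main(["-q", "tests/api_resources/test_text_to_image.py"]))
```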