runwayml 3.12.1__py3-none-any.whl → 3.14.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
runwayml/_client.py CHANGED
@@ -25,6 +25,7 @@ from .resources import (
     tasks,
     organization,
     text_to_image,
+    text_to_video,
     video_upscale,
     image_to_video,
     video_to_video,
@@ -54,6 +55,7 @@ class RunwayML(SyncAPIClient):
     tasks: tasks.TasksResource
     image_to_video: image_to_video.ImageToVideoResource
     video_to_video: video_to_video.VideoToVideoResource
+    text_to_video: text_to_video.TextToVideoResource
     text_to_image: text_to_image.TextToImageResource
     video_upscale: video_upscale.VideoUpscaleResource
     character_performance: character_performance.CharacterPerformanceResource
@@ -124,6 +126,7 @@ class RunwayML(SyncAPIClient):
         self.tasks = tasks.TasksResource(self)
         self.image_to_video = image_to_video.ImageToVideoResource(self)
         self.video_to_video = video_to_video.VideoToVideoResource(self)
+        self.text_to_video = text_to_video.TextToVideoResource(self)
         self.text_to_image = text_to_image.TextToImageResource(self)
         self.video_upscale = video_upscale.VideoUpscaleResource(self)
         self.character_performance = character_performance.CharacterPerformanceResource(self)
@@ -243,6 +246,7 @@ class AsyncRunwayML(AsyncAPIClient):
     tasks: tasks.AsyncTasksResource
     image_to_video: image_to_video.AsyncImageToVideoResource
     video_to_video: video_to_video.AsyncVideoToVideoResource
+    text_to_video: text_to_video.AsyncTextToVideoResource
     text_to_image: text_to_image.AsyncTextToImageResource
     video_upscale: video_upscale.AsyncVideoUpscaleResource
     character_performance: character_performance.AsyncCharacterPerformanceResource
@@ -313,6 +317,7 @@ class AsyncRunwayML(AsyncAPIClient):
         self.tasks = tasks.AsyncTasksResource(self)
         self.image_to_video = image_to_video.AsyncImageToVideoResource(self)
         self.video_to_video = video_to_video.AsyncVideoToVideoResource(self)
+        self.text_to_video = text_to_video.AsyncTextToVideoResource(self)
         self.text_to_image = text_to_image.AsyncTextToImageResource(self)
         self.video_upscale = video_upscale.AsyncVideoUpscaleResource(self)
         self.character_performance = character_performance.AsyncCharacterPerformanceResource(self)
@@ -433,6 +438,7 @@ class RunwayMLWithRawResponse:
         self.tasks = tasks.TasksResourceWithRawResponse(client.tasks)
         self.image_to_video = image_to_video.ImageToVideoResourceWithRawResponse(client.image_to_video)
         self.video_to_video = video_to_video.VideoToVideoResourceWithRawResponse(client.video_to_video)
+        self.text_to_video = text_to_video.TextToVideoResourceWithRawResponse(client.text_to_video)
         self.text_to_image = text_to_image.TextToImageResourceWithRawResponse(client.text_to_image)
         self.video_upscale = video_upscale.VideoUpscaleResourceWithRawResponse(client.video_upscale)
         self.character_performance = character_performance.CharacterPerformanceResourceWithRawResponse(
@@ -446,6 +452,7 @@ class AsyncRunwayMLWithRawResponse:
         self.tasks = tasks.AsyncTasksResourceWithRawResponse(client.tasks)
         self.image_to_video = image_to_video.AsyncImageToVideoResourceWithRawResponse(client.image_to_video)
         self.video_to_video = video_to_video.AsyncVideoToVideoResourceWithRawResponse(client.video_to_video)
+        self.text_to_video = text_to_video.AsyncTextToVideoResourceWithRawResponse(client.text_to_video)
         self.text_to_image = text_to_image.AsyncTextToImageResourceWithRawResponse(client.text_to_image)
         self.video_upscale = video_upscale.AsyncVideoUpscaleResourceWithRawResponse(client.video_upscale)
         self.character_performance = character_performance.AsyncCharacterPerformanceResourceWithRawResponse(
@@ -459,6 +466,7 @@ class RunwayMLWithStreamedResponse:
         self.tasks = tasks.TasksResourceWithStreamingResponse(client.tasks)
         self.image_to_video = image_to_video.ImageToVideoResourceWithStreamingResponse(client.image_to_video)
         self.video_to_video = video_to_video.VideoToVideoResourceWithStreamingResponse(client.video_to_video)
+        self.text_to_video = text_to_video.TextToVideoResourceWithStreamingResponse(client.text_to_video)
         self.text_to_image = text_to_image.TextToImageResourceWithStreamingResponse(client.text_to_image)
         self.video_upscale = video_upscale.VideoUpscaleResourceWithStreamingResponse(client.video_upscale)
         self.character_performance = character_performance.CharacterPerformanceResourceWithStreamingResponse(
@@ -472,6 +480,7 @@ class AsyncRunwayMLWithStreamedResponse:
         self.tasks = tasks.AsyncTasksResourceWithStreamingResponse(client.tasks)
         self.image_to_video = image_to_video.AsyncImageToVideoResourceWithStreamingResponse(client.image_to_video)
         self.video_to_video = video_to_video.AsyncVideoToVideoResourceWithStreamingResponse(client.video_to_video)
+        self.text_to_video = text_to_video.AsyncTextToVideoResourceWithStreamingResponse(client.text_to_video)
         self.text_to_image = text_to_image.AsyncTextToImageResourceWithStreamingResponse(client.text_to_image)
         self.video_upscale = video_upscale.AsyncVideoUpscaleResourceWithStreamingResponse(client.video_upscale)
         self.character_performance = character_performance.AsyncCharacterPerformanceResourceWithStreamingResponse(
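
The client wiring above makes the new resource available as `text_to_video` on both `RunwayML` and `AsyncRunwayML`, including the raw- and streamed-response wrappers. A minimal sketch of the async path, not part of the diff; it assumes the API key is picked up from the environment as in earlier releases, and the prompt text is a placeholder:

import asyncio

from runwayml import AsyncRunwayML


async def main() -> None:
    client = AsyncRunwayML()  # assumes the API key is configured via the environment
    task = await client.text_to_video.create(
        model="veo3",
        duration=8,
        ratio="1280:720",
        prompt_text="A slow dolly shot down a rain-soaked neon alley at night.",
    )
    print(task.id)  # ID of the newly created task


asyncio.run(main())
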
runwayml/_version.py CHANGED
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

 __title__ = "runwayml"
-__version__ = "3.12.1"  # x-release-please-version
+__version__ = "3.14.0"  # x-release-please-version
runwayml/resources/__init__.py CHANGED
@@ -24,6 +24,14 @@ from .text_to_image import (
     TextToImageResourceWithStreamingResponse,
     AsyncTextToImageResourceWithStreamingResponse,
 )
+from .text_to_video import (
+    TextToVideoResource,
+    AsyncTextToVideoResource,
+    TextToVideoResourceWithRawResponse,
+    AsyncTextToVideoResourceWithRawResponse,
+    TextToVideoResourceWithStreamingResponse,
+    AsyncTextToVideoResourceWithStreamingResponse,
+)
 from .video_upscale import (
     VideoUpscaleResource,
     AsyncVideoUpscaleResource,
@@ -76,6 +84,12 @@ __all__ = [
     "AsyncVideoToVideoResourceWithRawResponse",
     "VideoToVideoResourceWithStreamingResponse",
     "AsyncVideoToVideoResourceWithStreamingResponse",
+    "TextToVideoResource",
+    "AsyncTextToVideoResource",
+    "TextToVideoResourceWithRawResponse",
+    "AsyncTextToVideoResourceWithRawResponse",
+    "TextToVideoResourceWithStreamingResponse",
+    "AsyncTextToVideoResourceWithStreamingResponse",
     "TextToImageResource",
     "AsyncTextToImageResource",
     "TextToImageResourceWithRawResponse",
runwayml/resources/image_to_video.py CHANGED
@@ -53,11 +53,11 @@ class ImageToVideoResource(SyncAPIResource):
     def create(
         self,
         *,
-        model: Literal["gen3a_turbo", "gen4_turbo"],
+        model: Literal["gen4_turbo", "gen3a_turbo", "veo3"],
         prompt_image: Union[str, Iterable[image_to_video_create_params.PromptImagePromptImage]],
         ratio: Literal["1280:720", "720:1280", "1104:832", "832:1104", "960:960", "1584:672", "1280:768", "768:1280"],
         content_moderation: image_to_video_create_params.ContentModeration | NotGiven = NOT_GIVEN,
-        duration: Literal[5, 10] | NotGiven = NOT_GIVEN,
+        duration: Literal[5, 8, 10] | NotGiven = NOT_GIVEN,
         prompt_text: str | NotGiven = NOT_GIVEN,
         seed: int | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -93,9 +93,18 @@ class ImageToVideoResource(SyncAPIResource):
               - `1280:768`
               - `768:1280`

+              `veo3` supports the following values:
+
+              - `1280:720`
+              - `720:1280`
+
           content_moderation: Settings that affect the behavior of the content moderation system.

-          duration: The number of seconds of duration for the output video.
+              This field is allowed only for the following model variants: `gen4_turbo`,
+              `gen3a_turbo`
+
+          duration: The number of seconds of duration for the output video. `veo3` requires a
+              duration of 8. gen4_turbo, gen3a_turbo must specify a duration of 5 or 10.

           prompt_text: A non-empty string up to 1000 characters (measured in UTF-16 code units). This
               should describe in detail what should appear in the output.
@@ -156,11 +165,11 @@ class AsyncImageToVideoResource(AsyncAPIResource):
     async def create(
         self,
         *,
-        model: Literal["gen3a_turbo", "gen4_turbo"],
+        model: Literal["gen4_turbo", "gen3a_turbo", "veo3"],
         prompt_image: Union[str, Iterable[image_to_video_create_params.PromptImagePromptImage]],
         ratio: Literal["1280:720", "720:1280", "1104:832", "832:1104", "960:960", "1584:672", "1280:768", "768:1280"],
         content_moderation: image_to_video_create_params.ContentModeration | NotGiven = NOT_GIVEN,
-        duration: Literal[5, 10] | NotGiven = NOT_GIVEN,
+        duration: Literal[5, 8, 10] | NotGiven = NOT_GIVEN,
         prompt_text: str | NotGiven = NOT_GIVEN,
         seed: int | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -196,9 +205,18 @@ class AsyncImageToVideoResource(AsyncAPIResource):
               - `1280:768`
               - `768:1280`

+              `veo3` supports the following values:
+
+              - `1280:720`
+              - `720:1280`
+
           content_moderation: Settings that affect the behavior of the content moderation system.

-          duration: The number of seconds of duration for the output video.
+              This field is allowed only for the following model variants: `gen4_turbo`,
+              `gen3a_turbo`
+
+          duration: The number of seconds of duration for the output video. `veo3` requires a
+              duration of 8. gen4_turbo, gen3a_turbo must specify a duration of 5 or 10.

           prompt_text: A non-empty string up to 1000 characters (measured in UTF-16 code units). This
               should describe in detail what should appear in the output.
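
With this change, `veo3` becomes a valid `image_to_video` model: its only accepted duration is 8, its ratios are limited to `1280:720` and `720:1280`, and `content_moderation` is not accepted for it. A minimal sync sketch under those constraints, not part of the diff; the image URL and prompt are placeholders, and the client is assumed to read its key from the environment:

from runwayml import RunwayML

client = RunwayML()

task = client.image_to_video.create(
    model="veo3",
    prompt_image="https://example.com/first-frame.png",  # placeholder URL
    prompt_text="The camera slowly pushes in as snow begins to fall.",
    ratio="1280:720",  # veo3 accepts 1280:720 or 720:1280
    duration=8,        # veo3 requires a duration of 8
)
print(task.id)
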
runwayml/resources/text_to_image.py CHANGED
@@ -53,7 +53,7 @@ class TextToImageResource(SyncAPIResource):
     def create(
         self,
         *,
-        model: Literal["gen4_image", "gen4_image_turbo"],
+        model: Literal["gen4_image_turbo", "gen4_image", "gemini_2.5_flash"],
         prompt_text: str,
         ratio: Literal[
             "1920:1080",
@@ -72,6 +72,11 @@ class TextToImageResource(SyncAPIResource):
             "960:720",
             "720:960",
             "1680:720",
+            "1344:768",
+            "768:1344",
+            "1184:864",
+            "864:1184",
+            "1536:672",
         ],
         content_moderation: text_to_image_create_params.ContentModeration | NotGiven = NOT_GIVEN,
         reference_images: Iterable[text_to_image_create_params.ReferenceImage] | NotGiven = NOT_GIVEN,
@@ -92,10 +97,41 @@ class TextToImageResource(SyncAPIResource):
           prompt_text: A non-empty string up to 1000 characters (measured in UTF-16 code units). This
               should describe in detail what should appear in the output.

-          ratio: The resolution of the output image(s).
+          ratio: The resolution of the output image.
+
+              `gen4_image_turbo`, `gen4_image` support the following values:
+
+              - `1920:1080`
+              - `1080:1920`
+              - `1024:1024`
+              - `1360:768`
+              - `1080:1080`
+              - `1168:880`
+              - `1440:1080`
+              - `1080:1440`
+              - `1808:768`
+              - `2112:912`
+              - `1280:720`
+              - `720:1280`
+              - `720:720`
+              - `960:720`
+              - `720:960`
+              - `1680:720`
+
+              `gemini_2.5_flash` supports the following values:
+
+              - `1344:768`
+              - `768:1344`
+              - `1024:1024`
+              - `1184:864`
+              - `864:1184`
+              - `1536:672`

           content_moderation: Settings that affect the behavior of the content moderation system.

+              This field is allowed only for the following model variants: `gen4_image_turbo`,
+              `gen4_image`
+
           reference_images: An array of up to three images to be used as references for the generated image
               output.

@@ -156,7 +192,7 @@ class AsyncTextToImageResource(AsyncAPIResource):
     async def create(
         self,
         *,
-        model: Literal["gen4_image", "gen4_image_turbo"],
+        model: Literal["gen4_image_turbo", "gen4_image", "gemini_2.5_flash"],
         prompt_text: str,
         ratio: Literal[
             "1920:1080",
@@ -175,6 +211,11 @@ class AsyncTextToImageResource(AsyncAPIResource):
             "960:720",
             "720:960",
             "1680:720",
+            "1344:768",
+            "768:1344",
+            "1184:864",
+            "864:1184",
+            "1536:672",
         ],
         content_moderation: text_to_image_create_params.ContentModeration | NotGiven = NOT_GIVEN,
         reference_images: Iterable[text_to_image_create_params.ReferenceImage] | NotGiven = NOT_GIVEN,
@@ -195,10 +236,41 @@ class AsyncTextToImageResource(AsyncAPIResource):
           prompt_text: A non-empty string up to 1000 characters (measured in UTF-16 code units). This
               should describe in detail what should appear in the output.

-          ratio: The resolution of the output image(s).
+          ratio: The resolution of the output image.
+
+              `gen4_image_turbo`, `gen4_image` support the following values:
+
+              - `1920:1080`
+              - `1080:1920`
+              - `1024:1024`
+              - `1360:768`
+              - `1080:1080`
+              - `1168:880`
+              - `1440:1080`
+              - `1080:1440`
+              - `1808:768`
+              - `2112:912`
+              - `1280:720`
+              - `720:1280`
+              - `720:720`
+              - `960:720`
+              - `720:960`
+              - `1680:720`
+
+              `gemini_2.5_flash` supports the following values:
+
+              - `1344:768`
+              - `768:1344`
+              - `1024:1024`
+              - `1184:864`
+              - `864:1184`
+              - `1536:672`

           content_moderation: Settings that affect the behavior of the content moderation system.

+              This field is allowed only for the following model variants: `gen4_image_turbo`,
+              `gen4_image`
+
           reference_images: An array of up to three images to be used as references for the generated image
               output.

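
Similarly, `gemini_2.5_flash` becomes a valid `text_to_image` model with its own ratio set, and `content_moderation` is only accepted for the `gen4_image_turbo` and `gen4_image` variants. A minimal sketch, not part of the diff; the prompt is a placeholder and the client is assumed to read its key from the environment:

from runwayml import RunwayML

client = RunwayML()

task = client.text_to_image.create(
    model="gemini_2.5_flash",
    prompt_text="A watercolor map of an imaginary coastal city at dawn.",
    ratio="1344:768",  # one of the gemini_2.5_flash ratios; content_moderation is omitted for this model
)
print(task.id)
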
runwayml/resources/text_to_video.py ADDED
@@ -0,0 +1,223 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal
+
+import httpx
+
+from ..types import text_to_video_create_params
+from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from .._utils import maybe_transform, async_maybe_transform
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
+    to_raw_response_wrapper,
+    to_streamed_response_wrapper,
+    async_to_raw_response_wrapper,
+    async_to_streamed_response_wrapper,
+)
+from ..lib.polling import (
+    NewTaskCreatedResponse,
+    AsyncNewTaskCreatedResponse,
+    create_waitable_resource,
+    create_async_waitable_resource,
+)
+from .._base_client import make_request_options
+from ..types.text_to_video_create_response import TextToVideoCreateResponse
+
+__all__ = ["TextToVideoResource", "AsyncTextToVideoResource"]
+
+
+class TextToVideoResource(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> TextToVideoResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/runwayml/sdk-python#accessing-raw-response-data-eg-headers
+        """
+        return TextToVideoResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> TextToVideoResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/runwayml/sdk-python#with_streaming_response
+        """
+        return TextToVideoResourceWithStreamingResponse(self)
+
+    def create(
+        self,
+        *,
+        duration: Literal[8],
+        model: Literal["veo3"],
+        prompt_text: str,
+        ratio: Literal["1280:720", "720:1280"],
+        seed: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> NewTaskCreatedResponse:
+        """
+        This endpoint will start a new task to generate a video from a text prompt.
+
+        Args:
+          duration: Veo 3 videos must be 8 seconds long.
+
+          model: The model variant to use.
+
+          prompt_text: A non-empty string up to 1000 characters (measured in UTF-16 code units). This
+              should describe in detail what should appear in the output.
+
+          ratio: A string representing the aspect ratio of the output video.
+
+          seed: If unspecified, a random number is chosen. Varying the seed integer is a way to
+              get different results for the same other request parameters. Using the same seed
+              integer for an identical request will produce similar results.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._post(
+            "/v1/text_to_video",
+            body=maybe_transform(
+                {
+                    "duration": duration,
+                    "model": model,
+                    "prompt_text": prompt_text,
+                    "ratio": ratio,
+                    "seed": seed,
+                },
+                text_to_video_create_params.TextToVideoCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=create_waitable_resource(TextToVideoCreateResponse, self._client),
+        )
+
+
+class AsyncTextToVideoResource(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncTextToVideoResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/runwayml/sdk-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncTextToVideoResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncTextToVideoResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/runwayml/sdk-python#with_streaming_response
+        """
+        return AsyncTextToVideoResourceWithStreamingResponse(self)
+
+    async def create(
+        self,
+        *,
+        duration: Literal[8],
+        model: Literal["veo3"],
+        prompt_text: str,
+        ratio: Literal["1280:720", "720:1280"],
+        seed: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncNewTaskCreatedResponse:
+        """
+        This endpoint will start a new task to generate a video from a text prompt.
+
+        Args:
+          duration: Veo 3 videos must be 8 seconds long.
+
+          model: The model variant to use.
+
+          prompt_text: A non-empty string up to 1000 characters (measured in UTF-16 code units). This
+              should describe in detail what should appear in the output.
+
+          ratio: A string representing the aspect ratio of the output video.
+
+          seed: If unspecified, a random number is chosen. Varying the seed integer is a way to
+              get different results for the same other request parameters. Using the same seed
+              integer for an identical request will produce similar results.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._post(
+            "/v1/text_to_video",
+            body=await async_maybe_transform(
+                {
+                    "duration": duration,
+                    "model": model,
+                    "prompt_text": prompt_text,
+                    "ratio": ratio,
+                    "seed": seed,
+                },
+                text_to_video_create_params.TextToVideoCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=create_async_waitable_resource(TextToVideoCreateResponse, self._client),
+        )
+
+
+class TextToVideoResourceWithRawResponse:
+    def __init__(self, text_to_video: TextToVideoResource) -> None:
+        self._text_to_video = text_to_video
+
+        self.create = to_raw_response_wrapper(
+            text_to_video.create,
+        )
+
+
+class AsyncTextToVideoResourceWithRawResponse:
+    def __init__(self, text_to_video: AsyncTextToVideoResource) -> None:
+        self._text_to_video = text_to_video
+
+        self.create = async_to_raw_response_wrapper(
+            text_to_video.create,
+        )
+
+
+class TextToVideoResourceWithStreamingResponse:
+    def __init__(self, text_to_video: TextToVideoResource) -> None:
+        self._text_to_video = text_to_video
+
+        self.create = to_streamed_response_wrapper(
+            text_to_video.create,
+        )
+
+
+class AsyncTextToVideoResourceWithStreamingResponse:
+    def __init__(self, text_to_video: AsyncTextToVideoResource) -> None:
+        self._text_to_video = text_to_video
+
+        self.create = async_to_streamed_response_wrapper(
+            text_to_video.create,
+        )
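
The new resource mirrors the existing ones: `create` POSTs to `/v1/text_to_video` and returns a task handle whose `id` can be polled through the existing `tasks` resource, and the `with_raw_response` / `with_streaming_response` wrappers follow the same pattern as the other resources. A minimal sync sketch, not part of the diff; the prompt and seed are placeholders, and the client is assumed to read its key from the environment:

from runwayml import RunwayML

client = RunwayML()

task = client.text_to_video.create(
    model="veo3",
    duration=8,        # the only accepted duration for veo3
    ratio="1280:720",
    prompt_text="A timelapse of clouds rolling over a mountain ridge.",
    seed=42,           # optional; omit for a random seed
)
print(task.id)

# Poll the generation through the existing tasks resource.
status = client.tasks.retrieve(task.id)
print(status)
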
runwayml/types/__init__.py CHANGED
@@ -4,10 +4,12 @@ from __future__ import annotations

 from .task_retrieve_response import TaskRetrieveResponse as TaskRetrieveResponse
 from .text_to_image_create_params import TextToImageCreateParams as TextToImageCreateParams
+from .text_to_video_create_params import TextToVideoCreateParams as TextToVideoCreateParams
 from .video_upscale_create_params import VideoUpscaleCreateParams as VideoUpscaleCreateParams
 from .image_to_video_create_params import ImageToVideoCreateParams as ImageToVideoCreateParams
 from .video_to_video_create_params import VideoToVideoCreateParams as VideoToVideoCreateParams
 from .text_to_image_create_response import TextToImageCreateResponse as TextToImageCreateResponse
+from .text_to_video_create_response import TextToVideoCreateResponse as TextToVideoCreateResponse
 from .video_upscale_create_response import VideoUpscaleCreateResponse as VideoUpscaleCreateResponse
 from .image_to_video_create_response import ImageToVideoCreateResponse as ImageToVideoCreateResponse
 from .organization_retrieve_response import OrganizationRetrieveResponse as OrganizationRetrieveResponse
runwayml/types/image_to_video_create_params.py CHANGED
@@ -11,7 +11,7 @@ __all__ = ["ImageToVideoCreateParams", "PromptImagePromptImage", "ContentModerat


 class ImageToVideoCreateParams(TypedDict, total=False):
-    model: Required[Literal["gen3a_turbo", "gen4_turbo"]]
+    model: Required[Literal["gen4_turbo", "gen3a_turbo", "veo3"]]
     """The model variant to use."""

     prompt_image: Required[Annotated[Union[str, Iterable[PromptImagePromptImage]], PropertyInfo(alias="promptImage")]]
@@ -39,13 +39,26 @@ class ImageToVideoCreateParams(TypedDict, total=False):

     - `1280:768`
     - `768:1280`
+
+    `veo3` supports the following values:
+
+    - `1280:720`
+    - `720:1280`
     """

     content_moderation: Annotated[ContentModeration, PropertyInfo(alias="contentModeration")]
-    """Settings that affect the behavior of the content moderation system."""
+    """Settings that affect the behavior of the content moderation system.

-    duration: Literal[5, 10]
-    """The number of seconds of duration for the output video."""
+    This field is allowed only for the following model variants: `gen4_turbo`,
+    `gen3a_turbo`
+    """
+
+    duration: Literal[5, 8, 10]
+    """The number of seconds of duration for the output video.
+
+    `veo3` requires a duration of 8. gen4_turbo, gen3a_turbo must specify a duration
+    of 5 or 10.
+    """

     prompt_text: Annotated[str, PropertyInfo(alias="promptText")]
     """A non-empty string up to 1000 characters (measured in UTF-16 code units).
runwayml/types/organization_retrieve_response.py CHANGED
@@ -11,21 +11,25 @@ __all__ = [
     "Tier",
     "TierModels",
     "TierModelsActTwo",
+    "TierModelsGemini2_5Flash",
     "TierModelsGen3aTurbo",
     "TierModelsGen4Aleph",
     "TierModelsGen4Image",
     "TierModelsGen4ImageTurbo",
     "TierModelsGen4Turbo",
     "TierModelsUpscaleV1",
+    "TierModelsVeo3",
     "Usage",
     "UsageModels",
     "UsageModelsActTwo",
+    "UsageModelsGemini2_5Flash",
     "UsageModelsGen3aTurbo",
     "UsageModelsGen4Aleph",
     "UsageModelsGen4Image",
     "UsageModelsGen4ImageTurbo",
     "UsageModelsGen4Turbo",
     "UsageModelsUpscaleV1",
+    "UsageModelsVeo3",
 ]


@@ -37,6 +41,14 @@ class TierModelsActTwo(BaseModel):
     """The maximum number of generations that can be created each day for this model."""


+class TierModelsGemini2_5Flash(BaseModel):
+    max_concurrent_generations: int = FieldInfo(alias="maxConcurrentGenerations")
+    """The maximum number of generations that can be run concurrently for this model."""
+
+    max_daily_generations: int = FieldInfo(alias="maxDailyGenerations")
+    """The maximum number of generations that can be created each day for this model."""
+
+
 class TierModelsGen3aTurbo(BaseModel):
     max_concurrent_generations: int = FieldInfo(alias="maxConcurrentGenerations")
     """The maximum number of generations that can be run concurrently for this model."""
@@ -85,10 +97,21 @@ class TierModelsUpscaleV1(BaseModel):
     """The maximum number of generations that can be created each day for this model."""


+class TierModelsVeo3(BaseModel):
+    max_concurrent_generations: int = FieldInfo(alias="maxConcurrentGenerations")
+    """The maximum number of generations that can be run concurrently for this model."""
+
+    max_daily_generations: int = FieldInfo(alias="maxDailyGenerations")
+    """The maximum number of generations that can be created each day for this model."""
+
+
 class TierModels(BaseModel):
     act_two: Optional[TierModelsActTwo] = None
     """Limits associated with the act_two model."""

+    gemini_2_5_flash: Optional[TierModelsGemini2_5Flash] = FieldInfo(alias="gemini_2.5_flash", default=None)
+    """Limits associated with the gemini_2.5_flash model."""
+
     gen3a_turbo: Optional[TierModelsGen3aTurbo] = None
     """Limits associated with the gen3a_turbo model."""

@@ -107,6 +130,9 @@ class TierModels(BaseModel):
     upscale_v1: Optional[TierModelsUpscaleV1] = None
     """Limits associated with the upscale_v1 model."""

+    veo3: Optional[TierModelsVeo3] = None
+    """Limits associated with the veo3 model."""
+

 class Tier(BaseModel):
     max_monthly_credit_spend: int = FieldInfo(alias="maxMonthlyCreditSpend")
@@ -121,6 +147,11 @@ class UsageModelsActTwo(BaseModel):
     """The number of generations that have been run for this model in the past day."""


+class UsageModelsGemini2_5Flash(BaseModel):
+    daily_generations: int = FieldInfo(alias="dailyGenerations")
+    """The number of generations that have been run for this model in the past day."""
+
+
 class UsageModelsGen3aTurbo(BaseModel):
     daily_generations: int = FieldInfo(alias="dailyGenerations")
     """The number of generations that have been run for this model in the past day."""
@@ -151,10 +182,18 @@ class UsageModelsUpscaleV1(BaseModel):
     """The number of generations that have been run for this model in the past day."""


+class UsageModelsVeo3(BaseModel):
+    daily_generations: int = FieldInfo(alias="dailyGenerations")
+    """The number of generations that have been run for this model in the past day."""
+
+
 class UsageModels(BaseModel):
     act_two: Optional[UsageModelsActTwo] = None
     """Usage data for the act_two model."""

+    gemini_2_5_flash: Optional[UsageModelsGemini2_5Flash] = FieldInfo(alias="gemini_2.5_flash", default=None)
+    """Usage data for the gemini_2.5_flash model."""
+
     gen3a_turbo: Optional[UsageModelsGen3aTurbo] = None
     """Usage data for the gen3a_turbo model."""

@@ -173,6 +212,9 @@ class UsageModels(BaseModel):
     upscale_v1: Optional[UsageModelsUpscaleV1] = None
     """Usage data for the upscale_v1 model."""

+    veo3: Optional[UsageModelsVeo3] = None
+    """Usage data for the veo3 model."""
+

 class Usage(BaseModel):
     models: UsageModels
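
The tier and usage models gain optional `veo3` and `gemini_2.5_flash` entries alongside the existing per-model limits. A sketch of reading the new limits, not part of the diff; it assumes the `tier.models` / `usage.models` layout of the existing `organization.retrieve()` response, which this hunk only shows in part:

from runwayml import RunwayML

client = RunwayML()
org = client.organization.retrieve()

# The new entries are Optional, so guard against None before reading them.
veo3_limits = org.tier.models.veo3
if veo3_limits is not None:
    print(veo3_limits.max_concurrent_generations, veo3_limits.max_daily_generations)

veo3_usage = org.usage.models.veo3
if veo3_usage is not None:
    print(veo3_usage.daily_generations)
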
runwayml/types/organization_retrieve_usage_response.py CHANGED
@@ -15,7 +15,17 @@ class ResultUsedCredit(BaseModel):
     amount: int
     """The number of credits used for the model."""

-    model: Literal["upscale_v1", "act_two", "gen4_image", "gen3a_turbo", "gen4_turbo", "gen4_aleph", "gen4_image_turbo"]
+    model: Literal[
+        "act_two",
+        "gemini_2.5_flash",
+        "gen3a_turbo",
+        "gen4_aleph",
+        "gen4_image",
+        "gen4_image_turbo",
+        "gen4_turbo",
+        "upscale_v1",
+        "veo3",
+    ]
     """The model whose usage resulted in the credit usage."""


@@ -32,7 +42,17 @@ class Result(BaseModel):

 class OrganizationRetrieveUsageResponse(BaseModel):
     models: List[
-        Literal["upscale_v1", "act_two", "gen4_image", "gen3a_turbo", "gen4_turbo", "gen4_aleph", "gen4_image_turbo"]
+        Literal[
+            "act_two",
+            "gemini_2.5_flash",
+            "gen3a_turbo",
+            "gen4_aleph",
+            "gen4_image",
+            "gen4_image_turbo",
+            "gen4_turbo",
+            "upscale_v1",
+            "veo3",
+        ]
     ]
     """The list of models with usage during the queried time range."""

runwayml/types/text_to_image_create_params.py CHANGED
@@ -11,7 +11,7 @@ __all__ = ["TextToImageCreateParams", "ContentModeration", "ReferenceImage"]


 class TextToImageCreateParams(TypedDict, total=False):
-    model: Required[Literal["gen4_image", "gen4_image_turbo"]]
+    model: Required[Literal["gen4_image_turbo", "gen4_image", "gemini_2.5_flash"]]
     """The model variant to use."""

     prompt_text: Required[Annotated[str, PropertyInfo(alias="promptText")]]
@@ -38,12 +38,50 @@ class TextToImageCreateParams(TypedDict, total=False):
             "960:720",
             "720:960",
             "1680:720",
+            "1344:768",
+            "768:1344",
+            "1184:864",
+            "864:1184",
+            "1536:672",
         ]
     ]
-    """The resolution of the output image(s)."""
+    """The resolution of the output image.
+
+    `gen4_image_turbo`, `gen4_image` support the following values:
+
+    - `1920:1080`
+    - `1080:1920`
+    - `1024:1024`
+    - `1360:768`
+    - `1080:1080`
+    - `1168:880`
+    - `1440:1080`
+    - `1080:1440`
+    - `1808:768`
+    - `2112:912`
+    - `1280:720`
+    - `720:1280`
+    - `720:720`
+    - `960:720`
+    - `720:960`
+    - `1680:720`
+
+    `gemini_2.5_flash` supports the following values:
+
+    - `1344:768`
+    - `768:1344`
+    - `1024:1024`
+    - `1184:864`
+    - `864:1184`
+    - `1536:672`
+    """

     content_moderation: Annotated[ContentModeration, PropertyInfo(alias="contentModeration")]
-    """Settings that affect the behavior of the content moderation system."""
+    """Settings that affect the behavior of the content moderation system.
+
+    This field is allowed only for the following model variants: `gen4_image_turbo`,
+    `gen4_image`
+    """

     reference_images: Annotated[Iterable[ReferenceImage], PropertyInfo(alias="referenceImages")]
     """
runwayml/types/text_to_video_create_params.py ADDED
@@ -0,0 +1,34 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, Annotated, TypedDict
+
+from .._utils import PropertyInfo
+
+__all__ = ["TextToVideoCreateParams"]
+
+
+class TextToVideoCreateParams(TypedDict, total=False):
+    duration: Required[Literal[8]]
+    """Veo 3 videos must be 8 seconds long."""
+
+    model: Required[Literal["veo3"]]
+    """The model variant to use."""
+
+    prompt_text: Required[Annotated[str, PropertyInfo(alias="promptText")]]
+    """A non-empty string up to 1000 characters (measured in UTF-16 code units).
+
+    This should describe in detail what should appear in the output.
+    """
+
+    ratio: Required[Literal["1280:720", "720:1280"]]
+    """A string representing the aspect ratio of the output video."""
+
+    seed: int
+    """If unspecified, a random number is chosen.
+
+    Varying the seed integer is a way to get different results for the same other
+    request parameters. Using the same seed integer for an identical request will
+    produce similar results.
+    """
runwayml/types/text_to_video_create_response.py ADDED
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .._models import BaseModel
+
+__all__ = ["TextToVideoCreateResponse"]
+
+
+class TextToVideoCreateResponse(BaseModel):
+    id: str
+    """The ID of the newly created task."""
runwayml-3.12.1.dist-info/METADATA → runwayml-3.14.0.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: runwayml
-Version: 3.12.1
+Version: 3.14.0
 Summary: The official Python library for the runwayml API
 Project-URL: Homepage, https://github.com/runwayml/sdk-python
 Project-URL: Repository, https://github.com/runwayml/sdk-python
runwayml-3.12.1.dist-info/RECORD → runwayml-3.14.0.dist-info/RECORD RENAMED
@@ -1,6 +1,6 @@
 runwayml/__init__.py,sha256=tr-n2Y4sH_wBv8t2F_jk7ku_rLT2erU3j61ONZJWUVs,2743
 runwayml/_base_client.py,sha256=-TE2OySFDIosZ104M_34xSxb7CzQkzN-KVaO1XH0Bzs,67049
-runwayml/_client.py,sha256=f5_2h1VDkVIpczg2a_FqhZL82qOS_qvEYmpe4dhuK-Q,20285
+runwayml/_client.py,sha256=aCYgB84rOTMVoGit65S8DN1N3BQbcIwe840FyMilLpg,20980
 runwayml/_compat.py,sha256=DQBVORjFb33zch24jzkhM14msvnzY7mmSmgDLaVFUM8,6562
 runwayml/_constants.py,sha256=S14PFzyN9-I31wiV7SmIlL5Ga0MLHxdvegInGdXH7tM,462
 runwayml/_exceptions.py,sha256=p2Q8kywHCVQzArLQL4Ht-HetTBhAvevU6yDvEq7PpIE,3224
@@ -11,7 +11,7 @@ runwayml/_resource.py,sha256=BF-j3xY5eRTKmuTxg8eDhLtLP4MLB1phDh_B6BKipKA,1112
 runwayml/_response.py,sha256=WxjSEXX-j01ZhlSxYyMCVSEKxo20pgy40RA7iyski8M,28800
 runwayml/_streaming.py,sha256=NSVuAgknVQWU1cgZEjQn01IdZKKynb5rOeYp5Lo-OEQ,10108
 runwayml/_types.py,sha256=x9kxQosdf8uFYdVp9BA2p97HMcdp9M4oky-BbC5t1TU,7298
-runwayml/_version.py,sha256=KcdPdc9QHVA2ikmmFZ6xIpUH_wWy__q-wpcNb3Yy_LQ,161
+runwayml/_version.py,sha256=emHDPS6mhVFiE4rUxJ5vj3jWYeOhXGCD11qtMCQXqxk,161
 runwayml/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 runwayml/_utils/__init__.py,sha256=7fch0GT9zpNnErbciSpUNa-SjTxxjY6kxHxKMOM4AGs,2305
 runwayml/_utils/_compat.py,sha256=D8gtAvjJQrDWt9upS0XaG9Rr5l1QhiAx_I_1utT_tt0,1195
@@ -27,30 +27,33 @@ runwayml/_utils/_typing.py,sha256=N_5PPuFNsaygbtA_npZd98SVN1LQQvFTKL6bkWPBZGU,47
 runwayml/_utils/_utils.py,sha256=D2QE7mVPNEJzaB50u8rvDQAUDS5jx7JoeFD7zdj-TeI,12231
 runwayml/lib/.keep,sha256=wuNrz-5SXo3jJaJOJgz4vFHM41YH_g20F5cRQo0vLes,224
 runwayml/lib/polling.py,sha256=4fF0gP-h4iR0jvxWifsqtR1iH9vRoKEgGykkaZNT9Ek,4743
-runwayml/resources/__init__.py,sha256=eDf9c_7y8W_cwo5E5_L3_tOxWmt0k6GfjupfkLE_Zdo,3806
+runwayml/resources/__init__.py,sha256=VH2pNFyx5LAk5NugfO2XI-zG0IXRIILLlURn_KVfA4w,4323
 runwayml/resources/character_performance.py,sha256=8KBZQuht5CKDeqk89UCIWw7EMqt6iB-It0iIFejOJ-M,10905
-runwayml/resources/image_to_video.py,sha256=O6YbnD7QEE_YK5UeRDq8RzWwiuS5nTQSAkCl9RvZe74,10880
+runwayml/resources/image_to_video.py,sha256=5azI4bF-U6ooYhGiz3OYhVlRR1bW9Xk6Miu8eu_h4RI,11568
 runwayml/resources/organization.py,sha256=iPwFFz8nltHXea0uFJd-700657xgJdnEJiWAwXVNDqY,10581
 runwayml/resources/tasks.py,sha256=mjdBqB1G4u9v3xB_9yn6aIdvsDmawxSNcTENkMpKSms,10146
-runwayml/resources/text_to_image.py,sha256=p3Gq7GyoSuziFuoGmQ1VILVMeBGqwAJn6-er3C3YsMI,10338
+runwayml/resources/text_to_image.py,sha256=r70rbRqELhn4XTbawXef3QcKUxLfXgfP5bd_WFb8f1w,12344
+runwayml/resources/text_to_video.py,sha256=cQzDY2TzliUYhGBh2M6S6_f9TcwHSFxLwwY7f80OIZY,8564
 runwayml/resources/video_to_video.py,sha256=Obl7vrfwgcRC9bWUylVWVYc3r99aJt0I7NHhuhPvPus,10121
 runwayml/resources/video_upscale.py,sha256=8Mz_g5Swxmgp14jfcfexurUYpPi73q_iU-9D1jOddt0,7691
-runwayml/types/__init__.py,sha256=ve_d68f-Qs6UW9hflrdvex1pyi7yvo3afYNsqq__6sY,1572
+runwayml/types/__init__.py,sha256=UTMpvsnh_66TPQ_2hKTFbS5zKI-UqGvpmO6XQV7B4lA,1762
 runwayml/types/character_performance_create_params.py,sha256=TYmR-YCK8_4fomSoqtC8dT1iIR7z2gMQvtu9u-FatQ4,3266
 runwayml/types/character_performance_create_response.py,sha256=QIJUfqWraZTJmX67zu3VQevBoFxDPmUh74C-_EelHy8,280
-runwayml/types/image_to_video_create_params.py,sha256=6M_xJRx0ws8nQ0a3k3jEICDm-WXJUG9j-j1UIxAAg-s,2869
+runwayml/types/image_to_video_create_params.py,sha256=b96f5j13VE2CojJ6wTNk4lUqahw7zep8FZ3ezGpypO8,3168
 runwayml/types/image_to_video_create_response.py,sha256=WvZHbZxxJz8KerRNogzb1RYBrxa1x0iCPDi9-LCpHyE,345
-runwayml/types/organization_retrieve_response.py,sha256=lq6QVzkOl77unf-TFOAoUxclSAW-7QC-Bu3NlvwXjz4,6905
+runwayml/types/organization_retrieve_response.py,sha256=Mx80Xu99d7t_aVyEl9Q8TqgF1CEp08o6X8LkzV3XOAc,8659
 runwayml/types/organization_retrieve_usage_params.py,sha256=vF5GUqaDqY1x6W2RzJ923jspuZyoNgCUaoLI3mW25zg,999
-runwayml/types/organization_retrieve_usage_response.py,sha256=yHqBM1EknT2Sb_04eCBRnLoHgZeo8wph4t6Dy6aZYMs,1186
+runwayml/types/organization_retrieve_usage_response.py,sha256=jKd73nkM2R6Zlp5vv60nRs32YiwXwWFZ3du-7FzXxkI,1440
 runwayml/types/task_retrieve_response.py,sha256=v8y2bLxsW6srzScW-B3Akv72q_PI_NQmduGrGRQMHds,2139
-runwayml/types/text_to_image_create_params.py,sha256=Uk3-aCu8LhlmsYpFXBfw5wUAzNFDbjBVnLWoRTu2esQ,2857
+runwayml/types/text_to_image_create_params.py,sha256=tcxQOR1_24smxqTUXXXRPfsOG3eSjgqqUglVPH1167o,3610
 runwayml/types/text_to_image_create_response.py,sha256=koMzUg82dYFQPp77wln3UR1z8WO2sHCNMWGgoQ9Id8M,262
+runwayml/types/text_to_video_create_params.py,sha256=SyZtoIVD2cjzWZQWkxDAC1CXFh2VTTfw3M4HV-FqoXc,1107
+runwayml/types/text_to_video_create_response.py,sha256=hgsHI1iKdm1GxDyrdjlpCZtC6DWphHbHy8JUQYq9lgs,262
 runwayml/types/video_to_video_create_params.py,sha256=0qfsIDlcTpqn9eiY-7X0J1NuDQMYzsLu-e_fmKhNljU,2357
 runwayml/types/video_to_video_create_response.py,sha256=CXgAUmnPIZOxCW_macIBPOC8MZYQpq9a5_jteSkeBt8,264
 runwayml/types/video_upscale_create_params.py,sha256=Ta3BNQy9aeTUBU5Ui-CMJtF32HeNRqbNpqjAAOKXyks,743
 runwayml/types/video_upscale_create_response.py,sha256=zf-79HbJa68dUHltBiZjVtnW_U6HUI-htmkTm5URBSU,264
-runwayml-3.12.1.dist-info/METADATA,sha256=Cuw_qzcd2920w7UUu086K3JKU_KVBeQ2hV58hVVYXzY,15158
-runwayml-3.12.1.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
-runwayml-3.12.1.dist-info/licenses/LICENSE,sha256=baeFj6izBWIm6A5_7N3-WAsy_VYpDF05Dd4zS1zsfZI,11338
-runwayml-3.12.1.dist-info/RECORD,,
+runwayml-3.14.0.dist-info/METADATA,sha256=5X1-fZRAF2hidPYIcyoXSAzBeIh2d7k3XrOa1WnBrsE,15158
+runwayml-3.14.0.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
+runwayml-3.14.0.dist-info/licenses/LICENSE,sha256=baeFj6izBWIm6A5_7N3-WAsy_VYpDF05Dd4zS1zsfZI,11338
+runwayml-3.14.0.dist-info/RECORD,,