runwayml-3.0.2-py3-none-any.whl → runwayml-3.0.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
runwayml/_base_client.py CHANGED
@@ -437,8 +437,7 @@ class BaseClient(Generic[_HttpxClientT, _DefaultStreamT]):
  headers = httpx.Headers(headers_dict)

  idempotency_header = self._idempotency_header
- if idempotency_header and options.method.lower() != "get" and idempotency_header not in headers:
- options.idempotency_key = options.idempotency_key or self._idempotency_key()
+ if idempotency_header and options.idempotency_key and idempotency_header not in headers:
  headers[idempotency_header] = options.idempotency_key

  # Don't set these headers if they were already set or removed by the caller. We check
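In 3.0.4 the header builder only writes the idempotency header when a key is already present on the request options; generating the key moves into the request loop (next hunks), so every retry of the same logical request reuses one key. A minimal sketch of that "generate once, reuse across retries" shape — the `Idempotency-Key` header name and the key format below are assumptions for illustration, not the SDK's internals:

```python
import uuid

# Sketch only: the key is created once, before any retries, and the header is set
# only when a key exists -- mirroring the shape of the 3.0.4 change, not its code.
def build_headers(method: str, idempotency_key: str | None) -> dict[str, str]:
    headers: dict[str, str] = {}
    # "Idempotency-Key" is a hypothetical header name for illustration
    if idempotency_key and method.lower() != "get":
        headers["Idempotency-Key"] = idempotency_key
    return headers

key = f"retry-{uuid.uuid4()}"  # assumed key format, generated once per logical request
for attempt in range(3):
    # each attempt of the same request sends the identical key
    print(attempt, build_headers("post", key))
```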
@@ -903,7 +902,6 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
  self,
  cast_to: Type[ResponseT],
  options: FinalRequestOptions,
- remaining_retries: Optional[int] = None,
  *,
  stream: Literal[True],
  stream_cls: Type[_StreamT],
@@ -914,7 +912,6 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
  self,
  cast_to: Type[ResponseT],
  options: FinalRequestOptions,
- remaining_retries: Optional[int] = None,
  *,
  stream: Literal[False] = False,
  ) -> ResponseT: ...
@@ -924,7 +921,6 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
  self,
  cast_to: Type[ResponseT],
  options: FinalRequestOptions,
- remaining_retries: Optional[int] = None,
  *,
  stream: bool = False,
  stream_cls: Type[_StreamT] | None = None,
@@ -934,125 +930,109 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
  self,
  cast_to: Type[ResponseT],
  options: FinalRequestOptions,
- remaining_retries: Optional[int] = None,
  *,
  stream: bool = False,
  stream_cls: type[_StreamT] | None = None,
  ) -> ResponseT | _StreamT:
- if remaining_retries is not None:
- retries_taken = options.get_max_retries(self.max_retries) - remaining_retries
- else:
- retries_taken = 0
-
- return self._request(
- cast_to=cast_to,
- options=options,
- stream=stream,
- stream_cls=stream_cls,
- retries_taken=retries_taken,
- )
+ cast_to = self._maybe_override_cast_to(cast_to, options)

- def _request(
- self,
- *,
- cast_to: Type[ResponseT],
- options: FinalRequestOptions,
- retries_taken: int,
- stream: bool,
- stream_cls: type[_StreamT] | None,
- ) -> ResponseT | _StreamT:
  # create a copy of the options we were given so that if the
  # options are mutated later & we then retry, the retries are
  # given the original options
  input_options = model_copy(options)
-
- cast_to = self._maybe_override_cast_to(cast_to, options)
- options = self._prepare_options(options)
-
- remaining_retries = options.get_max_retries(self.max_retries) - retries_taken
- request = self._build_request(options, retries_taken=retries_taken)
- self._prepare_request(request)
-
- if options.idempotency_key:
+ if input_options.idempotency_key is None and input_options.method.lower() != "get":
  # ensure the idempotency key is reused between requests
- input_options.idempotency_key = options.idempotency_key
+ input_options.idempotency_key = self._idempotency_key()

- kwargs: HttpxSendArgs = {}
- if self.custom_auth is not None:
- kwargs["auth"] = self.custom_auth
+ response: httpx.Response | None = None
+ max_retries = input_options.get_max_retries(self.max_retries)

- log.debug("Sending HTTP Request: %s %s", request.method, request.url)
+ retries_taken = 0
+ for retries_taken in range(max_retries + 1):
+ options = model_copy(input_options)
+ options = self._prepare_options(options)

- try:
- response = self._client.send(
- request,
- stream=stream or self._should_stream_response_body(request=request),
- **kwargs,
- )
- except httpx.TimeoutException as err:
- log.debug("Encountered httpx.TimeoutException", exc_info=True)
+ remaining_retries = max_retries - retries_taken
+ request = self._build_request(options, retries_taken=retries_taken)
+ self._prepare_request(request)

- if remaining_retries > 0:
- return self._retry_request(
- input_options,
- cast_to,
- retries_taken=retries_taken,
- stream=stream,
- stream_cls=stream_cls,
- response_headers=None,
- )
+ kwargs: HttpxSendArgs = {}
+ if self.custom_auth is not None:
+ kwargs["auth"] = self.custom_auth

- log.debug("Raising timeout error")
- raise APITimeoutError(request=request) from err
- except Exception as err:
- log.debug("Encountered Exception", exc_info=True)
+ log.debug("Sending HTTP Request: %s %s", request.method, request.url)

- if remaining_retries > 0:
- return self._retry_request(
- input_options,
- cast_to,
- retries_taken=retries_taken,
- stream=stream,
- stream_cls=stream_cls,
- response_headers=None,
+ response = None
+ try:
+ response = self._client.send(
+ request,
+ stream=stream or self._should_stream_response_body(request=request),
+ **kwargs,
  )
+ except httpx.TimeoutException as err:
+ log.debug("Encountered httpx.TimeoutException", exc_info=True)
+
+ if remaining_retries > 0:
+ self._sleep_for_retry(
+ retries_taken=retries_taken,
+ max_retries=max_retries,
+ options=input_options,
+ response=None,
+ )
+ continue
+
+ log.debug("Raising timeout error")
+ raise APITimeoutError(request=request) from err
+ except Exception as err:
+ log.debug("Encountered Exception", exc_info=True)
+
+ if remaining_retries > 0:
+ self._sleep_for_retry(
+ retries_taken=retries_taken,
+ max_retries=max_retries,
+ options=input_options,
+ response=None,
+ )
+ continue
+
+ log.debug("Raising connection error")
+ raise APIConnectionError(request=request) from err
+
+ log.debug(
+ 'HTTP Response: %s %s "%i %s" %s',
+ request.method,
+ request.url,
+ response.status_code,
+ response.reason_phrase,
+ response.headers,
+ )

- log.debug("Raising connection error")
- raise APIConnectionError(request=request) from err
-
- log.debug(
- 'HTTP Response: %s %s "%i %s" %s',
- request.method,
- request.url,
- response.status_code,
- response.reason_phrase,
- response.headers,
- )
+ try:
+ response.raise_for_status()
+ except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code
+ log.debug("Encountered httpx.HTTPStatusError", exc_info=True)
+
+ if remaining_retries > 0 and self._should_retry(err.response):
+ err.response.close()
+ self._sleep_for_retry(
+ retries_taken=retries_taken,
+ max_retries=max_retries,
+ options=input_options,
+ response=response,
+ )
+ continue

- try:
- response.raise_for_status()
- except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code
- log.debug("Encountered httpx.HTTPStatusError", exc_info=True)
-
- if remaining_retries > 0 and self._should_retry(err.response):
- err.response.close()
- return self._retry_request(
- input_options,
- cast_to,
- retries_taken=retries_taken,
- response_headers=err.response.headers,
- stream=stream,
- stream_cls=stream_cls,
- )
+ # If the response is streamed then we need to explicitly read the response
+ # to completion before attempting to access the response text.
+ if not err.response.is_closed:
+ err.response.read()

- # If the response is streamed then we need to explicitly read the response
- # to completion before attempting to access the response text.
- if not err.response.is_closed:
- err.response.read()
+ log.debug("Re-raising status error")
+ raise self._make_status_error_from_response(err.response) from None

- log.debug("Re-raising status error")
- raise self._make_status_error_from_response(err.response) from None
+ break

+ assert response is not None, "could not resolve response (should never happen)"
  return self._process_response(
  cast_to=cast_to,
  options=options,
@@ -1062,37 +1042,20 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
  retries_taken=retries_taken,
  )

- def _retry_request(
- self,
- options: FinalRequestOptions,
- cast_to: Type[ResponseT],
- *,
- retries_taken: int,
- response_headers: httpx.Headers | None,
- stream: bool,
- stream_cls: type[_StreamT] | None,
- ) -> ResponseT | _StreamT:
- remaining_retries = options.get_max_retries(self.max_retries) - retries_taken
+ def _sleep_for_retry(
+ self, *, retries_taken: int, max_retries: int, options: FinalRequestOptions, response: httpx.Response | None
+ ) -> None:
+ remaining_retries = max_retries - retries_taken
  if remaining_retries == 1:
  log.debug("1 retry left")
  else:
  log.debug("%i retries left", remaining_retries)

- timeout = self._calculate_retry_timeout(remaining_retries, options, response_headers)
+ timeout = self._calculate_retry_timeout(remaining_retries, options, response.headers if response else None)
  log.info("Retrying request to %s in %f seconds", options.url, timeout)

- # In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a
- # different thread if necessary.
  time.sleep(timeout)

- return self._request(
- options=options,
- cast_to=cast_to,
- retries_taken=retries_taken + 1,
- stream=stream,
- stream_cls=stream_cls,
- )
-
  def _process_response(
  self,
  *,
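The change above flattens the recursive `request → _request → _retry_request` chain into a single `for retries_taken in range(max_retries + 1)` loop: retries become `continue`, and the backoff is delegated to the new `_sleep_for_retry` helper. A rough, self-contained sketch of that control flow, assuming stand-in helpers (`send_once`, the backoff constants) rather than the SDK's real ones:

```python
import random
import time

def _sleep_for_retry(retries_taken: int) -> None:
    # exponential backoff with jitter; the shape, not the SDK's exact values
    time.sleep(min(0.5 * 2**retries_taken, 8.0) * (1 + 0.25 * random.random()))

def request_with_retries(send_once, max_retries: int = 2) -> dict:
    response: dict | None = None
    for retries_taken in range(max_retries + 1):
        remaining_retries = max_retries - retries_taken
        try:
            response = send_once()
        except TimeoutError:
            if remaining_retries > 0:
                _sleep_for_retry(retries_taken)
                continue  # the next loop iteration is the retry
            raise
        if response["status"] >= 500 and remaining_retries > 0:
            _sleep_for_retry(retries_taken)
            continue
        break  # success or a non-retryable status: leave the loop
    assert response is not None
    return response

# usage: a sender that fails once with a retryable status, then succeeds
attempts = {"n": 0}
def flaky_send() -> dict:
    attempts["n"] += 1
    return {"status": 500} if attempts["n"] == 1 else {"status": 200}

print(request_with_retries(flaky_send))  # {'status': 200}
```

With the loop, `retries_taken` is simply the loop index, so it no longer has to be threaded through the public overloads — which is why the `remaining_retries` parameter disappears from `request` above.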
@@ -1436,7 +1399,6 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
  options: FinalRequestOptions,
  *,
  stream: Literal[False] = False,
- remaining_retries: Optional[int] = None,
  ) -> ResponseT: ...

  @overload
@@ -1447,7 +1409,6 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
  *,
  stream: Literal[True],
  stream_cls: type[_AsyncStreamT],
- remaining_retries: Optional[int] = None,
  ) -> _AsyncStreamT: ...

  @overload
@@ -1458,7 +1419,6 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
  *,
  stream: bool,
  stream_cls: type[_AsyncStreamT] | None = None,
- remaining_retries: Optional[int] = None,
  ) -> ResponseT | _AsyncStreamT: ...

  async def request(
@@ -1468,120 +1428,111 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
  *,
  stream: bool = False,
  stream_cls: type[_AsyncStreamT] | None = None,
- remaining_retries: Optional[int] = None,
- ) -> ResponseT | _AsyncStreamT:
- if remaining_retries is not None:
- retries_taken = options.get_max_retries(self.max_retries) - remaining_retries
- else:
- retries_taken = 0
-
- return await self._request(
- cast_to=cast_to,
- options=options,
- stream=stream,
- stream_cls=stream_cls,
- retries_taken=retries_taken,
- )
-
- async def _request(
- self,
- cast_to: Type[ResponseT],
- options: FinalRequestOptions,
- *,
- stream: bool,
- stream_cls: type[_AsyncStreamT] | None,
- retries_taken: int,
  ) -> ResponseT | _AsyncStreamT:
  if self._platform is None:
  # `get_platform` can make blocking IO calls so we
  # execute it earlier while we are in an async context
  self._platform = await asyncify(get_platform)()

+ cast_to = self._maybe_override_cast_to(cast_to, options)
+
  # create a copy of the options we were given so that if the
  # options are mutated later & we then retry, the retries are
  # given the original options
  input_options = model_copy(options)
-
- cast_to = self._maybe_override_cast_to(cast_to, options)
- options = await self._prepare_options(options)
-
- remaining_retries = options.get_max_retries(self.max_retries) - retries_taken
- request = self._build_request(options, retries_taken=retries_taken)
- await self._prepare_request(request)
-
- if options.idempotency_key:
+ if input_options.idempotency_key is None and input_options.method.lower() != "get":
  # ensure the idempotency key is reused between requests
- input_options.idempotency_key = options.idempotency_key
+ input_options.idempotency_key = self._idempotency_key()

- kwargs: HttpxSendArgs = {}
- if self.custom_auth is not None:
- kwargs["auth"] = self.custom_auth
+ response: httpx.Response | None = None
+ max_retries = input_options.get_max_retries(self.max_retries)

- try:
- response = await self._client.send(
- request,
- stream=stream or self._should_stream_response_body(request=request),
- **kwargs,
- )
- except httpx.TimeoutException as err:
- log.debug("Encountered httpx.TimeoutException", exc_info=True)
+ retries_taken = 0
+ for retries_taken in range(max_retries + 1):
+ options = model_copy(input_options)
+ options = await self._prepare_options(options)

- if remaining_retries > 0:
- return await self._retry_request(
- input_options,
- cast_to,
- retries_taken=retries_taken,
- stream=stream,
- stream_cls=stream_cls,
- response_headers=None,
- )
+ remaining_retries = max_retries - retries_taken
+ request = self._build_request(options, retries_taken=retries_taken)
+ await self._prepare_request(request)

- log.debug("Raising timeout error")
- raise APITimeoutError(request=request) from err
- except Exception as err:
- log.debug("Encountered Exception", exc_info=True)
+ kwargs: HttpxSendArgs = {}
+ if self.custom_auth is not None:
+ kwargs["auth"] = self.custom_auth

- if remaining_retries > 0:
- return await self._retry_request(
- input_options,
- cast_to,
- retries_taken=retries_taken,
- stream=stream,
- stream_cls=stream_cls,
- response_headers=None,
- )
+ log.debug("Sending HTTP Request: %s %s", request.method, request.url)

- log.debug("Raising connection error")
- raise APIConnectionError(request=request) from err
+ response = None
+ try:
+ response = await self._client.send(
+ request,
+ stream=stream or self._should_stream_response_body(request=request),
+ **kwargs,
+ )
+ except httpx.TimeoutException as err:
+ log.debug("Encountered httpx.TimeoutException", exc_info=True)
+
+ if remaining_retries > 0:
+ await self._sleep_for_retry(
+ retries_taken=retries_taken,
+ max_retries=max_retries,
+ options=input_options,
+ response=None,
+ )
+ continue
+
+ log.debug("Raising timeout error")
+ raise APITimeoutError(request=request) from err
+ except Exception as err:
+ log.debug("Encountered Exception", exc_info=True)
+
+ if remaining_retries > 0:
+ await self._sleep_for_retry(
+ retries_taken=retries_taken,
+ max_retries=max_retries,
+ options=input_options,
+ response=None,
+ )
+ continue
+
+ log.debug("Raising connection error")
+ raise APIConnectionError(request=request) from err
+
+ log.debug(
+ 'HTTP Response: %s %s "%i %s" %s',
+ request.method,
+ request.url,
+ response.status_code,
+ response.reason_phrase,
+ response.headers,
+ )

- log.debug(
- 'HTTP Request: %s %s "%i %s"', request.method, request.url, response.status_code, response.reason_phrase
- )
+ try:
+ response.raise_for_status()
+ except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code
+ log.debug("Encountered httpx.HTTPStatusError", exc_info=True)
+
+ if remaining_retries > 0 and self._should_retry(err.response):
+ await err.response.aclose()
+ await self._sleep_for_retry(
+ retries_taken=retries_taken,
+ max_retries=max_retries,
+ options=input_options,
+ response=response,
+ )
+ continue

- try:
- response.raise_for_status()
- except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code
- log.debug("Encountered httpx.HTTPStatusError", exc_info=True)
-
- if remaining_retries > 0 and self._should_retry(err.response):
- await err.response.aclose()
- return await self._retry_request(
- input_options,
- cast_to,
- retries_taken=retries_taken,
- response_headers=err.response.headers,
- stream=stream,
- stream_cls=stream_cls,
- )
+ # If the response is streamed then we need to explicitly read the response
+ # to completion before attempting to access the response text.
+ if not err.response.is_closed:
+ await err.response.aread()

- # If the response is streamed then we need to explicitly read the response
- # to completion before attempting to access the response text.
- if not err.response.is_closed:
- await err.response.aread()
+ log.debug("Re-raising status error")
+ raise self._make_status_error_from_response(err.response) from None

- log.debug("Re-raising status error")
- raise self._make_status_error_from_response(err.response) from None
+ break

+ assert response is not None, "could not resolve response (should never happen)"
  return await self._process_response(
  cast_to=cast_to,
  options=options,
@@ -1591,35 +1542,20 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
  retries_taken=retries_taken,
  )

- async def _retry_request(
- self,
- options: FinalRequestOptions,
- cast_to: Type[ResponseT],
- *,
- retries_taken: int,
- response_headers: httpx.Headers | None,
- stream: bool,
- stream_cls: type[_AsyncStreamT] | None,
- ) -> ResponseT | _AsyncStreamT:
- remaining_retries = options.get_max_retries(self.max_retries) - retries_taken
+ async def _sleep_for_retry(
+ self, *, retries_taken: int, max_retries: int, options: FinalRequestOptions, response: httpx.Response | None
+ ) -> None:
+ remaining_retries = max_retries - retries_taken
  if remaining_retries == 1:
  log.debug("1 retry left")
  else:
  log.debug("%i retries left", remaining_retries)

- timeout = self._calculate_retry_timeout(remaining_retries, options, response_headers)
+ timeout = self._calculate_retry_timeout(remaining_retries, options, response.headers if response else None)
  log.info("Retrying request to %s in %f seconds", options.url, timeout)

  await anyio.sleep(timeout)

- return await self._request(
- options=options,
- cast_to=cast_to,
- retries_taken=retries_taken + 1,
- stream=stream,
- stream_cls=stream_cls,
- )
-
  async def _process_response(
  self,
  *,
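The async client mirrors the same loop, with `await self._prepare_options(...)`, `await anyio.sleep(...)` for backoff, and `aclose()`/`aread()` on streamed error responses. A tiny asyncio-based mirror of the control flow, using `asyncio.sleep` as a stand-in for `anyio.sleep` and an illustrative `send_once` rather than the SDK's transport:

```python
import asyncio

async def request_with_retries(send_once, max_retries: int = 2) -> dict:
    response: dict | None = None
    for retries_taken in range(max_retries + 1):
        remaining_retries = max_retries - retries_taken
        try:
            response = await send_once()
        except TimeoutError:
            if remaining_retries > 0:
                # stand-in for awaiting the SDK's calculated retry timeout
                await asyncio.sleep(min(0.5 * 2**retries_taken, 8.0))
                continue
            raise
        break
    assert response is not None
    return response

async def main() -> None:
    attempts = 0

    async def send_once() -> dict:
        nonlocal attempts
        attempts += 1
        if attempts == 1:
            raise TimeoutError("simulated timeout")
        return {"status": 200}

    print(await request_with_retries(send_once))  # {'status': 200}

asyncio.run(main())
```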
runwayml/_client.py CHANGED
@@ -19,10 +19,7 @@ from ._types import (
  ProxiesTypes,
  RequestOptions,
  )
- from ._utils import (
- is_given,
- get_async_library,
- )
+ from ._utils import is_given, get_async_library
  from ._version import __version__
  from .resources import tasks, organization, image_to_video
  from ._streaming import Stream as Stream, AsyncStream as AsyncStream
runwayml/_models.py CHANGED
@@ -626,8 +626,8 @@ def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any,
  # Note: if one variant defines an alias then they all should
  discriminator_alias = field_info.alias

- if field_info.annotation and is_literal_type(field_info.annotation):
- for entry in get_args(field_info.annotation):
+ if (annotation := getattr(field_info, "annotation", None)) and is_literal_type(annotation):
+ for entry in get_args(annotation):
  if isinstance(entry, str):
  mapping[entry] = variant
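This guards the discriminator-mapping code against field-info objects that don't expose an `annotation` attribute: `getattr(..., "annotation", None)` plus the walrus operator skips them instead of raising `AttributeError`. A hedged, standalone illustration of the pattern, with `get_args` standing in for the SDK's `is_literal_type` check and the field classes invented for the example:

```python
from typing import Literal, get_args

class FieldWithAnnotation:
    annotation = Literal["cat", "dog"]

class FieldWithoutAnnotation:  # e.g. a looser/older field-info object
    pass

mapping: dict[str, str] = {}
for variant, field_info in [("PetA", FieldWithAnnotation()), ("PetB", FieldWithoutAnnotation())]:
    # the defensive access: a missing `annotation` yields None and the variant is skipped
    if (annotation := getattr(field_info, "annotation", None)) and get_args(annotation):
        for entry in get_args(annotation):
            if isinstance(entry, str):
                mapping[entry] = variant

print(mapping)  # {'cat': 'PetA', 'dog': 'PetA'} -- PetB is skipped, no AttributeError
```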
runwayml/_response.py CHANGED
@@ -233,7 +233,7 @@ class BaseAPIResponse(Generic[R]):
  # split is required to handle cases where additional information is included
  # in the response, e.g. application/json; charset=utf-8
  content_type, *_ = response.headers.get("content-type", "*").split(";")
- if content_type != "application/json":
+ if not content_type.endswith("json"):
  if is_basemodel(cast_to):
  try:
  data = response.json()
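The response parser now treats any media type ending in `json` as JSON, so values such as `application/vnd.api+json` no longer fall into the non-JSON branch. A small sketch of the relaxed check (the header values below are examples, not taken from the API):

```python
def is_json_content(content_type_header: str) -> bool:
    # mirror of the check above: strip parameters, then accept any "*json" media type
    content_type, *_ = content_type_header.split(";")
    return content_type.strip().endswith("json")

for value in ["application/json; charset=utf-8", "application/vnd.api+json", "text/plain"]:
    print(f"{value!r} -> {is_json_content(value)}")
# 'application/json; charset=utf-8' -> True
# 'application/vnd.api+json' -> True
# 'text/plain' -> False
```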
runwayml/_utils/_utils.py CHANGED
@@ -72,8 +72,16 @@ def _extract_items(
  from .._files import assert_is_file_content

  # We have exhausted the path, return the entry we found.
- assert_is_file_content(obj, key=flattened_key)
  assert flattened_key is not None
+
+ if is_list(obj):
+ files: list[tuple[str, FileTypes]] = []
+ for entry in obj:
+ assert_is_file_content(entry, key=flattened_key + "[]" if flattened_key else "")
+ files.append((flattened_key + "[]", cast(FileTypes, entry)))
+ return files
+
+ assert_is_file_content(obj, key=flattened_key)
  return [(flattened_key, cast(FileTypes, obj))]

  index += 1
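`_extract_items` can now terminate a multipart path at a list of file values: each entry is validated and emitted under the key with a `[]` suffix, rather than the list itself failing `assert_is_file_content`. An illustrative, simplified version of just the key-suffixing behaviour — the field names and byte payloads below are made up, and `FileTypes` plus the real validation are elided:

```python
from typing import Any

def extract_file_items(flattened_key: str, obj: Any) -> list[tuple[str, Any]]:
    # simplified: a list of files becomes one ("key[]", file) pair per entry,
    # while a single file stays ("key", file)
    if isinstance(obj, list):
        return [(flattened_key + "[]", entry) for entry in obj]
    return [(flattened_key, obj)]

print(extract_file_items("promptImage", b"single-image-bytes"))
# [('promptImage', b'single-image-bytes')]
print(extract_file_items("referenceImages", [b"image-1", b"image-2"]))
# [('referenceImages[]', b'image-1'), ('referenceImages[]', b'image-2')]
```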
runwayml/_version.py CHANGED
@@ -1,4 +1,4 @@
  # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

  __title__ = "runwayml"
- __version__ = "3.0.2" # x-release-please-version
+ __version__ = "3.0.4" # x-release-please-version
runwayml/resources/image_to_video.py CHANGED
@@ -9,10 +9,7 @@ import httpx

  from ..types import image_to_video_create_params
  from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
- from .._utils import (
- maybe_transform,
- async_maybe_transform,
- )
+ from .._utils import maybe_transform, async_maybe_transform
  from .._compat import cached_property
  from .._resource import SyncAPIResource, AsyncAPIResource
  from .._response import (
@@ -52,10 +49,9 @@ class ImageToVideoResource(SyncAPIResource):
  *,
  model: Literal["gen4_turbo", "gen3a_turbo"],
  prompt_image: Union[str, Iterable[image_to_video_create_params.PromptImagePromptImage]],
+ ratio: Literal["1280:720", "720:1280", "1104:832", "832:1104", "960:960", "1584:672", "1280:768", "768:1280"],
  duration: Literal[5, 10] | NotGiven = NOT_GIVEN,
  prompt_text: str | NotGiven = NOT_GIVEN,
- ratio: Literal["1280:720", "720:1280", "1104:832", "832:1104", "960:960", "1584:672", "1280:768", "768:1280"]
- | NotGiven = NOT_GIVEN,
  seed: int | NotGiven = NOT_GIVEN,
  # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
  # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -74,11 +70,27 @@ class ImageToVideoResource(SyncAPIResource):
  frame of the generated video. See [our docs](/assets/inputs#images) on image
  inputs for more information.

- duration: The number of seconds of duration for the output video.
+ ratio: The resolution of the output video.
+
+ `gen4_turbo` supports the following values:

- prompt_text
+ - `1280:720`
+ - `720:1280`
+ - `1104:832`
+ - `832:1104`
+ - `960:960`
+ - `1584:672`

- ratio
+ `gen3a_turbo` supports the following values:
+
+ - `1280:768`
+ - `768:1280`
+
+ duration: The number of seconds of duration for the output video.
+
+ prompt_text: A non-empty string up to 1000 UTF-16 code points in length (that is,
+ `promptText.length === 1000` in JavaScript). This should describe in detail what
+ should appear in the output.

  seed: If unspecified, a random number is chosen. Varying the seed integer is a way to
  get different results for the same other request parameters. Using the same seed
@@ -98,9 +110,9 @@ class ImageToVideoResource(SyncAPIResource):
  {
  "model": model,
  "prompt_image": prompt_image,
+ "ratio": ratio,
  "duration": duration,
  "prompt_text": prompt_text,
- "ratio": ratio,
  "seed": seed,
  },
  image_to_video_create_params.ImageToVideoCreateParams,
@@ -137,10 +149,9 @@ class AsyncImageToVideoResource(AsyncAPIResource):
  *,
  model: Literal["gen4_turbo", "gen3a_turbo"],
  prompt_image: Union[str, Iterable[image_to_video_create_params.PromptImagePromptImage]],
+ ratio: Literal["1280:720", "720:1280", "1104:832", "832:1104", "960:960", "1584:672", "1280:768", "768:1280"],
  duration: Literal[5, 10] | NotGiven = NOT_GIVEN,
  prompt_text: str | NotGiven = NOT_GIVEN,
- ratio: Literal["1280:720", "720:1280", "1104:832", "832:1104", "960:960", "1584:672", "1280:768", "768:1280"]
- | NotGiven = NOT_GIVEN,
  seed: int | NotGiven = NOT_GIVEN,
  # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
  # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -159,11 +170,27 @@ class AsyncImageToVideoResource(AsyncAPIResource):
  frame of the generated video. See [our docs](/assets/inputs#images) on image
  inputs for more information.

- duration: The number of seconds of duration for the output video.
+ ratio: The resolution of the output video.
+
+ `gen4_turbo` supports the following values:
+
+ - `1280:720`
+ - `720:1280`
+ - `1104:832`
+ - `832:1104`
+ - `960:960`
+ - `1584:672`
+
+ `gen3a_turbo` supports the following values:

- prompt_text
+ - `1280:768`
+ - `768:1280`

- ratio
+ duration: The number of seconds of duration for the output video.
+
+ prompt_text: A non-empty string up to 1000 UTF-16 code points in length (that is,
+ `promptText.length === 1000` in JavaScript). This should describe in detail what
+ should appear in the output.

  seed: If unspecified, a random number is chosen. Varying the seed integer is a way to
  get different results for the same other request parameters. Using the same seed
@@ -183,9 +210,9 @@ class AsyncImageToVideoResource(AsyncAPIResource):
  {
  "model": model,
  "prompt_image": prompt_image,
+ "ratio": ratio,
  "duration": duration,
  "prompt_text": prompt_text,
- "ratio": ratio,
  "seed": seed,
  },
  image_to_video_create_params.ImageToVideoCreateParams,
runwayml/types/image_to_video_create_params.py CHANGED
@@ -21,12 +21,35 @@ class ImageToVideoCreateParams(TypedDict, total=False):
  inputs for more information.
  """

+ ratio: Required[
+ Literal["1280:720", "720:1280", "1104:832", "832:1104", "960:960", "1584:672", "1280:768", "768:1280"]
+ ]
+ """The resolution of the output video.
+
+ `gen4_turbo` supports the following values:
+
+ - `1280:720`
+ - `720:1280`
+ - `1104:832`
+ - `832:1104`
+ - `960:960`
+ - `1584:672`
+
+ `gen3a_turbo` supports the following values:
+
+ - `1280:768`
+ - `768:1280`
+ """
+
  duration: Literal[5, 10]
  """The number of seconds of duration for the output video."""

  prompt_text: Annotated[str, PropertyInfo(alias="promptText")]
-
- ratio: Literal["1280:720", "720:1280", "1104:832", "832:1104", "960:960", "1584:672", "1280:768", "768:1280"]
+ """
+ A non-empty string up to 1000 UTF-16 code points in length (that is,
+ `promptText.length === 1000` in JavaScript). This should describe in detail what
+ should appear in the output.
+ """

  seed: int
  """If unspecified, a random number is chosen.
runwayml/types/image_to_video_create_response.py CHANGED
@@ -1,6 +1,5 @@
  # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

-
  from .._models import BaseModel

  __all__ = ["ImageToVideoCreateResponse"]
runwayml-3.0.4.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: runwayml
- Version: 3.0.2
+ Version: 3.0.4
  Summary: The official Python library for the runwayml API
  Project-URL: Homepage, https://github.com/runwayml/sdk-python
  Project-URL: Repository, https://github.com/runwayml/sdk-python
@@ -65,6 +65,7 @@ client = RunwayML(
  image_to_video = client.image_to_video.create(
  model="gen4_turbo",
  prompt_image="https://example.com/assets/bunny.jpg",
+ ratio="1280:720",
  prompt_text="The bunny is eating a carrot",
  )
  print(image_to_video.id)
@@ -93,6 +94,7 @@ async def main() -> None:
  image_to_video = await client.image_to_video.create(
  model="gen4_turbo",
  prompt_image="https://example.com/assets/bunny.jpg",
+ ratio="1280:720",
  prompt_text="The bunny is eating a carrot",
  )
  print(image_to_video.id)
@@ -131,6 +133,7 @@ try:
  client.image_to_video.create(
  model="gen4_turbo",
  prompt_image="https://example.com/assets/bunny.jpg",
+ ratio="1280:720",
  prompt_text="The bunny is eating a carrot",
  )
  except runwayml.APIConnectionError as e:
@@ -178,6 +181,7 @@ client = RunwayML(
  client.with_options(max_retries=5).image_to_video.create(
  model="gen4_turbo",
  prompt_image="https://example.com/assets/bunny.jpg",
+ ratio="1280:720",
  prompt_text="The bunny is eating a carrot",
  )
  ```
@@ -205,6 +209,7 @@ client = RunwayML(
  client.with_options(timeout=5.0).image_to_video.create(
  model="gen4_turbo",
  prompt_image="https://example.com/assets/bunny.jpg",
+ ratio="1280:720",
  prompt_text="The bunny is eating a carrot",
  )
  ```
@@ -250,6 +255,7 @@ client = RunwayML()
  response = client.image_to_video.with_raw_response.create(
  model="gen4_turbo",
  prompt_image="https://example.com/assets/bunny.jpg",
+ ratio="1280:720",
  prompt_text="The bunny is eating a carrot",
  )
  print(response.headers.get('X-My-Header'))
@@ -272,6 +278,7 @@ To stream the response body, use `.with_streaming_response` instead, which requi
  with client.image_to_video.with_streaming_response.create(
  model="gen4_turbo",
  prompt_image="https://example.com/assets/bunny.jpg",
+ ratio="1280:720",
  prompt_text="The bunny is eating a carrot",
  ) as response:
  print(response.headers.get("X-My-Header"))
runwayml-3.0.4.dist-info/RECORD CHANGED
@@ -1,17 +1,17 @@
  runwayml/__init__.py,sha256=iXnJfH73wbj9IxfCHpwfWBxgOa9C4FRrrbBZM5f3biw,2476
- runwayml/_base_client.py,sha256=3ELTaBPe0-UwldzM1dKETvVBba-XCEPF9w8Rrr0c6DU,66246
- runwayml/_client.py,sha256=oUOh2gdY1YtVefv51GTssitaCuDQIncvVlxTvzDJyec,17136
+ runwayml/_base_client.py,sha256=IHjCRq-jze0tVzSAP_zkZLHYG9lsvg4j_xwy2hv0ryQ,64846
+ runwayml/_client.py,sha256=RmchptAkzAmgp1RC40KQ0ZKqpdxfV9f8bHin3Bbmcmg,17123
  runwayml/_compat.py,sha256=VWemUKbj6DDkQ-O4baSpHVLJafotzeXmCQGJugfVTIw,6580
  runwayml/_constants.py,sha256=S14PFzyN9-I31wiV7SmIlL5Ga0MLHxdvegInGdXH7tM,462
  runwayml/_exceptions.py,sha256=p2Q8kywHCVQzArLQL4Ht-HetTBhAvevU6yDvEq7PpIE,3224
  runwayml/_files.py,sha256=mf4dOgL4b0ryyZlbqLhggD3GVgDf6XxdGFAgce01ugE,3549
- runwayml/_models.py,sha256=q-l1tes71l6z-D5ffu9-G4UigTVVeJwiwIzA_gO4RFo,29045
+ runwayml/_models.py,sha256=mB2r2VWQq49jG-F0RIXDrBxPp3v-Eg12wMOtVTNxtv4,29057
  runwayml/_qs.py,sha256=AOkSz4rHtK4YI3ZU_kzea-zpwBUgEY8WniGmTPyEimc,4846
  runwayml/_resource.py,sha256=BF-j3xY5eRTKmuTxg8eDhLtLP4MLB1phDh_B6BKipKA,1112
- runwayml/_response.py,sha256=3Tf7pmDYDMv5BJuF0ljEBtMMk5Q9T7jcWn7I6P-hbdM,28801
+ runwayml/_response.py,sha256=WxjSEXX-j01ZhlSxYyMCVSEKxo20pgy40RA7iyski8M,28800
  runwayml/_streaming.py,sha256=NSVuAgknVQWU1cgZEjQn01IdZKKynb5rOeYp5Lo-OEQ,10108
  runwayml/_types.py,sha256=oHct1QQY_lI8bepCgfWDZm2N5VNi0e6o1iLeiTh4Y_0,6145
- runwayml/_version.py,sha256=e3bUSp3FFS0--Sj49tZWT_zFZktA99W6UJB8VrDSsaU,160
+ runwayml/_version.py,sha256=I7DDYKHiDZ1-sTsDqu6oLn4iFOzIOQRbwfX5CIDPRz0,160
  runwayml/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  runwayml/_utils/__init__.py,sha256=PNZ_QJuzZEgyYXqkO1HVhGkj5IU9bglVUcw7H-Knjzw,2062
  runwayml/_utils/_logs.py,sha256=ZfS5W59hdqEBVV86lNrk28PhvUxtHOzs9JqiLhSu0pI,780
@@ -21,18 +21,18 @@ runwayml/_utils/_streams.py,sha256=SMC90diFFecpEg_zgDRVbdR3hSEIgVVij4taD-noMLM,2
  runwayml/_utils/_sync.py,sha256=TpGLrrhRNWTJtODNE6Fup3_k7zrWm1j2RlirzBwre-0,2862
  runwayml/_utils/_transform.py,sha256=n7kskEWz6o__aoNvhFoGVyDoalNe6mJwp-g7BWkdj88,15617
  runwayml/_utils/_typing.py,sha256=D0DbbNu8GnYQTSICnTSHDGsYXj8TcAKyhejb0XcnjtY,4602
- runwayml/_utils/_utils.py,sha256=8UmbPOy_AAr2uUjjFui-VZSrVBHRj6bfNEKRp5YZP2A,12004
+ runwayml/_utils/_utils.py,sha256=ts4CiiuNpFiGB6YMdkQRh2SZvYvsl7mAF-JWHCcLDf4,12312
  runwayml/lib/.keep,sha256=wuNrz-5SXo3jJaJOJgz4vFHM41YH_g20F5cRQo0vLes,224
  runwayml/resources/__init__.py,sha256=SqcC1MLwxPaz2c7gRRBlOn9-2pDPMKTXD2gFbG5FJ2E,1597
- runwayml/resources/image_to_video.py,sha256=A0nOe7KwLcJ_UiqHLk6Gm_h2iWbr7V37a3B7m1mBOxc,9141
+ runwayml/resources/image_to_video.py,sha256=n3xb3sm4qJK_yQlWK8dTF2BetoC6gA5p2V7jcuWy6Ik,10228
  runwayml/resources/organization.py,sha256=XBg5nhkycPU3rllRvf9aaeHuZNtzGDKHlLPrPqDCAsw,5419
  runwayml/resources/tasks.py,sha256=-VT3qetYcaqn4FskekxhN_fCTozMl1GqxGpGwxV8M60,9673
  runwayml/types/__init__.py,sha256=xfq4RirwNpSBy5xXra7CB8wa0029vKUH0DB6Zg02hFs,505
- runwayml/types/image_to_video_create_params.py,sha256=VNWGDEdqkhp-Br-19t8YYfaYMaXxHEmADwXZ1CUC4So,1882
- runwayml/types/image_to_video_create_response.py,sha256=l5GszzUSItV-ZYHCB8hH_GSVibUZEkzfRLrAhXkd8O4,346
+ runwayml/types/image_to_video_create_params.py,sha256=EtTTZJ4rWEcEknqRxFYDWtP3Zxox-_36NBj3fRGdP7o,2400
+ runwayml/types/image_to_video_create_response.py,sha256=WvZHbZxxJz8KerRNogzb1RYBrxa1x0iCPDi9-LCpHyE,345
  runwayml/types/organization_retrieve_response.py,sha256=DV46yEIRjmL05uISc2-PpM5BGWu8gniA9TQ056abWLA,2721
  runwayml/types/task_retrieve_response.py,sha256=v8y2bLxsW6srzScW-B3Akv72q_PI_NQmduGrGRQMHds,2139
- runwayml-3.0.2.dist-info/METADATA,sha256=DwcgvLuTCeZS2a2pqMOIeVkPSJEOKYL0ILJ_zC-ZCkw,13516
- runwayml-3.0.2.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
- runwayml-3.0.2.dist-info/licenses/LICENSE,sha256=baeFj6izBWIm6A5_7N3-WAsy_VYpDF05Dd4zS1zsfZI,11338
- runwayml-3.0.2.dist-info/RECORD,,
+ runwayml-3.0.4.dist-info/METADATA,sha256=llLabRfzpWOknEB_54TUcpQyjFFAk5Pvowxwp4hUGBc,13678
+ runwayml-3.0.4.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
+ runwayml-3.0.4.dist-info/licenses/LICENSE,sha256=baeFj6izBWIm6A5_7N3-WAsy_VYpDF05Dd4zS1zsfZI,11338
+ runwayml-3.0.4.dist-info/RECORD,,