livekit-plugins-aws 1.0.0rc3__tar.gz → 1.0.0rc5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of livekit-plugins-aws might be problematic. See the registry's advisory page for more details.

@@ -1,11 +1,11 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: livekit-plugins-aws
3
- Version: 1.0.0rc3
3
+ Version: 1.0.0rc5
4
4
  Summary: LiveKit Agents Plugin for services from AWS
5
5
  Project-URL: Documentation, https://docs.livekit.io
6
6
  Project-URL: Website, https://livekit.io/
7
7
  Project-URL: Source, https://github.com/livekit/agents
8
- Author-email: LiveKit <support@livekit.io>
8
+ Author-email: LiveKit <hello@livekit.io>
9
9
  License-Expression: Apache-2.0
10
10
  Keywords: audio,aws,livekit,realtime,video,webrtc
11
11
  Classifier: Intended Audience :: Developers
@@ -21,7 +21,7 @@ Requires-Python: >=3.9.0
21
21
  Requires-Dist: aiobotocore==2.19.0
22
22
  Requires-Dist: amazon-transcribe>=0.6.2
23
23
  Requires-Dist: boto3==1.36.3
24
- Requires-Dist: livekit-agents>=1.0.0.rc3
24
+ Requires-Dist: livekit-agents>=1.0.0.rc5
25
25
  Description-Content-Type: text/markdown
26
26
 
27
27
  # LiveKit Plugins AWS
@@ -35,14 +35,13 @@ from .log import logger
35
35
  from .utils import get_aws_credentials, to_chat_ctx, to_fnc_ctx
36
36
 
37
37
  TEXT_MODEL = Literal["anthropic.claude-3-5-sonnet-20241022-v2:0"]
38
- DEFAULT_REGION = "us-east-1"
39
38
 
40
39
 
41
40
  @dataclass
42
41
  class _LLMOptions:
43
42
  model: str | TEXT_MODEL
44
43
  temperature: NotGivenOr[float]
45
- tool_choice: NotGivenOr[ToolChoice | Literal["auto", "required", "none"]]
44
+ tool_choice: NotGivenOr[ToolChoice]
46
45
  max_output_tokens: NotGivenOr[int]
47
46
  top_p: NotGivenOr[float]
48
47
  additional_request_fields: NotGivenOr[dict[str, Any]]
@@ -59,7 +58,7 @@ class LLM(llm.LLM):
59
58
  temperature: NotGivenOr[float] = NOT_GIVEN,
60
59
  max_output_tokens: NotGivenOr[int] = NOT_GIVEN,
61
60
  top_p: NotGivenOr[float] = NOT_GIVEN,
62
- tool_choice: NotGivenOr[ToolChoice | Literal["auto", "required", "none"]] = NOT_GIVEN,
61
+ tool_choice: NotGivenOr[ToolChoice] = NOT_GIVEN,
63
62
  additional_request_fields: NotGivenOr[dict[str, Any]] = NOT_GIVEN,
64
63
  ) -> None:
65
64
  """
@@ -78,7 +77,7 @@ class LLM(llm.LLM):
78
77
  temperature (float, optional): Sampling temperature for response generation. Defaults to 0.8.
79
78
  max_output_tokens (int, optional): Maximum number of tokens to generate in the output. Defaults to None.
80
79
  top_p (float, optional): The nucleus sampling probability for response generation. Defaults to None.
81
- tool_choice (ToolChoice or Literal["auto", "required", "none"], optional): Specifies whether to use tools during response generation. Defaults to "auto".
80
+ tool_choice (ToolChoice, optional): Specifies whether to use tools during response generation. Defaults to "auto".
82
81
  additional_request_fields (dict[str, Any], optional): Additional request fields to send to the AWS Bedrock Converse API. Defaults to None.
83
82
  """ # noqa: E501
84
83
  super().__init__()
@@ -107,7 +106,7 @@ class LLM(llm.LLM):
107
106
  tools: list[FunctionTool] | None = None,
108
107
  conn_options: APIConnectOptions = DEFAULT_API_CONNECT_OPTIONS,
109
108
  temperature: NotGivenOr[float] = NOT_GIVEN,
110
- tool_choice: NotGivenOr[ToolChoice | Literal["auto", "required", "none"]] = NOT_GIVEN,
109
+ tool_choice: NotGivenOr[ToolChoice] = NOT_GIVEN,
111
110
  ) -> LLMStream:
112
111
  opts = {}
113
112
 
@@ -123,8 +122,8 @@ class LLM(llm.LLM):
123
122
  tool_config: dict[str, Any] = {"tools": to_fnc_ctx(tools)}
124
123
  tool_choice = tool_choice if is_given(tool_choice) else self._opts.tool_choice
125
124
  if is_given(tool_choice):
126
- if isinstance(tool_choice, ToolChoice):
127
- tool_config["toolChoice"] = {"tool": {"name": tool_choice.name}}
125
+ if isinstance(tool_choice, dict) and tool_choice.get("type") == "function":
126
+ tool_config["toolChoice"] = {"tool": {"name": tool_choice["function"]["name"]}}
128
127
  elif tool_choice == "required":
129
128
  tool_config["toolChoice"] = {"any": {}}
130
129
  elif tool_choice == "auto":
@@ -228,12 +227,24 @@ class LLMStream(llm.LLMStream):
228
227
  self._tool_call_id = tool_use["toolUseId"]
229
228
  self._fnc_name = tool_use["name"]
230
229
  self._fnc_raw_arguments = ""
230
+
231
231
  elif "contentBlockDelta" in chunk:
232
232
  delta = chunk["contentBlockDelta"]["delta"]
233
233
  if "toolUse" in delta:
234
234
  self._fnc_raw_arguments += delta["toolUse"]["input"]
235
235
  elif "text" in delta:
236
236
  self._text += delta["text"]
237
+
238
+ elif "metadata" in chunk:
239
+ metadata = chunk["metadata"]
240
+ return llm.ChatChunk(
241
+ request_id=request_id,
242
+ usage=llm.CompletionUsage(
243
+ completion_tokens=metadata["usage"]["outputTokens"],
244
+ prompt_tokens=metadata["usage"]["inputTokens"],
245
+ total_tokens=metadata["usage"]["totalTokens"],
246
+ ),
247
+ )
237
248
  elif "contentBlockStop" in chunk:
238
249
  if self._text:
239
250
  chat_chunk = llm.ChatChunk(
@@ -13,6 +13,8 @@ from livekit.agents.utils import is_given
13
13
 
14
14
  __all__ = ["to_fnc_ctx", "to_chat_ctx", "get_aws_credentials"]
15
15
 
16
+ DEFAULT_REGION = "us-east-1"
17
+
16
18
 
17
19
  def get_aws_credentials(
18
20
  api_key: NotGivenOr[str],
@@ -21,9 +23,7 @@ def get_aws_credentials(
21
23
  ):
22
24
  aws_region = region if is_given(region) else os.environ.get("AWS_DEFAULT_REGION")
23
25
  if not aws_region:
24
- raise ValueError(
25
- "AWS_DEFAULT_REGION must be set via argument or the AWS_DEFAULT_REGION environment variable." # noqa: E501
26
- )
26
+ aws_region = DEFAULT_REGION
27
27
 
28
28
  if is_given(api_key) and is_given(api_secret):
29
29
  session = boto3.Session(
@@ -53,7 +53,7 @@ def to_chat_ctx(chat_ctx: ChatContext, cache_key: Any) -> tuple[list[dict], dict
53
53
  for msg in chat_ctx.items:
54
54
  if msg.type == "message" and msg.role == "system":
55
55
  for content in msg.content:
56
- if isinstance(content, str):
56
+ if content and isinstance(content, str):
57
57
  system_message = {"text": content}
58
58
  continue
59
59
 
@@ -73,7 +73,7 @@ def to_chat_ctx(chat_ctx: ChatContext, cache_key: Any) -> tuple[list[dict], dict
73
73
 
74
74
  if msg.type == "message":
75
75
  for content in msg.content:
76
- if isinstance(content, str):
76
+ if content and isinstance(content, str):
77
77
  current_content.append({"text": content})
78
78
  elif isinstance(content, ImageContent):
79
79
  current_content.append(_build_image(content, cache_key))
@@ -127,6 +127,8 @@ def _build_tool_spec(fnc: FunctionTool) -> dict:
127
127
 
128
128
  def _build_image(image: ImageContent, cache_key: Any) -> dict:
129
129
  img = utils.serialize_image(image)
130
+ if img.external_url:
131
+ raise ValueError("external_url is not supported by AWS Bedrock.")
130
132
  if cache_key not in image._cache:
131
133
  image._cache[cache_key] = img.data_bytes
132
134
  return {
@@ -12,4 +12,4 @@
12
12
  # See the License for the specific language governing permissions and
13
13
  # limitations under the License.
14
14
 
15
- __version__ = '1.0.0.rc3'
15
+ __version__ = '1.0.0.rc5'
@@ -9,7 +9,7 @@ description = "LiveKit Agents Plugin for services from AWS"
9
9
  readme = "README.md"
10
10
  license = "Apache-2.0"
11
11
  requires-python = ">=3.9.0"
12
- authors = [{ name = "LiveKit", email = "support@livekit.io" }]
12
+ authors = [{ name = "LiveKit", email = "hello@livekit.io" }]
13
13
  keywords = ["webrtc", "realtime", "audio", "video", "livekit", "aws"]
14
14
  classifiers = [
15
15
  "Intended Audience :: Developers",
@@ -23,7 +23,7 @@ classifiers = [
23
23
  "Programming Language :: Python :: 3 :: Only",
24
24
  ]
25
25
  dependencies = [
26
- "livekit-agents>=1.0.0.rc3",
26
+ "livekit-agents>=1.0.0.rc5",
27
27
  "aiobotocore==2.19.0",
28
28
  "boto3==1.36.3",
29
29
  "amazon-transcribe>=0.6.2",