livekit-plugins-anthropic 0.2.12__tar.gz → 1.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (20) hide show
  1. livekit_plugins_anthropic-1.0.0/.gitignore +168 -0
  2. {livekit_plugins_anthropic-0.2.12 → livekit_plugins_anthropic-1.0.0}/PKG-INFO +11 -21
  3. livekit_plugins_anthropic-1.0.0/livekit/plugins/anthropic/llm.py +319 -0
  4. livekit_plugins_anthropic-1.0.0/livekit/plugins/anthropic/utils.py +147 -0
  5. {livekit_plugins_anthropic-0.2.12 → livekit_plugins_anthropic-1.0.0}/livekit/plugins/anthropic/version.py +1 -1
  6. livekit_plugins_anthropic-1.0.0/pyproject.toml +39 -0
  7. livekit_plugins_anthropic-0.2.12/livekit/plugins/anthropic/llm.py +0 -635
  8. livekit_plugins_anthropic-0.2.12/livekit_plugins_anthropic.egg-info/PKG-INFO +0 -47
  9. livekit_plugins_anthropic-0.2.12/livekit_plugins_anthropic.egg-info/SOURCES.txt +0 -14
  10. livekit_plugins_anthropic-0.2.12/livekit_plugins_anthropic.egg-info/dependency_links.txt +0 -1
  11. livekit_plugins_anthropic-0.2.12/livekit_plugins_anthropic.egg-info/requires.txt +0 -2
  12. livekit_plugins_anthropic-0.2.12/livekit_plugins_anthropic.egg-info/top_level.txt +0 -1
  13. livekit_plugins_anthropic-0.2.12/pyproject.toml +0 -3
  14. livekit_plugins_anthropic-0.2.12/setup.cfg +0 -4
  15. livekit_plugins_anthropic-0.2.12/setup.py +0 -59
  16. {livekit_plugins_anthropic-0.2.12 → livekit_plugins_anthropic-1.0.0}/README.md +0 -0
  17. {livekit_plugins_anthropic-0.2.12 → livekit_plugins_anthropic-1.0.0}/livekit/plugins/anthropic/__init__.py +0 -0
  18. {livekit_plugins_anthropic-0.2.12 → livekit_plugins_anthropic-1.0.0}/livekit/plugins/anthropic/log.py +0 -0
  19. {livekit_plugins_anthropic-0.2.12 → livekit_plugins_anthropic-1.0.0}/livekit/plugins/anthropic/models.py +0 -0
  20. {livekit_plugins_anthropic-0.2.12 → livekit_plugins_anthropic-1.0.0}/livekit/plugins/anthropic/py.typed +0 -0
@@ -0,0 +1,168 @@
1
+ **/.vscode
2
+ **/.DS_Store
3
+
4
+ # Byte-compiled / optimized / DLL files
5
+ __pycache__/
6
+ *.py[cod]
7
+ *$py.class
8
+
9
+ # C extensions
10
+ *.so
11
+
12
+ # Distribution / packaging
13
+ .Python
14
+ build/
15
+ develop-eggs/
16
+ dist/
17
+ downloads/
18
+ eggs/
19
+ .eggs/
20
+ lib/
21
+ lib64/
22
+ parts/
23
+ sdist/
24
+ var/
25
+ wheels/
26
+ share/python-wheels/
27
+ *.egg-info/
28
+ .installed.cfg
29
+ *.egg
30
+ MANIFEST
31
+
32
+ # PyInstaller
33
+ # Usually these files are written by a python script from a template
34
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
35
+ *.manifest
36
+ *.spec
37
+
38
+ # Installer logs
39
+ pip-log.txt
40
+ pip-delete-this-directory.txt
41
+
42
+ # Unit test / coverage reports
43
+ htmlcov/
44
+ .tox/
45
+ .nox/
46
+ .coverage
47
+ .coverage.*
48
+ .cache
49
+ nosetests.xml
50
+ coverage.xml
51
+ *.cover
52
+ *.py,cover
53
+ .hypothesis/
54
+ .pytest_cache/
55
+ cover/
56
+
57
+ # Translations
58
+ *.mo
59
+ *.pot
60
+
61
+ # Django stuff:
62
+ *.log
63
+ local_settings.py
64
+ db.sqlite3
65
+ db.sqlite3-journal
66
+
67
+ # Flask stuff:
68
+ instance/
69
+ .webassets-cache
70
+
71
+ # Scrapy stuff:
72
+ .scrapy
73
+
74
+ # Sphinx documentation
75
+ docs/_build/
76
+
77
+ # PyBuilder
78
+ .pybuilder/
79
+ target/
80
+
81
+ # Jupyter Notebook
82
+ .ipynb_checkpoints
83
+
84
+ # IPython
85
+ profile_default/
86
+ ipython_config.py
87
+
88
+ # pyenv
89
+ # For a library or package, you might want to ignore these files since the code is
90
+ # intended to run in multiple environments; otherwise, check them in:
91
+ # .python-version
92
+
93
+ # pipenv
94
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
95
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
96
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
97
+ # install all needed dependencies.
98
+ #Pipfile.lock
99
+
100
+ # poetry
101
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
102
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
103
+ # commonly ignored for libraries.
104
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
105
+ #poetry.lock
106
+
107
+ # pdm
108
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
109
+ #pdm.lock
110
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
111
+ # in version control.
112
+ # https://pdm.fming.dev/#use-with-ide
113
+ .pdm.toml
114
+
115
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
116
+ __pypackages__/
117
+
118
+ # Celery stuff
119
+ celerybeat-schedule
120
+ celerybeat.pid
121
+
122
+ # SageMath parsed files
123
+ *.sage.py
124
+
125
+ # Environments
126
+ .env
127
+ .venv
128
+ env/
129
+ venv/
130
+ ENV/
131
+ env.bak/
132
+ venv.bak/
133
+
134
+ # Spyder project settings
135
+ .spyderproject
136
+ .spyproject
137
+
138
+ # Rope project settings
139
+ .ropeproject
140
+
141
+ # mkdocs documentation
142
+ /site
143
+
144
+ # mypy
145
+ .mypy_cache/
146
+ .dmypy.json
147
+ dmypy.json
148
+
149
+ # Pyre type checker
150
+ .pyre/
151
+
152
+ # pytype static type analyzer
153
+ .pytype/
154
+
155
+ # Cython debug symbols
156
+ cython_debug/
157
+
158
+ # PyCharm
159
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
160
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
161
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
162
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
163
+ .idea/
164
+
165
+ node_modules
166
+
167
+ credentials.json
168
+ pyrightconfig.json
@@ -1,36 +1,26 @@
1
- Metadata-Version: 2.2
1
+ Metadata-Version: 2.4
2
2
  Name: livekit-plugins-anthropic
3
- Version: 0.2.12
3
+ Version: 1.0.0
4
4
  Summary: Agent Framework plugin for services from Anthropic
5
- Home-page: https://github.com/livekit/agents
6
- License: Apache-2.0
7
5
  Project-URL: Documentation, https://docs.livekit.io
8
6
  Project-URL: Website, https://livekit.io/
9
7
  Project-URL: Source, https://github.com/livekit/agents
10
- Keywords: webrtc,realtime,audio,video,livekit
8
+ Author-email: LiveKit <hello@livekit.io>
9
+ License-Expression: Apache-2.0
10
+ Keywords: audio,livekit,realtime,video,webrtc
11
11
  Classifier: Intended Audience :: Developers
12
12
  Classifier: License :: OSI Approved :: Apache Software License
13
- Classifier: Topic :: Multimedia :: Sound/Audio
14
- Classifier: Topic :: Multimedia :: Video
15
- Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
16
13
  Classifier: Programming Language :: Python :: 3
14
+ Classifier: Programming Language :: Python :: 3 :: Only
17
15
  Classifier: Programming Language :: Python :: 3.9
18
16
  Classifier: Programming Language :: Python :: 3.10
19
- Classifier: Programming Language :: Python :: 3 :: Only
17
+ Classifier: Topic :: Multimedia :: Sound/Audio
18
+ Classifier: Topic :: Multimedia :: Video
19
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
20
20
  Requires-Python: >=3.9.0
21
- Description-Content-Type: text/markdown
22
- Requires-Dist: livekit-agents>=0.12.3
23
21
  Requires-Dist: anthropic>=0.34
24
- Dynamic: classifier
25
- Dynamic: description
26
- Dynamic: description-content-type
27
- Dynamic: home-page
28
- Dynamic: keywords
29
- Dynamic: license
30
- Dynamic: project-url
31
- Dynamic: requires-dist
32
- Dynamic: requires-python
33
- Dynamic: summary
22
+ Requires-Dist: livekit-agents>=1.0.0
23
+ Description-Content-Type: text/markdown
34
24
 
35
25
  # LiveKit Plugins Anthropic
36
26
 
@@ -0,0 +1,319 @@
1
+ # Copyright 2023 LiveKit, Inc.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from __future__ import annotations
16
+
17
+ import os
18
+ from collections.abc import Awaitable
19
+ from dataclasses import dataclass
20
+ from typing import Any, Literal
21
+
22
+ import httpx
23
+
24
+ import anthropic
25
+ from livekit.agents import APIConnectionError, APIStatusError, APITimeoutError, llm
26
+ from livekit.agents.llm import ToolChoice
27
+ from livekit.agents.llm.chat_context import ChatContext
28
+ from livekit.agents.llm.tool_context import FunctionTool
29
+ from livekit.agents.types import (
30
+ DEFAULT_API_CONNECT_OPTIONS,
31
+ NOT_GIVEN,
32
+ APIConnectOptions,
33
+ NotGivenOr,
34
+ )
35
+ from livekit.agents.utils import is_given
36
+
37
+ from .models import ChatModels
38
+ from .utils import to_chat_ctx, to_fnc_ctx
39
+
40
+
41
+ @dataclass
42
+ class _LLMOptions:
43
+ model: str | ChatModels
44
+ user: NotGivenOr[str]
45
+ temperature: NotGivenOr[float]
46
+ parallel_tool_calls: NotGivenOr[bool]
47
+ tool_choice: NotGivenOr[ToolChoice]
48
+ caching: NotGivenOr[Literal["ephemeral"]]
49
+ top_k: NotGivenOr[int]
50
+ max_tokens: NotGivenOr[int]
51
+ """If set to "ephemeral", the system prompt, tools, and chat history will be cached."""
52
+
53
+
54
class LLM(llm.LLM):
    """LiveKit Agents LLM adapter for Anthropic's Messages API."""

    def __init__(
        self,
        *,
        model: str | ChatModels = "claude-3-5-sonnet-20241022",
        api_key: NotGivenOr[str] = NOT_GIVEN,
        base_url: NotGivenOr[str] = NOT_GIVEN,
        user: NotGivenOr[str] = NOT_GIVEN,
        client: anthropic.AsyncClient | None = None,
        top_k: NotGivenOr[int] = NOT_GIVEN,
        max_tokens: NotGivenOr[int] = NOT_GIVEN,
        temperature: NotGivenOr[float] = NOT_GIVEN,
        parallel_tool_calls: NotGivenOr[bool] = NOT_GIVEN,
        tool_choice: NotGivenOr[ToolChoice] = NOT_GIVEN,
        caching: NotGivenOr[Literal["ephemeral"]] = NOT_GIVEN,
    ) -> None:
        """
        Create a new instance of Anthropic LLM.

        ``api_key`` must be set to your Anthropic API key, either using the
        argument or by setting the ``ANTHROPIC_API_KEY`` environment variable.
        It is not required when a pre-configured ``client`` is supplied.

        Args:
            model: The model to use. Defaults to "claude-3-5-sonnet-20241022".
            api_key: The Anthropic API key. Defaults to ANTHROPIC_API_KEY env var.
            base_url: The base URL for the Anthropic API. Defaults to None.
            user: The user for the Anthropic API. Defaults to None.
            client: An existing Anthropic client to use instead of creating one.
            top_k: top_k sampling parameter forwarded to the API.
            max_tokens: max tokens per response; the API requires a value, so
                1024 is used when not given.
            temperature: The temperature for the Anthropic API.
            parallel_tool_calls: Whether to parallelize tool calls.
            tool_choice: The tool choice for the Anthropic API. Defaults to "auto".
            caching: If set to "ephemeral", caching will be enabled for the
                system prompt, tools, and chat history.

        Raises:
            ValueError: if no API key is available and no ``client`` was given.
        """
        super().__init__()

        self._opts = _LLMOptions(
            model=model,
            user=user,
            temperature=temperature,
            parallel_tool_calls=parallel_tool_calls,
            tool_choice=tool_choice,
            caching=caching,
            top_k=top_k,
            max_tokens=max_tokens,
        )

        if client is not None:
            # Bug fix: a caller-supplied client used to be ignored and a new
            # AsyncClient was always constructed (also forcing an API key even
            # though the provided client carries its own credentials).
            self._client = client
            return

        anthropic_api_key = api_key if is_given(api_key) else os.environ.get("ANTHROPIC_API_KEY")
        if not anthropic_api_key:
            raise ValueError("Anthropic API key is required")

        self._client = anthropic.AsyncClient(
            api_key=anthropic_api_key,
            base_url=base_url if is_given(base_url) else None,
            http_client=httpx.AsyncClient(
                timeout=5.0,
                follow_redirects=True,
                limits=httpx.Limits(
                    max_connections=1000,
                    max_keepalive_connections=100,
                    keepalive_expiry=120,
                ),
            ),
        )

    def chat(
        self,
        *,
        chat_ctx: ChatContext,
        tools: list[FunctionTool] | None = None,
        conn_options: APIConnectOptions = DEFAULT_API_CONNECT_OPTIONS,
        parallel_tool_calls: NotGivenOr[bool] = NOT_GIVEN,
        tool_choice: NotGivenOr[ToolChoice] = NOT_GIVEN,
        extra_kwargs: NotGivenOr[dict[str, Any]] = NOT_GIVEN,
    ) -> LLMStream:
        """Start a streaming chat completion against the Anthropic Messages API.

        Per-call ``parallel_tool_calls`` / ``tool_choice`` override the values
        given at construction time. ``extra_kwargs`` is forwarded verbatim to
        ``messages.create`` (applied first, so explicit options below win).
        """
        extra = {}

        if is_given(extra_kwargs):
            extra.update(extra_kwargs)

        if is_given(self._opts.user):
            extra["user"] = self._opts.user

        if is_given(self._opts.temperature):
            extra["temperature"] = self._opts.temperature

        if is_given(self._opts.top_k):
            extra["top_k"] = self._opts.top_k

        # the Messages API requires max_tokens; default to 1024 when unset
        extra["max_tokens"] = self._opts.max_tokens if is_given(self._opts.max_tokens) else 1024

        if tools:
            extra["tools"] = to_fnc_ctx(tools, self._opts.caching)
            tool_choice = tool_choice if is_given(tool_choice) else self._opts.tool_choice
            if is_given(tool_choice):
                # map OpenAI-style tool_choice values onto Anthropic's scheme
                anthropic_tool_choice: dict[str, Any] | None = {"type": "auto"}
                if isinstance(tool_choice, dict) and tool_choice.get("type") == "function":
                    anthropic_tool_choice = {
                        "type": "tool",
                        "name": tool_choice["function"]["name"],
                    }
                elif isinstance(tool_choice, str):
                    if tool_choice == "required":
                        anthropic_tool_choice = {"type": "any"}
                    elif tool_choice == "none":
                        # Anthropic has no "none" choice: strip tools instead
                        extra["tools"] = []
                        anthropic_tool_choice = None
                if anthropic_tool_choice is not None:
                    parallel_tool_calls = (
                        parallel_tool_calls
                        if is_given(parallel_tool_calls)
                        else self._opts.parallel_tool_calls
                    )
                    if is_given(parallel_tool_calls):
                        anthropic_tool_choice["disable_parallel_tool_use"] = not parallel_tool_calls
                    extra["tool_choice"] = anthropic_tool_choice

        anthropic_ctx, system_message = to_chat_ctx(chat_ctx, id(self), caching=self._opts.caching)

        if system_message:
            extra["system"] = [system_message]

        stream = self._client.messages.create(
            messages=anthropic_ctx,
            model=self._opts.model,
            stream=True,
            **extra,
        )

        return LLMStream(
            self,
            anthropic_stream=stream,
            chat_ctx=chat_ctx,
            tools=tools,
            conn_options=conn_options,
        )
188
+
189
+
190
class LLMStream(llm.LLMStream):
    """Consumes one Anthropic streaming response and re-emits it as ChatChunks.

    Text deltas are forwarded incrementally; tool-use blocks are accumulated
    until their closing ``content_block_stop`` event and emitted as a single
    chunk. Token usage is tallied across events and reported in a final
    usage-only chunk.
    """

    def __init__(
        self,
        llm: LLM,
        *,
        anthropic_stream: Awaitable[anthropic.AsyncStream[anthropic.types.RawMessageStreamEvent]],
        chat_ctx: llm.ChatContext,
        tools: list[FunctionTool] | None,
        conn_options: APIConnectOptions,
    ) -> None:
        super().__init__(llm, chat_ctx=chat_ctx, tools=tools, conn_options=conn_options)
        # the create() call is not awaited until _run(), so retries re-issue it
        self._awaitable_anthropic_stream = anthropic_stream
        self._anthropic_stream: (
            anthropic.AsyncStream[anthropic.types.RawMessageStreamEvent] | None
        ) = None

        # current function call that we're waiting for full completion (args are streamed)
        self._tool_call_id: str | None = None
        self._fnc_name: str | None = None
        self._fnc_raw_arguments: str | None = None

        self._request_id: str = ""
        self._ignoring_cot = False  # ignore chain-of-thought text (<thinking> blocks)
        # usage counters accumulated across stream events
        self._input_tokens = 0
        self._cache_creation_tokens = 0
        self._cache_read_tokens = 0
        self._output_tokens = 0

    async def _run(self) -> None:
        """Drain the Anthropic stream, forwarding chunks and a final usage report.

        Raises livekit-agents API errors; ``retryable`` is True only until the
        first chunk has been emitted (a partially-delivered response must not
        be retried).
        """
        retryable = True
        try:
            if not self._anthropic_stream:
                self._anthropic_stream = await self._awaitable_anthropic_stream

            async with self._anthropic_stream as stream:
                async for event in stream:
                    chat_chunk = self._parse_event(event)
                    if chat_chunk is not None:
                        self._event_ch.send_nowait(chat_chunk)
                        retryable = False

                # final chunk carries only aggregated usage, no content
                self._event_ch.send_nowait(
                    llm.ChatChunk(
                        id=self._request_id,
                        usage=llm.CompletionUsage(
                            completion_tokens=self._output_tokens,
                            prompt_tokens=self._input_tokens,
                            total_tokens=self._input_tokens
                            + self._output_tokens
                            + self._cache_creation_tokens
                            + self._cache_read_tokens,
                            cache_creation_input_tokens=self._cache_creation_tokens,
                            cache_read_input_tokens=self._cache_read_tokens,
                        ),
                    )
                )
        except anthropic.APITimeoutError as e:
            raise APITimeoutError(retryable=retryable) from e
        except anthropic.APIStatusError as e:
            raise APIStatusError(
                e.message,
                status_code=e.status_code,
                request_id=e.request_id,
                body=e.body,
            ) from e
        except Exception as e:
            raise APIConnectionError(retryable=retryable) from e

    def _parse_event(self, event: anthropic.types.RawMessageStreamEvent) -> llm.ChatChunk | None:
        """Translate one raw stream event into a ChatChunk, or None.

        Returns None for bookkeeping events (usage, block starts) and while
        suppressing <thinking> chain-of-thought text.
        """
        if event.type == "message_start":
            self._request_id = event.message.id
            self._input_tokens = event.message.usage.input_tokens
            self._output_tokens = event.message.usage.output_tokens
            if event.message.usage.cache_creation_input_tokens:
                self._cache_creation_tokens = event.message.usage.cache_creation_input_tokens
            if event.message.usage.cache_read_input_tokens:
                self._cache_read_tokens = event.message.usage.cache_read_input_tokens
        elif event.type == "message_delta":
            self._output_tokens += event.usage.output_tokens
        elif event.type == "content_block_start":
            if event.content_block.type == "tool_use":
                # begin accumulating this tool call; args arrive as JSON deltas
                self._tool_call_id = event.content_block.id
                self._fnc_name = event.content_block.name
                self._fnc_raw_arguments = ""
        elif event.type == "content_block_delta":
            delta = event.delta
            if delta.type == "text_delta":
                text = delta.text

                if self._tools is not None:
                    # anthropic may inject CoT (chain of thought) when using functions
                    if text.startswith("<thinking>"):
                        self._ignoring_cot = True
                    elif self._ignoring_cot and "</thinking>" in text:
                        # resume with whatever text follows the closing tag
                        text = text.split("</thinking>")[-1]
                        self._ignoring_cot = False

                if self._ignoring_cot:
                    return None

                return llm.ChatChunk(
                    id=self._request_id,
                    delta=llm.ChoiceDelta(content=text, role="assistant"),
                )
            elif delta.type == "input_json_delta":
                assert self._fnc_raw_arguments is not None
                self._fnc_raw_arguments += delta.partial_json

        elif event.type == "content_block_stop":
            if self._tool_call_id is not None:
                assert self._fnc_name is not None
                assert self._fnc_raw_arguments is not None

                # tool call is complete: emit it as a single chunk
                chat_chunk = llm.ChatChunk(
                    id=self._request_id,
                    delta=llm.ChoiceDelta(
                        role="assistant",
                        tool_calls=[
                            llm.FunctionToolCall(
                                arguments=self._fnc_raw_arguments or "",
                                name=self._fnc_name or "",
                                call_id=self._tool_call_id or "",
                            )
                        ],
                    ),
                )
                self._tool_call_id = self._fnc_raw_arguments = self._fnc_name = None
                return chat_chunk

        return None
@@ -0,0 +1,147 @@
1
+ import base64
2
+ import json
3
+ from typing import Any, Literal
4
+
5
+ import anthropic
6
+ from livekit.agents import llm
7
+ from livekit.agents.llm import FunctionTool
8
+
9
+ CACHE_CONTROL_EPHEMERAL = anthropic.types.CacheControlEphemeralParam(type="ephemeral")
10
+
11
+ __all__ = ["to_fnc_ctx", "to_chat_ctx"]
12
+
13
+
14
def to_fnc_ctx(
    fncs: list[FunctionTool], caching: Literal["ephemeral"] | None
) -> list[anthropic.types.ToolParam]:
    """Convert LiveKit function tools into Anthropic ``ToolParam`` entries.

    When ``caching == "ephemeral"``, only the final tool carries the
    cache-control marker (Anthropic caches the prompt prefix up to and
    including the marked block).
    """
    last_idx = len(fncs) - 1
    return [
        _build_anthropic_schema(
            fnc,
            cache_ctrl=(
                CACHE_CONTROL_EPHEMERAL if caching == "ephemeral" and idx == last_idx else None
            ),
        )
        for idx, fnc in enumerate(fncs)
    ]
25
+
26
+
27
def to_chat_ctx(
    chat_ctx: llm.ChatContext,
    cache_key: Any,
    caching: Literal["ephemeral"] | None,
) -> tuple[list[anthropic.types.MessageParam], anthropic.types.TextBlockParam | None]:
    """Convert a LiveKit ChatContext into Anthropic Messages API inputs.

    Consecutive items with the same role are merged into a single message.
    System messages are lifted out of the message list and returned separately.

    Args:
        chat_ctx: the LiveKit chat context to convert.
        cache_key: opaque key used to memoize serialized image bytes.
        caching: "ephemeral" to attach a cache-control marker to the system
            prompt and to the last chat item.

    Returns:
        ``(messages, system_message)`` where ``system_message`` is None when
        the context contains no system message. (The previous annotation
        claimed a bare list, but a tuple was always returned.)
    """
    messages: list[anthropic.types.MessageParam] = []
    system_message: anthropic.types.TextBlockParam | None = None
    current_role: str | None = None
    content: list[anthropic.types.TextBlockParam] = []
    for i, msg in enumerate(chat_ctx.items):
        if msg.type == "message" and msg.role == "system":
            # Bug fix: the loop variable used to be named `content`, clobbering
            # the accumulator list above — a system message appearing between
            # two same-role items left `content` bound to a str, crashing the
            # later `content.append(...)`.
            for part in msg.content:
                if part and isinstance(part, str):
                    # last system string wins
                    system_message = anthropic.types.TextBlockParam(
                        text=part,
                        type="text",
                        cache_control=CACHE_CONTROL_EPHEMERAL if caching == "ephemeral" else None,
                    )
            continue

        # only the very last item gets the cache marker
        cache_ctrl = (
            CACHE_CONTROL_EPHEMERAL
            if (i == len(chat_ctx.items) - 1) and caching == "ephemeral"
            else None
        )
        if msg.type == "message":
            role = "assistant" if msg.role == "assistant" else "user"
        elif msg.type == "function_call":
            role = "assistant"
        elif msg.type == "function_call_output":
            role = "user"
        else:
            # Bug fix: an unrecognized item type used to leave `role` stale
            # (or unbound on the first iteration); skip it explicitly.
            continue

        if role != current_role:
            # flush the previous same-role run before starting a new one
            if current_role is not None and content:
                messages.append(anthropic.types.MessageParam(role=current_role, content=content))
            content = []
            current_role = role

        if msg.type == "message":
            for c in msg.content:
                if c and isinstance(c, str):
                    content.append(
                        anthropic.types.TextBlockParam(text=c, type="text", cache_control=cache_ctrl)
                    )
                elif isinstance(c, llm.ImageContent):
                    content.append(_to_image_content(c, cache_key, cache_ctrl=cache_ctrl))
        elif msg.type == "function_call":
            content.append(
                anthropic.types.ToolUseBlockParam(
                    id=msg.call_id,
                    type="tool_use",
                    name=msg.name,
                    input=json.loads(msg.arguments or "{}"),
                    cache_control=cache_ctrl,
                )
            )
        elif msg.type == "function_call_output":
            content.append(
                anthropic.types.ToolResultBlockParam(
                    tool_use_id=msg.call_id,
                    type="tool_result",
                    content=msg.output,
                    cache_control=cache_ctrl,
                )
            )

    # flush the trailing run
    if current_role is not None and content:
        messages.append(anthropic.types.MessageParam(role=current_role, content=content))

    # the Messages API requires the conversation to start with a "user" message
    if not messages or messages[0]["role"] != "user":
        messages.insert(
            0,
            anthropic.types.MessageParam(
                role="user",
                content=[anthropic.types.TextBlockParam(text="(empty)", type="text")],
            ),
        )

    return messages, system_message
109
+
110
+
111
def _to_image_content(
    image: llm.ImageContent,
    cache_key: Any,
    cache_ctrl: anthropic.types.CacheControlEphemeralParam | None,
) -> anthropic.types.ImageBlockParam:
    """Serialize an ImageContent into an Anthropic image block.

    Images with an external URL are passed through by reference; otherwise the
    raw bytes are base64-encoded (and memoized on the image under
    ``cache_key`` so repeated requests reuse the serialized bytes).
    """
    img = llm.utils.serialize_image(image)
    if img.external_url:
        return {
            "type": "image",
            "source": {"type": "url", "url": img.external_url},
            "cache_control": cache_ctrl,
        }
    if cache_key not in image._cache:
        image._cache[cache_key] = img.data_bytes
    b64_data = base64.b64encode(image._cache[cache_key]).decode("utf-8")
    return {
        "type": "image",
        "source": {
            "type": "base64",
            # Bug fix: Anthropic's base64 image source takes the bare base64
            # payload; the previous "data:<mime>;base64,<data>" data-URL prefix
            # is rejected by the API as invalid base64.
            "data": b64_data,
            "media_type": img.mime_type,
        },
        "cache_control": cache_ctrl,
    }
135
+
136
+
137
def _build_anthropic_schema(
    function_tool: FunctionTool,
    cache_ctrl: anthropic.types.CacheControlEphemeralParam | None = None,
) -> anthropic.types.ToolParam:
    """Translate one FunctionTool into Anthropic's tool schema.

    Goes through the legacy OpenAI-style schema as an intermediate form, then
    remaps its fields onto Anthropic's ``ToolParam`` layout.
    """
    schema = llm.utils.build_legacy_openai_schema(function_tool, internally_tagged=True)
    description = schema["description"] or ""
    return anthropic.types.ToolParam(
        name=schema["name"],
        description=description,
        input_schema=schema["parameters"],
        cache_control=cache_ctrl,
    )
@@ -12,4 +12,4 @@
12
12
  # See the License for the specific language governing permissions and
13
13
  # limitations under the License.
14
14
 
15
- __version__ = "0.2.12"
15
+ __version__ = "1.0.0"