livekit-plugins-anthropic 0.0.1__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- livekit/plugins/anthropic/__init__.py +23 -1
- livekit/plugins/anthropic/llm.py +489 -0
- livekit/plugins/anthropic/log.py +3 -0
- livekit/plugins/anthropic/models.py +8 -0
- livekit/plugins/anthropic/py.typed +0 -0
- livekit/plugins/anthropic/version.py +1 -1
- {livekit_plugins_anthropic-0.0.1.dist-info → livekit_plugins_anthropic-0.2.0.dist-info}/METADATA +20 -9
- livekit_plugins_anthropic-0.2.0.dist-info/RECORD +10 -0
- {livekit_plugins_anthropic-0.0.1.dist-info → livekit_plugins_anthropic-0.2.0.dist-info}/WHEEL +1 -1
- livekit/plugins/anthropic/claude.py +0 -126
- livekit_plugins_anthropic-0.0.1.dist-info/RECORD +0 -7
- {livekit_plugins_anthropic-0.0.1.dist-info → livekit_plugins_anthropic-0.2.0.dist-info}/top_level.txt +0 -0
livekit/plugins/anthropic/__init__.py
CHANGED
@@ -12,4 +12,26 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
+
+from .llm import LLM, LLMStream
+from .log import logger
+from .models import ChatModels
+from .version import __version__
+
+__all__ = [
+    "LLM",
+    "LLMStream",
+    "ChatModels",
+    "logger",
+    "__version__",
+]
+
+from livekit.agents import Plugin
+
+
+class AnthropicPlugin(Plugin):
+    def __init__(self) -> None:
+        super().__init__(__name__, __version__, __package__, logger)
+
+
+Plugin.register_plugin(AnthropicPlugin())
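Because `Plugin.register_plugin(AnthropicPlugin())` runs at module import time, merely importing the package registers the plugin with the Agent Framework. A minimal consumer sketch (hypothetical code, not part of the diff; assumes `ANTHROPIC_API_KEY` is exported and livekit-agents ~=0.8 is installed):

```python
# Hypothetical consumer code: this import executes __init__.py above,
# which registers AnthropicPlugin as a side effect.
from livekit.plugins import anthropic

# LLM is re-exported via __all__; it reads ANTHROPIC_API_KEY from the environment.
my_llm = anthropic.LLM(model="claude-3-opus-20240229")
```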
livekit/plugins/anthropic/llm.py
ADDED
@@ -0,0 +1,489 @@
+# Copyright 2023 LiveKit, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+import base64
+import inspect
+import json
+import os
+from dataclasses import dataclass
+from typing import Any, Awaitable, List, Tuple, get_args, get_origin
+
+import httpx
+from livekit import rtc
+from livekit.agents import llm, utils
+
+import anthropic
+
+from .log import logger
+from .models import (
+    ChatModels,
+)
+
+
+@dataclass
+class LLMOptions:
+    model: str | ChatModels
+    user: str | None
+
+
+class LLM(llm.LLM):
+    def __init__(
+        self,
+        *,
+        model: str | ChatModels = "claude-3-opus-20240229",
+        api_key: str | None = None,
+        base_url: str | None = None,
+        user: str | None = None,
+        client: anthropic.AsyncClient | None = None,
+    ) -> None:
+        """
+        Create a new instance of Anthropic LLM.
+
+        ``api_key`` must be set to your Anthropic API key, either using the argument or by setting
+        the ``ANTHROPIC_API_KEY`` environmental variable.
+        """
+
+        # throw an error on our end
+        api_key = api_key or os.environ.get("ANTHROPIC_API_KEY")
+        if api_key is None:
+            raise ValueError("Anthropic API key is required")
+
+        self._opts = LLMOptions(model=model, user=user)
+        self._client = client or anthropic.AsyncClient(
+            api_key=api_key,
+            base_url=base_url,
+            http_client=httpx.AsyncClient(
+                timeout=5.0,
+                follow_redirects=True,
+                limits=httpx.Limits(
+                    max_connections=1000,
+                    max_keepalive_connections=100,
+                    keepalive_expiry=120,
+                ),
+            ),
+        )
+
+    def chat(
+        self,
+        *,
+        chat_ctx: llm.ChatContext,
+        fnc_ctx: llm.FunctionContext | None = None,
+        temperature: float | None = None,
+        n: int | None = 1,
+        parallel_tool_calls: bool | None = None,
+    ) -> "LLMStream":
+        opts: dict[str, Any] = dict()
+        if fnc_ctx and len(fnc_ctx.ai_functions) > 0:
+            fncs_desc: list[anthropic.types.ToolParam] = []
+            for fnc in fnc_ctx.ai_functions.values():
+                fncs_desc.append(_build_function_description(fnc))
+
+            opts["tools"] = fncs_desc
+
+        if fnc_ctx and parallel_tool_calls is not None:
+            opts["parallel_tool_calls"] = parallel_tool_calls
+
+        latest_system_message = _latest_system_message(chat_ctx)
+        anthropic_ctx = _build_anthropic_context(chat_ctx.messages, id(self))
+        collaped_anthropic_ctx = _merge_messages(anthropic_ctx)
+        stream = self._client.messages.create(
+            max_tokens=opts.get("max_tokens", 1000),
+            system=latest_system_message,
+            messages=collaped_anthropic_ctx,
+            model=self._opts.model,
+            temperature=temperature or anthropic.NOT_GIVEN,
+            top_k=n or anthropic.NOT_GIVEN,
+            stream=True,
+            **opts,
+        )
+
+        return LLMStream(anthropic_stream=stream, chat_ctx=chat_ctx, fnc_ctx=fnc_ctx)
+
+
+class LLMStream(llm.LLMStream):
+    def __init__(
+        self,
+        *,
+        anthropic_stream: Awaitable[
+            anthropic.AsyncStream[anthropic.types.RawMessageStreamEvent]
+        ],
+        chat_ctx: llm.ChatContext,
+        fnc_ctx: llm.FunctionContext | None,
+    ) -> None:
+        super().__init__(chat_ctx=chat_ctx, fnc_ctx=fnc_ctx)
+        self._awaitable_anthropic_stream = anthropic_stream
+        self._anthropic_stream: (
+            anthropic.AsyncStream[anthropic.types.RawMessageStreamEvent] | None
+        ) = None
+
+        # current function call that we're waiting for full completion (args are streamed)
+        self._tool_call_id: str | None = None
+        self._fnc_name: str | None = None
+        self._fnc_raw_arguments: str | None = None
+
+    async def aclose(self) -> None:
+        if self._anthropic_stream:
+            await self._anthropic_stream.close()
+
+        return await super().aclose()
+
+    async def __anext__(self):
+        if not self._anthropic_stream:
+            self._anthropic_stream = await self._awaitable_anthropic_stream
+
+        async for event in self._anthropic_stream:
+            if event.type == "message_start":
+                pass
+            elif event.type == "message_delta":
+                pass
+            elif event.type == "message_stop":
+                pass
+            elif event.type == "content_block_start":
+                if event.content_block.type == "tool_use":
+                    self._tool_call_id = event.content_block.id
+                    self._fnc_raw_arguments = ""
+                    self._fnc_name = event.content_block.name
+            elif event.type == "content_block_delta":
+                delta = event.delta
+                if delta.type == "text_delta":
+                    return llm.ChatChunk(
+                        choices=[
+                            llm.Choice(
+                                delta=llm.ChoiceDelta(
+                                    content=delta.text, role="assistant"
+                                )
+                            )
+                        ]
+                    )
+                elif delta.type == "input_json_delta":
+                    assert self._fnc_raw_arguments is not None
+                    self._fnc_raw_arguments += delta.partial_json
+            elif event.type == "content_block_stop":
+                if self._tool_call_id is not None and self._fnc_ctx:
+                    assert self._fnc_name is not None
+                    assert self._fnc_raw_arguments is not None
+                    fnc_info = _create_ai_function_info(
+                        self._fnc_ctx,
+                        self._tool_call_id,
+                        self._fnc_name,
+                        self._fnc_raw_arguments,
+                    )
+                    self._function_calls_info.append(fnc_info)
+                    chunk = llm.ChatChunk(
+                        choices=[
+                            llm.Choice(
+                                delta=llm.ChoiceDelta(
+                                    role="assistant", tool_calls=[fnc_info]
+                                ),
+                                index=0,
+                            )
+                        ]
+                    )
+                    self._tool_call_id = None
+                    self._fnc_raw_arguments = None
+                    self._fnc_name = None
+                    return chunk
+
+        raise StopAsyncIteration
+
+
+def _latest_system_message(chat_ctx: llm.ChatContext) -> str:
+    latest_system_message: llm.ChatMessage | None = None
+    for m in chat_ctx.messages:
+        if m.role == "system":
+            latest_system_message = m
+            continue
+
+    latest_system_str = ""
+    if latest_system_message:
+        if isinstance(latest_system_message.content, str):
+            latest_system_str = latest_system_message.content
+        elif isinstance(latest_system_message.content, list):
+            latest_system_str = " ".join(
+                [c for c in latest_system_message.content if isinstance(c, str)]
+            )
+    return latest_system_str
+
+
+def _merge_messages(
+    messages: List[anthropic.types.MessageParam],
+) -> List[anthropic.types.MessageParam]:
+    # Anthropic enforces alternating messages
+    combined_messages: list[anthropic.types.MessageParam] = []
+    for m in messages:
+        if len(combined_messages) == 0 or m["role"] != combined_messages[-1]["role"]:
+            combined_messages.append(m)
+            continue
+        last_message = combined_messages[-1]
+        if not isinstance(last_message["content"], list) or not isinstance(
+            m["content"], list
+        ):
+            logger.error("message content is not a list")
+            continue
+
+        last_message["content"].extend(m["content"])
+
+    if len(combined_messages) == 0 or combined_messages[0]["role"] != "user":
+        combined_messages.insert(
+            0, {"role": "user", "content": [{"type": "text", "text": "(empty)"}]}
+        )
+
+    return combined_messages
+
+
+def _build_anthropic_context(
+    chat_ctx: List[llm.ChatMessage], cache_key: Any
+) -> List[anthropic.types.MessageParam]:
+    result: List[anthropic.types.MessageParam] = []
+    for msg in chat_ctx:
+        a_msg = _build_anthropic_message(msg, cache_key)
+        if a_msg:
+            result.append(a_msg)
+    return result
+
+
+def _build_anthropic_message(msg: llm.ChatMessage, cache_key: Any):
+    if msg.role == "user" or msg.role == "assistant":
+        a_msg: anthropic.types.MessageParam = {
+            "role": msg.role,
+            "content": [],
+        }
+        assert isinstance(a_msg["content"], list)
+        a_content = a_msg["content"]
+
+        # add content if provided
+        if isinstance(msg.content, str):
+            a_msg["content"].append(
+                anthropic.types.TextBlock(
+                    text=msg.content,
+                    type="text",
+                )
+            )
+        elif isinstance(msg.content, list):
+            for cnt in msg.content:
+                if isinstance(cnt, str):
+                    content: anthropic.types.TextBlock = anthropic.types.TextBlock(
+                        text=cnt,
+                        type="text",
+                    )
+                    a_content.append(content)
+                elif isinstance(cnt, llm.ChatImage):
+                    a_content.append(_build_anthropic_image_content(cnt, cache_key))
+        return a_msg
+    elif msg.role == "tool":
+        ant_msg: anthropic.types.MessageParam = {
+            "role": "assistant",
+            "content": [],
+        }
+        assert isinstance(ant_msg["content"], list)
+        # make sure to provide when function has been called inside the context
+        # (+ raw_arguments)
+        if msg.tool_calls is not None:
+            for fnc in msg.tool_calls:
+                ant_msg["content"].append(
+                    {
+                        "id": fnc.tool_call_id,
+                        "type": "tool_use",
+                        "input": fnc.arguments,
+                        "name": fnc.function_info.name,
+                    }
+                )
+                if isinstance(msg.content, str):
+                    ant_msg["content"].append(
+                        {
+                            "tool_use_id": fnc.tool_call_id,
+                            "type": "tool_result",
+                            "content": msg.content,
+                        }
+                    )
+                else:
+                    logger.warning(
+                        "tool result content is not a string, this is not supported by anthropic"
+                    )
+        return ant_msg
+
+    return None
+
+
+def _build_anthropic_image_content(
+    image: llm.ChatImage, cache_key: Any
+) -> anthropic.types.ImageBlockParam:
+    if isinstance(image.image, str):  # image url
+        logger.warning(
+            "image url not supported by anthropic, skipping image '%s'", image.image
+        )
+    elif isinstance(image.image, rtc.VideoFrame):  # VideoFrame
+        if cache_key not in image._cache:
+            # inside our internal implementation, we allow to put extra metadata to
+            # each ChatImage (avoid to reencode each time we do a chatcompletion request)
+            opts = utils.images.EncodeOptions()
+            if image.inference_width and image.inference_height:
+                opts.resize_options = utils.images.ResizeOptions(
+                    width=image.inference_width,
+                    height=image.inference_height,
+                    strategy="center_aspect_fit",
+                )
+
+            encoded_data = utils.images.encode(image.image, opts)
+            image._cache[cache_key] = base64.b64encode(encoded_data).decode("utf-8")
+
+        return {
+            "type": "image",
+            "source": {
+                "type": "base64",
+                "data": image._cache[cache_key],
+                "media_type": "image/jpeg",
+            },
+        }
+
+    raise ValueError(f"unknown image type {type(image.image)}")
+
+
+def _create_ai_function_info(
+    fnc_ctx: llm.function_context.FunctionContext,
+    tool_call_id: str,
+    fnc_name: str,
+    raw_arguments: str,  # JSON string
+) -> llm.function_context.FunctionCallInfo:
+    if fnc_name not in fnc_ctx.ai_functions:
+        raise ValueError(f"AI function {fnc_name} not found")
+
+    parsed_arguments: dict[str, Any] = {}
+    try:
+        if raw_arguments:  # ignore empty string
+            parsed_arguments = json.loads(raw_arguments)
+    except json.JSONDecodeError:
+        raise ValueError(
+            f"AI function {fnc_name} received invalid JSON arguments - {raw_arguments}"
+        )
+
+    fnc_info = fnc_ctx.ai_functions[fnc_name]
+
+    # Ensure all necessary arguments are present and of the correct type.
+    sanitized_arguments: dict[str, Any] = {}
+    for arg_info in fnc_info.arguments.values():
+        if arg_info.name not in parsed_arguments:
+            if arg_info.default is inspect.Parameter.empty:
+                raise ValueError(
+                    f"AI function {fnc_name} missing required argument {arg_info.name}"
+                )
+            continue
+
+        arg_value = parsed_arguments[arg_info.name]
+        if get_origin(arg_info.type) is not None:
+            if not isinstance(arg_value, list):
+                raise ValueError(
+                    f"AI function {fnc_name} argument {arg_info.name} should be a list"
+                )
+
+            inner_type = get_args(arg_info.type)[0]
+            sanitized_value = [
+                _sanitize_primitive(
+                    value=v, expected_type=inner_type, choices=arg_info.choices
+                )
+                for v in arg_value
+            ]
+        else:
+            sanitized_value = _sanitize_primitive(
+                value=arg_value, expected_type=arg_info.type, choices=arg_info.choices
+            )
+
+        sanitized_arguments[arg_info.name] = sanitized_value
+
+    return llm.function_context.FunctionCallInfo(
+        tool_call_id=tool_call_id,
+        raw_arguments=raw_arguments,
+        function_info=fnc_info,
+        arguments=sanitized_arguments,
+    )
+
+
+def _build_function_description(
+    fnc_info: llm.function_context.FunctionInfo,
+) -> anthropic.types.ToolParam:
+    def build_schema_field(arg_info: llm.function_context.FunctionArgInfo):
+        def type2str(t: type) -> str:
+            if t is str:
+                return "string"
+            elif t in (int, float):
+                return "number"
+            elif t is bool:
+                return "boolean"
+
+            raise ValueError(f"unsupported type {t} for ai_property")
+
+        p: dict[str, Any] = {}
+        if arg_info.default is inspect.Parameter.empty:
+            p["required"] = True
+        else:
+            p["required"] = False
+
+        if arg_info.description:
+            p["description"] = arg_info.description
+
+        if get_origin(arg_info.type) is list:
+            inner_type = get_args(arg_info.type)[0]
+            p["type"] = "array"
+            p["items"] = {}
+            p["items"]["type"] = type2str(inner_type)
+
+            if arg_info.choices:
+                p["items"]["enum"] = arg_info.choices
+        else:
+            p["type"] = type2str(arg_info.type)
+            if arg_info.choices:
+                p["enum"] = arg_info.choices
+
+        return p
+
+    input_schema: dict[str, object] = {"type": "object"}
+
+    for arg_info in fnc_info.arguments.values():
+        input_schema[arg_info.name] = build_schema_field(arg_info)
+
+    return {
+        "name": fnc_info.name,
+        "description": fnc_info.description,
+        "input_schema": input_schema,
+    }
+
+
+def _sanitize_primitive(
+    *, value: Any, expected_type: type, choices: Tuple[Any] | None
+) -> Any:
+    if expected_type is str:
+        if not isinstance(value, str):
+            raise ValueError(f"expected str, got {type(value)}")
+    elif expected_type in (int, float):
+        if not isinstance(value, (int, float)):
+            raise ValueError(f"expected number, got {type(value)}")
+
+        if expected_type is int:
+            if value % 1 != 0:
+                raise ValueError("expected int, got float")
+
+            value = int(value)
+        elif expected_type is float:
+            value = float(value)
+
+    elif expected_type is bool:
+        if not isinstance(value, bool):
+            raise ValueError(f"expected bool, got {type(value)}")
+
+    if choices and value not in choices:
+        raise ValueError(f"invalid value {value}, not in {choices}")
+
+    return value
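To see how these pieces fit together, here is a usage sketch of the new streaming interface. The `ChatContext().append(...)` call is an assumption based on the livekit-agents ~=0.8 API that this release pins; it is not part of the diff:

```python
import asyncio

from livekit.agents import llm
from livekit.plugins.anthropic import LLM


async def main() -> None:
    # Build a chat context; `append` is assumed from livekit-agents ~=0.8.
    chat_ctx = llm.ChatContext().append(role="user", text="Say hello in one sentence.")

    anthropic_llm = LLM()  # raises ValueError if ANTHROPIC_API_KEY is unset

    # chat() returns an LLMStream; iterating drives __anext__ above, which
    # lazily awaits the Anthropic stream and yields ChatChunk objects.
    stream = anthropic_llm.chat(chat_ctx=chat_ctx)
    async for chunk in stream:
        for choice in chunk.choices:
            if choice.delta.content:
                print(choice.delta.content, end="", flush=True)
    await stream.aclose()


asyncio.run(main())
```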
livekit/plugins/anthropic/py.typed
File without changes
{livekit_plugins_anthropic-0.0.1.dist-info → livekit_plugins_anthropic-0.2.0.dist-info}/METADATA
RENAMED
@@ -1,12 +1,12 @@
 Metadata-Version: 2.1
 Name: livekit-plugins-anthropic
-Version: 0.0
-Summary:
-Home-page: https://github.com/livekit/
+Version: 0.2.0
+Summary: Agent Framework plugin for services from Anthropic
+Home-page: https://github.com/livekit/agents
 License: Apache-2.0
 Project-URL: Documentation, https://docs.livekit.io
 Project-URL: Website, https://livekit.io/
-Project-URL: Source, https://github.com/livekit/
+Project-URL: Source, https://github.com/livekit/agents
 Keywords: webrtc,realtime,audio,video,livekit
 Classifier: Intended Audience :: Developers
 Classifier: License :: OSI Approved :: Apache Software License
@@ -14,13 +14,24 @@ Classifier: Topic :: Multimedia :: Sound/Audio
 Classifier: Topic :: Multimedia :: Video
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Programming Language :: Python :: 3.8
 Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3 :: Only
-Requires-Python: >=3.
+Requires-Python: >=3.9.0
 Description-Content-Type: text/markdown
-Requires-Dist:
+Requires-Dist: livekit-agents ~=0.8
+Requires-Dist: anthropic ~=0.34
 
-# LiveKit
+# LiveKit Plugins Anthropic
+
+Agent Framework plugin for services from Anthropic.
+
+## Installation
+
+```bash
+pip install livekit-plugins-anthropic
+```
+
+## Pre-requisites
+
+You'll need an API key from Anthropic. It can be set as an environment variable: `ANTHROPIC_API_KEY`
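The new README text above covers the key requirement; for completeness, a small sketch of the two ways `LLM.__init__` accepts the key (placeholder values, hypothetical caller):

```python
import os

from livekit.plugins.anthropic import LLM

# Option 1: environment variable, read by LLM.__init__ when api_key is omitted.
os.environ.setdefault("ANTHROPIC_API_KEY", "sk-ant-...")  # placeholder value
llm_from_env = LLM()

# Option 2: explicit argument; this takes precedence over the environment.
llm_explicit = LLM(api_key="sk-ant-...")  # placeholder value
```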
livekit_plugins_anthropic-0.2.0.dist-info/RECORD
ADDED
@@ -0,0 +1,10 @@
+livekit/plugins/anthropic/__init__.py,sha256=g6KUqOfZo9DIBwBD98u6QOWY7pr8ZYJJ61fk3AWpoa4,1006
+livekit/plugins/anthropic/llm.py,sha256=SJo_opc9_2rKYvcDW8-ltuOD-p7QUc0oROGDHu04htY,17162
+livekit/plugins/anthropic/log.py,sha256=fG1pYSY88AnT738gZrmzF9FO4l4BdGENj3VKHMQB3Yo,72
+livekit/plugins/anthropic/models.py,sha256=AVEhrEtKfWxsd-R03u7R74hcKjJq4oDVSTukvoPQGb0,179
+livekit/plugins/anthropic/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+livekit/plugins/anthropic/version.py,sha256=cLFCdnm5S21CiJ5UJBcqfRvvFkCQ8p6M5fFUJVJkEiM,600
+livekit_plugins_anthropic-0.2.0.dist-info/METADATA,sha256=1VWzsOFCxwtoB2m-NVZgKPoPI8xwsZctTbZJO8FYxbI,1264
+livekit_plugins_anthropic-0.2.0.dist-info/WHEEL,sha256=cVxcB9AmuTcXqmwrtPhNK88dr7IR_b6qagTj0UvIEbY,91
+livekit_plugins_anthropic-0.2.0.dist-info/top_level.txt,sha256=OoDok3xUmXbZRvOrfvvXB-Juu4DX79dlq188E19YHoo,8
+livekit_plugins_anthropic-0.2.0.dist-info/RECORD,,
livekit/plugins/anthropic/claude.py
DELETED
@@ -1,126 +0,0 @@
-# Copyright 2023 LiveKit, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import logging
-import asyncio
-from dataclasses import dataclass
-from typing import AsyncIterable
-from anthropic import AsyncAnthropic, HUMAN_PROMPT, AI_PROMPT
-from enum import Enum
-
-ClaudeMessageRole = Enum(
-    'MessageRole', ["system", "human", "assistant"])
-
-
-@dataclass
-class ClaudeMessage:
-    role: ClaudeMessageRole
-    content: str
-
-    def to_api(self):
-        if ClaudeMessageRole.system == self.role:
-            return f"{self.content}"
-        elif ClaudeMessageRole.human == self.role:
-            return f"{HUMAN_PROMPT} {self.content}"
-        elif ClaudeMessageRole.assistant == self.role:
-            return f"{AI_PROMPT} {self.content}"
-        else:
-            raise ValueError("Invalid message role")
-
-
-class ClaudePlugin:
-    def __init__(self, model: str = 'claude-2', system_message: str = ''):
-        self._client = AsyncAnthropic(
-            api_key=os.environ["ANTHROPIC_API_KEY"])
-        self._model = model
-        self._system_message = system_message
-        self._messages: [ClaudeMessage] = []
-        self._producing_response = False
-        self._needs_interrupt = False
-
-    def interrupt(self):
-        if self._producing_response:
-            self._needs_interrupt = True
-
-    async def close(self):
-        pass
-
-    async def add_message(self, message: ClaudeMessage) -> AsyncIterable[str]:
-        self._messages.append(message)
-        async for text in self._generate_text_streamed():
-            yield text
-
-    async def _generate_text_streamed(self) -> AsyncIterable[str]:
-        system_message = ClaudeMessage(
-            role=ClaudeMessageRole.system, content=self._system_message)
-
-        try:
-            '''
-            Example Claude2 formatting for prompts:
-
-            Cats are wonderful animals and loved by everyone, no matter how many legs they have.
-
-            Human: I have two pet cats. One of them is missing a leg. The other one has a normal number of legs for a cat to have. In total, how many legs do my cats have?
-
-            Assistant: Can I think step-by-step?
-
-            Human: Yes, please do.
-
-            Assistant:
-            '''
-            prompt = ''.join([system_message.to_api()] + [m.to_api()
-                             for m in self._messages] + [ClaudeMessage(role=ClaudeMessageRole.assistant, content="").to_api()])
-            chat_stream = await asyncio.wait_for(
-                self._client.completions.create(
-                    model=self._model,
-                    max_tokens_to_sample=300,
-                    stream=True,
-                    prompt=prompt
-                ),
-                10
-            )
-        except TimeoutError:
-            yield "Sorry, I'm taking too long to respond. Please try again later."
-            return
-
-        self._producing_response = True
-        full_response = ""
-
-        while True:
-            try:
-                chunk = await asyncio.wait_for(anext(chat_stream, None), 5)
-            except TimeoutError:
-                break
-            except asyncio.CancelledError:
-                self._producing_response = False
-                self._needs_interrupt = False
-                break
-
-            if chunk is None:
-                break
-            content = chunk.completion
-
-            if self._needs_interrupt:
-                self._needs_interrupt = False
-                logging.info("Claude interrupted")
-                break
-
-            if content is not None:
-                full_response += content
-                yield content
-
-        self._messages.append(ClaudeMessage(
-            role=ClaudeMessageRole.assistant, content=full_response))
-        self._producing_response = False
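For contrast with the new Messages-based `LLM`, this is roughly how the removed completions-era plugin was driven. The import path is an assumption about the 0.0.1 layout; the method calls are grounded in the deleted code above:

```python
import asyncio

# Assumed 0.0.1 import path, matching the deleted livekit/plugins/anthropic/claude.py.
from livekit.plugins.anthropic.claude import ClaudeMessage, ClaudeMessageRole, ClaudePlugin


async def main() -> None:
    # ClaudePlugin built HUMAN_PROMPT/AI_PROMPT strings for the legacy completions API.
    plugin = ClaudePlugin(model="claude-2", system_message="You are concise.")
    message = ClaudeMessage(role=ClaudeMessageRole.human, content="Hello!")
    async for text in plugin.add_message(message):
        print(text, end="", flush=True)
    await plugin.close()


asyncio.run(main())
```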
livekit_plugins_anthropic-0.0.1.dist-info/RECORD
DELETED
@@ -1,7 +0,0 @@
-livekit/plugins/anthropic/__init__.py,sha256=bQ7r_vtiEz_LIY6SwFrH2W8qP1YP17-Z8aoeFvT4CFM,645
-livekit/plugins/anthropic/claude.py,sha256=ApgeJyzZOaxYIRKnW6QAjH8lBmxGcKt1Y10216VO4Iw,4276
-livekit/plugins/anthropic/version.py,sha256=6n5MU9nSMToMkOa05fR2KUwXCu_cet-aGMSmD5ouRug,600
-livekit_plugins_anthropic-0.0.1.dist-info/METADATA,sha256=Hxo7R0JWrupRHbEXq8-GKPM5vKjRtwg-pP3RveC3BHs,1129
-livekit_plugins_anthropic-0.0.1.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
-livekit_plugins_anthropic-0.0.1.dist-info/top_level.txt,sha256=OoDok3xUmXbZRvOrfvvXB-Juu4DX79dlq188E19YHoo,8
-livekit_plugins_anthropic-0.0.1.dist-info/RECORD,,
{livekit_plugins_anthropic-0.0.1.dist-info → livekit_plugins_anthropic-0.2.0.dist-info}/top_level.txt
File without changes