agno 2.1.1__py3-none-any.whl → 2.1.3__py3-none-any.whl
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +12 -0
- agno/db/base.py +8 -4
- agno/db/dynamo/dynamo.py +69 -17
- agno/db/firestore/firestore.py +65 -28
- agno/db/gcs_json/gcs_json_db.py +70 -17
- agno/db/in_memory/in_memory_db.py +85 -14
- agno/db/json/json_db.py +79 -15
- agno/db/mongo/mongo.py +27 -8
- agno/db/mysql/mysql.py +17 -3
- agno/db/postgres/postgres.py +21 -3
- agno/db/redis/redis.py +38 -11
- agno/db/singlestore/singlestore.py +14 -3
- agno/db/sqlite/sqlite.py +34 -46
- agno/knowledge/reader/field_labeled_csv_reader.py +294 -0
- agno/knowledge/reader/pdf_reader.py +28 -52
- agno/knowledge/reader/reader_factory.py +12 -0
- agno/memory/manager.py +12 -4
- agno/models/anthropic/claude.py +4 -1
- agno/models/aws/bedrock.py +52 -112
- agno/models/openrouter/openrouter.py +39 -1
- agno/models/vertexai/__init__.py +0 -0
- agno/models/vertexai/claude.py +74 -0
- agno/os/app.py +76 -32
- agno/os/interfaces/a2a/__init__.py +3 -0
- agno/os/interfaces/a2a/a2a.py +42 -0
- agno/os/interfaces/a2a/router.py +252 -0
- agno/os/interfaces/a2a/utils.py +924 -0
- agno/os/interfaces/agui/router.py +12 -0
- agno/os/mcp.py +3 -3
- agno/os/router.py +38 -8
- agno/os/routers/memory/memory.py +5 -3
- agno/os/routers/memory/schemas.py +1 -0
- agno/os/utils.py +37 -10
- agno/team/team.py +12 -0
- agno/tools/file.py +4 -2
- agno/tools/mcp.py +46 -1
- agno/utils/merge_dict.py +22 -1
- agno/utils/streamlit.py +1 -1
- agno/workflow/parallel.py +90 -14
- agno/workflow/step.py +30 -27
- agno/workflow/workflow.py +12 -6
- {agno-2.1.1.dist-info → agno-2.1.3.dist-info}/METADATA +16 -14
- {agno-2.1.1.dist-info → agno-2.1.3.dist-info}/RECORD +46 -39
- {agno-2.1.1.dist-info → agno-2.1.3.dist-info}/WHEEL +0 -0
- {agno-2.1.1.dist-info → agno-2.1.3.dist-info}/licenses/LICENSE +0 -0
- {agno-2.1.1.dist-info → agno-2.1.3.dist-info}/top_level.txt +0 -0
agno/memory/manager.py
CHANGED

```diff
@@ -236,8 +236,11 @@ class MemoryManager:
             memory_id (str): The id of the memory to delete
             user_id (Optional[str]): The user id to delete the memory from. If not provided, the memory is deleted from the "default" user.
         """
+        if user_id is None:
+            user_id = "default"
+
         if self.db:
-            self._delete_db_memory(memory_id=memory_id)
+            self._delete_db_memory(memory_id=memory_id, user_id=user_id)
         else:
             log_warning("Memory DB not provided.")
         return None
@@ -420,12 +423,16 @@ class MemoryManager:
             log_warning(f"Error storing memory in db: {e}")
             return f"Error adding memory: {e}"
 
-    def _delete_db_memory(self, memory_id: str) -> str:
+    def _delete_db_memory(self, memory_id: str, user_id: Optional[str] = None) -> str:
         """Use this function to delete a memory from the database."""
         try:
             if not self.db:
                 raise ValueError("Memory db not initialized")
-
+
+            if user_id is None:
+                user_id = "default"
+
+            self.db.delete_user_memory(memory_id=memory_id, user_id=user_id)
             return "Memory deleted successfully"
         except Exception as e:
             log_warning(f"Error deleting memory in db: {e}")
@@ -1027,6 +1034,7 @@ class MemoryManager:
                     memory_id=memory_id,
                     memory=memory,
                     topics=topics,
+                    user_id=user_id,
                     input=input_string,
                 )
             )
@@ -1044,7 +1052,7 @@
             str: A message indicating if the memory was deleted successfully or not.
         """
         try:
-            db.delete_user_memory(memory_id=memory_id)
+            db.delete_user_memory(memory_id=memory_id, user_id=user_id)
             log_debug("Memory deleted")
             return "Memory deleted successfully"
         except Exception as e:
```
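
The net effect: `user_id` now flows through the whole delete path and falls back to "default" when omitted. A minimal usage sketch; the `SqliteDb` backend and its arguments are illustrative, and any agno db backend should behave the same way:

```python
from agno.db.sqlite import SqliteDb  # illustrative backend choice
from agno.memory import MemoryManager

manager = MemoryManager(db=SqliteDb(db_file="memories.db"))

# With 2.1.3, user_id is forwarded to the db layer when deleting.
manager.delete_user_memory(memory_id="mem_123")                   # deletes from user "default"
manager.delete_user_memory(memory_id="mem_456", user_id="alice")  # deletes from user "alice"
```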
agno/models/anthropic/claude.py
CHANGED

```diff
@@ -59,7 +59,7 @@ class Claude(Model):
     For more information, see: https://docs.anthropic.com/en/api/messages
     """
 
-    id: str = "claude-
+    id: str = "claude-sonnet-4-5-20250929"
     name: str = "Claude"
     provider: str = "Anthropic"
 
@@ -78,6 +78,7 @@ class Claude(Model):
     # Client parameters
     api_key: Optional[str] = None
     default_headers: Optional[Dict[str, Any]] = None
+    timeout: Optional[float] = None
     client_params: Optional[Dict[str, Any]] = None
 
     # Anthropic clients
@@ -93,6 +94,8 @@ class Claude(Model):
 
         # Add API key to client parameters
         client_params["api_key"] = self.api_key
+        if self.timeout is not None:
+            client_params["timeout"] = self.timeout
 
         # Add additional client parameters
        if self.client_params is not None:
```
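
The default model id moves to Claude Sonnet 4.5, and the new `timeout` field is forwarded to the Anthropic client when set. A minimal sketch of the new knob; the agent wiring shown is the usual agno pattern:

```python
from agno.agent import Agent
from agno.models.anthropic import Claude

# timeout (in seconds) is now passed through to the underlying Anthropic client.
model = Claude(id="claude-sonnet-4-5-20250929", timeout=30.0)

agent = Agent(model=model)
agent.print_response("Give me a one-line status update.")
```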
agno/models/aws/bedrock.py
CHANGED

```diff
@@ -6,7 +6,7 @@ from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Tuple, Ty
 from pydantic import BaseModel
 
 from agno.exceptions import AgnoError, ModelProviderError
-from agno.models.base import
+from agno.models.base import Model
 from agno.models.message import Message
 from agno.models.metrics import Metrics
 from agno.models.response import ModelResponse
@@ -360,7 +360,7 @@ class AwsBedrock(Model):
         formatted_messages, system_message = self._format_messages(messages)
 
         tool_config = None
-        if tools
+        if tools:
             tool_config = {"tools": self._format_tools_for_request(tools)}
 
         body = {
@@ -408,7 +408,7 @@ class AwsBedrock(Model):
         formatted_messages, system_message = self._format_messages(messages)
 
         tool_config = None
-        if tools
+        if tools:
             tool_config = {"tools": self._format_tools_for_request(tools)}
 
         body = {
@@ -426,10 +426,14 @@ class AwsBedrock(Model):
 
         assistant_message.metrics.start_timer()
 
+        # Track current tool being built across chunks
+        current_tool: Dict[str, Any] = {}
+
         for chunk in self.get_client().converse_stream(modelId=self.id, messages=formatted_messages, **body)[
             "stream"
         ]:
-            yield self._parse_provider_response_delta(chunk)
+            model_response, current_tool = self._parse_provider_response_delta(chunk, current_tool)
+            yield model_response
 
         assistant_message.metrics.stop_timer()
 
@@ -456,7 +460,7 @@ class AwsBedrock(Model):
         formatted_messages, system_message = self._format_messages(messages)
 
         tool_config = None
-        if tools
+        if tools:
             tool_config = {"tools": self._format_tools_for_request(tools)}
 
         body = {
@@ -507,7 +511,7 @@ class AwsBedrock(Model):
         formatted_messages, system_message = self._format_messages(messages)
 
         tool_config = None
-        if tools
+        if tools:
             tool_config = {"tools": self._format_tools_for_request(tools)}
 
         body = {
@@ -525,10 +529,14 @@ class AwsBedrock(Model):
 
         assistant_message.metrics.start_timer()
 
+        # Track current tool being built across chunks
+        current_tool: Dict[str, Any] = {}
+
         async with self.get_async_client() as client:
             response = await client.converse_stream(modelId=self.id, messages=formatted_messages, **body)
             async for chunk in response["stream"]:
-                yield self._parse_provider_response_delta(chunk)
+                model_response, current_tool = self._parse_provider_response_delta(chunk, current_tool)
+                yield model_response
 
         assistant_message.metrics.stop_timer()
 
@@ -617,122 +625,54 @@ class AwsBedrock(Model):
 
         return model_response
 
-    def process_response_stream(
-        self,
-        messages: List[Message],
-        assistant_message: Message,
-        stream_data: MessageData,
-        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
-        run_response: Optional[RunOutput] = None,
-    ) -> Iterator[ModelResponse]:
-        """
-        Process the synchronous response stream.
-
-        Args:
-            messages (List[Message]): The messages to include in the request.
-            assistant_message (Message): The assistant message.
-            stream_data (MessageData): The stream data.
-        """
-        for response_delta in self.invoke_stream(
-            messages=messages,
-            assistant_message=assistant_message,
-            response_format=response_format,
-            tools=tools,
-            tool_choice=tool_choice,
-            run_response=run_response,
-        ):
-            should_yield = False
-
-            if response_delta.content:
-                stream_data.response_content += response_delta.content
-                should_yield = True
-
-            if response_delta.tool_calls:
-                if stream_data.response_tool_calls is None:
-                    stream_data.response_tool_calls = []
-                stream_data.response_tool_calls.extend(response_delta.tool_calls)
-                should_yield = True
-
-            if should_yield:
-                yield response_delta
-
-    async def aprocess_response_stream(
-        self,
-        messages: List[Message],
-        assistant_message: Message,
-        stream_data: MessageData,
-        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
-        run_response: Optional[RunOutput] = None,
-    ) -> AsyncIterator[ModelResponse]:
-        """
-        Process the asynchronous response stream.
-
-        Args:
-            messages (List[Message]): The messages to include in the request.
-            assistant_message (Message): The assistant message.
-            stream_data (MessageData): The stream data.
-        """
-        async for response_delta in self.ainvoke_stream(
-            messages=messages,
-            assistant_message=assistant_message,
-            response_format=response_format,
-            tools=tools,
-            tool_choice=tool_choice,
-            run_response=run_response,
-        ):
-            should_yield = False
-
-            if response_delta.content:
-                stream_data.response_content += response_delta.content
-                should_yield = True
-
-            if response_delta.tool_calls:
-                if stream_data.response_tool_calls is None:
-                    stream_data.response_tool_calls = []
-                stream_data.response_tool_calls.extend(response_delta.tool_calls)
-                should_yield = True
-
-            if should_yield:
-                yield response_delta
-
-        self._populate_assistant_message(assistant_message=assistant_message, provider_response=response_delta)
-
-    def _parse_provider_response_delta(self, response_delta: Dict[str, Any]) -> ModelResponse:  # type: ignore
+    def _parse_provider_response_delta(
+        self, response_delta: Dict[str, Any], current_tool: Dict[str, Any]
+    ) -> Tuple[ModelResponse, Dict[str, Any]]:
         """Parse the provider response delta for streaming.
 
         Args:
             response_delta: The streaming response delta from AWS Bedrock
+            current_tool: The current tool being built across chunks
 
         Returns:
-            ModelResponse: The parsed model response delta
+            Tuple[ModelResponse, Dict[str, Any]]: The parsed model response delta and updated current_tool
         """
         model_response = ModelResponse(role="assistant")
 
-        # Handle contentBlockDelta - text content
-        if "contentBlockDelta" in response_delta:
-            delta = response_delta["contentBlockDelta"]["delta"]
-            if "text" in delta:
-                model_response.content = delta["text"]
-
         # Handle contentBlockStart - tool use start
-        elif "contentBlockStart" in response_delta:
+        if "contentBlockStart" in response_delta:
             start = response_delta["contentBlockStart"]["start"]
             if "toolUse" in start:
-                ...  # (11 removed lines not captured in this diff view)
+                # Start a new tool
+                tool_use_data = start["toolUse"]
+                current_tool = {
+                    "id": tool_use_data.get("toolUseId", ""),
+                    "type": "function",
+                    "function": {
+                        "name": tool_use_data.get("name", ""),
+                        "arguments": "",  # Will be filled in subsequent deltas
+                    },
+                }
+
+        # Handle contentBlockDelta - text content or tool input
+        elif "contentBlockDelta" in response_delta:
+            delta = response_delta["contentBlockDelta"]["delta"]
+            if "text" in delta:
+                model_response.content = delta["text"]
+            elif "toolUse" in delta and current_tool:
+                # Accumulate tool input
+                tool_input = delta["toolUse"].get("input", "")
+                if tool_input:
+                    current_tool["function"]["arguments"] += tool_input
+
+        # Handle contentBlockStop - tool use complete
+        elif "contentBlockStop" in response_delta and current_tool:
+            # Tool is complete, add it to model response
+            model_response.tool_calls = [current_tool]
+            # Track tool_id in extra for format_function_call_results
+            model_response.extra = {"tool_ids": [current_tool["id"]]}
+            # Reset current_tool for next tool
+            current_tool = {}
 
         # Handle metadata/usage information
         elif "metadata" in response_delta or "messageStop" in response_delta:
@@ -740,7 +680,7 @@ class AwsBedrock(Model):
             if "usage" in body:
                 model_response.response_usage = self._get_metrics(body["usage"])
 
-        return model_response
+        return model_response, current_tool
 
     def _get_metrics(self, response_usage: Dict[str, Any]) -> Metrics:
         """
```
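
The delta parser is now stateful across stream events: `contentBlockStart` opens a tool call, each `contentBlockDelta` appends to its JSON `arguments` string, and `contentBlockStop` emits the completed call and resets the accumulator (text deltas are still yielded immediately). A self-contained sketch of the same accumulation pattern over fabricated `converse_stream`-shaped events; it mirrors the logic rather than calling the agno class:

```python
from typing import Any, Dict, List, Tuple

# Fabricated events in the shape Bedrock's converse_stream emits them.
chunks: List[Dict[str, Any]] = [
    {"contentBlockStart": {"start": {"toolUse": {"toolUseId": "t1", "name": "get_weather"}}}},
    {"contentBlockDelta": {"delta": {"toolUse": {"input": '{"city": '}}}},
    {"contentBlockDelta": {"delta": {"toolUse": {"input": '"Paris"}'}}}},
    {"contentBlockStop": {}},
]


def parse_delta(
    chunk: Dict[str, Any], current_tool: Dict[str, Any]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
    """Return (completed tool calls, updated accumulator) for one stream event."""
    completed: List[Dict[str, Any]] = []
    if "contentBlockStart" in chunk:
        tool_use = chunk["contentBlockStart"]["start"].get("toolUse", {})
        # Open a new tool call; arguments arrive in later deltas.
        current_tool = {
            "id": tool_use.get("toolUseId", ""),
            "type": "function",
            "function": {"name": tool_use.get("name", ""), "arguments": ""},
        }
    elif "contentBlockDelta" in chunk and current_tool:
        delta = chunk["contentBlockDelta"]["delta"]
        current_tool["function"]["arguments"] += delta.get("toolUse", {}).get("input", "")
    elif "contentBlockStop" in chunk and current_tool:
        # The tool call is complete: emit it and reset the accumulator.
        completed, current_tool = [current_tool], {}
    return completed, current_tool


current_tool: Dict[str, Any] = {}
for chunk in chunks:
    calls, current_tool = parse_delta(chunk, current_tool)
    for call in calls:
        print(call["function"]["name"], call["function"]["arguments"])
        # -> get_weather {"city": "Paris"}
```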
agno/models/openrouter/openrouter.py
CHANGED

```diff
@@ -1,8 +1,11 @@
 from dataclasses import dataclass, field
 from os import getenv
-from typing import Optional
+from typing import Any, Dict, List, Optional, Type, Union
+
+from pydantic import BaseModel
 
 from agno.models.openai.like import OpenAILike
+from agno.run.agent import RunOutput
 
 
 @dataclass
@@ -17,6 +20,9 @@ class OpenRouter(OpenAILike):
         api_key (Optional[str]): The API key.
         base_url (str): The base URL. Defaults to "https://openrouter.ai/api/v1".
         max_tokens (int): The maximum number of tokens. Defaults to 1024.
+        fallback_models (Optional[List[str]]): List of fallback model IDs to use if the primary model
+            fails due to rate limits, timeouts, or unavailability. OpenRouter will automatically try
+            these models in order. Example: ["anthropic/claude-sonnet-4", "deepseek/deepseek-r1"]
     """
 
     id: str = "gpt-4o"
@@ -26,3 +32,35 @@
     api_key: Optional[str] = field(default_factory=lambda: getenv("OPENROUTER_API_KEY"))
     base_url: str = "https://openrouter.ai/api/v1"
     max_tokens: int = 1024
+    models: Optional[List[str]] = None  # Dynamic model routing https://openrouter.ai/docs/features/model-routing
+
+    def get_request_params(
+        self,
+        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+        run_response: Optional[RunOutput] = None,
+    ) -> Dict[str, Any]:
+        """
+        Returns keyword arguments for API requests, including fallback models configuration.
+
+        Returns:
+            Dict[str, Any]: A dictionary of keyword arguments for API requests.
+        """
+        # Get base request params from parent class
+        request_params = super().get_request_params(
+            response_format=response_format, tools=tools, tool_choice=tool_choice, run_response=run_response
+        )
+
+        # Add fallback models to extra_body if specified
+        if self.models:
+            # Get existing extra_body or create new dict
+            extra_body = request_params.get("extra_body") or {}
+
+            # Merge fallback models into extra_body
+            extra_body["models"] = self.models
+
+            # Update request params
+            request_params["extra_body"] = extra_body
+
+        return request_params
```
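
Because `models` rides in `extra_body`, the OpenAI-compatible client sends it through verbatim and OpenRouter can fall back when the primary model is rate-limited or unavailable. A minimal sketch, assuming the usual `agno.models.openrouter` re-export; the fallback ids are the examples from the docstring:

```python
from agno.models.openrouter import OpenRouter

model = OpenRouter(
    id="gpt-4o",
    # Tried in order if the primary model fails or is rate-limited.
    models=["anthropic/claude-sonnet-4", "deepseek/deepseek-r1"],
)

params = model.get_request_params()
print(params["extra_body"])  # {'models': ['anthropic/claude-sonnet-4', 'deepseek/deepseek-r1']}
```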
agno/models/vertexai/__init__.py
ADDED
File without changes
agno/models/vertexai/claude.py
ADDED

```diff
@@ -0,0 +1,74 @@
+from dataclasses import dataclass
+from os import getenv
+from typing import Any, Dict, Optional
+
+from agno.models.anthropic import Claude as AnthropicClaude
+
+try:
+    from anthropic import AnthropicVertex as AnthropicClient
+    from anthropic import (
+        AsyncAnthropicVertex as AsyncAnthropicClient,
+    )
+except ImportError as e:
+    raise ImportError("`anthropic` not installed. Please install it with `pip install anthropic`") from e
+
+
+@dataclass
+class Claude(AnthropicClaude):
+    """
+    A class representing Anthropic Claude model.
+
+    For more information, see: https://docs.anthropic.com/en/api/messages
+    """
+
+    id: str = "claude-sonnet-4@20250514"
+    name: str = "Claude"
+    provider: str = "VertexAI"
+
+    # Client parameters
+    region: Optional[str] = None
+    project_id: Optional[str] = None
+    base_url: Optional[str] = None
+
+    # Anthropic clients
+    client: Optional[AnthropicClient] = None
+    async_client: Optional[AsyncAnthropicClient] = None
+
+    def _get_client_params(self) -> Dict[str, Any]:
+        client_params: Dict[str, Any] = {}
+
+        # Add API key to client parameters
+        client_params["region"] = self.region or getenv("CLOUD_ML_REGION")
+        client_params["project_id"] = self.project_id or getenv("ANTHROPIC_VERTEX_PROJECT_ID")
+        client_params["base_url"] = self.base_url or getenv("ANTHROPIC_VERTEX_BASE_URL")
+        if self.timeout is not None:
+            client_params["timeout"] = self.timeout
+
+        # Add additional client parameters
+        if self.client_params is not None:
+            client_params.update(self.client_params)
+        if self.default_headers is not None:
+            client_params["default_headers"] = self.default_headers
+        return client_params
+
+    def get_client(self) -> AnthropicClient:
+        """
+        Returns an instance of the Anthropic client.
+        """
+        if self.client and not self.client.is_closed():
+            return self.client
+
+        _client_params = self._get_client_params()
+        self.client = AnthropicClient(**_client_params)
+        return self.client
+
+    def get_async_client(self) -> AsyncAnthropicClient:
+        """
+        Returns an instance of the async Anthropic client.
+        """
+        if self.async_client:
+            return self.async_client
+
+        _client_params = self._get_client_params()
+        self.async_client = AsyncAnthropicClient(**_client_params)
+        return self.async_client
```
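
The new module subclasses the Anthropic `Claude` model and swaps in the `AnthropicVertex` clients; credentials come from the fields above or from `CLOUD_ML_REGION` / `ANTHROPIC_VERTEX_PROJECT_ID`. A minimal sketch, importing from the new file location (the package `__init__.py` is empty); the region and project values are placeholders, and Google Cloud application-default credentials are assumed to be configured:

```python
from agno.agent import Agent
from agno.models.vertexai.claude import Claude

model = Claude(
    id="claude-sonnet-4@20250514",
    region="us-east5",            # placeholder; or set CLOUD_ML_REGION
    project_id="my-gcp-project",  # placeholder; or set ANTHROPIC_VERTEX_PROJECT_ID
)

agent = Agent(model=model)
agent.print_response("Hello from Vertex AI.")
```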