distributed-a2a 0.1.4__tar.gz → 0.1.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (21)
  1. {distributed_a2a-0.1.4/distributed_a2a.egg-info → distributed_a2a-0.1.5}/PKG-INFO +4 -1
  2. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5}/distributed_a2a/agent.py +18 -5
  3. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5}/distributed_a2a/executors.py +8 -3
  4. distributed_a2a-0.1.5/distributed_a2a/model.py +59 -0
  5. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5}/distributed_a2a/server.py +31 -19
  6. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5/distributed_a2a.egg-info}/PKG-INFO +4 -1
  7. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5}/distributed_a2a.egg-info/requires.txt +3 -0
  8. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5}/pyproject.toml +1 -1
  9. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5}/requirements.txt +3 -0
  10. distributed_a2a-0.1.4/distributed_a2a/model.py +0 -11
  11. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5}/LICENSE +0 -0
  12. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5}/MANIFEST.in +0 -0
  13. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5}/README.md +0 -0
  14. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5}/distributed_a2a/__init__.py +0 -0
  15. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5}/distributed_a2a/client.py +0 -0
  16. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5}/distributed_a2a/registry.py +0 -0
  17. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5}/distributed_a2a.egg-info/SOURCES.txt +0 -0
  18. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5}/distributed_a2a.egg-info/dependency_links.txt +0 -0
  19. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5}/distributed_a2a.egg-info/top_level.txt +0 -0
  20. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5}/setup.cfg +0 -0
  21. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5}/setup.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: distributed_a2a
3
- Version: 0.1.4
3
+ Version: 0.1.5
4
4
  Summary: A library for building A2A agents with routing capabilities
5
5
  Home-page: https://github.com/Barra-Technologies/distributed-a2a
6
6
  Author: Fabian Bell
@@ -23,9 +23,12 @@ Requires-Dist: langchain>=0.1.0
23
23
  Requires-Dist: langchain-core>=0.1.0
24
24
  Requires-Dist: langchain-openai>=0.0.5
25
25
  Requires-Dist: langgraph>=0.0.20
26
+ Requires-Dist: langgraph-dynamodb-checkpoint>=0.2.6.4
26
27
  Requires-Dist: pydantic>=2.0.0
27
28
  Requires-Dist: boto3>=1.28.0
28
29
  Requires-Dist: a2a>=0.1.0
30
+ Requires-Dist: build>=1.4.0
31
+ Requires-Dist: twine>=6.2.0
29
32
  Dynamic: author
30
33
  Dynamic: home-page
31
34
  Dynamic: license-file
@@ -4,10 +4,10 @@ from a2a.types import TaskState
4
4
  from langchain.agents import create_agent
5
5
  from langchain_core.runnables import RunnableConfig
6
6
  from langchain_core.tools import BaseTool
7
- from langgraph.checkpoint.memory import MemorySaver
7
+ from langgraph_dynamodb_checkpoint import DynamoDBSaver
8
8
  from pydantic import BaseModel, Field
9
9
 
10
- from .model import get_model
10
+ from .model import get_model, AgentConfig, LLMConfig
11
11
 
12
12
 
13
13
  class AgentResponse(BaseModel):
@@ -20,26 +20,39 @@ class AgentResponse(BaseModel):
20
20
  )
21
21
  )
22
22
 
23
+
23
24
  class RoutingResponse(AgentResponse):
24
25
  agent_card: str = Field(description="The stringified json of the agent card to be returned to the user")
25
26
 
27
+
26
28
  class StringResponse(AgentResponse):
27
29
  response: str = Field(description="The main response to be returned to the user")
28
30
 
29
31
 
30
32
  class StatusAgent[ResponseT: AgentResponse]:
31
33
 
32
- def __init__(self, system_prompt: str, name: str, api_key: str, is_routing: bool, tools: list[BaseTool]):
34
+ def __init__(self, llm_config: LLMConfig, name: str, system_prompt: str, api_key: str, is_routing: bool,
35
+ tools: list[BaseTool]):
33
36
  response_format: type[AgentResponse]
34
37
  if is_routing:
35
38
  response_format = RoutingResponse
36
39
  else:
37
40
  response_format = StringResponse
38
41
 
42
+
43
+ saver = DynamoDBSaver(
44
+ table_name=f"checkpoint_saver_{name}",
45
+ max_read_request_units=20, ## TODO find correct value for app
46
+ max_write_request_units=20, ## TODO find correct value for app
47
+ ttl_seconds=86400
48
+ )
39
49
  self.agent = create_agent(
40
- get_model(api_key),
50
+ get_model(api_key=api_key,
51
+ model=llm_config.model,
52
+ base_url=llm_config.base_url,
53
+ reasoning_effort=llm_config.reasoning_effort),
41
54
  tools=tools,
42
- checkpointer=MemorySaver(), # TODO replace by dynamodb
55
+ checkpointer=saver,
43
56
  system_prompt=system_prompt,
44
57
  response_format=response_format,
45
58
  name=name
@@ -1,5 +1,6 @@
1
1
  import json
2
2
  import logging
3
+ import os
3
4
 
4
5
  from a2a.server.agent_execution import AgentExecutor, RequestContext
5
6
  from a2a.server.events import EventQueue
@@ -8,6 +9,7 @@ from a2a.utils import new_text_artifact
8
9
  from langchain_core.tools import BaseTool
9
10
 
10
11
  from .agent import StatusAgent, RoutingResponse, StringResponse
12
+ from .model import AgentConfig
11
13
 
12
14
  logger = logging.getLogger(__name__)
13
15
 
@@ -20,16 +22,19 @@ You are a helpful routing assistant which routes user requests to specialized re
20
22
 
21
23
  class RoutingAgentExecutor(AgentExecutor):
22
24
 
23
- def __init__(self, api_key: str, system_prompt: str, routing_tool: BaseTool, tools: list[BaseTool] | None = None):
25
+ def __init__(self, agent_config: AgentConfig, routing_tool: BaseTool, tools: list[BaseTool] | None = None):
24
26
  super().__init__()
27
+ api_key = os.environ.get(agent_config.agent.llm.api_key_env)
25
28
  self.agent = StatusAgent[StringResponse](
26
- system_prompt=system_prompt,
27
- name="Router",
29
+ llm_config=agent_config.agent.llm,
30
+ system_prompt=agent_config.agent.system_prompt,
31
+ name=agent_config.agent.card.name,
28
32
  api_key=api_key,
29
33
  is_routing=False,
30
34
  tools=[] if tools is None else tools,
31
35
  )
32
36
  self.routing_agent = StatusAgent[RoutingResponse](
37
+ llm_config=agent_config.agent.llm,
33
38
  system_prompt=ROUTING_SYSTEM_PROMPT,
34
39
  name="Router",
35
40
  api_key=api_key,
@@ -0,0 +1,59 @@
1
+ import os
2
+ from typing import List, Any
3
+
4
+ from langchain_core.language_models import BaseChatModel
5
+ from langchain_openai import ChatOpenAI
6
+ from pydantic import BaseModel, Field
7
+
8
+
9
+ class SkillConfig(BaseModel):
10
+ id: str = Field(description="The id of the skill e.g. weather")
11
+ name: str = Field(description="The name of the skill e.g. weather")
12
+ description: str = Field(description="A short description of the skill")
13
+ tags: List[str] = Field(description="The tags associated with the skill")
14
+
15
+
16
+ class LLMConfig(BaseModel):
17
+ base_url: str = Field(description="The base url of the LLM provider")
18
+ model: str = Field(description="The model to use for the LLM e.g. gpt-3.5-turbo")
19
+ api_key_env: str = Field(description="The environment variable containing the api key for the LLM provider")
20
+ reasoning_effort: str = Field(description="The reasoning effort to use for the LLM e.g. high", default="high")
21
+
22
+
23
+ class CardConfig(BaseModel):
24
+ name: str = Field(description="The name of the agent" )
25
+ description: str = Field(description="A short description of the agent")
26
+ version: str = Field(description="The version of the agent")
27
+ default_input_modes: List[str] = Field(description="The default input modes supported by the agent", default=["text","text/plaintext"])
28
+ default_output_modes: List[str] = Field(description="The default output modes supported by the agent", default=["text","text/plaintext"])
29
+ preferred_transport_protocol: str = Field(description="The preferred transport protocol for the agent", default="HTTP+JSON")
30
+ url: str = Field(description="The url of the agent")
31
+ skills: List[SkillConfig] = Field(description="The skills supported by the agent", default=[])
32
+
33
+
34
+ class AgentItem(BaseModel):
35
+ card: CardConfig = Field(description="The agent card configuration node")
36
+ llm: LLMConfig = Field(description="The LLM configuration node")
37
+ system_prompt: str = Field(description="The system prompt to use for the LLM or a path to a file containing the system prompt")
38
+
39
+ def __init__(self, /, **data: Any) -> None:
40
+ prompt_or_path= data['system_prompt']
41
+ if os.path.exists(prompt_or_path):
42
+ with open(prompt_or_path, "r", encoding="utf-8") as f:
43
+ data['system_prompt'] = f.read()
44
+
45
+ super().__init__(**data)
46
+
47
+
48
+ class AgentConfig(BaseModel):
49
+ agent: AgentItem = Field(description="The agent configuration node")
50
+
51
+
52
+
53
+ def get_model(api_key: str, model: str, base_url: str, reasoning_effort: str) -> BaseChatModel:
54
+ return ChatOpenAI(
55
+ model=model,
56
+ base_url=base_url,
57
+ api_key=lambda: api_key,
58
+ reasoning_effort=reasoning_effort
59
+ )
@@ -1,16 +1,18 @@
1
1
  import asyncio
2
2
  import time
3
3
  from contextlib import asynccontextmanager
4
+ from typing import Any
4
5
 
5
6
  import boto3
6
7
  from a2a.server.apps import A2ARESTFastAPIApplication
7
8
  from a2a.server.request_handlers import DefaultRequestHandler
8
9
  from a2a.server.tasks import InMemoryTaskStore
9
10
  from a2a.types import AgentSkill, \
10
- AgentCapabilities, AgentCard, TransportProtocol
11
+ AgentCapabilities, AgentCard
11
12
  from fastapi import FastAPI
12
13
 
13
14
  from .executors import RoutingAgentExecutor
15
+ from .model import AgentConfig
14
16
  from .registry import DynamoDbRegistryLookup
15
17
 
16
18
  CAPABILITIES = AgentCapabilities(streaming=False, push_notifications=False)
@@ -35,34 +37,44 @@ async def heart_beat(name: str, agent_card_table: str, agent_card: AgentCard):
35
37
  )
36
38
 
37
39
 
38
- def load_app(name: str, description: str, skills: list[AgentSkill], api_key: str, system_prompt: str,
39
- host: str) -> FastAPI:
40
40
 
41
- routing_skill = AgentSkill(
41
+
42
+ def load_app(agent_config: dict[str, Any]) -> FastAPI:
43
+
44
+ agent_config= AgentConfig.model_validate(agent_config)
45
+
46
+ skills = [AgentSkill(
47
+ id=skill.id,
48
+ name=skill.name,
49
+ description=skill.description,
50
+ tags=skill.tags)
51
+ for skill in agent_config.agent.card.skills]
52
+ skills.append(AgentSkill(
42
53
  id='routing',
43
54
  name='Agent routing',
44
55
  description='Identifies the most suitable agent for the given task and returns the agent card',
45
56
  tags=['agent', 'routing']
46
- )
57
+ ))
47
58
 
48
59
  agent_card = AgentCard(
49
- name=name,
50
- description=description,
51
- url=host,
52
- version='1.0.0',
53
- default_input_modes=['text', 'text/plain'],
54
- default_output_modes=['text', 'text/plain'],
55
- capabilities=CAPABILITIES,
56
- skills=skills + [routing_skill],
57
- preferred_transport=TransportProtocol.http_json
60
+ name=agent_config.agent.card.name,
61
+ description=agent_config.agent.card.description,
62
+ url=agent_config.agent.card.url,
63
+ version=agent_config.agent.card.version,
64
+ default_input_modes=agent_config.agent.card.default_input_modes,
65
+ default_output_modes=agent_config.agent.card.default_output_modes,
66
+ skills=skills,
67
+ preferred_transport=agent_config.agent.card.preferred_transport_protocol,
68
+ capabilities=CAPABILITIES
58
69
  )
59
70
 
60
71
 
61
- executor = RoutingAgentExecutor(api_key=api_key, system_prompt=system_prompt, routing_tool=DynamoDbRegistryLookup(agent_card_tabel=AGENT_CARD_TABLE).as_tool())
72
+ executor = RoutingAgentExecutor(agent_config=agent_config,
73
+ routing_tool=DynamoDbRegistryLookup(agent_card_tabel=AGENT_CARD_TABLE).as_tool())
62
74
 
63
75
  @asynccontextmanager
64
76
  async def lifespan(_: FastAPI):
65
- asyncio.create_task(heart_beat(name=name, agent_card_table=AGENT_CARD_TABLE, agent_card=agent_card))
77
+ asyncio.create_task(heart_beat(name=agent_card.name, agent_card_table=AGENT_CARD_TABLE, agent_card=agent_card))
66
78
  yield
67
79
 
68
80
 
@@ -70,6 +82,6 @@ def load_app(name: str, description: str, skills: list[AgentSkill], api_key: str
70
82
  agent_card=agent_card,
71
83
  http_handler=DefaultRequestHandler(
72
84
  agent_executor=executor,
73
- task_store=InMemoryTaskStore()
74
- )
75
- ).build(title=name, lifespan=lifespan)
85
+ task_store=InMemoryTaskStore() #TODO replace with dynamodb store
86
+
87
+ )).build(title=agent_card.name, lifespan=lifespan, root_path=f"/{agent_config.agent.card.name}") #TODO use extra parameter
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: distributed_a2a
3
- Version: 0.1.4
3
+ Version: 0.1.5
4
4
  Summary: A library for building A2A agents with routing capabilities
5
5
  Home-page: https://github.com/Barra-Technologies/distributed-a2a
6
6
  Author: Fabian Bell
@@ -23,9 +23,12 @@ Requires-Dist: langchain>=0.1.0
23
23
  Requires-Dist: langchain-core>=0.1.0
24
24
  Requires-Dist: langchain-openai>=0.0.5
25
25
  Requires-Dist: langgraph>=0.0.20
26
+ Requires-Dist: langgraph-dynamodb-checkpoint>=0.2.6.4
26
27
  Requires-Dist: pydantic>=2.0.0
27
28
  Requires-Dist: boto3>=1.28.0
28
29
  Requires-Dist: a2a>=0.1.0
30
+ Requires-Dist: build>=1.4.0
31
+ Requires-Dist: twine>=6.2.0
29
32
  Dynamic: author
30
33
  Dynamic: home-page
31
34
  Dynamic: license-file
@@ -2,6 +2,9 @@ langchain>=0.1.0
2
2
  langchain-core>=0.1.0
3
3
  langchain-openai>=0.0.5
4
4
  langgraph>=0.0.20
5
+ langgraph-dynamodb-checkpoint>=0.2.6.4
5
6
  pydantic>=2.0.0
6
7
  boto3>=1.28.0
7
8
  a2a>=0.1.0
9
+ build>=1.4.0
10
+ twine>=6.2.0
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "distributed_a2a"
7
- version = "0.1.4"
7
+ version = "0.1.5"
8
8
  description = "A library for building A2A agents with routing capabilities"
9
9
  readme = "README.md"
10
10
  requires-python = ">=3.10"
@@ -2,6 +2,9 @@ langchain>=0.1.0
2
2
  langchain-core>=0.1.0
3
3
  langchain-openai>=0.0.5
4
4
  langgraph>=0.0.20
5
+ langgraph-dynamodb-checkpoint>=0.2.6.4
5
6
  pydantic>=2.0.0
6
7
  boto3>=1.28.0
7
8
  a2a>=0.1.0
9
+ build>=1.4.0
10
+ twine>=6.2.0
@@ -1,11 +0,0 @@
1
- from langchain_core.language_models import BaseChatModel
2
- from langchain_openai import ChatOpenAI
3
-
4
-
5
- def get_model(api_key: str) -> BaseChatModel:
6
- return ChatOpenAI(
7
- model="google/gemini-2.5-flash",
8
- base_url="https://openrouter.ai/api/v1",
9
- api_key=lambda: api_key,
10
- reasoning_effort="medium"
11
- )
File without changes