distributed-a2a 0.1.4__tar.gz → 0.1.5rc0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (21)
  1. {distributed_a2a-0.1.4/distributed_a2a.egg-info → distributed_a2a-0.1.5rc0}/PKG-INFO +3 -1
  2. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5rc0}/distributed_a2a/agent.py +8 -5
  3. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5rc0}/distributed_a2a/executors.py +6 -2
  4. distributed_a2a-0.1.5rc0/distributed_a2a/model.py +50 -0
  5. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5rc0}/distributed_a2a/server.py +28 -17
  6. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5rc0/distributed_a2a.egg-info}/PKG-INFO +3 -1
  7. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5rc0}/distributed_a2a.egg-info/requires.txt +2 -0
  8. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5rc0}/pyproject.toml +1 -1
  9. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5rc0}/requirements.txt +2 -0
  10. distributed_a2a-0.1.4/distributed_a2a/model.py +0 -11
  11. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5rc0}/LICENSE +0 -0
  12. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5rc0}/MANIFEST.in +0 -0
  13. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5rc0}/README.md +0 -0
  14. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5rc0}/distributed_a2a/__init__.py +0 -0
  15. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5rc0}/distributed_a2a/client.py +0 -0
  16. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5rc0}/distributed_a2a/registry.py +0 -0
  17. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5rc0}/distributed_a2a.egg-info/SOURCES.txt +0 -0
  18. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5rc0}/distributed_a2a.egg-info/dependency_links.txt +0 -0
  19. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5rc0}/distributed_a2a.egg-info/top_level.txt +0 -0
  20. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5rc0}/setup.cfg +0 -0
  21. {distributed_a2a-0.1.4 → distributed_a2a-0.1.5rc0}/setup.py +0 -0
{distributed_a2a-0.1.4/distributed_a2a.egg-info → distributed_a2a-0.1.5rc0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: distributed_a2a
-Version: 0.1.4
+Version: 0.1.5rc0
 Summary: A library for building A2A agents with routing capabilities
 Home-page: https://github.com/Barra-Technologies/distributed-a2a
 Author: Fabian Bell
@@ -26,6 +26,8 @@ Requires-Dist: langgraph>=0.0.20
 Requires-Dist: pydantic>=2.0.0
 Requires-Dist: boto3>=1.28.0
 Requires-Dist: a2a>=0.1.0
+Requires-Dist: build>=1.4.0
+Requires-Dist: twine>=6.2.0
 Dynamic: author
 Dynamic: home-page
 Dynamic: license-file
{distributed_a2a-0.1.4 → distributed_a2a-0.1.5rc0}/distributed_a2a/agent.py
@@ -7,7 +7,7 @@ from langchain_core.tools import BaseTool
 from langgraph.checkpoint.memory import MemorySaver
 from pydantic import BaseModel, Field

-from .model import get_model
+from .model import get_model, AgentConfig


 class AgentResponse(BaseModel):
@@ -29,7 +29,7 @@ class StringResponse(AgentResponse):

 class StatusAgent[ResponseT: AgentResponse]:

-    def __init__(self, system_prompt: str, name: str, api_key: str, is_routing: bool, tools: list[BaseTool]):
+    def __init__(self, agent_config: AgentConfig, api_key: str, is_routing: bool, tools: list[BaseTool]):
         response_format: type[AgentResponse]
         if is_routing:
             response_format = RoutingResponse
@@ -37,12 +37,15 @@ class StatusAgent[ResponseT: AgentResponse]:
             response_format = StringResponse

         self.agent = create_agent(
-            get_model(api_key),
+            get_model(api_key=api_key,
+                      model=agent_config.agent.llm.model,
+                      base_url=agent_config.agent.llm.base_url,
+                      reasoning_effort=agent_config.agent.llm.reasoning_effort),
             tools=tools,
             checkpointer=MemorySaver(), # TODO replace by dynamodb
-            system_prompt=system_prompt,
+            system_prompt=agent_config.agent.llm.system_prompt,
             response_format=response_format,
-            name=name
+            name=agent_config.agent.card.name
         )

     async def __call__(self, message: str, context_id: str = None) -> ResponseT:
{distributed_a2a-0.1.4 → distributed_a2a-0.1.5rc0}/distributed_a2a/executors.py
@@ -1,13 +1,16 @@
 import json
 import logging
+import os

 from a2a.server.agent_execution import AgentExecutor, RequestContext
 from a2a.server.events import EventQueue
 from a2a.types import TaskStatusUpdateEvent, TaskStatus, TaskState, TaskArtifactUpdateEvent, Artifact
 from a2a.utils import new_text_artifact
 from langchain_core.tools import BaseTool
+from openai import api_key

 from .agent import StatusAgent, RoutingResponse, StringResponse
+from .model import AgentConfig

 logger = logging.getLogger(__name__)

@@ -20,10 +23,11 @@ You are a helpful routing assistant which routes user requests to specialized re

 class RoutingAgentExecutor(AgentExecutor):

-    def __init__(self, api_key: str, system_prompt: str, routing_tool: BaseTool, tools: list[BaseTool] | None = None):
+    def __init__(self, agent_config: AgentConfig, routing_tool: BaseTool, tools: list[BaseTool] | None = None):
         super().__init__()
+        api_key = os.environ.get(agent_config.agent.llm.api_key_env)
         self.agent = StatusAgent[StringResponse](
-            system_prompt=system_prompt,
+            system_prompt=agent_config.agent.llm.system_prompt,
             name="Router",
             api_key=api_key,
             is_routing=False,
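
With this change the executor no longer receives the API key as a constructor argument; it resolves it from the environment variable whose name is carried in the config (agent.llm.api_key_env). A minimal sketch of that lookup, with an assumed variable name:

import os

# Assumed variable name for illustration only; in the package it comes from
# agent_config.agent.llm.api_key_env.
API_KEY_ENV = "OPENROUTER_API_KEY"

os.environ[API_KEY_ENV] = "sk-example"   # normally exported by the deployment, not set in code
api_key = os.environ.get(API_KEY_ENV)    # the lookup RoutingAgentExecutor.__init__ now performs
print(api_key)                           # "sk-example"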
distributed_a2a-0.1.5rc0/distributed_a2a/model.py
@@ -0,0 +1,50 @@
+from typing import List
+
+from langchain_core.language_models import BaseChatModel
+from langchain_openai import ChatOpenAI
+from pydantic import BaseModel
+
+
+class SkillConfig(BaseModel):
+    id: str
+    name: str
+    description: str
+    tags: List[str]
+
+
+class LLMConfig(BaseModel):
+    base_url: str
+    model: str
+    api_key_env: str
+    reasoning_effort: str
+    system_prompt: str
+
+
+class CardConfig(BaseModel):
+    name: str
+    version: str
+    default_input_modes: List[str]
+    default_output_modes: List[str]
+    preferred_transport_protocol: str
+    url: str
+    description: str
+    skills: List[SkillConfig]
+
+
+class AgentItem(BaseModel):
+    card: CardConfig
+    llm: LLMConfig
+
+
+class AgentConfig(BaseModel):
+    agent: AgentItem
+
+
+
+def get_model(api_key: str, model: str, base_url: str, reasoning_effort: str) -> BaseChatModel:
+    return ChatOpenAI(
+        model=model,
+        base_url=base_url,
+        api_key=lambda: api_key,
+        reasoning_effort=reasoning_effort
+    )
{distributed_a2a-0.1.4 → distributed_a2a-0.1.5rc0}/distributed_a2a/server.py
@@ -1,16 +1,18 @@
 import asyncio
 import time
 from contextlib import asynccontextmanager
+from typing import Any

 import boto3
 from a2a.server.apps import A2ARESTFastAPIApplication
 from a2a.server.request_handlers import DefaultRequestHandler
 from a2a.server.tasks import InMemoryTaskStore
 from a2a.types import AgentSkill, \
-    AgentCapabilities, AgentCard, TransportProtocol
+    AgentCapabilities, AgentCard
 from fastapi import FastAPI

 from .executors import RoutingAgentExecutor
+from .model import AgentConfig
 from .registry import DynamoDbRegistryLookup

 CAPABILITIES = AgentCapabilities(streaming=False, push_notifications=False)
@@ -35,34 +37,43 @@ async def heart_beat(name: str, agent_card_table: str, agent_card: AgentCard):
     )


-def load_app(name: str, description: str, skills: list[AgentSkill], api_key: str, system_prompt: str,
-             host: str) -> FastAPI:

-    routing_skill = AgentSkill(
+
+def load_app(agent_config: dict[str, Any]) -> FastAPI:
+
+    agent_config= AgentConfig.model_validate(agent_config)
+
+    skills = [AgentSkill(
+        id=skill.id,
+        name=skill.name,
+        description=skill.description,
+        tags=skill.tags)
+        for skill in agent_config.skills]
+    skills.append(AgentSkill(
         id='routing',
         name='Agent routing',
         description='Identifies the most suitable agent for the given task and returns the agent card',
         tags=['agent', 'routing']
-    )
+    ))

     agent_card = AgentCard(
-        name=name,
-        description=description,
-        url=host,
-        version='1.0.0',
-        default_input_modes=['text', 'text/plain'],
-        default_output_modes=['text', 'text/plain'],
-        capabilities=CAPABILITIES,
-        skills=skills + [routing_skill],
-        preferred_transport=TransportProtocol.http_json
+        name=agent_config.agent.name,
+        description=agent_config.agent.description,
+        url=agent_config.agent.url,
+        version=agent_config.agent.version,
+        default_input_modes=agent_config.agent.default_input_modes,
+        default_output_modes=agent_config.agent.default_output_modes,
+        skills=skills,
+        preferred_transport=agent_config.agent.preferred_transport_protocol,
     )


-    executor = RoutingAgentExecutor(api_key=api_key, system_prompt=system_prompt, routing_tool=DynamoDbRegistryLookup(agent_card_tabel=AGENT_CARD_TABLE).as_tool())
+    executor = RoutingAgentExecutor(agent_config=agent_config,
+                                    routing_tool=DynamoDbRegistryLookup(agent_card_tabel=AGENT_CARD_TABLE).as_tool())

     @asynccontextmanager
     async def lifespan(_: FastAPI):
-        asyncio.create_task(heart_beat(name=name, agent_card_table=AGENT_CARD_TABLE, agent_card=agent_card))
+        asyncio.create_task(heart_beat(name=agent_card.name, agent_card_table=AGENT_CARD_TABLE, agent_card=agent_card))
         yield


@@ -70,6 +81,6 @@ def load_app(name: str, description: str, skills: list[AgentSkill], api_key: str
         agent_card=agent_card,
         http_handler=DefaultRequestHandler(
             agent_executor=executor,
-            task_store=InMemoryTaskStore()
+            task_store=InMemoryTaskStore() #TODO replace with dynamodb store
         )
     ).build(title=name, lifespan=lifespan)
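
A hedged sketch of an entry point under the new signature: only load_app's dict-based parameter and the api_key_env behaviour come from this release; the JSON file layout, the placeholder key, and the uvicorn invocation are assumptions for illustration.

import json
import os

import uvicorn

from distributed_a2a.server import load_app

# Assumed layout: a nested, AgentConfig-shaped dict (see the example after model.py
# above) stored in agent_config.json next to this script.
with open("agent_config.json") as f:
    config = json.load(f)

# executors.py reads the API key from the env var named in the config;
# "sk-example" is a placeholder, normally the variable is set by the deployment.
os.environ.setdefault(config["agent"]["llm"]["api_key_env"], "sk-example")

app = load_app(config)  # validates the dict and returns a FastAPI application

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)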
{distributed_a2a-0.1.4 → distributed_a2a-0.1.5rc0/distributed_a2a.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: distributed_a2a
-Version: 0.1.4
+Version: 0.1.5rc0
 Summary: A library for building A2A agents with routing capabilities
 Home-page: https://github.com/Barra-Technologies/distributed-a2a
 Author: Fabian Bell
@@ -26,6 +26,8 @@ Requires-Dist: langgraph>=0.0.20
 Requires-Dist: pydantic>=2.0.0
 Requires-Dist: boto3>=1.28.0
 Requires-Dist: a2a>=0.1.0
+Requires-Dist: build>=1.4.0
+Requires-Dist: twine>=6.2.0
 Dynamic: author
 Dynamic: home-page
 Dynamic: license-file
{distributed_a2a-0.1.4 → distributed_a2a-0.1.5rc0}/distributed_a2a.egg-info/requires.txt
@@ -5,3 +5,5 @@ langgraph>=0.0.20
 pydantic>=2.0.0
 boto3>=1.28.0
 a2a>=0.1.0
+build>=1.4.0
+twine>=6.2.0
{distributed_a2a-0.1.4 → distributed_a2a-0.1.5rc0}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

 [project]
 name = "distributed_a2a"
-version = "0.1.4"
+version = "0.1.5-rc"
 description = "A library for building A2A agents with routing capabilities"
 readme = "README.md"
 requires-python = ">=3.10"
{distributed_a2a-0.1.4 → distributed_a2a-0.1.5rc0}/requirements.txt
@@ -5,3 +5,5 @@ langgraph>=0.0.20
 pydantic>=2.0.0
 boto3>=1.28.0
 a2a>=0.1.0
+build>=1.4.0
+twine>=6.2.0
@@ -1,11 +0,0 @@
1
- from langchain_core.language_models import BaseChatModel
2
- from langchain_openai import ChatOpenAI
3
-
4
-
5
- def get_model(api_key: str) -> BaseChatModel:
6
- return ChatOpenAI(
7
- model="google/gemini-2.5-flash",
8
- base_url="https://openrouter.ai/api/v1",
9
- api_key=lambda: api_key,
10
- reasoning_effort="medium"
11
- )