distributed-a2a 0.1.5.tar.gz → 0.1.5rc1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (21)
  1. {distributed_a2a-0.1.5/distributed_a2a.egg-info → distributed_a2a-0.1.5rc1}/PKG-INFO +1 -2
  2. {distributed_a2a-0.1.5 → distributed_a2a-0.1.5rc1}/distributed_a2a/agent.py +9 -19
  3. {distributed_a2a-0.1.5 → distributed_a2a-0.1.5rc1}/distributed_a2a/executors.py +3 -4
  4. distributed_a2a-0.1.5rc1/distributed_a2a/model.py +50 -0
  5. {distributed_a2a-0.1.5 → distributed_a2a-0.1.5rc1}/distributed_a2a/server.py +9 -10
  6. {distributed_a2a-0.1.5 → distributed_a2a-0.1.5rc1/distributed_a2a.egg-info}/PKG-INFO +1 -2
  7. {distributed_a2a-0.1.5 → distributed_a2a-0.1.5rc1}/distributed_a2a.egg-info/requires.txt +0 -1
  8. {distributed_a2a-0.1.5 → distributed_a2a-0.1.5rc1}/pyproject.toml +1 -1
  9. {distributed_a2a-0.1.5 → distributed_a2a-0.1.5rc1}/requirements.txt +1 -2
  10. distributed_a2a-0.1.5/distributed_a2a/model.py +0 -59
  11. {distributed_a2a-0.1.5 → distributed_a2a-0.1.5rc1}/LICENSE +0 -0
  12. {distributed_a2a-0.1.5 → distributed_a2a-0.1.5rc1}/MANIFEST.in +0 -0
  13. {distributed_a2a-0.1.5 → distributed_a2a-0.1.5rc1}/README.md +0 -0
  14. {distributed_a2a-0.1.5 → distributed_a2a-0.1.5rc1}/distributed_a2a/__init__.py +0 -0
  15. {distributed_a2a-0.1.5 → distributed_a2a-0.1.5rc1}/distributed_a2a/client.py +0 -0
  16. {distributed_a2a-0.1.5 → distributed_a2a-0.1.5rc1}/distributed_a2a/registry.py +0 -0
  17. {distributed_a2a-0.1.5 → distributed_a2a-0.1.5rc1}/distributed_a2a.egg-info/SOURCES.txt +0 -0
  18. {distributed_a2a-0.1.5 → distributed_a2a-0.1.5rc1}/distributed_a2a.egg-info/dependency_links.txt +0 -0
  19. {distributed_a2a-0.1.5 → distributed_a2a-0.1.5rc1}/distributed_a2a.egg-info/top_level.txt +0 -0
  20. {distributed_a2a-0.1.5 → distributed_a2a-0.1.5rc1}/setup.cfg +0 -0
  21. {distributed_a2a-0.1.5 → distributed_a2a-0.1.5rc1}/setup.py +0 -0

PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: distributed_a2a
-Version: 0.1.5
+Version: 0.1.5rc1
 Summary: A library for building A2A agents with routing capabilities
 Home-page: https://github.com/Barra-Technologies/distributed-a2a
 Author: Fabian Bell
@@ -23,7 +23,6 @@ Requires-Dist: langchain>=0.1.0
 Requires-Dist: langchain-core>=0.1.0
 Requires-Dist: langchain-openai>=0.0.5
 Requires-Dist: langgraph>=0.0.20
-Requires-Dist: langgraph-dynamodb-checkpoint>=0.2.6.4
 Requires-Dist: pydantic>=2.0.0
 Requires-Dist: boto3>=1.28.0
 Requires-Dist: a2a>=0.1.0

distributed_a2a/agent.py
@@ -4,10 +4,10 @@ from a2a.types import TaskState
 from langchain.agents import create_agent
 from langchain_core.runnables import RunnableConfig
 from langchain_core.tools import BaseTool
-from langgraph_dynamodb_checkpoint import DynamoDBSaver
+from langgraph.checkpoint.memory import MemorySaver
 from pydantic import BaseModel, Field
 
-from .model import get_model, AgentConfig, LLMConfig
+from .model import get_model, AgentConfig
 
 
 class AgentResponse(BaseModel):
@@ -20,42 +20,32 @@ class AgentResponse(BaseModel):
         )
     )
 
-
 class RoutingResponse(AgentResponse):
     agent_card: str = Field(description="The stringified json of the agent card to be returned to the user")
 
-
 class StringResponse(AgentResponse):
     response: str = Field(description="The main response to be returned to the user")
 
 
 class StatusAgent[ResponseT: AgentResponse]:
 
-    def __init__(self, llm_config: LLMConfig, name: str, system_prompt: str, api_key: str, is_routing: bool,
-                 tools: list[BaseTool]):
+    def __init__(self, agent_config: AgentConfig, api_key: str, is_routing: bool, tools: list[BaseTool]):
         response_format: type[AgentResponse]
         if is_routing:
             response_format = RoutingResponse
         else:
             response_format = StringResponse
 
-
-        saver = DynamoDBSaver(
-            table_name=f"checkpoint_saver_{name}",
-            max_read_request_units=20, ## TODO find correct value for app
-            max_write_request_units=20, ## TODO find correct value for app
-            ttl_seconds=86400
-        )
         self.agent = create_agent(
             get_model(api_key=api_key,
-                      model=llm_config.model,
-                      base_url=llm_config.base_url,
-                      reasoning_effort=llm_config.reasoning_effort),
+                      model=agent_config.agent.llm.model,
+                      base_url=agent_config.agent.llm.base_url,
+                      reasoning_effort=agent_config.agent.llm.reasoning_effort),
             tools=tools,
-            checkpointer=saver,
-            system_prompt=system_prompt,
+            checkpointer=MemorySaver(), # TODO replace by dynamodb
+            system_prompt=agent_config.agent.llm.system_prompt,
             response_format=response_format,
-            name=name
+            name=agent_config.agent.card.name
         )
 
     async def __call__(self, message: str, context_id: str = None) -> ResponseT:
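
Note on usage: the rewritten `StatusAgent.__init__` now takes the whole `AgentConfig` instead of the separate `llm_config`/`name`/`system_prompt` arguments. A minimal construction sketch under that new signature; the config dict is hypothetical (see the schema in the new `model.py` below):

    import os

    from distributed_a2a.agent import StatusAgent, StringResponse
    from distributed_a2a.model import AgentConfig

    # raw_config: a hypothetical dict matching the new model.py schema (sketched below)
    config = AgentConfig.model_validate(raw_config)

    agent = StatusAgent[StringResponse](
        agent_config=config,
        api_key=os.environ.get(config.agent.llm.api_key_env),  # same lookup executors.py uses
        is_routing=False,  # False selects StringResponse output; True selects RoutingResponse
        tools=[],          # optional langchain_core BaseTool instances
    )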

distributed_a2a/executors.py
@@ -7,6 +7,7 @@ from a2a.server.events import EventQueue
 from a2a.types import TaskStatusUpdateEvent, TaskStatus, TaskState, TaskArtifactUpdateEvent, Artifact
 from a2a.utils import new_text_artifact
 from langchain_core.tools import BaseTool
+from openai import api_key
 
 from .agent import StatusAgent, RoutingResponse, StringResponse
 from .model import AgentConfig
@@ -26,15 +27,13 @@ class RoutingAgentExecutor(AgentExecutor):
         super().__init__()
         api_key = os.environ.get(agent_config.agent.llm.api_key_env)
         self.agent = StatusAgent[StringResponse](
-            llm_config=agent_config.agent.llm,
-            system_prompt=agent_config.agent.system_prompt,
-            name=agent_config.agent.card.name,
+            system_prompt=agent_config.agent.llm.system_prompt,
+            name="Router",
             api_key=api_key,
             is_routing=False,
             tools=[] if tools is None else tools,
         )
         self.routing_agent = StatusAgent[RoutingResponse](
-            llm_config=agent_config.agent.llm,
             system_prompt=ROUTING_SYSTEM_PROMPT,
             name="Router",
             api_key=api_key,

distributed_a2a/model.py (new file)
@@ -0,0 +1,50 @@
+from typing import List
+
+from langchain_core.language_models import BaseChatModel
+from langchain_openai import ChatOpenAI
+from pydantic import BaseModel
+
+
+class SkillConfig(BaseModel):
+    id: str
+    name: str
+    description: str
+    tags: List[str]
+
+
+class LLMConfig(BaseModel):
+    base_url: str
+    model: str
+    api_key_env: str
+    reasoning_effort: str
+    system_prompt: str
+
+
+class CardConfig(BaseModel):
+    name: str
+    version: str
+    default_input_modes: List[str]
+    default_output_modes: List[str]
+    preferred_transport_protocol: str
+    url: str
+    description: str
+    skills: List[SkillConfig]
+
+
+class AgentItem(BaseModel):
+    card: CardConfig
+    llm: LLMConfig
+
+
+class AgentConfig(BaseModel):
+    agent: AgentItem
+
+
+
+def get_model(api_key: str, model: str, base_url: str, reasoning_effort: str) -> BaseChatModel:
+    return ChatOpenAI(
+        model=model,
+        base_url=base_url,
+        api_key=lambda: api_key,
+        reasoning_effort=reasoning_effort
+    )
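
For reference, a hypothetical config that validates against this flattened schema. Every field is now required (the `Field(default=...)` declarations from the removed `model.py`, shown at the bottom of this diff, were dropped), and `system_prompt` has moved from `AgentItem` into `LLMConfig`; all values below are illustrative only:

    from distributed_a2a.model import AgentConfig

    raw_config = {
        "agent": {
            "card": {
                "name": "weather-agent",
                "version": "0.1.0",
                "description": "Answers weather questions",
                "url": "http://localhost:8000",
                "default_input_modes": ["text", "text/plaintext"],
                "default_output_modes": ["text", "text/plaintext"],
                "preferred_transport_protocol": "HTTP+JSON",
                "skills": [{
                    "id": "weather",
                    "name": "weather",
                    "description": "Current weather lookup",
                    "tags": ["weather"],
                }],
            },
            "llm": {
                "base_url": "https://api.openai.com/v1",
                "model": "gpt-4o-mini",
                "api_key_env": "OPENAI_API_KEY",
                "reasoning_effort": "high",
                "system_prompt": "You are a helpful weather agent.",
            },
        },
    }
    config = AgentConfig.model_validate(raw_config)

Unlike the removed version, the new `AgentItem` no longer resolves `system_prompt` as a file path, so the prompt must be supplied inline.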

distributed_a2a/server.py
@@ -57,15 +57,14 @@ def load_app(agent_config: dict[str, Any]) -> FastAPI:
         ))
 
     agent_card = AgentCard(
-        name=agent_config.agent.card.name,
-        description=agent_config.agent.card.description,
-        url=agent_config.agent.card.url,
-        version=agent_config.agent.card.version,
-        default_input_modes=agent_config.agent.card.default_input_modes,
-        default_output_modes=agent_config.agent.card.default_output_modes,
+        name=agent_config.agent.name,
+        description=agent_config.agent.description,
+        url=agent_config.agent.url,
+        version=agent_config.agent.version,
+        default_input_modes=agent_config.agent.default_input_modes,
+        default_output_modes=agent_config.agent.default_output_modes,
         skills=skills,
-        preferred_transport=agent_config.agent.card.preferred_transport_protocol,
-        capabilities=CAPABILITIES
+        preferred_transport=agent_config.agent.preferred_transport_protocol,
     )
 
 
@@ -83,5 +82,5 @@ def load_app(agent_config: dict[str, Any]) -> FastAPI:
         http_handler=DefaultRequestHandler(
             agent_executor=executor,
             task_store=InMemoryTaskStore() #TODO replace with dynamodb store
-
-        )).build(title=agent_card.name, lifespan=lifespan, root_path=f"/{agent_config.agent.card.name}") #TODO use extra parameter
+        )
+    ).build(title=agent_card.name, lifespan=lifespan)
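
A minimal serving sketch for the rebuilt app, assuming uvicorn as the ASGI server (uvicorn is not declared as a dependency). `load_app` is annotated `dict[str, Any]`, but its body dereferences attributes such as `agent_config.agent.name`, so the validated `AgentConfig` from the sketch above is passed here; host and port are arbitrary. With `root_path` no longer set in `.build(...)`, the app presumably mounts at `/`:

    import uvicorn

    from distributed_a2a.server import load_app

    app = load_app(config)  # config: the AgentConfig sketched above
    uvicorn.run(app, host="0.0.0.0", port=8000)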

distributed_a2a.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: distributed_a2a
-Version: 0.1.5
+Version: 0.1.5rc1
 Summary: A library for building A2A agents with routing capabilities
 Home-page: https://github.com/Barra-Technologies/distributed-a2a
 Author: Fabian Bell
@@ -23,7 +23,6 @@ Requires-Dist: langchain>=0.1.0
 Requires-Dist: langchain-core>=0.1.0
 Requires-Dist: langchain-openai>=0.0.5
 Requires-Dist: langgraph>=0.0.20
-Requires-Dist: langgraph-dynamodb-checkpoint>=0.2.6.4
 Requires-Dist: pydantic>=2.0.0
 Requires-Dist: boto3>=1.28.0
 Requires-Dist: a2a>=0.1.0

distributed_a2a.egg-info/requires.txt
@@ -2,7 +2,6 @@ langchain>=0.1.0
 langchain-core>=0.1.0
 langchain-openai>=0.0.5
 langgraph>=0.0.20
-langgraph-dynamodb-checkpoint>=0.2.6.4
 pydantic>=2.0.0
 boto3>=1.28.0
 a2a>=0.1.0

pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "distributed_a2a"
-version = "0.1.5"
+version = "0.1.5rc1"
 description = "A library for building A2A agents with routing capabilities"
 readme = "README.md"
 requires-python = ">=3.10"

requirements.txt
@@ -2,9 +2,8 @@ langchain>=0.1.0
 langchain-core>=0.1.0
 langchain-openai>=0.0.5
 langgraph>=0.0.20
-langgraph-dynamodb-checkpoint>=0.2.6.4
 pydantic>=2.0.0
 boto3>=1.28.0
 a2a>=0.1.0
 build>=1.4.0
-twine>=6.2.0
+twine>=6.2.0

distributed_a2a/model.py (deleted)
@@ -1,59 +0,0 @@
-import os
-from typing import List, Any
-
-from langchain_core.language_models import BaseChatModel
-from langchain_openai import ChatOpenAI
-from pydantic import BaseModel, Field
-
-
-class SkillConfig(BaseModel):
-    id: str = Field(description="The id of the skill e.g. weather")
-    name: str = Field(description="The name of the skill e.g. weather")
-    description: str = Field(description="A short description of the skill")
-    tags: List[str] = Field(description="The tags associated with the skill")
-
-
-class LLMConfig(BaseModel):
-    base_url: str = Field(description="The base url of the LLM provider")
-    model: str = Field(description="The model to use for the LLM e.g. gpt-3.5-turbo")
-    api_key_env: str = Field(description="The environment variable containing the api key for the LLM provider")
-    reasoning_effort: str = Field(description="The reasoning effort to use for the LLM e.g. high", default="high")
-
-
-class CardConfig(BaseModel):
-    name: str = Field(description="The name of the agent")
-    description: str = Field(description="A short description of the agent")
-    version: str = Field(description="The version of the agent")
-    default_input_modes: List[str] = Field(description="The default input modes supported by the agent", default=["text", "text/plaintext"])
-    default_output_modes: List[str] = Field(description="The default output modes supported by the agent", default=["text", "text/plaintext"])
-    preferred_transport_protocol: str = Field(description="The preferred transport protocol for the agent", default="HTTP+JSON")
-    url: str = Field(description="The url of the agent")
-    skills: List[SkillConfig] = Field(description="The skills supported by the agent", default=[])
-
-
-class AgentItem(BaseModel):
-    card: CardConfig = Field(description="The agent card configuration node")
-    llm: LLMConfig = Field(description="The LLM configuration node")
-    system_prompt: str = Field(description="The system prompt to use for the LLM or a path to a file containing the system prompt")
-
-    def __init__(self, /, **data: Any) -> None:
-        prompt_or_path = data['system_prompt']
-        if os.path.exists(prompt_or_path):
-            with open(prompt_or_path, "r", encoding="utf-8") as f:
-                data['system_prompt'] = f.read()
-
-        super().__init__(**data)
-
-
-class AgentConfig(BaseModel):
-    agent: AgentItem = Field(description="The agent configuration node")
-
-
-
-def get_model(api_key: str, model: str, base_url: str, reasoning_effort: str) -> BaseChatModel:
-    return ChatOpenAI(
-        model=model,
-        base_url=base_url,
-        api_key=lambda: api_key,
-        reasoning_effort=reasoning_effort
-    )