distributed-a2a 0.1.5rc2__tar.gz → 0.1.5rc4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {distributed_a2a-0.1.5rc2/distributed_a2a.egg-info → distributed_a2a-0.1.5rc4}/PKG-INFO +1 -1
- distributed_a2a-0.1.5rc4/distributed_a2a/model.py +50 -0
- {distributed_a2a-0.1.5rc2 → distributed_a2a-0.1.5rc4/distributed_a2a.egg-info}/PKG-INFO +1 -1
- {distributed_a2a-0.1.5rc2 → distributed_a2a-0.1.5rc4}/pyproject.toml +1 -1
- distributed_a2a-0.1.5rc2/distributed_a2a/model.py +0 -50
- {distributed_a2a-0.1.5rc2 → distributed_a2a-0.1.5rc4}/LICENSE +0 -0
- {distributed_a2a-0.1.5rc2 → distributed_a2a-0.1.5rc4}/MANIFEST.in +0 -0
- {distributed_a2a-0.1.5rc2 → distributed_a2a-0.1.5rc4}/README.md +0 -0
- {distributed_a2a-0.1.5rc2 → distributed_a2a-0.1.5rc4}/distributed_a2a/__init__.py +0 -0
- {distributed_a2a-0.1.5rc2 → distributed_a2a-0.1.5rc4}/distributed_a2a/agent.py +0 -0
- {distributed_a2a-0.1.5rc2 → distributed_a2a-0.1.5rc4}/distributed_a2a/client.py +0 -0
- {distributed_a2a-0.1.5rc2 → distributed_a2a-0.1.5rc4}/distributed_a2a/executors.py +0 -0
- {distributed_a2a-0.1.5rc2 → distributed_a2a-0.1.5rc4}/distributed_a2a/registry.py +0 -0
- {distributed_a2a-0.1.5rc2 → distributed_a2a-0.1.5rc4}/distributed_a2a/server.py +0 -0
- {distributed_a2a-0.1.5rc2 → distributed_a2a-0.1.5rc4}/distributed_a2a.egg-info/SOURCES.txt +0 -0
- {distributed_a2a-0.1.5rc2 → distributed_a2a-0.1.5rc4}/distributed_a2a.egg-info/dependency_links.txt +0 -0
- {distributed_a2a-0.1.5rc2 → distributed_a2a-0.1.5rc4}/distributed_a2a.egg-info/requires.txt +0 -0
- {distributed_a2a-0.1.5rc2 → distributed_a2a-0.1.5rc4}/distributed_a2a.egg-info/top_level.txt +0 -0
- {distributed_a2a-0.1.5rc2 → distributed_a2a-0.1.5rc4}/requirements.txt +0 -0
- {distributed_a2a-0.1.5rc2 → distributed_a2a-0.1.5rc4}/setup.cfg +0 -0
- {distributed_a2a-0.1.5rc2 → distributed_a2a-0.1.5rc4}/setup.py +0 -0
distributed_a2a-0.1.5rc4/distributed_a2a/model.py
ADDED

```diff
@@ -0,0 +1,50 @@
+from typing import List
+
+from langchain_core.language_models import BaseChatModel
+from langchain_openai import ChatOpenAI
+from pydantic import BaseModel, Field
+
+
+class SkillConfig(BaseModel):
+    id: str = Field(description="The id of the skill e.g. weather")
+    name: str = Field(description="The name of the skill e.g. weather")
+    description: str = Field(description="A short description of the skill")
+    tags: List[str] = Field(description="The tags associated with the skill")
+
+
+class LLMConfig(BaseModel):
+    base_url: str = Field(description="The base url of the LLM provider")
+    model: str = Field(description="The model to use for the LLM e.g. gpt-3.5-turbo")
+    api_key_env: str = Field(description="The environment variable containing the api key for the LLM provider")
+    reasoning_effort: str = Field(description="The reasoning effort to use for the LLM e.g. high", default="high")
+    system_prompt: str = Field(description="The system prompt to use for the LLM")
+
+
+class CardConfig(BaseModel):
+    name: str = Field(description="The name of the agent")
+    description: str = Field(description="A short description of the agent")
+    version: str = Field(description="The version of the agent")
+    default_input_modes: List[str] = Field(description="The default input modes supported by the agent", default=["text", "text/plaintext"])
+    default_output_modes: List[str] = Field(description="The default output modes supported by the agent", default=["text", "text/plaintext"])
+    preferred_transport_protocol: str = Field(description="The preferred transport protocol for the agent", default="HTTP+JSON")
+    url: str = Field(description="The url of the agent")
+    skills: List[SkillConfig] = Field(description="The skills supported by the agent", default=[])
+
+
+class AgentItem(BaseModel):
+    card: CardConfig = Field(description="The agent card configuration node")
+    llm: LLMConfig = Field(description="The LLM configuration node")
+
+
+class AgentConfig(BaseModel):
+    agent: AgentItem = Field(description="The agent configuration node")
+
+
+def get_model(api_key: str, model: str, base_url: str, reasoning_effort: str) -> BaseChatModel:
+    return ChatOpenAI(
+        model=model,
+        base_url=base_url,
+        api_key=lambda: api_key,
+        reasoning_effort=reasoning_effort
+    )
```
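For context, here is a minimal usage sketch that is not part of the package. It assumes Pydantic v2 and uses hypothetical values throughout. It illustrates what the rc4 change buys: fields that gained `Field(default=...)` can now be omitted, and the new `description` metadata shows up in the generated JSON schema.

```python
# Hypothetical usage sketch; values are illustrative, not taken from the package.
from distributed_a2a.model import CardConfig, SkillConfig

card = CardConfig(
    name="weather-agent",
    description="Answers questions about the weather",
    version="0.1.5rc4",
    url="http://localhost:9999/",
    skills=[
        SkillConfig(
            id="weather",
            name="weather",
            description="Current conditions and forecasts",
            tags=["weather"],
        )
    ],
)

# Fields that gained defaults in rc4 no longer have to be supplied explicitly.
print(card.default_input_modes)           # ['text', 'text/plaintext']
print(card.preferred_transport_protocol)  # 'HTTP+JSON'

# The Field descriptions now appear in the schema (Pydantic v2 API assumed).
print(CardConfig.model_json_schema()["properties"]["name"]["description"])
```

In rc2 every CardConfig field was a bare annotation with no default, so omitting `default_input_modes` or `preferred_transport_protocol` would have raised a validation error.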
distributed_a2a-0.1.5rc2/distributed_a2a/model.py
DELETED

```diff
@@ -1,50 +0,0 @@
-from typing import List
-
-from langchain_core.language_models import BaseChatModel
-from langchain_openai import ChatOpenAI
-from pydantic import BaseModel
-
-
-class SkillConfig(BaseModel):
-    id: str
-    name: str
-    description: str
-    tags: List[str]
-
-
-class LLMConfig(BaseModel):
-    base_url: str
-    model: str
-    api_key_env: str
-    reasoning_effort: str
-    system_prompt: str
-
-
-class CardConfig(BaseModel):
-    name: str
-    version: str
-    default_input_modes: List[str]
-    default_output_modes: List[str]
-    preferred_transport_protocol: str
-    url: str
-    description: str
-    skills: List[SkillConfig]
-
-
-class AgentItem(BaseModel):
-    card: CardConfig
-    llm: LLMConfig
-
-
-class AgentConfig(BaseModel):
-    agent: AgentItem
-
-
-def get_model(api_key: str, model: str, base_url: str, reasoning_effort: str) -> BaseChatModel:
-    return ChatOpenAI(
-        model=model,
-        base_url=base_url,
-        api_key=lambda: api_key,
-        reasoning_effort=reasoning_effort
-    )
```
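A second sketch, again an illustration rather than the package's documented API, shows validating the full nested AgentConfig from a plain dict such as one loaded from a YAML config file. The field values and the environment-variable name are hypothetical, and the Pydantic v2 `model_validate` API is assumed.

```python
# Hypothetical nested config; only the structure mirrors model.py.
import os

from distributed_a2a.model import AgentConfig

raw = {
    "agent": {
        "card": {
            "name": "weather-agent",
            "description": "Answers questions about the weather",
            "version": "0.1.5rc4",
            "url": "http://localhost:9999/",
            "skills": [],
        },
        "llm": {
            "base_url": "https://api.openai.com/v1",
            "model": "gpt-3.5-turbo",
            "api_key_env": "OPENAI_API_KEY",  # name of the env var, not the key itself
            "system_prompt": "You are a helpful weather assistant.",
            # reasoning_effort falls back to the rc4 default "high"
        },
    }
}

cfg = AgentConfig.model_validate(raw)  # Pydantic v2 API assumed
api_key = os.environ.get(cfg.agent.llm.api_key_env, "")

# api_key, cfg.agent.llm.model, cfg.agent.llm.base_url and
# cfg.agent.llm.reasoning_effort are the values that get_model() in model.py
# expects; the call itself is omitted here since it constructs a live ChatOpenAI.
```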
All remaining files in the summary above (LICENSE, MANIFEST.in, README.md, the distributed_a2a modules, the distributed_a2a.egg-info metadata, requirements.txt, setup.cfg, setup.py) were renamed from the distributed_a2a-0.1.5rc2 prefix to distributed_a2a-0.1.5rc4 without content changes.