idun-agent-engine 0.2.7__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. idun_agent_engine/_version.py +1 -1
  2. idun_agent_engine/agent/adk/__init__.py +5 -0
  3. idun_agent_engine/agent/adk/adk.py +296 -0
  4. idun_agent_engine/agent/base.py +7 -1
  5. idun_agent_engine/agent/haystack/haystack.py +5 -1
  6. idun_agent_engine/agent/langgraph/langgraph.py +158 -55
  7. idun_agent_engine/core/app_factory.py +9 -0
  8. idun_agent_engine/core/config_builder.py +222 -21
  9. idun_agent_engine/core/engine_config.py +1 -2
  10. idun_agent_engine/core/server_runner.py +2 -3
  11. idun_agent_engine/guardrails/__init__.py +0 -0
  12. idun_agent_engine/guardrails/base.py +24 -0
  13. idun_agent_engine/guardrails/guardrails_hub/guardrails_hub.py +101 -0
  14. idun_agent_engine/guardrails/guardrails_hub/utils.py +1 -0
  15. idun_agent_engine/mcp/__init__.py +5 -0
  16. idun_agent_engine/mcp/helpers.py +97 -0
  17. idun_agent_engine/mcp/registry.py +109 -0
  18. idun_agent_engine/observability/__init__.py +6 -2
  19. idun_agent_engine/observability/base.py +73 -12
  20. idun_agent_engine/observability/gcp_logging/__init__.py +0 -0
  21. idun_agent_engine/observability/gcp_logging/gcp_logging_handler.py +52 -0
  22. idun_agent_engine/observability/gcp_trace/__init__.py +0 -0
  23. idun_agent_engine/observability/gcp_trace/gcp_trace_handler.py +116 -0
  24. idun_agent_engine/observability/langfuse/langfuse_handler.py +17 -10
  25. idun_agent_engine/server/dependencies.py +13 -1
  26. idun_agent_engine/server/lifespan.py +80 -16
  27. idun_agent_engine/server/routers/agent.py +135 -27
  28. idun_agent_engine/server/routers/agui.py +47 -0
  29. idun_agent_engine/server/routers/base.py +55 -1
  30. idun_agent_engine/templates/__init__.py +1 -0
  31. idun_agent_engine/templates/correction.py +65 -0
  32. idun_agent_engine/templates/deep_research.py +40 -0
  33. idun_agent_engine/templates/translation.py +70 -0
  34. {idun_agent_engine-0.2.7.dist-info → idun_agent_engine-0.3.1.dist-info}/METADATA +62 -10
  35. idun_agent_engine-0.3.1.dist-info/RECORD +60 -0
  36. {idun_agent_engine-0.2.7.dist-info → idun_agent_engine-0.3.1.dist-info}/WHEEL +1 -1
  37. idun_platform_cli/groups/agent/package.py +3 -3
  38. idun_platform_cli/groups/agent/serve.py +8 -5
  39. idun_agent_engine/cli/__init__.py +0 -16
  40. idun_agent_engine-0.2.7.dist-info/RECORD +0 -43
  41. {idun_agent_engine-0.2.7.dist-info → idun_agent_engine-0.3.1.dist-info}/entry_points.txt +0 -0
@@ -3,17 +3,18 @@
3
3
  import logging
4
4
  from typing import Annotated
5
5
 
6
+ from ag_ui.core.types import RunAgentInput
7
+ from ag_ui.encoder import EventEncoder
8
+ from ag_ui_adk import ADKAgent as ADKAGUIAgent
9
+ from copilotkit import LangGraphAGUIAgent
6
10
  from fastapi import APIRouter, Depends, HTTPException, Request, status
7
11
  from fastapi.responses import StreamingResponse
8
12
  from idun_agent_schema.engine.api import ChatRequest, ChatResponse
13
+ from idun_agent_schema.engine.guardrails import Guardrail
9
14
 
10
15
  from idun_agent_engine.agent.base import BaseAgent
11
16
  from idun_agent_engine.server.dependencies import get_agent, get_copilotkit_agent
12
17
 
13
- from ag_ui.core.types import RunAgentInput
14
- from ag_ui.encoder import EventEncoder
15
- from copilotkit import LangGraphAGUIAgent
16
-
17
18
  logging.basicConfig(
18
19
  format="%(asctime)s %(levelname)-8s %(message)s",
19
20
  level=logging.INFO,
@@ -24,6 +25,25 @@ logger = logging.getLogger(__name__)
24
25
  agent_router = APIRouter()
25
26
 
26
27
 
28
+ def _format_deep_agent_response(response_content: list[dict[str, str]]) -> str:
29
+ """Deep Research Agent responds with a list containing a single dict: {'type': 'text', 'text': 'Your text'}."""
30
+ try:
31
+ response = response_content[0]["text"]
32
+ return response
33
+ except KeyError as k:
34
+ raise ValueError("Cannot parse Deep Research Agent's response") from k
35
+
36
+
37
+ def _run_guardrails(
38
+ guardrails: list[Guardrail], message: dict[str, str] | str, position: str
39
+ ) -> None:
40
+ """Validates the request's message by running it through the given guardrails. If the input is a dict -> input guardrails, else -> output guardrails."""
41
+ text = message["query"] if isinstance(message, dict) else message
42
+ for guard in guardrails:
43
+ if guard.position == position and not guard.validate(text): # type: ignore[attr-defined]
44
+ raise HTTPException(status_code=429, detail=guard.reject_message) # type: ignore[attr-defined]
45
+
46
+
27
47
  @agent_router.get("/config")
28
48
  async def get_config(request: Request):
29
49
  """Get the current agent configuration."""
@@ -35,21 +55,35 @@ async def get_config(request: Request):
35
55
  )
36
56
 
37
57
  config = request.app.state.engine_config.agent
38
- logger.info(f"Fetched config for agent: {config}")
58
+ logger.info(f"Fetched config for agent: {request.app.state.engine_config}")
39
59
  return {"config": config}
40
60
 
41
61
 
42
62
  @agent_router.post("/invoke", response_model=ChatResponse)
43
63
  async def invoke(
44
- request: ChatRequest,
64
+ chat_request: ChatRequest,
65
+ request: Request,
45
66
  agent: Annotated[BaseAgent, Depends(get_agent)],
46
67
  ):
47
68
  """Process a chat message with the agent without streaming."""
48
69
  try:
49
- message = {"query": request.query, "session_id": request.session_id}
50
- response_content = await agent.invoke(message)
70
+ message = {"query": chat_request.query, "session_id": chat_request.session_id}
71
+ guardrails = getattr(request.app.state, "guardrails", [])
72
+ if guardrails:
73
+ _run_guardrails(guardrails, message, position="input")
74
+ response_content = await agent.invoke(
75
+ {"query": message["query"], "session_id": message["session_id"]}
76
+ )
77
+ if guardrails:
78
+ _run_guardrails(guardrails, response_content, position="output")
79
+
80
+ if agent.name == "Deep Research Agent":
81
+ return ChatResponse(
82
+ session_id=message["session_id"],
83
+ response=_format_deep_agent_response(response_content),
84
+ )
85
+ return ChatResponse(session_id=message["session_id"], response=response_content)
51
86
 
52
- return ChatResponse(session_id=request.session_id, response=response_content)
53
87
  except Exception as e: # noqa: BLE001
54
88
  raise HTTPException(status_code=500, detail=str(e)) from e
55
89
 
@@ -70,27 +104,101 @@ async def stream(
70
104
  except Exception as e: # noqa: BLE001
71
105
  raise HTTPException(status_code=500, detail=str(e)) from e
72
106
 
107
+
73
108
  @agent_router.post("/copilotkit/stream")
74
109
  async def copilotkit_stream(
75
110
  input_data: RunAgentInput,
76
111
  request: Request,
77
- copilotkit_agent: Annotated[LangGraphAGUIAgent, Depends(get_copilotkit_agent)],
112
+ copilotkit_agent: Annotated[
113
+ LangGraphAGUIAgent | ADKAGUIAgent, Depends(get_copilotkit_agent)
114
+ ],
78
115
  ):
79
116
  """Process a message with the agent, streaming ag-ui events."""
80
- try:
81
- # Get the accept header from the request
82
- accept_header = request.headers.get("accept")
83
-
84
- # Create an event encoder to properly format SSE events
85
- encoder = EventEncoder(accept=accept_header or "") # type: ignore[arg-type]
86
-
87
- async def event_generator():
88
- async for event in copilotkit_agent.run(input_data):
89
- yield encoder.encode(event)
90
-
91
- return StreamingResponse(
92
- event_generator(), # type: ignore[arg-type]
93
- media_type=encoder.get_content_type()
94
- )
95
- except Exception as e: # noqa: BLE001
96
- raise HTTPException(status_code=500, detail=str(e)) from e
117
+ if isinstance(copilotkit_agent, LangGraphAGUIAgent):
118
+ try:
119
+ # Get the accept header from the request
120
+ accept_header = request.headers.get("accept")
121
+
122
+ # Create an event encoder to properly format SSE events
123
+ encoder = EventEncoder(accept=accept_header or "") # type: ignore[arg-type]
124
+
125
+ async def event_generator():
126
+ async for event in copilotkit_agent.run(input_data):
127
+ yield encoder.encode(event) # type: ignore[arg-type]
128
+
129
+ return StreamingResponse(
130
+ event_generator(), # type: ignore[arg-type]
131
+ media_type=encoder.get_content_type(),
132
+ )
133
+ except Exception as e: # noqa: BLE001
134
+ raise HTTPException(status_code=500, detail=str(e)) from e
135
+ elif isinstance(copilotkit_agent, ADKAGUIAgent):
136
+ try:
137
+ # Get the accept header from the request
138
+ accept_header = request.headers.get("accept")
139
+ agent_id = request.url.path.lstrip("/")
140
+
141
+ # Create an event encoder to properly format SSE events
142
+ encoder = EventEncoder(accept=accept_header or "")
143
+
144
+ async def event_generator():
145
+ """Generate events from ADK agent."""
146
+ try:
147
+ async for event in copilotkit_agent.run(input_data):
148
+ try:
149
+ encoded = encoder.encode(event)
150
+ logger.debug(f"HTTP Response: {encoded}")
151
+ yield encoded
152
+ except Exception as encoding_error:
153
+ # Handle encoding-specific errors
154
+ logger.error(
155
+ f"❌ Event encoding error: {encoding_error}",
156
+ exc_info=True,
157
+ )
158
+ # Create a RunErrorEvent for encoding failures
159
+ from ag_ui.core import EventType, RunErrorEvent
160
+
161
+ error_event = RunErrorEvent(
162
+ type=EventType.RUN_ERROR,
163
+ message=f"Event encoding failed: {str(encoding_error)}",
164
+ code="ENCODING_ERROR",
165
+ )
166
+ try:
167
+ error_encoded = encoder.encode(error_event)
168
+ yield error_encoded
169
+ except Exception:
170
+ # If we can't even encode the error event, yield a basic SSE error
171
+ logger.error(
172
+ "Failed to encode error event, yielding basic SSE error"
173
+ )
174
+ yield 'event: error\ndata: {"error": "Event encoding failed"}\n\n'
175
+ break # Stop the stream after an encoding error
176
+ except Exception as agent_error:
177
+ # Handle errors from ADKAgent.run() itself
178
+ logger.error(f"❌ ADKAgent error: {agent_error}", exc_info=True)
179
+ # ADKAgent should have yielded a RunErrorEvent, but if something went wrong
180
+ # in the async generator itself, we need to handle it
181
+ try:
182
+ from ag_ui.core import EventType, RunErrorEvent
183
+
184
+ error_event = RunErrorEvent(
185
+ type=EventType.RUN_ERROR,
186
+ message=f"Agent execution failed: {str(agent_error)}",
187
+ code="AGENT_ERROR",
188
+ )
189
+ error_encoded = encoder.encode(error_event)
190
+ yield error_encoded
191
+ except Exception:
192
+ # If we can't encode the error event, yield a basic SSE error
193
+ logger.error(
194
+ "Failed to encode agent error event, yielding basic SSE error"
195
+ )
196
+ yield 'event: error\ndata: {"error": "Agent execution failed"}\n\n'
197
+
198
+ return StreamingResponse(
199
+ event_generator(), media_type=encoder.get_content_type()
200
+ )
201
+ except Exception as e: # noqa: BLE001
202
+ raise HTTPException(status_code=500, detail=str(e)) from e
203
+ else:
204
+ raise HTTPException(status_code=400, detail="Invalid agent type")
@@ -0,0 +1,47 @@
1
+ # """AGUI routes for CopilotKit integration with LangGraph agents."""
2
+
3
+ # import logging
4
+ # from typing import Annotated
5
+
6
+ # from ag_ui_langgraph import add_langgraph_fastapi_endpoint
7
+ # from copilotkit import LangGraphAGUIAgent
8
+ # from ag_ui_adk import ADKAgent as ADKAGUIAgent
9
+ # from ag_ui_adk import add_adk_fastapi_endpoint
10
+ # from fastapi import APIRouter, Depends, HTTPException, Request
11
+
12
+ # from idun_agent_engine.agent.langgraph.langgraph import LanggraphAgent
13
+ # from idun_agent_engine.agent.adk.adk import AdkAgent
14
+ # from idun_agent_engine.server.dependencies import get_agent
15
+
16
+ # logging.basicConfig(
17
+ # format="%(asctime)s %(levelname)-8s %(message)s",
18
+ # level=logging.INFO,
19
+ # datefmt="%Y-%m-%d %H:%M:%S",
20
+ # )
21
+
22
+ # logger = logging.getLogger(__name__)
23
+
24
+
25
+ # def setup_agui_router(app, agent: LanggraphAgent | AdkAgent) -> LangGraphAGUIAgent | ADKAGUIAgent:
26
+ # """Set up AGUI routes for CopilotKit integration.
27
+
28
+ # This function adds the LangGraph agent as a CopilotKit-compatible endpoint.
29
+
30
+ # Args:
31
+ # app: The FastAPI application instance
32
+ # agent: The initialized LangGraph agent instance
33
+ # """
34
+ # try:
35
+ # if isinstance(agent, LanggraphAgent):
36
+ # # Create the AGUI agent wrapper
37
+ # agui_agent = agent.copilotkit_agent_instance
38
+ # elif isinstance(agent, AdkAgent):
39
+ # # Create the AGUI agent wrapper
40
+ # agui_agent = agent.copilotkit_agent_instance # TODO: duplicate in agent.adk.adk.py init
41
+ # else:
42
+ # raise ValueError(f"Unsupported agent type: {type(agent)}")
43
+ # return agui_agent
44
+ # logger.info(f"✅ AGUI endpoint configured at /agui for agent: {agent.name}")
45
+ # except Exception as e:
46
+ # logger.error(f"❌ Failed to setup AGUI router: {e}")
47
+ # raise HTTPException(status_code=500, detail=f"Failed to setup AGUI router: {e}") from e
@@ -1,18 +1,72 @@
1
1
  """Base routes for service health and landing info."""
2
2
 
3
- from fastapi import APIRouter
3
+ import os
4
+ from typing import Optional
5
+ from fastapi import APIRouter, Request, HTTPException
6
+ from pydantic import BaseModel
4
7
 
5
8
  from ..._version import __version__
9
+ from ...core.config_builder import ConfigBuilder
10
+ from ..lifespan import cleanup_agent, configure_app
6
11
 
7
12
  base_router = APIRouter()
8
13
 
9
14
 
15
+ class ReloadRequest(BaseModel):
16
+ """Request body for reload endpoint."""
17
+
18
+ path: Optional[str] = None
19
+
20
+
10
21
  @base_router.get("/health")
11
22
  def health_check():
12
23
  """Health check endpoint for monitoring and load balancers."""
13
24
  return {"status": "healthy", "engine_version": __version__}
14
25
 
15
26
 
27
+ @base_router.post("/reload")
28
+ async def reload_config(request: Request, body: Optional[ReloadRequest] = None):
29
+ """Reload the agent configuration from the manager or a file."""
30
+
31
+ try:
32
+ if body and body.path:
33
+ print(f"🔄 Reloading configuration from file: {body.path}...")
34
+ new_config = ConfigBuilder.load_from_file(body.path)
35
+ else:
36
+ print("🔄 Reloading configuration from manager...")
37
+ agent_api_key = os.getenv("IDUN_AGENT_API_KEY")
38
+ manager_host = os.getenv("IDUN_MANAGER_HOST")
39
+
40
+ if not agent_api_key or not manager_host:
41
+ raise HTTPException(
42
+ status_code=400,
43
+ detail="Cannot reload from manager: IDUN_AGENT_API_KEY or IDUN_MANAGER_HOST environment variables are missing.",
44
+ )
45
+
46
+ # Fetch new config
47
+ config_builder = ConfigBuilder().with_config_from_api(
48
+ agent_api_key=agent_api_key, url=manager_host
49
+ )
50
+ new_config = config_builder.build()
51
+
52
+ # Cleanup old agent
53
+ await cleanup_agent(request.app)
54
+
55
+ # Initialize new agent
56
+ await configure_app(request.app, new_config)
57
+
58
+ return {
59
+ "status": "success",
60
+ "message": "Agent configuration reloaded successfully",
61
+ }
62
+
63
+ except Exception as e:
64
+ print(f"❌ Error reloading configuration: {e}")
65
+ raise HTTPException(
66
+ status_code=500, detail=f"Failed to reload configuration: {str(e)}"
67
+ )
68
+
69
+
16
70
  # Add a root endpoint with helpful information
17
71
  @base_router.get("/")
18
72
  def read_root():
@@ -0,0 +1 @@
1
+ """Agent templates package."""
@@ -0,0 +1,65 @@
1
+ """Correction Agent Template."""
2
+
3
+ import os
4
+ from typing import TypedDict, Annotated, List, Any
5
+
6
+ try:
7
+ from langchain.chat_models import init_chat_model
8
+ except ImportError:
9
+ try:
10
+ from langchain_core.language_models import init_chat_model
11
+ except ImportError:
12
+ init_chat_model = None
13
+
14
+ from langchain_core.messages import SystemMessage, BaseMessage
15
+ from langgraph.graph import StateGraph, START, END
16
+ from langgraph.graph.message import add_messages
17
+
18
+
19
+ class State(TypedDict):
20
+ messages: Annotated[List[BaseMessage], add_messages]
21
+
22
+
23
+ MODEL_NAME = os.getenv("CORRECTION_MODEL", "gemini-2.5-flash")
24
+ LANGUAGE = os.getenv("CORRECTION_LANGUAGE", "French")
25
+
26
+ llm: Any = None
27
+ if init_chat_model and callable(init_chat_model):
28
+ try:
29
+ llm = init_chat_model(MODEL_NAME)
30
+ except Exception as e:
31
+ print(f"Warning: Failed to init model {MODEL_NAME}: {e}")
32
+ else:
33
+ print("Warning: init_chat_model not found in langchain.")
34
+
35
+
36
+
37
+ async def correct_text(state: State):
38
+ """Correct the spelling, syntax, and grammar of the text."""
39
+ if not llm:
40
+
41
+ return {
42
+ "messages": [
43
+ SystemMessage(content="Error: Model not initialized. Check logs.")
44
+ ]
45
+ }
46
+
47
+
48
+ prompt = (
49
+ f"You are a professional text corrector for {LANGUAGE}. "
50
+ f"Correct the spelling, syntax, grammar, and conjugation of the following text. "
51
+ f"Return ONLY the corrected text without any explanations or modifications to the meaning."
52
+ )
53
+
54
+ messages = [SystemMessage(content=prompt)] + state["messages"]
55
+
56
+ response = await llm.ainvoke(messages)
57
+ return {"messages": [response]}
58
+
59
+
60
+ workflow = StateGraph(State)
61
+ workflow.add_node("correct", correct_text)
62
+ workflow.add_edge(START, "correct")
63
+ workflow.add_edge("correct", END)
64
+
65
+ graph = workflow.compile()
@@ -0,0 +1,40 @@
1
+ """Deep Research Agent Template."""
2
+
3
+ import os
4
+ from deepagents import create_deep_agent
5
+ from tavily import TavilyClient
6
+
7
+ try:
8
+ from langchain.chat_models import init_chat_model
9
+ except ImportError:
10
+ try:
11
+ from langchain_core.language_models import init_chat_model
12
+ except ImportError:
13
+ init_chat_model = None
14
+
15
+ MODEL_NAME = os.getenv("DEEP_RESEARCH_MODEL", "gemini-2.5-flash")
16
+
17
+ SYSTEM_PROMPT = os.getenv(
18
+ "DEEP_RESEARCH_PROMPT", "Conduct research and write a polished report."
19
+ )
20
+
21
+ TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
22
+
23
+ tavily_client = TavilyClient(api_key=TAVILY_API_KEY)
24
+
25
+
26
+ def internet_search(query: str, max_results: int = 5):
27
+ """Run a web search"""
28
+ return tavily_client.search(query, max_results=max_results)
29
+
30
+
31
+ llm = None
32
+ if init_chat_model:
33
+ try:
34
+ llm = init_chat_model(MODEL_NAME)
35
+ except Exception as e:
36
+ print(f"Warning: Failed to init model {MODEL_NAME}: {e}")
37
+ else:
38
+ print("Warning: init_chat_model not found in langchain.")
39
+
40
+ graph = create_deep_agent(llm, [internet_search], system_prompt=SYSTEM_PROMPT)
@@ -0,0 +1,70 @@
1
+ """Translation Agent Template."""
2
+
3
+ import os
4
+ from typing import TypedDict, Annotated, List
5
+
6
+ # Try importing init_chat_model, fallback if necessary
7
+ try:
8
+ from langchain.chat_models import init_chat_model
9
+ except ImportError:
10
+ try:
11
+ from langchain_core.language_models import init_chat_model
12
+ except ImportError:
13
+ init_chat_model = None
14
+
15
+ from langchain_core.messages import SystemMessage, BaseMessage
16
+ from langgraph.graph import StateGraph, START, END
17
+ from langgraph.graph.message import add_messages
18
+
19
+
20
+ # Define the state
21
+ class State(TypedDict):
22
+ messages: Annotated[List[BaseMessage], add_messages]
23
+
24
+
25
+ # Read configuration from environment variables
26
+ # These are set by ConfigBuilder when initializing the agent
27
+ MODEL_NAME = os.getenv("TRANSLATION_MODEL", "gemini-2.5-flash")
28
+ SOURCE_LANG = os.getenv("TRANSLATION_SOURCE_LANG", "English")
29
+ TARGET_LANG = os.getenv("TRANSLATION_TARGET_LANG", "French")
30
+
31
+ # Initialize the model
32
+ llm = None
33
+ if init_chat_model:
34
+ try:
35
+ # init_chat_model requires langchain>=0.2.x or similar.
36
+ # It auto-detects provider from model name (e.g. "gpt-4" -> openai, "claude" -> anthropic)
37
+ # provided the integration packages are installed.
38
+ llm = init_chat_model(MODEL_NAME)
39
+ except Exception as e:
40
+ print(f"Warning: Failed to init model {MODEL_NAME}: {e}")
41
+ else:
42
+ print("Warning: init_chat_model not found in langchain.")
43
+
44
+
45
+ async def translate(state: State):
46
+ """Translate the last message."""
47
+ if not llm:
48
+ return {
49
+ "messages": [
50
+ SystemMessage(content="Error: Model not initialized. Check logs.")
51
+ ]
52
+ }
53
+
54
+ prompt = (
55
+ f"You are a professional translator. Translate the following text "
56
+ f"from {SOURCE_LANG} to {TARGET_LANG}. Output ONLY the translation."
57
+ )
58
+
59
+ messages = [SystemMessage(content=prompt)] + state["messages"]
60
+
61
+ response = await llm.ainvoke(messages)
62
+ return {"messages": [response]}
63
+
64
+
65
+ workflow = StateGraph(State)
66
+ workflow.add_node("translate", translate)
67
+ workflow.add_edge(START, "translate")
68
+ workflow.add_edge("translate", END)
69
+
70
+ graph = workflow.compile()
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: idun-agent-engine
3
- Version: 0.2.7
3
+ Version: 0.3.1
4
4
  Summary: Python SDK and runtime to serve AI agents with FastAPI, LangGraph, and observability.
5
5
  Project-URL: Homepage, https://github.com/geoffreyharrazi/idun-agent-platform
6
6
  Project-URL: Repository, https://github.com/geoffreyharrazi/idun-agent-platform
@@ -19,29 +19,43 @@ Classifier: Programming Language :: Python :: 3.13
19
19
  Classifier: Topic :: Software Development :: Libraries
20
20
  Classifier: Typing :: Typed
21
21
  Requires-Python: <3.14,>=3.12
22
+ Requires-Dist: ag-ui-adk<0.4.0,>=0.3.4
22
23
  Requires-Dist: ag-ui-langgraph<0.1.0,>=0.0.20
23
24
  Requires-Dist: ag-ui-protocol<0.2.0,>=0.1.8
24
25
  Requires-Dist: aiosqlite<0.22.0,>=0.21.0
25
26
  Requires-Dist: arize-phoenix-otel<1.0.0,>=0.2.0
26
27
  Requires-Dist: arize-phoenix<12.0.0,>=11.22.0
27
- Requires-Dist: click>=8.2.1
28
+ Requires-Dist: click>=8.2.0
28
29
  Requires-Dist: copilotkit<0.2.0,>=0.1.72
30
+ Requires-Dist: deepagents<1.0.0,>=0.2.8
29
31
  Requires-Dist: fastapi<0.116.0,>=0.115.0
30
- Requires-Dist: google-adk<2.0.0,>=1.9.0
32
+ Requires-Dist: google-adk<2.0.0,>=1.19.0
33
+ Requires-Dist: google-cloud-logging<4.0.0,>=3.10.0
34
+ Requires-Dist: guardrails-ai<0.8.0,>=0.7.0
31
35
  Requires-Dist: httpx<0.29.0,>=0.28.1
32
- Requires-Dist: idun-agent-schema<0.3.0,>=0.2.7
33
- Requires-Dist: langchain-core<0.4.0,>=0.3.72
34
- Requires-Dist: langchain-google-vertexai<3.0.0,>=2.0.27
35
- Requires-Dist: langchain<0.4,>=0.3.27
36
+ Requires-Dist: idun-agent-schema<1.0.0,>=0.2.7
37
+ Requires-Dist: langchain-core<2.0.0,>=1.0.0
38
+ Requires-Dist: langchain-google-vertexai<4.0.0,>=2.0.27
39
+ Requires-Dist: langchain-mcp-adapters<0.2.0,>=0.1.0
40
+ Requires-Dist: langchain<2.0.0,>=1.0.0
36
41
  Requires-Dist: langfuse-haystack>=2.3.0
37
- Requires-Dist: langfuse==2.60.8
38
- Requires-Dist: langgraph-checkpoint-sqlite<3.0.0,>=2.0.11
39
- Requires-Dist: langgraph<0.7.0,>=0.6.3
42
+ Requires-Dist: langfuse<4.0.0,>=2.60.8
43
+ Requires-Dist: langgraph-checkpoint-postgres<4.0.0,>=3.0.0
44
+ Requires-Dist: langgraph-checkpoint-sqlite<4.0.0,>=3.0.0
45
+ Requires-Dist: langgraph<2.0.0,>=1.0.0
46
+ Requires-Dist: mcp<2.0.0,>=1.0.0
47
+ Requires-Dist: openinference-instrumentation-google-adk<1.0.0,>=0.1.0
48
+ Requires-Dist: openinference-instrumentation-guardrails<1.0.0,>=0.1.0
40
49
  Requires-Dist: openinference-instrumentation-langchain<1.0.0,>=0.1.13
50
+ Requires-Dist: openinference-instrumentation-mcp<2.0.0,>=1.0.0
51
+ Requires-Dist: openinference-instrumentation-vertexai<1.0.0,>=0.1.0
52
+ Requires-Dist: opentelemetry-exporter-gcp-trace<2.0.0,>=1.6.0
53
+ Requires-Dist: opentelemetry-exporter-otlp-proto-http<2.0.0,>=1.22.0
41
54
  Requires-Dist: pydantic<3.0.0,>=2.11.7
42
55
  Requires-Dist: python-dotenv>=1.1.1
43
56
  Requires-Dist: sqlalchemy<3.0.0,>=2.0.36
44
57
  Requires-Dist: streamlit<2.0.0,>=1.47.1
58
+ Requires-Dist: tavily-python<0.8.0,>=0.7.9
45
59
  Requires-Dist: uvicorn<0.36.0,>=0.35.0
46
60
  Description-Content-Type: text/markdown
47
61
 
@@ -214,6 +228,7 @@ agent:
214
228
  - `agent.config.graph_definition` (str): absolute or relative `path/to/file.py:variable`
215
229
  - `agent.config.checkpointer` (sqlite): `{ type: "sqlite", db_url: "sqlite:///file.db" }`
216
230
  - `agent.config.observability` (optional): provider options as shown above
231
+ - `mcp_servers` (list, optional): collection of MCP servers that should be available to your agent runtime. Each entry matches the fields supported by `langchain-mcp-adapters` (name, transport, url/command, headers, etc.).
217
232
 
218
233
  Config can be sourced by:
219
234
 
@@ -221,6 +236,43 @@ Config can be sourced by:
221
236
  - `config_dict`: dict validated at runtime
222
237
  - `config_path`: path to YAML; defaults to `config.yaml`
223
238
 
239
+ ### MCP Servers
240
+
241
+ You can mount MCP servers directly in your engine config. The engine will automatically
242
+ create a `MultiServerMCPClient` and expose it on `app.state.mcp_registry`.
243
+
244
+ ```yaml
245
+ mcp_servers:
246
+ - name: "math"
247
+ transport: "stdio"
248
+ command: "python"
249
+ args:
250
+ - "/path/to/math_server.py"
251
+ - name: "weather"
252
+ transport: "streamable_http"
253
+ url: "http://localhost:8000/mcp"
254
+ ```
255
+
256
+ Inside your FastAPI dependencies or handlers:
257
+
258
+ ```python
259
+ from idun_agent_engine.server.dependencies import get_mcp_registry
260
+
261
+ @router.get("/mcp/{server}/tools")
262
+ async def list_tools(server: str, registry = Depends(get_mcp_registry)):
263
+ return await registry.get_tools(server)
264
+ ```
265
+
266
+ Or outside of FastAPI:
267
+
268
+ ```python
269
+ from langchain_mcp_adapters.tools import load_mcp_tools
270
+
271
+ registry = app.state.mcp_registry
272
+ async with registry.get_session("math") as session:
273
+ tools = await load_mcp_tools(session)
274
+ ```
275
+
224
276
  ## Examples
225
277
 
226
278
  The `examples/` folder contains complete projects: