gitlab-api 25.14.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gitlab_api/__init__.py +78 -0
- gitlab_api/__main__.py +7 -0
- gitlab_api/decorators.py +20 -0
- gitlab_api/exceptions.py +42 -0
- gitlab_api/gitlab_agent.py +362 -0
- gitlab_api/gitlab_api.py +5142 -0
- gitlab_api/gitlab_gql.py +3390 -0
- gitlab_api/gitlab_input_models.py +3207 -0
- gitlab_api/gitlab_mcp.py +6772 -0
- gitlab_api/gitlab_response_models.py +3808 -0
- gitlab_api/mcp_config.json +7 -0
- gitlab_api/middlewares.py +121 -0
- gitlab_api/skills/gitlab-branches/SKILL.md +41 -0
- gitlab_api/skills/gitlab-commits/SKILL.md +104 -0
- gitlab_api/skills/gitlab-custom-api/SKILL.md +26 -0
- gitlab_api/skills/gitlab-deploy-tokens/SKILL.md +56 -0
- gitlab_api/skills/gitlab-environments/SKILL.md +80 -0
- gitlab_api/skills/gitlab-groups/SKILL.md +65 -0
- gitlab_api/skills/gitlab-jobs/SKILL.md +56 -0
- gitlab_api/skills/gitlab-members/SKILL.md +32 -0
- gitlab_api/skills/gitlab-merge-requests/SKILL.md +46 -0
- gitlab_api/skills/gitlab-merge-rules/SKILL.md +90 -0
- gitlab_api/skills/gitlab-packages/SKILL.md +37 -0
- gitlab_api/skills/gitlab-pipeline-schedules/SKILL.md +78 -0
- gitlab_api/skills/gitlab-pipelines/SKILL.md +35 -0
- gitlab_api/skills/gitlab-projects/SKILL.md +66 -0
- gitlab_api/skills/gitlab-protected-branches/SKILL.md +47 -0
- gitlab_api/skills/gitlab-releases/SKILL.md +78 -0
- gitlab_api/skills/gitlab-runners/SKILL.md +98 -0
- gitlab_api/skills/gitlab-tags/SKILL.md +58 -0
- gitlab_api/utils.py +110 -0
- gitlab_api-25.14.5.dist-info/METADATA +897 -0
- gitlab_api-25.14.5.dist-info/RECORD +46 -0
- gitlab_api-25.14.5.dist-info/WHEEL +5 -0
- gitlab_api-25.14.5.dist-info/entry_points.txt +3 -0
- gitlab_api-25.14.5.dist-info/licenses/LICENSE +20 -0
- gitlab_api-25.14.5.dist-info/top_level.txt +3 -0
- scripts/validate_a2a_agent.py +150 -0
- scripts/validate_agent.py +67 -0
- tests/test_gitlab_a2a_validation.py +164 -0
- tests/test_gitlab_api.py +208 -0
- tests/test_gitlab_mcp_validation.py +92 -0
- tests/test_gitlab_models.py +14046 -0
- tests/test_utils.py +1277 -0
- tests/test_verify_agent.py +213 -0
- tests/verify_a2a_queries.py +119 -0
gitlab_api/__init__.py
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
"""
GitLab API - A Python Wrapper for GitLab

Features are conditionally loaded based on installed extras:
- base: core API client and models
- [mcp]: FastMCP server and tools
- [a2a]: Agent-to-Agent multi-agent system with Graphiti knowledge graph
- [gql]: GraphQL support
"""

# gitlab_api/__init__.py
# NOTE: this text used to sit at the *end* of the module, where it was a
# discarded string expression rather than the module docstring; moved to the
# top so that help(gitlab_api) and __doc__ work.

import importlib
import inspect
from typing import List

__all__: List[str] = []

# Core modules – always available (part of base dependencies)
CORE_MODULES = [
    "gitlab_api.decorators",
    "gitlab_api.exceptions",
    "gitlab_api.gitlab_input_models",
    "gitlab_api.gitlab_response_models",
    "gitlab_api.gitlab_api",
    "gitlab_api.utils",
]

# Optional modules – only import if their dependencies are installed.
# Maps module path -> extra/dependency name used for the availability flag.
OPTIONAL_MODULES = {
    "gitlab_api.gitlab_gql": "gql",  # Requires gql
}


def _import_module_safely(module_name: str):
    """Try to import a module and return it, or None if not available."""
    try:
        return importlib.import_module(module_name)
    except ImportError:
        return None


def _expose_members(module):
    """Expose public classes and functions from a module into globals and __all__."""
    for name, obj in inspect.getmembers(module):
        if (inspect.isclass(obj) or inspect.isfunction(obj)) and not name.startswith(
            "_"
        ):
            globals()[name] = obj
            __all__.append(name)


# Always import core modules
for module_name in CORE_MODULES:
    module = importlib.import_module(module_name)
    _expose_members(module)

# Conditionally import optional modules, recording which ones actually
# imported so the availability flags below reflect reality.
_imported_optional = {}
for module_name, extra_name in OPTIONAL_MODULES.items():
    module = _import_module_safely(module_name)
    _imported_optional[module_name] = module is not None
    if module is not None:
        _expose_members(module)
    # Marker so users can check what's available, e.g. _GQL_AVAILABLE
    globals()[f"_{extra_name.upper()}_AVAILABLE"] = module is not None

# Availability flags.
# BUG FIX: the original derived these from OPTIONAL_MODULES.get(...) membership
# tests and from module *paths* in globals() (which holds member names, not
# module paths), so every flag — including _GQL_AVAILABLE, clobbering the value
# set in the loop above — was always False. They now come from actual import
# results; modules not listed in OPTIONAL_MODULES report False.
_MCP_AVAILABLE = _imported_optional.get("gitlab_api.gitlab_mcp", False)
_A2A_AVAILABLE = _imported_optional.get("gitlab_api.gitlab_agent", False)
_GQL_AVAILABLE = _imported_optional.get("gitlab_api.gitlab_gql", False)

__all__.extend(["_MCP_AVAILABLE", "_A2A_AVAILABLE", "_GQL_AVAILABLE"])
|
gitlab_api/__main__.py
ADDED
gitlab_api/decorators.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
#!/usr/bin/python
|
|
2
|
+
# coding: utf-8
|
|
3
|
+
|
|
4
|
+
import functools
|
|
5
|
+
from gitlab_api.exceptions import LoginRequiredError
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def require_auth(function):
    """Decorator that refuses to run an API call without auth headers.

    The wrapped method is only invoked when ``self.headers`` is truthy;
    otherwise :class:`LoginRequiredError` is raised.
    """

    @functools.wraps(function)
    def guarded(self, *call_args, **call_kwargs):
        # A missing/empty headers dict means no token was ever configured.
        if self.headers:
            return function(self, *call_args, **call_kwargs)
        raise LoginRequiredError

    return guarded
|
gitlab_api/exceptions.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
#!/usr/bin/python
|
|
2
|
+
# coding: utf-8
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class AuthError(Exception):
    """Base class for authentication-related errors."""
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class UnauthorizedError(AuthError):
    """Raised for unauthorized requests."""
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class MissingParameterError(Exception):
    """Raised when a required parameter was not supplied."""
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class ParameterError(Exception):
    """Raised when a supplied parameter is invalid."""
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
class LoginRequiredError(Exception):
    """Raised when an API call is attempted without logging in first."""
|
|
@@ -0,0 +1,362 @@
|
|
|
1
|
+
#!/usr/bin/python
|
|
2
|
+
# coding: utf-8
|
|
3
|
+
import json
|
|
4
|
+
import os
|
|
5
|
+
import argparse
|
|
6
|
+
import logging
|
|
7
|
+
import uvicorn
|
|
8
|
+
from contextlib import asynccontextmanager
|
|
9
|
+
from typing import Optional, Any, List
|
|
10
|
+
|
|
11
|
+
from fastmcp import Client
|
|
12
|
+
from pydantic_ai import Agent, ModelSettings
|
|
13
|
+
from pydantic_ai.mcp import load_mcp_servers
|
|
14
|
+
from pydantic_ai.toolsets.fastmcp import FastMCPToolset
|
|
15
|
+
from pydantic_ai_skills import SkillsToolset
|
|
16
|
+
from pydantic_ai.models.openai import OpenAIChatModel
|
|
17
|
+
from pydantic_ai.models.anthropic import AnthropicModel
|
|
18
|
+
from pydantic_ai.models.google import GoogleModel
|
|
19
|
+
from pydantic_ai.models.huggingface import HuggingFaceModel
|
|
20
|
+
from fasta2a import Skill
|
|
21
|
+
from gitlab_api.utils import (
|
|
22
|
+
to_integer,
|
|
23
|
+
to_boolean,
|
|
24
|
+
load_skills_from_directory,
|
|
25
|
+
get_skills_path,
|
|
26
|
+
get_mcp_config_path,
|
|
27
|
+
)
|
|
28
|
+
|
|
29
|
+
from fastapi import FastAPI, Request
|
|
30
|
+
from starlette.responses import Response, StreamingResponse
|
|
31
|
+
from pydantic import ValidationError
|
|
32
|
+
from pydantic_ai.ui import SSE_CONTENT_TYPE
|
|
33
|
+
from pydantic_ai.ui.ag_ui import AGUIAdapter
|
|
34
|
+
|
|
35
|
+
# Console logging for the agent and its framework/network dependencies.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    handlers=[logging.StreamHandler()],  # Output to console
)
logging.getLogger("pydantic_ai").setLevel(logging.INFO)
logging.getLogger("fastmcp").setLevel(logging.INFO)
logging.getLogger("httpx").setLevel(logging.INFO)
logger = logging.getLogger(__name__)


# Server/agent defaults, each overridable via an environment variable.
DEFAULT_HOST = os.getenv("HOST", "0.0.0.0")
DEFAULT_PORT = to_integer(string=os.getenv("PORT", "9000"))
DEFAULT_DEBUG = to_boolean(string=os.getenv("DEBUG", "False"))
DEFAULT_PROVIDER = os.getenv("PROVIDER", "openai")
DEFAULT_MODEL_ID = os.getenv("MODEL_ID", "qwen/qwen3-8b")
# Defaults target a local OpenAI-compatible endpoint (API key "ollama").
DEFAULT_OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "http://127.0.0.1:1234/v1")
DEFAULT_OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "ollama")
DEFAULT_MCP_URL = os.getenv("MCP_URL", None)
DEFAULT_MCP_CONFIG = os.getenv("MCP_CONFIG", get_mcp_config_path())
DEFAULT_SKILLS_DIRECTORY = os.getenv("SKILLS_DIRECTORY", get_skills_path())
DEFAULT_ENABLE_WEB_UI = to_boolean(os.getenv("ENABLE_WEB_UI", "False"))

# Identity and system prompt advertised by the agent.
AGENT_NAME = "GitLab"
AGENT_DESCRIPTION = "An agent built with Agent Skills and GitLab MCP tools to maximize GitLab interactivity."
AGENT_SYSTEM_PROMPT = (
    "You are the GitLab Agent.\n"
    "You have access to all GitLab skills and toolsets to interact with the API.\n"
    "Your responsibilities:\n"
    "1. Analyze the user's request.\n"
    "2. Identify the domain (e.g., branches, commits, MRs) and select the appropriate skills.\n"
    "3. Use the skills to reference the tools you will need to search for using the tool_search skill.\n"
    "4. If a complicated task requires multiple skills (e.g. 'check out branch X and verify the last commit'), "
    " orchestrate them sequentially: call the Branch skill, then the Commit skill.\n"
    "5. Always be warm, professional, and helpful.\n"
    "6. Explain your plan in detail before executing."
)
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def create_model(
    provider: str = DEFAULT_PROVIDER,
    model_id: str = DEFAULT_MODEL_ID,
    base_url: Optional[str] = DEFAULT_OPENAI_BASE_URL,
    api_key: Optional[str] = DEFAULT_OPENAI_API_KEY,
):
    """Build the pydantic-ai model object for the requested provider.

    Credentials are exported through the environment variables each
    provider SDK reads, then the provider-specific model class is returned.

    Args:
        provider: One of "openai", "anthropic", "google", "huggingface".
        model_id: Provider-specific model identifier.
        base_url: Base URL for OpenAI-compatible endpoints (openai only).
        api_key: API key/token for the provider.

    Raises:
        ValueError: If *provider* is not a supported provider name.
    """
    if provider == "openai":
        target_base_url = base_url or DEFAULT_OPENAI_BASE_URL
        target_api_key = api_key or DEFAULT_OPENAI_API_KEY
        if target_base_url:
            os.environ["OPENAI_BASE_URL"] = target_base_url
        if target_api_key:
            os.environ["OPENAI_API_KEY"] = target_api_key
        return OpenAIChatModel(model_id, provider="openai")

    elif provider == "anthropic":
        if api_key:
            os.environ["ANTHROPIC_API_KEY"] = api_key
        return AnthropicModel(model_id)

    elif provider == "google":
        if api_key:
            # Exported under both names, as the original did.
            os.environ["GEMINI_API_KEY"] = api_key
            os.environ["GOOGLE_API_KEY"] = api_key
        return GoogleModel(model_id)

    elif provider == "huggingface":
        if api_key:
            os.environ["HF_TOKEN"] = api_key
        return HuggingFaceModel(model_id)

    # BUG FIX: the original fell through and implicitly returned None for an
    # unknown provider, which later surfaced as a confusing Agent(model=None)
    # failure. Fail fast with an explicit error instead.
    raise ValueError(
        f"Unsupported provider: {provider!r} "
        "(expected one of: openai, anthropic, google, huggingface)"
    )
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
def create_agent(
    provider: str = DEFAULT_PROVIDER,
    model_id: str = DEFAULT_MODEL_ID,
    base_url: Optional[str] = None,
    api_key: Optional[str] = None,
    mcp_url: str = DEFAULT_MCP_URL,
    mcp_config: str = DEFAULT_MCP_CONFIG,
    skills_directory: Optional[str] = DEFAULT_SKILLS_DIRECTORY,
) -> Agent:
    """Assemble the GitLab Agent with its MCP toolsets and skills.

    Toolset resolution: an MCP config file takes precedence over a direct
    MCP server URL (they are mutually exclusive here); a skills directory,
    when it exists on disk, is added as an additional toolset.
    """
    agent_toolsets = []

    # Prefer a JSON MCP config; otherwise fall back to a single MCP URL.
    if mcp_config:
        mcp_toolset = load_mcp_servers(mcp_config)
        agent_toolsets.extend(mcp_toolset)
        logger.info(f"Connected to MCP Config JSON: {mcp_toolset}")
    elif mcp_url:
        fastmcp_toolset = FastMCPToolset(Client[Any](mcp_url, timeout=3600))
        agent_toolsets.append(fastmcp_toolset)
        logger.info(f"Connected to MCP Server: {mcp_url}")

    # Skills are only attached when the directory actually exists.
    if skills_directory and os.path.exists(skills_directory):
        logger.debug(f"Loading skills {skills_directory}")
        skills = SkillsToolset(directories=[str(skills_directory)])
        agent_toolsets.append(skills)
        logger.info(f"Loaded Skills at {skills_directory}")

    # Create the Model
    model = create_model(provider, model_id, base_url, api_key)

    logger.info("Initializing Agent...")

    # 1-hour model timeout, matching the MCP client timeout above.
    settings = ModelSettings(timeout=3600.0)

    return Agent(
        model=model,
        system_prompt=AGENT_SYSTEM_PROMPT,
        name="GitLab_Agent",
        toolsets=agent_toolsets,
        deps_type=Any,
        model_settings=settings,
    )
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
async def chat(agent: Agent, prompt: str):
    """Execute a single non-streaming agent run and print its final output."""
    result = await agent.run(prompt)
    message = f"Response:\n\n{result.output}"
    print(message)
|
|
152
|
+
|
|
153
|
+
|
|
154
|
+
async def node_chat(agent: Agent, prompt: str) -> List:
    """Iterate the agent's run graph, printing and collecting every node."""
    collected: List = []
    async with agent.iter(prompt) as run:
        async for graph_node in run:
            collected.append(graph_node)
            print(graph_node)
    return collected
|
|
161
|
+
|
|
162
|
+
|
|
163
|
+
async def stream_chat(agent: Agent, prompt: str) -> None:
    """Stream the agent's final text output to stdout as it is produced."""
    async with agent.run_stream(prompt) as stream:
        # delta=True yields only the newly generated text fragments.
        async for fragment in stream.stream_text(delta=True):
            print(fragment, end="", flush=True)
    print("\nDone!")  # optional
|
|
171
|
+
|
|
172
|
+
|
|
173
|
+
def create_agent_server(
    provider: str = DEFAULT_PROVIDER,
    model_id: str = DEFAULT_MODEL_ID,
    base_url: Optional[str] = None,
    api_key: Optional[str] = None,
    mcp_url: str = DEFAULT_MCP_URL,
    mcp_config: str = DEFAULT_MCP_CONFIG,
    skills_directory: Optional[str] = DEFAULT_SKILLS_DIRECTORY,
    debug: Optional[bool] = DEFAULT_DEBUG,
    host: Optional[str] = DEFAULT_HOST,
    port: Optional[int] = DEFAULT_PORT,
    enable_web_ui: bool = DEFAULT_ENABLE_WEB_UI,
):
    """Create the agent, wrap it in a FastAPI app and serve it with uvicorn.

    Exposes the A2A protocol at /a2a, the AG-UI SSE endpoint at /ag-ui and,
    optionally, the pydantic-ai Web UI at /. Blocks until the server stops.
    """
    print(
        f"Starting {AGENT_NAME} with provider={provider}, model={model_id}, mcp={mcp_url} | {mcp_config}"
    )
    agent = create_agent(
        provider=provider,
        model_id=model_id,
        base_url=base_url,
        api_key=api_key,
        mcp_url=mcp_url,
        mcp_config=mcp_config,
        skills_directory=skills_directory,
    )

    # Define Skills for Agent Card (High-level capabilities)
    if skills_directory and os.path.exists(skills_directory):
        skills = load_skills_from_directory(skills_directory)
        logger.info(f"Loaded {len(skills)} skills from {skills_directory}")
    else:
        # Fallback: a single catch-all skill so the Agent Card is never empty.
        skills = [
            Skill(
                id="gitlab_agent",
                name="GitLab Agent",
                description="This GitLab skill grants access to all GitLab tools provided by the GitLab MCP Server",
                tags=["gitlab"],
                input_modes=["text"],
                output_modes=["text"],
            )
        ]
    # Create A2A app explicitly before main app to bind lifespan
    a2a_app = agent.to_a2a(
        name=AGENT_NAME,
        description=AGENT_DESCRIPTION,
        version="25.14.5",
        skills=skills,
        debug=debug,
    )

    @asynccontextmanager
    async def lifespan(app: FastAPI):
        # Trigger A2A (sub-app) startup/shutdown events.
        # This is critical for TaskManager initialization in A2A.
        if hasattr(a2a_app, "router"):
            async with a2a_app.router.lifespan_context(a2a_app):
                yield
        else:
            yield

    # Create main FastAPI app
    app = FastAPI(
        title=f"{AGENT_NAME} - A2A + AG-UI Server",
        description=AGENT_DESCRIPTION,
        debug=debug,
        lifespan=lifespan,
    )

    # Mount A2A as sub-app at /a2a
    app.mount("/a2a", a2a_app)

    # Add AG-UI endpoint (POST to /ag-ui)
    @app.post("/ag-ui")
    async def ag_ui_endpoint(request: Request) -> Response:
        accept = request.headers.get("accept", SSE_CONTENT_TYPE)
        try:
            # Parse incoming AG-UI RunAgentInput from request body
            run_input = AGUIAdapter.build_run_input(await request.body())
        except ValidationError as e:
            # BUG FIX: ValidationError.json() already returns a JSON string;
            # the original wrapped it in json.dumps(), double-encoding the
            # 422 body into a single quoted string literal.
            return Response(
                content=e.json(),
                media_type="application/json",
                status_code=422,
            )

        # Create adapter and run the agent → stream AG-UI events
        adapter = AGUIAdapter(agent=agent, run_input=run_input, accept=accept)
        event_stream = adapter.run_stream()  # Runs agent, yields events
        sse_stream = adapter.encode_stream(event_stream)  # Encodes to SSE

        return StreamingResponse(
            sse_stream,
            media_type=accept,
        )

    # Mount Web UI if enabled
    if enable_web_ui:
        web_ui = agent.to_web(instructions=AGENT_SYSTEM_PROMPT)
        app.mount("/", web_ui)
    logger.info(
        "Starting server on %s:%s (A2A at /a2a, AG-UI at /ag-ui, Web UI: %s)",
        host,
        port,
        "Enabled at /" if enable_web_ui else "Disabled",
    )

    uvicorn.run(
        app,
        host=host,
        port=port,
        timeout_keep_alive=1800,  # 30 minute timeout
        timeout_graceful_shutdown=60,
        log_level="debug" if debug else "info",
    )
|
|
287
|
+
|
|
288
|
+
|
|
289
|
+
def agent_server():
    """CLI entry point: parse arguments and launch the A2A + AG-UI server."""
    parser = argparse.ArgumentParser(
        description=f"Run the {AGENT_NAME} A2A + AG-UI Server"
    )
    parser.add_argument(
        "--host", default=DEFAULT_HOST, help="Host to bind the server to"
    )
    parser.add_argument(
        "--port", type=int, default=DEFAULT_PORT, help="Port to bind the server to"
    )
    # BUG FIX: argparse `type=bool` treats ANY non-empty string as True, so
    # `--debug False` enabled debug mode. Reuse the string->bool helper that
    # already parses the DEBUG environment variable; CLI syntax is unchanged.
    parser.add_argument(
        "--debug", type=to_boolean, default=DEFAULT_DEBUG, help="Debug mode"
    )
    # NOTE(review): --reload is accepted but never forwarded to uvicorn.
    parser.add_argument("--reload", action="store_true", help="Enable auto-reload")

    parser.add_argument(
        "--provider",
        default=DEFAULT_PROVIDER,
        choices=["openai", "anthropic", "google", "huggingface"],
        help="LLM Provider",
    )
    parser.add_argument("--model-id", default=DEFAULT_MODEL_ID, help="LLM Model ID")
    parser.add_argument(
        "--base-url",
        default=DEFAULT_OPENAI_BASE_URL,
        help="LLM Base URL (for OpenAI compatible providers)",
    )
    parser.add_argument("--api-key", default=DEFAULT_OPENAI_API_KEY, help="LLM API Key")
    parser.add_argument("--mcp-url", default=DEFAULT_MCP_URL, help="MCP Server URL")
    parser.add_argument(
        "--mcp-config", default=DEFAULT_MCP_CONFIG, help="MCP Server Config"
    )
    parser.add_argument(
        "--web",
        action="store_true",
        default=DEFAULT_ENABLE_WEB_UI,
        help="Enable Pydantic AI Web UI",
    )
    args = parser.parse_args()

    if args.debug:
        # Force reconfiguration of logging
        for handler in logging.root.handlers[:]:
            logging.root.removeHandler(handler)

        logging.basicConfig(
            level=logging.DEBUG,
            format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
            handlers=[logging.StreamHandler()],  # Output to console
            force=True,
        )
        logging.getLogger("pydantic_ai").setLevel(logging.DEBUG)
        logging.getLogger("fastmcp").setLevel(logging.DEBUG)
        logging.getLogger("httpcore").setLevel(logging.DEBUG)
        logging.getLogger("httpx").setLevel(logging.DEBUG)
        logger.setLevel(logging.DEBUG)
        logger.debug("Debug mode enabled")

    # Create the agent server with CLI args
    create_agent_server(
        provider=args.provider,
        model_id=args.model_id,
        base_url=args.base_url,
        api_key=args.api_key,
        mcp_url=args.mcp_url,
        mcp_config=args.mcp_config,
        debug=args.debug,
        host=args.host,
        port=args.port,
        enable_web_ui=args.web,
    )
|
|
359
|
+
|
|
360
|
+
|
|
361
|
+
# Script entry point: delegates to the CLI wrapper defined above.
if __name__ == "__main__":
    agent_server()
|