datarobot-genai 0.2.31 (datarobot_genai-0.2.31-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (125)
  1. datarobot_genai/__init__.py +19 -0
  2. datarobot_genai/core/__init__.py +0 -0
  3. datarobot_genai/core/agents/__init__.py +43 -0
  4. datarobot_genai/core/agents/base.py +195 -0
  5. datarobot_genai/core/chat/__init__.py +19 -0
  6. datarobot_genai/core/chat/auth.py +146 -0
  7. datarobot_genai/core/chat/client.py +178 -0
  8. datarobot_genai/core/chat/responses.py +297 -0
  9. datarobot_genai/core/cli/__init__.py +18 -0
  10. datarobot_genai/core/cli/agent_environment.py +47 -0
  11. datarobot_genai/core/cli/agent_kernel.py +211 -0
  12. datarobot_genai/core/custom_model.py +141 -0
  13. datarobot_genai/core/mcp/__init__.py +0 -0
  14. datarobot_genai/core/mcp/common.py +218 -0
  15. datarobot_genai/core/telemetry_agent.py +126 -0
  16. datarobot_genai/core/utils/__init__.py +3 -0
  17. datarobot_genai/core/utils/auth.py +234 -0
  18. datarobot_genai/core/utils/urls.py +64 -0
  19. datarobot_genai/crewai/__init__.py +24 -0
  20. datarobot_genai/crewai/agent.py +42 -0
  21. datarobot_genai/crewai/base.py +159 -0
  22. datarobot_genai/crewai/events.py +117 -0
  23. datarobot_genai/crewai/mcp.py +59 -0
  24. datarobot_genai/drmcp/__init__.py +78 -0
  25. datarobot_genai/drmcp/core/__init__.py +13 -0
  26. datarobot_genai/drmcp/core/auth.py +165 -0
  27. datarobot_genai/drmcp/core/clients.py +180 -0
  28. datarobot_genai/drmcp/core/config.py +364 -0
  29. datarobot_genai/drmcp/core/config_utils.py +174 -0
  30. datarobot_genai/drmcp/core/constants.py +18 -0
  31. datarobot_genai/drmcp/core/credentials.py +190 -0
  32. datarobot_genai/drmcp/core/dr_mcp_server.py +350 -0
  33. datarobot_genai/drmcp/core/dr_mcp_server_logo.py +136 -0
  34. datarobot_genai/drmcp/core/dynamic_prompts/__init__.py +13 -0
  35. datarobot_genai/drmcp/core/dynamic_prompts/controllers.py +130 -0
  36. datarobot_genai/drmcp/core/dynamic_prompts/dr_lib.py +70 -0
  37. datarobot_genai/drmcp/core/dynamic_prompts/register.py +205 -0
  38. datarobot_genai/drmcp/core/dynamic_prompts/utils.py +33 -0
  39. datarobot_genai/drmcp/core/dynamic_tools/__init__.py +14 -0
  40. datarobot_genai/drmcp/core/dynamic_tools/deployment/__init__.py +0 -0
  41. datarobot_genai/drmcp/core/dynamic_tools/deployment/adapters/__init__.py +14 -0
  42. datarobot_genai/drmcp/core/dynamic_tools/deployment/adapters/base.py +72 -0
  43. datarobot_genai/drmcp/core/dynamic_tools/deployment/adapters/default.py +82 -0
  44. datarobot_genai/drmcp/core/dynamic_tools/deployment/adapters/drum.py +238 -0
  45. datarobot_genai/drmcp/core/dynamic_tools/deployment/config.py +228 -0
  46. datarobot_genai/drmcp/core/dynamic_tools/deployment/controllers.py +63 -0
  47. datarobot_genai/drmcp/core/dynamic_tools/deployment/metadata.py +162 -0
  48. datarobot_genai/drmcp/core/dynamic_tools/deployment/register.py +87 -0
  49. datarobot_genai/drmcp/core/dynamic_tools/deployment/schemas/drum_agentic_fallback_schema.json +36 -0
  50. datarobot_genai/drmcp/core/dynamic_tools/deployment/schemas/drum_prediction_fallback_schema.json +10 -0
  51. datarobot_genai/drmcp/core/dynamic_tools/register.py +254 -0
  52. datarobot_genai/drmcp/core/dynamic_tools/schema.py +532 -0
  53. datarobot_genai/drmcp/core/exceptions.py +25 -0
  54. datarobot_genai/drmcp/core/logging.py +98 -0
  55. datarobot_genai/drmcp/core/mcp_instance.py +515 -0
  56. datarobot_genai/drmcp/core/memory_management/__init__.py +13 -0
  57. datarobot_genai/drmcp/core/memory_management/manager.py +820 -0
  58. datarobot_genai/drmcp/core/memory_management/memory_tools.py +201 -0
  59. datarobot_genai/drmcp/core/routes.py +439 -0
  60. datarobot_genai/drmcp/core/routes_utils.py +30 -0
  61. datarobot_genai/drmcp/core/server_life_cycle.py +107 -0
  62. datarobot_genai/drmcp/core/telemetry.py +424 -0
  63. datarobot_genai/drmcp/core/tool_config.py +111 -0
  64. datarobot_genai/drmcp/core/tool_filter.py +117 -0
  65. datarobot_genai/drmcp/core/utils.py +138 -0
  66. datarobot_genai/drmcp/server.py +19 -0
  67. datarobot_genai/drmcp/test_utils/__init__.py +13 -0
  68. datarobot_genai/drmcp/test_utils/clients/__init__.py +0 -0
  69. datarobot_genai/drmcp/test_utils/clients/anthropic.py +68 -0
  70. datarobot_genai/drmcp/test_utils/clients/base.py +300 -0
  71. datarobot_genai/drmcp/test_utils/clients/dr_gateway.py +58 -0
  72. datarobot_genai/drmcp/test_utils/clients/openai.py +68 -0
  73. datarobot_genai/drmcp/test_utils/elicitation_test_tool.py +89 -0
  74. datarobot_genai/drmcp/test_utils/integration_mcp_server.py +109 -0
  75. datarobot_genai/drmcp/test_utils/mcp_utils_ete.py +133 -0
  76. datarobot_genai/drmcp/test_utils/mcp_utils_integration.py +107 -0
  77. datarobot_genai/drmcp/test_utils/test_interactive.py +205 -0
  78. datarobot_genai/drmcp/test_utils/tool_base_ete.py +220 -0
  79. datarobot_genai/drmcp/test_utils/utils.py +91 -0
  80. datarobot_genai/drmcp/tools/__init__.py +14 -0
  81. datarobot_genai/drmcp/tools/clients/__init__.py +14 -0
  82. datarobot_genai/drmcp/tools/clients/atlassian.py +188 -0
  83. datarobot_genai/drmcp/tools/clients/confluence.py +584 -0
  84. datarobot_genai/drmcp/tools/clients/gdrive.py +832 -0
  85. datarobot_genai/drmcp/tools/clients/jira.py +334 -0
  86. datarobot_genai/drmcp/tools/clients/microsoft_graph.py +479 -0
  87. datarobot_genai/drmcp/tools/clients/s3.py +28 -0
  88. datarobot_genai/drmcp/tools/confluence/__init__.py +14 -0
  89. datarobot_genai/drmcp/tools/confluence/tools.py +321 -0
  90. datarobot_genai/drmcp/tools/gdrive/__init__.py +0 -0
  91. datarobot_genai/drmcp/tools/gdrive/tools.py +347 -0
  92. datarobot_genai/drmcp/tools/jira/__init__.py +14 -0
  93. datarobot_genai/drmcp/tools/jira/tools.py +243 -0
  94. datarobot_genai/drmcp/tools/microsoft_graph/__init__.py +13 -0
  95. datarobot_genai/drmcp/tools/microsoft_graph/tools.py +198 -0
  96. datarobot_genai/drmcp/tools/predictive/__init__.py +27 -0
  97. datarobot_genai/drmcp/tools/predictive/data.py +133 -0
  98. datarobot_genai/drmcp/tools/predictive/deployment.py +91 -0
  99. datarobot_genai/drmcp/tools/predictive/deployment_info.py +392 -0
  100. datarobot_genai/drmcp/tools/predictive/model.py +148 -0
  101. datarobot_genai/drmcp/tools/predictive/predict.py +254 -0
  102. datarobot_genai/drmcp/tools/predictive/predict_realtime.py +307 -0
  103. datarobot_genai/drmcp/tools/predictive/project.py +90 -0
  104. datarobot_genai/drmcp/tools/predictive/training.py +661 -0
  105. datarobot_genai/langgraph/__init__.py +0 -0
  106. datarobot_genai/langgraph/agent.py +341 -0
  107. datarobot_genai/langgraph/mcp.py +73 -0
  108. datarobot_genai/llama_index/__init__.py +16 -0
  109. datarobot_genai/llama_index/agent.py +50 -0
  110. datarobot_genai/llama_index/base.py +299 -0
  111. datarobot_genai/llama_index/mcp.py +79 -0
  112. datarobot_genai/nat/__init__.py +0 -0
  113. datarobot_genai/nat/agent.py +275 -0
  114. datarobot_genai/nat/datarobot_auth_provider.py +110 -0
  115. datarobot_genai/nat/datarobot_llm_clients.py +318 -0
  116. datarobot_genai/nat/datarobot_llm_providers.py +130 -0
  117. datarobot_genai/nat/datarobot_mcp_client.py +266 -0
  118. datarobot_genai/nat/helpers.py +87 -0
  119. datarobot_genai/py.typed +0 -0
  120. datarobot_genai-0.2.31.dist-info/METADATA +145 -0
  121. datarobot_genai-0.2.31.dist-info/RECORD +125 -0
  122. datarobot_genai-0.2.31.dist-info/WHEEL +4 -0
  123. datarobot_genai-0.2.31.dist-info/entry_points.txt +5 -0
  124. datarobot_genai-0.2.31.dist-info/licenses/AUTHORS +2 -0
  125. datarobot_genai-0.2.31.dist-info/licenses/LICENSE +201 -0
datarobot_genai/drmcp/core/tool_filter.py
@@ -0,0 +1,117 @@
+ # Copyright 2025 DataRobot, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ from fastmcp.tools import Tool
+ from mcp.types import Tool as MCPTool
+
+
+ def filter_tools_by_tags(
+     tools: list[Tool | MCPTool],
+     tags: list[str] | None = None,
+     match_all: bool = False,
+ ) -> list[Tool | MCPTool]:
+     """
+     Filter tools by tags.
+
+     Args:
+         tools: List of tools to filter
+         tags: List of tags to filter by. If None, returns all tools
+         match_all: If True, tool must have all specified tags. If False, tool must have at least
+             one tag.
+
+     Returns
+     -------
+     List of tools that match the tag criteria
+     """
+     if not tags:
+         return tools
+
+     filtered_tools = []
+
+     for tool in tools:
+         tool_tags = get_tool_tags(tool)
+
+         if not tool_tags:
+             continue
+
+         if match_all:
+             # Tool must have all specified tags
+             if all(tag in tool_tags for tag in tags):
+                 filtered_tools.append(tool)
+         elif any(tag in tool_tags for tag in tags):
+             # Tool must have at least one specified tag
+             filtered_tools.append(tool)
+
+     return filtered_tools
+
+
+ def get_tool_tags(tool: Tool | MCPTool) -> list[str]:
+     """
+     Get tags for a specific tool.
+
+     Args:
+         tool: The tool to get tags for
+
+     Returns
+     -------
+     List of tags for the tool
+     """
+     # Primary: native FastMCP meta location
+     if hasattr(tool, "meta") and getattr(tool, "meta"):
+         fastmcp_meta = tool.meta.get("_fastmcp", {})
+         meta_tags = fastmcp_meta.get("tags", [])
+         if isinstance(meta_tags, list):
+             return meta_tags
+
+     # Fallback: annotations.tags (for compatibility during transition)
+     if tool.annotations and hasattr(tool.annotations, "tags"):
+         tags = getattr(tool.annotations, "tags", [])
+         return tags if isinstance(tags, list) else []
+
+     return []
+
+
+ def list_all_tags(tools: list[Tool | MCPTool]) -> list[str]:
+     """
+     Get all unique tags from a list of tools.
+
+     Args:
+         tools: List of tools to extract tags from
+
+     Returns
+     -------
+     List of unique tags
+     """
+     all_tags = set()
+     for tool in tools:
+         tool_tags = get_tool_tags(tool)
+         all_tags.update(tool_tags)
+
+     return sorted(list(all_tags))
+
+
+ def get_tools_by_tag(tools: list[Tool | MCPTool], tag: str) -> list[Tool | MCPTool]:
+     """
+     Get all tools that have a specific tag.
+
+     Args:
+         tools: List of tools to search
+         tag: The tag to search for
+
+     Returns
+     -------
+     List of tools with the specified tag
+     """
+     return filter_tools_by_tags(tools, [tag])
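
The four helpers compose: `get_tools_by_tag(tools, tag)` is just `filter_tools_by_tags(tools, [tag])`, and `list_all_tags` aggregates whatever `get_tool_tags` recovers from either the native FastMCP `meta["_fastmcp"]["tags"]` slot or the legacy `annotations.tags` fallback. A minimal usage sketch, not part of the package: the `session` object and the tag names are assumptions, and the import path follows the `drmcp/core/tool_filter.py` entry in the file list above.

```python
# Hypothetical sketch: pick tools exposed by an MCP server by tag.
# Assumes `session` is an already-initialized mcp.ClientSession and that the
# helpers are importable from datarobot_genai.drmcp.core.tool_filter.
from mcp import ClientSession

from datarobot_genai.drmcp.core.tool_filter import (
    filter_tools_by_tags,
    get_tools_by_tag,
    list_all_tags,
)


async def select_tools(session: ClientSession):
    tools = (await session.list_tools()).tools
    any_match = filter_tools_by_tags(tools, tags=["confluence", "jira"])  # at least one tag
    all_match = filter_tools_by_tags(tools, tags=["jira", "read"], match_all=True)  # both tags
    return any_match, all_match, get_tools_by_tag(tools, "predict"), list_all_tags(tools)
```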
datarobot_genai/drmcp/core/utils.py
@@ -0,0 +1,138 @@
+ # Copyright 2025 DataRobot, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ import base64
+ import uuid
+ from typing import Any
+ from urllib.parse import urlparse
+
+ import boto3
+ from fastmcp.resources import HttpResource
+ from fastmcp.tools.tool import ToolResult
+ from pydantic import BaseModel
+
+ from .constants import MAX_INLINE_SIZE
+ from .mcp_instance import mcp
+
+
+ def generate_presigned_url(bucket: str, key: str, expires_in: int = 2592000) -> str:
+     """
+     Generate a presigned S3 URL for the given bucket and key.
+     Args:
+         bucket (str): S3 bucket name.
+         key (str): S3 object key.
+         expires_in (int): Expiration in seconds (default 30 days).
+
+     Returns
+     -------
+     str: Presigned S3 URL for get_object.
+     """
+     s3 = boto3.client("s3")
+     result = s3.generate_presigned_url(
+         "get_object", Params={"Bucket": bucket, "Key": key}, ExpiresIn=expires_in
+     )
+     return str(result)
+
+
+ class PredictionResponse(BaseModel):
+     type: str
+     data: str | None = None
+     resource_id: str | None = None
+     s3_url: str | None = None
+     show_explanations: bool | None = None
+
+
+ def predictions_result_response(
+     df: Any, bucket: str, key: str, resource_name: str, show_explanations: bool = False
+ ) -> PredictionResponse:
+     csv_str = df.to_csv(index=False)
+     if len(csv_str.encode("utf-8")) < MAX_INLINE_SIZE:
+         return PredictionResponse(type="inline", data=csv_str, show_explanations=show_explanations)
+     else:
+         resource = save_df_to_s3_and_register_resource(df, bucket, key, resource_name)
+         return PredictionResponse(
+             type="resource",
+             resource_id=str(resource.uri),
+             s3_url=resource.url,
+             show_explanations=show_explanations,
+         )
+
+
+ def save_df_to_s3_and_register_resource(
+     df: Any, bucket: str, key: str, resource_name: str, mime_type: str = "text/csv"
+ ) -> HttpResource:
+     """
+     Save a DataFrame to a temp CSV, upload to S3, register as a resource, and return the
+     presigned URL.
+     Args:
+         df (pd.DataFrame): DataFrame to save and upload.
+         bucket (str): S3 bucket name.
+         key (str): S3 object key.
+         resource_name (str): Name for the registered resource.
+         mime_type (str): MIME type for the resource (default 'text/csv').
+
+     Returns
+     -------
+     str: Presigned S3 URL for the uploaded file.
+     """
+     temp_csv = f"/tmp/{uuid.uuid4()}.csv"
+     df.to_csv(temp_csv, index=False)
+     s3 = boto3.client("s3")
+     s3.upload_file(temp_csv, bucket, key)
+     s3_url = generate_presigned_url(bucket, key)
+     resource = HttpResource(
+         uri="predictions://" + uuid.uuid4().hex,  # type: ignore[arg-type]
+         url=s3_url,
+         name=resource_name,
+         mime_type=mime_type,
+     )
+     mcp.add_resource(resource)
+     return resource
+
+
+ def format_response_as_tool_result(data: bytes, content_type: str, charset: str) -> ToolResult:
+     """Format the deployment response into a ToolResult.
+
+     Using structured_content, to return as much information about
+     the response as possible, for LLMs to correctly interpret the
+     response.
+     """
+     charset = charset or "utf-8"
+     content_type = content_type.lower() if content_type else ""
+
+     if content_type.startswith("text/") or content_type == "application/json":
+         payload = {
+             "type": "text",
+             "mime_type": content_type,
+             "data": data.decode(charset),
+         }
+     elif content_type.startswith("image/"):
+         payload = {
+             "type": "image",
+             "mime_type": content_type,
+             "data_base64": base64.b64encode(data).decode(charset),
+         }
+     else:
+         payload = {
+             "type": "binary",
+             "mime_type": content_type,
+             "data_base64": base64.b64encode(data).decode(charset),
+         }
+
+     return ToolResult(structured_content=payload)
+
+
+ def is_valid_url(url: str) -> bool:
+     """Check if a URL is valid."""
+     result = urlparse(url)
+     return all([result.scheme, result.netloc])
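
For small results `predictions_result_response` keeps the CSV inline and never touches S3; only frames whose CSV exceeds `MAX_INLINE_SIZE` are uploaded and registered as an `HttpResource`. A minimal sketch, not part of the package: the bucket and key are placeholders (unused on the inline path), and the two-row frame is assumed to fall under `MAX_INLINE_SIZE`.

```python
# Hypothetical sketch: exercise the inline branch and the ToolResult formatter.
import pandas as pd

from datarobot_genai.drmcp.core.utils import (
    format_response_as_tool_result,
    is_valid_url,
    predictions_result_response,
)

df = pd.DataFrame({"row_id": [1, 2], "prediction": [0.12, 0.87]})
resp = predictions_result_response(
    df, bucket="example-bucket", key="preds.csv", resource_name="preds"
)
assert resp.type == "inline" and resp.data is not None  # tiny CSV stays inline, no S3 call

# text/* and application/json payloads are decoded; other types are base64-encoded.
result = format_response_as_tool_result(b'{"ok": true}', "application/json", "utf-8")
assert is_valid_url("https://app.datarobot.com")
```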
datarobot_genai/drmcp/server.py
@@ -0,0 +1,19 @@
+ # Copyright 2025 DataRobot, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from datarobot_genai.drmcp import create_mcp_server
+
+ if __name__ == "__main__":
+     server = create_mcp_server()
+     server.run(show_banner=True)
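
Because the launcher is guarded by `if __name__ == "__main__":`, it only starts the server when the module is executed directly; the same two calls can be embedded elsewhere. A minimal sketch, not part of the package (`show_banner=False` is simply the inverse of the flag the launcher passes):

```python
# Hypothetical sketch: embed the MCP server instead of running the module as a script.
from datarobot_genai.drmcp import create_mcp_server

server = create_mcp_server()
server.run(show_banner=False)  # blocks until the server shuts down
```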
datarobot_genai/drmcp/test_utils/__init__.py
@@ -0,0 +1,13 @@
+ # Copyright 2025 DataRobot, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
datarobot_genai/drmcp/test_utils/clients/__init__.py
File without changes
datarobot_genai/drmcp/test_utils/clients/anthropic.py
@@ -0,0 +1,68 @@
+ # Copyright 2026 DataRobot, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Anthropic LLM MCP Client implementation (example).
+
+ This is an example implementation showing how easy it is to add a new LLM provider.
+ Anthropic's API is OpenAI-compatible, so we can use the OpenAI SDK with their endpoint.
+ """
+
+ import openai
+
+ from .base import BaseLLMMCPClient
+
+
+ class AnthropicMCPClient(BaseLLMMCPClient):
+     """
+     Client for interacting with LLMs via MCP using Anthropic Claude.
+
+     Note: Elicitation is handled at the protocol level by FastMCP's ctx.elicit().
+     Tools using FastMCP's built-in elicitation will work automatically.
+
+     Example:
+         ```python
+         config = {
+             "anthropic_api_key": "sk-ant-...",
+             "model": "claude-3-5-sonnet-20241022",
+         }
+         client = AnthropicMCPClient(str(config))
+         ```
+     """
+
+     def __init__(
+         self,
+         config: str | dict,
+     ):
+         """
+         Initialize the LLM MCP client.
+
+         Args:
+             config: Configuration string or dict with:
+                 - anthropic_api_key: Anthropic API key
+                 - model: Model name (default: "claude-3-5-sonnet-20241022")
+                 - save_llm_responses: Whether to save responses (default: True)
+         """
+         super().__init__(config)
+
+     def _create_llm_client(self, config_dict: dict) -> tuple[openai.OpenAI, str]:
+         """Create the LLM client for Anthropic (OpenAI-compatible endpoint)."""
+         anthropic_api_key = config_dict.get("anthropic_api_key")
+         model = config_dict.get("model", "claude-3-5-sonnet-20241022")
+
+         # Anthropic provides an OpenAI-compatible endpoint
+         client = openai.OpenAI(
+             api_key=anthropic_api_key,
+             base_url="https://api.anthropic.com/v1",
+         )
+         return client, model
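
`_parse_config` on the base class (next diff) accepts either a JSON string or a Python dict literal, so the docstring's `str(config)` goes through the `literal_eval` fallback while `json.dumps(config)` takes the primary JSON path; a plain dict is passed through untouched. A minimal construction sketch, not part of the package: the API key is a placeholder, and no network call happens until a prompt is processed.

```python
# Hypothetical sketch: equivalent ways to construct the test client.
import json

from datarobot_genai.drmcp.test_utils.clients.anthropic import AnthropicMCPClient

config = {
    "anthropic_api_key": "sk-ant-placeholder",
    "model": "claude-3-5-sonnet-20241022",
    "save_llm_responses": False,
}
client = AnthropicMCPClient(json.dumps(config))  # JSON string -> json.loads path
client_alt = AnthropicMCPClient(config)          # plain dict is accepted as-is
```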
datarobot_genai/drmcp/test_utils/clients/base.py
@@ -0,0 +1,300 @@
+ # Copyright 2026 DataRobot, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Base classes for LLM MCP clients."""
+
+ import json
+ from abc import ABC
+ from abc import abstractmethod
+ from ast import literal_eval
+ from typing import Any
+
+ import openai
+ from mcp import ClientSession
+ from mcp.types import CallToolResult
+ from mcp.types import ListToolsResult
+ from mcp.types import TextContent
+ from openai.types.chat.chat_completion import ChatCompletion
+
+ from ..utils import save_response_to_file
+
+
+ class ToolCall:
+     """Represents a tool call with its parameters and reasoning."""
+
+     def __init__(self, tool_name: str, parameters: dict[str, Any], reasoning: str):
+         self.tool_name = tool_name
+         self.parameters = parameters
+         self.reasoning = reasoning
+
+
+ class LLMResponse:
+     """Represents an LLM response with content and tool calls."""
+
+     def __init__(self, content: str, tool_calls: list[ToolCall], tool_results: list[str]):
+         self.content = content
+         self.tool_calls = tool_calls
+         self.tool_results = tool_results
+
+
+ class BaseLLMMCPClient(ABC):
+     """
+     Base class for LLM MCP clients.
+
+     Note: Elicitation is handled at the protocol level by FastMCP's ctx.elicit().
+     Tools using FastMCP's built-in elicitation will work automatically.
+     """
+
+     def __init__(
+         self,
+         config: str | dict,
+     ):
+         """
+         Initialize the LLM MCP client.
+
+         Args:
+             config: Configuration string or dict with provider-specific keys.
+         """
+         config_dict = self._parse_config(config)
+         self.openai_client, self.model = self._create_llm_client(config_dict)
+         self.save_llm_responses = config_dict.get("save_llm_responses", True)
+         self.available_tools: list[dict[str, Any]] = []
+         self.available_prompts: list[dict[str, Any]] = []
+         self.available_resources: list[dict[str, Any]] = []
+
+     @staticmethod
+     def _parse_config(config: str | dict) -> dict:
+         """Parse config string to dict."""
+         if isinstance(config, str):
+             # Try JSON first (safer), fall back to literal_eval for Python dict strings
+             try:
+                 return json.loads(config)
+             except json.JSONDecodeError:
+                 # Fall back to literal_eval for Python dict literal strings
+                 return literal_eval(config)
+         return config
+
+     @abstractmethod
+     def _create_llm_client(
+         self, config_dict: dict
+     ) -> tuple[openai.OpenAI | openai.AzureOpenAI, str]:
+         """
+         Create the LLM client.
+
+         Args:
+             config_dict: Parsed configuration dictionary
+
+         Returns
+         -------
+         Tuple of (LLM client instance, model name)
+         """
+         pass
+
+     async def _add_mcp_tool_to_available_tools(self, mcp_session: ClientSession) -> None:
+         """Add a tool to the available tools."""
+         tools_result: ListToolsResult = await mcp_session.list_tools()
+         self.available_tools = [
+             {
+                 "type": "function",
+                 "function": {
+                     "name": tool.name,
+                     "description": tool.description,
+                     "parameters": tool.inputSchema,
+                 },
+             }
+             for tool in tools_result.tools
+         ]
+
+     async def _call_mcp_tool(
+         self, tool_name: str, parameters: dict[str, Any], mcp_session: ClientSession
+     ) -> str:
+         """
+         Call an MCP tool and return the result as a string.
+
+         Note: Elicitation is handled at the protocol level by FastMCP's ctx.elicit().
+         Tools using FastMCP's built-in elicitation will work automatically.
+
+         Args:
+             tool_name: Name of the tool to call
+             parameters: Parameters to pass to the tool
+             mcp_session: MCP client session
+
+         Returns
+         -------
+         Result text from the tool call
+         """
+         result: CallToolResult = await mcp_session.call_tool(tool_name, parameters)
+         content = (
+             result.content[0].text
+             if result.content and isinstance(result.content[0], TextContent)
+             else str(result.content)
+         )
+         if result.structuredContent is not None:
+             structured_content = json.dumps(result.structuredContent)
+         else:
+             structured_content = ""
+         return f"Content: {content}\nStructured content: {structured_content}"
+
+     async def _process_tool_calls(
+         self,
+         response: ChatCompletion,
+         messages: list[Any],
+         mcp_session: ClientSession,
+     ) -> tuple[list[ToolCall], list[str]]:
+         """Process tool calls from the response, and return the tool calls and tool results."""
+         tool_calls = []
+         tool_results = []
+
+         # If the response has tool calls, process them
+         if response.choices[0].message.tool_calls:
+             messages.append(response.choices[0].message)  # Add assistant's message with tool calls
+
+             for tool_call in response.choices[0].message.tool_calls:
+                 tool_name = tool_call.function.name  # type: ignore[union-attr]
+                 parameters = json.loads(tool_call.function.arguments)  # type: ignore[union-attr]
+
+                 tool_calls.append(
+                     ToolCall(
+                         tool_name=tool_name,
+                         parameters=parameters,
+                         reasoning="Tool selected by LLM",
+                     )
+                 )
+
+                 try:
+                     result_text = await self._call_mcp_tool(tool_name, parameters, mcp_session)
+                     tool_results.append(result_text)
+
+                     # Add tool result to messages
+                     messages.append(
+                         {
+                             "role": "tool",
+                             "content": result_text,
+                             "tool_call_id": tool_call.id,
+                             "name": tool_name,
+                         }
+                     )
+                 except Exception as e:
+                     error_msg = f"Error calling {tool_name}: {str(e)}"
+                     tool_results.append(error_msg)
+                     messages.append(
+                         {
+                             "role": "tool",
+                             "content": error_msg,
+                             "tool_call_id": tool_call.id,
+                             "name": tool_name,
+                         }
+                     )
+
+         return tool_calls, tool_results
+
+     async def _get_llm_response(
+         self, messages: list[dict[str, Any]], allow_tool_calls: bool = True
+     ) -> Any:
+         """Get a response from the LLM with optional tool calling capability."""
+         kwargs = {
+             "model": self.model,
+             "messages": messages,
+         }
+
+         if allow_tool_calls and self.available_tools:
+             kwargs["tools"] = self.available_tools
+             kwargs["tool_choice"] = "auto"
+
+         return self.openai_client.chat.completions.create(**kwargs)
+
+     async def process_prompt_with_mcp_support(
+         self, prompt: str, mcp_session: ClientSession, output_file_name: str = ""
+     ) -> LLMResponse:
+         """
+         Process a prompt with MCP tool support and elicitation handling.
+
+         This method:
+         1. Adds MCP tools to available tools
+         2. Sends prompt to LLM
+         3. Processes tool calls
+         4. Continues until LLM provides final response
+
+         Note: Elicitation is handled at the protocol level by FastMCP's ctx.elicit().
+
+         Args:
+             prompt: User prompt
+             mcp_session: MCP client session
+             output_file_name: Optional file name to save response
+
+         Returns
+         -------
+         LLMResponse with content, tool calls, and tool results
+         """
+         # Add MCP tools to available tools
+         await self._add_mcp_tool_to_available_tools(mcp_session)
+
+         if output_file_name:
+             print(f"Processing prompt for test: {output_file_name}")
+
+         # Initialize conversation
+         messages = [
+             {
+                 "role": "system",
+                 "content": (
+                     "You are a helpful AI assistant that can use tools to help users. "
+                     "If you need more information to provide a complete response, you can make "
+                     "multiple tool calls or ask the user for more info, but prefer tool calls "
+                     "when possible. "
+                     "When dealing with file paths, use them as raw paths without converting "
+                     "to file:// URLs."
+                 ),
+             },
+             {"role": "user", "content": prompt},
+         ]
+
+         all_tool_calls = []
+         all_tool_results = []
+
+         while True:
+             # Get LLM response
+             response = await self._get_llm_response(messages)
+
+             # If no tool calls in response, this is the final response
+             if not response.choices[0].message.tool_calls:
+                 final_response = response.choices[0].message.content
+                 break
+
+             # Process tool calls
+             tool_calls, tool_results = await self._process_tool_calls(
+                 response, messages, mcp_session
+             )
+             all_tool_calls.extend(tool_calls)
+             all_tool_results.extend(tool_results)
+
+             # Get another LLM response to see if we need more tool calls
+             response = await self._get_llm_response(messages, allow_tool_calls=True)
+
+             # If no more tool calls needed, this is the final response
+             if not response.choices[0].message.tool_calls:
+                 final_response = response.choices[0].message.content
+                 break
+
+         clean_content = final_response.replace("*", "").lower()
+
+         llm_response = LLMResponse(
+             content=clean_content,
+             tool_calls=all_tool_calls,
+             tool_results=all_tool_results,
+         )
+
+         if self.save_llm_responses:
+             save_response_to_file(llm_response, name=output_file_name)
+
+         return llm_response
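
Putting the pieces together: a concrete subclass supplies `_create_llm_client`, and `process_prompt_with_mcp_support` then drives the list-tools / chat-completion / call-tool loop over an open `ClientSession` until the model stops requesting tools. A minimal end-to-end sketch, not part of the package: the server URL is a placeholder, and the streamable-HTTP transport is just one way to obtain a session.

```python
# Hypothetical sketch: run a single prompt against an MCP server via the test client.
import asyncio

from mcp import ClientSession
from mcp.client.streamable_http import streamablehttp_client

from datarobot_genai.drmcp.test_utils.clients.anthropic import AnthropicMCPClient


async def main() -> None:
    client = AnthropicMCPClient(
        {"anthropic_api_key": "sk-ant-placeholder", "save_llm_responses": False}
    )
    async with streamablehttp_client("http://localhost:8080/mcp") as (read, write, _):
        async with ClientSession(read, write) as session:
            await session.initialize()
            result = await client.process_prompt_with_mcp_support(
                "List my DataRobot deployments", session
            )
            print(result.content)
            for call in result.tool_calls:
                print(call.tool_name, call.parameters)


asyncio.run(main())
```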