explicator 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,308 @@
1
+ """MCP Server adapter for Explicator.
2
+
3
+ Exposes the ModelService as an MCP server with tools, resources, and prompts.
4
+ This adapter sits alongside the CLI as a parallel entry point into the application
5
+ layer — it does not bypass or duplicate any domain logic.
6
+
7
+ Start with:
8
+ python -m explicator.adapters.mcp_server
9
+ or via the entry point:
10
+ explicator-mcp
11
+ """
12
+
13
+ from __future__ import annotations
14
+
15
+ import json
16
+
17
+ from mcp.server.fastmcp import FastMCP
18
+
19
+ from explicator.application.service import ModelService
20
+
21
# Module-level service instance set by the application wiring.
# Using a module-level variable allows the MCP decorators to close over it
# while keeping the server testable via set_service().
_service: ModelService | None = None

# Single FastMCP application object; the tools, resources, and prompts below
# register themselves against it via decorators at import time.
mcp = FastMCP("Explicator")
27
+
28
+
29
def set_service(service: ModelService) -> None:
    """Register *service* as the ModelService backing every MCP handler."""
    global _service
    _service = service
33
+
34
+
35
def _get_service() -> ModelService:
    """Return the wired ModelService, failing fast if wiring never happened."""
    if _service is not None:
        return _service
    raise RuntimeError(
        "ModelService has not been wired. "
        "Call set_service() before starting the server."
    )
42
+
43
+
44
+ # ------------------------------------------------------------------
45
+ # Tools — actions Claude can invoke
46
+ # ------------------------------------------------------------------
47
+
48
+
49
@mcp.tool()
def run_scenario(name: str, overrides: dict[str, float] | None = None) -> dict:
    """
    Run a named scenario through the portfolio model.

    Applies any active session-level overrides plus any overrides provided here
    (call-specific overrides take precedence). Returns the full set of model outputs.
    """
    # Errors are returned as structured dicts so the client can relay them.
    try:
        return _get_service().run_scenario(name, overrides=overrides).to_dict()
    except Exception as exc:
        return {"error": str(exc)}
62
+
63
+
64
@mcp.tool()
def override_input(source: str, field: str, value: float) -> dict:
    """
    Apply a persistent session-level override to a model input.

    This override is applied to all subsequent scenario runs until
    reset_overrides is called. Use source to group related inputs
    (e.g. "rates", "credit", "equity").
    """
    try:
        confirmation = _get_service().override_input(source, field, value)
    except Exception as exc:
        return {"error": str(exc)}
    # Echo the applied override back so the caller can confirm what changed.
    return {
        "message": confirmation,
        "source": source,
        "field": field,
        "value": value,
    }
78
+
79
+
80
@mcp.tool()
def reset_overrides() -> dict:
    """Clear all active session-level overrides, restoring model defaults."""
    try:
        return {"message": _get_service().reset_overrides()}
    except Exception as exc:
        return {"error": str(exc)}
88
+
89
+
90
@mcp.tool()
def compare_scenarios(
    scenario_a: str,
    scenario_b: str,
    metrics: list[str] | None = None,
) -> dict:
    """
    Run two scenarios and return a structured side-by-side comparison.

    Returns absolute deltas and percentage changes for each output metric.
    If metrics is not provided, all shared output fields are compared.
    """
    try:
        svc = _get_service()
        return svc.compare_scenarios(scenario_a, scenario_b, metrics).to_dict()
    except Exception as exc:
        return {"error": str(exc)}
107
+
108
+
109
@mcp.tool()
def get_available_scenarios() -> dict:
    """List all configured scenarios with names, descriptions, and stress rationale."""
    try:
        listing = [item.to_dict() for item in _get_service().get_available_scenarios()]
        return {"scenarios": listing}
    except Exception as exc:
        return {"error": str(exc)}
117
+
118
+
119
+ # ------------------------------------------------------------------
120
+ # Resources — context Claude can read
121
+ # ------------------------------------------------------------------
122
+
123
+
124
@mcp.resource("model://schema")
def get_model_schema() -> str:
    """
    Full structured description of all model inputs, outputs, assumptions, and caveats.

    This is the primary source of truth for Claude's understanding of the domain.
    Includes financial meaning, units, typical ranges, and interpretation guidance
    for every field.
    """
    # Resources must return strings, so errors are serialised too.
    try:
        payload = _get_service().get_model_schema().to_dict()
        return json.dumps(payload, indent=2)
    except Exception as exc:
        return json.dumps({"error": str(exc)})
138
+
139
+
140
@mcp.resource("model://scenarios")
def get_scenarios_resource() -> str:
    """Return scenario definitions, including what each is designed to stress-test."""
    try:
        payload = [item.to_dict() for item in _get_service().get_available_scenarios()]
        return json.dumps(payload, indent=2)
    except Exception as exc:
        return json.dumps({"error": str(exc)})
148
+
149
+
150
@mcp.resource("model://results/latest")
def get_latest_results() -> str:
    """Most recent run results for all scenarios executed this session."""
    try:
        snapshot = {
            scenario: outcome.to_dict()
            for scenario, outcome in _get_service().get_current_results().items()
        }
        return json.dumps(snapshot, indent=2)
    except Exception as exc:
        return json.dumps({"error": str(exc)})
161
+
162
+
163
@mcp.resource("model://overrides/current")
def get_current_overrides() -> str:
    """All input overrides currently active in this session."""
    try:
        active = _get_service().get_active_overrides()
        return json.dumps([item.to_dict() for item in active], indent=2)
    except Exception as exc:
        return json.dumps({"error": str(exc)})
171
+
172
+
173
+ # ------------------------------------------------------------------
174
+ # Prompts — reusable AI prompt templates
175
+ # ------------------------------------------------------------------
176
+
177
+
178
@mcp.prompt()
def explain_scenario_result(scenario_name: str) -> str:
    """Explain what drove the result of a given scenario in plain English."""
    try:
        # Fetch session state in the same order as the other prompt builders.
        runs = _get_service().get_current_results()
        schema = _get_service().get_model_schema()

        if scenario_name not in runs:
            return (
                f"No result found for scenario '{scenario_name}'. "
                f"Run it first using the run_scenario tool."
            )

        run = runs[scenario_name]
        return (
            "You are a portfolio risk analyst. Explain the following scenario result "
            "in plain English to a non-technical stakeholder.\n\n"
            f"Model: {schema.name}\n"
            f"Scenario: {scenario_name}\n\n"
            f"Inputs used:\n{json.dumps(run.inputs_used, indent=2)}\n\n"
            f"Overrides applied:\n{json.dumps(run.overrides_applied, indent=2)}\n\n"
            f"Outputs:\n{json.dumps(run.outputs, indent=2)}\n\n"
            "Focus on: what drove the key outputs, what risks this scenario reveals, "
            "and what a portfolio manager should take away from this result."
        )
    except Exception as exc:
        return f"Error generating prompt: {exc}"
205
+
206
+
207
@mcp.prompt()
def compare_scenarios_narrative(scenario_a: str, scenario_b: str) -> str:
    """Narrate the key differences between two scenario outcomes in plain English."""
    try:
        svc = _get_service()
        schema = svc.get_model_schema()
        diff = svc.compare_scenarios(scenario_a, scenario_b)
        return (
            "You are a portfolio risk analyst. Compare these two scenario outcomes "
            "and narrate the key differences for a senior investment committee.\n\n"
            f"Model: {schema.name}\n"
            f"Comparing: '{scenario_a}' vs '{scenario_b}'\n\n"
            "Comparison data:\n"
            f"{json.dumps(diff.to_dict(), indent=2)}\n\n"
            "Focus on: the most significant differences, which scenario is more "
            "stressful and why, and what risk factors are driving the divergence."
        )
    except Exception as exc:
        return f"Error generating prompt: {exc}"
225
+
226
+
227
@mcp.prompt()
def summarise_portfolio_risk() -> str:
    """Summarise current risk exposures across all scenarios that have been run."""
    try:
        svc = _get_service()
        results = svc.get_current_results()
        schema = svc.get_model_schema()
        overrides = svc.get_active_overrides()

        if not results:
            return (
                "No scenarios have been run yet. Use the run_scenario tool to execute "
                "one or more scenarios before requesting a risk summary."
            )

        # Serialise session state up front to keep the prompt template readable.
        overrides_json = json.dumps([o.to_dict() for o in overrides], indent=2)
        results_json = json.dumps(
            {n: r.to_dict() for n, r in results.items()}, indent=2
        )
        return (
            "You are a chief risk officer. Summarise the portfolio's current risk "
            "exposures based on the scenario results below.\n\n"
            f"Model: {schema.name}\n"
            "Active overrides: "
            f"{overrides_json}\n\n"
            "Scenario results:\n"
            f"{results_json}\n\n"
            "Focus on: overall risk level, which scenarios are most severe, "
            "key drivers of risk, and any concentrations or tail risks to flag."
        )
    except Exception as exc:
        return f"Error generating prompt: {exc}"
254
+
255
+
256
@mcp.prompt()
def explain_input_sensitivity(input_field: str) -> str:
    """Explain how sensitive the model is to a given input field."""
    try:
        schema = _get_service().get_model_schema()

        # Locate the requested input in the schema, if it exists.
        matched = None
        for spec in schema.inputs:
            if spec.name == input_field:
                matched = spec
                break

        if matched:
            field_desc = json.dumps(matched.to_dict(), indent=2)
        else:
            field_desc = f"Field '{input_field}' not found in schema."

        return (
            "You are a quantitative analyst. Explain how sensitive this portfolio "
            "model is to the following input, and what happens when it moves.\n\n"
            f"Model: {schema.name}\n"
            f"Input field: {input_field}\n"
            f"Field details:\n{field_desc}\n\n"
            "Explain: the financial meaning of this input, what drives it in practice, "
            "how the portfolio is exposed to it, and what a 1 standard deviation move "
            "might mean for portfolio outcomes."
        )
    except Exception as exc:
        return f"Error generating prompt: {exc}"
279
+
280
+
281
+ # ------------------------------------------------------------------
282
+ # Entry point
283
+ # ------------------------------------------------------------------
284
+
285
+
286
def main() -> None:
    """Start the MCP server.

    Reads an optional service path from the first CLI argument::

        explicator-mcp myapp.model:build_service

    Falls back to stub wiring if no path is given.
    """
    import sys

    args = sys.argv[1:]
    if args:
        # A "module:factory" path was supplied — let the package resolve it.
        import explicator as _explicator

        service = _explicator.load_service(args[0])
    else:
        # No path given: fall back to the in-memory stub wiring for demos/tests.
        from explicator.adapters.data.in_memory import _build_stub_wiring

        repo, scenario_runner = _build_stub_wiring()
        service = ModelService(runner=scenario_runner, repository=repo)

    set_service(service)
    mcp.run()
@@ -0,0 +1 @@
1
+ """AI provider abstractions and tool dispatch for Explicator."""
@@ -0,0 +1,71 @@
1
+ """Tool execution dispatcher — shared across all AI providers.
2
+
3
+ When any provider returns a tool call, this dispatcher handles it by routing
4
+ to the appropriate ModelService method. Provider identity is irrelevant here:
5
+ the same dispatcher handles tool calls from Claude, Azure OpenAI, or any other provider.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ from typing import Any
11
+
12
+ from explicator.application.service import ModelService
13
+
14
+
15
class ToolDispatcher:
    """
    Routes tool call requests from AI providers to the ModelService.

    Always returns a JSON-serialisable dict. Errors are returned as structured
    dicts rather than raised, so the provider can relay them to the user.
    """

    def __init__(self, service: ModelService) -> None:
        """Initialise with the ModelService to dispatch tool calls to."""
        self._service = service
        # Tool-name -> bound handler; keys match the published tool definitions.
        self._handlers: dict[str, Any] = {
            "run_scenario": self._run_scenario,
            "override_input": self._override_input,
            "reset_overrides": self._reset_overrides,
            "compare_scenarios": self._compare_scenarios,
            "get_available_scenarios": self._get_available_scenarios,
        }

    def dispatch(self, name: str, arguments: dict[str, Any]) -> dict[str, Any]:
        """
        Execute a named tool with the given arguments.

        Returns a JSON-serialisable dict. Never raises.
        """
        handler = self._handlers.get(name)
        if handler is None:
            return {"error": f"Unknown tool '{name}'."}
        try:
            return handler(**arguments)
        except Exception as exc:
            return {"error": str(exc)}

    def _run_scenario(self, name: str, overrides: dict | None = None) -> dict:
        return self._service.run_scenario(name, overrides=overrides).to_dict()

    def _override_input(self, source: str, field: str, value: float) -> dict:
        confirmation = self._service.override_input(source, field, value)
        return {"message": confirmation, "source": source, "field": field, "value": value}

    def _reset_overrides(self) -> dict:
        return {"message": self._service.reset_overrides()}

    def _compare_scenarios(
        self,
        scenario_a: str,
        scenario_b: str,
        metrics: list[str] | None = None,
    ) -> dict:
        delta = self._service.compare_scenarios(scenario_a, scenario_b, metrics)
        return delta.to_dict()

    def _get_available_scenarios(self) -> dict:
        return {"scenarios": [s.to_dict() for s in self._service.get_available_scenarios()]}
@@ -0,0 +1 @@
1
+ """Concrete AI provider adapters (Claude, Azure OpenAI)."""
@@ -0,0 +1,111 @@
1
+ """Azure OpenAI provider adapter.
2
+
3
+ This is the only module in the codebase that imports `openai`.
4
+ All Azure OpenAI-specific API details are encapsulated here.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import json
10
+
11
+ from explicator.ai.providers.base import AIMessage, AIProvider, AIResponse
12
+
13
+
14
class AzureOpenAIProvider(AIProvider):
    """AI provider adapter for Azure OpenAI."""

    def __init__(
        self,
        api_key: str,
        azure_endpoint: str,
        deployment_name: str,
        api_version: str = "2024-02-01",
    ) -> None:
        """Initialise the Azure OpenAI client with endpoint and deployment details.

        Raises:
            ImportError: if the optional 'openai' dependency is not installed.
        """
        # Import lazily and guard it, so a missing optional dependency produces
        # an actionable error instead of a bare ImportError at module load.
        # (Fix: a second, unconditional `from openai import AzureOpenAI` used to
        # follow this block — dead code that bypassed the guarded error path.)
        try:
            from openai import AzureOpenAI
        except ImportError as exc:
            raise ImportError(
                "The 'openai' package is required for AzureOpenAIProvider. "
                "Install it with: pip install 'explicator[azure]'"
            ) from exc

        self._client = AzureOpenAI(
            api_key=api_key,
            azure_endpoint=azure_endpoint,
            api_version=api_version,
        )
        # Azure routes requests by deployment name rather than raw model name.
        self._deployment = deployment_name

    def chat(
        self,
        messages: list[AIMessage],
        tools: list[dict],
        system: str | None = None,
    ) -> AIResponse:
        """Send a conversation to Azure OpenAI and return a normalised response.

        Args:
            messages: Conversation history in normalised AIMessage format.
            tools: Tool definitions in OpenAI function-calling JSON schema format.
            system: Optional system prompt, prepended as a system-role message.
        """
        oai_messages = []
        if system:
            # OpenAI expects the system prompt as the first message in the list.
            oai_messages.append({"role": "system", "content": system})
        oai_messages.extend([self._to_oai_message(m) for m in messages])

        response = self._client.chat.completions.create(
            model=self._deployment,
            messages=oai_messages,
            tools=tools,  # OpenAI format matches our definitions directly
            tool_choice="auto",
        )

        choice = response.choices[0]
        msg = choice.message

        # Normalise tool calls: the SDK returns arguments as a JSON string,
        # our AIResponse carries them as a parsed dict.
        tool_calls: list[dict] = []
        if msg.tool_calls:
            for tc in msg.tool_calls:
                tool_calls.append(
                    {
                        "id": tc.id,
                        "name": tc.function.name,
                        "arguments": json.loads(tc.function.arguments),
                    }
                )

        return AIResponse(
            message=AIMessage(
                role="assistant",
                content=msg.content,
                tool_calls=tool_calls or None,
            ),
            tool_calls=tool_calls,
            finish_reason=choice.finish_reason or "stop",
        )

    @staticmethod
    def _to_oai_message(msg: AIMessage) -> dict:
        """Convert a normalised AIMessage into OpenAI chat-completions format."""
        if msg.role == "tool":
            # Tool results reference the originating call via tool_call_id.
            return {
                "role": "tool",
                "tool_call_id": msg.tool_call_id,
                "name": msg.name or "",
                "content": msg.content or "",
            }
        if msg.role == "assistant" and msg.tool_calls:
            # Arguments travel back to the API re-serialised as JSON strings.
            oai_tool_calls = [
                {
                    "id": tc["id"],
                    "type": "function",
                    "function": {
                        "name": tc["name"],
                        "arguments": json.dumps(tc["arguments"]),
                    },
                }
                for tc in msg.tool_calls
            ]
            return {
                "role": "assistant",
                "content": msg.content,
                "tool_calls": oai_tool_calls,
            }
        # Plain system/user/assistant text message.
        return {"role": msg.role, "content": msg.content or ""}
@@ -0,0 +1,60 @@
1
+ """Abstract AI provider interface.
2
+
3
+ All provider adapters implement this interface. The orchestration layer
4
+ only ever speaks AIMessage / AIResponse — never provider-specific types.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ from abc import ABC, abstractmethod
10
+ from dataclasses import dataclass, field
11
+
12
+
13
@dataclass
class AIMessage:
    """A single message in a conversation, normalised across all providers."""

    # Conversation role of this message.
    role: str  # "system" | "user" | "assistant" | "tool"
    # Plain-text body; may be None for an assistant turn that only calls tools.
    content: str | None = None
    # Normalised tool-call requests attached to an assistant message.
    tool_calls: list[dict] | None = None  # [{id, name, arguments}]
    # Links a role="tool" result back to the originating tool call.
    tool_call_id: str | None = None  # for role="tool" responses
    # Name of the tool that produced a role="tool" message.
    name: str | None = None  # tool name for role="tool"
22
+
23
+
24
@dataclass
class AIResponse:
    """Normalised response from an AI provider."""

    # The assistant message, already converted to the normalised format.
    message: AIMessage
    # Tool invocations the model requested; empty when it answered directly.
    tool_calls: list[dict] = field(default_factory=list)  # [{id, name, arguments}]
    # Why generation ended, e.g. "stop" or "tool_calls" when tools were requested.
    finish_reason: str = "stop"
31
+
32
+
33
class AIProvider(ABC):
    """
    Abstract base for AI provider adapters.

    Each concrete implementation is responsible for translating to and from
    its provider's wire format. The orchestration layer must never contain
    provider-specific code.
    """

    @abstractmethod
    def chat(
        self,
        messages: list[AIMessage],
        tools: list[dict],
        system: str | None = None,
    ) -> AIResponse:
        """
        Send a conversation to the provider and return a normalised response.

        Args:
            messages: Conversation history in normalised AIMessage format.
            tools: Tool definitions in OpenAI function-calling JSON schema format.
            system: Optional system prompt (passed separately, not as a message).

        Returns:
            Normalised AIResponse. Check finish_reason and tool_calls to determine
            whether the model wants to invoke a tool or has finished responding.
        """
@@ -0,0 +1,114 @@
1
+ """Anthropic Claude provider adapter.
2
+
3
+ This is the only module in the codebase that imports `anthropic`.
4
+ All Claude-specific API details are encapsulated here.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ from typing import Any
10
+
11
+ from explicator.ai.providers.base import AIMessage, AIProvider, AIResponse
12
+
13
+
14
class ClaudeProvider(AIProvider):
    """AI provider adapter for Anthropic's Claude API."""

    def __init__(
        self,
        api_key: str | None = None,
        model: str = "claude-sonnet-4-6",
    ) -> None:
        """Initialise the Anthropic client with the given API key and model.

        Raises:
            ImportError: if the optional 'anthropic' dependency is not installed.
        """
        # Import lazily and guard it, so a missing optional dependency produces
        # an actionable error instead of a bare ImportError at module load.
        try:
            import anthropic
        except ImportError as exc:
            raise ImportError(
                "The 'anthropic' package is required for ClaudeProvider. "
                "Install it with: pip install 'explicator[claude]'"
            ) from exc

        # api_key may be None — presumably the SDK then resolves it from the
        # environment; confirm against the anthropic client docs.
        self._client = anthropic.Anthropic(api_key=api_key)
        self._model = model

    def chat(
        self,
        messages: list[AIMessage],
        tools: list[dict],
        system: str | None = None,
    ) -> AIResponse:
        """Send a conversation to Claude and return a normalised response.

        Args:
            messages: Conversation history in normalised AIMessage format.
            tools: Tool definitions in OpenAI function-calling JSON schema format,
                converted here to Anthropic's input_schema format.
            system: Optional system prompt, passed via the dedicated kwarg.
        """
        anthropic_messages = [self._to_anthropic_message(m) for m in messages]
        anthropic_tools = [self._to_anthropic_tool(t) for t in tools]

        kwargs: dict[str, Any] = {
            "model": self._model,
            "max_tokens": 4096,
            "messages": anthropic_messages,
            "tools": anthropic_tools,
        }
        if system:
            # Anthropic takes the system prompt as a top-level parameter,
            # not as a message in the conversation.
            kwargs["system"] = system

        response = self._client.messages.create(**kwargs)

        # Flatten the content blocks into normalised form. NOTE: if the
        # response contains multiple text blocks, only the last one is kept.
        tool_calls: list[dict] = []
        text_content: str | None = None

        for block in response.content:
            if block.type == "tool_use":
                tool_calls.append(
                    {"id": block.id, "name": block.name, "arguments": block.input}
                )
            elif block.type == "text":
                text_content = block.text

        # Normalise the finish reason to the values the orchestrator checks.
        finish_reason = "tool_calls" if tool_calls else "stop"

        return AIResponse(
            message=AIMessage(
                role="assistant",
                content=text_content,
                tool_calls=tool_calls or None,
            ),
            tool_calls=tool_calls,
            finish_reason=finish_reason,
        )

    @staticmethod
    def _to_anthropic_message(msg: AIMessage) -> dict:
        """Convert a normalised AIMessage into Anthropic's messages format."""
        if msg.role == "tool":
            # Anthropic has no tool role: tool results are sent as a user
            # message containing a tool_result content block.
            return {
                "role": "user",
                "content": [
                    {
                        "type": "tool_result",
                        "tool_use_id": msg.tool_call_id,
                        "content": msg.content or "",
                    }
                ],
            }
        if msg.role == "assistant" and msg.tool_calls:
            # Assistant turns with tool calls become a block list: optional
            # text block first, then one tool_use block per call.
            content: list[dict] = []
            if msg.content:
                content.append({"type": "text", "text": msg.content})
            for tc in msg.tool_calls:
                content.append(
                    {
                        "type": "tool_use",
                        "id": tc["id"],
                        "name": tc["name"],
                        "input": tc["arguments"],
                    }
                )
            return {"role": "assistant", "content": content}
        # Plain text message; Anthropic accepts a bare string as content.
        return {"role": msg.role, "content": msg.content or ""}

    @staticmethod
    def _to_anthropic_tool(tool: dict) -> dict:
        """Convert an OpenAI function-calling tool definition to Anthropic format."""
        # Expects the OpenAI shape: {"function": {name, description, parameters}}.
        fn = tool["function"]
        return {
            "name": fn["name"],
            "description": fn["description"],
            "input_schema": fn["parameters"],
        }
@@ -0,0 +1 @@
1
+ """Tool definitions in OpenAI function-calling JSON schema format."""