explicator 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- explicator-0.1.0/PKG-INFO +21 -0
- explicator-0.1.0/pyproject.toml +31 -0
- explicator-0.1.0/src/explicator/__init__.py +169 -0
- explicator-0.1.0/src/explicator/adapters/__init__.py +1 -0
- explicator-0.1.0/src/explicator/adapters/cli/__init__.py +1 -0
- explicator-0.1.0/src/explicator/adapters/cli/main.py +158 -0
- explicator-0.1.0/src/explicator/adapters/data/__init__.py +1 -0
- explicator-0.1.0/src/explicator/adapters/data/in_memory.py +134 -0
- explicator-0.1.0/src/explicator/adapters/mcp_server/__init__.py +1 -0
- explicator-0.1.0/src/explicator/adapters/mcp_server/__main__.py +29 -0
- explicator-0.1.0/src/explicator/adapters/mcp_server/server.py +308 -0
- explicator-0.1.0/src/explicator/ai/__init__.py +1 -0
- explicator-0.1.0/src/explicator/ai/dispatcher.py +71 -0
- explicator-0.1.0/src/explicator/ai/providers/__init__.py +1 -0
- explicator-0.1.0/src/explicator/ai/providers/azure_openai.py +111 -0
- explicator-0.1.0/src/explicator/ai/providers/base.py +60 -0
- explicator-0.1.0/src/explicator/ai/providers/claude.py +114 -0
- explicator-0.1.0/src/explicator/ai/tools/__init__.py +1 -0
- explicator-0.1.0/src/explicator/ai/tools/definitions.py +124 -0
- explicator-0.1.0/src/explicator/application/__init__.py +1 -0
- explicator-0.1.0/src/explicator/application/service.py +154 -0
- explicator-0.1.0/src/explicator/config.py +72 -0
- explicator-0.1.0/src/explicator/domain/__init__.py +1 -0
- explicator-0.1.0/src/explicator/domain/models.py +160 -0
- explicator-0.1.0/src/explicator/domain/ports.py +43 -0
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
Metadata-Version: 2.3
|
|
2
|
+
Name: explicator
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Provider-agnostic natural language AI interface for scenario-driven modelling
|
|
5
|
+
Author: Tim le Poidevin
|
|
6
|
+
Author-email: Tim le Poidevin <lepoidevin.tim@gmail.com>
|
|
7
|
+
Requires-Dist: mcp>=1.0.0
|
|
8
|
+
Requires-Dist: click>=8.0
|
|
9
|
+
Requires-Dist: python-dotenv>=1.0
|
|
10
|
+
Requires-Dist: openai>=1.0.0 ; extra == 'azure'
|
|
11
|
+
Requires-Dist: anthropic>=0.40.0 ; extra == 'claude'
|
|
12
|
+
Requires-Dist: pytest>=8.0 ; extra == 'dev'
|
|
13
|
+
Requires-Dist: pytest-asyncio>=0.23 ; extra == 'dev'
|
|
14
|
+
Requires-Dist: pytest-mock>=3.12 ; extra == 'dev'
|
|
15
|
+
Requires-Dist: mcp[cli] ; extra == 'dev'
|
|
16
|
+
Requires-Dist: pre-commit>=4.5.1 ; extra == 'dev'
|
|
17
|
+
Requires-Dist: ruff>=0.15.4 ; extra == 'dev'
|
|
18
|
+
Requires-Python: >=3.10
|
|
19
|
+
Provides-Extra: azure
|
|
20
|
+
Provides-Extra: claude
|
|
21
|
+
Provides-Extra: dev
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["uv_build>=0.6"]
|
|
3
|
+
build-backend = "uv_build"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "explicator"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
authors = [{ name = "Tim le Poidevin", email = "lepoidevin.tim@gmail.com" }]
|
|
9
|
+
description = "Provider-agnostic natural language AI interface for scenario-driven modelling"
|
|
10
|
+
requires-python = ">=3.10"
|
|
11
|
+
dependencies = ["mcp>=1.0.0", "click>=8.0", "python-dotenv>=1.0"]
|
|
12
|
+
|
|
13
|
+
[project.optional-dependencies]
|
|
14
|
+
claude = ["anthropic>=0.40.0"]
|
|
15
|
+
azure = ["openai>=1.0.0"]
|
|
16
|
+
dev = [
|
|
17
|
+
"pytest>=8.0",
|
|
18
|
+
"pytest-asyncio>=0.23",
|
|
19
|
+
"pytest-mock>=3.12",
|
|
20
|
+
"mcp[cli]",
|
|
21
|
+
"pre-commit>=4.5.1",
|
|
22
|
+
"ruff>=0.15.4",
|
|
23
|
+
]
|
|
24
|
+
|
|
25
|
+
[project.scripts]
|
|
26
|
+
explicator = "explicator.adapters.cli.main:cli"
|
|
27
|
+
explicator-mcp = "explicator.adapters.mcp_server.server:main"
|
|
28
|
+
|
|
29
|
+
[tool.pytest.ini_options]
|
|
30
|
+
testpaths = ["tests"]
|
|
31
|
+
asyncio_mode = "auto"
|
|
@@ -0,0 +1,169 @@
|
|
|
1
|
+
"""Explicator — provider-agnostic AI interface for scenario-driven modelling."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
from collections.abc import Callable
|
|
7
|
+
from typing import TYPE_CHECKING, Any
|
|
8
|
+
|
|
9
|
+
from explicator.adapters.data.in_memory import (
|
|
10
|
+
FunctionalScenarioRunner,
|
|
11
|
+
InMemoryModelRepository,
|
|
12
|
+
)
|
|
13
|
+
from explicator.application.service import ModelService
|
|
14
|
+
from explicator.domain.models import (
|
|
15
|
+
InputField,
|
|
16
|
+
ModelSchema,
|
|
17
|
+
OutputField,
|
|
18
|
+
Override,
|
|
19
|
+
ScenarioComparison,
|
|
20
|
+
ScenarioDefinition,
|
|
21
|
+
ScenarioResult,
|
|
22
|
+
)
|
|
23
|
+
from explicator.domain.ports import ModelRepository, ScenarioRunner
|
|
24
|
+
|
|
25
|
+
if TYPE_CHECKING:
|
|
26
|
+
from explicator.ai.providers.base import AIProvider
|
|
27
|
+
|
|
28
|
+
__version__ = "0.1.0"
|
|
29
|
+
|
|
30
|
+
__all__ = [
|
|
31
|
+
"ModelService",
|
|
32
|
+
"ModelSchema",
|
|
33
|
+
"InputField",
|
|
34
|
+
"OutputField",
|
|
35
|
+
"ScenarioDefinition",
|
|
36
|
+
"ScenarioResult",
|
|
37
|
+
"ScenarioComparison",
|
|
38
|
+
"Override",
|
|
39
|
+
"FunctionalScenarioRunner",
|
|
40
|
+
"InMemoryModelRepository",
|
|
41
|
+
"ModelRepository",
|
|
42
|
+
"ScenarioRunner",
|
|
43
|
+
"create",
|
|
44
|
+
"run_mcp",
|
|
45
|
+
"run_chat",
|
|
46
|
+
"load_service",
|
|
47
|
+
]
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def create(
    model_fn: Callable[[dict[str, Any]], dict[str, Any]],
    base_inputs: dict[str, Any],
    schema: ModelSchema,
    scenarios: list[ScenarioDefinition],
) -> ModelService:
    """Build a ModelService around a plain Python model callable.

    This is the fastest path from an ordinary function to a working service:
    the callable and its baseline inputs are wrapped in a
    FunctionalScenarioRunner, while the schema and scenario definitions are
    held in an InMemoryModelRepository. For custom storage or execution
    backends, construct ModelService directly with your own ModelRepository
    and ScenarioRunner implementations.

    Example::

        service = explicator.create(
            model_fn=my_model,
            base_inputs={"rate": 5.0},
            schema=my_schema,
            scenarios=my_scenarios,
        )
    """
    scenario_runner = FunctionalScenarioRunner(
        model_fn=model_fn, base_inputs=base_inputs
    )
    repository = InMemoryModelRepository(schema=schema, scenarios=scenarios)
    return ModelService(runner=scenario_runner, repository=repository)
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
def run_mcp(service: ModelService) -> None:
    """Serve the given ModelService over MCP (blocks until shutdown).

    Intended as the entry point in your run script::

        if __name__ == "__main__":
            explicator.run_mcp(service)
    """
    # Imported lazily so the MCP dependency is only needed when serving.
    from explicator.adapters.mcp_server import server as mcp_server

    mcp_server.set_service(service)
    mcp_server.mcp.run()
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
def run_chat(
    service: ModelService,
    *,
    question: str | None = None,
) -> None:
    """Answer one question, or run an interactive chat REPL, against the model.

    Requires an AI provider to be configured (e.g. ANTHROPIC_API_KEY).

    Args:
        service: The ModelService to query.
        question: If provided, answer this single question and return.
            If omitted, start an interactive REPL.
    """
    # Lazy imports: the AI stack is optional and only needed for chat.
    from explicator.ai.dispatcher import ToolDispatcher
    from explicator.ai.providers.base import AIMessage
    from explicator.ai.tools.definitions import TOOL_DEFINITIONS
    from explicator.config import build_provider

    provider = build_provider()
    dispatcher = ToolDispatcher(service)

    def _complete(history: list[AIMessage]) -> str:
        # Keep querying the provider until it replies without tool calls,
        # executing each requested tool and feeding its result back in.
        while True:
            reply = provider.chat(history, tools=TOOL_DEFINITIONS)
            history.append(reply.message)
            if not reply.tool_calls:
                return reply.message.content or ""
            for call in reply.tool_calls:
                outcome = dispatcher.dispatch(call["name"], call["arguments"])
                tool_message = AIMessage(
                    role="tool",
                    content=json.dumps(outcome),
                    tool_call_id=call["id"],
                    name=call["name"],
                )
                history.append(tool_message)

    # Single-question mode: one turn, print, done.
    if question:
        print(_complete([AIMessage(role="user", content=question)]))
        return

    # Interactive REPL mode.
    print("Explicator chat — type 'exit' or Ctrl+D to quit.\n")
    history: list[AIMessage] = []
    while True:
        try:
            line = input("You> ").strip()
        except (EOFError, KeyboardInterrupt):
            print()
            break
        if line.lower() in {"exit", "quit"}:
            break
        history.append(AIMessage(role="user", content=line))
        print(f"\n{_complete(history)}\n")
|
|
145
|
+
|
|
146
|
+
|
|
147
|
+
def load_service(path: str) -> ModelService:
    """Resolve a ModelService from a ``'module.path:attribute'`` string.

    The named attribute may be a ``ModelService`` instance, or a
    zero-argument callable that returns one (callables are invoked).

    Example::

        service = explicator.load_service("myapp.model:build_service")
        service = explicator.load_service("myapp.model:service")

    Raises:
        ValueError: If *path* does not contain a ``:`` separator.
    """
    import importlib

    # Split on the LAST colon so dotted module paths stay intact.
    module_name, separator, attr_name = path.rpartition(":")
    if not separator:
        raise ValueError(
            f"Service path must be 'module:attribute', got '{path}'. "
            "Example: 'myapp.model:build_service'"
        )
    target = getattr(importlib.import_module(module_name), attr_name)
    if callable(target):
        return target()
    return target
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Explicator adapters — CLI, MCP server, and data implementations."""
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""CLI adapter for Explicator."""
|
|
@@ -0,0 +1,158 @@
|
|
|
1
|
+
"""CLI adapter for Explicator.
|
|
2
|
+
|
|
3
|
+
Provides a command-line interface for running scenarios and chatting with an AI.
|
|
4
|
+
No business logic lives here — this is a thin translation layer.
|
|
5
|
+
|
|
6
|
+
Usage::
|
|
7
|
+
|
|
8
|
+
explicator --service myapp.model:build_service run base_case
|
|
9
|
+
explicator --service myapp.model:build_service chat
|
|
10
|
+
EXPLICATOR_SERVICE=myapp.model:service explicator compare base stress
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from __future__ import annotations
|
|
14
|
+
|
|
15
|
+
import json
|
|
16
|
+
import sys
|
|
17
|
+
|
|
18
|
+
import click
|
|
19
|
+
|
|
20
|
+
import explicator
|
|
21
|
+
from explicator.application.service import ModelService
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def _load_service(path: str | None) -> ModelService:
    """Resolve the service from *path*, or build the stub fallback wiring."""
    if not path:
        # No path supplied (flag or env var): start with the placeholder
        # model so the CLI still works end-to-end.
        from explicator.adapters.data.in_memory import _build_stub_wiring

        stub_repository, stub_runner = _build_stub_wiring()
        return ModelService(runner=stub_runner, repository=stub_repository)

    try:
        return explicator.load_service(path)
    except Exception as exc:
        # Translate any import/resolution failure into a CLI usage error.
        raise click.UsageError(
            f"Could not load service from '{path}': {exc}"
        ) from exc
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
@click.group()
@click.option(
    "--service",
    "service_path",
    envvar="EXPLICATOR_SERVICE",
    default=None,
    help="Service path: 'module:attribute'. Also reads EXPLICATOR_SERVICE env var.",
)
@click.pass_context
def cli(ctx: click.Context, service_path: str | None) -> None:
    """Explicator — natural language AI interface for scenario-driven modelling."""
    # Resolve the service once at the group level and stash it in ctx.obj,
    # so every subcommand shares the same instance. Falls back to stub
    # wiring (see _load_service) when no path is configured.
    ctx.ensure_object(dict)
    ctx.obj["service"] = _load_service(service_path)
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
@cli.command("scenarios")
|
|
56
|
+
@click.pass_context
|
|
57
|
+
def list_scenarios(ctx: click.Context) -> None:
|
|
58
|
+
"""List all available scenarios."""
|
|
59
|
+
service: ModelService = ctx.obj["service"]
|
|
60
|
+
for s in service.get_available_scenarios():
|
|
61
|
+
click.echo(f"\n{s.name}")
|
|
62
|
+
click.echo(f" {s.description}")
|
|
63
|
+
click.echo(f" Stress rationale: {s.stress_rationale}")
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
@cli.command("run")
|
|
67
|
+
@click.argument("scenario_name")
|
|
68
|
+
@click.option(
|
|
69
|
+
"--override",
|
|
70
|
+
"-o",
|
|
71
|
+
multiple=True,
|
|
72
|
+
metavar="FIELD=VALUE",
|
|
73
|
+
help="Override an input field for this run, e.g. -o yield_10y=5.0",
|
|
74
|
+
)
|
|
75
|
+
@click.pass_context
|
|
76
|
+
def run_scenario(
|
|
77
|
+
ctx: click.Context, scenario_name: str, override: tuple[str, ...]
|
|
78
|
+
) -> None:
|
|
79
|
+
"""Run a named scenario and print results as JSON."""
|
|
80
|
+
service: ModelService = ctx.obj["service"]
|
|
81
|
+
|
|
82
|
+
overrides: dict = {}
|
|
83
|
+
for o in override:
|
|
84
|
+
if "=" not in o:
|
|
85
|
+
click.echo(f"Invalid override format '{o}'. Use FIELD=VALUE.", err=True)
|
|
86
|
+
sys.exit(1)
|
|
87
|
+
field, value = o.split("=", 1)
|
|
88
|
+
try:
|
|
89
|
+
overrides[field.strip()] = float(value.strip())
|
|
90
|
+
except ValueError:
|
|
91
|
+
click.echo(f"Override value must be numeric: '{value}'", err=True)
|
|
92
|
+
sys.exit(1)
|
|
93
|
+
|
|
94
|
+
result = service.run_scenario(scenario_name, overrides=overrides or None)
|
|
95
|
+
click.echo(json.dumps(result.to_dict(), indent=2))
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
@cli.command("compare")
|
|
99
|
+
@click.argument("scenario_a")
|
|
100
|
+
@click.argument("scenario_b")
|
|
101
|
+
@click.option(
|
|
102
|
+
"--metric", "-m", multiple=True, help="Restrict comparison to this output field"
|
|
103
|
+
)
|
|
104
|
+
@click.pass_context
|
|
105
|
+
def compare(
|
|
106
|
+
ctx: click.Context,
|
|
107
|
+
scenario_a: str,
|
|
108
|
+
scenario_b: str,
|
|
109
|
+
metric: tuple[str, ...],
|
|
110
|
+
) -> None:
|
|
111
|
+
"""Compare two scenarios side by side."""
|
|
112
|
+
service: ModelService = ctx.obj["service"]
|
|
113
|
+
comparison = service.compare_scenarios(
|
|
114
|
+
scenario_a,
|
|
115
|
+
scenario_b,
|
|
116
|
+
metrics=list(metric) if metric else None,
|
|
117
|
+
)
|
|
118
|
+
click.echo(json.dumps(comparison.to_dict(), indent=2))
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
@cli.command("schema")
|
|
122
|
+
@click.pass_context
|
|
123
|
+
def show_schema(ctx: click.Context) -> None:
|
|
124
|
+
"""Print the full model schema as JSON."""
|
|
125
|
+
service: ModelService = ctx.obj["service"]
|
|
126
|
+
click.echo(json.dumps(service.get_model_schema().to_dict(), indent=2))
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
@cli.command("override")
|
|
130
|
+
@click.argument("source")
|
|
131
|
+
@click.argument("field")
|
|
132
|
+
@click.argument("value", type=float)
|
|
133
|
+
@click.pass_context
|
|
134
|
+
def set_override(ctx: click.Context, source: str, field: str, value: float) -> None:
|
|
135
|
+
"""Apply a persistent session override to an input field."""
|
|
136
|
+
service: ModelService = ctx.obj["service"]
|
|
137
|
+
click.echo(service.override_input(source, field, value))
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
@cli.command("reset")
|
|
141
|
+
@click.pass_context
|
|
142
|
+
def reset_overrides(ctx: click.Context) -> None:
|
|
143
|
+
"""Clear all active overrides and restore model defaults."""
|
|
144
|
+
service: ModelService = ctx.obj["service"]
|
|
145
|
+
click.echo(service.reset_overrides())
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
@cli.command("chat")
|
|
149
|
+
@click.argument("question", required=False)
|
|
150
|
+
@click.pass_context
|
|
151
|
+
def chat(ctx: click.Context, question: str | None) -> None:
|
|
152
|
+
"""Chat with an AI about the model. Omit QUESTION for interactive mode."""
|
|
153
|
+
service: ModelService = ctx.obj["service"]
|
|
154
|
+
try:
|
|
155
|
+
explicator.run_chat(service, question=question)
|
|
156
|
+
except Exception as exc:
|
|
157
|
+
click.echo(f"Error: {exc}", err=True)
|
|
158
|
+
sys.exit(1)
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""In-memory data adapters for Explicator."""
|
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
"""In-memory implementations of the domain ports.
|
|
2
|
+
|
|
3
|
+
Useful for testing and for wiring up demo/example models.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
from collections.abc import Callable
|
|
9
|
+
from datetime import UTC, datetime
|
|
10
|
+
from typing import Any
|
|
11
|
+
|
|
12
|
+
from explicator.domain.models import (
|
|
13
|
+
InputField,
|
|
14
|
+
ModelSchema,
|
|
15
|
+
OutputField,
|
|
16
|
+
ScenarioDefinition,
|
|
17
|
+
ScenarioResult,
|
|
18
|
+
)
|
|
19
|
+
from explicator.domain.ports import ModelRepository, ScenarioRunner
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class InMemoryModelRepository(ModelRepository):
    """
    ModelRepository that keeps its schema and scenarios in plain attributes.

    Construct it with a ready-made ModelSchema and the scenario definitions
    to expose. Handy for tests and for wrapping functional demo models.
    """

    def __init__(
        self,
        schema: ModelSchema,
        scenarios: list[ScenarioDefinition],
    ) -> None:
        """Store the schema and scenario definitions to serve later."""
        self._scenarios = scenarios
        self._schema = schema

    def get_schema(self) -> ModelSchema:
        """Return the full model schema."""
        return self._schema

    def get_scenarios(self) -> list[ScenarioDefinition]:
        """Return a fresh list of all scenario definitions."""
        return [*self._scenarios]

    def get_inputs(self) -> list[InputField]:
        """Return the current state of all model inputs."""
        return [*self._schema.inputs]
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
class FunctionalScenarioRunner(ScenarioRunner):
    """
    ScenarioRunner adapter around a user-supplied callable.

    The callable receives the merged inputs dict and returns an outputs dict,
    so any pure Python model function can be wrapped without subclassing.

    Example::

        runner = FunctionalScenarioRunner(
            model_fn=my_model,
            base_inputs={"rate": 5.0, "spread": 1.2},
        )
    """

    def __init__(
        self,
        model_fn: Callable[[dict[str, Any]], dict[str, Any]],
        base_inputs: dict[str, Any],
    ) -> None:
        """Remember the model callable and its baseline input values."""
        self._model_fn = model_fn
        self._base_inputs = base_inputs

    def run(
        self,
        scenario: ScenarioDefinition,
        extra_overrides: dict[str, Any],
    ) -> ScenarioResult:
        """Execute *scenario*: overrides layer over the base inputs.

        Precedence (lowest to highest): base inputs, the scenario's own
        overrides, then any extra per-run overrides.
        """
        applied = {**scenario.overrides, **extra_overrides}
        merged_inputs = {**self._base_inputs, **applied}
        model_outputs = self._model_fn(merged_inputs)
        return ScenarioResult(
            scenario_name=scenario.name,
            inputs_used=merged_inputs,
            outputs=model_outputs,
            overrides_applied=applied,
            run_at=datetime.now(UTC).isoformat(),
        )
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
def _build_stub_wiring() -> tuple[InMemoryModelRepository, FunctionalScenarioRunner]:
    """Fallback wiring used when no real model has been configured.

    Yields a placeholder repository and runner so the server starts without
    error; replace this in your own entry point with real implementations.
    """
    placeholder_output = OutputField(
        name="stub",
        description="Placeholder output.",
        units="",
        interpretation="Replace this model with a real implementation.",
    )
    stub_schema = ModelSchema(
        name="Stub Model",
        description=(
            "No model has been configured. "
            "Wire a real ModelRepository and ScenarioRunner in your entry point."
        ),
        inputs=[],
        outputs=[placeholder_output],
        assumptions=["This is a stub."],
        caveats=["Wire a real model before use."],
    )
    base_scenario = ScenarioDefinition(
        name="base",
        description="Stub base case.",
        stress_rationale="Stub — no real stress applied.",
        overrides={},
    )
    stub_repository = InMemoryModelRepository(
        schema=stub_schema, scenarios=[base_scenario]
    )
    stub_runner = FunctionalScenarioRunner(
        model_fn=lambda inputs: {"stub": True, "message": "No model configured."},
        base_inputs={},
    )
    return stub_repository, stub_runner
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""MCP server adapter for Explicator."""
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
"""Entry point: python -m explicator.adapters.mcp_server [module:attribute]."""
|
|
2
|
+
|
|
3
|
+
import sys
|
|
4
|
+
|
|
5
|
+
import explicator
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def main() -> None:
    """Start the MCP server.

    Accepts an optional service path argument::

        python -m explicator.adapters.mcp_server myapp.model:build_service

    Falls back to stub wiring if no path is given.
    """
    if len(sys.argv) > 1:
        # A 'module:attribute' service path was supplied on the command line.
        service = explicator.load_service(sys.argv[1])
    else:
        # No path given: wire the placeholder model so the server still starts.
        from explicator.adapters.data.in_memory import _build_stub_wiring
        from explicator.application.service import ModelService

        repository, runner = _build_stub_wiring()
        service = ModelService(runner=runner, repository=repository)

    explicator.run_mcp(service)


# Guard the invocation: the original called main() unconditionally, which
# started the MCP server as a side effect of merely importing this module.
# `python -m explicator.adapters.mcp_server` still runs as __main__, so
# behavior for the documented entry point is unchanged.
if __name__ == "__main__":
    main()
|