cortex-solver 3.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,307 @@
1
+ """Post-build verification for 3D objects."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass
6
+ from typing import Iterable
7
+
8
+ from cortex.types import VerifyCheck, VerifyResult
9
+
10
+
11
+ @dataclass(frozen=True, slots=True)
12
+ class AxisDeviation:
13
+ axis: str
14
+ actual: float
15
+ expected: float
16
+ delta: float
17
+
18
+
19
+ def _get_list(value: object, length: int) -> list[float] | None:
20
+ if not isinstance(value, (list, tuple)):
21
+ return None
22
+ if len(value) != length:
23
+ return None
24
+ try:
25
+ return [float(item) for item in value]
26
+ except (TypeError, ValueError):
27
+ return None
28
+
29
+
30
def _axis_deviations(
    actual: list[float], expected: list[float], axes: Iterable[str]
) -> list[AxisDeviation]:
    """Pair actual/expected values per axis and record the signed delta.

    Iteration stops at the shortest of the three inputs (zip semantics).
    """
    return [
        AxisDeviation(
            axis=axis_name,
            actual=measured,
            expected=target,
            delta=measured - target,
        )
        for axis_name, measured, target in zip(axes, actual, expected)
    ]
45
+
46
+
47
+ def _format_axis_details(
48
+ label: str, deviations: list[AxisDeviation], tolerance: float
49
+ ) -> str:
50
+ failures = []
51
+ for deviation in deviations:
52
+ if abs(deviation.delta) > tolerance:
53
+ failures.append(
54
+ f"{label}.{deviation.axis}: actual={deviation.actual:.4f}, "
55
+ f"expected={deviation.expected:.4f}, delta={deviation.delta:.4f}"
56
+ )
57
+ if not failures:
58
+ return f"{label} within tolerance"
59
+ return "; ".join(failures)
60
+
61
+
62
+ def _missing_details(field: str) -> str:
63
+ return f"Missing required field: {field}"
64
+
65
+
66
def _mesh_health_check(object_data: dict) -> VerifyCheck:
    """Check mesh-quality flags reported by the 3D engine.

    Requires the four boolean flags below to be present. Only non-manifold
    geometry fails the check; N-gons, loose vertices, and inconsistent
    normals are reported in the details but do not fail it.
    """
    required = (
        "has_ngons",
        "has_non_manifold",
        "has_loose_vertices",
        "normals_consistent",
    )
    absent = [field for field in required if field not in object_data]
    if absent:
        return VerifyCheck(
            name="mesh_health",
            passed=False,
            details="; ".join(_missing_details(field) for field in absent),
            severity="critical",
        )

    non_manifold = bool(object_data.get("has_non_manifold"))
    issues: list[str] = []
    if object_data.get("has_ngons"):
        issues.append("Object has N-gons")
    if object_data.get("has_loose_vertices"):
        issues.append("Loose vertices found")
    if not object_data.get("normals_consistent"):
        issues.append("Inconsistent normals")
    if non_manifold:
        issues.append("Non-manifold geometry detected")

    return VerifyCheck(
        name="mesh_health",
        passed=not non_manifold,  # only non-manifold geometry is fatal
        details="; ".join(issues) if issues else "OK",
        severity="critical",
    )
105
+
106
+
107
def _identity_check(object_data: dict, expected: dict) -> VerifyCheck:
    """Check that the object has unit scale and sits in the expected collection.

    Missing ``scale`` (or a missing ``collection`` when one is expected)
    is a critical failure. Scale is compared per axis against 1.0 with a
    0.001 tolerance.
    """
    scale = _get_list(object_data.get("scale"), 3)
    if scale is None:
        return VerifyCheck(
            name="identity",
            passed=False,
            details=_missing_details("scale"),
            severity="critical",
        )

    expected_collection = expected.get("collection")
    actual_collection = object_data.get("collection")
    if expected_collection is not None and actual_collection is None:
        return VerifyCheck(
            name="identity",
            passed=False,
            details=_missing_details("collection"),
            severity="critical",
        )

    tolerance = 0.001
    scale_ok = all(abs(component - 1.0) <= tolerance for component in scale)
    collection_ok = (
        expected_collection is None or actual_collection == expected_collection
    )

    problems: list[str] = []
    if not scale_ok:
        devs = _axis_deviations(scale, [1.0, 1.0, 1.0], ("x", "y", "z"))
        problems.append(_format_axis_details("scale", devs, tolerance))
    if not collection_ok:
        problems.append(
            f"collection mismatch: actual={actual_collection!r}, expected={expected_collection!r}"
        )

    ok = scale_ok and collection_ok
    return VerifyCheck(
        name="identity",
        passed=ok,
        details="OK" if ok else "; ".join(problems),
        severity="critical",
    )
150
+
151
+
152
def _placement_check(object_data: dict, expected: dict) -> VerifyCheck:
    """Compare actual vs expected world location per axis (tolerance 0.001).

    A missing/invalid location on either side is a critical failure.
    """
    measured = _get_list(object_data.get("location"), 3)
    target = _get_list(expected.get("location"), 3)
    if measured is None:
        return VerifyCheck(
            name="placement",
            passed=False,
            details=_missing_details("location"),
            severity="critical",
        )
    if target is None:
        return VerifyCheck(
            name="placement",
            passed=False,
            details=_missing_details("expected.location"),
            severity="critical",
        )

    tolerance = 0.001
    devs = _axis_deviations(measured, target, ("x", "y", "z"))
    within = all(abs(dev.delta) <= tolerance for dev in devs)
    return VerifyCheck(
        name="placement",
        passed=within,
        details=_format_axis_details("location", devs, tolerance),
        severity="critical",
    )
180
+
181
+
182
+ def _neighbors_lookup(neighbors: object) -> dict[str, dict]:
183
+ if not isinstance(neighbors, list):
184
+ return {}
185
+ lookup: dict[str, dict] = {}
186
+ for neighbor in neighbors:
187
+ if isinstance(neighbor, dict) and "name" in neighbor:
188
+ lookup[str(neighbor["name"])] = neighbor
189
+ return lookup
190
+
191
+
192
def _connection_check(object_data: dict, expected: dict) -> VerifyCheck:
    """Compare measured neighbor distances against the expected list.

    Each expected entry must carry ``name`` and ``min_distance``; the
    matching actual neighbor must report ``closest_distance``. The two
    distances must agree within 0.002 in either direction.
    NOTE(review): despite the key name ``min_distance``, the comparison is
    symmetric (|delta| <= tolerance), not a one-sided minimum — confirm
    against the solver's contract before changing.
    """
    expected_neighbors = expected.get("neighbors")
    if expected_neighbors is None:
        # No expectations supplied: vacuously passes.
        return VerifyCheck(
            name="connection",
            passed=True,
            details="No neighbor expectations",
            severity="warning",
        )
    if not isinstance(expected_neighbors, list):
        return VerifyCheck(
            name="connection",
            passed=False,
            details=_missing_details("expected.neighbors"),
            severity="warning",
        )

    lookup = _neighbors_lookup(object_data.get("neighbors"))
    tolerance = 0.002
    problems: list[str] = []
    for entry in expected_neighbors:
        if not isinstance(entry, dict) or "name" not in entry:
            problems.append("Invalid expected neighbor entry")
            continue
        neighbor_name = str(entry.get("name"))
        wanted = entry.get("min_distance")
        if wanted is None:
            problems.append(f"Missing expected min_distance for {neighbor_name}")
            continue
        found = lookup.get(neighbor_name)
        if found is None:
            problems.append(f"Missing neighbor: {neighbor_name}")
            continue
        measured = found.get("closest_distance")
        if measured is None:
            problems.append(f"Missing closest_distance for {neighbor_name}")
            continue
        try:
            measured_value = float(measured)
            wanted_value = float(wanted)
        except (TypeError, ValueError):
            problems.append(f"Invalid distance data for {neighbor_name}")
            continue
        gap = measured_value - wanted_value
        if abs(gap) > tolerance:
            problems.append(
                f"{neighbor_name}: actual={measured_value:.4f}, expected={wanted_value:.4f}, "
                f"delta={gap:.4f}"
            )

    ok = not problems
    return VerifyCheck(
        name="connection",
        passed=ok,
        details="All connections within tolerance" if ok else "; ".join(problems),
        severity="warning",
    )
250
+
251
+
252
def _dimensions_check(object_data: dict, expected: dict) -> VerifyCheck:
    """Compare actual vs expected bounding dimensions per axis.

    Uses a looser 0.005 tolerance than placement/identity; failures are
    warnings, not critical.
    """
    measured = _get_list(object_data.get("dimensions"), 3)
    target = _get_list(expected.get("dimensions"), 3)
    if measured is None:
        return VerifyCheck(
            name="dimensions",
            passed=False,
            details=_missing_details("dimensions"),
            severity="warning",
        )
    if target is None:
        return VerifyCheck(
            name="dimensions",
            passed=False,
            details=_missing_details("expected.dimensions"),
            severity="warning",
        )

    tolerance = 0.005
    devs = _axis_deviations(measured, target, ("x", "y", "z"))
    within = all(abs(dev.delta) <= tolerance for dev in devs)
    return VerifyCheck(
        name="dimensions",
        passed=within,
        details=_format_axis_details("dimensions", devs, tolerance),
        severity="warning",
    )
282
+
283
+
284
+ def _summarize(checks: list[VerifyCheck]) -> str:
285
+ failed = [check.name for check in checks if not check.passed]
286
+ if not failed:
287
+ return "All checks passed"
288
+ return f"{len(failed)} of {len(checks)} checks failed: {', '.join(failed)}"
289
+
290
+
291
def verify(object_name: str, object_data: dict, expected: dict) -> VerifyResult:
    """Verify a post-build object against expected values.

    Runs all five checks; the overall ``passed`` flag considers only the
    checks marked ``critical`` (warnings are reported but do not fail the
    result).
    """
    checks = [
        _mesh_health_check(object_data),
        _identity_check(object_data, expected),
        _placement_check(object_data, expected),
        _connection_check(object_data, expected),
        _dimensions_check(object_data, expected),
    ]
    critical_ok = all(
        check.passed for check in checks if check.severity == "critical"
    )
    return VerifyResult(
        object_name=object_name,
        passed=critical_ok,
        checks=checks,
        summary=_summarize(checks),
    )
cortex/server.py ADDED
@@ -0,0 +1,226 @@
1
+ """Cortex MCP server — tool registration and stdio transport."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ from typing import Any
7
+
8
+ from mcp.server import Server
9
+ from mcp.server.stdio import stdio_server
10
+ from mcp.types import TextContent, Tool
11
+
12
+ from cortex.tools.decompose import handle_decompose
13
+ from cortex.tools.research import handle_research
14
+ from cortex.tools.validate import handle_validate
15
+ from cortex.tools.solve import handle_solve
16
+ from cortex.tools.solve_scene import handle_solve_scene
17
+ from cortex.tools.verify import handle_verify
18
+
19
+ # ---------------------------------------------------------------------------
20
+ # Server instance
21
+ # ---------------------------------------------------------------------------
22
+
23
+ server = Server("cortex")
24
+
25
+ # ---------------------------------------------------------------------------
26
+ # Tool definitions
27
+ # ---------------------------------------------------------------------------
28
+
29
# Static tool catalogue served verbatim by list_tools().
TOOLS: list[Tool] = [
    Tool(
        name="decompose",
        description=(
            "Decompose a subject into a hierarchical part/object tree with "
            "research checklists. First step in the build pipeline."
        ),
        inputSchema={
            "type": "object",
            "properties": {
                "subject": {
                    "type": "string",
                    "description": "The object or scene to decompose (e.g. 'office chair', 'city block').",
                },
                "variant": {
                    "type": "string",
                    "description": "Optional variant (e.g. 'gaming', 'modern').",
                    "default": "",
                },
                "scope": {
                    "type": "string",
                    "enum": ["object", "scene"],
                    "description": "Whether to decompose as a single object or a scene of objects.",
                    "default": "object",
                },
                "detail_level": {
                    "type": "string",
                    "enum": ["basic", "detailed", "exhaustive"],
                    "description": "How deep to decompose.",
                    "default": "detailed",
                },
            },
            "required": ["subject"],
        },
    ),
    Tool(
        name="research",
        description=(
            "Validate research data completeness and sanity-check dimensions. "
            "Tells the LLM what data is still missing before recipe creation."
        ),
        inputSchema={
            "type": "object",
            "properties": {
                "hierarchy": {
                    "type": "object",
                    "description": "Hierarchy from decompose output.",
                },
                "filled_data": {
                    "type": "object",
                    "description": "Partial research data: {part_name: {dimensions: {w,d,h}, ...}}.",
                    "default": None,
                },
            },
            "required": ["hierarchy"],
        },
    ),
    Tool(
        name="validate",
        description=(
            "Validate a part recipe for structural correctness before solving. "
            "Checks schema, references, cycles, and constraint validity."
        ),
        inputSchema={
            "type": "object",
            "properties": {
                "recipe": {
                    "type": "object",
                    "description": "The part recipe to validate.",
                },
            },
            "required": ["recipe"],
        },
    ),
    Tool(
        name="solve",
        description=(
            "Solve part positions from a validated recipe using constraint resolution. "
            "Returns absolute positions, build order, and any conflicts."
        ),
        inputSchema={
            "type": "object",
            "properties": {
                "recipe": {
                    "type": "object",
                    "description": "A validated part recipe.",
                },
            },
            "required": ["recipe"],
        },
    ),
    Tool(
        name="solve_scene",
        description=(
            "Solve object placements in a scene from scene-level constraints. "
            "Returns positions, rotations, and scales for each object."
        ),
        inputSchema={
            "type": "object",
            "properties": {
                "scene_recipe": {
                    "type": "object",
                    "description": "A scene recipe with objects, zones, and constraints.",
                },
            },
            "required": ["scene_recipe"],
        },
    ),
    Tool(
        name="verify",
        description=(
            "Verify a built object against expected solver output. "
            "Checks mesh health, transforms, placement, connections, and dimensions."
        ),
        inputSchema={
            "type": "object",
            "properties": {
                "object_name": {
                    "type": "string",
                    "description": "Name of the object to verify.",
                },
                "object_data": {
                    "type": "object",
                    "description": "Actual measured data from the 3D engine.",
                },
                "expected": {
                    "type": "object",
                    "description": "Expected values from solver output.",
                },
            },
            "required": ["object_name", "object_data", "expected"],
        },
    ),
]
163
+
164
+
165
+ # ---------------------------------------------------------------------------
166
+ # Handlers
167
+ # ---------------------------------------------------------------------------
168
+
169
+
170
@server.list_tools()
async def list_tools() -> list[Tool]:
    """Return the static tool catalogue."""
    return TOOLS
173
+
174
+
175
@server.call_tool()
async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
    """Route a tool call to its handler and serialise the result as JSON.

    Unknown tool names and handler exceptions are both reported as a JSON
    ``{"error": ...}`` payload rather than raised, keeping the transport
    alive.
    """
    dispatch = {
        "decompose": handle_decompose,
        "research": handle_research,
        "validate": handle_validate,
        "solve": handle_solve,
        "solve_scene": handle_solve_scene,
        "verify": handle_verify,
    }

    try:
        handler = dispatch[name]
    except KeyError:
        return [
            TextContent(
                type="text", text=json.dumps({"error": f"Unknown tool: {name}"})
            )
        ]

    try:
        # default=str keeps non-JSON-native values (paths, dataclasses, ...)
        # serialisable; serialisation errors are caught alongside handler
        # errors, matching the original behaviour.
        payload = handler(arguments)
        return [TextContent(type="text", text=json.dumps(payload, default=str))]
    except Exception as exc:  # boundary handler: report, don't crash the server
        return [
            TextContent(
                type="text",
                text=json.dumps({"error": str(exc), "tool": name}),
            )
        ]
205
+
206
+
207
+ # ---------------------------------------------------------------------------
208
+ # Entry point
209
+ # ---------------------------------------------------------------------------
210
+
211
+
212
async def _run() -> None:
    """Serve MCP requests over stdio until the streams close."""
    async with stdio_server() as (reader, writer):
        init_options = server.create_initialization_options()
        await server.run(reader, writer, init_options)
217
+
218
+
219
def main() -> None:
    """Synchronous console entry point: drive the async server to completion."""
    # Imported locally so merely importing this module stays side-effect free.
    import asyncio

    asyncio.run(_run())


if __name__ == "__main__":
    main()
@@ -0,0 +1 @@
1
+ """MCP tool handlers for Cortex."""
@@ -0,0 +1,24 @@
1
+ """Tool handler for the ``decompose`` MCP tool."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Any
6
+
7
+ from cortex.core.decomposer import decompose
8
+ from cortex.types import _to_dict
9
+
10
+
11
def handle_decompose(arguments: dict[str, Any]) -> dict[str, Any]:
    """Parse input and delegate to the decomposer.

    ``subject`` is required; ``variant``, ``scope``, and ``detail_level``
    fall back to the same defaults advertised in the tool schema.
    """
    result = decompose(
        subject=arguments["subject"],
        variant=arguments.get("variant", ""),
        scope=arguments.get("scope", "object"),
        detail_level=arguments.get("detail_level", "detailed"),
    )
    return _to_dict(result)
@@ -0,0 +1,17 @@
1
+ """Tool handler for the ``research`` MCP tool."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Any
6
+
7
+ from cortex.core.decomposer import research
8
+ from cortex.types import _to_dict
9
+
10
+
11
+ def handle_research(arguments: dict[str, Any]) -> dict[str, Any]:
12
+ """Parse input and delegate to the research checker."""
13
+ hierarchy: dict = arguments["hierarchy"]
14
+ filled_data: dict | None = arguments.get("filled_data")
15
+
16
+ result = research(hierarchy=hierarchy, filled_data=filled_data)
17
+ return _to_dict(result)
cortex/tools/solve.py ADDED
@@ -0,0 +1,16 @@
1
+ """Tool handler for the ``solve`` MCP tool."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Any
6
+
7
+ from cortex.core.solver import solve
8
+ from cortex.tools.validate import _parse_recipe
9
+ from cortex.types import _to_dict
10
+
11
+
12
+ def handle_solve(arguments: dict[str, Any]) -> dict[str, Any]:
13
+ """Parse input and delegate to the solver."""
14
+ recipe = _parse_recipe(arguments["recipe"])
15
+ result = solve(recipe)
16
+ return _to_dict(result)
@@ -0,0 +1,57 @@
1
+ """Tool handler for the ``solve_scene`` MCP tool."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Any
6
+
7
+ from cortex.core.scene_solver import solve_scene
8
+ from cortex.types import (
9
+ Dimensions,
10
+ PartSpec,
11
+ SceneConstraint,
12
+ SceneRecipe,
13
+ _to_dict,
14
+ )
15
+
16
+
17
def _parse_scene_recipe(raw: dict[str, Any]) -> SceneRecipe:
    """Convert a raw JSON dict into a typed SceneRecipe.

    Dimensions accept both short ("w"/"d"/"h") and long
    ("width"/"depth"/"height") keys, defaulting to 0. Missing bounds
    default to a 10x10x10 box at the origin.
    """
    parsed_objects: dict[str, PartSpec] = {}
    for obj_name, obj_spec in raw.get("objects", {}).items():
        size = obj_spec.get("dimensions", {})
        parsed_objects[obj_name] = PartSpec(
            name=obj_name,
            dimensions=Dimensions(
                width=float(size.get("w", size.get("width", 0))),
                depth=float(size.get("d", size.get("depth", 0))),
                height=float(size.get("h", size.get("height", 0))),
            ),
            type=obj_spec.get("type", ""),
            params=obj_spec.get("params", {}),
            metadata=obj_spec.get("metadata", {}),
        )

    # NOTE(review): falls back from "object" to "objects" — presumably some
    # producers emit a plural key; confirm whether SceneConstraint.object
    # accepts a list in that case.
    parsed_constraints = [
        SceneConstraint(
            type=entry["type"],
            object=entry.get("object", entry.get("objects", "")),
            params=entry.get("params", {}),
        )
        for entry in raw.get("constraints", [])
    ]

    return SceneRecipe(
        name=raw.get("name", ""),
        bounds=raw.get("bounds", {"min": [0, 0, 0], "max": [10, 10, 10]}),
        objects=parsed_objects,
        zones=raw.get("zones", {}),
        constraints=parsed_constraints,
    )
51
+
52
+
53
def handle_solve_scene(arguments: dict[str, Any]) -> dict[str, Any]:
    """Parse the scene recipe, run the scene solver, and serialise the result."""
    parsed = _parse_scene_recipe(arguments["scene_recipe"])
    return _to_dict(solve_scene(parsed))
+ return _to_dict(result)