raise-core 2.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- raise_core/__init__.py +12 -0
- raise_core/governance/__init__.py +7 -0
- raise_core/graph/__init__.py +6 -0
- raise_core/graph/backends/__init__.py +1 -0
- raise_core/graph/backends/filesystem.py +83 -0
- raise_core/graph/backends/models.py +22 -0
- raise_core/graph/backends/protocol.py +27 -0
- raise_core/graph/engine.py +236 -0
- raise_core/graph/models.py +208 -0
- raise_core/graph/query.py +496 -0
- raise_core/workflow/__init__.py +9 -0
- raise_core-2.2.1.dist-info/METADATA +9 -0
- raise_core-2.2.1.dist-info/RECORD +14 -0
- raise_core-2.2.1.dist-info/WHEEL +4 -0
raise_core/__init__.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
"""RaiSE Core - Shared domain models for the RaiSE framework.

rai-core is the shared domain contract between COMMUNITY (rai-cli) and PRO (rai-server).
It contains the vocabulary, protocols, and logic that any RaiSE component needs.

Domain axes:
- graph: Models, engine, query, scoring, backends (E275)
- workflow: Work item types, state machines, gates (future)
- governance: Extensible artifact type schema (future)
"""

# Package version; keep in sync with the wheel metadata (raise_core-X.Y.Z.dist-info).
__version__ = "2.2.1"
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
"""Governance domain — extensible artifact type schema.
|
|
2
|
+
|
|
3
|
+
Placeholder for future implementation. Will contain:
|
|
4
|
+
- Extensible artifact type registry (beyond fixed CoreArtifactType)
|
|
5
|
+
- Governance vocabulary shared across CLI, server, and integrations
|
|
6
|
+
- Schema versioning and migration support
|
|
7
|
+
"""
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Graph backend implementations — storage abstraction for the knowledge graph."""
|
|
@@ -0,0 +1,83 @@
|
|
|
1
|
+
"""Filesystem-based graph backend.
|
|
2
|
+
|
|
3
|
+
Built-in COMMUNITY backend — persists the knowledge graph to local JSON files.
|
|
4
|
+
Zero external dependencies beyond NetworkX (already a core dependency).
|
|
5
|
+
|
|
6
|
+
Architecture: ADR-036 (KnowledgeGraphBackend)
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
import json
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
from typing import Any
|
|
14
|
+
|
|
15
|
+
import networkx as nx # type: ignore[import-untyped]
|
|
16
|
+
|
|
17
|
+
from raise_core.graph.backends.models import BackendHealth
|
|
18
|
+
from raise_core.graph.engine import Graph
|
|
19
|
+
|
|
20
|
+
__all__ = ["FilesystemGraphBackend", "get_active_backend"]
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class FilesystemGraphBackend:
    """Built-in graph backend — persists to local filesystem.

    COMMUNITY backend. Zero external dependencies.
    Registered as entry point 'local' in rai.graph.backends.

    Args:
        path: Path to the graph JSON file (e.g. `.raise/rai/memory/index.json`).
    """

    def __init__(self, path: Path) -> None:
        self.path = path

    def persist(self, graph: Graph) -> None:
        """Serialize *graph* to the configured JSON file.

        Uses the NetworkX node_link_data format. Parent directories are
        created on demand; non-JSON-native values are stringified.

        Args:
            graph: The graph to persist.
        """
        serialized: dict[str, Any] = nx.node_link_data(graph.graph)  # type: ignore[assignment]
        payload = json.dumps(serialized, indent=2, default=str)
        self.path.parent.mkdir(parents=True, exist_ok=True)
        self.path.write_text(payload, encoding="utf-8")

    def load(self) -> Graph:
        """Read the graph back from the configured path.

        Returns:
            Graph instance with loaded data.

        Raises:
            FileNotFoundError: If the file doesn't exist.
            json.JSONDecodeError: If the file is not valid JSON.
        """
        raw = self.path.read_text(encoding="utf-8")
        payload: dict[str, Any] = json.loads(raw)
        result = Graph()
        result.graph = nx.node_link_graph(payload, directed=True, multigraph=True)
        return result

    def health(self) -> BackendHealth:
        """Report backend health. The local filesystem is always reachable."""
        return BackendHealth(
            status="healthy",
            message="Filesystem backend operational",
            metadata={"backend": "filesystem"},
        )
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def get_active_backend(path: Path) -> FilesystemGraphBackend:
    """Resolve the active graph backend for the given path.

    Currently always returns the COMMUNITY FilesystemGraphBackend. Future:
    tier-based selection via env vars (DualWriteBackend when RAI_SERVER_URL set).

    Args:
        path: Path to the graph JSON file.

    Returns:
        A backend instance bound to *path*.
    """
    return FilesystemGraphBackend(path=path)
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
"""Pydantic models for graph backend boundaries.
|
|
2
|
+
|
|
3
|
+
Architecture: ADR-036 (KnowledgeGraphBackend)
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
from typing import Any
|
|
9
|
+
|
|
10
|
+
from pydantic import BaseModel, Field
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class BackendHealth(BaseModel):
    """Outcome of a graph-backend health probe."""

    # Coarse state flag; consumers compare against the three literals below.
    status: str = Field(..., description="'healthy', 'degraded', or 'unavailable'")
    message: str = Field(default="", description="Human-readable status detail")
    metadata: dict[str, Any] = Field(
        default_factory=dict, description="Backend-specific diagnostics"
    )
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
"""Protocol contract for graph backend implementations.
|
|
2
|
+
|
|
3
|
+
Architecture: ADR-036 (Graph Backend)
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
from typing import TYPE_CHECKING, Protocol, runtime_checkable
|
|
9
|
+
|
|
10
|
+
from raise_core.graph.backends.models import BackendHealth
|
|
11
|
+
|
|
12
|
+
if TYPE_CHECKING:
|
|
13
|
+
from raise_core.graph.engine import Graph
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
@runtime_checkable
class KnowledgeGraphBackend(Protocol):
    """ADR-036: Graph storage abstraction.

    Implementations: FilesystemGraphBackend (built-in), ApiGraphBackend (PRO).
    """

    # Write the entire graph to the backing store.
    def persist(self, graph: Graph) -> None: ...

    # Read the entire graph from the backing store.
    def load(self) -> Graph: ...

    # Probe availability of the backing store.
    def health(self) -> BackendHealth: ...
|
|
@@ -0,0 +1,236 @@
|
|
|
1
|
+
"""Knowledge graph engine.
|
|
2
|
+
|
|
3
|
+
Wraps NetworkX MultiDiGraph for storing and querying cross-domain concepts.
|
|
4
|
+
This is a pure in-memory graph; persistence is handled by
|
|
5
|
+
KnowledgeGraphBackend implementations (ADR-036).
|
|
6
|
+
|
|
7
|
+
Architecture: ADR-019 Unified Context Graph Architecture
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from __future__ import annotations
|
|
11
|
+
|
|
12
|
+
import logging
|
|
13
|
+
from collections.abc import Iterator
|
|
14
|
+
from typing import Any
|
|
15
|
+
|
|
16
|
+
import networkx as nx # type: ignore[import-untyped]
|
|
17
|
+
|
|
18
|
+
from raise_core.graph.models import (
|
|
19
|
+
EdgeType,
|
|
20
|
+
GraphEdge,
|
|
21
|
+
GraphNode,
|
|
22
|
+
NodeType,
|
|
23
|
+
)
|
|
24
|
+
|
|
25
|
+
logger = logging.getLogger(__name__)
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class Graph:
    """NetworkX-based knowledge graph.

    Wraps a NetworkX MultiDiGraph to provide typed operations for adding,
    retrieving, and persisting concepts and relationships.

    Attributes:
        graph: The underlying NetworkX MultiDiGraph.

    Examples:
        >>> g = Graph()
        >>> node = GraphNode(
        ...     id="PAT-001",
        ...     type="pattern",
        ...     content="Test pattern",
        ...     created="2026-02-03"
        ... )
        >>> g.add_concept(node)
        >>> g.node_count
        1
    """

    def __init__(self) -> None:
        """Initialize an empty graph."""
        self.graph: nx.MultiDiGraph[str] = nx.MultiDiGraph()

    def _reconstruct_node(self, node_id: str, data: dict[str, Any]) -> GraphNode:
        """Reconstruct a typed GraphNode from serialized dict."""
        # Mutates the passed dict — callers pass a fresh copy (dict(...)).
        data["id"] = node_id
        node_type = data.get("type", "")
        cls = GraphNode.registered_types().get(node_type)
        if cls:
            return cls.model_validate(data)
        if node_type:
            # Unknown type: fall back to the base model so reads still succeed.
            logger.warning(
                "Node type '%s' not registered (missing plugin?). "
                "Run 'rai memory build' to regenerate graph.",
                node_type,
            )
        return GraphNode.model_validate(data)

    def add_concept(self, node: GraphNode) -> None:
        """Add a concept node to the graph.

        Re-adding an existing id updates that node's attributes in place.

        Args:
            node: The concept node to add.
        """
        self.graph.add_node(node.id, **node.model_dump())

    def add_relationship(self, edge: GraphEdge) -> None:
        """Add a relationship edge to the graph.

        NOTE(review): if edge.metadata contains a 'type' or 'weight' key this
        raises TypeError (duplicate keyword argument) — iter_relationships
        strips those keys, so round-tripped edges are safe.

        Args:
            edge: The concept edge to add.
        """
        self.graph.add_edge(
            edge.source,
            edge.target,
            type=edge.type,
            weight=edge.weight,
            **edge.metadata,
        )

    def get_concept(self, concept_id: str) -> GraphNode | None:
        """Get a concept by ID.

        Args:
            concept_id: The unique concept identifier.

        Returns:
            The GraphNode if found, None otherwise.
        """
        if concept_id not in self.graph.nodes:
            return None
        # Copy so _reconstruct_node's mutation doesn't touch graph state.
        data = dict(self.graph.nodes[concept_id])
        return self._reconstruct_node(concept_id, data)

    def get_concepts_by_type(self, node_type: NodeType) -> list[GraphNode]:
        """Get all concepts of a specific type.

        Linear scan over all nodes.

        Args:
            node_type: The node type to filter by.

        Returns:
            List of GraphNode instances matching the type.
        """
        concepts: list[GraphNode] = []
        node_id: str
        for node_id in self.graph.nodes:
            data: dict[str, Any] = dict(self.graph.nodes[node_id])
            if data.get("type") == node_type:
                concepts.append(self._reconstruct_node(node_id, data))
        return concepts

    def get_neighbors(
        self,
        concept_id: str,
        depth: int = 1,
        edge_types: list[EdgeType] | None = None,
    ) -> list[GraphNode]:
        """Get neighboring concepts via BFS traversal.

        Traversal is undirected (both out- and in-edges are followed).
        Result order is unspecified — nodes are collected from a set.

        Args:
            concept_id: Starting concept ID.
            depth: Maximum traversal depth (default 1).
            edge_types: Optional filter for edge types.

        Returns:
            List of neighboring GraphNode instances.
        """
        if concept_id not in self.graph.nodes:
            return []

        visited: set[str] = {concept_id}
        current_level: set[str] = {concept_id}
        neighbors: list[GraphNode] = []

        # One BFS frontier expansion per depth level.
        for _ in range(depth):
            next_level: set[str] = set()
            for nid in current_level:
                # Get outgoing edges
                out_edge: tuple[str, str, dict[str, Any]]
                for out_edge in self.graph.out_edges(nid, data=True):
                    target: str = out_edge[1]
                    edge_data: dict[str, Any] = out_edge[2]
                    edge_matches = (
                        edge_types is None or edge_data.get("type") in edge_types
                    )
                    if edge_matches and target not in visited:
                        visited.add(target)
                        next_level.add(target)
                # Get incoming edges
                in_edge: tuple[str, str, dict[str, Any]]
                for in_edge in self.graph.in_edges(nid, data=True):
                    source: str = in_edge[0]
                    edge_data = in_edge[2]
                    edge_matches = (
                        edge_types is None or edge_data.get("type") in edge_types
                    )
                    if edge_matches and source not in visited:
                        visited.add(source)
                        next_level.add(source)
            current_level = next_level

        # Convert to GraphNode instances
        node_id: str
        for node_id in visited:
            if node_id != concept_id:
                concept = self.get_concept(node_id)
                if concept:
                    neighbors.append(concept)

        return neighbors

    def iter_concepts(self) -> Iterator[GraphNode]:
        """Iterate over all concepts in the graph.

        Skips nodes that fail deserialization (e.g. schema drift from a removed
        plugin) and emits a warning instead of crashing. See RAISE-136.

        Yields:
            GraphNode instances for each node.
        """
        node_id: str
        for node_id in self.graph.nodes:
            data: dict[str, Any] = dict(self.graph.nodes[node_id])
            try:
                yield self._reconstruct_node(node_id, data)
            except Exception as e:
                # Broad catch is deliberate: one bad node must not abort the scan.
                logger.warning(
                    "Skipping node '%s' (type=%s): %s",
                    node_id,
                    data.get("type", "unknown"),
                    e,
                )

    def iter_relationships(self) -> Iterator[GraphEdge]:
        """Iterate over all relationships in the graph.

        Yields:
            GraphEdge instances for each edge.
        """
        edge_tuple: tuple[str, str, dict[str, Any]]
        for edge_tuple in self.graph.edges(data=True):
            source: str = edge_tuple[0]
            target: str = edge_tuple[1]
            data: dict[str, Any] = edge_tuple[2]
            edge_type: str = data.get("type", "related_to")
            weight: float = float(data.get("weight", 1.0))
            # Everything except the reserved keys becomes edge metadata,
            # which keeps add_relationship's **metadata expansion safe.
            metadata: dict[str, Any] = {
                k: v for k, v in data.items() if k not in ("type", "weight")
            }
            yield GraphEdge(
                source=source,
                target=target,
                type=edge_type,
                weight=weight,
                metadata=metadata,
            )

    @property
    def node_count(self) -> int:
        """Get the number of nodes in the graph."""
        return self.graph.number_of_nodes()

    @property
    def edge_count(self) -> int:
        """Get the number of edges in the graph."""
        return self.graph.number_of_edges()
|
|
@@ -0,0 +1,208 @@
|
|
|
1
|
+
"""Pydantic models for the knowledge graph.
|
|
2
|
+
|
|
3
|
+
Core data structures: nodes, edges, type systems. All knowledge in RaiSE
|
|
4
|
+
(patterns, governance, discovery, sessions) is represented as typed nodes
|
|
5
|
+
and directed edges in a queryable graph.
|
|
6
|
+
|
|
7
|
+
Architecture: ADR-019 Unified Context Graph Architecture
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from __future__ import annotations
|
|
11
|
+
|
|
12
|
+
from typing import Any, ClassVar
|
|
13
|
+
|
|
14
|
+
from pydantic import BaseModel, Field, model_validator
|
|
15
|
+
|
|
16
|
+
# --- Node type system (open for plugins) ---
|
|
17
|
+
NodeType = str
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class GraphNode(BaseModel):
    """Base class for all knowledge graph nodes. Auto-registers subclasses.

    Pattern: pytest Node + Airflow BaseOperator + Kedro AbstractDataset.
    Subclasses define node_type and optionally add typed fields.

    Examples:
        >>> class JiraSprintNode(GraphNode, node_type="jira.sprint"):
        ...     sprint_id: str = ""
        >>> node = JiraSprintNode(id="S1", content="Sprint 1", created="2026-01-01")
        >>> node.type
        'jira.sprint'
    """

    # Maps node_type string -> subclass; populated by __init_subclass__.
    _registry: ClassVar[dict[str, type[GraphNode]]] = {}

    id: str = Field(..., description="Unique identifier (e.g., 'PAT-001', '§2')")
    type: str = Field(default="", description="Node type (auto-set by subclass)")
    content: str = Field(..., description="Main text content or description")
    source_file: str | None = Field(default=None, description="Path to source file")
    created: str = Field(..., description="ISO timestamp when created")
    metadata: dict[str, Any] = Field(
        default_factory=dict, description="Type-specific attributes"
    )

    def __init_subclass__(cls, node_type: str | None = None, **kwargs: Any) -> None:
        """Register *cls* under *node_type* so serialized nodes round-trip typed."""
        super().__init_subclass__(**kwargs)
        if node_type is not None:
            cls.__node_type__ = node_type  # type: ignore[attr-defined]
            GraphNode._registry[node_type] = cls

    @model_validator(mode="before")
    @classmethod
    def _set_default_type(cls, data: Any) -> Any:
        """Auto-set type field from subclass registration.

        Guarded with isinstance: a mode="before" validator may receive
        non-dict input (e.g. an existing model instance); such input must
        pass through untouched for pydantic's normal handling.
        """
        if isinstance(data, dict) and hasattr(cls, "__node_type__"):
            node_type: str = cls.__node_type__  # type: ignore[attr-defined]
            data.setdefault("type", node_type)
        return data

    @classmethod
    def resolve(cls, node_type: str) -> type[GraphNode]:
        """Resolve a node_type string to its registered class.

        Raises:
            KeyError: If *node_type* was never registered.
        """
        return cls._registry[node_type]

    @classmethod
    def registered_types(cls) -> dict[str, type[GraphNode]]:
        """Return a copy of all registered node type mappings."""
        return dict(cls._registry)

    @property
    def token_estimate(self) -> int:
        """Estimate tokens for this concept.

        Returns:
            Estimated token count (content length // 4).
        """
        return len(self.content) // 4
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
# --- Core node types — documented extension points ---
# (20 built-in subclasses; each docstring notes its intended extension axis.)


class PatternNode(GraphNode, node_type="pattern"):
    """Learned patterns from memory. Extension: confidence scores, decay metadata."""


class CalibrationNode(GraphNode, node_type="calibration"):
    """Velocity/estimation data. Extension: per-team calibration fields."""


class SessionNode(GraphNode, node_type="session"):
    """Session history records. Extension: agent-specific session data."""


class PrincipleNode(GraphNode, node_type="principle"):
    """Constitution principles. Extension: org-level principle overrides."""


class RequirementNode(GraphNode, node_type="requirement"):
    """PRD requirements. Extension: priority, stakeholder fields."""


class OutcomeNode(GraphNode, node_type="outcome"):
    """Vision outcomes. Extension: OKR linkage fields."""


class ProjectNode(GraphNode, node_type="project"):
    """Project definitions. Extension: multi-repo project metadata."""


class EpicNode(GraphNode, node_type="epic"):
    """Epic scopes. Extension: Jira epic fields (key, board, sprint)."""


class StoryNode(GraphNode, node_type="story"):
    """Story work items. Extension: PM tool fields (assignee, status)."""


class SkillNode(GraphNode, node_type="skill"):
    """Skill metadata. Extension: registry, versioning, ownership."""


class DecisionNode(GraphNode, node_type="decision"):
    """Architecture decisions. Extension: review status, superseded-by."""


class GuardrailNode(GraphNode, node_type="guardrail"):
    """Code standards. Extension: enforcement level, exceptions."""


class TermNode(GraphNode, node_type="term"):
    """Glossary definitions. Extension: translations, domain scope."""


class ComponentNode(GraphNode, node_type="component"):
    """Discovered code components. Extension: language-specific metadata."""


class ModuleNode(GraphNode, node_type="module"):
    """Architecture module knowledge. Extension: dependency metrics."""


class ArchitectureNode(GraphNode, node_type="architecture"):
    """Architecture docs. Extension: diagram links, review dates."""


class BoundedContextNode(GraphNode, node_type="bounded_context"):
    """DDD bounded contexts. Extension: team ownership, API surface."""


class LayerNode(GraphNode, node_type="layer"):
    """Architectural layers. Extension: deployment mapping."""


class ReleaseNode(GraphNode, node_type="release"):
    """Release milestones. Extension: changelog, artifact URLs."""


class ArtifactNode(GraphNode, node_type="artifact"):
    """Work artifacts (scope, design, plan docs). Extension: versioning, approval status."""
|
|
161
|
+
|
|
162
|
+
|
|
163
|
+
# --- Edge type system (open for plugins, flat — no hierarchy needed) ---
|
|
164
|
+
EdgeType = str
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
class CoreEdgeTypes:
    """String constants naming the 11 core edge types shipped with RaiSE.

    Edge types form an open, flat vocabulary (EdgeType is just ``str``);
    these constants cover the relationships the core itself emits.
    """

    LEARNED_FROM = "learned_from"
    GOVERNED_BY = "governed_by"
    APPLIES_TO = "applies_to"
    NEEDS_CONTEXT = "needs_context"
    IMPLEMENTS = "implements"
    PART_OF = "part_of"
    RELATED_TO = "related_to"
    DEPENDS_ON = "depends_on"
    BELONGS_TO = "belongs_to"
    IN_LAYER = "in_layer"
    CONSTRAINED_BY = "constrained_by"
|
|
181
|
+
|
|
182
|
+
|
|
183
|
+
class GraphEdge(BaseModel):
    """A directed relationship between two concepts. Open type system.

    Examples:
        >>> edge = GraphEdge(
        ...     source="PAT-001",
        ...     target="SES-015",
        ...     type="learned_from",
        ...     weight=1.0,
        ...     metadata={"confidence": 0.9}
        ... )
        >>> edge.source
        'PAT-001'
        >>> edge.type
        'learned_from'
    """

    source: str = Field(..., description="Source node ID")
    target: str = Field(..., description="Target node ID")
    type: EdgeType = Field(..., description="Relationship type")
    weight: float = Field(default=1.0, description="Edge weight for ranking")
    metadata: dict[str, Any] = Field(
        default_factory=dict, description="Additional relationship attributes"
    )
|
|
@@ -0,0 +1,496 @@
|
|
|
1
|
+
"""Knowledge graph query engine.
|
|
2
|
+
|
|
3
|
+
Provides query capabilities for the knowledge graph, enabling skills to
|
|
4
|
+
retrieve relevant patterns, calibration data, governance principles,
|
|
5
|
+
and work items.
|
|
6
|
+
|
|
7
|
+
Architecture: ADR-019 Unified Context Graph Architecture
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from __future__ import annotations
|
|
11
|
+
|
|
12
|
+
import time
|
|
13
|
+
from datetime import date
|
|
14
|
+
from enum import StrEnum
|
|
15
|
+
from math import exp, log, sqrt
|
|
16
|
+
from typing import Any
|
|
17
|
+
|
|
18
|
+
from pydantic import BaseModel, Field
|
|
19
|
+
|
|
20
|
+
from raise_core.graph.engine import Graph
|
|
21
|
+
from raise_core.graph.models import EdgeType, GraphNode, NodeType
|
|
22
|
+
|
|
23
|
+
# --- Scoring constants ---
SCORING_HALF_LIFE_DAYS: int = 30  # recency half-life H (days) in the decay term
SCORING_W_RECENCY: float = 0.3  # weight of recency in the composite score
SCORING_W_RELEVANCE: float = 0.7  # weight of keyword relevance (sums to 1.0 with above)
SCORING_WILSON_Z: float = 1.96  # z-score for 95% confidence (Wilson lower bound)
SCORING_LOW_WILSON_THRESHOLD: float = 0.15  # cutoff for Wilson bounds; consumer not visible in this chunk
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class QueryStrategy(StrEnum):
|
|
32
|
+
"""Query strategy for context retrieval.
|
|
33
|
+
|
|
34
|
+
Attributes:
|
|
35
|
+
KEYWORD_SEARCH: Match keywords in node content, return top N by relevance.
|
|
36
|
+
CONCEPT_LOOKUP: Direct concept ID lookup with optional BFS neighbors.
|
|
37
|
+
"""
|
|
38
|
+
|
|
39
|
+
KEYWORD_SEARCH = "keyword_search"
|
|
40
|
+
CONCEPT_LOOKUP = "concept_lookup"
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
class Query(BaseModel):
    """Parameters describing a single context-retrieval request.

    Fields:
        query: Query string (keywords or concept ID).
        strategy: Query execution strategy.
        max_depth: Maximum BFS traversal depth (0-5).
        types: Optional filter for node types.
        edge_types: Optional filter for edge types (concept_lookup only).
        limit: Maximum number of results (1-50).

    Examples:
        >>> query = Query(query="planning estimation")
        >>> query = Query(
        ...     query="PAT-001",
        ...     strategy=QueryStrategy.CONCEPT_LOOKUP,
        ...     max_depth=2,
        ... )
    """

    query: str = Field(..., description="Query string (keywords or concept ID)")
    strategy: QueryStrategy = Field(
        default=QueryStrategy.KEYWORD_SEARCH, description="Query execution strategy"
    )
    max_depth: int = Field(
        default=1, ge=0, le=5, description="Maximum BFS traversal depth"
    )
    types: list[NodeType] | None = Field(
        default=None, description="Filter by node types"
    )
    edge_types: list[EdgeType] | None = Field(
        default=None, description="Filter by edge types (concept_lookup only)"
    )
    limit: int = Field(
        default=10, ge=1, le=50, description="Maximum number of results"
    )
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
class ArchitecturalContext(BaseModel):
    """Full architectural context for a module.

    Combines domain (bounded context), layer, constraints (guardrails),
    and dependencies into a single structured result.

    Attributes:
        module: The module node the context was assembled for.
        domain: Bounded-context node, if linked.
        layer: Architectural-layer node, if linked.
        constraints: Guardrail nodes constraining the module.
        dependencies: Nodes the module depends on.
    """

    module: GraphNode
    domain: GraphNode | None = None
    layer: GraphNode | None = None
    # `list` itself is the idiomatic default_factory — no lambda wrapper needed.
    constraints: list[GraphNode] = Field(default_factory=list)
    dependencies: list[GraphNode] = Field(default_factory=list)
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
class QueryMetadata(BaseModel):
    """Diagnostics attached to every query result.

    Fields:
        query: Original query string.
        strategy: Strategy used for execution.
        total_concepts: Number of concepts in result.
        total_available: Total matching concepts before limit applied.
        token_estimate: Estimated token count for result.
        execution_time_ms: Query execution time in milliseconds.
        types_found: Count of concepts by type.
    """

    query: str = Field(..., description="Original query string")
    strategy: QueryStrategy = Field(..., description="Strategy used")
    total_concepts: int = Field(..., description="Number of concepts returned")
    total_available: int = Field(
        default=0, description="Total matching concepts before limit applied"
    )
    token_estimate: int = Field(..., description="Estimated token count")
    execution_time_ms: float = Field(..., description="Execution time in ms")
    types_found: dict[str, int] = Field(
        default_factory=dict, description="Count of concepts by type"
    )
|
|
128
|
+
|
|
129
|
+
|
|
130
|
+
class QueryResult(BaseModel):
    """Result of context query.

    Attributes:
        concepts: Concepts matching the query.
        metadata: Query result metadata.
    """

    # `list` is the idiomatic default_factory (no lambda indirection needed).
    concepts: list[GraphNode] = Field(
        default_factory=list,
        description="Concepts matching the query",
    )
    metadata: QueryMetadata = Field(..., description="Query metadata")

    def to_json(self) -> str:
        """Serialize result to pretty-printed (indent=2) JSON."""
        return self.model_dump_json(indent=2)

    @classmethod
    def from_json(cls, json_str: str) -> QueryResult:
        """Deserialize result from JSON.

        Args:
            json_str: JSON produced by :meth:`to_json` (or schema-compatible).
        """
        return cls.model_validate_json(json_str)
|
|
152
|
+
|
|
153
|
+
|
|
154
|
+
def estimate_tokens(text: str) -> int:
    """Approximate the number of LLM tokens in *text*.

    Heuristic: one token per ~4 characters (integer division), so any
    string shorter than 4 characters estimates to 0.
    """
    char_count = len(text)
    return char_count // 4
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
def wilson_lower_bound(
    positives: int,
    negatives: int,
    z: float = SCORING_WILSON_Z,
) -> float:
    """Compute the Wilson score lower bound for binary ratings.

    Proven at Reddit/Yelp/Amazon scale. Deliberately conservative with small
    sample sizes — the right behavior for patterns with few evaluations.

    Args:
        positives: Number of positive evaluations.
        negatives: Number of negative evaluations.
        z: Z-score for confidence level (default: 1.96 = 95%).

    Returns:
        Wilson lower bound in [0, 1].

    Raises:
        ValueError: If total observations is 0.
    """
    total = positives + negatives
    if total == 0:
        raise ValueError("Cannot compute Wilson lower bound with 0 observations")
    phat = positives / total
    z_sq = z * z
    # Wilson interval: (phat + z²/2n − z·√((phat(1−phat) + z²/4n)/n)) / (1 + z²/n)
    centre = phat + z_sq / (2 * total)
    margin = z * sqrt((phat * (1 - phat) + z_sq / (4 * total)) / total)
    return (centre - margin) / (1 + z_sq / total)
|
|
193
|
+
|
|
194
|
+
|
|
195
|
+
def calculate_relevance_score(
|
|
196
|
+
content: str,
|
|
197
|
+
keywords: list[str],
|
|
198
|
+
created: str,
|
|
199
|
+
metadata: dict[str, Any] | None = None,
|
|
200
|
+
) -> float:
|
|
201
|
+
"""Calculate composite relevance score for a concept.
|
|
202
|
+
|
|
203
|
+
Foundational patterns (foundational=True or base=True) are exempt from
|
|
204
|
+
temporal decay and score on keyword relevance only. All other patterns use:
|
|
205
|
+
score = (w_r * recency + w_k * keyword_relevance) * wilson_modifier
|
|
206
|
+
|
|
207
|
+
Where recency uses half-life exponential decay (H=30d) and wilson_modifier
|
|
208
|
+
is the Wilson score lower bound of positive evaluations.
|
|
209
|
+
"""
|
|
210
|
+
if metadata is None:
|
|
211
|
+
metadata = {}
|
|
212
|
+
|
|
213
|
+
content_lower = content.lower()
|
|
214
|
+
|
|
215
|
+
# Normalized keyword relevance
|
|
216
|
+
if keywords:
|
|
217
|
+
hits = sum(1 for kw in keywords if kw.lower() in content_lower)
|
|
218
|
+
relevance = hits / len(keywords)
|
|
219
|
+
else:
|
|
220
|
+
relevance = 0.0
|
|
221
|
+
|
|
222
|
+
# Foundational patterns: exempt from decay — check both field names (PAT-E-153)
|
|
223
|
+
if metadata.get("foundational") or metadata.get("base"):
|
|
224
|
+
return round(relevance, 4)
|
|
225
|
+
|
|
226
|
+
# Recency: half-life exponential decay
|
|
227
|
+
try:
|
|
228
|
+
created_date = date.fromisoformat(created[:10])
|
|
229
|
+
age_days = max(0, (date.today() - created_date).days)
|
|
230
|
+
except (ValueError, IndexError):
|
|
231
|
+
age_days = 0
|
|
232
|
+
recency = exp(-log(2) / SCORING_HALF_LIFE_DAYS * age_days)
|
|
233
|
+
|
|
234
|
+
base = SCORING_W_RECENCY * recency + SCORING_W_RELEVANCE * relevance
|
|
235
|
+
|
|
236
|
+
# Wilson validation modifier
|
|
237
|
+
evaluations = metadata.get("evaluations") or 0
|
|
238
|
+
if not evaluations:
|
|
239
|
+
return round(base, 4)
|
|
240
|
+
|
|
241
|
+
positives = metadata.get("positives") or 0
|
|
242
|
+
negatives = metadata.get("negatives") or 0
|
|
243
|
+
if positives + negatives == 0:
|
|
244
|
+
return round(base, 4) # defensive guard for data inconsistency
|
|
245
|
+
|
|
246
|
+
modifier = wilson_lower_bound(positives, negatives)
|
|
247
|
+
return round(base * modifier, 4)
|
|
248
|
+
|
|
249
|
+
|
|
250
|
+
class QueryEngine:
    """Query engine for the knowledge graph.

    Provides keyword search and concept lookup capabilities for
    retrieving relevant context from the graph.

    Attributes:
        graph: The knowledge graph to query.

    Examples:
        >>> engine = QueryEngine(graph)
        >>> result = engine.query(Query(query="planning estimation"))
        >>> print(f"Found {len(result.concepts)} concepts")
    """

    def __init__(self, graph: Graph) -> None:
        """Initialize query engine with graph.

        Args:
            graph: Knowledge graph to query.
        """
        self.graph = graph

    def query(self, query: Query) -> QueryResult:
        """Execute query and return results.

        Dispatches on ``query.strategy``, times the execution, and wraps
        the matched concepts together with query metadata.
        """
        # perf_counter() is monotonic; time.time() is wall-clock and can jump
        # under NTP/DST adjustments, producing wrong (even negative) durations.
        start_time = time.perf_counter()

        # Execute strategy
        if query.strategy == QueryStrategy.KEYWORD_SEARCH:
            concepts, total_available = self._keyword_search(query)
        else:  # CONCEPT_LOOKUP
            concepts, total_available = self._concept_lookup(query)

        execution_time_ms = (time.perf_counter() - start_time) * 1000

        # Calculate metadata
        metadata = self._calculate_metadata(
            query, concepts, execution_time_ms, total_available
        )

        return QueryResult(concepts=concepts, metadata=metadata)

    def _keyword_search(
        self, query: Query
    ) -> tuple[list[GraphNode], int]:
        """Execute keyword search strategy.

        Matches keywords against node content, returns top N by relevance.

        Args:
            query: Query parameters.

        Returns:
            Tuple of (matching concepts sorted by relevance, total matches before limit).
        """
        keywords = query.query.lower().split()
        if not keywords:
            return [], 0

        scored_concepts: list[tuple[float, GraphNode]] = []

        for concept in self.graph.iter_concepts():
            # Apply type filter
            if query.types and concept.type not in query.types:
                continue

            # Check if any keyword matches (include node type in searchable text)
            searchable = f"{concept.type} {concept.content}".lower()
            if not any(kw in searchable for kw in keywords):
                continue

            # Calculate relevance score
            score = calculate_relevance_score(
                concept.content,
                keywords,
                concept.created,
                concept.metadata,
            )
            scored_concepts.append((score, concept))

        # Sort by score descending (stable sort preserves insertion order on ties)
        scored_concepts.sort(key=lambda x: x[0], reverse=True)

        total_available = len(scored_concepts)

        # Apply limit
        limited = [concept for _, concept in scored_concepts[: query.limit]]
        return limited, total_available

    def _concept_lookup(
        self, query: Query
    ) -> tuple[list[GraphNode], int]:
        """Execute concept lookup strategy.

        Direct ID lookup with optional BFS neighbor traversal.

        Args:
            query: Query parameters.

        Returns:
            Tuple of (concepts list, total matches before limit).
        """
        concept_id = query.query

        # Direct lookup
        concept = self.graph.get_concept(concept_id)
        if concept is None:
            return [], 0

        # Apply type filter to main concept
        if query.types and concept.type not in query.types:
            concepts: list[GraphNode] = []
        else:
            concepts = [concept]

        # Get neighbors if depth > 0
        if query.max_depth > 0:
            # Track ids in a set: the previous per-neighbor list rebuild was
            # accidentally O(n^2) in the number of neighbors.
            seen_ids = {c.id for c in concepts}
            neighbors = self.graph.get_neighbors(
                concept_id, depth=query.max_depth, edge_types=query.edge_types
            )
            for neighbor in neighbors:
                if query.types and neighbor.type not in query.types:
                    continue
                if neighbor.id not in seen_ids:
                    seen_ids.add(neighbor.id)
                    concepts.append(neighbor)

        total_available = len(concepts)

        # Apply limit
        return concepts[: query.limit], total_available

    def _calculate_metadata(
        self,
        query: Query,
        concepts: list[GraphNode],
        execution_time_ms: float,
        total_available: int,
    ) -> QueryMetadata:
        """Calculate metadata for query result.

        Estimates the token cost of the returned content and tallies the
        count of each node type found.
        """
        total_text = " ".join(c.content for c in concepts)
        token_estimate = estimate_tokens(total_text)

        types_found: dict[str, int] = {}
        for concept in concepts:
            node_type = concept.type
            types_found[node_type] = types_found.get(node_type, 0) + 1

        return QueryMetadata(
            query=query.query,
            strategy=query.strategy,
            total_concepts=len(concepts),
            total_available=total_available,
            token_estimate=token_estimate,
            execution_time_ms=execution_time_ms,
            types_found=types_found,
        )

    # =========================================================================
    # Architectural Context Helpers
    # =========================================================================

    def find_domain_for(self, module_id: str) -> GraphNode | None:
        """Find the bounded context a module belongs to.

        Follows outgoing ``belongs_to`` edges and returns the first
        ``bounded_context`` neighbor, or None if there is none.
        """
        neighbors = self.graph.get_neighbors(
            module_id, depth=1, edge_types=["belongs_to"]
        )
        for node in neighbors:
            if node.type == "bounded_context":
                return node
        return None

    def find_layer_for(self, module_id: str) -> GraphNode | None:
        """Find the architectural layer a module belongs to.

        Follows outgoing ``in_layer`` edges and returns the first ``layer``
        neighbor, or None if there is none.
        """
        neighbors = self.graph.get_neighbors(
            module_id, depth=1, edge_types=["in_layer"]
        )
        for node in neighbors:
            if node.type == "layer":
                return node
        return None

    def find_constraints_for(self, module_id: str) -> list[GraphNode]:
        """Find all guardrails that constrain a module.

        Constraints attach to the module's bounded context (via
        ``constrained_by`` edges), not to the module itself.
        """
        domain = self.find_domain_for(module_id)
        if domain is None:
            return []

        neighbors = self.graph.get_neighbors(
            domain.id, depth=1, edge_types=["constrained_by"]
        )
        return [n for n in neighbors if n.type == "guardrail"]

    def find_release_for(self, epic_id: str) -> GraphNode | None:
        """Find the release an epic belongs to.

        Follows outgoing ``part_of`` edge from the epic node to find
        a release node.

        Args:
            epic_id: Epic node ID (e.g., ``"epic-e19"``).

        Returns:
            The release node, or None if not found.
        """
        neighbors = self.graph.get_neighbors(
            epic_id, depth=1, edge_types=["part_of"]
        )
        for node in neighbors:
            if node.type == "release":
                return node
        return None

    def get_architectural_context(
        self, module_id: str
    ) -> ArchitecturalContext | None:
        """Get full architectural context for a module.

        Combines domain, layer, constraints, and dependencies into a
        single structured result.

        Args:
            module_id: Module node ID (e.g., ``"mod-memory"``).

        Returns:
            ArchitecturalContext with all available information,
            or None if module doesn't exist.
        """
        module = self.graph.get_concept(module_id)
        if module is None:
            return None

        domain = self.find_domain_for(module_id)
        layer = self.find_layer_for(module_id)
        constraints = self.find_constraints_for(module_id)

        # Only module-typed neighbors count as dependencies.
        dep_neighbors = self.graph.get_neighbors(
            module_id, depth=1, edge_types=["depends_on"]
        )
        dependencies = [n for n in dep_neighbors if n.type == "module"]

        return ArchitecturalContext(
            module=module,
            domain=domain,
            layer=layer,
            constraints=constraints,
            dependencies=dependencies,
        )
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
"""Workflow domain — work item types, state machines, and gates.
|
|
2
|
+
|
|
3
|
+
Placeholder for future implementation. Will contain:
|
|
4
|
+
- WorkItemType definitions (Epic, Story, Task)
|
|
5
|
+
- State machine definitions per work item type
|
|
6
|
+
- Gate protocol and default gate implementations
|
|
7
|
+
- Default RaiSE workflow (out of the box)
|
|
8
|
+
- Per-org/repo workflow override mechanism
|
|
9
|
+
"""
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: raise-core
|
|
3
|
+
Version: 2.2.1
|
|
4
|
+
Summary: RaiSE Core - Shared domain models for the RaiSE framework
|
|
5
|
+
Author-email: Emilio Osorio <emilio@humansys.ai>
|
|
6
|
+
License: Apache-2.0
|
|
7
|
+
Requires-Python: >=3.12
|
|
8
|
+
Requires-Dist: networkx>=3.6.1
|
|
9
|
+
Requires-Dist: pydantic>=2.6.0
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
raise_core/__init__.py,sha256=k9H7Ugb7H-4BjSp1DXygg1z0I8wAjcfh-zKQ2hAoEGw,446
|
|
2
|
+
raise_core/governance/__init__.py,sha256=fkHSO067VVCdflCJizCyTbguMzd2BX5JZDM7ASePTbg,294
|
|
3
|
+
raise_core/graph/__init__.py,sha256=HjYzw0aSbKxNUofXZr9Aq0QNJEjQ0IGc79PW4Oq53fc,217
|
|
4
|
+
raise_core/graph/engine.py,sha256=XunBrcC_lcqZne2LwZKJYRm3kEej5joJl4dNj8vggH0,7633
|
|
5
|
+
raise_core/graph/models.py,sha256=W7ZBlbLzOnjHYILg4ZxBJ5uSzCbn21T6elIqb5CuZ7c,6766
|
|
6
|
+
raise_core/graph/query.py,sha256=yVHNaQYqcDS-NoBZUsHiO5AaSafpN3-b4XpKeXIc7w4,15604
|
|
7
|
+
raise_core/graph/backends/__init__.py,sha256=SmLh7oEOzpw7DinzwWqHwlWGHXqGbzA46XIAf-srTNE,85
|
|
8
|
+
raise_core/graph/backends/filesystem.py,sha256=76rfHA0x8xBPk0s4O_J8ZGc13iTf66vTivrUJoaM3-o,2573
|
|
9
|
+
raise_core/graph/backends/models.py,sha256=sS_NL01ZRgcc3ZlbhFGprW4CAHKFc3SpDjn0isDft0Q,581
|
|
10
|
+
raise_core/graph/backends/protocol.py,sha256=g9cQMwq2RoNs2mn1hiTtDnWPPyqRP3e7SHN-Xj7BSWs,642
|
|
11
|
+
raise_core/workflow/__init__.py,sha256=t41RK0x99n6EPk214ASFcLsm_xK2F0IxKSMs34u1KOs,353
|
|
12
|
+
raise_core-2.2.1.dist-info/METADATA,sha256=iAj0RJysG9Jl5NNaaTK523sPLhf5aW7SGIJm3h7zYwg,276
|
|
13
|
+
raise_core-2.2.1.dist-info/WHEEL,sha256=QccIxa26bgl1E6uMy58deGWi-0aeIkkangHcxk2kWfw,87
|
|
14
|
+
raise_core-2.2.1.dist-info/RECORD,,
|