mcp-eregistrations-bpa 0.8.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mcp-eregistrations-bpa might be problematic. Click here for more details.
- mcp_eregistrations_bpa/__init__.py +121 -0
- mcp_eregistrations_bpa/__main__.py +6 -0
- mcp_eregistrations_bpa/arazzo/__init__.py +21 -0
- mcp_eregistrations_bpa/arazzo/expression.py +379 -0
- mcp_eregistrations_bpa/audit/__init__.py +56 -0
- mcp_eregistrations_bpa/audit/context.py +66 -0
- mcp_eregistrations_bpa/audit/logger.py +236 -0
- mcp_eregistrations_bpa/audit/models.py +131 -0
- mcp_eregistrations_bpa/auth/__init__.py +64 -0
- mcp_eregistrations_bpa/auth/callback.py +391 -0
- mcp_eregistrations_bpa/auth/cas.py +409 -0
- mcp_eregistrations_bpa/auth/oidc.py +252 -0
- mcp_eregistrations_bpa/auth/permissions.py +162 -0
- mcp_eregistrations_bpa/auth/token_manager.py +348 -0
- mcp_eregistrations_bpa/bpa_client/__init__.py +84 -0
- mcp_eregistrations_bpa/bpa_client/client.py +740 -0
- mcp_eregistrations_bpa/bpa_client/endpoints.py +193 -0
- mcp_eregistrations_bpa/bpa_client/errors.py +276 -0
- mcp_eregistrations_bpa/bpa_client/models.py +203 -0
- mcp_eregistrations_bpa/config.py +349 -0
- mcp_eregistrations_bpa/db/__init__.py +21 -0
- mcp_eregistrations_bpa/db/connection.py +64 -0
- mcp_eregistrations_bpa/db/migrations.py +168 -0
- mcp_eregistrations_bpa/exceptions.py +39 -0
- mcp_eregistrations_bpa/py.typed +0 -0
- mcp_eregistrations_bpa/rollback/__init__.py +19 -0
- mcp_eregistrations_bpa/rollback/manager.py +616 -0
- mcp_eregistrations_bpa/server.py +152 -0
- mcp_eregistrations_bpa/tools/__init__.py +372 -0
- mcp_eregistrations_bpa/tools/actions.py +155 -0
- mcp_eregistrations_bpa/tools/analysis.py +352 -0
- mcp_eregistrations_bpa/tools/audit.py +399 -0
- mcp_eregistrations_bpa/tools/behaviours.py +1042 -0
- mcp_eregistrations_bpa/tools/bots.py +627 -0
- mcp_eregistrations_bpa/tools/classifications.py +575 -0
- mcp_eregistrations_bpa/tools/costs.py +765 -0
- mcp_eregistrations_bpa/tools/debug_strategies.py +351 -0
- mcp_eregistrations_bpa/tools/debugger.py +1230 -0
- mcp_eregistrations_bpa/tools/determinants.py +2235 -0
- mcp_eregistrations_bpa/tools/document_requirements.py +670 -0
- mcp_eregistrations_bpa/tools/export.py +899 -0
- mcp_eregistrations_bpa/tools/fields.py +162 -0
- mcp_eregistrations_bpa/tools/form_errors.py +36 -0
- mcp_eregistrations_bpa/tools/formio_helpers.py +971 -0
- mcp_eregistrations_bpa/tools/forms.py +1269 -0
- mcp_eregistrations_bpa/tools/jsonlogic_builder.py +466 -0
- mcp_eregistrations_bpa/tools/large_response.py +163 -0
- mcp_eregistrations_bpa/tools/messages.py +523 -0
- mcp_eregistrations_bpa/tools/notifications.py +241 -0
- mcp_eregistrations_bpa/tools/registration_institutions.py +680 -0
- mcp_eregistrations_bpa/tools/registrations.py +897 -0
- mcp_eregistrations_bpa/tools/role_status.py +447 -0
- mcp_eregistrations_bpa/tools/role_units.py +400 -0
- mcp_eregistrations_bpa/tools/roles.py +1236 -0
- mcp_eregistrations_bpa/tools/rollback.py +335 -0
- mcp_eregistrations_bpa/tools/services.py +674 -0
- mcp_eregistrations_bpa/tools/workflows.py +2487 -0
- mcp_eregistrations_bpa/tools/yaml_transformer.py +991 -0
- mcp_eregistrations_bpa/workflows/__init__.py +28 -0
- mcp_eregistrations_bpa/workflows/loader.py +440 -0
- mcp_eregistrations_bpa/workflows/models.py +336 -0
- mcp_eregistrations_bpa-0.8.5.dist-info/METADATA +965 -0
- mcp_eregistrations_bpa-0.8.5.dist-info/RECORD +66 -0
- mcp_eregistrations_bpa-0.8.5.dist-info/WHEEL +4 -0
- mcp_eregistrations_bpa-0.8.5.dist-info/entry_points.txt +2 -0
- mcp_eregistrations_bpa-0.8.5.dist-info/licenses/LICENSE +86 -0
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
"""Arazzo workflow orchestration for BPA service design.
|
|
2
|
+
|
|
3
|
+
This module provides:
|
|
4
|
+
- Workflow catalog loading and discovery
|
|
5
|
+
- Intent-to-workflow matching
|
|
6
|
+
- Input extraction and validation
|
|
7
|
+
- Workflow execution and progress reporting
|
|
8
|
+
- Error recovery and rollback
|
|
9
|
+
- Workflow chaining and composition
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
from mcp_eregistrations_bpa.workflows.loader import (
|
|
13
|
+
WorkflowCatalog,
|
|
14
|
+
get_workflow_catalog,
|
|
15
|
+
)
|
|
16
|
+
from mcp_eregistrations_bpa.workflows.models import (
|
|
17
|
+
WorkflowDefinition,
|
|
18
|
+
WorkflowInput,
|
|
19
|
+
WorkflowStep,
|
|
20
|
+
)
|
|
21
|
+
|
|
22
|
+
__all__ = [
|
|
23
|
+
"WorkflowCatalog",
|
|
24
|
+
"WorkflowDefinition",
|
|
25
|
+
"WorkflowInput",
|
|
26
|
+
"WorkflowStep",
|
|
27
|
+
"get_workflow_catalog",
|
|
28
|
+
]
|
|
@@ -0,0 +1,440 @@
|
|
|
1
|
+
"""Arazzo workflow loader and catalog manager.
|
|
2
|
+
|
|
3
|
+
Loads Arazzo YAML workflow specifications and provides a catalog
|
|
4
|
+
for workflow discovery and execution.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import re
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
from typing import Any
|
|
12
|
+
|
|
13
|
+
import yaml
|
|
14
|
+
|
|
15
|
+
from mcp_eregistrations_bpa.workflows.models import (
|
|
16
|
+
OPERATION_TO_TOOL_MAP,
|
|
17
|
+
InputType,
|
|
18
|
+
WorkflowDefinition,
|
|
19
|
+
WorkflowInput,
|
|
20
|
+
WorkflowStep,
|
|
21
|
+
derive_category,
|
|
22
|
+
)
|
|
23
|
+
|
|
24
|
+
# Default workflow directory (relative to project root)
|
|
25
|
+
DEFAULT_WORKFLOW_DIR = "_bmad-output/arazzo-workflows"
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class WorkflowCatalog:
    """Catalog of available Arazzo workflows.

    Loads and indexes workflows from Arazzo YAML files for discovery
    and execution by the workflow orchestration tools.
    """

    def __init__(self) -> None:
        """Initialize an empty workflow catalog."""
        self._workflows: dict[str, WorkflowDefinition] = {}
        self._loaded = False
        self._workflow_dir: Path | None = None

    @property
    def workflow_count(self) -> int:
        """Get the number of loaded workflows."""
        return len(self._workflows)

    @property
    def categories(self) -> list[str]:
        """Get sorted unique categories from all loaded workflows."""
        return sorted({wf.category for wf in self._workflows.values()})

    @staticmethod
    def _words(text: str) -> set[str]:
        """Split *text* into a set of non-empty lowercase word tokens.

        Empty tokens (produced by ``re.split`` when the text starts/ends
        with punctuation) are discarded; previously the stray ``''``
        token matched the ``''`` token of every other split text and
        inflated relevance scores.
        """
        return {w for w in re.split(r"\W+", text.lower()) if w}

    def load_from_directory(self, workflow_dir: str | Path | None = None) -> None:
        """Load all Arazzo workflows from a directory.

        Args:
            workflow_dir: Path to the workflow directory.
                If None, searches for the default location.

        Raises:
            FileNotFoundError: If the workflow directory doesn't exist.
        """
        if workflow_dir is None:
            workflow_dir = self._find_workflow_directory()

        self._workflow_dir = Path(workflow_dir)
        if not self._workflow_dir.exists():
            raise FileNotFoundError(
                f"Workflow directory not found: {self._workflow_dir}"
            )

        # Load all .arazzo.yaml files. sorted() makes the load order --
        # and therefore which definition wins on duplicate workflow IDs
        # -- deterministic across filesystems.
        for yaml_file in sorted(self._workflow_dir.glob("*.arazzo.yaml")):
            self._load_arazzo_file(yaml_file)

        self._loaded = True

    def _find_workflow_directory(self) -> Path:
        """Find the workflow directory by searching from current directory upward.

        Returns:
            Path to the workflow directory.

        Raises:
            FileNotFoundError: If the workflow directory is not found.
        """
        # Start from current working directory
        current = Path.cwd()

        # Search upward for the workflow directory
        for _ in range(10):  # Limit search depth
            candidate = current / DEFAULT_WORKFLOW_DIR
            if candidate.exists():
                return candidate

            parent = current.parent
            if parent == current:
                # Reached the filesystem root
                break
            current = parent

        # Try relative to this module's location (loader.py -> workflows
        # -> package -> source root); assumes the standard repo layout.
        module_dir = Path(__file__).parent.parent.parent.parent
        candidate = module_dir / DEFAULT_WORKFLOW_DIR
        if candidate.exists():
            return candidate

        raise FileNotFoundError(
            f"Workflow directory '{DEFAULT_WORKFLOW_DIR}' not found. "
            "Ensure you're running from the project root."
        )

    def _load_arazzo_file(self, yaml_file: Path) -> None:
        """Load workflows from a single Arazzo YAML file.

        Args:
            yaml_file: Path to the Arazzo YAML file.
        """
        with open(yaml_file, encoding="utf-8") as f:
            data = yaml.safe_load(f)

        if not data or "workflows" not in data:
            return

        assert self._workflow_dir is not None  # Set by load_from_directory before call
        # Record the path relative to the project root (two levels above
        # the workflow dir, matching DEFAULT_WORKFLOW_DIR's depth).
        source_file = str(yaml_file.relative_to(self._workflow_dir.parent.parent))

        for workflow_data in data.get("workflows", []):
            workflow = self._parse_workflow(workflow_data, source_file)
            if workflow:
                self._workflows[workflow.workflow_id] = workflow

    def _parse_workflow(
        self, data: dict[str, Any], source_file: str
    ) -> WorkflowDefinition | None:
        """Parse a single workflow definition from Arazzo data.

        Args:
            data: The workflow dictionary from the YAML.
            source_file: Path to the source file.

        Returns:
            Parsed WorkflowDefinition or None if invalid.
        """
        workflow_id = data.get("workflowId")
        if not workflow_id:
            # A workflow without an ID cannot be indexed; skip it.
            return None

        summary = data.get("summary", "")
        description = data.get("description", "")
        category = derive_category(workflow_id, summary, description)

        # Parse inputs
        inputs = self._parse_inputs(data.get("inputs", {}))

        # Parse steps
        steps = self._parse_steps(data.get("steps", []))

        # Parse outputs -- normalize a list form to a dict with identical
        # key/value pairs so downstream code only handles one shape.
        outputs = data.get("outputs", {})
        if isinstance(outputs, list):
            outputs = {o: o for o in outputs}

        return WorkflowDefinition(
            workflow_id=workflow_id,
            summary=summary,
            description=description,
            category=category,
            inputs=inputs,
            steps=steps,
            outputs=outputs,
            failure_actions=data.get("failureActions", []),
            source_file=source_file,
        )

    def _parse_inputs(self, inputs_data: dict[str, Any]) -> list[WorkflowInput]:
        """Parse workflow input definitions.

        Args:
            inputs_data: The inputs section from the workflow
                (JSON-Schema style: ``properties`` + ``required``).

        Returns:
            List of WorkflowInput objects.
        """
        result: list[WorkflowInput] = []

        if not isinstance(inputs_data, dict):
            return result

        properties = inputs_data.get("properties", {})
        required = inputs_data.get("required", [])

        for name, prop_data in properties.items():
            input_type = self._parse_input_type(prop_data.get("type", "string"))
            result.append(
                WorkflowInput(
                    name=name,
                    input_type=input_type,
                    required=name in required,
                    description=prop_data.get("description", ""),
                    default=prop_data.get("default"),
                    enum_values=prop_data.get("enum"),
                    pattern=prop_data.get("pattern"),
                    min_length=prop_data.get("minLength"),
                    max_length=prop_data.get("maxLength"),
                    minimum=prop_data.get("minimum"),
                    maximum=prop_data.get("maximum"),
                )
            )

        return result

    def _parse_input_type(self, type_str: str) -> InputType:
        """Parse input type string to InputType enum.

        Args:
            type_str: The type string from YAML.

        Returns:
            The corresponding InputType; defaults to STRING for
            unrecognized values.
        """
        type_map = {
            "string": InputType.STRING,
            "integer": InputType.INTEGER,
            "number": InputType.NUMBER,
            "boolean": InputType.BOOLEAN,
            "array": InputType.ARRAY,
            "object": InputType.OBJECT,
        }
        return type_map.get(type_str.lower(), InputType.STRING)

    def _parse_steps(self, steps_data: list[dict[str, Any]]) -> list[WorkflowStep]:
        """Parse workflow step definitions.

        Args:
            steps_data: The steps list from the workflow.

        Returns:
            List of WorkflowStep objects (steps without a stepId are
            skipped).
        """
        result: list[WorkflowStep] = []

        for step_data in steps_data:
            step_id = step_data.get("stepId")
            if not step_id:
                continue

            operation_id = step_data.get("operationId")
            # Map the OpenAPI operation onto the MCP tool that executes it.
            mcp_tool = OPERATION_TO_TOOL_MAP.get(operation_id) if operation_id else None

            # Parse request body -- only the Arazzo "payload" form is used.
            request_body = {}
            if "requestBody" in step_data:
                rb = step_data["requestBody"]
                if isinstance(rb, dict) and "payload" in rb:
                    request_body = rb["payload"]

            result.append(
                WorkflowStep(
                    step_id=step_id,
                    description=step_data.get("description", ""),
                    operation_id=operation_id,
                    mcp_tool=mcp_tool,
                    request_body=request_body,
                    parameters=step_data.get("parameters", []),
                    success_criteria=step_data.get("successCriteria", []),
                    on_success=step_data.get("onSuccess", []),
                    on_failure=step_data.get("onFailure", []),
                    outputs=step_data.get("outputs", {}),
                    condition=step_data.get("condition"),
                )
            )

        return result

    def get_workflow(self, workflow_id: str) -> WorkflowDefinition | None:
        """Get a workflow by ID.

        Args:
            workflow_id: The workflow ID to look up.

        Returns:
            The workflow definition or None if not found.
        """
        self._ensure_loaded()
        return self._workflows.get(workflow_id)

    def list_workflows(self, category: str | None = None) -> list[dict[str, Any]]:
        """List all workflows, optionally filtered by category.

        Args:
            category: Optional category to filter by.

        Returns:
            List of workflow catalog entries.
        """
        self._ensure_loaded()

        return [
            wf.to_catalog_entry()
            for wf in self._workflows.values()
            if category is None or wf.category == category
        ]

    def search_workflows(self, query: str, limit: int = 10) -> list[dict[str, Any]]:
        """Search workflows by keyword.

        Searches in workflow ID, summary, and description.
        Returns matches with relevance scores.

        Args:
            query: The search query.
            limit: Maximum number of results.

        Returns:
            List of matches with relevance scores, highest first.
        """
        self._ensure_loaded()

        query_lower = query.lower()
        # _words() discards empty tokens; previously '' from
        # punctuation-adjacent splits matched '' in every candidate's
        # word set and produced false-positive hits.
        query_words = self._words(query)

        matches: list[tuple[float, WorkflowDefinition]] = []

        for wf in self._workflows.values():
            score = self._calculate_relevance(wf, query_lower, query_words)
            if score > 0:
                matches.append((score, wf))

        # Sort by relevance (descending); key on the score only so the
        # workflow objects never need to be comparable.
        matches.sort(key=lambda x: x[0], reverse=True)

        return [
            {
                "id": wf.workflow_id,
                "summary": wf.summary,
                "category": wf.category,
                "relevance": round(score, 2),
            }
            for score, wf in matches[:limit]
        ]

    def _calculate_relevance(
        self,
        workflow: WorkflowDefinition,
        query_lower: str,
        query_words: set[str],
    ) -> float:
        """Calculate relevance score for a workflow against a query.

        Args:
            workflow: The workflow to score.
            query_lower: The lowercase query string.
            query_words: Set of non-empty lowercase query words.

        Returns:
            Relevance score (0-1).
        """
        score = 0.0

        # Exact/substring ID match
        if query_lower in workflow.workflow_id.lower():
            score += 0.5

        # ID word match. Split the ORIGINAL-case ID so the (?=[A-Z])
        # branch can break camelCase apart -- splitting after lowercasing
        # (the previous behavior) made that branch dead code -- then
        # lowercase the tokens for comparison.
        id_words = {
            w.lower()
            for w in re.split(r"(?=[A-Z])|_|-", workflow.workflow_id)
            if w
        }
        id_matches = len(query_words & id_words)
        if id_matches > 0:
            # id_matches > 0 implies query_words is non-empty, so the
            # division below cannot divide by zero.
            score += 0.3 * (id_matches / len(query_words))

        # Summary match
        summary_lower = workflow.summary.lower()
        if query_lower in summary_lower:
            score += 0.3
        else:
            summary_matches = len(query_words & self._words(summary_lower))
            if summary_matches > 0:
                score += 0.2 * (summary_matches / len(query_words))

        # Description match
        desc_lower = workflow.description.lower()
        if query_lower in desc_lower:
            score += 0.1
        else:
            desc_matches = len(query_words & self._words(desc_lower))
            if desc_matches > 0:
                score += 0.05 * (desc_matches / len(query_words))

        # Category match
        if query_lower in workflow.category.lower():
            score += 0.1

        return min(score, 1.0)

    def _ensure_loaded(self) -> None:
        """Ensure workflows are loaded, loading if necessary."""
        if not self._loaded:
            self.load_from_directory()
|
|
403
|
+
|
|
404
|
+
|
|
405
|
+
# Global catalog instance
_catalog: WorkflowCatalog | None = None


def get_workflow_catalog() -> WorkflowCatalog:
    """Get the global workflow catalog instance.

    Loads workflows on first access. If no workflow directory can be
    found, the catalog is marked as loaded-but-empty so later queries
    return empty results instead of re-raising FileNotFoundError from
    the lazy loader.

    Returns:
        The global WorkflowCatalog instance.
    """
    global _catalog
    if _catalog is None:
        _catalog = WorkflowCatalog()
        try:
            _catalog.load_from_directory()
        except FileNotFoundError:
            # No workflow directory available. Mark the catalog loaded so
            # _ensure_loaded() does not retry and raise on every later
            # query -- the catalog stays empty but usable, as intended.
            # (Previously _loaded stayed False, so any subsequent query
            # re-raised FileNotFoundError.)
            _catalog._loaded = True
    return _catalog
|
|
426
|
+
|
|
427
|
+
|
|
428
|
+
def reload_workflow_catalog(workflow_dir: str | Path | None = None) -> WorkflowCatalog:
    """Reload the workflow catalog from disk.

    The new catalog is fully loaded before being published to the
    module-level global, so a failed reload (e.g. a missing directory)
    leaves the previously loaded catalog intact instead of clobbering
    it with an empty one.

    Args:
        workflow_dir: Optional path to the workflow directory.

    Returns:
        The reloaded WorkflowCatalog instance.

    Raises:
        FileNotFoundError: If the workflow directory doesn't exist.
    """
    global _catalog
    catalog = WorkflowCatalog()
    # May raise; the global is only replaced after a successful load.
    catalog.load_from_directory(workflow_dir)
    _catalog = catalog
    return catalog
|