forge-dev 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- forge_core/__init__.py +3 -0
- forge_core/agents/__init__.py +1 -0
- forge_core/auditor.py +330 -0
- forge_core/cli.py +552 -0
- forge_core/detector.py +209 -0
- forge_core/editor_bridge.py +543 -0
- forge_core/models.py +332 -0
- forge_core/phases/__init__.py +1 -0
- forge_core/phases/coherence.py +293 -0
- forge_core/phases/context.py +264 -0
- forge_core/phases/intake.py +340 -0
- forge_core/registry.py +247 -0
- forge_core/standards/api-first-design.yaml +24 -0
- forge_core/standards/microservice-packaging.yaml +30 -0
- forge_core/standards/observability.yaml +31 -0
- forge_core/standards/security-baseline.yaml +43 -0
- forge_core/standards/type-safety.yaml +23 -0
- forge_core/templates/__init__.py +1 -0
- forge_core/utils/__init__.py +1 -0
- forge_dev-0.1.0.dist-info/METADATA +134 -0
- forge_dev-0.1.0.dist-info/RECORD +25 -0
- forge_dev-0.1.0.dist-info/WHEEL +4 -0
- forge_dev-0.1.0.dist-info/entry_points.txt +2 -0
- mcp_server/__init__.py +1 -0
- mcp_server/server.py +1086 -0
mcp_server/server.py
ADDED
|
@@ -0,0 +1,1086 @@
|
|
|
1
|
+
"""Forge MCP Server — exposes Forge capabilities to any MCP-compatible editor.
|
|
2
|
+
|
|
3
|
+
This server makes Forge accessible from Cursor, Copilot, Windsurf,
|
|
4
|
+
Claude Code, or any editor that supports MCP.
|
|
5
|
+
|
|
6
|
+
Tools exposed:
|
|
7
|
+
- forge_init: Initialize a project
|
|
8
|
+
- forge_intake: Process a requirement
|
|
9
|
+
- forge_context: Get/set project context
|
|
10
|
+
- forge_status: Project status
|
|
11
|
+
- forge_audit: Run audit agents
|
|
12
|
+
- forge_assess: Maturity assessment
|
|
13
|
+
- forge_journal: Add journal entries
|
|
14
|
+
- forge_standards: Manage standards
|
|
15
|
+
- forge_coherence_check: Validate changes
|
|
16
|
+
- forge_mcps: MCP registry
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
from __future__ import annotations
|
|
20
|
+
|
|
21
|
+
import json
|
|
22
|
+
from pathlib import Path
|
|
23
|
+
from typing import Optional
|
|
24
|
+
|
|
25
|
+
from mcp.server.fastmcp import FastMCP
|
|
26
|
+
from pydantic import BaseModel, ConfigDict, Field
|
|
27
|
+
|
|
28
|
+
# ── Server Init ────────────────────────────────────────────────────────────
|
|
29
|
+
|
|
30
|
+
mcp = FastMCP("forge_mcp")
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
# ── Input Models ───────────────────────────────────────────────────────────
|
|
34
|
+
|
|
35
|
+
class ForgeInitInput(BaseModel):
    """Input for initializing Forge in a project directory."""
    # Strip surrounding whitespace on all string fields; reject unknown keys.
    model_config = ConfigDict(str_strip_whitespace=True, extra="forbid")

    # Directory to initialize; resolved by the tool, so relative paths are allowed.
    path: str = Field(
        default=".",
        description="Path to the project directory. Defaults to current directory.",
    )
    # Optional explicit project name; the tool falls back to the directory name.
    name: Optional[str] = Field(
        default=None,
        description="Project name. Defaults to directory name.",
    )
    # Free-form "<language>/<framework>" string; forwarded as a context override.
    backend: Optional[str] = Field(
        default=None,
        description=(
            "Backend framework override. Options: python/fastapi, python/django, "
            "node/express, node/fastify, node/nestjs"
        ),
    )
    project_type: Optional[str] = Field(
        default=None,
        description="Project type: saas, api, internal-tool, library, cli, microservice",
    )
    description: Optional[str] = Field(
        default=None,
        description="One-line project description.",
    )
    # List of compliance regimes; stored verbatim in the resolved context.
    regulatory: Optional[list[str]] = Field(
        default=None,
        description="Regulatory requirements: hipaa, ferpa, gdpr, soc2, pci-dss",
    )
    # Tri-state: None means "not specified" (distinct from explicit False).
    ai_enabled: Optional[bool] = Field(
        default=None,
        description="Whether the project uses AI capabilities internally.",
    )
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
class ForgeIntakeInput(BaseModel):
    """Input for processing a requirement document."""
    # Strip surrounding whitespace on all string fields; reject unknown keys.
    model_config = ConfigDict(str_strip_whitespace=True, extra="forbid")

    # Raw requirement text; min_length guards against accidental empty calls.
    content: str = Field(
        ...,
        description=(
            "The requirement content. Can be a PRD, user story, epic, "
            "feature request, or any description of what to build."
        ),
        min_length=10,
    )
    path: str = Field(
        default=".",
        description="Path to the project directory.",
    )
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
class ForgeJournalInput(BaseModel):
    """Input for adding a journal entry."""
    # Strip surrounding whitespace on all string fields; reject unknown keys.
    model_config = ConfigDict(str_strip_whitespace=True, extra="forbid")

    # Free-form markdown text appended verbatim to .forge/journal.md.
    entry: str = Field(
        ...,
        description=(
            "The journal entry. Record learnings, workarounds, nuances, "
            "or any project-specific context that agents should know."
        ),
        min_length=5,
    )
    path: str = Field(
        default=".",
        description="Path to the project directory.",
    )
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
class ForgeStandardInput(BaseModel):
    """Input for adding a new standard."""
    # Strip surrounding whitespace on all string fields; reject unknown keys.
    model_config = ConfigDict(str_strip_whitespace=True, extra="forbid")

    # Name is slugified (lowercase, spaces -> dashes) to form the YAML filename.
    name: str = Field(..., description="Standard name", min_length=2, max_length=100)
    area: str = Field(
        ...,
        description="Area this standard governs (e.g., security, api-design, testing, frontend)",
    )
    description: str = Field(
        ...,
        description="What this standard requires — be specific and actionable.",
    )
    # Not validated against a closed set here; downstream coherence checks apply.
    enforcement: str = Field(
        default="required",
        description="Enforcement level: required, recommended, optional",
    )
    rules: Optional[list[str]] = Field(
        default=None,
        description="Specific rules this standard enforces.",
    )
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
class ForgePathInput(BaseModel):
    """Simple input with just a project path."""
    # Strip surrounding whitespace; reject unknown keys.
    model_config = ConfigDict(str_strip_whitespace=True, extra="forbid")

    path: str = Field(
        default=".",
        description="Path to the project directory.",
    )
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
class ForgeContextOverrideInput(BaseModel):
    """Input for overriding specific context values."""
    # Strip surrounding whitespace; reject unknown keys.
    model_config = ConfigDict(str_strip_whitespace=True, extra="forbid")

    path: str = Field(default=".", description="Path to the project directory.")
    # Arbitrary key -> value map; dict values are merged shallowly into the
    # existing context, scalars replace outright (see forge_context_override).
    overrides: dict = Field(
        ...,
        description=(
            "Key-value pairs to override in the project context. "
            "Example: {\"backend\": \"node/fastify\", \"auth\": \"auth0\"}"
        ),
    )
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
# ── Tools ──────────────────────────────────────────────────────────────────
|
|
156
|
+
|
|
157
|
+
@mcp.tool(
    name="forge_init",
    annotations={
        "title": "Initialize Forge Project",
        "readOnlyHint": False,
        "destructiveHint": False,
        "idempotentHint": True,
        "openWorldHint": False,
    },
)
async def forge_init(params: ForgeInitInput) -> str:
    """Initialize Forge in a project directory.

    Detects the current state of the directory (empty, has docs, has code,
    already initialized) and sets up the .forge/ directory with context,
    journal, and configuration.

    Args:
        params: Project path plus optional name/stack/type/regulatory overrides.

    Returns:
        JSON string with the detection results, resolved context, files
        created, and suggested next steps. If the project is already
        initialized, returns a short "already_initialized" payload instead.
    """
    from forge_core.detector import detect_project
    from forge_core.phases.context import resolve_context, save_context
    from forge_core.registry import ensure_registry, load_user_config, record_project

    project_path = Path(params.path).resolve()

    ensure_registry()
    user_config = load_user_config()
    detection = detect_project(project_path)

    # Idempotence: a second init never clobbers existing Forge state.
    if detection.has_forge:
        from forge_core.phases.context import load_context
        context = load_context(project_path)
        return json.dumps({
            "status": "already_initialized",
            "project": context.name if context else project_path.name,
            "message": "Project already has Forge. Use forge_status, forge_assess, or forge_audit.",
            "detection": detection.summary(),
        }, indent=2)

    # Build overrides from params; only explicitly-provided values are passed
    # through so resolve_context can fall back to detection/user defaults.
    overrides: dict = {}
    if params.project_type:
        overrides["type"] = params.project_type
    if params.description:
        overrides["description"] = params.description
    if params.regulatory:
        overrides["regulatory"] = params.regulatory
    if params.backend:
        overrides["backend"] = params.backend
    # `is not None` so an explicit ai_enabled=False still overrides.
    if params.ai_enabled is not None:
        overrides["ai"] = {"enabled": params.ai_enabled}

    context = resolve_context(
        detection, user_config,
        project_name=params.name,
        overrides=overrides,
    )

    context_path = save_context(context, project_path)

    # Create journal (only if missing, to preserve existing entries).
    journal_path = project_path / ".forge" / "journal.md"
    if not journal_path.exists():
        # FIX: explicit UTF-8 — the header contains a non-ASCII em-dash, and
        # Path.write_text otherwise uses the platform locale encoding, which
        # can raise UnicodeEncodeError (e.g. cp1252-adjacent codecs on Windows).
        journal_path.write_text(
            f"# {context.name} — Forge Journal\n\n"
            "Record project-specific learnings, workarounds, and nuances here.\n\n"
            "## Entries\n\n",
            encoding="utf-8",
        )

    record_project(
        context.name,
        str(project_path),
        {"type": context.type.value, "backend": context.backend.value, "cloud": context.cloud.value},
    )

    # Determine what questions to ask if info is missing.
    needs_input = []
    if not params.description and detection.is_empty:
        needs_input.append("description: What does this project do?")
    if user_config.backend is None and not params.backend:
        needs_input.append("backend: Which backend framework? (python/fastapi, node/express, etc.)")

    result = {
        "status": "initialized",
        "project": context.name,
        "detection": {
            "state": detection.state.value,
            "code_files": len(detection.code_files),
            "doc_files": len(detection.doc_files),
            "detected_stack": detection.detected_stack,
        },
        "context": {
            "backend": context.backend.value,
            "frontend": context.frontend.value,
            "cloud": context.cloud.value,
            "database": context.database.value,
            "auth": context.auth.value,
            "ai_enabled": context.ai.enabled,
            "type": context.type.value,
        },
        "files_created": [str(context_path), str(journal_path)],
        "next_steps": [
            "forge_intake — process a requirement document",
            "forge_plan — generate implementation plan",
        ],
    }

    if needs_input:
        result["needs_input"] = needs_input
        result["message"] = (
            "Project initialized with defaults. Consider providing: "
            + "; ".join(needs_input)
        )

    return json.dumps(result, indent=2)
|
|
272
|
+
|
|
273
|
+
|
|
274
|
+
@mcp.tool(
    name="forge_intake",
    annotations={
        "title": "Process Requirement into Forge Brief",
        "readOnlyHint": False,
        "destructiveHint": False,
        "idempotentHint": False,
        "openWorldHint": False,
    },
)
async def forge_intake(params: ForgeIntakeInput) -> str:
    """Analyze a requirement and produce a structured Forge Brief.

    Accepts any kind of requirement: PRD, user story, epic, feature request,
    or informal description. Classifies it, detects gaps, and produces
    an AI-optimized implementation order.

    Args:
        params: The requirement text and the project path.

    Returns:
        JSON string with the classification, the saved prompt path, and an
        `llm_prompt` ready to send to an LLM for full analysis. Returns an
        error payload if the project is not initialized.
    """
    from forge_core.phases.intake import (
        build_intake_prompt,
        classify_requirement,
    )

    project_path = Path(params.path).resolve()
    forge_dir = project_path / ".forge"

    if not forge_dir.exists():
        return json.dumps({
            "error": "No Forge project found. Run forge_init first.",
        })

    req_type = classify_requirement(params.content)
    prompt = build_intake_prompt(params.content, req_type)

    # Save prompt for processing.
    prompt_path = forge_dir / "intake_prompt.md"
    # FIX: explicit UTF-8 — the prompt embeds arbitrary user requirement text,
    # and the locale-default encoding can fail on non-ASCII content.
    prompt_path.write_text(prompt, encoding="utf-8")

    return json.dumps({
        "status": "intake_ready",
        "requirement_type": req_type.value,
        "content_length": len(params.content),
        "prompt_saved_to": str(prompt_path),
        "llm_prompt": prompt,
        "instruction": (
            "Send the llm_prompt to Claude or another LLM to generate the full brief. "
            "The response should be a JSON object matching the ForgeBrief schema. "
            "Save the result to .forge/brief.yaml"
        ),
    }, indent=2)
|
|
326
|
+
|
|
327
|
+
|
|
328
|
+
@mcp.tool(
    name="forge_status",
    annotations={
        "title": "Get Forge Project Status",
        "readOnlyHint": True,
        "destructiveHint": False,
        "idempotentHint": True,
        "openWorldHint": False,
    },
)
async def forge_status(params: ForgePathInput) -> str:
    """Report the current state of a Forge project as JSON.

    Covers the four governance artifacts (context, brief, plan, journal)
    plus, when available, the resolved stack configuration and a brief
    summary. Returns an error payload when .forge/ is absent.
    """
    from forge_core.phases.context import load_context
    from forge_core.phases.intake import load_brief
    from forge_core.registry import get_forge_version

    project_dir = Path(params.path).resolve()
    forge_dir = project_dir / ".forge"

    # Guard: nothing to report without an initialized project.
    if not forge_dir.exists():
        return json.dumps({"error": "No Forge project found. Run forge_init first."})

    ctx = load_context(project_dir)
    brief_doc = load_brief(project_dir)

    # Artifact readiness, derived from loaded objects and on-disk files.
    components = {
        "context": "ready" if ctx else "missing",
        "brief": "ready" if brief_doc else "not_started",
        "plan": "ready" if (forge_dir / "plan.yaml").exists() else "not_started",
        "journal": "ready" if (forge_dir / "journal.md").exists() else "missing",
    }

    report = {
        "forge_version": get_forge_version(),
        "project": ctx.name if ctx else project_dir.name,
        "components": components,
    }

    if ctx:
        report["stack"] = {
            "backend": ctx.backend.value,
            "frontend": ctx.frontend.value,
            "cloud": ctx.cloud.value,
            "database": ctx.database.value,
            "auth": ctx.auth.value,
            "ai_enabled": ctx.ai.enabled,
            "type": ctx.type.value,
            "regulatory": [r.value for r in ctx.regulatory],
        }

    if brief_doc:
        report["brief_summary"] = {
            "source_type": brief_doc.source_type.value,
            "completeness": brief_doc.completeness_score,
            "features": len(brief_doc.features),
            "gaps": len(brief_doc.gaps),
            "mvp_defined": brief_doc.mvp_defined,
        }

    return json.dumps(report, indent=2)
|
|
391
|
+
|
|
392
|
+
|
|
393
|
+
@mcp.tool(
    name="forge_journal",
    annotations={
        "title": "Add Project Journal Entry",
        "readOnlyHint": False,
        "destructiveHint": False,
        "idempotentHint": False,
        "openWorldHint": False,
    },
)
async def forge_journal(params: ForgeJournalInput) -> str:
    """Add a learning, workaround, or nuance to the project journal.

    The journal captures project-specific context that doesn't fit in
    standards or patterns. Things like:
    - "To access Azure DB, install pg in temp and use tunnel"
    - "The legacy API returns dates in MM/DD/YYYY not ISO"
    - "Rate limit on external API is 100/min, batch accordingly"

    Forge agents read the journal for project-specific awareness.

    Args:
        params: The entry text and the project path.

    Returns:
        JSON string echoing the entry and its UTC timestamp, or an error
        payload when the journal file does not exist yet.
    """
    from datetime import datetime, timezone

    project_path = Path(params.path).resolve()
    journal_path = project_path / ".forge" / "journal.md"

    if not journal_path.exists():
        return json.dumps({"error": "No Forge project found. Run forge_init first."})

    timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M UTC")
    # FIX: explicit UTF-8 — entries are arbitrary user text; the bare open()
    # used the locale-default encoding, which can corrupt or reject non-ASCII
    # content (and forge_init writes this file as UTF-8).
    with journal_path.open("a", encoding="utf-8") as f:
        f.write(f"\n### {timestamp}\n{params.entry}\n")

    return json.dumps({
        "status": "entry_added",
        "timestamp": timestamp,
        "entry": params.entry,
    })
|
|
431
|
+
|
|
432
|
+
|
|
433
|
+
@mcp.tool(
    name="forge_standards_add",
    annotations={
        "title": "Add a New Standard",
        "readOnlyHint": False,
        "destructiveHint": False,
        "idempotentHint": True,
        "openWorldHint": False,
    },
)
async def forge_standards_add(params: ForgeStandardInput) -> str:
    """Add a new standard to the Forge registry.

    Before adding, runs a coherence check against existing standards
    to detect conflicts, overlaps, or philosophy changes.

    Standards are stored globally in ~/.forge/user/standards/ and
    apply to all future projects. Existing projects can be re-assessed
    with forge_assess to check compliance.

    Args:
        params: Name, area, description, enforcement level, and optional rules.

    Returns:
        JSON string: "blocked" with issues when coherence fails, otherwise
        "added" with the saved file path, workflow version, and any warnings.
    """
    import yaml

    from forge_core.phases.coherence import check_new_standard
    from forge_core.registry import USER_DIR, load_standards, version_workflow

    def _issue_payloads(issues):
        # Shared serializer for coherence issues (previously duplicated
        # verbatim in both the "blocked" and "warnings" branches).
        return [
            {
                "severity": i.severity,
                "category": i.category,
                "description": i.description,
                "suggestion": i.suggestion,
            }
            for i in issues
        ]

    existing = load_standards()

    new_standard = {
        "name": params.name,
        "area": params.area,
        "description": params.description,
        "enforcement": params.enforcement,
    }
    if params.rules:
        new_standard["rules"] = params.rules

    report = check_new_standard(new_standard, existing)

    if not report.passed:
        return json.dumps({
            "status": "blocked",
            "message": "Coherence check failed — resolve errors before adding.",
            "issues": _issue_payloads(report.issues),
        }, indent=2)

    # Save: slugify the name (lowercase, spaces -> dashes) for the filename.
    standards_dir = USER_DIR / "standards"
    standards_dir.mkdir(parents=True, exist_ok=True)
    std_path = standards_dir / f"{params.name.lower().replace(' ', '-')}.yaml"
    # FIX: explicit UTF-8 — descriptions/rules may contain non-ASCII text and
    # the locale-default encoding is platform-dependent.
    with open(std_path, "w", encoding="utf-8") as f:
        yaml.dump(new_standard, f, default_flow_style=False)

    version_id = version_workflow(f"Added standard: {params.name}")

    result = {
        "status": "added",
        "standard": params.name,
        "area": params.area,
        "file": str(std_path),
        "workflow_version": version_id,
    }

    # Non-blocking issues surface as warnings rather than stopping the add.
    if report.issues:
        result["warnings"] = _issue_payloads(report.issues)

    return json.dumps(result, indent=2)
|
|
515
|
+
|
|
516
|
+
|
|
517
|
+
@mcp.tool(
    name="forge_standards_list",
    annotations={
        "title": "List All Standards",
        "readOnlyHint": True,
        "destructiveHint": False,
        "idempotentHint": True,
        "openWorldHint": False,
    },
)
async def forge_standards_list(params: ForgePathInput) -> str:
    """Return every active Forge standard (core + user) as JSON.

    Each entry carries name, area, description, source, and enforcement,
    with sensible fallbacks for missing keys.
    """
    from forge_core.registry import load_standards

    all_standards = load_standards()

    entries = []
    for std in all_standards:
        entries.append({
            "name": std.get("name", "unnamed"),
            # Older standards may use "category" instead of "area".
            "area": std.get("area", std.get("category", "general")),
            "description": std.get("description", ""),
            "source": std.get("_source", "unknown"),
            "enforcement": std.get("enforcement", "required"),
        })

    payload = {"total": len(all_standards), "standards": entries}
    return json.dumps(payload, indent=2)
|
|
549
|
+
|
|
550
|
+
|
|
551
|
+
@mcp.tool(
    name="forge_coherence_check",
    annotations={
        "title": "Run Coherence Check",
        "readOnlyHint": True,
        "destructiveHint": False,
        "idempotentHint": True,
        "openWorldHint": False,
    },
)
async def forge_coherence_check(params: ForgePathInput) -> str:
    """Run a full coherence check across all standards.

    Detects conflicts between standards, overlapping areas, enforcement
    mismatches, and potential race conditions between agents, returning
    the pass/fail verdict plus the individual issues as JSON.
    """
    from forge_core.phases.coherence import check_standards_coherence
    from forge_core.registry import load_standards

    all_standards = load_standards()
    report = check_standards_coherence(all_standards)

    issue_list = []
    for issue in report.issues:
        issue_list.append({
            "severity": issue.severity,
            "category": issue.category,
            "description": issue.description,
            "suggestion": issue.suggestion,
        })

    payload = {
        "passed": report.passed,
        "total_standards": len(all_standards),
        "issues": issue_list,
        "summary": report.summary(),
    }
    return json.dumps(payload, indent=2)
|
|
587
|
+
|
|
588
|
+
|
|
589
|
+
@mcp.tool(
    name="forge_mcps_list",
    annotations={
        "title": "List MCP Registry",
        "readOnlyHint": True,
        "destructiveHint": False,
        "idempotentHint": True,
        "openWorldHint": False,
    },
)
async def forge_mcps_list(params: ForgePathInput) -> str:
    """Return the Forge MCP registry as JSON.

    Each configured MCP server is listed with its transport, URL, and
    auto-suggest conditions (used when scaffolding new projects).
    """
    from forge_core.registry import load_mcps

    registry_entries = load_mcps()

    serialized = [
        {
            "name": entry.name,
            "description": entry.description,
            "url": entry.url,
            "transport": entry.transport,
            "auto_suggest": entry.auto_suggest,
            "conditions": entry.conditions,
        }
        for entry in registry_entries
    ]

    return json.dumps(
        {"total": len(registry_entries), "mcps": serialized},
        indent=2,
    )
|
|
624
|
+
|
|
625
|
+
|
|
626
|
+
@mcp.tool(
    name="forge_context_get",
    annotations={
        "title": "Get Project Context",
        "readOnlyHint": True,
        "destructiveHint": False,
        "idempotentHint": True,
        "openWorldHint": False,
    },
)
async def forge_context_get(params: ForgePathInput) -> str:
    """Return the complete project context as JSON.

    Loads .forge/context.yaml (stack, infra, and all recorded decisions)
    and serializes it; returns an error payload when no context exists.
    """
    from forge_core.phases.context import load_context

    ctx = load_context(Path(params.path).resolve())

    if ctx:
        return json.dumps(ctx.model_dump(mode="json"), indent=2)

    return json.dumps({"error": "No context found. Run forge_init first."})
|
|
650
|
+
|
|
651
|
+
|
|
652
|
+
@mcp.tool(
    name="forge_context_override",
    annotations={
        "title": "Override Project Context Values",
        "readOnlyHint": False,
        "destructiveHint": False,
        "idempotentHint": True,
        "openWorldHint": False,
    },
)
async def forge_context_override(params: ForgeContextOverrideInput) -> str:
    """Override specific values in the project context.

    Use this to change stack decisions, auth patterns, or any other
    configuration without re-running forge_init. Dict-valued overrides are
    merged shallowly into existing dict fields; everything else replaces
    the field outright. The result is re-validated through ProjectContext
    before being saved.

    Args:
        params: Project path and a key-value map of overrides.

    Returns:
        JSON string with the applied overrides and the updated context, or
        an error payload when no context exists.
    """
    # FIX: removed an unused function-scope `import yaml` — nothing in this
    # body touches yaml (persistence goes through save_context).
    from forge_core.phases.context import load_context, save_context

    project_path = Path(params.path).resolve()
    context = load_context(project_path)

    if not context:
        return json.dumps({"error": "No context found. Run forge_init first."})

    # Apply overrides: shallow-merge dicts into dict fields, replace otherwise.
    context_data = context.model_dump(mode="json")
    for key, value in params.overrides.items():
        if isinstance(value, dict) and key in context_data and isinstance(context_data[key], dict):
            context_data[key].update(value)
        else:
            context_data[key] = value

    # Re-validate the merged data before persisting.
    from forge_core.models import ProjectContext
    updated_context = ProjectContext(**context_data)
    save_context(updated_context, project_path)

    return json.dumps({
        "status": "updated",
        "overrides_applied": params.overrides,
        "context": updated_context.model_dump(mode="json"),
    }, indent=2)
|
|
695
|
+
|
|
696
|
+
|
|
697
|
+
@mcp.tool(
    name="forge_detect",
    annotations={
        "title": "Detect Project State",
        "readOnlyHint": True,
        "destructiveHint": False,
        "idempotentHint": True,
        "openWorldHint": False,
    },
)
async def forge_detect(params: ForgePathInput) -> str:
    """Analyze a directory and report its state as JSON.

    Determines whether the directory is empty, has docs, has code, or is
    already Forge-managed, and reports the technology stack inferred from
    config files and code patterns.
    """
    from forge_core.detector import detect_project

    scan = detect_project(Path(params.path).resolve())

    payload = {
        "state": scan.state.value,
        "has_forge": scan.has_forge,
        "has_git": scan.has_git,
        "code_files": len(scan.code_files),
        "doc_files": len(scan.doc_files),
        "config_files": [cfg.name for cfg in scan.config_files],
        "detected_stack": scan.detected_stack,
        "is_greenfield": scan.is_greenfield,
        "summary": scan.summary(),
    }
    return json.dumps(payload, indent=2)
|
|
730
|
+
|
|
731
|
+
|
|
732
|
+
@mcp.tool(
    name="forge_implementation_phases",
    annotations={
        "title": "Get AI-Optimized Implementation Phases",
        "readOnlyHint": True,
        "destructiveHint": False,
        "idempotentHint": True,
        "openWorldHint": False,
    },
)
async def forge_implementation_phases(params: ForgePathInput) -> str:
    """Return the standard AI-optimized implementation phase ordering.

    The 10-phase order (types → infra → auth → data → services → api →
    frontend → observability → testing → cicd) is designed to minimize
    AI coding-agent hallucinations by guaranteeing complete context at
    each step.
    """
    from forge_core.phases.intake import get_implementation_phases

    phase_list = get_implementation_phases()

    rationale = (
        "Each phase provides complete context for the next. "
        "Types and contracts first so the AI agent knows all shapes. "
        "Infra and auth before any business logic so security is built-in. "
        "Services before API so endpoints are just thin wrappers. "
        "Frontend last because it consumes typed API contracts."
    )

    payload = {
        "total_phases": len(phase_list),
        "phases": phase_list,
        "philosophy": rationale,
    }
    return json.dumps(payload, indent=2)
|
|
765
|
+
|
|
766
|
+
|
|
767
|
+
# ── Editor Bridge Tools ────────────────────────────────────────────────────
|
|
768
|
+
|
|
769
|
+
|
|
770
|
+
class ForgeSyncInput(BaseModel):
    """Input for generating editor instruction files."""
    # Strip surrounding whitespace; reject unknown keys.
    model_config = ConfigDict(str_strip_whitespace=True, extra="forbid")

    path: str = Field(default=".", description="Path to the project directory.")
    # "all" expands to every supported editor format in forge_sync.
    format: str = Field(
        default="claude",
        description="Editor format: claude, cursor, copilot, generic, all",
    )
|
|
779
|
+
|
|
780
|
+
|
|
781
|
+
class ForgeAuditFileInput(BaseModel):
    """Input for auditing a specific file or code snippet."""
    # Strip surrounding whitespace; reject unknown keys.
    model_config = ConfigDict(str_strip_whitespace=True, extra="forbid")

    # Raw code text to check; min_length guards against empty submissions.
    code: str = Field(
        ...,
        description="The code content to audit against Forge standards.",
        min_length=5,
    )
    # Informational only — gives the auditor file-location context.
    file_path: str = Field(
        default="unknown",
        description="The file path for context (e.g., src/api/claims.py).",
    )
    path: str = Field(default=".", description="Path to the project directory.")
|
|
795
|
+
|
|
796
|
+
|
|
797
|
+
class ForgeAuditProjectInput(BaseModel):
    """Parameters for auditing a whole project (or a subset of files)."""

    model_config = ConfigDict(extra="forbid", str_strip_whitespace=True)

    # Directory that holds the Forge project (its .forge/ metadata).
    path: str = Field(".", description="Path to the project directory.")
    # Explicit file list to restrict the audit; None audits everything.
    target_files: Optional[list[str]] = Field(
        None,
        description="Specific files to audit. None means full project.",
    )
|
|
806
|
+
|
|
807
|
+
|
|
808
|
+
@mcp.tool(
    name="forge_sync",
    annotations={
        "title": "Generate Editor Instruction Files",
        "readOnlyHint": False,
        "destructiveHint": False,
        "idempotentHint": True,
        "openWorldHint": False,
    },
)
async def forge_sync(params: ForgeSyncInput) -> str:
    """Generate CLAUDE.md, .cursorrules, or equivalent from Forge governance.

    This is the core bridge between Forge and the AI editor. It translates
    ALL governance knowledge — standards, patterns, anti-patterns, project
    context, journal learnings, security rules, observability requirements,
    MCP recommendations — into a single file the editor reads and follows.

    Run this after:
    - forge init (first time)
    - Adding new standards
    - Changing project context
    - Adding journal entries
    - Upgrading Forge

    Returns a JSON string listing the files created, or a JSON object with
    an ``error`` key when the format is unknown or no project exists.
    """
    from forge_core.editor_bridge import write_editor_file
    from forge_core.phases.context import load_context

    # Validate the format up front so a bad value surfaces as a JSON error
    # (matching the error convention of the other tools) instead of an
    # unhandled exception from inside write_editor_file.
    known_formats = ("claude", "cursor", "copilot", "generic")
    if params.format != "all" and params.format not in known_formats:
        return json.dumps({
            "error": (
                f"Unknown format '{params.format}'. "
                "Valid formats: claude, cursor, copilot, generic, all."
            )
        })

    project_path = Path(params.path).resolve()
    context = load_context(project_path)

    if not context:
        return json.dumps({"error": "No Forge project found. Run forge_init first."})

    # "all" fans out to every supported format; otherwise emit just one.
    formats = list(known_formats) if params.format == "all" else [params.format]

    files_created = []
    for fmt in formats:
        # write_editor_file returns the path of the generated instruction file.
        out_path = write_editor_file(project_path, context, fmt)
        files_created.append(str(out_path))

    return json.dumps({
        "status": "synced",
        "files_created": files_created,
        "instruction": (
            "The editor instruction file has been generated. "
            "The AI editor will read it automatically on next interaction. "
            "Re-run forge_sync after changing standards or context."
        ),
    }, indent=2)
|
|
861
|
+
|
|
862
|
+
|
|
863
|
+
@mcp.tool(
    name="forge_audit_code",
    annotations={
        "title": "Audit Code Against Standards",
        "readOnlyHint": True,
        "destructiveHint": False,
        "idempotentHint": True,
        "openWorldHint": False,
    },
)
async def forge_audit_code(params: ForgeAuditFileInput) -> str:
    """Audit a code snippet or file against all Forge standards.

    Builds an audit prompt — bundling every applicable standard, pattern,
    security rule, and the project context — and returns it as JSON for
    the editor's LLM to process into findings.

    Typical uses: validating freshly generated code, reviewing existing
    code for compliance, or checking regulatory requirements.
    """
    from forge_core.auditor import build_file_audit_prompt
    from forge_core.phases.context import load_context
    from forge_core.registry import load_standards

    root = Path(params.path).resolve()
    ctx = load_context(root)

    # Without a loaded context there is nothing to audit against.
    if not ctx:
        return json.dumps({"error": "No Forge project found. Run forge_init first."})

    all_standards = load_standards()
    audit_prompt = build_file_audit_prompt(params.code, params.file_path, ctx, all_standards)

    payload = {
        "status": "audit_ready",
        "file": params.file_path,
        "standards_checked": len(all_standards),
        "regulatory": [reg.value for reg in ctx.regulatory],
        "audit_prompt": audit_prompt,
        "instruction": (
            "Process the audit_prompt with your LLM to generate findings. "
            "The response will be a JSON array of findings with severity, "
            "description, and fix for each issue."
        ),
    }
    return json.dumps(payload, indent=2)
|
|
910
|
+
|
|
911
|
+
|
|
912
|
+
@mcp.tool(
    name="forge_audit_project",
    annotations={
        "title": "Audit Entire Project",
        "readOnlyHint": True,
        "destructiveHint": False,
        "idempotentHint": True,
        "openWorldHint": False,
    },
)
async def forge_audit_project(params: ForgeAuditProjectInput) -> str:
    """Generate a full project audit prompt against all Forge standards.

    Produces a comprehensive audit prompt that checks the entire project
    (or specific files) against all standards, patterns, security rules,
    and regulatory requirements.

    Returns a JSON string containing the prompt, where it was saved, and
    summary metadata; or a JSON ``error`` object when no Forge project
    exists at ``params.path``.
    """
    from forge_core.auditor import build_audit_prompt
    from forge_core.phases.context import load_context
    from forge_core.registry import load_standards

    project_path = Path(params.path).resolve()
    context = load_context(project_path)

    if not context:
        return json.dumps({"error": "No Forge project found. Run forge_init first."})

    standards = load_standards()
    prompt = build_audit_prompt(project_path, context, standards, params.target_files)

    # Persist the prompt so it can be reused outside this MCP session.
    audit_dir = project_path / ".forge" / "audit"
    audit_dir.mkdir(parents=True, exist_ok=True)
    prompt_path = audit_dir / "full_audit_prompt.md"
    # Explicit UTF-8: the prompt may contain non-ASCII text (this codebase
    # itself uses characters like "→"), and the platform default encoding
    # (e.g. cp1252 on Windows) would raise UnicodeEncodeError on it.
    prompt_path.write_text(prompt, encoding="utf-8")

    return json.dumps({
        "status": "audit_ready",
        "standards_checked": len(standards),
        "regulatory": [r.value for r in context.regulatory],
        "target_files": params.target_files or "all",
        "prompt_saved_to": str(prompt_path),
        "audit_prompt": prompt,
    }, indent=2)
|
|
956
|
+
|
|
957
|
+
|
|
958
|
+
def _task_words(task: str) -> set[str]:
    """Split *task* into a set of lowercase alphanumeric words."""
    cleaned = "".join(ch if ch.isalnum() else " " for ch in task.lower())
    return set(cleaned.split())


def _mentions_any(words: set[str], keywords: tuple[str, ...]) -> bool:
    """Return True when any task word starts with any keyword.

    Word-prefix matching keeps "auth" matching "authentication" and
    "migration" matching "migrations", while avoiding the false positives
    of raw substring matching (e.g. "ai" inside "maintain"/"email", or
    "api" inside "rapid").
    """
    return any(word.startswith(kw) for kw in keywords for word in words)


@mcp.tool(
    name="forge_get_rules_for",
    annotations={
        "title": "Get Rules for a Specific Task",
        "readOnlyHint": True,
        "destructiveHint": False,
        "idempotentHint": True,
        "openWorldHint": False,
    },
)
async def forge_get_rules_for(params: ForgeIntakeInput) -> str:
    """Get all applicable Forge rules for a specific development task.

    Given a description of what you're about to build (e.g., "new API endpoint
    for claim submission", "React component for dashboard", "database migration
    for user table"), returns ALL rules, patterns, and guidelines that apply.

    This is the recommender function — it tells the editor everything it needs
    to know BEFORE writing code, so the code comes out right the first time.

    Use this at the START of any task, before writing code.
    """
    from forge_core.phases.context import load_context
    from forge_core.registry import load_mcps, load_standards

    project_path = Path(params.path).resolve()
    context = load_context(project_path)

    if not context:
        return json.dumps({"error": "No Forge project found. Run forge_init first."})

    standards = load_standards()
    # NOTE(review): `mcps` is never referenced below; kept in case load_mcps()
    # has caching/registration side effects — confirm and remove if pure.
    mcps = load_mcps()
    words = _task_words(params.content)

    # Determine which standard categories are relevant to this task.
    relevant_categories: set[str] = set()

    # API-related task
    if _mentions_any(words, ("api", "endpoint", "route", "controller", "rest", "graphql")):
        relevant_categories.update(["api-design", "security", "observability", "code-quality"])

    # Frontend task
    if _mentions_any(words, ("react", "component", "frontend", "ui", "page", "form")):
        relevant_categories.update(["code-quality"])

    # Database task ("postgres"/"mysql"/"sqlite" compensate for "sql" no
    # longer substring-matching inside e.g. "postgresql").
    if _mentions_any(words, ("database", "migration", "model", "schema", "query", "sql",
                             "postgres", "mysql", "sqlite")):
        relevant_categories.update(["security", "code-quality"])

    # Auth task
    if _mentions_any(words, ("auth", "login", "permission", "rbac", "role")):
        relevant_categories.update(["security"])

    # AI task
    if _mentions_any(words, ("ai", "llm", "model", "prompt", "embedding", "inference")):
        relevant_categories.update(["security", "observability"])

    # Default: include everything for unclassified tasks
    if not relevant_categories:
        relevant_categories = {"api-design", "security", "observability", "code-quality", "architecture"}

    # Filter standards by relevance.
    applicable_standards = []
    for std in standards:
        area = std.get("area", std.get("category", "general"))
        # NOTE(review): `"general" in relevant_categories` can never be true
        # (no branch above adds "general"); was `area == "general"` intended,
        # so uncategorized standards always apply? Preserved as-is — confirm.
        if area in relevant_categories or "general" in relevant_categories:
            applicable_standards.append({
                "name": std.get("name", "unnamed"),
                "area": area,
                "enforcement": std.get("enforcement", "required"),
                "rules": std.get("rules", []),
                "examples": std.get("examples", {}),
            })

    # Baseline code-quality settings that apply to every task.
    always_include = {
        "type_checking": context.standards.type_checking,
        "linting": context.standards.linting,
        "test_coverage_min": context.standards.test_coverage_min,
    }

    # Load the journal for project-specific learnings (explicit UTF-8 so a
    # non-ASCII entry cannot break on platform-default encodings).
    journal_path = project_path / ".forge" / "journal.md"
    journal_content = ""
    if journal_path.exists():
        content = journal_path.read_text(encoding="utf-8")
        if "## Entries" in content:
            journal_content = content.split("## Entries", 1)[1].strip()

    result = {
        "task": params.content,
        "project": context.name,
        "stack": {
            "backend": context.backend.value,
            "frontend": context.frontend.value,
            "auth": context.auth.value,
            "database": context.database.value,
        },
        "applicable_standards": applicable_standards,
        "code_quality": always_include,
        "regulatory": [r.value for r in context.regulatory],
        "api_config": {
            "versioning": context.api.versioning,
            "spec": context.api.spec,
            "mcp_ready": context.api.mcp_ready,
        } if "api-design" in relevant_categories else None,
        "observability_config": {
            "apm": context.observability.apm,
            "metrics": context.observability.metrics,
            "logs": context.observability.logs,
        } if "observability" in relevant_categories else None,
        "ai_config": {
            "enabled": context.ai.enabled,
            "providers": context.ai.providers,
            "safety_checks": context.ai.safety_checks,
        } if context.ai.enabled else None,
    }

    if journal_content:
        result["project_specific_context"] = journal_content

    return json.dumps(result, indent=2)
|
|
1081
|
+
|
|
1082
|
+
|
|
1083
|
+
# ── Server Entry Point ────────────────────────────────────────────────────
|
|
1084
|
+
|
|
1085
|
+
# Allow running the MCP server directly (e.g. `python -m mcp_server.server`);
# mcp.run() starts the server loop defined earlier in this module.
if __name__ == "__main__":
    mcp.run()
|