xenfra-sdk 0.2.5__py3-none-any.whl → 0.2.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- xenfra_sdk/__init__.py +46 -2
- xenfra_sdk/blueprints/base.py +150 -0
- xenfra_sdk/blueprints/factory.py +99 -0
- xenfra_sdk/blueprints/node.py +219 -0
- xenfra_sdk/blueprints/python.py +57 -0
- xenfra_sdk/blueprints/railpack.py +99 -0
- xenfra_sdk/blueprints/schema.py +70 -0
- xenfra_sdk/cli/main.py +175 -49
- xenfra_sdk/client.py +6 -2
- xenfra_sdk/constants.py +26 -0
- xenfra_sdk/db/session.py +8 -3
- xenfra_sdk/detection.py +262 -191
- xenfra_sdk/dockerizer.py +76 -120
- xenfra_sdk/engine.py +758 -172
- xenfra_sdk/events.py +254 -0
- xenfra_sdk/exceptions.py +9 -0
- xenfra_sdk/governance.py +150 -0
- xenfra_sdk/manifest.py +93 -138
- xenfra_sdk/mcp_client.py +7 -5
- xenfra_sdk/{models.py → models/__init__.py} +17 -1
- xenfra_sdk/models/context.py +61 -0
- xenfra_sdk/orchestrator.py +223 -99
- xenfra_sdk/privacy.py +11 -0
- xenfra_sdk/protocol.py +38 -0
- xenfra_sdk/railpack_adapter.py +357 -0
- xenfra_sdk/railpack_detector.py +587 -0
- xenfra_sdk/railpack_manager.py +312 -0
- xenfra_sdk/recipes.py +152 -19
- xenfra_sdk/resources/activity.py +45 -0
- xenfra_sdk/resources/build.py +157 -0
- xenfra_sdk/resources/deployments.py +22 -2
- xenfra_sdk/resources/intelligence.py +25 -0
- xenfra_sdk-0.2.6.dist-info/METADATA +118 -0
- xenfra_sdk-0.2.6.dist-info/RECORD +49 -0
- {xenfra_sdk-0.2.5.dist-info → xenfra_sdk-0.2.6.dist-info}/WHEEL +1 -1
- xenfra_sdk/templates/Caddyfile.j2 +0 -14
- xenfra_sdk/templates/Dockerfile.j2 +0 -41
- xenfra_sdk/templates/cloud-init.sh.j2 +0 -90
- xenfra_sdk/templates/docker-compose-multi.yml.j2 +0 -29
- xenfra_sdk/templates/docker-compose.yml.j2 +0 -30
- xenfra_sdk-0.2.5.dist-info/METADATA +0 -116
- xenfra_sdk-0.2.5.dist-info/RECORD +0 -38
xenfra_sdk/__init__.py
CHANGED
@@ -1,16 +1,23 @@
 # This file makes src/xenfra_sdk a Python package.
 
 from .client import XenfraClient
-from .
+from .engine import InfraEngine
+from .events import BuildEvent, DeploymentPhase, EventEmitter, EventStatus
+from .exceptions import AuthenticationError, DeploymentError, XenfraAPIError, XenfraError
+from .protocol import ProtocolRegistry
 from .models import (
+    BalanceRead,
     CodebaseAnalysisResponse,
     DiagnosisResponse,
+    DropletCostRead,
     PatchObject,
     ProjectRead,
 )
 
-# Microservices support
+# Microservices & Config support
 from .manifest import (
+    XenfraConfig,
+    load_xenfra_config,
     ServiceDefinition,
     load_services_from_xenfra_yaml,
     is_microservices_project,
@@ -37,6 +44,22 @@ from .security_scanner import (
     Severity,
 )
 
+# Railpack Integration
+from .railpack_detector import (
+    RailpackDetector,
+    RailpackDetectionResult,
+    EnvVariable,
+    get_railpack_detector,
+)
+from .railpack_manager import (
+    RailpackManager,
+    get_railpack_manager,
+)
+from .railpack_adapter import (
+    RailpackAdapter,
+    RailpackPlan,
+)
+
 __all__ = [
     "XenfraClient",
     "XenfraError",
@@ -46,6 +69,11 @@ __all__ = [
     "CodebaseAnalysisResponse",
     "PatchObject",
     "ProjectRead",
+    "BalanceRead",
+    "DropletCostRead",
+    # Config
+    "XenfraConfig",
+    "load_xenfra_config",
     # Microservices
     "ServiceDefinition",
     "load_services_from_xenfra_yaml",
@@ -58,4 +86,20 @@ __all__ = [
     "detect_pyproject_services",
     "ServiceOrchestrator",
     "get_orchestrator_for_project",
+    "InfraEngine",
+    "DeploymentError",
+    "ProtocolRegistry",
+    "EventEmitter",
+    "BuildEvent",
+    "DeploymentPhase",
+    "EventStatus",
+    # Railpack Integration
+    "RailpackDetector",
+    "RailpackDetectionResult",
+    "EnvVariable",
+    "get_railpack_detector",
+    "RailpackManager",
+    "get_railpack_manager",
+    "RailpackAdapter",
+    "RailpackPlan",
 ]
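
With the widened __all__, the engine, event types, and Railpack helpers become importable straight from the package root. A minimal import sketch follows; only the exported names are confirmed by the hunks above, while the zero-argument call to get_railpack_detector() and its return type are assumptions.

    # Import sketch against the 0.2.6 package root; names come from __all__ above,
    # the factory call signature is an assumption.
    from xenfra_sdk import InfraEngine, ProtocolRegistry, RailpackDetector, get_railpack_detector

    detector = get_railpack_detector()  # assumed to return a RailpackDetector instance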

xenfra_sdk/blueprints/base.py
ADDED
@@ -0,0 +1,150 @@
+import yaml
+from abc import ABC, abstractmethod
+from typing import Dict, List, Optional
+from pathlib import Path
+
+from xenfra_sdk.blueprints.schema import (
+    DeploymentBlueprintManifest, DockerfileModel, ComposeModel, ServiceDetail,
+    DeployModel, DeployResourcesModel, ResourceLimitsModel, ResourceReservationsModel
+)
+
+from xenfra_sdk.constants import DEFAULT_PORT_RANGE_START
+
+class BaseBlueprint(ABC):
+    """
+    Abstract base class for all Xenfra Blueprints (Build Packs).
+
+    A Blueprint is responsible for:
+    1. Analyzing code to detect requirements.
+    2. Building a valid Pydantic manifest.
+    3. Rendering that manifest to files (Dockerfile, compose, env).
+    """
+
+    def __init__(self, context: dict):
+        self.context = context
+        self.framework = context.get("framework")
+        self.port = context.get("port") or DEFAULT_PORT_RANGE_START
+        self.file_manifest = context.get("file_manifest", [])
+        self.resource_limits = context.get("resource_limits", {})
+
+    def _generate_deploy_model(self) -> DeployModel:
+        """Centralized helper for resource governance."""
+        return DeployModel(
+            resources=DeployResourcesModel(
+                limits=ResourceLimitsModel(
+                    memory=self.resource_limits.get("memory", "512m"),
+                    cpus=self.resource_limits.get("cpus", "0.5"),
+                ),
+                reservations=ResourceReservationsModel(
+                    memory=self.resource_limits.get("memory_reserved", "128m"),
+                    cpus=self.resource_limits.get("cpus_reserved", "0.25"),
+                ),
+            )
+        )
+
+    @abstractmethod
+    def generate_manifest(self) -> DeploymentBlueprintManifest:
+        """Analyze context and files to build the Pydantic manifest."""
+        pass
+
+    def render(self) -> Dict[str, str]:
+        """Renders the generated manifest into final deployment strings."""
+        manifest = self.generate_manifest()
+        result = {}
+
+        # 1. Render Dockerfile
+        result["Dockerfile"] = self._render_dockerfile(manifest.dockerfile)
+
+        # 2. Render docker-compose.yml
+        result["docker-compose.yml"] = self._render_compose(manifest.compose)
+
+        # 3. Render .env
+        if manifest.env_file:
+            result[".env"] = "\n".join([f'{k}="{v}"' for k, v in manifest.env_file.items()])
+
+        # 4. Render Caddyfile (if provided)
+        if manifest.caddyfile:
+            result["Caddyfile"] = manifest.caddyfile
+
+        # 5. Render Railpack Plan (if provided)
+        if hasattr(manifest, "railpack_plan") and manifest.railpack_plan:
+            result["railpack-plan.json"] = manifest.railpack_plan
+
+        return result
+
+    def _render_dockerfile(self, model: DockerfileModel) -> str:
+        """Converts DockerfileModel to a string."""
+        lines = [f"FROM {model.base_image}"]
+
+        # Inject ARG instructions (Build-time variables)
+        # Must come after FROM (usually) or before depending on if they affect FROM
+        # Standard practice: FROM -> ARG -> WORKDIR -> COPY -> RUN
+        if model.args:
+            for arg in model.args:
+                lines.append(f"ARG {arg}")
+
+        # APT packages
+        if model.system_packages:
+            packages = " ".join(model.system_packages)
+            lines.append("RUN apt-get update && apt-get install -y " + packages + " && rm -rf /var/lib/apt/lists/*")
+
+        lines.append(f"WORKDIR {model.workdir}")
+
+        # Env vars (Non-secret defaults only)
+        # We rely on .env + docker-compose for actual deployment values.
+        # ARGs are already available during build.
+        arg_keys = set(model.args)
+        for k, v in model.env_vars.items():
+            if k in arg_keys:
+                # Skip ENV if it's already an ARG, to keep secrets out of Dockerfile and fix syntax errors
+                continue
+            # Quote values for safety
+            lines.append(f'ENV {k}="{v}"')
+
+        # Copy commands
+        if model.copy_dirs:
+            for d in model.copy_dirs:
+                lines.append(f"COPY {d} .")
+        else:
+            lines.append("COPY . .")
+
+        # Custom RUN commands
+        for cmd in model.run_commands:
+            lines.append(f"RUN {cmd}")
+
+        if model.expose_port:
+            lines.append(f"EXPOSE {model.expose_port}")
+
+        if model.entrypoint:
+            if isinstance(model.entrypoint, list):
+                ep = ", ".join([f'"{x}"' for x in model.entrypoint])
+                lines.append(f"ENTRYPOINT [{ep}]")
+            else:
+                lines.append(f"ENTRYPOINT {model.entrypoint}")
+
+        if model.command:
+            if isinstance(model.command, list):
+                cmd = ", ".join([f'"{x}"' for x in model.command])
+                lines.append(f"CMD [{cmd}]")
+            else:
+                lines.append(f"CMD {model.command}")
+
+        return "\n".join(lines)
+
+    def _render_compose(self, model: ComposeModel) -> str:
+        """Converts ComposeModel to a YAML string."""
+        # We use a custom dumper to ensure services come first etc if needed
+        # But standard yaml works for start
+        data = model.model_dump(by_alias=True, exclude_none=True)
+        return yaml.dump(data, sort_keys=False, default_flow_style=False)
+
+    def has_file(self, filename: str) -> bool:
+        """Helper to check if a file exists in the manifest."""
+        return any(f.get("path") == filename for f in self.file_manifest)
+
+    def get_file_content(self, filename: str) -> Optional[str]:
+        """Helper to get file content from manifest."""
+        for f in self.file_manifest:
+            if f.get("path") == filename:
+                return f.get("content")
+        return None
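
For orientation, a minimal concrete subclass of BaseBlueprint could look like the sketch below. It is illustrative only: the blueprint name is hypothetical, and it sticks to the DockerfileModel/ServiceDetail fields exercised by the Python and Node blueprints later in this diff (any schema defaults beyond that, such as the workdir, are assumed).

    # Hypothetical example of plugging into the BaseBlueprint API above; not part of the SDK.
    from xenfra_sdk.blueprints.base import BaseBlueprint
    from xenfra_sdk.blueprints.schema import (
        DeploymentBlueprintManifest, DockerfileModel, ComposeModel, ServiceDetail,
    )

    class StaticSiteBlueprint(BaseBlueprint):  # hypothetical name
        def generate_manifest(self) -> DeploymentBlueprintManifest:
            dockerfile = DockerfileModel(
                base_image="nginx:alpine",
                expose_port=self.port,
                run_commands=[],
                command=["nginx", "-g", "daemon off;"],
            )
            service = ServiceDetail(
                build=".",
                ports=[f"{self.port}:{self.port}"],
                environment=self.context.get("env_vars", {}),
                deploy=self._generate_deploy_model(),  # inherits the resource governance helper
            )
            return DeploymentBlueprintManifest(
                dockerfile=dockerfile,
                compose=ComposeModel(services={"app": service}),
                env_file=self.context.get("env_vars", {}),
            )

    # artifacts = StaticSiteBlueprint({"port": 8080, "file_manifest": []}).render()
    # -> {"Dockerfile": "...", "docker-compose.yml": "..."}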

xenfra_sdk/blueprints/factory.py
ADDED
@@ -0,0 +1,99 @@
+from typing import Type, Dict, List
+from xenfra_sdk.blueprints.base import BaseBlueprint
+from xenfra_sdk.blueprints.python import PythonBlueprint
+from xenfra_sdk.blueprints.node import NodeBlueprint
+from xenfra_sdk.blueprints.railpack import RailpackBlueprint
+from xenfra_sdk.governance import get_resource_limits, ResourceLimits
+
+
+def resolve_blueprint_class(context: dict) -> Type[BaseBlueprint]:
+    """
+    The 'Check-In Desk' for manifest generation.
+    Decides between Sovereign (Lean) and Specialist (Railpack) paths.
+
+    Priority:
+    1. Explicit framework selection (user-specified)
+    2. File-based detection fallback
+    """
+    framework = str(context.get("framework", "")).lower().strip()
+    file_manifest = context.get("file_manifest", [])
+    file_names = {f.get("path") for f in file_manifest}
+
+    # 1. EXPLICIT FRAMEWORK SELECTION (takes priority)
+    # If user explicitly selected a framework, respect that choice
+    SOVEREIGN_PYTHON = ("python", "fastapi", "flask", "django")
+
+    # Modern Node.js frameworks that need railpack (auto-detect + build)
+    # These have complex build pipelines that railpack/nixpacks handles better
+    RAILPACK_NODE = ("next", "nextjs", "nuxt", "vite", "nestjs", "nest")
+
+    # Simple Node.js that can use our lean NodeBlueprint
+    # Only for Express-style apps without complex build steps
+    SOVEREIGN_NODE = ("express",)
+
+    if framework in SOVEREIGN_PYTHON:
+        # Check for complex package managers (uv, poetry, pipenv) -> Use Railpack
+        if any(f in file_names for f in ("uv.lock", "poetry.lock", "Pipfile.lock")):
+            return RailpackBlueprint
+        return PythonBlueprint
+
+    # Route modern frameworks to RailpackBlueprint
+    if framework in RAILPACK_NODE:
+        return RailpackBlueprint
+
+    # Generic "node" or "nodejs" → check file_manifest for framework hints
+    if framework in ("node", "nodejs"):
+        # Check for Next.js, Nuxt, etc. config files
+        nextjs_configs = {"next.config.js", "next.config.ts", "next.config.mjs"}
+        nuxt_configs = {"nuxt.config.js", "nuxt.config.ts"}
+        vite_configs = {"vite.config.js", "vite.config.ts"}
+
+        if nextjs_configs & file_names or nuxt_configs & file_names or vite_configs & file_names:
+            return RailpackBlueprint
+
+        # Default to RailpackBlueprint for generic Node.js (safer, handles more cases)
+        return RailpackBlueprint
+
+    if framework in SOVEREIGN_NODE:
+        return NodeBlueprint
+
+    # 2. FILE-BASED DETECTION FALLBACK (only if no explicit framework)
+    # Check for Python Sovereign Path (Strict Golden Path)
+    if "requirements.txt" in file_names and "pyproject.toml" not in file_names:
+        return PythonBlueprint
+
+    # Check for Node Sovereign Path (Strict Golden Path)
+    # Only for simple Express apps with package-lock.json
+    if "package-lock.json" in file_names and "package.json" in file_names:
+        # If has modern framework config files, use Railpack
+        nextjs_configs = {"next.config.js", "next.config.ts", "next.config.mjs"}
+        if nextjs_configs & file_names:
+            return RailpackBlueprint
+        # Otherwise, still prefer RailpackBlueprint for safety
+        return RailpackBlueprint
+
+    # 3. Specialist Path (The Delegate)
+    # Handles EVERYTHING else: Rust, Go, Bun, UV, Poetry, etc.
+    return RailpackBlueprint
+
+
+def render_blueprint(context: dict) -> Dict[str, str]:
+    """
+    Main entry point for manifest generation.
+    Resolves the blueprint and renders it to strings.
+
+    ZEN GAP FIX: Automatically injects tier-based resource limits.
+    """
+    # Inject resource limits if tier is provided but resource_limits isn't
+    if "resource_limits" not in context and "tier" in context:
+        limits = get_resource_limits(context["tier"])
+        context["resource_limits"] = {
+            "memory": limits.memory,
+            "cpus": limits.cpus,
+            "memory_reserved": limits.memory_reserved,
+            "cpus_reserved": limits.cpus_reserved,
+        }
+
+    blueprint_class = resolve_blueprint_class(context)
+    blueprint = blueprint_class(context)
+    return blueprint.render()
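
A usage sketch for render_blueprint(): the context keys mirror what resolve_blueprint_class() and BaseBlueprint read above (framework, port, tier, env_vars, file_manifest); the concrete values, including the tier id "starter", are invented for illustration.

    # Hedged usage sketch; only the context keys are grounded in the code above.
    from xenfra_sdk.blueprints.factory import render_blueprint

    context = {
        "framework": "fastapi",    # explicit selection wins over file-based detection
        "port": 8000,
        "tier": "starter",         # hypothetical tier id; governance injects resource_limits
        "env_vars": {"DATABASE_URL": "postgres://..."},
        "file_manifest": [
            {"path": "requirements.txt", "content": "fastapi\nuvicorn\n"},
            {"path": "main.py", "content": "..."},
        ],
    }

    artifacts = render_blueprint(context)
    # artifacts maps filenames to rendered strings, e.g.
    # artifacts["Dockerfile"], artifacts["docker-compose.yml"], artifacts[".env"]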

xenfra_sdk/blueprints/node.py
ADDED
@@ -0,0 +1,219 @@
+import subprocess
+import json
+from typing import Dict, Any, List
+from xenfra_sdk.blueprints.base import BaseBlueprint
+from xenfra_sdk.blueprints.schema import DeploymentBlueprintManifest, DockerfileModel, ComposeModel, ServiceDetail
+
+
+class NodeBlueprint(BaseBlueprint):
+    """
+    Sovereign Lean Build-Pack for Node.js with Smart Detection.
+
+    Detects sub-frameworks: Next.js, Vite, Express, Nest, etc.
+    Detects package managers: npm, pnpm, yarn, bun.
+    """
+
+    # === PROTOCOL COMPLIANT: Static Recipe Pattern ===
+    # No if-else chains - use dictionary-based detection
+    PACKAGE_MANAGER_RECIPES = {
+        "bun.lockb": {"install": "bun install", "run": "bun run", "binary": "bun"},
+        "pnpm-lock.yaml": {"install": "pnpm install", "run": "pnpm run", "binary": "pnpm"},
+        "yarn.lock": {"install": "yarn install", "run": "yarn", "binary": "yarn"},
+        "package-lock.json": {"install": "npm install", "run": "npm run", "binary": "npm"},
+    }
+
+    FRAMEWORK_RECIPES = {
+        "next": {
+            "build": "{run} build",
+            "start": "{run} start",
+            "base_image": "node:18-slim",
+        },
+        "vite": {
+            "build": "{run} build",
+            "start": "{run} preview",
+            "base_image": "node:18-slim",
+        },
+        "nuxt": {
+            "build": "{run} build",
+            "start": "node .output/server/index.mjs",
+            "base_image": "node:18-slim",
+        },
+        "express": {
+            "build": None,  # Express usually doesn't need build
+            "start": "node {entrypoint}",
+            "base_image": "node:18-slim",
+        },
+        "nestjs": {
+            "build": "{run} build",
+            "start": "node dist/main.js",
+            "base_image": "node:18-slim",
+        },
+    }
+
+    def generate_manifest(self) -> DeploymentBlueprintManifest:
+        # Smart Detection: Try nixpacks -> Static Recipes -> Default
+        detection = self._detect_node_environment()
+
+        # 1. Build Dockerfile
+        dockerfile = DockerfileModel(
+            base_image=detection["base_image"],
+            expose_port=self.port,
+            run_commands=detection["run_commands"],
+            command=detection["command"],
+            env_vars={"PORT": str(self.port)}
+        )
+
+        # 2. Build Compose with Resource Governance
+        service = ServiceDetail(
+            build=".",
+            ports=[f"{self.port}:{self.port}"],
+            environment={
+                **self.context.get("env_vars", {}),
+                "PORT": str(self.port),
+                "NODE_ENV": "production"
+            },
+            command=detection["command"],
+            deploy=self._generate_deploy_model()
+        )
+
+        return DeploymentBlueprintManifest(
+            dockerfile=dockerfile,
+            compose=ComposeModel(services={"app": service}),
+            env_file=self.context.get("env_vars", {})
+        )
+
+    def _detect_node_environment(self) -> Dict[str, Any]:
+        """
+        Multi-stage detection for Node.js projects.
+        Stage A: Try nixpacks CLI (most accurate)
+        Stage B: Parse package.json for framework/package manager
+        Stage C: Default npm start fallback
+        """
+        file_names = {f.get("path", "") for f in self.file_manifest}
+        pkg_json = self._get_package_json()
+
+        # Stage A: Try Railpack CLI (Railway's latest buildpack)
+        try:
+            result = subprocess.run(
+                ["railpack", "plan", ".", "--json"],
+                capture_output=True,
+                text=True,
+                check=True,
+                timeout=30
+            )
+            plan = json.loads(result.stdout)
+            phases = plan.get("phases", {})
+
+            # Extract nixpacks detection
+            build_cmds = phases.get("build", {}).get("cmds", [])
+            start_cmd = phases.get("start", {}).get("cmd", "npm start")
+
+            return {
+                "base_image": "node:18-slim",
+                "run_commands": self._build_run_commands(build_cmds),
+                "command": start_cmd,
+                "detection": "nixpacks"
+            }
+        except (subprocess.CalledProcessError, FileNotFoundError,
+                subprocess.TimeoutExpired, json.JSONDecodeError):
+            pass  # Fall through to Stage B
+
+        # Stage B: Static Recipe Detection
+        pkg_manager = self._detect_package_manager(file_names)
+        framework = self._detect_node_framework(pkg_json)
+        entrypoint = self._detect_entrypoint(pkg_json, file_names)
+
+        # Build commands based on detected framework
+        run_prefix = pkg_manager["run"]
+        recipe = self.FRAMEWORK_RECIPES.get(framework, {})
+
+        build_cmd = recipe.get("build")
+        start_cmd = recipe.get("start", "{run} start")
+
+        # Format commands with detected values
+        if build_cmd:
+            build_cmd = build_cmd.format(run=run_prefix)
+        start_cmd = start_cmd.format(run=run_prefix, entrypoint=entrypoint)
+
+        run_commands = [
+            pkg_manager['install'],  # e.g., "npm install" - renderer adds RUN
+        ]
+        if build_cmd:
+            run_commands.append(build_cmd)  # e.g., "npm run build" - renderer adds RUN
+
+        return {
+            "base_image": recipe.get("base_image", "node:18-slim"),
+            "run_commands": run_commands,
+            "command": start_cmd,
+            "detection": f"static:{framework}:{pkg_manager['binary']}"
+        }
+
+    def _detect_package_manager(self, file_names: set) -> Dict[str, str]:
+        """Detect package manager from lockfile."""
+        for lockfile, recipe in self.PACKAGE_MANAGER_RECIPES.items():
+            if lockfile in file_names:
+                return {**recipe, "lockfile": lockfile}
+        # Default to npm
+        return {**self.PACKAGE_MANAGER_RECIPES["package-lock.json"], "lockfile": "package*.json"}
+
+    def _detect_node_framework(self, pkg_json: Dict) -> str:
+        """Detect Node.js framework from package.json dependencies."""
+        deps = {
+            **pkg_json.get("dependencies", {}),
+            **pkg_json.get("devDependencies", {})
+        }
+
+        # Priority order for framework detection
+        framework_deps = [
+            ("next", "next"),
+            ("@nestjs/core", "nestjs"),
+            ("nuxt", "nuxt"),
+            ("vite", "vite"),
+            ("express", "express"),
+        ]
+
+        for dep_name, framework in framework_deps:
+            if dep_name in deps:
+                return framework
+
+        return "express"  # Default assumption for Node.js
+
+    def _detect_entrypoint(self, pkg_json: Dict, file_names: set) -> str:
+        """Detect main entrypoint file."""
+        # Check package.json main field
+        main = pkg_json.get("main")
+        if main:
+            return main
+
+        # Check common entrypoint patterns
+        common_entrypoints = ["index.js", "server.js", "app.js", "src/index.js", "src/server.js"]
+        for entry in common_entrypoints:
+            if entry in file_names:
+                return entry
+
+        return "index.js"
+
+    def _get_package_json(self) -> Dict:
+        """Extract package.json content from file manifest."""
+        for f in self.file_manifest:
+            if f.get("path") == "package.json":
+                content = f.get("content", "{}")
+                try:
+                    return json.loads(content)
+                except json.JSONDecodeError:
+                    return {}
+        return {}
+
+    def _build_run_commands(self, build_cmds: List[str]) -> List[str]:
+        """Format build commands for Dockerfile (without RUN prefix - renderer adds it)."""
+        commands = [
+            "npm install",  # Renderer adds RUN prefix
+        ]
+        for cmd in build_cmds:
+            if cmd and not cmd.startswith("npm install"):
+                commands.append(cmd)  # Renderer adds RUN prefix
+        return commands
+
+    def _get_default_command(self) -> str:
+        """Return default command for this blueprint."""
+        return "npm start"
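
A worked example of the Stage B static-recipe path: assuming the railpack binary is not on PATH (so the subprocess call in Stage A raises FileNotFoundError and falls through), a manifest containing pnpm-lock.yaml plus a package.json that depends on next resolves as follows.

    # Worked example of NodeBlueprint's static detection; assumes Stage A falls through.
    from xenfra_sdk.blueprints.node import NodeBlueprint

    bp = NodeBlueprint({
        "port": 3000,
        "file_manifest": [
            {"path": "package.json", "content": '{"dependencies": {"next": "14.0.0"}}'},
            {"path": "pnpm-lock.yaml", "content": ""},
        ],
    })
    detection = bp._detect_node_environment()
    # detection["detection"]    == "static:next:pnpm"
    # detection["run_commands"] == ["pnpm install", "pnpm run build"]
    # detection["command"]      == "pnpm run start"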

xenfra_sdk/blueprints/python.py
ADDED
@@ -0,0 +1,57 @@
+from xenfra_sdk.blueprints.base import BaseBlueprint
+from xenfra_sdk.blueprints.schema import DeploymentBlueprintManifest, DockerfileModel, ComposeModel, ServiceDetail
+from xenfra_sdk.dockerizer import detect_entrypoint
+
+class PythonBlueprint(BaseBlueprint):
+    """
+    Sovereign Lean Build-Pack for Python.
+    Only handles the 'Golden Path' (requirements.txt).
+    """
+
+    def generate_manifest(self) -> DeploymentBlueprintManifest:
+        # 1. Deterministic Defaults (with proactive detection)
+        base_image = "python:3.11-slim"
+        entrypoint = self.context.get("entrypoint")
+        if not entrypoint:
+            entrypoint = detect_entrypoint(self.file_manifest, framework=self.framework)
+
+        command = self.context.get("command")
+        if not command:
+            if self.framework == "fastapi":
+                command = f"uvicorn {entrypoint} --host 0.0.0.0 --port {self.port}"
+            elif self.framework == "django":
+                # Django typically needs migrations and runs with gunicorn
+                app_name = entrypoint.split(":")[0]
+                command = f"python manage.py migrate && gunicorn {app_name}.wsgi:application --bind 0.0.0.0:{self.port}"
+            elif self.framework == "flask":
+                # Flask also runs well with gunicorn
+                command = f"gunicorn --bind 0.0.0.0:{self.port} {entrypoint}"
+            else:
+                py_file = entrypoint.split(":")[0].replace(".", "/") + ".py"
+                command = f"python -u {py_file}"
+
+        # 2. Build Dockerfile
+        dockerfile = DockerfileModel(
+            base_image="python:3.11",  # Using non-slim for build tools
+            expose_port=self.port,
+            run_commands=[
+                "pip install --upgrade pip",
+                "pip install --no-cache-dir -r requirements.txt"
+            ],
+            command=command.split()
+        )
+
+        # 3. Build Compose with Resource Governance
+        service = ServiceDetail(
+            build=".",
+            ports=[f"{self.port}:{self.port}"],
+            environment=self.context.get("env_vars", {}),
+            command=command,
+            deploy=self._generate_deploy_model()
+        )
+
+        return DeploymentBlueprintManifest(
+            dockerfile=dockerfile,
+            compose=ComposeModel(services={"app": service}),
+            env_file=self.context.get("env_vars", {})
+        )
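
Tracing PythonBlueprint through BaseBlueprint._render_dockerfile() for a FastAPI app with an explicit entrypoint gives roughly the output below; the WORKDIR value comes from schema defaults this diff does not include, so it is an assumption.

    # Output sketch for the Python golden path; the WORKDIR default is assumed.
    from xenfra_sdk.blueprints.python import PythonBlueprint

    bp = PythonBlueprint({
        "framework": "fastapi",
        "entrypoint": "main:app",
        "port": 8000,
        "file_manifest": [{"path": "requirements.txt", "content": "fastapi\nuvicorn\n"}],
    })
    files = bp.render()
    # files["Dockerfile"] is approximately:
    #   FROM python:3.11
    #   WORKDIR /app            <- assumed schema default
    #   COPY . .
    #   RUN pip install --upgrade pip
    #   RUN pip install --no-cache-dir -r requirements.txt
    #   EXPOSE 8000
    #   CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]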