msaas-feature-flags 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- msaas_feature_flags-0.1.0/.gitignore +23 -0
- msaas_feature_flags-0.1.0/PKG-INFO +15 -0
- msaas_feature_flags-0.1.0/pyproject.toml +35 -0
- msaas_feature_flags-0.1.0/src/feature_flags/__init__.py +40 -0
- msaas_feature_flags-0.1.0/src/feature_flags/config.py +46 -0
- msaas_feature_flags-0.1.0/src/feature_flags/evaluator.py +148 -0
- msaas_feature_flags-0.1.0/src/feature_flags/models.py +79 -0
- msaas_feature_flags-0.1.0/src/feature_flags/router.py +82 -0
- msaas_feature_flags-0.1.0/src/feature_flags/store.py +129 -0
- msaas_feature_flags-0.1.0/tests/__init__.py +0 -0
- msaas_feature_flags-0.1.0/tests/test_evaluator.py +299 -0
- msaas_feature_flags-0.1.0/tests/test_models.py +119 -0
- msaas_feature_flags-0.1.0/tests/test_router.py +186 -0
- msaas_feature_flags-0.1.0/tests/test_store.py +203 -0
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
node_modules/
|
|
2
|
+
dist/
|
|
3
|
+
.next/
|
|
4
|
+
.turbo/
|
|
5
|
+
*.pyc
|
|
6
|
+
__pycache__/
|
|
7
|
+
.venv/
|
|
8
|
+
*.egg-info/
|
|
9
|
+
.pytest_cache/
|
|
10
|
+
.ruff_cache/
|
|
11
|
+
.env
|
|
12
|
+
.env.*
|
|
13
|
+
!.env.example
|
|
14
|
+
!.env.*.example
|
|
15
|
+
!.env.*.template
|
|
16
|
+
.DS_Store
|
|
17
|
+
coverage/
|
|
18
|
+
|
|
19
|
+
# Runtime artifacts
|
|
20
|
+
logs_llm/
|
|
21
|
+
vectors.db
|
|
22
|
+
vectors.db-shm
|
|
23
|
+
vectors.db-wal
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: msaas-feature-flags
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Feature flag service with targeting rules, percentage rollout, and caching
|
|
5
|
+
License: MIT
|
|
6
|
+
Requires-Python: >=3.12
|
|
7
|
+
Requires-Dist: asyncpg>=0.30.0
|
|
8
|
+
Requires-Dist: fastapi>=0.115.0
|
|
9
|
+
Requires-Dist: msaas-api-core
|
|
10
|
+
Requires-Dist: msaas-errors
|
|
11
|
+
Requires-Dist: pydantic>=2.0
|
|
12
|
+
Provides-Extra: dev
|
|
13
|
+
Requires-Dist: httpx>=0.27.0; extra == 'dev'
|
|
14
|
+
Requires-Dist: pytest-asyncio>=0.24.0; extra == 'dev'
|
|
15
|
+
Requires-Dist: pytest>=8.0; extra == 'dev'
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "msaas-feature-flags"
|
|
3
|
+
version = "0.1.0"
|
|
4
|
+
description = "Feature flag service with targeting rules, percentage rollout, and caching"
|
|
5
|
+
requires-python = ">=3.12"
|
|
6
|
+
license = { text = "MIT" }
|
|
7
|
+
dependencies = [
|
|
8
|
+
"msaas-api-core",
|
|
9
|
+
"msaas-errors",
|
|
10
|
+
"fastapi>=0.115.0",
|
|
11
|
+
"pydantic>=2.0",
|
|
12
|
+
"asyncpg>=0.30.0",
|
|
13
|
+
]
|
|
14
|
+
|
|
15
|
+
[project.optional-dependencies]
|
|
16
|
+
dev = [
|
|
17
|
+
"pytest>=8.0",
|
|
18
|
+
"pytest-asyncio>=0.24.0",
|
|
19
|
+
"httpx>=0.27.0",
|
|
20
|
+
]
|
|
21
|
+
|
|
22
|
+
[build-system]
|
|
23
|
+
requires = ["hatchling"]
|
|
24
|
+
build-backend = "hatchling.build"
|
|
25
|
+
|
|
26
|
+
[tool.hatch.build.targets.wheel]
|
|
27
|
+
packages = ["src/feature_flags"]
|
|
28
|
+
|
|
29
|
+
[tool.pytest.ini_options]
|
|
30
|
+
testpaths = ["tests"]
|
|
31
|
+
asyncio_mode = "auto"
|
|
32
|
+
|
|
33
|
+
[tool.uv.sources]
|
|
34
|
+
msaas-api-core = { workspace = true }
|
|
35
|
+
msaas-errors = { workspace = true }
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
"""Willian Flags -- Feature flag service with targeting, rollout, and caching."""
|
|
2
|
+
|
|
3
|
+
from feature_flags.config import FlagConfig, close_pool, get_pool, init_flags
|
|
4
|
+
from feature_flags.evaluator import clear_cache, evaluate_all, evaluate_flag
|
|
5
|
+
from feature_flags.models import (
|
|
6
|
+
FeatureFlag,
|
|
7
|
+
FlagEvaluation,
|
|
8
|
+
TargetingRule,
|
|
9
|
+
UserContext,
|
|
10
|
+
)
|
|
11
|
+
from feature_flags.router import FlagRouter
|
|
12
|
+
from feature_flags.store import (
|
|
13
|
+
create_flag,
|
|
14
|
+
create_table,
|
|
15
|
+
delete_flag,
|
|
16
|
+
get_flag,
|
|
17
|
+
list_flags,
|
|
18
|
+
update_flag,
|
|
19
|
+
)
|
|
20
|
+
|
|
21
|
+
# Names exported by "from feature_flags import *"; mirrors the imports above.
__all__ = [
    "FlagConfig",
    "FlagRouter",
    "FeatureFlag",
    "FlagEvaluation",
    "TargetingRule",
    "UserContext",
    "clear_cache",
    "close_pool",
    "create_flag",
    "create_table",
    "delete_flag",
    "evaluate_all",
    "evaluate_flag",
    "get_flag",
    "get_pool",
    "init_flags",
    "list_flags",
    "update_flag",
]
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
"""Feature flag configuration and connection pool management."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncpg
|
|
6
|
+
from pydantic import BaseModel, Field
|
|
7
|
+
|
|
8
|
+
_pool: asyncpg.Pool | None = None
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class FlagConfig(BaseModel):
    """Configuration for the feature flag service.

    Passed to init_flags() at application startup and (via
    configure_evaluator) used by the evaluator for caching and default
    behaviour.
    """

    # asyncpg-style DSN; consumed only by init_flags().
    database_url: str = Field(
        description="PostgreSQL connection string (asyncpg format)",
    )
    # How long a cached flag definition may be served before re-reading
    # the store (see evaluator._cache_get).
    cache_ttl_seconds: int = Field(
        default=60,
        description="In-memory cache TTL in seconds",
    )
    # Answer returned by evaluate_flag() for unknown flag keys.
    default_enabled: bool = Field(
        default=False,
        description="Default flag state when a flag key is not found",
    )
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
async def init_flags(config: FlagConfig) -> asyncpg.Pool:
    """Create and store the asyncpg connection pool.

    Safe to call more than once: an existing pool is closed before the
    new one is created, so re-initialization does not leak connections.

    Args:
        config: Service configuration; only ``database_url`` is used here.

    Returns:
        The newly created pool (also stored as the module-global pool).
    """
    global _pool
    if _pool is not None:
        # Re-initialization: release the previous pool's connections
        # instead of silently abandoning them.
        await _pool.close()
        _pool = None
    _pool = await asyncpg.create_pool(config.database_url, min_size=2, max_size=10)
    return _pool
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def get_pool() -> asyncpg.Pool:
    """Return the current pool. Raises if init_flags() was not called."""
    pool = _pool
    if pool is None:
        raise RuntimeError("Feature flags not initialized. Call init_flags() first.")
    return pool
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
async def close_pool() -> None:
    """Gracefully close the connection pool."""
    global _pool
    if _pool is None:
        # Nothing to release: init_flags() was never called, or the pool
        # was already closed.
        return
    await _pool.close()
    _pool = None
|
|
@@ -0,0 +1,148 @@
|
|
|
1
|
+
"""Flag evaluation engine with targeting rules, percentage rollout, and caching."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import hashlib
|
|
6
|
+
import time
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
from feature_flags.config import FlagConfig
|
|
10
|
+
from feature_flags.models import (
|
|
11
|
+
FeatureFlag,
|
|
12
|
+
FlagEvaluation,
|
|
13
|
+
Operator,
|
|
14
|
+
TargetingRule,
|
|
15
|
+
UserContext,
|
|
16
|
+
)
|
|
17
|
+
from feature_flags.store import get_flag, list_flags
|
|
18
|
+
|
|
19
|
+
# ---------------------------------------------------------------------------
|
|
20
|
+
# In-memory cache
|
|
21
|
+
# ---------------------------------------------------------------------------
|
|
22
|
+
|
|
23
|
+
_cache: dict[str, tuple[FeatureFlag, float]] = {}
|
|
24
|
+
_config: FlagConfig | None = None
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def configure_evaluator(config: FlagConfig) -> None:
    """Install the evaluator configuration (cache TTL, default flag state).

    Intended to be called once during application startup.
    """
    global _config
    _config = config
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def clear_cache() -> None:
    """Drop every cached flag so the next lookup goes back to the store."""
    _cache.clear()
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def _get_ttl() -> int:
    # Fall back to 60 seconds when configure_evaluator() was never called.
    if _config is None:
        return 60
    return _config.cache_ttl_seconds
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def _default_enabled() -> bool:
    # Unknown flags evaluate to "off" unless configured otherwise.
    if _config is None:
        return False
    return _config.default_enabled
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def _cache_get(key: str) -> FeatureFlag | None:
    """Return the cached flag for *key*, evicting the entry when stale."""
    cached = _cache.get(key)
    if cached is None:
        return None
    flag, stored_at = cached
    age = time.monotonic() - stored_at
    if age > _get_ttl():
        # Stale entry: drop it so the caller falls through to the store.
        del _cache[key]
        return None
    return flag
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def _cache_set(key: str, flag: FeatureFlag) -> None:
    """Store *flag* under *key*, stamped with the current monotonic time."""
    entry = (flag, time.monotonic())
    _cache[key] = entry
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
# ---------------------------------------------------------------------------
|
|
62
|
+
# Deterministic hash for percentage rollout
|
|
63
|
+
# ---------------------------------------------------------------------------
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
def _rollout_hash(user_id: str, flag_key: str) -> int:
|
|
67
|
+
"""Return a deterministic 0-99 bucket from user_id + flag_key."""
|
|
68
|
+
digest = hashlib.md5(f"{user_id}:{flag_key}".encode(), usedforsecurity=False).digest()
|
|
69
|
+
return int.from_bytes(digest[:4], "little") % 100
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
# ---------------------------------------------------------------------------
|
|
73
|
+
# Targeting rule matching
|
|
74
|
+
# ---------------------------------------------------------------------------
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
def _match_rule(rule: TargetingRule, user_ctx: UserContext) -> bool:
    """Evaluate a single targeting rule against a user context."""
    # A missing attribute is handled inside _compare (it never matches).
    value = user_ctx.attributes.get(rule.attribute)
    return _compare(value, rule.operator, rule.value)
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
def _compare(attr_value: Any, op: Operator, rule_value: Any) -> bool:
    """Apply operator *op* between a user attribute value and a rule value.

    Returns False (rather than raising) for absent attributes, unknown
    operators, and type mismatches, so a malformed targeting rule can
    never crash flag evaluation.
    """
    if attr_value is None:
        # Attribute absent from the user context: a rule never matches.
        return False
    if op == Operator.eq:
        return attr_value == rule_value
    if op == Operator.neq:
        return attr_value != rule_value
    if op == Operator.in_:
        # Non-list rule values cannot contain anything.
        return attr_value in rule_value if isinstance(rule_value, list) else False
    if op == Operator.not_in:
        return attr_value not in rule_value if isinstance(rule_value, list) else True
    if op in (Operator.gte, Operator.lte):
        # Rules round-trip through JSONB, so the rule value may not be
        # orderable against the attribute (e.g. "10" vs 10). Treat that
        # as "no match" instead of letting TypeError bubble up through
        # the evaluation endpoints.
        try:
            if op == Operator.gte:
                return attr_value >= rule_value
            return attr_value <= rule_value
        except TypeError:
            return False
    return False
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
# ---------------------------------------------------------------------------
|
|
102
|
+
# Public API
|
|
103
|
+
# ---------------------------------------------------------------------------
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
async def evaluate_flag(key: str, user_context: UserContext) -> FlagEvaluation:
    """Evaluate a single feature flag for the given user context.

    Resolution order:
    1. Flag not found -> default_enabled
    2. Flag disabled -> disabled
    3. Targeting rules (all must match if present)
    4. Percentage rollout

    Args:
        key: Unique flag key to evaluate.
        user_context: User id and attributes used for targeting/rollout.

    Returns:
        A FlagEvaluation whose ``reason`` names the branch that decided
        the outcome.
    """
    # Serve from the TTL cache when possible; otherwise hit the store
    # and cache the result for subsequent evaluations.
    flag = _cache_get(key)
    if flag is None:
        flag = await get_flag(key)
        if flag is None:
            # Note: not-found results are not cached, so every miss hits
            # the store again until the flag exists.
            return FlagEvaluation(flag_key=key, enabled=_default_enabled(), reason="flag_not_found")
        _cache_set(key, flag)

    if not flag.enabled:
        return FlagEvaluation(flag_key=key, enabled=False, reason="flag_disabled")

    # Targeting rules: ALL must match (AND logic)
    if flag.targeting_rules:
        all_match = all(_match_rule(r, user_context) for r in flag.targeting_rules)
        if not all_match:
            return FlagEvaluation(flag_key=key, enabled=False, reason="targeting_rule_mismatch")

    # Percentage rollout: deterministic hashing gives each user a stable
    # bucket per flag, so the same user always gets the same answer.
    if flag.rollout_percentage < 100:
        bucket = _rollout_hash(user_context.user_id, key)
        if bucket >= flag.rollout_percentage:
            return FlagEvaluation(flag_key=key, enabled=False, reason="rollout_excluded")

    return FlagEvaluation(flag_key=key, enabled=True, reason="enabled")
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
async def evaluate_all(user_context: UserContext) -> list[FlagEvaluation]:
    """Evaluate every flag in the store for the given user context."""
    all_flags = await list_flags()
    evaluations: list[FlagEvaluation] = []
    for current in all_flags:
        # Pre-warm the cache so evaluate_flag() does not go back to the
        # store once per flag.
        _cache_set(current.key, current)
        evaluation = await evaluate_flag(current.key, user_context)
        evaluations.append(evaluation)
    return evaluations
|
|
@@ -0,0 +1,79 @@
|
|
|
1
|
+
"""Pydantic models for feature flags, targeting, and evaluation."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from datetime import datetime
|
|
6
|
+
from enum import StrEnum
|
|
7
|
+
from typing import Any
|
|
8
|
+
from uuid import UUID
|
|
9
|
+
|
|
10
|
+
from pydantic import BaseModel, Field
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class Operator(StrEnum):
    """Supported targeting rule operators."""

    eq = "eq"          # equality
    neq = "neq"        # inequality
    in_ = "in"         # membership in a list rule value
    not_in = "not_in"  # absence from a list rule value
    gte = "gte"        # greater than or equal
    lte = "lte"        # less than or equal
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class TargetingRule(BaseModel):
    """A single targeting rule matched against user attributes.

    When a flag has multiple rules, ALL of them must match for the flag
    to apply (AND logic; see evaluator.evaluate_flag).
    """

    attribute: str = Field(description="User attribute to evaluate (e.g. 'plan', 'country')")
    operator: Operator = Field(description="Comparison operator")
    value: Any = Field(description="Value to compare against")
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class FeatureFlag(BaseModel):
    """Full representation of a feature flag."""

    id: UUID
    key: str = Field(description="Unique flag identifier (e.g. 'dark-mode')")
    name: str = Field(description="Human-readable name")
    description: str = Field(default="")
    # Master switch: when False the flag is off regardless of rules/rollout.
    enabled: bool = Field(default=False)
    # 0-100; users whose deterministic hash bucket is below this value see
    # the flag enabled (see evaluator._rollout_hash).
    rollout_percentage: int = Field(default=100, ge=0, le=100)
    # All rules must match (AND) for the flag to apply; empty = no targeting.
    targeting_rules: list[TargetingRule] = Field(default_factory=list)
    created_at: datetime
    updated_at: datetime
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
class CreateFlagInput(BaseModel):
    """Input for creating a new feature flag.

    ``id``, ``created_at`` and ``updated_at`` are assigned by the
    database (column defaults), not supplied by the caller.
    """

    key: str
    name: str
    description: str = ""
    enabled: bool = False
    rollout_percentage: int = Field(default=100, ge=0, le=100)
    targeting_rules: list[TargetingRule] = Field(default_factory=list)
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
class UpdateFlagInput(BaseModel):
    """Partial update for a feature flag. Only provided fields are updated."""

    name: str | None = None
    description: str | None = None
    enabled: bool | None = None
    rollout_percentage: int | None = Field(default=None, ge=0, le=100)
    # None means "leave unchanged"; an empty list clears all rules.
    targeting_rules: list[TargetingRule] | None = None
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
class UserContext(BaseModel):
    """Context about the current user, used for flag targeting and rollout."""

    # Stable identifier; feeds the deterministic rollout hash, so changing
    # it changes which partial rollouts the user falls into.
    user_id: str
    # Free-form attributes matched by targeting rules (e.g. plan, country).
    attributes: dict[str, Any] = Field(default_factory=dict)
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
class FlagEvaluation(BaseModel):
    """Result of evaluating a single flag for a user."""

    flag_key: str
    enabled: bool
    # Branch that decided the outcome: "enabled", "flag_disabled",
    # "targeting_rule_mismatch", "rollout_excluded", or "flag_not_found"
    # (see evaluator.evaluate_flag).
    reason: str
|
|
@@ -0,0 +1,82 @@
|
|
|
1
|
+
"""FastAPI router factory for feature flag CRUD and evaluation endpoints."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from errors import (
|
|
6
|
+
ConflictError,
|
|
7
|
+
NotFoundError,
|
|
8
|
+
)
|
|
9
|
+
from fastapi import APIRouter
|
|
10
|
+
|
|
11
|
+
from feature_flags.evaluator import evaluate_all, evaluate_flag
|
|
12
|
+
from feature_flags.models import (
|
|
13
|
+
CreateFlagInput,
|
|
14
|
+
FeatureFlag,
|
|
15
|
+
FlagEvaluation,
|
|
16
|
+
UpdateFlagInput,
|
|
17
|
+
UserContext,
|
|
18
|
+
)
|
|
19
|
+
from feature_flags.store import (
|
|
20
|
+
create_flag,
|
|
21
|
+
delete_flag,
|
|
22
|
+
get_flag,
|
|
23
|
+
list_flags,
|
|
24
|
+
update_flag,
|
|
25
|
+
)
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def FlagRouter(*, prefix: str = "/flags", tags: list[str] | None = None) -> APIRouter:
    """Create a FastAPI router with feature flag CRUD and evaluation endpoints.

    Usage:
        app.include_router(FlagRouter())

    Args:
        prefix: URL prefix applied to every route (default "/flags").
        tags: OpenAPI tags for the routes; defaults to ["feature-flags"].

    Returns:
        A configured APIRouter; each call builds an independent router.
    """
    router = APIRouter(prefix=prefix, tags=tags or ["feature-flags"])

    @router.get("", response_model=list[FeatureFlag])
    async def handle_list_flags() -> list[FeatureFlag]:
        """List all feature flags."""
        return await list_flags()

    @router.post("", response_model=FeatureFlag, status_code=201)
    async def handle_create_flag(body: CreateFlagInput) -> FeatureFlag:
        """Create a new feature flag."""
        # NOTE(review): check-then-insert is racy under concurrent creates.
        # The table's UNIQUE constraint on key is the real guard; a
        # concurrent duplicate would surface as a database error rather
        # than ConflictError — confirm whether that needs handling.
        existing = await get_flag(body.key)
        if existing:
            raise ConflictError(f"Flag '{body.key}' already exists")
        return await create_flag(body)

    @router.get("/{key}", response_model=FeatureFlag)
    async def handle_get_flag(key: str) -> FeatureFlag:
        """Get a single feature flag by key."""
        flag = await get_flag(key)
        if not flag:
            raise NotFoundError(f"Flag '{key}' not found")
        return flag

    @router.patch("/{key}", response_model=FeatureFlag)
    async def handle_update_flag(key: str, body: UpdateFlagInput) -> FeatureFlag:
        """Partially update a feature flag."""
        flag = await update_flag(key, body)
        if not flag:
            raise NotFoundError(f"Flag '{key}' not found")
        return flag

    @router.delete("/{key}", status_code=204)
    async def handle_delete_flag(key: str) -> None:
        """Delete a feature flag."""
        deleted = await delete_flag(key)
        if not deleted:
            raise NotFoundError(f"Flag '{key}' not found")

    @router.post("/evaluate", response_model=list[FlagEvaluation])
    async def handle_evaluate(body: UserContext) -> list[FlagEvaluation]:
        """Evaluate all flags for a given user context."""
        return await evaluate_all(body)

    @router.post("/evaluate/{key}", response_model=FlagEvaluation)
    async def handle_evaluate_single(key: str, body: UserContext) -> FlagEvaluation:
        """Evaluate a single flag for a given user context."""
        return await evaluate_flag(key, body)

    return router
|
|
@@ -0,0 +1,129 @@
|
|
|
1
|
+
"""asyncpg-backed CRUD operations for feature flags."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
|
|
7
|
+
import asyncpg
|
|
8
|
+
|
|
9
|
+
from feature_flags.config import get_pool
|
|
10
|
+
from feature_flags.models import (
|
|
11
|
+
CreateFlagInput,
|
|
12
|
+
FeatureFlag,
|
|
13
|
+
TargetingRule,
|
|
14
|
+
UpdateFlagInput,
|
|
15
|
+
)
|
|
16
|
+
|
|
17
|
+
CREATE_TABLE_SQL = """
|
|
18
|
+
CREATE TABLE IF NOT EXISTS feature_flags (
|
|
19
|
+
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
|
20
|
+
key TEXT UNIQUE NOT NULL,
|
|
21
|
+
name TEXT NOT NULL,
|
|
22
|
+
description TEXT NOT NULL DEFAULT '',
|
|
23
|
+
enabled BOOLEAN NOT NULL DEFAULT FALSE,
|
|
24
|
+
rollout_percentage INTEGER NOT NULL DEFAULT 100
|
|
25
|
+
CHECK (rollout_percentage >= 0 AND rollout_percentage <= 100),
|
|
26
|
+
targeting_rules JSONB NOT NULL DEFAULT '[]'::jsonb,
|
|
27
|
+
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
|
|
28
|
+
updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
|
|
29
|
+
);
|
|
30
|
+
CREATE INDEX IF NOT EXISTS idx_feature_flags_key ON feature_flags (key);
|
|
31
|
+
"""
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
async def create_table(pool: asyncpg.Pool | None = None) -> None:
    """Create the feature_flags table if it does not exist.

    Args:
        pool: Optional explicit pool (useful in tests); defaults to the
            module-global pool from get_pool().
    """
    conn = pool or get_pool()
    await conn.execute(CREATE_TABLE_SQL)
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def _row_to_flag(row: asyncpg.Record) -> FeatureFlag:
    """Convert a database row to a FeatureFlag model."""
    raw_rules = row["targeting_rules"]
    # asyncpg may return JSONB as text depending on codec setup; decode
    # it before building the rule models.
    if isinstance(raw_rules, str):
        raw_rules = json.loads(raw_rules)
    parsed_rules = [TargetingRule(**item) for item in raw_rules]
    return FeatureFlag(
        id=row["id"],
        key=row["key"],
        name=row["name"],
        description=row["description"],
        enabled=row["enabled"],
        rollout_percentage=row["rollout_percentage"],
        targeting_rules=parsed_rules,
        created_at=row["created_at"],
        updated_at=row["updated_at"],
    )
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
async def create_flag(data: CreateFlagInput, pool: asyncpg.Pool | None = None) -> FeatureFlag:
    """Insert a new feature flag and return it.

    Args:
        data: Flag fields; id and timestamps come from column defaults.
        pool: Optional explicit pool; defaults to the module-global pool.

    Returns:
        The inserted flag as read back from the database (RETURNING *).
        A duplicate key is rejected by the table's UNIQUE constraint.
    """
    conn_pool = pool or get_pool()
    # Rules are serialized to JSON text client-side and cast to jsonb
    # server-side ($6::jsonb).
    rules_json = json.dumps([r.model_dump(mode="json") for r in data.targeting_rules])
    row = await conn_pool.fetchrow(
        """
        INSERT INTO feature_flags (key, name, description, enabled, rollout_percentage, targeting_rules)
        VALUES ($1, $2, $3, $4, $5, $6::jsonb)
        RETURNING *
        """,
        data.key,
        data.name,
        data.description,
        data.enabled,
        data.rollout_percentage,
        rules_json,
    )
    return _row_to_flag(row)
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
async def get_flag(key: str, pool: asyncpg.Pool | None = None) -> FeatureFlag | None:
    """Fetch a single flag by its key. Returns None if not found."""
    conn = pool or get_pool()
    record = await conn.fetchrow("SELECT * FROM feature_flags WHERE key = $1", key)
    if not record:
        return None
    return _row_to_flag(record)
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
async def list_flags(pool: asyncpg.Pool | None = None) -> list[FeatureFlag]:
    """Fetch all feature flags ordered by creation date."""
    conn = pool or get_pool()
    records = await conn.fetch("SELECT * FROM feature_flags ORDER BY created_at")
    return [_row_to_flag(record) for record in records]
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
async def update_flag(
    key: str, data: UpdateFlagInput, pool: asyncpg.Pool | None = None
) -> FeatureFlag | None:
    """Partially update a flag. Only non-None fields are updated.

    Args:
        key: Key of the flag to update.
        data: Fields to change; None fields are left untouched.
        pool: Optional explicit pool; defaults to the module-global pool.

    Returns:
        The updated flag, or None if no flag with *key* exists.
    """
    conn_pool = pool or get_pool()
    # Build the SET clause dynamically. Column names come from a fixed
    # tuple below — never from user input — so the f-strings are
    # injection-safe; all values travel as bind parameters.
    sets: list[str] = []
    values: list[object] = []
    idx = 1

    for field_name in ("name", "description", "enabled", "rollout_percentage"):
        val = getattr(data, field_name)
        if val is not None:
            sets.append(f"{field_name} = ${idx}")
            values.append(val)
            idx += 1

    # targeting_rules needs JSON serialization plus an explicit jsonb cast.
    if data.targeting_rules is not None:
        sets.append(f"targeting_rules = ${idx}::jsonb")
        values.append(json.dumps([r.model_dump(mode="json") for r in data.targeting_rules]))
        idx += 1

    if not sets:
        # Nothing to change: return the current row (or None if absent)
        # without touching updated_at.
        return await get_flag(key, conn_pool)

    sets.append("updated_at = now()")
    values.append(key)  # final bind parameter is the WHERE key

    query = f"UPDATE feature_flags SET {', '.join(sets)} WHERE key = ${idx} RETURNING *"
    row = await conn_pool.fetchrow(query, *values)
    return _row_to_flag(row) if row else None
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
async def delete_flag(key: str, pool: asyncpg.Pool | None = None) -> bool:
    """Delete a flag by key. Returns True if a row was deleted."""
    conn = pool or get_pool()
    # asyncpg's execute() returns the command tag, e.g. "DELETE 1".
    status = await conn.execute("DELETE FROM feature_flags WHERE key = $1", key)
    return status == "DELETE 1"
|
|
File without changes
|