sovereign 0.14.2__py3-none-any.whl → 1.0.0a4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of sovereign might be problematic. Click here for more details.
- sovereign/__init__.py +17 -78
- sovereign/app.py +74 -59
- sovereign/cache/__init__.py +245 -0
- sovereign/cache/backends/__init__.py +110 -0
- sovereign/cache/backends/s3.py +161 -0
- sovereign/cache/filesystem.py +74 -0
- sovereign/cache/types.py +17 -0
- sovereign/configuration.py +607 -0
- sovereign/constants.py +1 -0
- sovereign/context.py +271 -100
- sovereign/dynamic_config/__init__.py +112 -0
- sovereign/dynamic_config/deser.py +78 -0
- sovereign/dynamic_config/loaders.py +120 -0
- sovereign/error_info.py +61 -0
- sovereign/events.py +49 -0
- sovereign/logging/access_logger.py +85 -0
- sovereign/logging/application_logger.py +54 -0
- sovereign/logging/base_logger.py +41 -0
- sovereign/logging/bootstrapper.py +36 -0
- sovereign/logging/types.py +10 -0
- sovereign/middlewares.py +8 -7
- sovereign/modifiers/lib.py +2 -1
- sovereign/rendering.py +124 -0
- sovereign/rendering_common.py +91 -0
- sovereign/response_class.py +18 -0
- sovereign/server.py +123 -28
- sovereign/statistics.py +19 -21
- sovereign/templates/base.html +59 -46
- sovereign/templates/resources.html +203 -102
- sovereign/testing/loaders.py +9 -0
- sovereign/{modifiers/test.py → testing/modifiers.py} +0 -2
- sovereign/tracing.py +103 -0
- sovereign/types.py +304 -0
- sovereign/utils/auth.py +27 -13
- sovereign/utils/crypto/__init__.py +0 -0
- sovereign/utils/crypto/crypto.py +135 -0
- sovereign/utils/crypto/suites/__init__.py +21 -0
- sovereign/utils/crypto/suites/aes_gcm_cipher.py +42 -0
- sovereign/utils/crypto/suites/base_cipher.py +21 -0
- sovereign/utils/crypto/suites/disabled_cipher.py +25 -0
- sovereign/utils/crypto/suites/fernet_cipher.py +29 -0
- sovereign/utils/dictupdate.py +3 -2
- sovereign/utils/eds.py +40 -22
- sovereign/utils/entry_point_loader.py +18 -0
- sovereign/utils/mock.py +60 -17
- sovereign/utils/resources.py +17 -0
- sovereign/utils/templates.py +4 -2
- sovereign/utils/timer.py +5 -3
- sovereign/utils/version_info.py +8 -0
- sovereign/utils/weighted_clusters.py +2 -1
- sovereign/v2/__init__.py +0 -0
- sovereign/v2/data/data_store.py +621 -0
- sovereign/v2/data/render_discovery_response.py +24 -0
- sovereign/v2/data/repositories.py +90 -0
- sovereign/v2/data/utils.py +33 -0
- sovereign/v2/data/worker_queue.py +273 -0
- sovereign/v2/jobs/refresh_context.py +117 -0
- sovereign/v2/jobs/render_discovery_job.py +145 -0
- sovereign/v2/logging.py +81 -0
- sovereign/v2/types.py +41 -0
- sovereign/v2/web.py +101 -0
- sovereign/v2/worker.py +199 -0
- sovereign/views/__init__.py +7 -0
- sovereign/views/api.py +82 -0
- sovereign/views/crypto.py +46 -15
- sovereign/views/discovery.py +52 -67
- sovereign/views/healthchecks.py +107 -20
- sovereign/views/interface.py +173 -117
- sovereign/worker.py +193 -0
- {sovereign-0.14.2.dist-info → sovereign-1.0.0a4.dist-info}/METADATA +81 -73
- sovereign-1.0.0a4.dist-info/RECORD +85 -0
- {sovereign-0.14.2.dist-info → sovereign-1.0.0a4.dist-info}/WHEEL +1 -1
- sovereign-1.0.0a4.dist-info/entry_points.txt +46 -0
- sovereign_files/__init__.py +0 -0
- sovereign_files/static/darkmode.js +51 -0
- sovereign_files/static/node_expression.js +42 -0
- sovereign_files/static/panel.js +76 -0
- sovereign_files/static/resources.css +246 -0
- sovereign_files/static/resources.js +642 -0
- sovereign_files/static/sass/style.scss +33 -0
- sovereign_files/static/style.css +16143 -0
- sovereign_files/static/style.css.map +1 -0
- sovereign/config_loader.py +0 -225
- sovereign/discovery.py +0 -175
- sovereign/logs.py +0 -131
- sovereign/schemas.py +0 -715
- sovereign/sources/__init__.py +0 -3
- sovereign/sources/file.py +0 -21
- sovereign/sources/inline.py +0 -38
- sovereign/sources/lib.py +0 -40
- sovereign/sources/poller.py +0 -298
- sovereign/static/sass/style.scss +0 -27
- sovereign/static/style.css +0 -13553
- sovereign/templates/ul_filter.html +0 -22
- sovereign/utils/crypto.py +0 -64
- sovereign/views/admin.py +0 -120
- sovereign-0.14.2.dist-info/LICENSE.txt +0 -13
- sovereign-0.14.2.dist-info/RECORD +0 -45
- sovereign-0.14.2.dist-info/entry_points.txt +0 -10
sovereign/v2/logging.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
import os
|
|
3
|
+
from typing import Any, MutableMapping
|
|
4
|
+
|
|
5
|
+
import structlog
|
|
6
|
+
from pydantic import BaseModel
|
|
7
|
+
|
|
8
|
+
# noinspection PyProtectedMember
|
|
9
|
+
from structlog.dev import RichTracebackFormatter
|
|
10
|
+
from structlog.typing import FilteringBoundLogger
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def get_named_logger(name: str, level: int = logging.INFO) -> FilteringBoundLogger:
    """
    Return a structured logger bound to *name* so log levels can be tuned per logger.

    Events logged below *level* are dropped. Set LOG_FORMAT=human for
    pretty-printed, colourful console output; any other value reuses the
    globally configured structlog processor chain (JSON in production).
    """
    # Map structlog method names to stdlib severities; unknown methods count as INFO.
    _method_levels = {
        "debug": logging.DEBUG,
        "info": logging.INFO,
        "warn": logging.WARN,
        "warning": logging.WARNING,
        "error": logging.ERROR,
        "critical": logging.CRITICAL,
        "exception": logging.ERROR,
    }

    # noinspection PyUnusedLocal
    def _drop_below_level(
        logger: Any, method_name: str, event_dict: MutableMapping[str, Any]
    ) -> MutableMapping[str, Any]:
        # Drop the event entirely when its severity is below this logger's level.
        if _method_levels.get(method_name, logging.INFO) < level:
            raise structlog.DropEvent
        return event_dict

    # noinspection PyUnusedLocal
    def _dump_pydantic_models(
        logger: FilteringBoundLogger,
        method_name: str,
        event_dict: MutableMapping[str, Any],
    ) -> MutableMapping[str, Any]:
        # Replace pydantic models with plain dicts so renderers can serialise them.
        for field, item in event_dict.items():
            if isinstance(item, BaseModel):
                event_dict[field] = item.model_dump()
        return event_dict

    human = os.environ.get("LOG_FORMAT", "json").lower() == "human"

    processors = [
        _drop_below_level,
        structlog.stdlib.add_log_level,
        _dump_pydantic_models,
        structlog.processors.TimeStamper(
            fmt="%Y-%m-%d %H:%M:%S" if human else "iso"
        ),
    ]

    if human:
        # Human-readable output: colours, padded event names, rich tracebacks.
        processors.append(structlog.processors.UnicodeDecoder())
        processors.append(
            structlog.dev.ConsoleRenderer(
                colors=True,
                exception_formatter=RichTracebackFormatter(show_locals=False),
                pad_event=30,
                sort_keys=False,
            )
        )
    else:
        # JSON / machine output: append whatever chain structlog is configured with.
        processors.extend(structlog.get_config()["processors"])

    return structlog.wrap_logger(
        structlog.PrintLogger(),
        processors,
        context_class=dict,
    ).bind(logger_name=name)
|
sovereign/v2/types.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
from typing import Any
|
|
2
|
+
|
|
3
|
+
import pydantic
|
|
4
|
+
from pydantic import TypeAdapter
|
|
5
|
+
|
|
6
|
+
from sovereign.types import DiscoveryRequest, DiscoveryResponse
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class Context(pydantic.BaseModel):
    """A named template context plus bookkeeping for its refresh cycle."""

    name: str
    data: Any
    # Hash of `data`; lets consumers cheaply detect whether the context changed.
    data_hash: int
    # Timestamp of the last refresh; None when the context has never been refreshed.
    # NOTE(review): presumably epoch seconds, matching `refresh_after` — confirm.
    last_refreshed_at: int | None = None
    # Point in time (epoch seconds, compared against time.time() by the worker)
    # after which this context is due for another refresh.
    refresh_after: int


class DiscoveryEntry(pydantic.BaseModel):
    """A cached discovery request and (once rendered) its response."""

    # Cache key derived from the request; used as the lookup key in the store.
    request_hash: str
    # Resource type / template name the request was rendered against.
    template: str
    request: DiscoveryRequest
    # None until a worker has rendered the response.
    response: DiscoveryResponse | None
    last_rendered_at: int | None = None


class RefreshContextJob(pydantic.BaseModel):
    """Queue job: re-load the named template context."""

    context_name: str


class RenderDiscoveryJob(pydantic.BaseModel):
    """Queue job: render the discovery entry identified by `request_hash`."""

    request_hash: str


# Tagged union of every job type that can travel through the worker queue.
QueueJob = RefreshContextJob | RenderDiscoveryJob


# Validates/deserialises queue payloads into the correct concrete job type.
queue_job_type_adapter = TypeAdapter(QueueJob)


class WorkerNode(pydantic.BaseModel):
    """A worker process registered in the data store for leader election."""

    node_id: str
    # Timestamp of the node's most recent heartbeat; stale nodes get pruned.
    last_heartbeat: int
|
sovereign/v2/web.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import logging
|
|
3
|
+
import os
|
|
4
|
+
import threading
|
|
5
|
+
|
|
6
|
+
from structlog.typing import FilteringBoundLogger
|
|
7
|
+
|
|
8
|
+
from sovereign import config
|
|
9
|
+
from sovereign.types import DiscoveryRequest, DiscoveryResponse
|
|
10
|
+
from sovereign.v2.data.repositories import DiscoveryEntryRepository
|
|
11
|
+
from sovereign.v2.data.utils import get_data_store, get_queue
|
|
12
|
+
from sovereign.v2.logging import get_named_logger
|
|
13
|
+
from sovereign.v2.types import DiscoveryEntry, RenderDiscoveryJob
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
async def wait_for_discovery_response(
    request: DiscoveryRequest,
) -> DiscoveryResponse | None:
    """
    Return a rendered discovery response for *request*, waiting for a worker if needed.

    Flow:
      1. Look the request up in the data store; if an entry with a non-empty
         response exists, return it immediately.
      2. Otherwise persist a new (response-less) entry and enqueue a render job
         (duplicate jobs are de-duplicated by the worker).
      3. Poll the store for up to ``config.cache.read_timeout`` seconds, sleeping
         ``config.cache.poll_interval_secs`` between reads.

    Returns ``None`` when the entry disappears while polling or the timeout
    elapses without a response being rendered.
    """
    request_hash = request.cache_key(config.cache.hash_rules)

    logger: FilteringBoundLogger = get_named_logger(
        f"{__name__}.{wait_for_discovery_response.__qualname__} ({__file__})",
        level=logging.DEBUG,
    ).bind(
        request_hash=request_hash,
        template_resource_type=request.template.resource_type,
        process_id=os.getpid(),
        thread_id=threading.get_ident(),
    )

    logger.debug("Starting lookup for discovery response")

    data_store = get_data_store()
    discovery_entry_repository = DiscoveryEntryRepository(data_store)

    queue = get_queue()

    discovery_entry = discovery_entry_repository.get(request_hash)

    if not discovery_entry:
        logger.debug(
            "No existing discovery entry found, creating new entry and enqueuing job"
        )

        # we need to save this request to the database
        discovery_entry = DiscoveryEntry(
            request_hash=request_hash,
            template=request.template.resource_type,
            request=request,
            response=None,
        )
        discovery_entry_repository.save(discovery_entry)

    if not discovery_entry.response:
        # enqueue a job to render this discovery request (duplicates handled in the worker)
        job = RenderDiscoveryJob(request_hash=request_hash)
        queue.put(job)

    if discovery_entry.response:
        logger.debug("Returning cached response immediately")
        return discovery_entry.response

    # wait for up to CACHE_READ_TIMEOUT seconds for the response to be populated
    logger.debug(
        "Polling for response",
        timeout=config.cache.read_timeout,
        poll_interval=config.cache.poll_interval_secs,
    )

    # Hoist the loop lookup; the same event loop is used for all timing reads.
    loop = asyncio.get_event_loop()
    start_time = loop.time()
    attempts = 0

    while (loop.time() - start_time) < config.cache.read_timeout:
        attempts += 1
        discovery_entry = discovery_entry_repository.get(request_hash)
        if discovery_entry is None:
            logger.error("No discovery entry found while polling for response")
            return None
        if discovery_entry.response is not None:
            # Fix: return as soon as the response lands. The previous version
            # slept one extra poll interval before the loop condition noticed
            # the populated response, adding avoidable latency to every miss.
            break
        await asyncio.sleep(config.cache.poll_interval_secs)

    elapsed_time = loop.time() - start_time

    if discovery_entry.response:
        logger.debug(
            "Response received after polling",
            attempts=attempts,
            elapsed_time=elapsed_time,
        )
    else:
        logger.error(
            "Timeout waiting for response", attempts=attempts, elapsed_time=elapsed_time
        )

    return discovery_entry.response
|
sovereign/v2/worker.py
ADDED
|
@@ -0,0 +1,199 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
import os
|
|
3
|
+
import random
|
|
4
|
+
import threading
|
|
5
|
+
import time
|
|
6
|
+
|
|
7
|
+
from structlog.typing import FilteringBoundLogger
|
|
8
|
+
|
|
9
|
+
from sovereign import stats
|
|
10
|
+
from sovereign.configuration import config
|
|
11
|
+
from sovereign.dynamic_config import Loadable
|
|
12
|
+
from sovereign.v2.data.data_store import DataStoreProtocol
|
|
13
|
+
from sovereign.v2.data.repositories import (
|
|
14
|
+
ContextRepository,
|
|
15
|
+
DiscoveryEntryRepository,
|
|
16
|
+
WorkerNodeRepository,
|
|
17
|
+
)
|
|
18
|
+
from sovereign.v2.data.utils import get_data_store, get_queue
|
|
19
|
+
from sovereign.v2.data.worker_queue import QueueProtocol
|
|
20
|
+
from sovereign.v2.jobs.refresh_context import get_refresh_after, refresh_context
|
|
21
|
+
from sovereign.v2.jobs.render_discovery_job import render_discovery_response
|
|
22
|
+
from sovereign.v2.logging import get_named_logger
|
|
23
|
+
from sovereign.v2.types import (
|
|
24
|
+
QueueJob,
|
|
25
|
+
RefreshContextJob,
|
|
26
|
+
RenderDiscoveryJob,
|
|
27
|
+
)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class Worker:
    """
    Long-running v2 worker: consumes jobs from the shared queue and, when this
    node is elected leader, schedules periodic template-context refreshes.
    """

    context_repository: ContextRepository
    discovery_entry_repository: DiscoveryEntryRepository
    worker_node_repository: WorkerNodeRepository

    queue: QueueProtocol

    def __init__(
        self,
        data_store: DataStoreProtocol | None = None,
        node_id: str | None = None,
        queue: QueueProtocol | None = None,
    ) -> None:
        """
        Wire up repositories and the job queue.

        :param data_store: backing store; defaults to the globally configured one.
        :param node_id: identity used for heartbeats/leader election; defaults to
            a timestamp+random string (NOTE(review): not guaranteed unique, though
            collisions are unlikely — consider uuid4).
        :param queue: job queue; defaults to the globally configured one.
        """
        self.logger: FilteringBoundLogger = get_named_logger(
            f"{self.__class__.__module__}.{self.__class__.__qualname__}",
            level=logging.INFO,
        )

        self.node_id = (
            node_id
            if node_id is not None
            else f"{time.time()}{random.randint(0, 1000000)}"
        )

        data_store = data_store if data_store is not None else get_data_store()

        self.context_repository = ContextRepository(data_store)
        self.discovery_entry_repository = DiscoveryEntryRepository(data_store)
        self.worker_node_repository = WorkerNodeRepository(data_store)

        self.queue = queue if queue is not None else get_queue()

    def start(self):
        """Run forever: spawn the refresh loop, then consume queue messages.

        Exceptions are caught per message so one bad job cannot kill the worker;
        note that a failed job is NOT acked, so redelivery behaviour is left to
        the queue implementation.
        """
        # start the context refresh loop and daemonise it
        threading.Thread(daemon=True, target=self.context_refresh_loop).start()

        # pull from the queue for eternity and process the messages
        while True:
            try:
                if message := self.queue.get():
                    job_type = type(message.job).__name__
                    stats.increment(
                        "v2.worker.queue.message_received",
                        tags=[f"job_type:{job_type}"],
                    )
                    self.process_job(message.job)
                    # ack only after successful processing
                    self.queue.ack(message.receipt_handle)
                    stats.increment(
                        "v2.worker.queue.message_acked", tags=[f"job_type:{job_type}"]
                    )
            except Exception:
                stats.increment("v2.worker.queue.error")
                self.logger.exception("Error while processing job")

    def process_job(self, job: QueueJob):
        """Dispatch a queue job to the matching handler by its concrete type."""
        self.logger.info(
            "Processing job from queue",
            job_type=type(job),
            job=job,
            node_id=self.node_id,
            process_id=os.getpid(),
            thread_id=threading.get_ident(),
        )

        match job:
            case RefreshContextJob():
                refresh_context(
                    job.context_name,
                    self.node_id,
                    config,
                    self.context_repository,
                    self.discovery_entry_repository,
                    self.queue,
                )
            case RenderDiscoveryJob():
                render_discovery_response(
                    job.request_hash,
                    self.context_repository,
                    self.discovery_entry_repository,
                    self.node_id,
                )

    def context_refresh_loop(self):
        """
        Heartbeat + leader-elected scheduling loop (runs in a daemon thread).

        Every iteration this node heartbeats and prunes dead nodes. Only the
        current leader enqueues RefreshContextJobs for contexts whose
        `refresh_after` deadline has passed; non-leaders sleep 60s between
        leadership checks, the leader re-scans every second.
        """
        self.logger.info(
            "Starting context refresh loop",
            node_id=self.node_id,
            process_id=os.getpid(),
            thread_id=threading.get_ident(),
        )

        # Tracks whether we were leader last iteration, so the "became leader"
        # message is logged only on transition rather than every second.
        is_leader = False

        while True:
            try:
                self.worker_node_repository.send_heartbeat(self.node_id)
                self.worker_node_repository.prune_dead_nodes()

                # NOTE(review): `not a == b` would read better as `a != b`.
                if not self.worker_node_repository.get_leader_node_id() == self.node_id:
                    is_leader = False
                    self.logger.info(
                        "This node is not the leader, checking again in 60 seconds",
                        node_id=self.node_id,
                        process_id=os.getpid(),
                        thread_id=threading.get_ident(),
                    )
                    time.sleep(60)
                    continue

                # I am the leader
                if not is_leader:
                    is_leader = True
                    self.logger.info(
                        "This node is the leader, begin refreshing contexts",
                        node_id=self.node_id,
                        process_id=os.getpid(),
                        thread_id=threading.get_ident(),
                    )

                name: str
                loadable: Loadable
                for name, loadable in config.template_context.context.items():
                    # None means the context has never been scheduled before.
                    refresh_after: int | None = (
                        self.context_repository.get_refresh_after(name)
                    )

                    time_now = time.time()

                    # NOTE(review): compares against a fresh time.time() rather
                    # than the `time_now` captured just above — harmless skew,
                    # but inconsistent with the logging below.
                    if refresh_after is None or refresh_after <= time.time():
                        job = RefreshContextJob(context_name=name)

                        self.logger.info(
                            "Queuing context refresh",
                            node_id=self.node_id,
                            process_id=os.getpid(),
                            thread_id=threading.get_ident(),
                            name=name,
                            refresh_after=refresh_after,
                            refresh_after_seconds=(refresh_after or time_now)
                            - time_now,
                        )
                        self.queue.put(job)
                        stats.increment(
                            "v2.worker.context_refresh.queued", tags=[f"context:{name}"]
                        )

                        # update refresh_after to ensure that, at most, we refresh once per interval
                        self.context_repository.update_refresh_after(
                            name, get_refresh_after(config, loadable)
                        )
                    else:
                        stats.increment(
                            "v2.worker.context_refresh.skipped",
                            tags=[f"context:{name}"],
                        )
                        self.logger.debug(
                            "Skipping context refresh",
                            node_id=self.node_id,
                            process_id=os.getpid(),
                            thread_id=threading.get_ident(),
                            name=name,
                            refresh_after=refresh_after,
                            refresh_after_seconds=(refresh_after or time_now)
                            - time_now,
                        )

                time.sleep(1)
            except Exception:
                stats.increment("v2.worker.context_refresh.error")
                self.logger.exception("Error while refreshing context")
                # back off a little harder after an error
                time.sleep(5)
|
sovereign/views/__init__.py
CHANGED
sovereign/views/api.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from typing import Annotated, Optional
|
|
3
|
+
|
|
4
|
+
from fastapi import APIRouter, Path, Query
|
|
5
|
+
from fastapi.responses import Response
|
|
6
|
+
|
|
7
|
+
from sovereign.cache import Entry
|
|
8
|
+
from sovereign.configuration import ConfiguredResourceTypes, config
|
|
9
|
+
from sovereign.utils.mock import mock_discovery_request
|
|
10
|
+
from sovereign.v2.web import wait_for_discovery_response
|
|
11
|
+
from sovereign.views import reader
|
|
12
|
+
|
|
13
|
+
router = APIRouter()
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def _traverse(data, prefix, expressions):
|
|
17
|
+
for key, value in data.items():
|
|
18
|
+
path = f"{prefix}.{key}" if prefix else key
|
|
19
|
+
if isinstance(value, dict):
|
|
20
|
+
yield from _traverse(value, path, expressions)
|
|
21
|
+
else:
|
|
22
|
+
yield f"{path}={value}"
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def expand_metadata_to_expr(m):
    """Flatten a nested metadata mapping into "dotted.path=value" expressions.

    Thin generator wrapper around `_traverse`, starting with an empty path
    prefix. (The previous version built an `exprs` accumulator list that was
    never used; it has been removed.)
    """
    yield from _traverse(m, "", [])
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
# noinspection DuplicatedCode
@router.get("/resources/{resource_type}", summary="Get resources for a given type")
async def resource(
    resource_type: Annotated[ConfiguredResourceTypes, Path(title="xDS Resource type")],
    resource_name: Optional[str] = Query(None, title="Resource name"),
    api_version: Optional[str] = Query("v3", title="Envoy API version"),
    service_cluster: Optional[str] = Query("*", title="Envoy Service cluster"),
    region: Optional[str] = Query(None, title="Locality Zone"),
    version: Optional[str] = Query(None, title="Envoy Semantic Version"),
    metadata: Optional[str] = Query(None, title="Envoy node metadata to filter by"),
) -> Response:
    """
    Render xDS resources of `resource_type` for a mocked discovery request
    built from the query parameters. Returns the rendered JSON, or a 404-style
    problem document when nothing matched.
    """
    # todo: rewrite for worker v2

    # Match expressions start with the service cluster, then one
    # "metadata.<dotted.path>=<value>" expression per metadata leaf.
    expressions = [f"cluster={service_cluster}"]
    try:
        # `metadata` is a JSON string from the query; best-effort parse —
        # malformed input simply contributes no extra expressions.
        data = {"metadata": json.loads(metadata or "{}")}
        for expr in expand_metadata_to_expr(data):
            expressions.append(expr)
    except Exception:
        pass
    kwargs = {
        "api_version": api_version,
        "resource_type": ConfiguredResourceTypes(resource_type).value,
        "resource_names": resource_name,
        "version": version,
        "region": region,
        "expressions": expressions,
    }
    # Drop None values so mock_discovery_request's own defaults apply.
    req = mock_discovery_request(**{k: v for k, v in kwargs.items() if v is not None})  # type: ignore

    entry: Entry | None = None

    if config.worker_v2_enabled:
        # we're set up to use v2 of the worker
        discovery_response = await wait_for_discovery_response(req)
        if discovery_response is not None:
            entry = Entry(
                text=discovery_response.model_dump_json(indent=None),
                len=len(discovery_response.resources),
                version=discovery_response.version_info,
                node=req.node,
            )

    else:
        # v1 path: blocking read through the cache reader.
        entry = await reader.blocking_read(req)  # ty: ignore[possibly-missing-attribute]

    # `entry` may still be None (v2 timeout) or lack text; treat both as not found.
    if content := getattr(entry, "text", None):
        return Response(content, media_type="application/json")
    else:
        return Response(
            json.dumps({"title": "No resources found", "status": 404}),
            media_type="application/json+problem",
        )
|
sovereign/views/crypto.py
CHANGED
|
@@ -1,31 +1,38 @@
|
|
|
1
|
-
from typing import Dict
|
|
2
|
-
|
|
1
|
+
from typing import Any, Dict, Optional
|
|
2
|
+
|
|
3
3
|
from fastapi import APIRouter, Body
|
|
4
4
|
from fastapi.responses import JSONResponse
|
|
5
|
-
from
|
|
6
|
-
|
|
5
|
+
from pydantic import BaseModel, Field
|
|
6
|
+
|
|
7
|
+
from sovereign import logs, server_cipher_container
|
|
8
|
+
from sovereign.configuration import EncryptionConfig
|
|
9
|
+
from sovereign.response_class import json_response_class
|
|
10
|
+
from sovereign.utils.crypto.crypto import CipherContainer
|
|
11
|
+
from sovereign.utils.crypto.suites import EncryptionType
|
|
7
12
|
|
|
8
13
|
router = APIRouter()
|
|
9
14
|
|
|
10
15
|
|
|
11
16
|
class EncryptionRequest(BaseModel):
    """Payload for /encrypt: plaintext plus an optional user-supplied key.

    When `key` is omitted the server's own cipher container is used instead.
    """

    data: str = Field(..., title="Text to be encrypted", min_length=1, max_length=65535)
    # Exactly 44 chars — the length of a urlsafe-base64 32-byte key.
    key: Optional[str] = Field(
        None,
        title="Optional encryption key to use to encrypt",
        min_length=44,
        max_length=44,
    )
    encryption_type: str = Field(default="fernet", title="Encryption type to be used")
|
|
19
25
|
|
|
20
26
|
|
|
21
27
|
class DecryptionRequest(BaseModel):
    """Payload for /decrypt: ciphertext plus the (required) key to decrypt with."""

    data: str = Field(..., title="Text to be decrypted", min_length=1, max_length=65535)
    # Exactly 44 chars — the length of a urlsafe-base64 32-byte key.
    key: str = Field(
        ...,
        title="Encryption key to use to decrypt",
        min_length=44,
        max_length=44,
    )
    encryption_type: str = Field(default="fernet", title="Encryption type to be used")
|
|
29
36
|
|
|
30
37
|
|
|
31
38
|
class DecryptableRequest(BaseModel):
|
|
@@ -37,17 +44,37 @@ class DecryptableRequest(BaseModel):
|
|
|
37
44
|
summary="Decrypt provided encrypted data using a provided key",
|
|
38
45
|
response_class=json_response_class,
|
|
39
46
|
)
|
|
40
|
-
async def _decrypt(request: DecryptionRequest = Body(None)) ->
|
|
41
|
-
|
|
47
|
+
async def _decrypt(request: DecryptionRequest = Body(None)) -> dict[str, Any]:
    """Decrypt `request.data` with the key and cipher type supplied by the caller.

    A throwaway cipher container is built from the user's key — the server's
    own key is never used here.
    """
    user_cipher_container = CipherContainer.from_encryption_configs(
        encryption_configs=[
            EncryptionConfig(
                encryption_key=request.key,
                encryption_type=EncryptionType(request.encryption_type),
            )
        ],
        logger=logs.application_logger.logger,
    )
    return {**user_cipher_container.decrypt_with_type(request.data)}
|
|
42
58
|
|
|
43
59
|
|
|
44
60
|
@router.post(
|
|
45
61
|
"/encrypt",
|
|
46
|
-
summary="Encrypt provided data using this servers key",
|
|
62
|
+
summary="Encrypt provided data using this servers key or provided key",
|
|
47
63
|
response_class=json_response_class,
|
|
48
64
|
)
|
|
49
|
-
async def _encrypt(request: EncryptionRequest = Body(None)) ->
|
|
50
|
-
|
|
65
|
+
async def _encrypt(request: EncryptionRequest = Body(None)) -> dict[str, Any]:
    """Encrypt `request.data`.

    Uses a cipher container built from the caller-supplied key when one is
    provided; otherwise falls back to this server's own cipher container.
    """
    if request.key:
        user_cipher_container = CipherContainer.from_encryption_configs(
            encryption_configs=[
                EncryptionConfig(
                    encryption_key=request.key,
                    encryption_type=EncryptionType(request.encryption_type),
                )
            ],
            logger=logs.application_logger.logger,
        )
        return {**user_cipher_container.encrypt(request.data)}
    return {**server_cipher_container.encrypt(request.data)}
|
|
51
78
|
|
|
52
79
|
|
|
53
80
|
@router.post(
|
|
@@ -56,7 +83,7 @@ async def _encrypt(request: EncryptionRequest = Body(None)) -> Dict[str, str]:
|
|
|
56
83
|
response_class=json_response_class,
|
|
57
84
|
)
|
|
58
85
|
async def _decryptable(request: DecryptableRequest = Body(None)) -> JSONResponse:
    """Check that `request.data` is decryptable with this server's key.

    Attempts the decryption and discards the result; an empty JSON body means
    success. NOTE(review): presumably a failed decrypt raises and surfaces as
    an error response — confirm against CipherContainer.decrypt.
    """
    server_cipher_container.decrypt(request.data)
    return json_response_class({})
|
|
61
88
|
|
|
62
89
|
|
|
@@ -65,5 +92,9 @@ async def _decryptable(request: DecryptableRequest = Body(None)) -> JSONResponse
|
|
|
65
92
|
summary="Generate a new asymmetric encryption key",
|
|
66
93
|
response_class=json_response_class,
|
|
67
94
|
)
|
|
68
|
-
def _generate_key() -> Dict[str, str]:
|
|
69
|
-
|
|
95
|
+
def _generate_key(encryption_type: str = "fernet") -> Dict[str, str]:
    """Generate a fresh key for the requested cipher suite.

    :param encryption_type: one of the EncryptionType values (default "fernet").
    :return: the new key (decoded to str) and the type it was generated for.
    """
    cipher_suite = CipherContainer.get_cipher_suite(EncryptionType(encryption_type))
    return {
        "key": cipher_suite.generate_key().decode(),
        "encryption_type": encryption_type,
    }
|