sovereign 0.14.2__py3-none-any.whl → 1.0.0a4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of sovereign might be problematic; review the advisory details below before upgrading.

Files changed (99) hide show
  1. sovereign/__init__.py +17 -78
  2. sovereign/app.py +74 -59
  3. sovereign/cache/__init__.py +245 -0
  4. sovereign/cache/backends/__init__.py +110 -0
  5. sovereign/cache/backends/s3.py +161 -0
  6. sovereign/cache/filesystem.py +74 -0
  7. sovereign/cache/types.py +17 -0
  8. sovereign/configuration.py +607 -0
  9. sovereign/constants.py +1 -0
  10. sovereign/context.py +271 -100
  11. sovereign/dynamic_config/__init__.py +112 -0
  12. sovereign/dynamic_config/deser.py +78 -0
  13. sovereign/dynamic_config/loaders.py +120 -0
  14. sovereign/error_info.py +61 -0
  15. sovereign/events.py +49 -0
  16. sovereign/logging/access_logger.py +85 -0
  17. sovereign/logging/application_logger.py +54 -0
  18. sovereign/logging/base_logger.py +41 -0
  19. sovereign/logging/bootstrapper.py +36 -0
  20. sovereign/logging/types.py +10 -0
  21. sovereign/middlewares.py +8 -7
  22. sovereign/modifiers/lib.py +2 -1
  23. sovereign/rendering.py +124 -0
  24. sovereign/rendering_common.py +91 -0
  25. sovereign/response_class.py +18 -0
  26. sovereign/server.py +123 -28
  27. sovereign/statistics.py +19 -21
  28. sovereign/templates/base.html +59 -46
  29. sovereign/templates/resources.html +203 -102
  30. sovereign/testing/loaders.py +9 -0
  31. sovereign/{modifiers/test.py → testing/modifiers.py} +0 -2
  32. sovereign/tracing.py +103 -0
  33. sovereign/types.py +304 -0
  34. sovereign/utils/auth.py +27 -13
  35. sovereign/utils/crypto/__init__.py +0 -0
  36. sovereign/utils/crypto/crypto.py +135 -0
  37. sovereign/utils/crypto/suites/__init__.py +21 -0
  38. sovereign/utils/crypto/suites/aes_gcm_cipher.py +42 -0
  39. sovereign/utils/crypto/suites/base_cipher.py +21 -0
  40. sovereign/utils/crypto/suites/disabled_cipher.py +25 -0
  41. sovereign/utils/crypto/suites/fernet_cipher.py +29 -0
  42. sovereign/utils/dictupdate.py +3 -2
  43. sovereign/utils/eds.py +40 -22
  44. sovereign/utils/entry_point_loader.py +18 -0
  45. sovereign/utils/mock.py +60 -17
  46. sovereign/utils/resources.py +17 -0
  47. sovereign/utils/templates.py +4 -2
  48. sovereign/utils/timer.py +5 -3
  49. sovereign/utils/version_info.py +8 -0
  50. sovereign/utils/weighted_clusters.py +2 -1
  51. sovereign/v2/__init__.py +0 -0
  52. sovereign/v2/data/data_store.py +621 -0
  53. sovereign/v2/data/render_discovery_response.py +24 -0
  54. sovereign/v2/data/repositories.py +90 -0
  55. sovereign/v2/data/utils.py +33 -0
  56. sovereign/v2/data/worker_queue.py +273 -0
  57. sovereign/v2/jobs/refresh_context.py +117 -0
  58. sovereign/v2/jobs/render_discovery_job.py +145 -0
  59. sovereign/v2/logging.py +81 -0
  60. sovereign/v2/types.py +41 -0
  61. sovereign/v2/web.py +101 -0
  62. sovereign/v2/worker.py +199 -0
  63. sovereign/views/__init__.py +7 -0
  64. sovereign/views/api.py +82 -0
  65. sovereign/views/crypto.py +46 -15
  66. sovereign/views/discovery.py +52 -67
  67. sovereign/views/healthchecks.py +107 -20
  68. sovereign/views/interface.py +173 -117
  69. sovereign/worker.py +193 -0
  70. {sovereign-0.14.2.dist-info → sovereign-1.0.0a4.dist-info}/METADATA +81 -73
  71. sovereign-1.0.0a4.dist-info/RECORD +85 -0
  72. {sovereign-0.14.2.dist-info → sovereign-1.0.0a4.dist-info}/WHEEL +1 -1
  73. sovereign-1.0.0a4.dist-info/entry_points.txt +46 -0
  74. sovereign_files/__init__.py +0 -0
  75. sovereign_files/static/darkmode.js +51 -0
  76. sovereign_files/static/node_expression.js +42 -0
  77. sovereign_files/static/panel.js +76 -0
  78. sovereign_files/static/resources.css +246 -0
  79. sovereign_files/static/resources.js +642 -0
  80. sovereign_files/static/sass/style.scss +33 -0
  81. sovereign_files/static/style.css +16143 -0
  82. sovereign_files/static/style.css.map +1 -0
  83. sovereign/config_loader.py +0 -225
  84. sovereign/discovery.py +0 -175
  85. sovereign/logs.py +0 -131
  86. sovereign/schemas.py +0 -715
  87. sovereign/sources/__init__.py +0 -3
  88. sovereign/sources/file.py +0 -21
  89. sovereign/sources/inline.py +0 -38
  90. sovereign/sources/lib.py +0 -40
  91. sovereign/sources/poller.py +0 -298
  92. sovereign/static/sass/style.scss +0 -27
  93. sovereign/static/style.css +0 -13553
  94. sovereign/templates/ul_filter.html +0 -22
  95. sovereign/utils/crypto.py +0 -64
  96. sovereign/views/admin.py +0 -120
  97. sovereign-0.14.2.dist-info/LICENSE.txt +0 -13
  98. sovereign-0.14.2.dist-info/RECORD +0 -45
  99. sovereign-0.14.2.dist-info/entry_points.txt +0 -10
@@ -0,0 +1,90 @@
1
+ import time
2
+
3
+ from sovereign import stats
4
+ from sovereign.v2.data.data_store import ComparisonOperator, DataStoreProtocol, DataType
5
+ from sovereign.v2.types import Context, DiscoveryEntry, WorkerNode
6
+
7
+
8
class ContextRepository:
    """Persistence layer for template ``Context`` objects, backed by a data store."""

    def __init__(self, data_store: DataStoreProtocol):
        self.data_store: DataStoreProtocol = data_store

    # FIX: metric was "repository.context.get_ms", inconsistent with every other
    # timer in this module, which uses the "v2.repository.<entity>.<op>_ms" prefix.
    @stats.timed("v2.repository.context.get_ms")
    def get(self, name: str) -> Context | None:
        """Return the full context by name, or None when it is not stored."""
        return self.data_store.get(DataType.Context, name)

    @stats.timed("v2.repository.context.get_hash_ms")
    def get_hash(self, name: str) -> int | None:
        """Return only the stored ``data_hash`` property, avoiding a full fetch."""
        return self.data_store.get_property(DataType.Context, name, "data_hash")

    def get_refresh_after(self, name: str) -> int | None:
        """Return the unix timestamp after which the named context is due a refresh."""
        return self.data_store.get_property(DataType.Context, name, "refresh_after")

    @stats.timed("v2.repository.context.save_ms")
    def save(self, context: Context) -> bool:
        """Persist the context keyed by its own name; True on success."""
        return self.data_store.set(DataType.Context, context.name, context)

    @stats.timed("v2.repository.context.update_refresh_after_ms")
    def update_refresh_after(self, name: str, refresh_after: int) -> bool:
        """Update only the ``refresh_after`` property without rewriting the context."""
        return self.data_store.set_property(
            DataType.Context, name, "refresh_after", refresh_after
        )
32
+
33
+
34
class DiscoveryEntryRepository:
    """Data-store-backed storage for rendered discovery entries, keyed by request hash."""

    def __init__(self, data_store: DataStoreProtocol):
        self.data_store = data_store

    @stats.timed("v2.repository.discovery_entry.get_ms")
    def get(self, request_hash: str) -> DiscoveryEntry | None:
        """Fetch the discovery entry for a request hash, or None when absent."""
        entry = self.data_store.get(DataType.DiscoveryEntry, request_hash)
        return entry

    @stats.timed("v2.repository.discovery_entry.find_by_template_ms")
    def find_all_request_hashes_by_template(self, template: str) -> list[str]:
        """List the request hashes of every entry rendered from the given template."""
        matches: list[str] = self.data_store.find_all_matching_property(
            DataType.DiscoveryEntry,
            "template",
            ComparisonOperator.EqualTo,
            template,
            "request_hash",
        )
        return matches

    @stats.timed("v2.repository.discovery_entry.save_ms")
    def save(self, entry: DiscoveryEntry) -> bool:
        """Store the entry under its own request hash; True on success."""
        key = entry.request_hash
        return self.data_store.set(DataType.DiscoveryEntry, key, entry)
55
+
56
+
57
class WorkerNodeRepository:
    """Tracks worker liveness via heartbeats and derives leadership from node ids."""

    def __init__(self, data_store: DataStoreProtocol):
        self.data_store = data_store

    @stats.timed("v2.repository.worker_node.heartbeat_ms")
    def send_heartbeat(self, node_id: str) -> bool:
        """Record (or refresh) this node's heartbeat with the current unix time."""
        heartbeat = WorkerNode(node_id=node_id, last_heartbeat=int(time.time()))
        return self.data_store.set(DataType.WorkerNode, node_id, heartbeat)

    @stats.timed("v2.repository.worker_node.get_leader_ms")
    def get_leader_node_id(self) -> str | None:
        """The leader is the stored node with the smallest ``node_id``; None if empty."""
        leader: WorkerNode | None = self.data_store.min_by_property(
            DataType.WorkerNode, "node_id"
        )
        return leader.node_id if leader else None

    @stats.timed("v2.repository.worker_node.prune_ms")
    def prune_dead_nodes(self) -> bool:
        """
        Remove any nodes that have not sent a heartbeat in the last 10 minutes.
        """
        cutoff = int(time.time()) - 600
        return self.data_store.delete_matching(
            DataType.WorkerNode,
            "last_heartbeat",
            ComparisonOperator.LessThanOrEqualTo,
            cutoff,
        )
@@ -0,0 +1,33 @@
1
+ from sovereign import config
2
+ from sovereign.utils.entry_point_loader import EntryPointLoader
3
+ from sovereign.v2.data.data_store import DataStoreProtocol
4
+ from sovereign.v2.data.worker_queue import QueueProtocol
5
+
6
+
7
def get_data_store() -> DataStoreProtocol:
    """
    Instantiate the configured data store.

    Scans the ``data_stores`` entry-point group for a provider whose name
    matches ``config.worker_v2_data_store_provider`` and calls its factory.

    Raises:
        ValueError: if no registered entry point matches the configured name.
    """
    entry_points = EntryPointLoader("data_stores")

    for entry_point in entry_points.groups["data_stores"]:
        if entry_point.name == config.worker_v2_data_store_provider:
            # entry_point.load() returns the factory/class; call it to instantiate.
            # Direct return (matching get_queue below) replaces the previous
            # flag-variable + break + post-loop None check.
            return entry_point.load()()

    raise ValueError(
        f"Data store '{config.worker_v2_data_store_provider}' not found in entry points"
    )
22
+
23
+
24
def get_queue() -> QueueProtocol:
    """
    Instantiate the configured worker queue.

    Raises:
        ValueError: if no entry point in the ``queues`` group matches
        ``config.worker_v2_queue_provider``.
    """
    loader = EntryPointLoader("queues")

    for candidate in loader.groups["queues"]:
        if candidate.name != config.worker_v2_queue_provider:
            continue
        # The entry point resolves to a factory; invoke it for an instance.
        return candidate.load()()

    raise ValueError(
        f"Queue '{config.worker_v2_queue_provider}' not found in entry points"
    )
@@ -0,0 +1,273 @@
1
+ import logging
2
+ import sqlite3
3
+ import time
4
+ import uuid
5
+ from dataclasses import dataclass
6
+ from typing import Protocol, runtime_checkable
7
+
8
+ from structlog.typing import FilteringBoundLogger
9
+
10
+ from sovereign import config
11
+ from sovereign.v2.logging import get_named_logger
12
+ from sovereign.v2.types import QueueJob, queue_job_type_adapter
13
+
14
# Default visibility timeout for queue messages: when not configured explicitly,
# allow the cache read timeout plus a 30-second safety margin before a retrieved
# but unacknowledged message becomes visible to other workers again.
# NOTE(review): "invsibility" looks like a typo for "invisibility" — presumably it
# mirrors the config field's spelling; confirm against the config schema before renaming.
if config.worker_v2_queue_invsibility_time is None:
    DEFAULT_VISIBILITY_TIMEOUT_SECONDS = int(config.cache.read_timeout) + 30
else:
    DEFAULT_VISIBILITY_TIMEOUT_SECONDS = config.worker_v2_queue_invsibility_time
18
+
19
+
20
@dataclass
class QueueMessage:
    """A message retrieved from the queue, containing the job and a receipt handle for acknowledgement."""

    # The deserialized job payload to be processed by a worker.
    job: QueueJob
    # Opaque per-delivery token; must be passed to ``ack`` to permanently
    # remove the message. It is regenerated on every redelivery.
    receipt_handle: str
26
+
27
+
28
@runtime_checkable
class QueueProtocol(Protocol):
    """Structural interface for worker queues with visibility-timeout semantics."""

    def put(self, job: QueueJob) -> str | None:
        """Enqueue a job; returns a message id, or None on failure."""
        ...

    def get(self) -> QueueMessage | None:
        """Retrieve the next visible message (polls), or None when none arrives in time."""
        ...

    def ack(self, receipt_handle: str) -> bool:
        """Permanently remove a delivered message; False when the handle is stale/unknown."""
        ...
35
+
36
+
37
class InMemoryQueue(QueueProtocol):
    """
    Messages become invisible when retrieved and must be acknowledged within the
    visibility timeout, otherwise they become visible again for other workers.
    """

    def __init__(
        self, visibility_timeout: int | None = DEFAULT_VISIBILITY_TIMEOUT_SECONDS
    ) -> None:
        self.logger: FilteringBoundLogger = get_named_logger(
            f"{self.__class__.__module__}.{self.__class__.__qualname__}",
            level=logging.DEBUG,
        )

        # Callers may explicitly pass None; fall back to the module-level default.
        if visibility_timeout is None:
            self.visibility_timeout: int = DEFAULT_VISIBILITY_TIMEOUT_SECONDS
        else:
            self.visibility_timeout = visibility_timeout

        # storage for messages: message_id -> (job, invisible_until, receipt_handle)
        self._messages: dict[str, tuple[QueueJob, float | None, str | None]] = {}

    def put(self, job: QueueJob) -> str | None:
        """Enqueue ``job`` as an immediately-visible message; returns its message id."""
        message_id = str(uuid.uuid4())
        # (job, invisible_until=None, receipt_handle=None) means "visible, unclaimed".
        self._messages[message_id] = (job, None, None)
        self.logger.debug(
            "Putting job in queue",
            job=job,
            message_id=message_id,
            queue_size=len(self._messages),
        )
        return message_id

    def get(self) -> QueueMessage | None:
        """
        Poll for up to 30 seconds for a visible message, marking the first one
        found invisible for ``visibility_timeout`` seconds.

        Returns None when no message becomes visible within the polling window.
        """
        deadline = int(time.time()) + 30
        while int(time.time()) < deadline:
            claimed = self._claim_first_visible(int(time.time()))
            if claimed is not None:
                return claimed
            time.sleep(0.5)
        return None

    def _claim_first_visible(self, now: int) -> QueueMessage | None:
        """Scan for the first visible message, hide it, and hand it out."""
        for message_id, (job, invisible_until, _) in self._messages.items():
            if invisible_until is not None and invisible_until > now:
                continue  # still hidden by a previous, unacknowledged delivery
            receipt_handle = str(uuid.uuid4())
            hidden_until = now + self.visibility_timeout
            # Overwriting an existing key while iterating is safe (no resize).
            self._messages[message_id] = (job, hidden_until, receipt_handle)
            self.logger.debug(
                "Retrieved job from queue",
                message_id=message_id,
                receipt_handle=receipt_handle,
                invisible_until=hidden_until,
            )
            return QueueMessage(job=job, receipt_handle=receipt_handle)
        return None

    def ack(self, receipt_handle: str) -> bool:
        """
        Acknowledge a message, permanently removing it from the queue.

        Returns True if the message was successfully acknowledged, False if the
        receipt handle was invalid (message doesn't exist or was redelivered).
        """
        for message_id, (_, _, stored_receipt) in list(self._messages.items()):
            if stored_receipt != receipt_handle:
                continue
            del self._messages[message_id]
            self.logger.debug(
                "Acknowledged job",
                message_id=message_id,
                receipt_handle=receipt_handle,
            )
            return True

        self.logger.warning(
            "Failed to acknowledge job, invalid receipt handle",
            receipt_handle=receipt_handle,
        )
        return False

    def is_empty(self) -> bool:
        """True when no messages (visible or in-flight) remain."""
        return len(self._messages) == 0
130
+
131
+
132
class SqliteQueue(QueueProtocol):
    """
    SQLite-backed queue with visibility timeout support.

    Messages become invisible when retrieved and must be acknowledged within the
    visibility timeout, otherwise they become visible again for other workers.
    """

    def __init__(self, visibility_timeout: int = DEFAULT_VISIBILITY_TIMEOUT_SECONDS):
        self.logger: FilteringBoundLogger = get_named_logger(
            f"{self.__class__.__module__}.{self.__class__.__qualname__}",
            level=logging.DEBUG,
        )
        self.visibility_timeout = visibility_timeout
        self.db_path = config.worker_v2_queue_path
        self._init_db()

    def _get_connection(self) -> sqlite3.Connection:
        """
        Open a fresh connection to the queue database.

        check_same_thread=False allows SQLite connections to be shared across threads
        and means that we need to ensure thread safety ourselves.
        isolation_level=None uses autocommit mode,
        which prevents "cannot commit - no transaction is active" errors in multi-threaded contexts.

        FIX: callers must now close() the returned connection (see try/finally at
        each call site) — sqlite3.Connection's ``with`` statement only manages
        transactions and does NOT close the connection, so every operation
        previously leaked a file handle.
        """
        conn = sqlite3.connect(
            self.db_path, check_same_thread=False, isolation_level=None
        )
        conn.row_factory = sqlite3.Row
        return conn

    def _init_db(self) -> None:
        """Create the queue table and its indexes if they do not already exist."""
        try:
            conn = self._get_connection()
            try:
                with conn:
                    conn.execute("""
                        CREATE TABLE IF NOT EXISTS queue
                        (
                            id INTEGER PRIMARY KEY AUTOINCREMENT,
                            data TEXT NOT NULL,
                            invisible_until INT,
                            receipt_handle TEXT
                        )
                    """)
                    conn.execute(
                        "CREATE INDEX IF NOT EXISTS idx_invisible_until ON queue (invisible_until)"
                    )
                    conn.execute(
                        "CREATE INDEX IF NOT EXISTS idx_receipt_handle ON queue (receipt_handle)"
                    )
                    conn.commit()
            finally:
                conn.close()
        except Exception:
            self.logger.exception("Failed to initialise SQLite queue database")
            raise

    def put(self, job: QueueJob) -> str | None:
        """Insert ``job`` as an immediately-visible row; returns its row id as a string, or None on failure."""
        try:
            conn = self._get_connection()
            try:
                with conn:
                    cursor = conn.execute(
                        "INSERT INTO queue (data, invisible_until, receipt_handle) VALUES (?, NULL, NULL)",
                        (job.model_dump_json(),),
                    )
                    job_id = str(cursor.lastrowid)
                    self.logger.debug("Put job in SQLite queue", job=job, job_id=job_id)
                    # job_id is already a str; the previous redundant str() wrapper removed.
                    return job_id
            finally:
                conn.close()
        except Exception:
            self.logger.exception("Failed to put job in SQLite queue", job=job)
            return None

    def get(self) -> QueueMessage | None:
        """
        Poll for up to 30 seconds for a visible row; on success mark it invisible
        for ``visibility_timeout`` seconds and return it with a fresh receipt handle.

        Returns None on timeout or database error.
        """
        timeout = 30
        start_time = time.time()
        poll_interval_seconds = 0.5

        while time.time() - start_time < timeout:
            try:
                conn = self._get_connection()
                try:
                    with conn:
                        now = int(time.time())
                        # find first visible message (invisible_until is NULL or expired)
                        # NOTE(review): SELECT-then-UPDATE is not atomic across
                        # connections; two workers could claim the same row — confirm
                        # whether single-consumer use is assumed.
                        cursor = conn.execute(
                            """
                            SELECT id, data
                            FROM queue
                            WHERE invisible_until IS NULL
                               OR invisible_until <= ? LIMIT 1
                            """,
                            (now,),
                        )
                        row = cursor.fetchone()
                        if row:
                            # generate receipt handle and make message invisible
                            receipt_handle = str(uuid.uuid4())
                            invisible_until = now + self.visibility_timeout
                            conn.execute(
                                "UPDATE queue SET invisible_until = ?, receipt_handle = ? WHERE id = ?",
                                (invisible_until, receipt_handle, row["id"]),
                            )
                            conn.commit()
                            self.logger.debug(
                                "Retrieved job from queue",
                                job_id=row["id"],
                                receipt_handle=receipt_handle,
                                invisible_until=invisible_until,
                            )
                            job = queue_job_type_adapter.validate_json(row["data"])
                            return QueueMessage(job=job, receipt_handle=receipt_handle)
                finally:
                    conn.close()
            except Exception:
                self.logger.exception("Failed to get job from SQLite queue")
                return None

            time.sleep(poll_interval_seconds)

        return None

    def ack(self, receipt_handle: str) -> bool:
        """
        Acknowledge a message, permanently removing it from the queue.

        Returns True if the message was successfully acknowledged, False if the
        receipt handle was invalid (message doesn't exist or was redelivered).
        """
        try:
            conn = self._get_connection()
            try:
                with conn:
                    cursor = conn.execute(
                        "DELETE FROM queue WHERE receipt_handle = ?",
                        (receipt_handle,),
                    )
                    conn.commit()
                    if cursor.rowcount > 0:
                        self.logger.debug(
                            "Acknowledged job",
                            receipt_handle=receipt_handle,
                        )
                        return True
                    else:
                        self.logger.warning(
                            "Failed to acknowledge job, invalid receipt handle",
                            receipt_handle=receipt_handle,
                        )
                        return False
            finally:
                conn.close()
        except Exception:
            self.logger.exception(
                "Failed to acknowledge job in SQLite queue",
                receipt_handle=receipt_handle,
            )
            return False
@@ -0,0 +1,117 @@
1
+ import datetime
2
+ import logging
3
+ import os
4
+ import threading
5
+ import time
6
+ import zlib
7
+ from typing import Any
8
+
9
+ from croniter import croniter
10
+ from structlog.typing import FilteringBoundLogger
11
+
12
+ from sovereign.configuration import SovereignConfigv2
13
+ from sovereign.context import CronInterval, SecondsInterval, TaskInterval, stats
14
+ from sovereign.dynamic_config import Loadable
15
+ from sovereign.utils.timer import wait_until
16
+ from sovereign.v2.data.repositories import ContextRepository, DiscoveryEntryRepository
17
+ from sovereign.v2.data.worker_queue import QueueProtocol
18
+ from sovereign.v2.logging import get_named_logger
19
+ from sovereign.v2.types import Context, RenderDiscoveryJob
20
+
21
+
22
def refresh_context(
    name: str,
    node_id: str,
    config: SovereignConfigv2,
    context_repository: ContextRepository,
    discovery_job_repository: DiscoveryEntryRepository,
    queue: QueueProtocol,
):
    """
    Reload the named template context and, when its content changed, enqueue
    re-render jobs for every discovery request whose template depends on it.

    Args:
        name: key of the context within ``config.template_context.context``.
        node_id: id of the worker node executing this job (log correlation only).
        config: full server configuration (templates + template contexts).
        context_repository: storage for context values and their hashes.
        discovery_job_repository: lookup of request hashes by template type.
        queue: job queue that receives ``RenderDiscoveryJob`` entries.

    Load failures are logged (with traceback) and swallowed; no retry is
    performed yet (see TODOs below).
    """
    with stats.timed("v2.worker.job.refresh_context_ms", context=name):
        loadable = config.template_context.context[name]

        logger: FilteringBoundLogger = get_named_logger(
            f"{__name__}.{refresh_context.__qualname__} ({__file__})",
            level=logging.DEBUG,
        ).bind(
            name=name,
            node_id=node_id,
            process_id=os.getpid(),
            thread_id=threading.get_ident(),
        )

        try:
            value: Any = loadable.load()
            context_hash = _get_hash(value)

            # Only write + fan out renders when the content actually changed;
            # the stored hash is compared without fetching the full context.
            if context_repository.get_hash(name) != context_hash:
                context = Context(
                    name=name,
                    data=value,
                    data_hash=context_hash,
                    last_refreshed_at=int(time.time()),
                    refresh_after=get_refresh_after(config, loadable),
                )
                context_repository.save(context)

                # Collect every request hash rendered from a template (in any
                # API version) that declares a dependency on this context.
                request_hashes: set[str] = set()

                # NOTE(review): the "version" loop variable is unused — only the
                # template lists matter here.
                for version, version_templates in (
                    {"default": config.templates.default} | config.templates.versions
                ).items():
                    for template in version_templates:
                        if name in template.depends_on:
                            for request_hash in discovery_job_repository.find_all_request_hashes_by_template(
                                template.type
                            ):
                                request_hashes.add(request_hash)

                for request_hash in request_hashes:
                    logger.info(
                        "Queuing render for discovery request because context changed",
                        request_hash=request_hash,
                        context=name,
                    )
                    queue.put(RenderDiscoveryJob(request_hash=request_hash))
        except Exception:
            # if loadable.retry_policy is not None:
            #     print(loadable.retry_policy)
            # todo: handle exceptions/retries
            # todo: use the default retry logic instead
            logger.exception("Failed to load context")
82
+
83
+
84
+ def _get_hash(value: Any) -> int:
85
+ data: bytes = repr(value).encode()
86
+ return zlib.adler32(data) & 0xFFFFFFFF
87
+
88
+
89
# noinspection PyUnreachableCode
def _seconds_til_next_run(task_interval: TaskInterval) -> int:
    """
    Translate a task interval into the number of seconds until its next run.

    Cron intervals are resolved to the next matching datetime from now;
    plain seconds intervals are returned as-is; any other shape runs
    immediately (0 seconds).
    """
    match task_interval.value:
        case CronInterval(cron=expression):
            # croniter starts from "now" when no base time is given.
            cron = croniter(expression)
            next_date = cron.get_next(datetime.datetime)
            return int(wait_until(next_date))
        case SecondsInterval(seconds=seconds):
            return seconds
        case _:
            return 0
100
+
101
+
102
def get_refresh_after(config: SovereignConfigv2, loadable: Loadable) -> int:
    """
    Compute the unix timestamp after which a context should be refreshed.

    Interval precedence: the loadable's own setting, then the global
    ``refresh_rate``, then the global ``refresh_cron``, then a 60-second default.
    """
    interval = loadable.interval

    # get the default interval from config if not specified in loadable
    if interval is None:
        defaults = config.template_context
        if defaults.refresh_rate is not None:
            interval = str(defaults.refresh_rate)
        elif defaults.refresh_cron is not None:
            interval = defaults.refresh_cron
        else:
            interval = "60"

    parsed_interval = TaskInterval.from_str(interval)
    return int(time.time() + _seconds_til_next_run(parsed_interval))
@@ -0,0 +1,145 @@
1
+ import logging
2
+ import os
3
+ import threading
4
+ import time
5
+
6
+ from structlog.typing import FilteringBoundLogger
7
+
8
+ from sovereign import config, disabled_ciphersuite, server_cipher_container, stats
9
+ from sovereign.rendering_common import (
10
+ add_type_urls,
11
+ deserialize_config,
12
+ filter_resources,
13
+ )
14
+ from sovereign.types import DiscoveryResponse, ProcessedTemplate
15
+ from sovereign.utils import templates
16
+ from sovereign.v2.data.repositories import ContextRepository, DiscoveryEntryRepository
17
+ from sovereign.v2.logging import get_named_logger
18
+ from sovereign.v2.types import Context, DiscoveryEntry
19
+
20
+
21
# noinspection DuplicatedCode
def render_discovery_response(
    request_hash: str,
    context_repository: ContextRepository,
    discovery_entry_repository: DiscoveryEntryRepository,
    node_id: str,
):
    """
    Render the discovery response for a stored request and persist the result.

    Loads the discovery entry for ``request_hash``, gathers the template's
    dependent contexts, renders the template, and saves the updated entry with
    a new ``last_rendered_at`` timestamp.

    Return values observed in this body: True to signal "do not retry"
    (missing entry, or render already up to date), False when required
    contexts are not yet loaded.
    NOTE(review): the successful-render path falls off the end and returns
    None (falsy) — confirm the caller's retry logic treats None as success.
    """
    logger: FilteringBoundLogger = get_named_logger(
        f"{__name__}.{render_discovery_response.__qualname__} ({__file__})",
        level=logging.DEBUG,
    ).bind(
        request_hash=request_hash,
        node_id=node_id,
        process_id=os.getpid(),
        thread_id=threading.get_ident(),
    )

    try:
        logger.debug("Starting rendering of discovery response")

        discovery_entry = discovery_entry_repository.get(request_hash)

        if discovery_entry is None:
            logger.error("No discovery entry found for request hash")
            return True  # don't retry this job, it won't succeed

        request = discovery_entry.request

        with stats.timed(
            "v2.worker.job.render_discovery_response_ms",
            template=discovery_entry.request.template.resource_type,
        ):
            logger = logger.bind(
                template=discovery_entry.request.template.resource_type
            )

            # Fetch every context the template declares a dependency on.
            dependencies = request.template.depends_on
            contexts: dict[str, Context | None] = {
                name: context_repository.get(name) for name in dependencies
            }

            # A context that is absent, or stored but never refreshed, blocks rendering.
            missing_contexts = [
                name
                for name, context in contexts.items()
                if context is None or context.last_refreshed_at is None
            ]
            if missing_contexts:
                logger.error(
                    "Cannot render template for request, required contexts not yet loaded",
                    missing_contexts=missing_contexts,
                )
                return False

            # in order to handle duplicate jobs for the same request_hash, check the last_rendered_at property - if this is
            # greater than the all the last_refreshed_at values for the contexts, then we can skip rendering
            refresh_times = [
                context.last_refreshed_at
                for context in contexts.values()
                if context is not None and context.last_refreshed_at is not None
            ]

            if refresh_times:
                latest_context_refresh = max(refresh_times)

                if (
                    discovery_entry.last_rendered_at
                    and latest_context_refresh < discovery_entry.last_rendered_at
                ):
                    # the template was last rendered after all the contexts were refreshed, so we can skip rendering
                    logger.info("Skipping rendering for duplicate job")
                    return True

            # Unwrap stored Context objects into their raw data for the template.
            raw_contexts = {
                name: context.data
                for (name, context) in contexts.items()
                if context is not None
            }

            logger.debug(
                "Contexts loaded for rendering discovery response",
                contexts=raw_contexts.keys(),
                depends_on=request.template.depends_on,
            )

            # Internal (UI) requests get secrets masked and a no-op cipher;
            # real Envoy requests get the live cipher container.
            if request.is_internal_request:
                raw_contexts["__hide_from_ui"] = lambda v: "(value hidden)"
                raw_contexts["crypto"] = disabled_ciphersuite
            else:
                raw_contexts["__hide_from_ui"] = lambda v: v
                raw_contexts["crypto"] = server_cipher_container

            raw_contexts["config"] = config

            result = request.template.generate(
                discovery_request=request,
                host_header=request.desired_controlplane,
                resource_names=request.resources,
                utils=templates,
                **raw_contexts,
            )

            # Non-Python templates render to a string that must be deserialized;
            # Python templates return structured data directly.
            if not request.template.is_python_source:
                assert isinstance(result, str)
                result = deserialize_config(result)

            assert isinstance(result, dict)
            resources = filter_resources(result["resources"], request.resources)
            add_type_urls(request.api_version, request.resource_type, resources)
            processed_template = ProcessedTemplate(resources=resources)
            response = DiscoveryResponse(
                resources=resources, version_info=processed_template.version_info
            )

            # Persist the rendered response; failure is logged but not retried here.
            if not discovery_entry_repository.save(
                DiscoveryEntry(
                    request_hash=request_hash,
                    template=request.template.resource_type,
                    request=request,
                    response=response,
                    last_rendered_at=int(time.time()),
                )
            ):
                logger.error("Failed to save discovery entry")
    finally:
        # No except clause: unexpected exceptions propagate to the worker loop.
        logger.debug("Finished rendering of discovery response")