scalable_pypeline-1.2.3-py2.py3-none-any.whl → scalable_pypeline-2.0.2-py2.py3-none-any.whl
- pypeline/__init__.py +1 -1
- pypeline/barrier.py +34 -0
- pypeline/composition.py +349 -0
- pypeline/constants.py +51 -84
- pypeline/dramatiq.py +470 -0
- pypeline/extensions.py +9 -8
- pypeline/flask/__init__.py +3 -5
- pypeline/flask/api/pipelines.py +109 -148
- pypeline/flask/api/schedules.py +14 -39
- pypeline/flask/decorators.py +18 -53
- pypeline/flask/flask_pypeline.py +156 -0
- pypeline/middleware.py +61 -0
- pypeline/pipeline_config_schema.py +105 -92
- pypeline/pypeline_yaml.py +458 -0
- pypeline/schedule_config_schema.py +35 -120
- pypeline/utils/config_utils.py +52 -310
- pypeline/utils/module_utils.py +35 -71
- pypeline/utils/pipeline_utils.py +161 -0
- scalable_pypeline-2.0.2.dist-info/METADATA +217 -0
- scalable_pypeline-2.0.2.dist-info/RECORD +27 -0
- scalable_pypeline-2.0.2.dist-info/entry_points.txt +3 -0
- tests/fixtures/__init__.py +0 -1
- pypeline/celery.py +0 -206
- pypeline/celery_beat.py +0 -254
- pypeline/flask/api/utils.py +0 -35
- pypeline/flask/flask_sermos.py +0 -156
- pypeline/generators.py +0 -196
- pypeline/logging_config.py +0 -171
- pypeline/pipeline/__init__.py +0 -0
- pypeline/pipeline/chained_task.py +0 -70
- pypeline/pipeline/generator.py +0 -254
- pypeline/sermos_yaml.py +0 -442
- pypeline/utils/graph_utils.py +0 -144
- pypeline/utils/task_utils.py +0 -552
- scalable_pypeline-1.2.3.dist-info/METADATA +0 -163
- scalable_pypeline-1.2.3.dist-info/RECORD +0 -33
- scalable_pypeline-1.2.3.dist-info/entry_points.txt +0 -2
- tests/fixtures/s3_fixtures.py +0 -52
- {scalable_pypeline-1.2.3.dist-info → scalable_pypeline-2.0.2.dist-info}/LICENSE +0 -0
- {scalable_pypeline-1.2.3.dist-info → scalable_pypeline-2.0.2.dist-info}/WHEEL +0 -0
- {scalable_pypeline-1.2.3.dist-info → scalable_pypeline-2.0.2.dist-info}/top_level.txt +0 -0
pypeline/__init__.py
CHANGED
@@ -1 +1 @@
-__version__ = "1.2.3"
+__version__ = "2.0.2"
pypeline/barrier.py
ADDED
@@ -0,0 +1,34 @@
+import time
+
+import redis
+
+
+class LockingParallelBarrier:
+    def __init__(self, redis_url, task_key="task_counter", lock_key="task_lock"):
+        # Connect to Redis using the provided URL
+        self.redis = redis.StrictRedis.from_url(redis_url, decode_responses=True)
+        self.task_key = task_key
+        self.lock_key = lock_key
+
+    def acquire_lock(self, timeout=5):
+        """Acquire a lock using Redis."""
+        while True:
+            if self.redis.set(self.lock_key, "locked", nx=True, ex=timeout):
+                return True
+            time.sleep(0.1)
+
+    def release_lock(self):
+        """Release the lock in Redis."""
+        self.redis.delete(self.lock_key)
+
+    def set_task_count(self, count):
+        """Initialize the task counter in Redis."""
+        self.redis.set(self.task_key, count)
+
+    def decrement_task_count(self):
+        """Decrement the task counter in Redis."""
+        return self.redis.decr(self.task_key)
+
+    def get_task_count(self):
+        """Get the current value of the task counter."""
+        return int(self.redis.get(self.task_key) or 0)
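
For orientation, a minimal usage sketch (not part of the wheel) of the new barrier: it gates a fan-out of parallel tasks behind a shared Redis counter. The Redis URL and key names below are illustrative assumptions.

# Hypothetical example: coordinate a group of three parallel tasks.
from pypeline.barrier import LockingParallelBarrier

barrier = LockingParallelBarrier(
    "redis://localhost:6379/0",   # assumed local Redis instance
    task_key="group-uuid",        # shared counter for the task group
    lock_key="group-uuid-lock",   # lock guarding the counter
)
barrier.set_task_count(3)         # producer side: three tasks must check in

# Worker side: each task decrements the counter under the lock when done.
barrier.acquire_lock(timeout=5)
try:
    remaining = barrier.decrement_task_count()
finally:
    barrier.release_lock()
if remaining <= 0:
    print("all tasks in the group have finished")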
pypeline/composition.py
ADDED
@@ -0,0 +1,349 @@
+from __future__ import annotations
+
+import copy
+import json
+import time
+import typing
+from uuid import uuid4
+
+from dramatiq.broker import get_broker
+from dramatiq.results import ResultMissing
+from db_medley.redis_conf import RedisConnector
+from redis.exceptions import RedisError
+
+from pypeline.barrier import LockingParallelBarrier
+from pypeline.constants import DEFAULT_RESULT_TTL
+from pypeline.dramatiq import REDIS_URL
+
+from dramatiq.message import Message
+
+
+class parallel_pipeline:
+    """Chain actors together, passing the result of one actor to the
+    next one in line.
+
+    Parameters:
+      children(typing.List[typing.List[Message]]): A sequence of messages or
+        pipelines. Child pipelines are flattened into the resulting
+        pipeline.
+      broker(Broker): The broker to run the pipeline on. Defaults to
+        the current global broker.
+    """
+
+    messages: list[Message]
+
+    def __init__(self, messages: typing.List[typing.List[Message]], broker=None):
+        self.broker = broker or get_broker()
+        self.messages = messages
+        self.execution_id = str(uuid4())
+        execution_graph = []
+
+        for message_group in self.messages:
+            sub_execution_group = []
+            group_completion_uuid = str(uuid4())
+            for m in message_group:
+                m.kwargs["event"]["execution_id"] = self.execution_id
+                m.options["group_completion_uuid"] = group_completion_uuid
+                message_dict = copy.deepcopy(m.asdict())
+                sub_execution_group.append(message_dict)
+            # Last item in the group is the id of the group to be executed
+            execution_graph.append(sub_execution_group)
+
+        self.execution_graph = execution_graph
+
+        for message_group in self.messages:
+            for m in message_group:
+                m.options["execution_graph"] = execution_graph
+
+    def __len__(self):
+        """Returns the length of the parallel_pipeline."""
+        count = 0
+        for message_group in self.messages:
+            count = count + len(message_group)
+
+        return count
+
+    def __str__(self):  # pragma: no cover
+        """Return a string representation of the parallel_pipeline.
+
+        This representation shows the order of execution for each group of messages.
+        """
+        result = []
+
+        for i, message_group in enumerate(self.messages):
+            group_str = f"Group {i + 1}: [\n"
+            for j, message in enumerate(message_group):
+                message_str = f"  Message {j + 1}: {message.actor_name}\n"
+                group_str += message_str
+            group_str += "]\n"
+            result.append(group_str)
+
+        return "".join(result)
+
+    @property
+    def completed(self):
+        return self.completed_count == len(self)
+
+    @property
+    def completed_count(self):
+        count = 0
+
+        for message_group in self.messages:
+            for message in message_group:
+                try:
+                    message.get_result()
+                    count = count + 1
+                except ResultMissing:
+                    pass
+        return count
+
+    def run(self, *, delay=None):
+        """Run this parallel_pipeline.
+
+        Parameters:
+          delay(int): The minimum amount of time, in milliseconds, the
+            parallel_pipeline should be delayed by. If both parallel_pipeline's delay and
+            first message's delay are provided, the bigger value will be
+            used.
+
+        Returns:
+          parallel_pipeline: Itself.
+        """
+        starting_group = self.messages[0]
+
+        completion_uuid = starting_group[0].options["group_completion_uuid"]
+        locking_parallel_barrier = LockingParallelBarrier(
+            REDIS_URL, task_key=completion_uuid, lock_key=f"{completion_uuid}-lock"
+        )
+        locking_parallel_barrier.set_task_count(len(starting_group))
+
+        for m in starting_group:
+            self.broker.enqueue(m, delay=delay)
+
+        return self
+
+    def get_result(self, *, block=False, timeout=None):
+        """Get the result of this pipeline.
+
+        Pipeline results are represented by the result of the last
+        message in the chain.
+
+        Parameters:
+          block(bool): Whether or not to block until a result is set.
+          timeout(int): The maximum amount of time, in ms, to wait for
+            a result when block is True. Defaults to 10 seconds.
+
+        Raises:
+          ResultMissing: When block is False and the result isn't set.
+          ResultTimeout: When waiting for a result times out.
+
+        Returns:
+          object: The result.
+        """
+        last_message = self.messages[-1][-1]
+
+        backend = self.broker.get_results_backend()
+        return last_message.get_result(backend=backend, block=block, timeout=timeout)
+
+    def get_results(self, *, block=False, timeout=None):
+        """Get the results of each job in the pipeline.
+
+        Parameters:
+          block(bool): Whether or not to block until a result is set.
+          timeout(int): The maximum amount of time, in ms, to wait for
+            a result when block is True. Defaults to 10 seconds.
+
+        Raises:
+          ResultMissing: When block is False and the result isn't set.
+          ResultTimeout: When waiting for a result times out.
+
+        Returns:
+          A result generator.
+        """
+        deadline = None
+        if timeout:
+            deadline = time.monotonic() + timeout / 1000
+
+        for message_group in self.messages:
+            for message in message_group:
+                if deadline:
+                    timeout = max(0, int((deadline - time.monotonic()) * 1000))
+
+                backend = self.broker.get_results_backend()
+                yield {
+                    message.actor_name: message.get_result(
+                        backend=backend, block=block, timeout=timeout
+                    )
+                }
+
+    def to_json(self) -> str:
+        """Convert the execution graph to a JSON string representation.
+
+        This method serializes the execution graph of the pipeline into a JSON string.
+        This serialized form can be used to save the pipeline state or share it across different systems,
+        enabling the retrieval of a pipeline "run" for obtaining its results at a later time.
+
+        :return: A JSON string representing the execution graph.
+        :rtype: str
+        """
+        return json.dumps(self.execution_graph)
+
+    @classmethod
+    def from_json(cls, json_data: str) -> parallel_pipeline:
+        """Create a ParallelPipeline object from a JSON string representation of the execution graph.
+
+        This class method deserializes a JSON string into a list of messages, each representing
+        a task or operation in the pipeline. The method reconstructs the execution graph using
+        the `dramatiq.message.Message` objects and returns an instance of the `parallel_pipeline` class.
+
+        :param json_data: A JSON string containing the serialized execution graph.
+        :type json_data: str
+        :return: An instance of `parallel_pipeline` reconstructed from the JSON data.
+        :rtype: parallel_pipeline
+        """
+        execution_graph = json.loads(json_data)
+
+        messages = []
+
+        for message_group in execution_graph:
+            temp_group = []
+            for message in message_group:
+                temp_group.append(Message(**message))
+            messages.append(temp_group)
+
+        return cls(messages)
+
+
+class PipelineResult:
+    """
+    A class to manage and retrieve the results of a parallel pipeline execution.
+
+    The `PipelineResult` class provides methods for creating a result entry in a Redis database,
+    loading pipeline data from Redis, and retrieving the status and results of the pipeline execution.
+
+    Attributes:
+        pipeline (parallel_pipeline): The pipeline object representing the execution graph.
+        execution_id (str): A unique identifier for the execution of the pipeline.
+        redis_key (str): The key used to store and retrieve pipeline data from Redis.
+        redis_conn: A Redis connection object used to interact with the Redis database.
+        result_ttl (int): Time-to-live (TTL) for the result entry in Redis, in seconds.
+    """
+
+    def __init__(self, execution_id: str, result_ttl: int = DEFAULT_RESULT_TTL):
+        """
+        Initialize a PipelineResult object with an execution ID and optional result TTL.
+
+        :param execution_id: A unique identifier for the pipeline execution.
+        :type execution_id: str
+        :param result_ttl: The time-to-live (TTL) for the result entry in Redis. Defaults to DEFAULT_RESULT_TTL.
+        :type result_ttl: int
+        """
+        self.pipeline: parallel_pipeline = None
+        self.execution_id = execution_id
+        self.redis_key = f"{execution_id}-results-key"
+        self.redis_conn = RedisConnector().get_connection()
+        self.result_ttl = result_ttl
+
+    def create_result_entry(self, pipeline_json_str: str):
+        """
+        Store the serialized pipeline data in Redis with a specified TTL.
+
+        This method saves the JSON string representation of the pipeline in the Redis database
+        using the execution ID as the key. The entry is stored with a time-to-live (TTL) defined by `result_ttl`.
+
+        :param pipeline_json_str: A JSON string representing the pipeline execution graph.
+        :type pipeline_json_str: str
+        :raises ValueError: If the provided pipeline data is None or an empty string.
+        :raises RedisError: If there is an issue connecting to Redis or setting the value.
+        """
+        if not pipeline_json_str:
+            raise ValueError("No pipeline data passed to create result store")
+
+        try:
+            self.redis_conn.setex(self.redis_key, self.result_ttl, pipeline_json_str)
+        except RedisError as e:
+            raise RuntimeError(f"Failed to store pipeline data in Redis: {e}")
+
+    def load(self):
+        """
+        Load the pipeline data from Redis and reconstruct the pipeline object.
+
+        This method retrieves the JSON string stored in Redis and deserializes it
+        into a `parallel_pipeline` object, enabling access to the pipeline's execution details.
+
+        :raises RedisError: If there is an issue connecting to Redis or retrieving the data.
+        """
+        try:
+            pipeline_data = self.redis_conn.get(self.redis_key)
+            if pipeline_data:
+                self.pipeline = parallel_pipeline.from_json(pipeline_data)
+            else:
+                self.pipeline = None
+        except RedisError as e:
+            raise RuntimeError(f"Failed to load pipeline data from Redis: {e}")
+
+    @property
+    def status(self) -> str:
+        """
+        Get the current status of the pipeline execution.
+
+        This property checks the completion status of the pipeline and returns its current state.
+
+        :return: The status of the pipeline execution, which can be "complete", "pending", or "unavailable".
+        :rtype: str
+        """
+        if not self.pipeline:
+            return "unavailable"
+        return "complete" if self.pipeline.completed else "pending"
+
+    def get_results(self) -> dict:
+        """
+        Retrieve all results from the pipeline execution with unique actor identifiers.
+
+        This method aggregates results from the pipeline and ensures that each actor's result
+        has a unique identifier by appending a numeric suffix to duplicate actor names.
+
+        :return: A dictionary containing all results from the pipeline execution, keyed by unique actor identifiers.
+        :rtype: dict
+        """
+        if not self.pipeline:
+            return {}
+
+        results = {}
+        for result in self.pipeline.get_results():
+            for actor, res in result.items():
+                unique_actor = self._get_unique_actor_name(actor, results)
+                results[unique_actor] = res
+        return results
+
+    def get_result(self):
+        """
+        Retrieve a single result from the pipeline execution.
+
+        This method returns the result of a single execution step from the pipeline, if available.
+
+        :return: The result of a single execution step from the pipeline, or None if no pipeline is loaded.
+        """
+        if self.pipeline:
+            return self.pipeline.get_result()
+
+    def _get_unique_actor_name(self, actor: str, results: dict) -> str:
+        """
+        Generate a unique actor name by appending a numeric suffix if necessary.
+
+        :param actor: The base name of the actor.
+        :type actor: str
+        :param results: The current dictionary of results to check for uniqueness.
+        :type results: dict
+        :return: A unique actor name.
+        :rtype: str
+        """
+        if actor not in results:
+            return actor
+
+        suffix = 0
+        new_actor = f"{actor}-{suffix}"
+        while new_actor in results:
+            suffix += 1
+            new_actor = f"{actor}-{suffix}"
+        return new_actor
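
Taken together with the barrier, a plausible end-to-end flow is sketched below. The actors, the empty `event` payloads, and the presence of a configured Dramatiq broker with a results backend (e.g., via the new pypeline/dramatiq.py) are illustrative assumptions, not taken from the diff. Note that run() only enqueues the first message group; dispatching later groups as each group's barrier clears is presumably handled worker-side (see the new pypeline/middleware.py), so this sketch stays within a single group.

import dramatiq
from pypeline.composition import parallel_pipeline, PipelineResult

@dramatiq.actor(store_results=True)
def extract(event):
    return {"rows": 100}

@dramatiq.actor(store_results=True)
def transform(event):
    return {"rows_clean": 97}

# One group of two messages that may run in parallel. Each message must
# carry an "event" dict in kwargs, since __init__ writes execution_id into it.
pipeline = parallel_pipeline(
    [[extract.message(event={}), transform.message(event={})]]
)
pipeline.run()

# Persist the execution graph so any process can look this run up later.
tracker = PipelineResult(pipeline.execution_id)
tracker.create_result_entry(pipeline.to_json())

# Later, possibly in a different process: reload and poll for results.
tracker = PipelineResult(pipeline.execution_id)
tracker.load()
if tracker.status == "complete":
    print(tracker.get_results())  # keyed by actor name, e.g. {"extract": ...}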
pypeline/constants.py
CHANGED
@@ -1,105 +1,72 @@
-"""
+""" Pypeline Constants
 """
 import os
-from urllib.parse import urljoin
 
-
+# Pypeline configuration defaults
+PYPELINE_YAML_PATH = os.environ.get("PYPELINE_YAML_PATH", "pypeline.yaml")
+PYPELINE_CLIENT_PKG_NAME = os.environ.get("PYPELINE_CLIENT_PKG_NAME", None)
+WORKER_NAME = os.environ.get("WORKER_NAME", None)
+API_ACCESS_KEY = os.environ.get("API_ACCESS_KEY", None)
+DEFAULT_BROKER_CALLABLE = os.environ.get(
+    "DEFAULT_BROKER_CLS", "pypeline.dramatiq:configure_default_broker"
+)
 
-
-
-
-DEFAULT_REGULATOR_TASK = 'pypeline.celery.task_chain_regulator'
-DEFAULT_SUCCESS_TASK = 'pypeline.celery.pipeline_success'
+# Pypeline broker connections
+RABBIT_URL = os.environ.get("RABBIT_URL", "amqp://admin:password@127.0.0.1:5672")
+REDIS_URL = os.environ.get("REDIS_URL", "redis://localhost:6379/0")
 
-
-
+# Pypeline task defaults
+PARALLEL_PIPELINE_CALLBACK_BARRIER_TTL = int(
+    os.getenv("DRAMATIQ_PARALLEL_PIPELINE_CALLBACK_BARRIER_TTL", "86400000")
+)
+DEFAULT_RESULT_TTL = int(os.getenv("DEFAULT_RESULT_TTL", 86400))  # seconds (1 day)
+DEFAULT_TASK_TTL = int(os.getenv("DEFAULT_TASK_TTL", 600))  # seconds (10 minutes)
+DEFAULT_TASK_MAX_RETRY = int(os.getenv("DEFAULT_TASK_MAX_RETRY", 3))
+DEFAULT_TASK_MIN_BACKOFF = int(os.getenv("DEFAULT_TASK_MIN_BACKOFF", 15))  # seconds
+DEFAULT_TASK_MAX_BACKOFF = int(
+    os.getenv("DEFAULT_TASK_MAX_BACKOFF", 3600)
+)  # seconds (1 hour)
 
-PIPELINE_RUN_WRAPPER_CACHE_KEY = 'sermos_{}_{}'  # pipeline_id + execution_id
-PIPELINE_RESULT_CACHE_KEY = 'sermos_result_{}'  # execution_id
 
-
-
-# Each pipeline config/schedule config is specific to an individual deployment,
-# however, we cache only with the pipeline_id here because the usage of this
-# cache key is restricted to the redis instance associated with the deployment.
-PIPELINE_CONFIG_CACHE_KEY = 'sermos_pipeline_config_{}'  # pipeline_id
-SCHEDULE_CONFIG_CACHE_KEY = 'sermos_schedule_config'
-CONFIG_REFRESH_RATE = int(os.environ.get('CONFIG_REFRESH_RATE', 30))  # seconds
-
-# TODO where on earth is this crazy time format coming from?
-SCHEDULE_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'
-
-AUTH_LOCK_KEY = os.environ.get('AUTH_LOCK_KEY', 'sermos-auth-lock')
-AUTH_LOCK_DURATION = int(os.environ.get('AUTH_LOCK_DURATION', 30))
-
-STORED_MODEL_KEY = '{}_{}{}'
-
-# Yaml path is relative to package
-SERMOS_YAML_PATH = os.environ.get('SERMOS_YAML_PATH', 'sermos.yaml')
-SERMOS_ACCESS_KEY = os.environ.get('SERMOS_ACCESS_KEY', None)
-SERMOS_CLIENT_PKG_NAME = os.environ.get('SERMOS_CLIENT_PKG_NAME', None)
-SERMOS_DEPLOYMENT_ID = os.environ.get('SERMOS_DEPLOYMENT_ID', 'local')
-LOCAL_DEPLOYMENT_VALUE = os.environ.get('LOCAL_DEPLOYMENT_VALUE', 'local')
-DEFAULT_BASE_URL = os.environ.get('SERMOS_BASE_URL', 'https://console.sermos.ai')
-if DEFAULT_BASE_URL != 'local':
-    DEFAULT_BASE_URL += '/api/v1/'
-DEPLOYMENTS_URL = "{}deployments/{}"
-DEPLOYMENTS_DEPLOY_URL = "{}deployments/{}/deploy"
-DEPLOYMENTS_SERVICES_URL = "{}deployments/{}/services"
-DEPLOYMENTS_SERVICE_URL = "{}deployments/{}/services/{}"
-DEFAULT_AUTH_URL = urljoin(DEFAULT_BASE_URL, 'auth')
-USING_SERMOS_CLOUD = DEFAULT_BASE_URL != LOCAL_DEPLOYMENT_VALUE
-DEFAULT_CONFIG_RETRIEVAL_PAGE_SIZE = 25
-WORKFLOW_PROCESSOR_DEFAULT_QUEUE = 'celery'
+MS_IN_SECONDS = 1000
+API_PATH_V1 = "/api/v1"
 
 # Default 'responses' dictionary when decorating endpoints with @api.doc()
 # Extend as necessary.
 API_DOC_RESPONSES = {
-    200: {
-        'code': 200,
-        'description': 'Successful response.'
-    },
-    400: {
-        'code': 400,
-        'description': 'Malformed request. Verify payload is correct.'
-    },
+    200: {"code": 200, "description": "Successful response."},
+    400: {"code": 400, "description": "Malformed request. Verify payload is correct."},
     401: {
-        'code': 401,
-        'description':
-        'Unauthorized. Verify your API Key (`accesskey`) header.'
-    }
+        "code": 401,
+        "description": "Unauthorized. Verify your API Key (`accesskey`) header.",
+    },
 }
 
 # Default 'params' dictionary when decorating endpoints with @api.doc()
 # Extend as necessary.
 API_DOC_PARAMS = {
-    'accesskey': {
-        'in': 'header',
-        'name': 'accesskey',
-        'description': "Your API Consumer's `accesskey`",
-        'type': 'string',
-        'required': False
+    "accesskey": {
+        "in": "header",
+        "name": "accesskey",
+        "description": "Your API Consumer's `accesskey`",
+        "type": "string",
+        "required": False,
     }
 }
 
 DEFAULT_OPENAPI_CONFIG = (
-    (
-    (
-    (
-        model_version: str,
-        model_postfix: str = ''):
-    """ Ensures we're consistently creating the keys for storing/retrieving.
-    """
-    return STORED_MODEL_KEY.format(model_prefix, model_version, model_postfix)
+    ("SWAGGER_UI_DOC_EXPANSION", "list"),
+    ("API_DOCUMENTATION_TITLE", "Pypeline API Specs"),
+    ("API_DOCUMENTATION_DESCRIPTION", "Available API Endpoints"),
+    ("OPENAPI_VERSION", "3.0.2"),
+    ("OPENAPI_URL_PREFIX", "/api/v1"),
+    ("OPENAPI_SWAGGER_APP_NAME", "Pypeline - API Reference"),
+    ("OPENAPI_SWAGGER_UI_PATH", "/docs"),
+    ("OPENAPI_SWAGGER_BASE_TEMPLATE", "swagger/swagger_ui.html"),
+    ("OPENAPI_SWAGGER_URL", "/docs"),
+    (
+        "OPENAPI_SWAGGER_UI_URL",
+        "https://cdnjs.cloudflare.com/ajax/libs/swagger-ui/3.24.2/",
+    ),
+    ("EXPLAIN_TEMPLATE_LOADING", False),
+)