UncountablePythonSDK 0.0.68-py3-none-any.whl → 0.0.70-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (47)
  1. {UncountablePythonSDK-0.0.68.dist-info → UncountablePythonSDK-0.0.70.dist-info}/METADATA +3 -1
  2. {UncountablePythonSDK-0.0.68.dist-info → UncountablePythonSDK-0.0.70.dist-info}/RECORD +47 -19
  3. docs/requirements.txt +1 -1
  4. examples/integration-server/jobs/materials_auto/example_cron.py +18 -0
  5. examples/integration-server/jobs/materials_auto/profile.yaml +19 -0
  6. examples/integration-server/pyproject.toml +224 -0
  7. examples/set_recipe_metadata_file.py +40 -0
  8. examples/set_recipe_output_file_sdk.py +26 -0
  9. uncountable/core/environment.py +5 -1
  10. uncountable/integration/cli.py +1 -0
  11. uncountable/integration/cron.py +12 -28
  12. uncountable/integration/db/connect.py +12 -2
  13. uncountable/integration/db/session.py +25 -0
  14. uncountable/integration/entrypoint.py +6 -6
  15. uncountable/integration/executors/generic_upload_executor.py +5 -1
  16. uncountable/integration/job.py +44 -17
  17. uncountable/integration/queue_runner/__init__.py +0 -0
  18. uncountable/integration/queue_runner/command_server/__init__.py +24 -0
  19. uncountable/integration/queue_runner/command_server/command_client.py +68 -0
  20. uncountable/integration/queue_runner/command_server/command_server.py +64 -0
  21. uncountable/integration/queue_runner/command_server/protocol/__init__.py +0 -0
  22. uncountable/integration/queue_runner/command_server/protocol/command_server.proto +22 -0
  23. uncountable/integration/queue_runner/command_server/protocol/command_server_pb2.py +40 -0
  24. uncountable/integration/queue_runner/command_server/protocol/command_server_pb2.pyi +38 -0
  25. uncountable/integration/queue_runner/command_server/protocol/command_server_pb2_grpc.py +129 -0
  26. uncountable/integration/queue_runner/command_server/types.py +52 -0
  27. uncountable/integration/queue_runner/datastore/__init__.py +3 -0
  28. uncountable/integration/queue_runner/datastore/datastore_sqlite.py +93 -0
  29. uncountable/integration/queue_runner/datastore/interface.py +19 -0
  30. uncountable/integration/queue_runner/datastore/model.py +17 -0
  31. uncountable/integration/queue_runner/job_scheduler.py +119 -0
  32. uncountable/integration/queue_runner/queue_runner.py +26 -0
  33. uncountable/integration/queue_runner/types.py +7 -0
  34. uncountable/integration/queue_runner/worker.py +109 -0
  35. uncountable/integration/scan_profiles.py +2 -0
  36. uncountable/integration/scheduler.py +144 -0
  37. uncountable/integration/webhook_server/entrypoint.py +45 -45
  38. uncountable/types/__init__.py +4 -0
  39. uncountable/types/api/recipes/get_recipes_data.py +1 -0
  40. uncountable/types/api/recipes/set_recipe_output_file.py +46 -0
  41. uncountable/types/client_base.py +20 -0
  42. uncountable/types/entity_t.py +2 -0
  43. uncountable/types/queued_job.py +16 -0
  44. uncountable/types/queued_job_t.py +107 -0
  45. uncountable/types/recipe_metadata_t.py +1 -0
  46. {UncountablePythonSDK-0.0.68.dist-info → UncountablePythonSDK-0.0.70.dist-info}/WHEEL +0 -0
  47. {UncountablePythonSDK-0.0.68.dist-info → UncountablePythonSDK-0.0.70.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,19 @@
+ from abc import ABC, abstractmethod
+
+ from uncountable.types import queued_job_t
+
+
+ class Datastore(ABC):
+     @abstractmethod
+     def add_job_to_queue(
+         self, job_payload: queued_job_t.QueuedJobPayload, job_ref_name: str
+     ) -> queued_job_t.QueuedJob: ...
+
+     @abstractmethod
+     def remove_job_from_queue(self, queued_job_uuid: str) -> None: ...
+
+     @abstractmethod
+     def increment_num_attempts(self, queued_job_uuid: str) -> int: ...
+
+     @abstractmethod
+     def load_job_queue(self) -> list[queued_job_t.QueuedJob]: ...
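A concrete implementation, DatastoreSqlite, ships in datastore_sqlite.py (entry 28 in the file list). As a reading aid, a minimal in-memory variant might look like the sketch below; the QueuedJob constructor fields are inferred from how the scheduler and worker use them elsewhere in this diff, so treat them as assumptions rather than the real dataclass signature.

```python
# Hypothetical in-memory Datastore, for illustration only. The SDK ships
# DatastoreSqlite, which persists the queue to SQLite instead.
import uuid

from uncountable.integration.queue_runner.datastore.interface import Datastore
from uncountable.types import queued_job_t


class DatastoreInMemory(Datastore):
    def __init__(self) -> None:
        self._jobs: dict[str, queued_job_t.QueuedJob] = {}
        self._attempts: dict[str, int] = {}

    def add_job_to_queue(
        self, job_payload: queued_job_t.QueuedJobPayload, job_ref_name: str
    ) -> queued_job_t.QueuedJob:
        # Field names below are inferred from usage in job_scheduler.py and
        # worker.py; the real QueuedJob may require additional fields.
        job = queued_job_t.QueuedJob(
            queued_job_uuid=str(uuid.uuid4()),
            job_ref_name=job_ref_name,
            payload=job_payload,
        )
        self._jobs[job.queued_job_uuid] = job
        self._attempts[job.queued_job_uuid] = 0
        return job

    def remove_job_from_queue(self, queued_job_uuid: str) -> None:
        self._jobs.pop(queued_job_uuid, None)

    def increment_num_attempts(self, queued_job_uuid: str) -> int:
        self._attempts[queued_job_uuid] = self._attempts.get(queued_job_uuid, 0) + 1
        return self._attempts[queued_job_uuid]

    def load_job_queue(self) -> list[queued_job_t.QueuedJob]:
        return list(self._jobs.values())
```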
@@ -0,0 +1,17 @@
+ from sqlalchemy import JSON, BigInteger, Column, DateTime, Text
+ from sqlalchemy.orm import declarative_base
+ from sqlalchemy.sql import func
+
+ Base = declarative_base()
+
+
+ class QueuedJob(Base):
+     __tablename__ = "queued_jobs"
+
+     id = Column(Text, primary_key=True)
+     job_ref_name = Column(Text, nullable=False, index=True)
+     submitted_at = Column(
+         DateTime(timezone=True), server_default=func.current_timestamp(), nullable=False
+     )
+     payload = Column(JSON, nullable=False)
+     num_attempts = Column(BigInteger, nullable=False, default=0, server_default="0")
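A hedged sketch of how this model is exercised with stock SQLAlchemy; the in-memory SQLite URL is illustrative only, since the runner builds its engine through create_db_engine(IntegrationDBService.RUNNER):

```python
# Illustrative only: create the queued_jobs table and round-trip one row.
from sqlalchemy import create_engine, select
from sqlalchemy.orm import sessionmaker

from uncountable.integration.queue_runner.datastore.model import Base, QueuedJob

engine = create_engine("sqlite:///:memory:")  # the runner uses create_db_engine()
Base.metadata.create_all(engine)  # emits CREATE TABLE queued_jobs

Session = sessionmaker(bind=engine)
with Session() as session:
    session.add(QueuedJob(id="abc-123", job_ref_name="example_cron", payload={}))
    session.commit()
    pending = session.execute(select(QueuedJob)).scalars().all()
    assert pending[0].num_attempts == 0  # filled in by server_default="0"
```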
@@ -0,0 +1,119 @@
+ import asyncio
+ import typing
+ from concurrent.futures import ProcessPoolExecutor
+ from dataclasses import dataclass
+
+ from uncountable.integration.db.connect import IntegrationDBService, create_db_engine
+ from uncountable.integration.db.session import get_session_maker
+ from uncountable.integration.queue_runner.command_server import (
+     CommandEnqueueJobResponse,
+     CommandQueue,
+     CommandTask,
+ )
+ from uncountable.integration.queue_runner.datastore import DatastoreSqlite
+ from uncountable.integration.queue_runner.datastore.interface import Datastore
+ from uncountable.integration.queue_runner.worker import Worker
+ from uncountable.integration.scan_profiles import load_profiles
+ from uncountable.types import job_definition_t, queued_job_t
+
+ from .types import ResultQueue, ResultTask
+
+ _MAX_JOB_WORKERS = 5
+
+
+ @dataclass(kw_only=True, frozen=True)
+ class JobListenerKey:
+     profile_name: str
+     subqueue_name: str = "default"
+
+
+ def _get_job_worker_key(
+     job_definition: job_definition_t.JobDefinition, profile_name: str
+ ) -> JobListenerKey:
+     return JobListenerKey(profile_name=profile_name)
+
+
+ def on_worker_crash(
+     worker_key: JobListenerKey,
+ ) -> typing.Callable[[asyncio.Task], None]:
+     def hook(task: asyncio.Task) -> None:
+         raise Exception(
+             f"worker {worker_key.profile_name}_{worker_key.subqueue_name} crashed unexpectedly"
+         )
+
+     return hook
+
+
+ def _start_workers(
+     process_pool: ProcessPoolExecutor, result_queue: ResultQueue, datastore: Datastore
+ ) -> dict[str, Worker]:
+     profiles = load_profiles()
+     job_queue_worker_lookup: dict[JobListenerKey, Worker] = {}
+     job_worker_lookup: dict[str, Worker] = {}
+     job_definition_lookup: dict[str, job_definition_t.JobDefinition] = {}
+     for profile in profiles:
+         for job_definition in profile.definition.jobs:
+             job_definition_lookup[job_definition.id] = job_definition
+             job_worker_key = _get_job_worker_key(job_definition, profile.name)
+             if job_worker_key not in job_queue_worker_lookup:
+                 worker = Worker(
+                     process_pool=process_pool,
+                     listen_queue=asyncio.Queue(),
+                     result_queue=result_queue,
+                     datastore=datastore,
+                 )
+                 task = asyncio.create_task(worker.run_worker_loop())
+                 task.add_done_callback(on_worker_crash(job_worker_key))
+                 job_queue_worker_lookup[job_worker_key] = worker
+             job_worker_lookup[job_definition.id] = job_queue_worker_lookup[
+                 job_worker_key
+             ]
+     return job_worker_lookup
+
+
+ async def start_scheduler(command_queue: CommandQueue) -> None:
+     result_queue: ResultQueue = asyncio.Queue()
+     engine = create_db_engine(IntegrationDBService.RUNNER)
+     session_maker = get_session_maker(engine)
+
+     datastore = DatastoreSqlite(session_maker)
+     datastore.setup(engine)
+
+     with ProcessPoolExecutor(max_workers=_MAX_JOB_WORKERS) as process_pool:
+         job_worker_lookup = _start_workers(
+             process_pool, result_queue, datastore=datastore
+         )
+
+         queued_jobs = datastore.load_job_queue()
+
+         async def enqueue_queued_job(queued_job: queued_job_t.QueuedJob) -> None:
+             worker = job_worker_lookup[queued_job.job_ref_name]
+             await worker.listen_queue.put(queued_job)
+
+         for queued_job in queued_jobs:
+             await enqueue_queued_job(queued_job)
+
+         result_task: ResultTask = asyncio.create_task(result_queue.get())
+         command_task: CommandTask = asyncio.create_task(command_queue.get())
+         while True:
+             finished, _ = await asyncio.wait(
+                 [result_task, command_task], return_when=asyncio.FIRST_COMPLETED
+             )
+
+             for task in finished:
+                 if task == command_task:
+                     command = command_task.result()
+                     queued_job = datastore.add_job_to_queue(
+                         job_payload=command.payload, job_ref_name=command.job_ref_name
+                     )
+                     await command.response_queue.put(
+                         CommandEnqueueJobResponse(
+                             queued_job_uuid=queued_job.queued_job_uuid
+                         )
+                     )
+                     await enqueue_queued_job(queued_job)
+                     command_task = asyncio.create_task(command_queue.get())
+                 elif task == result_task:
+                     queued_job_result = result_task.result()
+                     datastore.remove_job_from_queue(queued_job_result.queued_job_uuid)
+                     result_task = asyncio.create_task(result_queue.get())
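start_scheduler multiplexes two event sources, command_queue (jobs arriving over the command server) and result_queue (jobs finishing in workers), via asyncio.wait, re-arming only whichever task completed. That select-style pattern in isolation, with hypothetical queue contents:

```python
import asyncio


async def dispatch(commands: asyncio.Queue, results: asyncio.Queue) -> None:
    # One pending .get() task per queue; asyncio.wait returns as soon as
    # either completes, and only the finished task is re-created.
    command_task = asyncio.create_task(commands.get())
    result_task = asyncio.create_task(results.get())
    while True:
        done, _ = await asyncio.wait(
            [command_task, result_task], return_when=asyncio.FIRST_COMPLETED
        )
        if command_task in done:
            print("command:", command_task.result())
            command_task = asyncio.create_task(commands.get())  # re-arm
        if result_task in done:
            print("result:", result_task.result())
            result_task = asyncio.create_task(results.get())  # re-arm
```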
@@ -0,0 +1,26 @@
+ import asyncio
+
+ from uncountable.integration.queue_runner.command_server import serve
+ from uncountable.integration.queue_runner.command_server.types import CommandQueue
+ from uncountable.integration.queue_runner.job_scheduler import start_scheduler
+
+
+ async def queue_runner_loop() -> None:
+     command_queue: CommandQueue = asyncio.Queue()
+
+     command_server = asyncio.create_task(serve(command_queue))
+
+     scheduler = asyncio.create_task(start_scheduler(command_queue))
+
+     await scheduler
+     await command_server
+
+
+ def start_queue_runner() -> None:
+     loop = asyncio.new_event_loop()
+     loop.run_until_complete(queue_runner_loop())
+     loop.close()
+
+
+ if __name__ == "__main__":
+     start_queue_runner()
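scheduler.py, further down in this diff, runs this entrypoint in a child process and health-checks it before starting the other services. Reproducing just the launch step by hand:

```python
# Minimal sketch: run the queue runner in a child process, as scheduler.py does.
import multiprocessing

from uncountable.integration.queue_runner.queue_runner import start_queue_runner

if __name__ == "__main__":
    proc = multiprocessing.Process(target=start_queue_runner)
    proc.start()
    proc.join()
```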
@@ -0,0 +1,7 @@
+ from asyncio import Queue, Task
+
+ from uncountable.types import queued_job_t
+
+ ListenQueue = Queue[queued_job_t.QueuedJob]
+ ResultQueue = Queue[queued_job_t.QueuedJobResult]
+ ResultTask = Task[queued_job_t.QueuedJobResult]
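These parametrized aliases make queue traffic statically typed; a consumer of a ResultQueue gets QueuedJobResult inference for free, as in this small sketch:

```python
# Sketch: a type checker infers `result` as queued_job_t.QueuedJobResult here.
from uncountable.integration.queue_runner.types import ResultQueue


async def drain_one(results: ResultQueue) -> str:
    result = await results.get()
    return result.queued_job_uuid
```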
@@ -0,0 +1,109 @@
+ import asyncio
+ from concurrent.futures import ProcessPoolExecutor
+ from dataclasses import dataclass
+
+ from uncountable.core.async_batch import AsyncBatchProcessor
+ from uncountable.integration.construct_client import construct_uncountable_client
+ from uncountable.integration.executors.executors import execute_job
+ from uncountable.integration.job import JobArguments
+ from uncountable.integration.queue_runner.datastore.interface import Datastore
+ from uncountable.integration.queue_runner.types import ListenQueue, ResultQueue
+ from uncountable.integration.scan_profiles import load_profiles
+ from uncountable.integration.telemetry import JobLogger, get_otel_tracer
+ from uncountable.types import base_t, job_definition_t, queued_job_t
+
+
+ class Worker:
+     def __init__(
+         self,
+         *,
+         process_pool: ProcessPoolExecutor,
+         listen_queue: ListenQueue,
+         result_queue: ResultQueue,
+         datastore: Datastore,
+     ) -> None:
+         self.process_pool = process_pool
+         self.listen_queue = listen_queue
+         self.result_queue = result_queue
+         self.datastore = datastore
+
+     async def run_worker_loop(self) -> None:
+         while True:
+             queued_job = await self.listen_queue.get()
+             self.datastore.increment_num_attempts(queued_job.queued_job_uuid)
+             loop = asyncio.get_event_loop()
+             result = await loop.run_in_executor(
+                 self.process_pool, run_queued_job, queued_job
+             )
+             assert isinstance(result, job_definition_t.JobResult)
+             await self.result_queue.put(
+                 queued_job_t.QueuedJobResult(
+                     job_result=result, queued_job_uuid=queued_job.queued_job_uuid
+                 )
+             )
+
+
+ @dataclass(kw_only=True)
+ class RegisteredJobDetails:
+     profile_metadata: job_definition_t.ProfileMetadata
+     job_definition: job_definition_t.JobDefinition
+
+
+ def get_registered_job_details(job_ref_name: str) -> RegisteredJobDetails:
+     profiles = load_profiles()
+     for profile in profiles:
+         for job_definition in profile.definition.jobs:
+             if job_definition.id == job_ref_name:
+                 return RegisteredJobDetails(
+                     profile_metadata=job_definition_t.ProfileMetadata(
+                         name=profile.name,
+                         base_url=profile.definition.base_url,
+                         auth_retrieval=profile.definition.auth_retrieval,
+                         client_options=profile.definition.client_options,
+                     ),
+                     job_definition=job_definition,
+                 )
+     raise Exception(f"profile not found for job {job_ref_name}")
+
+
+ def _resolve_queued_job_payload(queued_job: queued_job_t.QueuedJob) -> base_t.JsonValue:
+     match queued_job.payload.invocation_context:
+         case queued_job_t.InvocationContextCron():
+             return None
+         case queued_job_t.InvocationContextManual():
+             return None
+         case queued_job_t.InvocationContextWebhook():
+             return queued_job.payload.invocation_context.webhook_payload
+
+
+ def run_queued_job(
+     queued_job: queued_job_t.QueuedJob,
+ ) -> job_definition_t.JobResult:
+     with get_otel_tracer().start_as_current_span(name="run_queued_job") as span:
+         job_details = get_registered_job_details(queued_job.job_ref_name)
+         job_logger = JobLogger(
+             base_span=span,
+             profile_metadata=job_details.profile_metadata,
+             job_definition=job_details.job_definition,
+         )
+         client = construct_uncountable_client(
+             profile_meta=job_details.profile_metadata, job_logger=job_logger
+         )
+         batch_processor = AsyncBatchProcessor(client=client)
+
+         payload = _resolve_queued_job_payload(queued_job)
+
+         args = JobArguments(
+             job_definition=job_details.job_definition,
+             client=client,
+             batch_processor=batch_processor,
+             profile_metadata=job_details.profile_metadata,
+             logger=job_logger,
+             payload=payload,
+         )
+
+         return execute_job(
+             args=args,
+             profile_metadata=job_details.profile_metadata,
+             job_definition=job_details.job_definition,
+         )
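The core move in run_worker_loop is handing a blocking, CPU-bound callable to the ProcessPoolExecutor via run_in_executor so the event loop stays responsive; run_queued_job must be a picklable module-level function for this to work. The pattern in isolation, with a stand-in job:

```python
import asyncio
from concurrent.futures import ProcessPoolExecutor


def blocking_job(n: int) -> int:
    # Stand-in for run_queued_job; module-level so it can be pickled.
    return sum(i * i for i in range(n))


async def main() -> None:
    loop = asyncio.get_running_loop()
    with ProcessPoolExecutor(max_workers=2) as pool:
        # Runs in a child process; the coroutine suspends instead of blocking.
        result = await loop.run_in_executor(pool, blocking_job, 10_000)
        print(result)


if __name__ == "__main__":
    asyncio.run(main())
```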
@@ -1,3 +1,4 @@
+ import functools
  import os
  from dataclasses import dataclass
  from importlib import resources
@@ -14,6 +15,7 @@ class ProfileDetails:
      definition: job_definition_t.ProfileDefinition


+ @functools.cache
  def load_profiles() -> list[ProfileDetails]:
      profiles_module = os.environ["UNC_PROFILES_MODULE"]
      profiles = [
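The new @functools.cache decorator memoizes load_profiles per process, so the profile module scan runs once even though the scheduler, worker, and webhook server all call it. A tiny illustration of the behavior:

```python
import functools


@functools.cache
def load_config() -> dict:
    print("scanning profiles...")  # printed on the first call only
    return {"profiles": ["materials_auto"]}


load_config()  # scanning profiles...
load_config()  # served from the cache
```

Since the runner forks worker processes, each process pays the scan cost once; the cache is not shared across processes.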
@@ -0,0 +1,144 @@
+ import multiprocessing
+ import subprocess
+ import sys
+ import time
+ from dataclasses import dataclass
+ from datetime import datetime, timezone
+
+ from opentelemetry.trace import get_current_span
+
+ from uncountable.core.environment import get_local_admin_server_port
+ from uncountable.integration.entrypoint import main as cron_target
+ from uncountable.integration.queue_runner.command_server import (
+     CommandServerTimeout,
+     check_health,
+ )
+ from uncountable.integration.queue_runner.queue_runner import start_queue_runner
+ from uncountable.integration.telemetry import Logger
+
+ SHUTDOWN_TIMEOUT_SECS = 30
+
+
+ @dataclass(kw_only=True)
+ class ProcessInfo:
+     name: str
+     process: multiprocessing.Process | subprocess.Popen[bytes]
+
+     @property
+     def is_alive(self) -> bool:
+         match self.process:
+             case multiprocessing.Process():
+                 return self.process.is_alive()
+             case subprocess.Popen():
+                 return self.process.poll() is None
+
+     @property
+     def pid(self) -> int | None:
+         return self.process.pid
+
+
+ def handle_shutdown(logger: Logger, processes: list[ProcessInfo]) -> None:
+     logger.log_info("received shutdown command, shutting down sub-processes")
+     for proc_info in processes:
+         if proc_info.is_alive:
+             proc_info.process.terminate()
+
+     shutdown_start = time.time()
+     still_living_processes = processes
+     while (
+         time.time() - shutdown_start < SHUTDOWN_TIMEOUT_SECS
+         and len(still_living_processes) > 0
+     ):
+         current_loop_processes = [*still_living_processes]
+         logger.log_info(
+             "waiting for sub-processes to shut down",
+             attributes={
+                 "still_living_processes": [
+                     proc_info.name for proc_info in still_living_processes
+                 ]
+             },
+         )
+         still_living_processes = []
+         for proc_info in current_loop_processes:
+             if not proc_info.is_alive:
+                 logger.log_info(f"{proc_info.name} shut down successfully")
+             else:
+                 still_living_processes.append(proc_info)
+         time.sleep(1)
+
+     for proc_info in still_living_processes:
+         logger.log_warning(
+             f"{proc_info.name} failed to shut down after {SHUTDOWN_TIMEOUT_SECS} seconds, forcefully terminating"
+         )
+         proc_info.process.kill()
+
+
+ def check_process_alive(logger: Logger, processes: list[ProcessInfo]) -> None:
+     for proc_info in processes:
+         if not proc_info.is_alive:
+             logger.log_error(
+                 f"process {proc_info.name} shut down unexpectedly! shutting down scheduler"
+             )
+             handle_shutdown(logger, processes)
+             sys.exit(1)
+
+
+ def _wait_queue_runner_online() -> None:
+     _MAX_QUEUE_RUNNER_HEALTH_CHECKS = 10
+     _QUEUE_RUNNER_HEALTH_CHECK_DELAY_SECS = 1
+
+     num_attempts = 0
+     before = datetime.now(timezone.utc)
+     while num_attempts < _MAX_QUEUE_RUNNER_HEALTH_CHECKS:
+         try:
+             if check_health(port=get_local_admin_server_port()):
+                 return
+         except CommandServerTimeout:
+             pass
+         num_attempts += 1
+         time.sleep(_QUEUE_RUNNER_HEALTH_CHECK_DELAY_SECS)
+     after = datetime.now(timezone.utc)
+     duration_secs = (after - before).seconds
+     raise Exception(f"queue runner failed to come online after {duration_secs} seconds")
+
+
+ def main() -> None:
+     logger = Logger(get_current_span())
+     processes: list[ProcessInfo] = []
+
+     def add_process(process: ProcessInfo) -> None:
+         processes.append(process)
+         logger.log_info(f"started process {process.name}")
+
+     runner_process = multiprocessing.Process(target=start_queue_runner)
+     runner_process.start()
+     add_process(ProcessInfo(name="queue runner", process=runner_process))
+
+     try:
+         _wait_queue_runner_online()
+     except Exception as e:
+         logger.log_exception(e)
+         handle_shutdown(logger, processes=processes)
+         return
+
+     cron_process = multiprocessing.Process(target=cron_target)
+     cron_process.start()
+     add_process(ProcessInfo(name="cron server", process=cron_process))
+
+     uwsgi_process = subprocess.Popen([
+         "/app/env/bin/uwsgi",
+         "-H",
+         "/app/env",
+         "--die-on-term",
+     ])
+     add_process(ProcessInfo(name="uwsgi", process=uwsgi_process))
+
+     try:
+         while True:
+             check_process_alive(logger, processes=processes)
+             time.sleep(1)
+     except KeyboardInterrupt:
+         handle_shutdown(logger, processes=processes)
+
+
+ main()
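handle_shutdown follows the conventional terminate, wait-with-deadline, kill escalation. Stripped of logging and with hypothetical names, the pattern is:

```python
import multiprocessing
import time


def shutdown(procs: list[multiprocessing.Process], timeout_secs: float = 30.0) -> None:
    for p in procs:
        if p.is_alive():
            p.terminate()  # polite SIGTERM first
    deadline = time.time() + timeout_secs
    while time.time() < deadline and any(p.is_alive() for p in procs):
        time.sleep(1)
    for p in procs:
        if p.is_alive():
            p.kill()  # SIGKILL anything that ignored the grace period
```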
@@ -1,4 +1,5 @@
  import hmac
+ import typing
  from dataclasses import dataclass

  import flask
@@ -6,15 +7,21 @@ import simplejson
  from flask.typing import ResponseReturnValue
  from flask.wrappers import Response
  from opentelemetry.trace import get_current_span
- from uncountable.core.async_batch import AsyncBatchProcessor
- from uncountable.core.environment import get_integration_env, get_webhook_server_port
- from uncountable.integration.construct_client import construct_uncountable_client
- from uncountable.integration.executors.executors import execute_job
- from uncountable.integration.job import WebhookJobArguments
+ from uncountable.core.environment import (
+     get_integration_env,
+     get_local_admin_server_port,
+     get_webhook_server_port,
+ )
+ from uncountable.integration.queue_runner.command_server.command_client import (
+     send_job_queue_message,
+ )
+ from uncountable.integration.queue_runner.command_server.types import (
+     CommandServerException,
+ )
  from uncountable.integration.scan_profiles import load_profiles
  from uncountable.integration.secret_retrieval.retrieve_secret import retrieve_secret
- from uncountable.integration.telemetry import JobLogger, Logger, get_otel_tracer
- from uncountable.types import job_definition_t, webhook_job_t
+ from uncountable.integration.telemetry import Logger
+ from uncountable.types import base_t, job_definition_t, queued_job_t, webhook_job_t

  from pkgs.argument_parser import CachedParser

@@ -66,7 +73,7 @@ class WebhookException(BaseException):

  def _parse_webhook_payload(
      *, raw_request_body: bytes, signature_key: str, passed_signature: str
- ) -> webhook_job_t.WebhookEventBody:
+ ) -> base_t.JsonValue:
      request_body_signature = hmac.new(
          signature_key.encode("utf-8"), msg=raw_request_body, digestmod="sha256"
      ).hexdigest()
@@ -76,7 +83,7 @@ def _parse_webhook_payload(

      try:
          request_body = simplejson.loads(raw_request_body.decode())
-         return webhook_payload_parser.parse_api(request_body)
+         return typing.cast(base_t.JsonValue, request_body)
      except (simplejson.JSONDecodeError, ValueError) as e:
          raise WebhookException.body_parse_error() from e

@@ -110,31 +117,20 @@ def register_route(
                    passed_signature=passed_signature,
                )

-             with get_otel_tracer().start_as_current_span(
-                 "webhook_executor"
-             ) as span:
-                 job_logger = JobLogger(
-                     profile_metadata=profile_metadata,
-                     job_definition=job,
-                     base_span=span,
-                 )
-                 client = construct_uncountable_client(
-                     profile_meta=profile_meta, job_logger=job_logger
-                 )
-                 execute_job(
-                     job_definition=job,
-                     profile_metadata=profile_meta,
-                     args=WebhookJobArguments(
-                         job_definition=job,
-                         profile_metadata=profile_metadata,
-                         client=client,
-                         batch_processor=AsyncBatchProcessor(client=client),
-                         logger=job_logger,
-                         payload=webhook_payload,
+             try:
+                 send_job_queue_message(
+                     job_ref_name=job.id,
+                     payload=queued_job_t.QueuedJobPayload(
+                         invocation_context=queued_job_t.InvocationContextWebhook(
+                             webhook_payload=webhook_payload
+                         )
                      ),
+                     port=get_local_admin_server_port(),
                  )
+             except CommandServerException as e:
+                 raise WebhookException.unknown_error() from e

-                 return flask.jsonify(WebhookResponse())
+             return flask.jsonify(WebhookResponse())
          except WebhookException as e:
              server_logger.log_exception(e)
              return e.make_error_response()
@@ -145,20 +141,24 @@ def register_route(
      server_logger.log_info(f"job {job.id} webhook registered at: {route}")


- profiles = load_profiles()
- for profile in profiles:
-     server_logger = Logger(get_current_span())
-     profile_metadata = job_definition_t.ProfileMetadata(
-         name=profile.name,
-         auth_retrieval=profile.definition.auth_retrieval,
-         base_url=profile.definition.base_url,
-         client_options=profile.definition.client_options,
-     )
-     for job in profile.definition.jobs:
-         if isinstance(job, job_definition_t.WebhookJobDefinition):
-             register_route(
-                 server_logger=server_logger, profile_meta=profile_metadata, job=job
-             )
+ def main() -> None:
+     profiles = load_profiles()
+     for profile in profiles:
+         server_logger = Logger(get_current_span())
+         profile_metadata = job_definition_t.ProfileMetadata(
+             name=profile.name,
+             auth_retrieval=profile.definition.auth_retrieval,
+             base_url=profile.definition.base_url,
+             client_options=profile.definition.client_options,
+         )
+         for job in profile.definition.jobs:
+             if isinstance(job, job_definition_t.WebhookJobDefinition):
+                 register_route(
+                     server_logger=server_logger, profile_meta=profile_metadata, job=job
+                 )
+
+
+ main()


  if __name__ == "__main__":
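With these changes, _parse_webhook_payload hands the handler the raw JSON body instead of a parsed WebhookEventBody, but the HMAC-SHA256 gate in front of it is unchanged. For reference, a self-contained sketch of that signature scheme (the comparison step itself happens in a part of the file these hunks do not show):

```python
import hmac


def verify_signature(raw_body: bytes, signature_key: str, passed_signature: str) -> bool:
    expected = hmac.new(
        signature_key.encode("utf-8"), msg=raw_body, digestmod="sha256"
    ).hexdigest()
    # compare_digest avoids leaking the match position through timing.
    return hmac.compare_digest(expected, passed_signature)
```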
@@ -63,6 +63,7 @@ from . import overrides_t as overrides_t
  from . import permissions_t as permissions_t
  from . import phases_t as phases_t
  from . import post_base_t as post_base_t
+ from . import queued_job_t as queued_job_t
  from . import recipe_identifiers_t as recipe_identifiers_t
  from . import recipe_inputs_t as recipe_inputs_t
  from . import recipe_links_t as recipe_links_t
@@ -86,6 +87,7 @@ from .api.inputs import set_intermediate_type as set_intermediate_type_t
  from .api.recipes import set_recipe_inputs as set_recipe_inputs_t
  from .api.recipes import set_recipe_metadata as set_recipe_metadata_t
  from .api.recipes import set_recipe_output_annotations as set_recipe_output_annotations_t
+ from .api.recipes import set_recipe_output_file as set_recipe_output_file_t
  from .api.recipes import set_recipe_outputs as set_recipe_outputs_t
  from .api.recipes import set_recipe_tags as set_recipe_tags_t
  from .api.entity import set_values as set_values_t
@@ -162,6 +164,7 @@ __all__: list[str] = [
      "permissions_t",
      "phases_t",
      "post_base_t",
+     "queued_job_t",
      "recipe_identifiers_t",
      "recipe_inputs_t",
      "recipe_links_t",
@@ -185,6 +188,7 @@ __all__: list[str] = [
      "set_recipe_inputs_t",
      "set_recipe_metadata_t",
      "set_recipe_output_annotations_t",
+     "set_recipe_output_file_t",
      "set_recipe_outputs_t",
      "set_recipe_tags_t",
      "set_values_t",
@@ -97,6 +97,7 @@ class RecipeInput:
      curve_id: typing.Optional[base_t.ObjectId]
      actual_quantity_json: base_t.JsonValue
      behavior: str
+     ingredient_role_id: typing.Optional[base_t.ObjectId]
      quantity_dec: typing.Optional[Decimal] = None
      actual_quantity_dec: typing.Optional[Decimal] = None

@@ -0,0 +1,46 @@
+ # DO NOT MODIFY -- This file is generated by type_spec
+ # flake8: noqa: F821
+ # ruff: noqa: E402 Q003
+ # fmt: off
+ # isort: skip_file
+ from __future__ import annotations
+ import typing  # noqa: F401
+ import datetime  # noqa: F401
+ from decimal import Decimal  # noqa: F401
+ import dataclasses
+ from ... import base_t
+ from ... import response_t
+
+ __all__: list[str] = [
+     "Arguments",
+     "Data",
+     "ENDPOINT_METHOD",
+     "ENDPOINT_PATH",
+     "RecipeOutputFileValue",
+ ]
+
+ ENDPOINT_METHOD = "POST"
+ ENDPOINT_PATH = "api/external/recipes/external_set_recipe_output_file"
+
+
+ # DO NOT MODIFY -- This file is generated by type_spec
+ @dataclasses.dataclass(kw_only=True)
+ class RecipeOutputFileValue:
+     recipe_id: base_t.ObjectId
+     output_id: base_t.ObjectId
+     experiment_num: int
+     condition_id: typing.Optional[base_t.ObjectId] = None
+     file_id: typing.Optional[base_t.ObjectId] = None
+
+
+ # DO NOT MODIFY -- This file is generated by type_spec
+ @dataclasses.dataclass(kw_only=True)
+ class Arguments:
+     output_file_data: RecipeOutputFileValue
+
+
+ # DO NOT MODIFY -- This file is generated by type_spec
+ @dataclasses.dataclass(kw_only=True)
+ class Data(response_t.Response):
+     pass
+ # DO NOT MODIFY -- This file is generated by type_spec
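The client gains a matching call (see the client_base.py entry, +20 lines, in the file list), and the wheel ships examples/set_recipe_output_file_sdk.py. A hedged sketch of building the request arguments; the IDs are illustrative and the exact client method signature is not shown in this diff:

```python
from uncountable.types import set_recipe_output_file_t

# Illustrative IDs; real ObjectId values come from your Uncountable workspace.
args = set_recipe_output_file_t.Arguments(
    output_file_data=set_recipe_output_file_t.RecipeOutputFileValue(
        recipe_id=123,
        output_id=456,
        experiment_num=1,
        file_id=789,  # a previously uploaded file to attach to the output
    )
)
# The corresponding client method POSTs these arguments to ENDPOINT_PATH.
```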