climate-ref-celery 0.5.0__tar.gz → 0.5.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (24)
  1. {climate_ref_celery-0.5.0 → climate_ref_celery-0.5.1}/.gitignore +0 -1
  2. {climate_ref_celery-0.5.0 → climate_ref_celery-0.5.1}/PKG-INFO +1 -1
  3. {climate_ref_celery-0.5.0 → climate_ref_celery-0.5.1}/pyproject.toml +1 -1
  4. {climate_ref_celery-0.5.0 → climate_ref_celery-0.5.1}/src/climate_ref_celery/cli.py +6 -1
  5. {climate_ref_celery-0.5.0 → climate_ref_celery-0.5.1}/src/climate_ref_celery/executor.py +5 -10
  6. {climate_ref_celery-0.5.0 → climate_ref_celery-0.5.1}/src/climate_ref_celery/tasks.py +10 -35
  7. {climate_ref_celery-0.5.0 → climate_ref_celery-0.5.1}/tests/unit/test_cli.py +5 -3
  8. {climate_ref_celery-0.5.0 → climate_ref_celery-0.5.1}/tests/unit/test_executor.py +2 -2
  9. climate_ref_celery-0.5.1/tests/unit/test_tasks.py +23 -0
  10. climate_ref_celery-0.5.0/tests/unit/test_tasks.py +0 -47
  11. {climate_ref_celery-0.5.0 → climate_ref_celery-0.5.1}/LICENCE +0 -0
  12. {climate_ref_celery-0.5.0 → climate_ref_celery-0.5.1}/NOTICE +0 -0
  13. {climate_ref_celery-0.5.0 → climate_ref_celery-0.5.1}/README.md +0 -0
  14. {climate_ref_celery-0.5.0 → climate_ref_celery-0.5.1}/src/climate_ref_celery/__init__.py +0 -0
  15. {climate_ref_celery-0.5.0 → climate_ref_celery-0.5.1}/src/climate_ref_celery/app.py +0 -0
  16. {climate_ref_celery-0.5.0 → climate_ref_celery-0.5.1}/src/climate_ref_celery/celeryconf/__init__.py +0 -0
  17. {climate_ref_celery-0.5.0 → climate_ref_celery-0.5.1}/src/climate_ref_celery/celeryconf/base.py +0 -0
  18. {climate_ref_celery-0.5.0 → climate_ref_celery-0.5.1}/src/climate_ref_celery/celeryconf/dev.py +0 -0
  19. {climate_ref_celery-0.5.0 → climate_ref_celery-0.5.1}/src/climate_ref_celery/celeryconf/prod.py +0 -0
  20. {climate_ref_celery-0.5.0 → climate_ref_celery-0.5.1}/src/climate_ref_celery/py.typed +0 -0
  21. {climate_ref_celery-0.5.0 → climate_ref_celery-0.5.1}/src/climate_ref_celery/worker_tasks.py +0 -0
  22. {climate_ref_celery-0.5.0 → climate_ref_celery-0.5.1}/tests/conftest.py +0 -0
  23. {climate_ref_celery-0.5.0 → climate_ref_celery-0.5.1}/tests/unit/test_app.py +0 -0
  24. {climate_ref_celery-0.5.0 → climate_ref_celery-0.5.1}/tests/unit/test_worker_tasks.py +0 -0

.gitignore
@@ -74,7 +74,6 @@ coverage.xml
 *.pot
 
 # Django stuff:
-*.log
 local_settings.py
 db.sqlite3
 db.sqlite3-journal

PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: climate-ref-celery
-Version: 0.5.0
+Version: 0.5.1
 Summary: Celery app for mananging tasks and workers
 Author-email: Jared Lewis <jared.lewis@climate-resource.com>
 License-File: LICENCE

pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "climate-ref-celery"
-version = "0.5.0"
+version = "0.5.1"
 description = "Celery app for mananging tasks and workers"
 readme = "README.md"
 authors = [

src/climate_ref_celery/cli.py
@@ -54,6 +54,7 @@ def import_provider(provider_package: str) -> DiagnosticProvider:
 
 @app.command()
 def start_worker(
+    ctx: typer.Context,
     loglevel: str = typer.Option("info", help="Log level for the worker"),
     package: str | None = typer.Option(help="Package to import tasks from", default=None),
     extra_args: list[str] = typer.Argument(None, help="Additional arguments for the worker"),
@@ -75,6 +76,10 @@ def start_worker(
         # Attempt to import the provider
         provider = import_provider(package)
 
+        if hasattr(ctx.obj, "config"):
+            # Configure the provider so that it knows where the conda environments are
+            provider.configure(ctx.obj.config)
+
         # Wrap each diagnostics in the provider with a celery tasks
         register_celery_tasks(celery_app, provider)
         queue = provider.slug
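
The worker now picks its configuration off the Typer context when the surrounding CLI has attached one, and passes it to the provider so the provider knows where its conda environments live. A minimal sketch of how a caller might satisfy the hasattr check; the callback and the way the Config is built are illustrative, not this package's actual code:

    import typer
    from types import SimpleNamespace

    from climate_ref.config import Config

    cli = typer.Typer()

    @cli.callback()
    def main(ctx: typer.Context) -> None:
        # Hypothetical callback: attach a config object so that start_worker's
        # hasattr(ctx.obj, "config") check passes and provider.configure() runs.
        ctx.obj = SimpleNamespace(config=Config())  # how the Config is constructed is assumed
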
@@ -84,7 +89,7 @@ def start_worker(
 
         queue = "celery"
 
-    argv = ["worker", f"--loglevel={loglevel}", f"--queues={queue}", *(extra_args or [])]
+    argv = ["worker", "-E", f"--loglevel={loglevel}", f"--queues={queue}", *(extra_args or [])]
     celery_app.worker_main(argv=argv)
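
The added "-E" flag enables Celery task events, so monitoring tools can follow task state for these workers; the rest of the argv is unchanged. For a provider whose slug is "example", the call now amounts to the following, mirroring the updated expectations in tests/unit/test_cli.py later in this diff:

    # What start_worker now hands to the embedded worker for queue "example"
    # ("-E" turns on task events; any extra_args are appended at the end):
    celery_app.worker_main(
        argv=["worker", "-E", "--loglevel=info", "--queues=example"]
    )
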

src/climate_ref_celery/executor.py
@@ -14,9 +14,8 @@ from climate_ref.config import Config
 from climate_ref.models import Execution
 from climate_ref_celery.app import app
 from climate_ref_celery.tasks import generate_task_name
-from climate_ref_core.diagnostics import Diagnostic, ExecutionDefinition, ExecutionResult
+from climate_ref_core.diagnostics import ExecutionDefinition, ExecutionResult
 from climate_ref_core.executor import Executor
-from climate_ref_core.providers import DiagnosticProvider
 
 
 class CeleryExecutor(Executor):
@@ -43,8 +42,6 @@ class CeleryExecutor(Executor):
 
     def run(
         self,
-        provider: DiagnosticProvider,
-        diagnostic: Diagnostic,
         definition: ExecutionDefinition,
         execution: Execution | None = None,
     ) -> None:
@@ -62,10 +59,6 @@ class CeleryExecutor(Executor):
 
         Parameters
         ----------
-        provider
-            Provider for the diagnostic
-        diagnostic
-            Diagnostic to run
         definition
             A description of the information needed for this execution of the diagnostic
             This includes relative paths to the data files,
@@ -78,12 +71,14 @@ class CeleryExecutor(Executor):
         """
         from climate_ref_celery.worker_tasks import handle_result
 
-        name = generate_task_name(provider, diagnostic)
+        diagnostic = definition.diagnostic
+
+        name = generate_task_name(diagnostic.provider, diagnostic)
 
         async_result = app.send_task(
             name,
             args=[definition, self.config.log_level],
-            queue=provider.slug,
+            queue=diagnostic.provider.slug,
             link=handle_result.s(execution_id=execution.id).set(queue="celery") if execution else None,
         )
         logger.debug(f"Celery task {async_result.id} submitted")
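
With this change callers no longer pass the provider and diagnostic separately; both are reached through the execution definition. A minimal before/after sketch using the names from this package's tests:

    # climate-ref-celery 0.5.0: provider and diagnostic were explicit arguments
    executor.run(provider, diagnostic, definition, execution)

    # climate-ref-celery 0.5.1: the definition carries the diagnostic, which carries its provider
    executor.run(definition, execution)
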

src/climate_ref_celery/tasks.py
@@ -13,13 +13,10 @@ The main process is responsible for tracking what diagnostics have been registered
 and to respond to new workers coming online.
 """
 
-from collections.abc import Callable
-
 from celery import Celery
-from loguru import logger
 
-from climate_ref_core.diagnostics import Diagnostic, ExecutionDefinition, ExecutionResult
-from climate_ref_core.logging import redirect_logs
+from climate_ref_core.diagnostics import Diagnostic
+from climate_ref_core.executor import execute_locally
 from climate_ref_core.providers import DiagnosticProvider
 
 
@@ -30,32 +27,6 @@ def generate_task_name(provider: DiagnosticProvider, diagnostic: Diagnostic) ->
     return f"{provider.slug}.{diagnostic.slug}"
 
 
-def _diagnostic_task_factory(
-    diagnostic: Diagnostic,
-) -> Callable[
-    [ExecutionDefinition, str],
-    ExecutionResult,
-]:
-    """
-    Create a new task for the given diagnostic
-    """
-
-    def task(definition: ExecutionDefinition, log_level: str) -> ExecutionResult:
-        """
-        Task to run the diagnostic
-        """
-        logger.info(f"Running diagnostic {diagnostic.name} with definition {definition}")
-        try:
-            with redirect_logs(definition, log_level):
-                return diagnostic.run(definition)
-        except Exception:
-            logger.exception(f"Error running diagnostic {diagnostic.slug}:{definition.key}")
-            # TODO: This exception should be caught and a unsuccessful result returned.
-            raise
-
-    return task
-
-
 def register_celery_tasks(app: Celery, provider: DiagnosticProvider) -> None:
     """
     Register all tasks for the given provider
@@ -69,10 +40,14 @@ def register_celery_tasks(app: Celery, provider: DiagnosticProvider) -> None:
     provider
         The provider to register tasks for
     """
-    for metric in provider.diagnostics():
-        print(f"Registering task for diagnostic {metric.name}")
+    for diagnostic in provider.diagnostics():
+        print(f"Registering task for diagnostic {diagnostic.name}")
+
+        # The task function is the same for all diagnostics
+        # The diagnostic is included in the definition
+        # The queue is important to ensure that the task is run in the correct worker
         app.task(  # type: ignore
-            _diagnostic_task_factory(metric),
-            name=generate_task_name(provider, metric),
+            execute_locally,
+            name=generate_task_name(provider, diagnostic),
             queue=provider.slug,
         )
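
Every diagnostic now registers the shared execute_locally callable from climate_ref_core; what distinguishes the tasks is the name and the provider-specific queue. A short sketch of what registration produces for the mock provider used in the new tests below:

    # generate_task_name joins the provider and diagnostic slugs with a dot, so the
    # mock provider's two diagnostics end up registered as:
    #   name="mock_provider.mock",   queue="mock_provider"
    #   name="mock_provider.failed", queue="mock_provider"
    app.task(execute_locally, name=generate_task_name(provider, diagnostic), queue=provider.slug)
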

tests/unit/test_cli.py
@@ -37,7 +37,7 @@ def test_start_worker_success(mocker, mock_create_celery_app, mock_register_cele
     mock_import_module.assert_called_once_with("test_package")
     mock_register_celery_tasks.assert_called_once_with(mock_create_celery_app.return_value, mock_provider)
     mock_celery_app.worker_main.assert_called_once_with(
-        argv=["worker", "--loglevel=info", "--queues=example"]
+        argv=["worker", "-E", "--loglevel=info", "--queues=example"]
     )
 
 
@@ -47,7 +47,9 @@ def test_start_core_worker_success(mock_create_celery_app, mock_register_celery_
     result = runner.invoke(app, ["start-worker"])
 
     assert result.exit_code == 0
-    mock_celery_app.worker_main.assert_called_once_with(argv=["worker", "--loglevel=info", "--queues=celery"])
+    mock_celery_app.worker_main.assert_called_once_with(
+        argv=["worker", "-E", "--loglevel=info", "--queues=celery"]
+    )
 
 
 def test_start_worker_success_extra_args(mocker, mock_create_celery_app, mock_register_celery_tasks):
@@ -73,7 +75,7 @@ def test_start_worker_success_extra_args(mocker, mock_create_celery_app, mock_re
 
     assert result.exit_code == 0, result.output
     mock_worker_main.worker_main.assert_called_once_with(
-        argv=["worker", "--loglevel=error", "--queues=example", "--extra-args", "--concurrency=2"]
+        argv=["worker", "-E", "--loglevel=error", "--queues=example", "--extra-args", "--concurrency=2"]
     )
 
 

tests/unit/test_executor.py
@@ -10,7 +10,7 @@ def test_run_metric(provider, config, mock_diagnostic, metric_definition, mocker
     mock_execution_result = mocker.MagicMock()
 
     if include_execution_result:
-        executor.run(provider, mock_diagnostic, metric_definition, mock_execution_result)
+        executor.run(metric_definition, mock_execution_result)
 
         mock_app.send_task.assert_called_once_with(
             "mock_provider.mock",
@@ -19,7 +19,7 @@ def test_run_metric(provider, config, mock_diagnostic, metric_definition, mocker
             queue="mock_provider",
         )
     else:
-        executor.run(provider, mock_diagnostic, metric_definition, None)
+        executor.run(metric_definition, None)
 
         mock_app.send_task.assert_called_once_with(
             "mock_provider.mock",

climate_ref_celery-0.5.1/tests/unit/test_tasks.py
@@ -0,0 +1,23 @@
+from celery import Celery
+from climate_ref_celery.tasks import generate_task_name, register_celery_tasks
+
+
+def test_generate_task_name(mock_diagnostic):
+    assert mock_diagnostic.slug == "mock"
+    assert mock_diagnostic.provider.slug == "mock_provider"
+    assert generate_task_name(mock_diagnostic.provider, mock_diagnostic) == "mock_provider.mock"
+
+
+def test_registry_celery_tasks(provider, mocker):
+    """
+    Test that the tasks are registered correctly
+    """
+    assert len(provider) == 2
+
+    mock_app = mocker.MagicMock(spec=Celery)
+    register_celery_tasks(mock_app, provider)
+
+    assert mock_app.task.call_count == 2
+    assert mock_app.task.call_args_list[0].kwargs["name"] == "mock_provider.mock"
+    assert mock_app.task.call_args_list[1].kwargs["name"] == "mock_provider.failed"
+    assert mock_app.task.call_args_list[0].kwargs["queue"] == "mock_provider"

climate_ref_celery-0.5.0/tests/unit/test_tasks.py
@@ -1,47 +0,0 @@
-from unittest.mock import Mock
-
-from celery import Celery
-from climate_ref_celery.tasks import _diagnostic_task_factory, register_celery_tasks
-
-from climate_ref_core.diagnostics import ExecutionDefinition
-from climate_ref_core.providers import DiagnosticProvider
-
-
-def test_diagnostic_task_factory(tmp_path, caplog):
-    # Mock Diagnostic and ExecutionDefinition
-    mock_metric = Mock()
-
-    definition = ExecutionDefinition(
-        key="test", datasets=None, output_directory=tmp_path / "output", root_directory=tmp_path
-    )
-
-    # Create task using factory
-    task = _diagnostic_task_factory(mock_metric)
-
-    # Run task and check result
-    result = task(definition, "INFO")
-    assert result == mock_metric.run.return_value
-    mock_metric.run.assert_called_once_with(definition)
-
-
-def test_register_celery_tasks(mocker):
-    mock_task_factory = mocker.patch("climate_ref_celery.tasks._diagnostic_task_factory")
-    # Mock Celery app and DiagnosticProvider
-    mock_app = Mock(spec=Celery)
-    mock_provider = Mock(spec=DiagnosticProvider)
-    mock_provider.slug = "test_provider"
-    mock_provider.diagnostics.return_value = [Mock(), Mock()]
-    mock_provider.diagnostics.return_value[0].slug = "metric1"
-    mock_provider.diagnostics.return_value[1].slug = "metric2"
-
-    # Register tasks
-    register_celery_tasks(mock_app, mock_provider)
-
-    # Check that tasks are registered
-    assert mock_app.task.call_count == 2
-    mock_app.task.assert_any_call(
-        mock_task_factory(mock_provider.diagnostics()[0]), name="test_provider.metric1", queue="test_provider"
-    )
-    mock_app.task.assert_any_call(
-        mock_task_factory(mock_provider.diagnostics()[1]), name="test_provider.metric2", queue="test_provider"
-    )