beanqueue 0.1.2__tar.gz → 0.1.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26) hide show
  1. {beanqueue-0.1.2 → beanqueue-0.1.3}/PKG-INFO +109 -28
  2. {beanqueue-0.1.2 → beanqueue-0.1.3}/README.md +108 -27
  3. beanqueue-0.1.3/bq/__init__.py +11 -0
  4. {beanqueue-0.1.2 → beanqueue-0.1.3}/bq/cmds/process.py +30 -20
  5. {beanqueue-0.1.2 → beanqueue-0.1.3}/bq/config.py +9 -0
  6. beanqueue-0.1.3/bq/constants.py +4 -0
  7. {beanqueue-0.1.2 → beanqueue-0.1.3}/bq/container.py +29 -6
  8. beanqueue-0.1.3/bq/models/__init__.py +8 -0
  9. {beanqueue-0.1.2 → beanqueue-0.1.3}/bq/models/task.py +40 -26
  10. {beanqueue-0.1.2 → beanqueue-0.1.3}/bq/models/worker.py +25 -13
  11. {beanqueue-0.1.2 → beanqueue-0.1.3}/bq/processors/registry.py +7 -7
  12. {beanqueue-0.1.2 → beanqueue-0.1.3}/bq/services/dispatch.py +14 -11
  13. {beanqueue-0.1.2 → beanqueue-0.1.3}/bq/services/worker.py +26 -12
  14. {beanqueue-0.1.2 → beanqueue-0.1.3}/pyproject.toml +1 -1
  15. beanqueue-0.1.2/bq/models/__init__.py +0 -4
  16. beanqueue-0.1.2/bq/services/__init__.py +0 -0
  17. {beanqueue-0.1.2 → beanqueue-0.1.3}/LICENSE +0 -0
  18. {beanqueue-0.1.2/bq → beanqueue-0.1.3/bq/cmds}/__init__.py +0 -0
  19. {beanqueue-0.1.2 → beanqueue-0.1.3}/bq/cmds/create_tables.py +1 -1
  20. {beanqueue-0.1.2 → beanqueue-0.1.3}/bq/cmds/submit.py +1 -1
  21. {beanqueue-0.1.2/bq/cmds → beanqueue-0.1.3/bq/db}/__init__.py +0 -0
  22. {beanqueue-0.1.2 → beanqueue-0.1.3}/bq/db/base.py +0 -0
  23. {beanqueue-0.1.2 → beanqueue-0.1.3}/bq/db/session.py +0 -0
  24. {beanqueue-0.1.2 → beanqueue-0.1.3}/bq/models/helpers.py +0 -0
  25. {beanqueue-0.1.2/bq/db → beanqueue-0.1.3/bq/processors}/__init__.py +0 -0
  26. {beanqueue-0.1.2/bq/processors → beanqueue-0.1.3/bq/services}/__init__.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: beanqueue
3
- Version: 0.1.2
3
+ Version: 0.1.3
4
4
  Summary: BeanQueue or BQ for short, PostgreSQL SKIP LOCK based worker queue library
5
5
  License: MIT
6
6
  Author: Fang-Pen Lin
@@ -19,7 +19,7 @@ Requires-Dist: venusian (>=3.1.0,<4.0.0)
19
19
  Description-Content-Type: text/markdown
20
20
 
21
21
  # BeanQueue [![CircleCI](https://dl.circleci.com/status-badge/img/gh/LaunchPlatform/bq/tree/master.svg?style=svg)](https://dl.circleci.com/status-badge/redirect/gh/LaunchPlatform/beanhub-extract/tree/master)
22
- BeanQueue, a lightweight worker queue framework based on [SQLAlchemy](https://www.sqlalchemy.org/), [PostgreSQL SKIP LOCKED queries](https://www.2ndquadrant.com/en/blog/what-is-select-skip-locked-for-in-postgresql-9-5/) and [NOTIFY](https://www.postgresql.org/docs/current/sql-notify.html) / [LISTEN](https://www.postgresql.org/docs/current/sql-listen.html) statements.
22
+ BeanQueue, a lightweight worker queue framework based on [SQLAlchemy](https://www.sqlalchemy.org/), PostgreSQL [SKIP LOCKED queries](https://www.2ndquadrant.com/en/blog/what-is-select-skip-locked-for-in-postgresql-9-5/) and [NOTIFY](https://www.postgresql.org/docs/current/sql-notify.html) / [LISTEN](https://www.postgresql.org/docs/current/sql-listen.html) statements.
23
23
 
24
24
  **Notice**: Still in its early stage, we built this for [BeanHub](https://beanhub.io)'s internal usage. May change rapidly. Use at your own risk for now.
25
25
 
@@ -29,7 +29,7 @@ BeanQueue, a lightweight worker queue framework based on [SQLAlchemy](https://ww
29
29
  - **Easy-to-deploy**: Only rely on PostgreSQL
30
30
  - **Easy-to-use**: Provide command line tools for processing tasks, also helpers for generating tasks models
31
31
  - **Auto-notify**: Notify will automatically be generated and send for inserted or update tasks
32
- - **Worker heartbeat and auto-reschedule**: Each worker keeps updating heartbeat, if one is dead, the others will reschedule the tasks
32
+ - **Worker heartbeat and auto-reschedule**: Each worker keeps updating heartbeat, if one is found dead, the others will reschedule the tasks
33
33
  - **Customizable**: Use it as an library and build your own worker queue
34
34
  - **Native DB operations**: Commit your tasks with other db entries altogether without worrying about data inconsistent issue
35
35
 
@@ -46,14 +46,13 @@ You can define a task processor like this
46
46
  ```python
47
47
  from sqlalchemy.orm import Session
48
48
 
49
- from bq.processors.registry import processor
50
- from bq import models
51
- from .. import my_models
49
+ import bq
50
+ from .. import models
52
51
  from .. import image_utils
53
52
 
54
- @processor(channel="images")
55
- def resize_image(db: Session, task: models.Task, width: int, height: int):
56
- image = db.query(my_models.Image).filter(my_models.Image.task == task).one()
53
+ @bq.processor(channel="images")
54
+ def resize_image(db: Session, task: bq.Task, width: int, height: int):
55
+ image = db.query(models.Image).filter(models.Image.task == task).one()
57
56
  image_utils.resize(image, size=(width, height))
58
57
  db.add(image)
59
58
  # by default the `processor` decorator has `auto_complete` flag turns on,
@@ -63,21 +62,21 @@ def resize_image(db: Session, task: models.Task, width: int, height: int):
63
62
  The `db` and `task` keyword arguments are optional.
64
63
  If you don't need to access the task object, you can simply define the function without these two parameters.
65
64
 
66
- To submit a task, you can either use `bq.models.Task` model object to construct the task object, insert into the
65
+ To submit a task, you can either use `bq.Task` model object to construct the task object, insert into the
67
66
  database session and commit.
68
67
 
69
68
  ```python
70
- from bq import models
69
+ import bq
71
70
  from .db import Session
72
- from .. import my_models
71
+ from .. import models
73
72
 
74
73
  db = Session()
75
- task = models.Task(
74
+ task = bq.Task(
76
75
  channel="files",
77
76
  module="my_pkgs.files.processors",
78
77
  name="upload_to_s3_for_backup",
79
78
  )
80
- file = my_models.File(
79
+ file = models.File(
81
80
  task=task,
82
81
  blob_name="...",
83
82
  )
@@ -112,6 +111,7 @@ To run the worker, you can do this:
112
111
  BQ_PROCESSOR_PACKAGES='["my_pkgs.processors"]' python -m bq.cmds.process images
113
112
  ```
114
113
 
114
+ The `BQ_PROCESSOR_PACKAGES` is a JSON list that contains the Python packages where you define your processors (the functions you decorated with `bq.processors.registry.processor`).
115
115
  To submit a task for testing purpose, you can do
116
116
 
117
117
  ```bash
@@ -136,24 +136,104 @@ If you want to configure BeanQueue programmatically for the command lines, you c
136
136
  For example:
137
137
 
138
138
  ```python
139
- import bq.cmds.process
140
- from bq.container import Container
141
- from bq.config import Config
142
-
143
- container = Container()
144
- container.wire(modules=[bq.cmds.process])
145
- with container.config.override(
146
- Config(
147
- PROCESSOR_PACKAGES=["my_pkgs.processors"],
148
- DATABASE_URL="postgresql://...",
149
- BATCH_SIZE=10,
150
- )
151
- ):
152
- bq.cmds.process.process_tasks(channels=("images",))
139
+ import bq
140
+ from bq.cmds.process import process_tasks
141
+ from .my_config import config
142
+
143
+ container = bq.Container()
144
+ container.wire(packages=[bq])
145
+ config = bq.Config(
146
+ PROCESSOR_PACKAGES=["my_pkgs.processors"],
147
+ DATABASE_URL=str(config.DATABASE_URL),
148
+ BATCH_SIZE=10,
149
+ )
150
+ with container.config.override(config):
151
+ process_tasks(channels=("images",))
153
152
  ```
154
153
 
155
154
  Many other behaviors of this framework can also be modified by overriding the container defined at [bq/container.py](bq/container.py).
156
155
 
156
+ ### Define your own tables
157
+
158
+ BeanQueue is designed to be as customizable as possible.
159
+ Of course, you can define your own SQLAlchemy model instead of using the ones we provided.
160
+
161
+ To make defining your own `Task` model or `Worker` model much easier, you can use our mixin classes:
162
+
163
+ - `bq.TaskModelMixin`: provides task model columns
164
+ - `bq.TaskModelRefWorkerMixin`: provides foreign key column and relationship to `bq.Worker`
165
+ - `bq.WorkerModelMixin`: provides worker model columns
166
+ - `bq.WorkerRefMixin`: provides relationship to `bq.Task`
167
+
168
+ Here's an example for defining your own Task model:
169
+
170
+ ```python
171
+ import uuid
172
+
173
+ import bq
174
+ from sqlalchemy import ForeignKey
175
+ from sqlalchemy.dialects.postgresql import UUID
176
+ from sqlalchemy.orm import Mapped
177
+ from sqlalchemy.orm import mapped_column
178
+ from sqlalchemy.orm import relationship
179
+
180
+ from .base_class import Base
181
+
182
+
183
+ class Task(bq.TaskModelMixin, Base):
184
+ __tablename__ = "task"
185
+ worker_id: Mapped[uuid.UUID] = mapped_column(
186
+ UUID(as_uuid=True),
187
+ ForeignKey("worker.id", onupdate="CASCADE"),
188
+ nullable=True,
189
+ index=True,
190
+ )
191
+
192
+ worker: Mapped["Worker"] = relationship(
193
+ "Worker", back_populates="tasks", uselist=False
194
+ )
195
+ ```
196
+
197
+ To make task inserts, and updates that change the state to `PENDING`, automatically send out a NOTIFY "channel" statement, you can use the `bq.models.task.listen_events` helper to register our SQLAlchemy event handlers like this
198
+
199
+ ```python
200
+ from bq.models.task import listen_events
201
+ listen_events(Task)
202
+ ```
203
+
204
+ You just saw how easy it is to define your Task model. Now, here's an example for defining your own Worker model:
205
+
206
+ ```python
207
+ import bq
208
+ from sqlalchemy.orm import Mapped
209
+ from sqlalchemy.orm import relationship
210
+
211
+ from .base_class import Base
212
+
213
+
214
+ class Worker(bq.WorkerModelMixin, Base):
215
+ __tablename__ = "worker"
216
+
217
+ tasks: Mapped[list["Task"]] = relationship(
218
+ "Task",
219
+ back_populates="worker",
220
+ cascade="all,delete",
221
+ order_by="Task.created_at",
222
+ )
223
+ ```
224
+
225
+ With the model class ready, you only need to change the `TASK_MODEL` and `WORKER_MODEL` of `Config` to the full Python module name plus the class name like this.
226
+
227
+ ```python
228
+ import bq
229
+ config = bq.Config(
230
+ TASK_MODEL="my_pkgs.models.Task",
231
+ WORKER_MODEL="my_pkgs.models.Worker",
232
+ # ... other configs
233
+ )
234
+ # Override container...
235
+ ```
236
+
157
237
  ## Why?
158
238
 
159
239
  There are countless worker queue projects. Why make yet another one?
@@ -230,6 +310,7 @@ A modern accounting book service based on the most popular open source version c
230
310
 
231
311
  - [solid_queue](https://github.com/rails/solid_queue)
232
312
  - [postgres-tq](https://github.com/flix-tech/postgres-tq)
313
+ - [pq](https://github.com/malthe/pq/)
233
314
  - [PgQueuer](https://github.com/janbjorge/PgQueuer)
234
315
  - [hatchet](https://github.com/hatchet-dev/hatchet)
235
316
 
@@ -1,5 +1,5 @@
1
1
  # BeanQueue [![CircleCI](https://dl.circleci.com/status-badge/img/gh/LaunchPlatform/bq/tree/master.svg?style=svg)](https://dl.circleci.com/status-badge/redirect/gh/LaunchPlatform/beanhub-extract/tree/master)
2
- BeanQueue, a lightweight worker queue framework based on [SQLAlchemy](https://www.sqlalchemy.org/), [PostgreSQL SKIP LOCKED queries](https://www.2ndquadrant.com/en/blog/what-is-select-skip-locked-for-in-postgresql-9-5/) and [NOTIFY](https://www.postgresql.org/docs/current/sql-notify.html) / [LISTEN](https://www.postgresql.org/docs/current/sql-listen.html) statements.
2
+ BeanQueue, a lightweight worker queue framework based on [SQLAlchemy](https://www.sqlalchemy.org/), PostgreSQL [SKIP LOCKED queries](https://www.2ndquadrant.com/en/blog/what-is-select-skip-locked-for-in-postgresql-9-5/) and [NOTIFY](https://www.postgresql.org/docs/current/sql-notify.html) / [LISTEN](https://www.postgresql.org/docs/current/sql-listen.html) statements.
3
3
 
4
4
  **Notice**: Still in its early stage, we built this for [BeanHub](https://beanhub.io)'s internal usage. May change rapidly. Use at your own risk for now.
5
5
 
@@ -9,7 +9,7 @@ BeanQueue, a lightweight worker queue framework based on [SQLAlchemy](https://ww
9
9
  - **Easy-to-deploy**: Only rely on PostgreSQL
10
10
  - **Easy-to-use**: Provide command line tools for processing tasks, also helpers for generating tasks models
11
11
  - **Auto-notify**: Notify will automatically be generated and send for inserted or update tasks
12
- - **Worker heartbeat and auto-reschedule**: Each worker keeps updating heartbeat, if one is dead, the others will reschedule the tasks
12
+ - **Worker heartbeat and auto-reschedule**: Each worker keeps updating heartbeat, if one is found dead, the others will reschedule the tasks
13
13
  - **Customizable**: Use it as an library and build your own worker queue
14
14
  - **Native DB operations**: Commit your tasks with other db entries altogether without worrying about data inconsistent issue
15
15
 
@@ -26,14 +26,13 @@ You can define a task processor like this
26
26
  ```python
27
27
  from sqlalchemy.orm import Session
28
28
 
29
- from bq.processors.registry import processor
30
- from bq import models
31
- from .. import my_models
29
+ import bq
30
+ from .. import models
32
31
  from .. import image_utils
33
32
 
34
- @processor(channel="images")
35
- def resize_image(db: Session, task: models.Task, width: int, height: int):
36
- image = db.query(my_models.Image).filter(my_models.Image.task == task).one()
33
+ @bq.processor(channel="images")
34
+ def resize_image(db: Session, task: bq.Task, width: int, height: int):
35
+ image = db.query(models.Image).filter(models.Image.task == task).one()
37
36
  image_utils.resize(image, size=(width, height))
38
37
  db.add(image)
39
38
  # by default the `processor` decorator has `auto_complete` flag turns on,
@@ -43,21 +42,21 @@ def resize_image(db: Session, task: models.Task, width: int, height: int):
43
42
  The `db` and `task` keyword arguments are optional.
44
43
  If you don't need to access the task object, you can simply define the function without these two parameters.
45
44
 
46
- To submit a task, you can either use `bq.models.Task` model object to construct the task object, insert into the
45
+ To submit a task, you can either use `bq.Task` model object to construct the task object, insert into the
47
46
  database session and commit.
48
47
 
49
48
  ```python
50
- from bq import models
49
+ import bq
51
50
  from .db import Session
52
- from .. import my_models
51
+ from .. import models
53
52
 
54
53
  db = Session()
55
- task = models.Task(
54
+ task = bq.Task(
56
55
  channel="files",
57
56
  module="my_pkgs.files.processors",
58
57
  name="upload_to_s3_for_backup",
59
58
  )
60
- file = my_models.File(
59
+ file = models.File(
61
60
  task=task,
62
61
  blob_name="...",
63
62
  )
@@ -92,6 +91,7 @@ To run the worker, you can do this:
92
91
  BQ_PROCESSOR_PACKAGES='["my_pkgs.processors"]' python -m bq.cmds.process images
93
92
  ```
94
93
 
94
+ The `BQ_PROCESSOR_PACKAGES` is a JSON list that contains the Python packages where you define your processors (the functions you decorated with `bq.processors.registry.processor`).
95
95
  To submit a task for testing purpose, you can do
96
96
 
97
97
  ```bash
@@ -116,24 +116,104 @@ If you want to configure BeanQueue programmatically for the command lines, you c
116
116
  For example:
117
117
 
118
118
  ```python
119
- import bq.cmds.process
120
- from bq.container import Container
121
- from bq.config import Config
122
-
123
- container = Container()
124
- container.wire(modules=[bq.cmds.process])
125
- with container.config.override(
126
- Config(
127
- PROCESSOR_PACKAGES=["my_pkgs.processors"],
128
- DATABASE_URL="postgresql://...",
129
- BATCH_SIZE=10,
130
- )
131
- ):
132
- bq.cmds.process.process_tasks(channels=("images",))
119
+ import bq
120
+ from bq.cmds.process import process_tasks
121
+ from .my_config import config
122
+
123
+ container = bq.Container()
124
+ container.wire(packages=[bq])
125
+ config = bq.Config(
126
+ PROCESSOR_PACKAGES=["my_pkgs.processors"],
127
+ DATABASE_URL=str(config.DATABASE_URL),
128
+ BATCH_SIZE=10,
129
+ )
130
+ with container.config.override(config):
131
+ process_tasks(channels=("images",))
133
132
  ```
134
133
 
135
134
  Many other behaviors of this framework can also be modified by overriding the container defined at [bq/container.py](bq/container.py).
136
135
 
136
+ ### Define your own tables
137
+
138
+ BeanQueue is designed to be as customizable as possible.
139
+ Of course, you can define your own SQLAlchemy model instead of using the ones we provided.
140
+
141
+ To make defining your own `Task` model or `Worker` model much easier, you can use our mixin classes:
142
+
143
+ - `bq.TaskModelMixin`: provides task model columns
144
+ - `bq.TaskModelRefWorkerMixin`: provides foreign key column and relationship to `bq.Worker`
145
+ - `bq.WorkerModelMixin`: provides worker model columns
146
+ - `bq.WorkerRefMixin`: provides relationship to `bq.Task`
147
+
148
+ Here's an example for defining your own Task model:
149
+
150
+ ```python
151
+ import uuid
152
+
153
+ import bq
154
+ from sqlalchemy import ForeignKey
155
+ from sqlalchemy.dialects.postgresql import UUID
156
+ from sqlalchemy.orm import Mapped
157
+ from sqlalchemy.orm import mapped_column
158
+ from sqlalchemy.orm import relationship
159
+
160
+ from .base_class import Base
161
+
162
+
163
+ class Task(bq.TaskModelMixin, Base):
164
+ __tablename__ = "task"
165
+ worker_id: Mapped[uuid.UUID] = mapped_column(
166
+ UUID(as_uuid=True),
167
+ ForeignKey("worker.id", onupdate="CASCADE"),
168
+ nullable=True,
169
+ index=True,
170
+ )
171
+
172
+ worker: Mapped["Worker"] = relationship(
173
+ "Worker", back_populates="tasks", uselist=False
174
+ )
175
+ ```
176
+
177
+ To make task inserts, and updates that change the state to `PENDING`, automatically send out a NOTIFY "channel" statement, you can use the `bq.models.task.listen_events` helper to register our SQLAlchemy event handlers like this
178
+
179
+ ```python
180
+ from bq.models.task import listen_events
181
+ listen_events(Task)
182
+ ```
183
+
184
+ You just saw how easy it is to define your Task model. Now, here's an example for defining your own Worker model:
185
+
186
+ ```python
187
+ import bq
188
+ from sqlalchemy.orm import Mapped
189
+ from sqlalchemy.orm import relationship
190
+
191
+ from .base_class import Base
192
+
193
+
194
+ class Worker(bq.WorkerModelMixin, Base):
195
+ __tablename__ = "worker"
196
+
197
+ tasks: Mapped[list["Task"]] = relationship(
198
+ "Task",
199
+ back_populates="worker",
200
+ cascade="all,delete",
201
+ order_by="Task.created_at",
202
+ )
203
+ ```
204
+
205
+ With the model class ready, you only need to change the `TASK_MODEL` and `WORKER_MODEL` of `Config` to the full Python module name plus the class name like this.
206
+
207
+ ```python
208
+ import bq
209
+ config = bq.Config(
210
+ TASK_MODEL="my_pkgs.models.Task",
211
+ WORKER_MODEL="my_pkgs.models.Worker",
212
+ # ... other configs
213
+ )
214
+ # Override container...
215
+ ```
216
+
137
217
  ## Why?
138
218
 
139
219
  There are countless worker queue projects. Why make yet another one?
@@ -210,5 +290,6 @@ A modern accounting book service based on the most popular open source version c
210
290
 
211
291
  - [solid_queue](https://github.com/rails/solid_queue)
212
292
  - [postgres-tq](https://github.com/flix-tech/postgres-tq)
293
+ - [pq](https://github.com/malthe/pq/)
213
294
  - [PgQueuer](https://github.com/janbjorge/PgQueuer)
214
295
  - [hatchet](https://github.com/hatchet-dev/hatchet)
@@ -0,0 +1,11 @@
1
+ from .config import Config # noqa
2
+ from .container import Container # noqa
3
+ from .models import Task # noqa
4
+ from .models import TaskModelMixin
5
+ from .models import TaskModelRefWorkerMixin
6
+ from .models import TaskState # noqa
7
+ from .models import Worker # noqa
8
+ from .models import WorkerModelMixin # noqa
9
+ from .models import WorkerRefMixin # noqa
10
+ from .models import WorkerState # noqa
11
+ from .processors.registry import processor # noqa
@@ -6,7 +6,6 @@ import sys
6
6
  import threading
7
7
  import time
8
8
  import typing
9
- import uuid
10
9
 
11
10
  import click
12
11
  from dependency_injector.wiring import inject
@@ -14,6 +13,7 @@ from dependency_injector.wiring import Provide
14
13
  from sqlalchemy import func
15
14
  from sqlalchemy.orm import Session as DBSession
16
15
 
16
+ from .. import constants
17
17
  from .. import models
18
18
  from ..config import Config
19
19
  from ..container import Container
@@ -22,27 +22,32 @@ from ..services.dispatch import DispatchService
22
22
  from ..services.worker import WorkerService
23
23
 
24
24
 
25
+ @inject
25
26
  def update_workers(
26
- make_session: typing.Callable[[], DBSession],
27
- worker_id: uuid.UUID,
28
- heartbeat_period: int,
29
- heartbeat_timeout: int,
27
+ worker_id: typing.Any,
28
+ config: Config = Provide[Container.config],
29
+ session_factory: typing.Callable = Provide[Container.session_factory],
30
+ make_dispatch_service: typing.Callable = Provide[Container.make_dispatch_service],
31
+ make_worker_service: typing.Callable = Provide[Container.make_worker_service],
30
32
  ):
31
- db: DBSession = make_session()
32
- worker_service = WorkerService(session=db)
33
- dispatch_service = DispatchService(session=db)
34
- current_worker = db.get(models.Worker, worker_id)
33
+ db: DBSession = session_factory()
34
+ worker_service: WorkerService = make_worker_service(session=db)
35
+ dispatch_service: DispatchService = make_dispatch_service(session=db)
36
+ current_worker = worker_service.get_worker(worker_id)
35
37
  logger = logging.getLogger(__name__)
36
38
  logger.info(
37
39
  "Updating worker %s with heartbeat_period=%s, heartbeat_timeout=%s",
38
40
  current_worker.id,
39
- heartbeat_period,
40
- heartbeat_timeout,
41
+ config.WORKER_HEARTBEAT_PERIOD,
42
+ config.WORKER_HEARTBEAT_TIMEOUT,
41
43
  )
42
44
  while True:
43
- dead_workers = worker_service.fetch_dead_workers(timeout=heartbeat_timeout)
45
+ dead_workers = worker_service.fetch_dead_workers(
46
+ timeout=config.WORKER_HEARTBEAT_TIMEOUT
47
+ )
44
48
  task_count = worker_service.reschedule_dead_tasks(
45
- dead_workers.with_entities(models.Worker.id)
49
+ # TODO: a better way to abstract this?
50
+ dead_workers.with_entities(current_worker.__class__.id)
46
51
  )
47
52
  found_dead_worker = False
48
53
  for dead_worker in dead_workers:
@@ -58,7 +63,16 @@ def update_workers(
58
63
  if found_dead_worker:
59
64
  db.commit()
60
65
 
61
- time.sleep(heartbeat_period)
66
+ if current_worker.state != models.WorkerState.RUNNING:
67
+ # This probably means we are somehow very slow to update the heartbeat in time, or the timeout window
68
+ # is set too short. It could also be the administrator update the worker state to something else than
69
+ # RUNNING. Regardless the reason, let's stop processing.
70
+ logger.warning(
71
+ "Current worker %s state is %s instead of running, quit processing"
72
+ )
73
+ sys.exit(0)
74
+
75
+ time.sleep(config.WORKER_HEARTBEAT_PERIOD)
62
76
  current_worker.last_heartbeat = func.now()
63
77
  db.add(current_worker)
64
78
  db.commit()
@@ -68,7 +82,6 @@ def update_workers(
68
82
  def process_tasks(
69
83
  channels: tuple[str, ...],
70
84
  config: Config = Provide[Container.config],
71
- session_factory: typing.Callable = Provide[Container.session_factory],
72
85
  db: DBSession = Provide[Container.session],
73
86
  dispatch_service: DispatchService = Provide[Container.dispatch_service],
74
87
  worker_service: WorkerService = Provide[Container.worker_service],
@@ -76,7 +89,7 @@ def process_tasks(
76
89
  logger = logging.getLogger(__name__)
77
90
 
78
91
  if not channels:
79
- channels = ["default"]
92
+ channels = [constants.DEFAULT_CHANNEL]
80
93
 
81
94
  if not config.PROCESSOR_PACKAGES:
82
95
  logger.error("No PROCESSOR_PACKAGES provided")
@@ -93,7 +106,7 @@ def process_tasks(
93
106
  " Processor module %r, processor %r", module, processor.name
94
107
  )
95
108
 
96
- worker = models.Worker(name=platform.node(), channels=channels)
109
+ worker = worker_service.make_worker(name=platform.node(), channels=channels)
97
110
  db.add(worker)
98
111
  dispatch_service.listen(channels)
99
112
  db.commit()
@@ -104,10 +117,7 @@ def process_tasks(
104
117
  worker_update_thread = threading.Thread(
105
118
  target=functools.partial(
106
119
  update_workers,
107
- make_session=session_factory,
108
120
  worker_id=worker.id,
109
- heartbeat_period=config.WORKER_HEARTBEAT_PERIOD,
110
- heartbeat_timeout=config.WORKER_HEARTBEAT_TIMEOUT,
111
121
  ),
112
122
  name="update_workers",
113
123
  )
@@ -3,6 +3,7 @@ import typing
3
3
  from pydantic import field_validator
4
4
  from pydantic import PostgresDsn
5
5
  from pydantic import ValidationInfo
6
+ from pydantic_core import MultiHostUrl
6
7
  from pydantic_settings import BaseSettings
7
8
  from pydantic_settings import SettingsConfigDict
8
9
 
@@ -23,6 +24,12 @@ class Config(BaseSettings):
23
24
  # Timeout of worker heartbeat in seconds
24
25
  WORKER_HEARTBEAT_TIMEOUT: int = 100
25
26
 
27
+ # which task model to use
28
+ TASK_MODEL: str = "bq.Task"
29
+
30
+ # which worker model to use
31
+ WORKER_MODEL: str = "bq.Worker"
32
+
26
33
  POSTGRES_SERVER: str = "localhost"
27
34
  POSTGRES_USER: str = "bq"
28
35
  POSTGRES_PASSWORD: str = ""
@@ -36,6 +43,8 @@ class Config(BaseSettings):
36
43
  ) -> typing.Any:
37
44
  if isinstance(v, str):
38
45
  return v
46
+ if isinstance(v, MultiHostUrl):
47
+ return v
39
48
  return PostgresDsn.build(
40
49
  scheme="postgresql",
41
50
  username=info.data.get("POSTGRES_USER"),
@@ -0,0 +1,4 @@
1
+ # the name of default channel to use if not provided
2
+ DEFAULT_CHANNEL = "default"
3
+ # category value for venusian to scan functions decorated with `processor`
4
+ BQ_PROCESSOR_CATEGORY = "bq_processor"
@@ -1,4 +1,5 @@
1
1
  import functools
2
+ import importlib
2
3
  import typing
3
4
 
4
5
  from dependency_injector import containers
@@ -14,6 +15,12 @@ from .services.dispatch import DispatchService
14
15
  from .services.worker import WorkerService
15
16
 
16
17
 
18
+ def get_model_class(name: str) -> typing.Type:
19
+ module_name, model_name = name.rsplit(".", 1)
20
+ module = importlib.import_module(module_name)
21
+ return getattr(module, model_name)
22
+
23
+
17
24
  def make_db_engine(config: Config) -> Engine:
18
25
  return create_engine(str(config.DATABASE_URL), poolclass=SingletonThreadPool)
19
26
 
@@ -26,12 +33,16 @@ def make_session(factory: typing.Callable) -> DBSession:
26
33
  return factory()
27
34
 
28
35
 
29
- def make_dispatch_service(session: DBSession) -> DispatchService:
30
- return DispatchService(session)
36
+ def make_dispatch_service(config: Config, session: DBSession) -> DispatchService:
37
+ return DispatchService(session, task_model=get_model_class(config.TASK_MODEL))
31
38
 
32
39
 
33
- def make_worker_service(session: DBSession) -> WorkerService:
34
- return WorkerService(session)
40
+ def make_worker_service(config: Config, session: DBSession) -> WorkerService:
41
+ return WorkerService(
42
+ session,
43
+ task_model=get_model_class(config.TASK_MODEL),
44
+ worker_model=get_model_class(config.WORKER_MODEL),
45
+ )
35
46
 
36
47
 
37
48
  class Container(containers.DeclarativeContainer):
@@ -46,9 +57,21 @@ class Container(containers.DeclarativeContainer):
46
57
  session: DBSession = providers.Singleton(make_session, factory=session_factory)
47
58
 
48
59
  dispatch_service: DispatchService = providers.Singleton(
49
- make_dispatch_service, session=session
60
+ make_dispatch_service,
61
+ config=config,
62
+ session=session,
50
63
  )
51
64
 
52
65
  worker_service: WorkerService = providers.Singleton(
53
- make_worker_service, session=session
66
+ make_worker_service, config=config, session=session
67
+ )
68
+
69
+ make_dispatch_service = providers.Singleton(
70
+ lambda config: functools.partial(make_dispatch_service, config=config),
71
+ config=config,
72
+ )
73
+
74
+ make_worker_service = providers.Singleton(
75
+ lambda config: functools.partial(make_worker_service, config=config),
76
+ config=config,
54
77
  )
@@ -0,0 +1,8 @@
1
+ from .task import Task
2
+ from .task import TaskModelMixin
3
+ from .task import TaskModelRefWorkerMixin
4
+ from .task import TaskState
5
+ from .worker import Worker
6
+ from .worker import WorkerModelMixin
7
+ from .worker import WorkerRefMixin
8
+ from .worker import WorkerState
@@ -1,6 +1,8 @@
1
+ import datetime
1
2
  import enum
3
+ import typing
4
+ import uuid
2
5
 
3
- from sqlalchemy import Column
4
6
  from sqlalchemy import Connection
5
7
  from sqlalchemy import DateTime
6
8
  from sqlalchemy import Enum
@@ -11,6 +13,9 @@ from sqlalchemy import inspect
11
13
  from sqlalchemy import String
12
14
  from sqlalchemy.dialects.postgresql import JSONB
13
15
  from sqlalchemy.dialects.postgresql import UUID
16
+ from sqlalchemy.orm import declared_attr
17
+ from sqlalchemy.orm import Mapped
18
+ from sqlalchemy.orm import mapped_column
14
19
  from sqlalchemy.orm import Mapper
15
20
  from sqlalchemy.orm import relationship
16
21
 
@@ -29,18 +34,12 @@ class TaskState(enum.Enum):
29
34
  FAILED = "FAILED"
30
35
 
31
36
 
32
- class Task(Base):
33
- id = Column(
37
+ class TaskModelMixin:
38
+ id: Mapped[uuid.UUID] = mapped_column(
34
39
  UUID(as_uuid=True), primary_key=True, server_default=func.gen_random_uuid()
35
40
  )
36
- # foreign key id of assigned worker
37
- worker_id = Column(
38
- UUID(as_uuid=True),
39
- ForeignKey("bq_workers.id", name="fk_workers_id"),
40
- nullable=True,
41
- )
42
41
  # current state of the task
43
- state = Column(
42
+ state: Mapped[TaskState] = mapped_column(
44
43
  Enum(TaskState),
45
44
  nullable=False,
46
45
  default=TaskState.PENDING,
@@ -48,24 +47,37 @@ class Task(Base):
48
47
  index=True,
49
48
  )
50
49
  # channel for workers and job creator to listen/notify
51
- channel = Column(String, nullable=False, index=True)
50
+ channel: Mapped[str] = mapped_column(String, nullable=False, index=True)
52
51
  # module of the processor function
53
- module = Column(String, nullable=False)
52
+ module: Mapped[str] = mapped_column(String, nullable=False)
54
53
  # func name of the processor func
55
- func_name = Column(String, nullable=False)
54
+ func_name: Mapped[str] = mapped_column(String, nullable=False)
56
55
  # keyword arguments
57
- kwargs = Column(JSONB, nullable=True)
56
+ kwargs: Mapped[typing.Optional[typing.Any]] = mapped_column(JSONB, nullable=True)
58
57
  # Result of the task
59
- result = Column(JSONB, nullable=True)
58
+ result: Mapped[typing.Optional[typing.Any]] = mapped_column(JSONB, nullable=True)
60
59
  # Error message
61
- error_message = Column(String, nullable=True)
60
+ error_message: Mapped[typing.Optional[str]] = mapped_column(String, nullable=True)
62
61
  # created datetime of the task
63
- created_at = Column(
62
+ created_at: Mapped[datetime.datetime] = mapped_column(
64
63
  DateTime(timezone=True), nullable=False, server_default=func.now()
65
64
  )
66
65
 
67
- worker = relationship("Worker", back_populates="tasks", uselist=False)
68
66
 
67
+ class TaskModelRefWorkerMixin:
68
+ # foreign key id of assigned worker
69
+ worker_id: Mapped[uuid.UUID] = mapped_column(
70
+ UUID(as_uuid=True),
71
+ ForeignKey("bq_workers.id", name="fk_workers_id"),
72
+ nullable=True,
73
+ )
74
+
75
+ @declared_attr
76
+ def worker(cls) -> Mapped["Worker"]:
77
+ return relationship("Worker", back_populates="tasks", uselist=False)
78
+
79
+
80
+ class Task(TaskModelMixin, TaskModelRefWorkerMixin, Base):
69
81
  __tablename__ = "bq_tasks"
70
82
 
71
83
  def __repr__(self) -> str:
@@ -99,22 +111,24 @@ def notify_if_needed(connection: Connection, task: Task):
99
111
  connection.exec_driver_sql(f"NOTIFY {quoted_channel}")
100
112
 
101
113
 
102
- @event.listens_for(Task, "after_insert")
103
114
  def task_insert_notify(mapper: Mapper, connection: Connection, target: Task):
104
- from .. import models
105
-
106
- if target.state != models.TaskState.PENDING:
115
+ if target.state != TaskState.PENDING:
107
116
  return
108
117
  notify_if_needed(connection, target)
109
118
 
110
119
 
111
- @event.listens_for(Task, "after_update")
112
120
  def task_update_notify(mapper: Mapper, connection: Connection, target: Task):
113
- from .. import models
114
-
115
121
  history = inspect(target).attrs.state.history
116
122
  if not history.has_changes():
117
123
  return
118
- if target.state != models.TaskState.PENDING:
124
+ if target.state != TaskState.PENDING:
119
125
  return
120
126
  notify_if_needed(connection, target)
127
+
128
+
129
+ def listen_events(model_cls: typing.Type):
130
+ event.listens_for(model_cls, "after_insert")(task_insert_notify)
131
+ event.listens_for(model_cls, "after_update")(task_update_notify)
132
+
133
+
134
+ listen_events(Task)
@@ -1,4 +1,6 @@
1
+ import datetime
1
2
  import enum
3
+ import uuid
2
4
 
3
5
  from sqlalchemy import Column
4
6
  from sqlalchemy import DateTime
@@ -7,6 +9,10 @@ from sqlalchemy import func
7
9
  from sqlalchemy import String
8
10
  from sqlalchemy.dialects.postgresql import ARRAY
9
11
  from sqlalchemy.dialects.postgresql import UUID
12
+ from sqlalchemy.orm import declared_attr
13
+ from sqlalchemy.orm import Mapped
14
+ from sqlalchemy.orm import mapped_column
15
+ from sqlalchemy.orm import Mapper
10
16
  from sqlalchemy.orm import relationship
11
17
 
12
18
  from ..db.base import Base
@@ -22,12 +28,12 @@ class WorkerState(enum.Enum):
22
28
  NO_HEARTBEAT = "NO_HEARTBEAT"
23
29
 
24
30
 
25
- class Worker(Base):
26
- id = Column(
31
+ class WorkerModelMixin:
32
+ id: Mapped[uuid.UUID] = mapped_column(
27
33
  UUID(as_uuid=True), primary_key=True, server_default=func.gen_random_uuid()
28
34
  )
29
35
  # current state of the worker
30
- state = Column(
36
+ state: Mapped[WorkerState] = mapped_column(
31
37
  Enum(WorkerState),
32
38
  nullable=False,
33
39
  default=WorkerState.RUNNING,
@@ -35,28 +41,34 @@ class Worker(Base):
35
41
  index=True,
36
42
  )
37
43
  # name of the worker
38
- name = Column(String, nullable=False)
44
+ name: Mapped[str] = mapped_column(String, nullable=False)
39
45
  # the channels we are processing
40
- channels = Column(ARRAY(String), nullable=False)
46
+ channels: Mapped[list[str]] = mapped_column(ARRAY(String), nullable=False)
41
47
  # last heartbeat of this worker
42
- last_heartbeat = Column(
48
+ last_heartbeat: Mapped[datetime.datetime] = mapped_column(
43
49
  DateTime(timezone=True),
44
50
  nullable=False,
45
51
  server_default=func.now(),
46
52
  index=True,
47
53
  )
48
54
  # created datetime of the worker
49
- created_at = Column(
55
+ created_at: Mapped[datetime.datetime] = mapped_column(
50
56
  DateTime(timezone=True), nullable=False, server_default=func.now()
51
57
  )
52
58
 
53
- tasks = relationship(
54
- "Task",
55
- back_populates="worker",
56
- cascade="all,delete",
57
- order_by="Task.created_at",
58
- )
59
59
 
60
+ class WorkerRefMixin:
61
+ @declared_attr
62
+ def tasks(cls) -> Mapped[list["Task"]]:
63
+ return relationship(
64
+ "Task",
65
+ back_populates="worker",
66
+ cascade="all,delete",
67
+ order_by="Task.created_at",
68
+ )
69
+
70
+
71
+ class Worker(WorkerModelMixin, WorkerRefMixin, Base):
60
72
  __tablename__ = "bq_workers"
61
73
 
62
74
  def __repr__(self) -> str:
@@ -7,9 +7,8 @@ import typing
7
7
  import venusian
8
8
  from sqlalchemy.orm import object_session
9
9
 
10
- from bq import models
11
-
12
- BQ_PROCESSOR_CATEGORY = "bq_processor"
10
+ from .. import constants
11
+ from .. import models
13
12
 
14
13
 
15
14
  @dataclasses.dataclass(frozen=True)
@@ -51,9 +50,10 @@ def process_task(task: models.Task, processor: Processor):
51
50
  if "db" in func_signature.parameters:
52
51
  base_kwargs["db"] = db
53
52
  with db.begin_nested() as savepoint:
53
+ if "savepoint" in func_signature.parameters:
54
+ base_kwargs["savepoint"] = savepoint
54
55
  try:
55
56
  result = processor.func(**base_kwargs, **task.kwargs)
56
- savepoint.commit()
57
57
  except Exception as exc:
58
58
  logger.error("Unhandled exception for task %s", task.id, exc_info=True)
59
59
  if processor.auto_rollback_on_exc:
@@ -100,7 +100,7 @@ class Registry:
100
100
 
101
101
 
102
102
  def processor(
103
- channel: str,
103
+ channel: str = constants.DEFAULT_CHANNEL,
104
104
  auto_complete: bool = True,
105
105
  auto_rollback_on_exc: bool = True,
106
106
  task_cls: typing.Type = models.Task,
@@ -121,7 +121,7 @@ def processor(
121
121
  raise ValueError("Name is not the same")
122
122
  scanner.registry.add(processor)
123
123
 
124
- venusian.attach(helper_obj, callback, category=BQ_PROCESSOR_CATEGORY)
124
+ venusian.attach(helper_obj, callback, category=constants.BQ_PROCESSOR_CATEGORY)
125
125
  return helper_obj
126
126
 
127
127
  return decorator
@@ -132,5 +132,5 @@ def collect(packages: list[typing.Any], registry: Registry | None = None) -> Reg
132
132
  registry = Registry()
133
133
  scanner = venusian.Scanner(registry=registry)
134
134
  for package in packages:
135
- scanner.scan(package, categories=(BQ_PROCESSOR_CATEGORY,))
135
+ scanner.scan(package, categories=(constants.BQ_PROCESSOR_CATEGORY,))
136
136
  return registry
@@ -17,28 +17,29 @@ class Notification:
17
17
 
18
18
 
19
19
  class DispatchService:
20
- def __init__(self, session: Session):
20
+ def __init__(self, session: Session, task_model: typing.Type = models.Task):
21
21
  self.session = session
22
+ self.task_model: typing.Type[models.Task] = task_model
22
23
 
23
24
  def make_task_query(self, channels: typing.Sequence[str], limit: int = 1) -> Query:
24
25
  return (
25
- self.session.query(models.Task.id)
26
- .filter(models.Task.channel.in_(channels))
27
- .filter(models.Task.state == models.TaskState.PENDING)
28
- .order_by(models.Task.created_at)
26
+ self.session.query(self.task_model.id)
27
+ .filter(self.task_model.channel.in_(channels))
28
+ .filter(self.task_model.state == models.TaskState.PENDING)
29
+ .order_by(self.task_model.created_at)
29
30
  .limit(limit)
30
31
  .with_for_update(skip_locked=True)
31
32
  )
32
33
 
33
- def make_update_query(self, task_query: typing.Any, worker_id: uuid.UUID):
34
+ def make_update_query(self, task_query: typing.Any, worker_id: typing.Any):
34
35
  return (
35
- models.Task.__table__.update()
36
- .where(models.Task.id.in_(task_query))
36
+ self.task_model.__table__.update()
37
+ .where(self.task_model.id.in_(task_query))
37
38
  .values(
38
39
  state=models.TaskState.PROCESSING,
39
40
  worker_id=worker_id,
40
41
  )
41
- .returning(models.Task.id)
42
+ .returning(self.task_model.id)
42
43
  )
43
44
 
44
45
  def dispatch(
@@ -52,9 +53,11 @@ class DispatchService:
52
53
  self.make_update_query(task_subquery, worker_id=worker_id)
53
54
  )
54
55
  ]
55
- # TODO: ideally returning with (models.Task) should return the whole model, but SQLAlchemy is returning
56
+ # TODO: ideally returning with (self.task_model) should return the whole model, but SQLAlchemy is returning
56
57
  # it columns in rows. We can save a round trip if we can find out how to solve this
57
- return self.session.query(models.Task).filter(models.Task.id.in_(task_ids))
58
+ return self.session.query(self.task_model).filter(
59
+ self.task_model.id.in_(task_ids)
60
+ )
58
61
 
59
62
  def listen(self, channels: typing.Sequence[str]):
60
63
  conn = self.session.connection()
@@ -9,8 +9,21 @@ from .. import models
9
9
 
10
10
 
11
11
  class WorkerService:
12
- def __init__(self, session: Session):
12
+ def __init__(
13
+ self,
14
+ session: Session,
15
+ task_model: typing.Type = models.Task,
16
+ worker_model: typing.Type = models.Worker,
17
+ ):
13
18
  self.session = session
19
+ self.task_model: typing.Type[models.Task] = task_model
20
+ self.worker_model: typing.Type[models.Worker] = worker_model
21
+
22
+ def get_worker(self, id: typing.Any) -> typing.Any:
23
+ return self.session.get(self.worker_model, id)
24
+
25
+ def make_worker(self, name: str, channels: tuple[str, ...]):
26
+ return self.worker_model(name=name, channels=channels)
14
27
 
15
28
  def update_heartbeat(self, worker: models.Worker):
16
29
  worker.last_heartbeat = func.now()
@@ -18,24 +31,24 @@ class WorkerService:
18
31
 
19
32
  def make_dead_worker_query(self, timeout: int, limit: int = 5) -> Query:
20
33
  return (
21
- self.session.query(models.Worker.id)
34
+ self.session.query(self.worker_model.id)
22
35
  .filter(
23
- models.Worker.last_heartbeat
36
+ self.worker_model.last_heartbeat
24
37
  < (func.now() - datetime.timedelta(seconds=timeout))
25
38
  )
26
- .filter(models.Worker.state == models.WorkerState.RUNNING)
39
+ .filter(self.worker_model.state == models.WorkerState.RUNNING)
27
40
  .limit(limit)
28
41
  .with_for_update(skip_locked=True)
29
42
  )
30
43
 
31
44
  def make_update_dead_worker_query(self, worker_query: typing.Any):
32
45
  return (
33
- models.Worker.__table__.update()
34
- .where(models.Worker.id.in_(worker_query))
46
+ self.worker_model.__table__.update()
47
+ .where(self.worker_model.id.in_(worker_query))
35
48
  .values(
36
49
  state=models.WorkerState.NO_HEARTBEAT,
37
50
  )
38
- .returning(models.Worker.id)
51
+ .returning(self.worker_model.id)
39
52
  )
40
53
 
41
54
  def fetch_dead_workers(self, timeout: int, limit: int = 5) -> Query:
@@ -49,17 +62,18 @@ class WorkerService:
49
62
  ]
50
63
  # TODO: ideally returning with (models.Task) should return the whole model, but SQLAlchemy is returning
51
64
  # it columns in rows. We can save a round trip if we can find out how to solve this
52
- return self.session.query(models.Worker).filter(
53
- models.Worker.id.in_(worker_ids)
65
+ return self.session.query(self.worker_model).filter(
66
+ self.worker_model.id.in_(worker_ids)
54
67
  )
55
68
 
56
69
  def make_update_tasks_query(self, worker_query: typing.Any):
57
70
  return (
58
- models.Task.__table__.update()
59
- .where(models.Task.worker_id.in_(worker_query))
60
- .where(models.Task.state == models.TaskState.PROCESSING)
71
+ self.task_model.__table__.update()
72
+ .where(self.task_model.worker_id.in_(worker_query))
73
+ .where(self.task_model.state == models.TaskState.PROCESSING)
61
74
  .values(
62
75
  state=models.TaskState.PENDING,
76
+ worker_id=None,
63
77
  )
64
78
  )
65
79
 
@@ -1,6 +1,6 @@
1
1
  [tool.poetry]
2
2
  name = "beanqueue"
3
- version = "0.1.2"
3
+ version = "0.1.3"
4
4
  description = "BeanQueue or BQ for short, PostgreSQL SKIP LOCK based worker queue library"
5
5
  authors = ["Fang-Pen Lin <fangpen@launchplatform.com>"]
6
6
  license = "MIT"
@@ -1,4 +0,0 @@
1
- from .task import Task
2
- from .task import TaskState
3
- from .worker import Worker
4
- from .worker import WorkerState
File without changes
File without changes
@@ -13,13 +13,13 @@ from ..db.base import Base
13
13
  @click.command()
14
14
  @inject
15
15
  def main(engine: Engine = Provide[Container.db_engine]):
16
- logging.basicConfig(level=logging.INFO)
17
16
  logger = logging.getLogger(__name__)
18
17
  Base.metadata.create_all(bind=engine)
19
18
  logger.info("Done, tables created")
20
19
 
21
20
 
22
21
  if __name__ == "__main__":
22
+ logging.basicConfig(level=logging.INFO)
23
23
  container = Container()
24
24
  container.wire(modules=[__name__])
25
25
  main()
@@ -25,7 +25,6 @@ def main(
25
25
  kwargs: str | None,
26
26
  db: Session = Provide[Container.session],
27
27
  ):
28
- logging.basicConfig(level=logging.INFO)
29
28
  logger = logging.getLogger(__name__)
30
29
 
31
30
  logger.info(
@@ -43,6 +42,7 @@ def main(
43
42
 
44
43
 
45
44
  if __name__ == "__main__":
45
+ logging.basicConfig(level=logging.INFO)
46
46
  container = Container()
47
47
  container.wire(modules=[__name__])
48
48
  main()
File without changes
File without changes