diracx-db 0.0.1a21__py3-none-any.whl → 0.0.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release has been flagged as potentially problematic.


This version of diracx-db might be problematic; consult the registry's advisory page for more details.

@@ -1,32 +1,73 @@
1
1
  from __future__ import annotations
2
2
 
3
- from typing import Any
3
+ import logging
4
+ from contextlib import asynccontextmanager
5
+ from functools import partial
6
+ from typing import Any, AsyncGenerator
4
7
 
5
- import sqlalchemy
8
+ from sqlalchemy import (
9
+ BigInteger,
10
+ Column,
11
+ Executable,
12
+ MetaData,
13
+ Table,
14
+ and_,
15
+ delete,
16
+ exists,
17
+ insert,
18
+ literal,
19
+ or_,
20
+ select,
21
+ update,
22
+ )
23
+ from sqlalchemy.exc import IntegrityError, NoResultFound
6
24
 
25
+ from diracx.core.exceptions import (
26
+ SandboxAlreadyAssignedError,
27
+ SandboxAlreadyInsertedError,
28
+ SandboxNotFoundError,
29
+ )
7
30
  from diracx.core.models import SandboxInfo, SandboxType, UserInfo
8
- from diracx.db.sql.utils import BaseSQLDB, utcnow
31
+ from diracx.db.sql.utils.base import BaseSQLDB
32
+ from diracx.db.sql.utils.functions import days_since, utcnow
9
33
 
10
34
  from .schema import Base as SandboxMetadataDBBase
11
- from .schema import sb_EntityMapping, sb_Owners, sb_SandBoxes
35
+ from .schema import SandBoxes, SBEntityMapping, SBOwners
36
+
37
+ logger = logging.getLogger(__name__)
12
38
 
13
39
 
14
40
  class SandboxMetadataDB(BaseSQLDB):
15
41
  metadata = SandboxMetadataDBBase.metadata
16
42
 
17
- async def upsert_owner(self, user: UserInfo) -> int:
43
+ # Temporary table to store the sandboxes to delete, see `select_and_delete_expired`
44
+ _temp_table = Table(
45
+ "sb_to_delete",
46
+ MetaData(),
47
+ Column("SBId", BigInteger, primary_key=True),
48
+ prefixes=["TEMPORARY"],
49
+ )
50
+
51
+ async def get_owner_id(self, user: UserInfo) -> int | None:
18
52
  """Get the id of the owner from the database."""
19
- # TODO: Follow https://github.com/DIRACGrid/diracx/issues/49
20
- stmt = sqlalchemy.select(sb_Owners.OwnerID).where(
21
- sb_Owners.Owner == user.preferred_username,
22
- sb_Owners.OwnerGroup == user.dirac_group,
23
- sb_Owners.VO == user.vo,
53
+ stmt = select(SBOwners.OwnerID).where(
54
+ SBOwners.Owner == user.preferred_username,
55
+ SBOwners.OwnerGroup == user.dirac_group,
56
+ SBOwners.VO == user.vo,
24
57
  )
25
- result = await self.conn.execute(stmt)
26
- if owner_id := result.scalar_one_or_none():
27
- return owner_id
58
+ return (await self.conn.execute(stmt)).scalar_one_or_none()
28
59
 
29
- stmt = sqlalchemy.insert(sb_Owners).values(
60
+ async def get_sandbox_owner_id(self, pfn: str, se_name: str) -> int | None:
61
+ """Get the id of the owner of a sandbox."""
62
+ stmt = select(SBOwners.OwnerID).where(
63
+ SBOwners.OwnerID == SandBoxes.OwnerId,
64
+ SandBoxes.SEName == se_name,
65
+ SandBoxes.SEPFN == pfn,
66
+ )
67
+ return (await self.conn.execute(stmt)).scalar_one_or_none()
68
+
69
+ async def insert_owner(self, user: UserInfo) -> int:
70
+ stmt = insert(SBOwners).values(
30
71
  Owner=user.preferred_username,
31
72
  OwnerGroup=user.dirac_group,
32
73
  VO=user.vo,
@@ -48,12 +89,10 @@ class SandboxMetadataDB(BaseSQLDB):
48
89
  return "/" + "/".join(parts)
49
90
 
50
91
  async def insert_sandbox(
51
- self, se_name: str, user: UserInfo, pfn: str, size: int
92
+ self, owner_id: int, se_name: str, pfn: str, size: int
52
93
  ) -> None:
53
94
  """Add a new sandbox in SandboxMetadataDB."""
54
- # TODO: Follow https://github.com/DIRACGrid/diracx/issues/49
55
- owner_id = await self.upsert_owner(user)
56
- stmt = sqlalchemy.insert(sb_SandBoxes).values(
95
+ stmt = insert(SandBoxes).values(
57
96
  OwnerId=owner_id,
58
97
  SEName=se_name,
59
98
  SEPFN=pfn,
@@ -62,28 +101,36 @@ class SandboxMetadataDB(BaseSQLDB):
62
101
  LastAccessTime=utcnow(),
63
102
  )
64
103
  try:
65
- result = await self.conn.execute(stmt)
66
- except sqlalchemy.exc.IntegrityError:
67
- await self.update_sandbox_last_access_time(se_name, pfn)
68
- else:
69
- assert result.rowcount == 1
104
+ await self.conn.execute(stmt)
105
+ except IntegrityError as e:
106
+ raise SandboxAlreadyInsertedError(pfn, se_name) from e
70
107
 
71
108
  async def update_sandbox_last_access_time(self, se_name: str, pfn: str) -> None:
72
109
  stmt = (
73
- sqlalchemy.update(sb_SandBoxes)
74
- .where(sb_SandBoxes.SEName == se_name, sb_SandBoxes.SEPFN == pfn)
110
+ update(SandBoxes)
111
+ .where(SandBoxes.SEName == se_name, SandBoxes.SEPFN == pfn)
75
112
  .values(LastAccessTime=utcnow())
76
113
  )
77
114
  result = await self.conn.execute(stmt)
78
- assert result.rowcount == 1
115
+ if result.rowcount == 0:
116
+ # If the update didn't affect any row, the sandbox doesn't exist
117
+ raise SandboxNotFoundError(pfn, se_name)
118
+ elif result.rowcount != 1:
119
+ raise NotImplementedError(
120
+ "More than one sandbox was updated. This should not happen."
121
+ )
79
122
 
80
- async def sandbox_is_assigned(self, pfn: str, se_name: str) -> bool:
123
+ async def sandbox_is_assigned(self, pfn: str, se_name: str) -> bool | None:
81
124
  """Checks if a sandbox exists and has been assigned."""
82
- stmt: sqlalchemy.Executable = sqlalchemy.select(sb_SandBoxes.Assigned).where(
83
- sb_SandBoxes.SEName == se_name, sb_SandBoxes.SEPFN == pfn
125
+ stmt: Executable = select(SandBoxes.Assigned).where(
126
+ SandBoxes.SEName == se_name, SandBoxes.SEPFN == pfn
84
127
  )
85
128
  result = await self.conn.execute(stmt)
86
- is_assigned = result.scalar_one()
129
+ try:
130
+ is_assigned = result.scalar_one()
131
+ except NoResultFound as e:
132
+ raise SandboxNotFoundError(pfn, se_name) from e
133
+
87
134
  return is_assigned
88
135
 
89
136
  @staticmethod
@@ -97,11 +144,11 @@ class SandboxMetadataDB(BaseSQLDB):
97
144
  """Get the sandbox assign to job."""
98
145
  entity_id = self.jobid_to_entity_id(job_id)
99
146
  stmt = (
100
- sqlalchemy.select(sb_SandBoxes.SEPFN)
101
- .where(sb_SandBoxes.SBId == sb_EntityMapping.SBId)
147
+ select(SandBoxes.SEPFN)
148
+ .where(SandBoxes.SBId == SBEntityMapping.SBId)
102
149
  .where(
103
- sb_EntityMapping.EntityId == entity_id,
104
- sb_EntityMapping.Type == sb_type,
150
+ SBEntityMapping.EntityId == entity_id,
151
+ SBEntityMapping.Type == sb_type,
105
152
  )
106
153
  )
107
154
  result = await self.conn.execute(stmt)
@@ -114,58 +161,118 @@ class SandboxMetadataDB(BaseSQLDB):
114
161
  sb_type: SandboxType,
115
162
  se_name: str,
116
163
  ) -> None:
117
- """Mapp sandbox and jobs."""
164
+ """Map sandbox and jobs."""
118
165
  for job_id in jobs_ids:
119
166
  # Define the entity id as 'Entity:entity_id' due to the DB definition:
120
167
  entity_id = self.jobid_to_entity_id(job_id)
121
- select_sb_id = sqlalchemy.select(
122
- sb_SandBoxes.SBId,
123
- sqlalchemy.literal(entity_id).label("EntityId"),
124
- sqlalchemy.literal(sb_type).label("Type"),
168
+ select_sb_id = select(
169
+ SandBoxes.SBId,
170
+ literal(entity_id).label("EntityId"),
171
+ literal(sb_type).label("Type"),
125
172
  ).where(
126
- sb_SandBoxes.SEName == se_name,
127
- sb_SandBoxes.SEPFN == pfn,
173
+ SandBoxes.SEName == se_name,
174
+ SandBoxes.SEPFN == pfn,
128
175
  )
129
- stmt = sqlalchemy.insert(sb_EntityMapping).from_select(
176
+ stmt = insert(SBEntityMapping).from_select(
130
177
  ["SBId", "EntityId", "Type"], select_sb_id
131
178
  )
132
- await self.conn.execute(stmt)
179
+ try:
180
+ await self.conn.execute(stmt)
181
+ except IntegrityError as e:
182
+ raise SandboxAlreadyAssignedError(pfn, se_name) from e
133
183
 
134
- stmt = (
135
- sqlalchemy.update(sb_SandBoxes)
136
- .where(sb_SandBoxes.SEPFN == pfn)
137
- .values(Assigned=True)
138
- )
184
+ stmt = update(SandBoxes).where(SandBoxes.SEPFN == pfn).values(Assigned=True)
139
185
  result = await self.conn.execute(stmt)
186
+ if result.rowcount == 0:
187
+ # If the update didn't affect any row, the sandbox doesn't exist
188
+ # It means the previous insert didn't have any effect
189
+ raise SandboxNotFoundError(pfn, se_name)
190
+
140
191
  assert result.rowcount == 1
141
192
 
142
193
  async def unassign_sandboxes_to_jobs(self, jobs_ids: list[int]) -> None:
143
194
  """Delete mapping between jobs and sandboxes."""
144
195
  for job_id in jobs_ids:
145
196
  entity_id = self.jobid_to_entity_id(job_id)
146
- sb_sel_stmt = sqlalchemy.select(sb_SandBoxes.SBId)
197
+ sb_sel_stmt = select(SandBoxes.SBId)
147
198
  sb_sel_stmt = sb_sel_stmt.join(
148
- sb_EntityMapping, sb_EntityMapping.SBId == sb_SandBoxes.SBId
199
+ SBEntityMapping, SBEntityMapping.SBId == SandBoxes.SBId
149
200
  )
150
- sb_sel_stmt = sb_sel_stmt.where(sb_EntityMapping.EntityId == entity_id)
201
+ sb_sel_stmt = sb_sel_stmt.where(SBEntityMapping.EntityId == entity_id)
151
202
 
152
203
  result = await self.conn.execute(sb_sel_stmt)
153
204
  sb_ids = [row.SBId for row in result]
154
205
 
155
- del_stmt = sqlalchemy.delete(sb_EntityMapping).where(
156
- sb_EntityMapping.EntityId == entity_id
206
+ del_stmt = delete(SBEntityMapping).where(
207
+ SBEntityMapping.EntityId == entity_id
157
208
  )
158
209
  await self.conn.execute(del_stmt)
159
210
 
160
- sb_entity_sel_stmt = sqlalchemy.select(sb_EntityMapping.SBId).where(
161
- sb_EntityMapping.SBId.in_(sb_ids)
211
+ sb_entity_sel_stmt = select(SBEntityMapping.SBId).where(
212
+ SBEntityMapping.SBId.in_(sb_ids)
162
213
  )
163
214
  result = await self.conn.execute(sb_entity_sel_stmt)
164
215
  remaining_sb_ids = [row.SBId for row in result]
165
216
  if not remaining_sb_ids:
166
217
  unassign_stmt = (
167
- sqlalchemy.update(sb_SandBoxes)
168
- .where(sb_SandBoxes.SBId.in_(sb_ids))
218
+ update(SandBoxes)
219
+ .where(SandBoxes.SBId.in_(sb_ids))
169
220
  .values(Assigned=False)
170
221
  )
171
222
  await self.conn.execute(unassign_stmt)
223
+
224
+ @asynccontextmanager
225
+ async def delete_unused_sandboxes(
226
+ self, *, limit: int | None = None
227
+ ) -> AsyncGenerator[AsyncGenerator[str, None], None]:
228
+ """Get the sandbox PFNs to delete.
229
+
230
+ The result of this function can be used as an async context manager
231
+ to yield the PFNs of the sandboxes to delete. The context manager
232
+ will automatically remove the sandboxes from the database upon exit.
233
+
234
+ Args:
235
+ limit: If not None, the maximum number of sandboxes to delete.
236
+
237
+ """
238
+ conditions = [
239
+ # If it has assigned to a job but is no longer mapped it can be removed
240
+ and_(
241
+ SandBoxes.Assigned,
242
+ ~exists().where(SBEntityMapping.SBId == SandBoxes.SBId),
243
+ ),
244
+ # If the sandbox is still unassigned after 15 days, remove it
245
+ and_(~SandBoxes.Assigned, days_since(SandBoxes.LastAccessTime) >= 15),
246
+ ]
247
+ # Sandboxes which are not on S3 will be handled by legacy DIRAC
248
+ condition = and_(SandBoxes.SEPFN.like("/S3/%"), or_(*conditions))
249
+
250
+ # Copy the in-flight rows to a temporary table
251
+ await self.conn.run_sync(partial(self._temp_table.create, checkfirst=True))
252
+ select_stmt = select(SandBoxes.SBId).where(condition)
253
+ if limit:
254
+ select_stmt = select_stmt.limit(limit)
255
+ insert_stmt = insert(self._temp_table).from_select(["SBId"], select_stmt)
256
+ await self.conn.execute(insert_stmt)
257
+
258
+ try:
259
+ # Select the sandbox PFNs from the temporary table and yield them
260
+ select_stmt = select(SandBoxes.SEPFN).join(
261
+ self._temp_table, self._temp_table.c.SBId == SandBoxes.SBId
262
+ )
263
+
264
+ async def yield_pfns() -> AsyncGenerator[str, None]:
265
+ async for row in await self.conn.stream(select_stmt):
266
+ yield row.SEPFN
267
+
268
+ yield yield_pfns()
269
+
270
+ # Delete the sandboxes from the main table
271
+ delete_stmt = delete(SandBoxes).where(
272
+ SandBoxes.SBId.in_(select(self._temp_table.c.SBId))
273
+ )
274
+ result = await self.conn.execute(delete_stmt)
275
+ logger.info("Deleted %d expired/unassigned sandboxes", result.rowcount)
276
+
277
+ finally:
278
+ await self.conn.run_sync(partial(self._temp_table.drop, checkfirst=True))
@@ -1,3 +1,5 @@
1
+ from __future__ import annotations
2
+
1
3
  from sqlalchemy import (
2
4
  BigInteger,
3
5
  Boolean,
@@ -14,16 +16,19 @@ from diracx.db.sql.utils import Column, DateNowColumn
14
16
  Base = declarative_base()
15
17
 
16
18
 
17
- class sb_Owners(Base):
19
+ class SBOwners(Base):
18
20
  __tablename__ = "sb_Owners"
19
21
  OwnerID = Column(Integer, autoincrement=True)
20
22
  Owner = Column(String(32))
21
23
  OwnerGroup = Column(String(32))
22
24
  VO = Column(String(64))
23
- __table_args__ = (PrimaryKeyConstraint("OwnerID"),)
25
+ __table_args__ = (
26
+ PrimaryKeyConstraint("OwnerID"),
27
+ UniqueConstraint("Owner", "OwnerGroup", "VO", name="unique_owner_group_vo"),
28
+ )
24
29
 
25
30
 
26
- class sb_SandBoxes(Base):
31
+ class SandBoxes(Base):
27
32
  __tablename__ = "sb_SandBoxes"
28
33
  SBId = Column(Integer, autoincrement=True)
29
34
  OwnerId = Column(Integer)
@@ -40,7 +45,7 @@ class sb_SandBoxes(Base):
40
45
  )
41
46
 
42
47
 
43
- class sb_EntityMapping(Base):
48
+ class SBEntityMapping(Base):
44
49
  __tablename__ = "sb_EntityMapping"
45
50
  SBId = Column(Integer)
46
51
  EntityId = Column(String(128))
@@ -7,8 +7,6 @@ from sqlalchemy import delete, func, select, update
7
7
  if TYPE_CHECKING:
8
8
  pass
9
9
 
10
- from diracx.core.properties import JOB_SHARING, SecurityProperty
11
-
12
10
  from ..utils import BaseSQLDB
13
11
  from .schema import (
14
12
  BannedSitesQueue,
@@ -49,126 +47,23 @@ class TaskQueueDB(BaseSQLDB):
49
47
  )
50
48
  return dict((await self.conn.execute(stmt)).one()._mapping)
51
49
 
52
- async def remove_job(self, job_id: int):
53
- """Remove a job from the task queues."""
54
- stmt = delete(JobsQueue).where(JobsQueue.JobId == job_id)
55
- await self.conn.execute(stmt)
56
-
57
- async def remove_jobs(self, job_ids: list[int]):
58
- """Remove jobs from the task queues."""
59
- stmt = delete(JobsQueue).where(JobsQueue.JobId.in_(job_ids))
60
- await self.conn.execute(stmt)
61
-
62
- async def delete_task_queue_if_empty(
63
- self,
64
- tq_id: int,
65
- tq_owner: str,
66
- tq_group: str,
67
- job_share: int,
68
- group_properties: set[SecurityProperty],
69
- enable_shares_correction: bool,
70
- allow_background_tqs: bool,
71
- ):
72
- """Try to delete a task queue if it's empty."""
73
- # Check if the task queue is empty
74
- stmt = (
75
- select(TaskQueues.TQId)
76
- .where(TaskQueues.Enabled >= 1)
77
- .where(TaskQueues.TQId == tq_id)
78
- .where(~TaskQueues.TQId.in_(select(JobsQueue.TQId)))
79
- )
80
- rows = await self.conn.execute(stmt)
81
- if not rows.rowcount:
82
- return
83
-
84
- # Deleting the task queue (the other tables will be deleted in cascade)
85
- stmt = delete(TaskQueues).where(TaskQueues.TQId == tq_id)
86
- await self.conn.execute(stmt)
87
-
88
- await self.recalculate_tq_shares_for_entity(
89
- tq_owner,
90
- tq_group,
91
- job_share,
92
- group_properties,
93
- enable_shares_correction,
94
- allow_background_tqs,
95
- )
96
-
97
- async def recalculate_tq_shares_for_entity(
98
- self,
99
- owner: str,
100
- group: str,
101
- job_share: int,
102
- group_properties: set[SecurityProperty],
103
- enable_shares_correction: bool,
104
- allow_background_tqs: bool,
105
- ):
106
- """Recalculate the shares for a user/userGroup combo."""
107
- if JOB_SHARING in group_properties:
108
- # If group has JobSharing just set prio for that entry, user is irrelevant
109
- return await self.__set_priorities_for_entity(
110
- owner, group, job_share, group_properties, allow_background_tqs
111
- )
112
-
50
+ async def get_task_queue_owners_by_group(self, group: str) -> dict[str, int]:
51
+ """Get the owners for a task queue and group."""
113
52
  stmt = (
114
53
  select(TaskQueues.Owner, func.count(TaskQueues.Owner))
115
54
  .where(TaskQueues.OwnerGroup == group)
116
55
  .group_by(TaskQueues.Owner)
117
56
  )
118
57
  rows = await self.conn.execute(stmt)
119
- # make the rows a list of tuples
120
58
  # Get owners in this group and the amount of times they appear
121
- # TODO: I guess the rows are already a list of tupes
59
+ # TODO: I guess the rows are already a list of tuples
122
60
  # maybe refactor
123
- data = [(r[0], r[1]) for r in rows if r]
124
- numOwners = len(data)
125
- # If there are no owners do now
126
- if numOwners == 0:
127
- return
128
- # Split the share amongst the number of owners
129
- entities_shares = {row[0]: job_share / numOwners for row in data}
130
-
131
- # TODO: implement the following
132
- # If corrector is enabled let it work it's magic
133
- # if enable_shares_correction:
134
- # entities_shares = await self.__shares_corrector.correct_shares(
135
- # entitiesShares, group=group
136
- # )
137
-
138
- # Keep updating
139
- owners = dict(data)
140
- # IF the user is already known and has more than 1 tq, the rest of the users don't need to be modified
141
- # (The number of owners didn't change)
142
- if owner in owners and owners[owner] > 1:
143
- await self.__set_priorities_for_entity(
144
- owner,
145
- group,
146
- entities_shares[owner],
147
- group_properties,
148
- allow_background_tqs,
149
- )
150
- return
151
- # Oops the number of owners may have changed so we recalculate the prio for all owners in the group
152
- for owner in owners:
153
- await self.__set_priorities_for_entity(
154
- owner,
155
- group,
156
- entities_shares[owner],
157
- group_properties,
158
- allow_background_tqs,
159
- )
160
-
161
- async def __set_priorities_for_entity(
162
- self,
163
- owner: str,
164
- group: str,
165
- share,
166
- properties: set[SecurityProperty],
167
- allow_background_tqs: bool,
168
- ):
169
- """Set the priority for a user/userGroup combo given a splitted share."""
170
- from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import calculate_priority
61
+ return {r[0]: r[1] for r in rows if r}
171
62
 
63
+ async def get_task_queue_priorities(
64
+ self, group: str, owner: str | None = None
65
+ ) -> dict[int, float]:
66
+ """Get the priorities for a list of task queues."""
172
67
  stmt = (
173
68
  select(
174
69
  TaskQueues.TQId,
@@ -178,24 +73,48 @@ class TaskQueueDB(BaseSQLDB):
178
73
  .where(TaskQueues.OwnerGroup == group)
179
74
  .group_by(TaskQueues.TQId)
180
75
  )
181
- if JOB_SHARING not in properties:
76
+ if owner:
182
77
  stmt = stmt.where(TaskQueues.Owner == owner)
183
78
  rows = await self.conn.execute(stmt)
184
- tq_dict: dict[int, float] = {tq_id: priority for tq_id, priority in rows}
79
+ return {tq_id: priority for tq_id, priority in rows}
185
80
 
186
- if not tq_dict:
187
- return
81
+ async def remove_jobs(self, job_ids: list[int]):
82
+ """Remove jobs from the task queues."""
83
+ stmt = delete(JobsQueue).where(JobsQueue.JobId.in_(job_ids))
84
+ await self.conn.execute(stmt)
188
85
 
189
- rows = await self.retrieve_task_queues(list(tq_dict))
86
+ async def is_task_queue_empty(self, tq_id: int) -> bool:
87
+ """Check if a task queue is empty."""
88
+ stmt = (
89
+ select(TaskQueues.TQId)
90
+ .where(TaskQueues.Enabled >= 1)
91
+ .where(TaskQueues.TQId == tq_id)
92
+ .where(~TaskQueues.TQId.in_(select(JobsQueue.TQId)))
93
+ )
94
+ rows = await self.conn.execute(stmt)
95
+ return not rows.rowcount
190
96
 
191
- prio_dict = calculate_priority(tq_dict, rows, share, allow_background_tqs)
97
+ async def delete_task_queue(
98
+ self,
99
+ tq_id: int,
100
+ ):
101
+ """Delete a task queue."""
102
+ # Deleting the task queue (the other tables will be deleted in cascade)
103
+ stmt = delete(TaskQueues).where(TaskQueues.TQId == tq_id)
104
+ await self.conn.execute(stmt)
192
105
 
193
- # Execute updates
194
- for prio, tqs in prio_dict.items():
195
- update_stmt = (
196
- update(TaskQueues).where(TaskQueues.TQId.in_(tqs)).values(Priority=prio)
197
- )
198
- await self.conn.execute(update_stmt)
106
+ async def set_priorities_for_entity(
107
+ self,
108
+ tq_ids: list[int],
109
+ priority: float,
110
+ ):
111
+ """Set the priority for a user/userGroup combo given a split share."""
112
+ update_stmt = (
113
+ update(TaskQueues)
114
+ .where(TaskQueues.TQId.in_(tq_ids))
115
+ .values(Priority=priority)
116
+ )
117
+ await self.conn.execute(update_stmt)
199
118
 
200
119
  async def retrieve_task_queues(self, tq_id_list=None):
201
120
  """Get all the task queues."""
@@ -1,3 +1,5 @@
1
+ from __future__ import annotations
2
+
1
3
  from sqlalchemy import (
2
4
  BigInteger,
3
5
  Boolean,