diracx-db 0.0.1a16__py3-none-any.whl → 0.0.1a18__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- diracx/db/os/utils.py +60 -11
- diracx/db/sql/__init__.py +3 -1
- diracx/db/sql/auth/db.py +10 -19
- diracx/db/sql/auth/schema.py +5 -7
- diracx/db/sql/dummy/db.py +2 -3
- diracx/db/sql/{jobs → job}/db.py +12 -452
- diracx/db/sql/{jobs → job}/schema.py +2 -118
- diracx/db/sql/job_logging/__init__.py +0 -0
- diracx/db/sql/job_logging/db.py +161 -0
- diracx/db/sql/job_logging/schema.py +25 -0
- diracx/db/sql/sandbox_metadata/db.py +12 -10
- diracx/db/sql/task_queue/__init__.py +0 -0
- diracx/db/sql/task_queue/db.py +261 -0
- diracx/db/sql/task_queue/schema.py +109 -0
- diracx/db/sql/utils/__init__.py +418 -0
- diracx/db/sql/{jobs/status_utility.py → utils/job_status.py} +12 -19
- {diracx_db-0.0.1a16.dist-info → diracx_db-0.0.1a18.dist-info}/METADATA +5 -5
- diracx_db-0.0.1a18.dist-info/RECORD +33 -0
- {diracx_db-0.0.1a16.dist-info → diracx_db-0.0.1a18.dist-info}/WHEEL +1 -1
- diracx/db/sql/utils.py +0 -234
- diracx_db-0.0.1a16.dist-info/RECORD +0 -27
- /diracx/db/sql/{jobs → job}/__init__.py +0 -0
- {diracx_db-0.0.1a16.dist-info → diracx_db-0.0.1a18.dist-info}/entry_points.txt +0 -0
- {diracx_db-0.0.1a16.dist-info → diracx_db-0.0.1a18.dist-info}/top_level.txt +0 -0
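The headline change between a16 and a18 is structural: the monolithic `diracx/db/sql/jobs` package is split into `job`, `job_logging`, and `task_queue` modules, and the single-file `diracx/db/sql/utils.py` becomes a `utils/` package. A minimal sketch of the resulting import surface, assuming the classes keep their existing names (the exact re-exports added in `diracx/db/sql/__init__.py` are not shown in this diff):

```python
# Hypothetical usage after the split; module paths inferred from the file list above.
from diracx.db.sql.job.db import JobDB                  # formerly diracx/db/sql/jobs/db.py
from diracx.db.sql.job_logging.db import JobLoggingDB  # split out of jobs/db.py
from diracx.db.sql.task_queue.db import TaskQueueDB    # split out of jobs/db.py

# The helpers now live in the utils package (import shown in the diff below):
from diracx.db.sql.utils import BaseSQLDB, apply_search_filters, apply_sort_constraints
```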
diracx/db/sql/{jobs → job}/db.py
RENAMED
@@ -1,7 +1,6 @@
 from __future__ import annotations
 
 import logging
-import time
 from datetime import datetime, timezone
 from typing import TYPE_CHECKING, Any
 
@@ -15,34 +14,20 @@ from diracx.core.exceptions import InvalidQueryError, JobNotFound
 from diracx.core.models import (
     JobMinorStatus,
     JobStatus,
-    JobStatusReturn,
     LimitedJobStatusReturn,
     ScalarSearchOperator,
     ScalarSearchSpec,
     SearchSpec,
-    SortDirection,
     SortSpec,
 )
-from diracx.core.properties import JOB_SHARING, SecurityProperty
 
-from ..utils import BaseSQLDB, apply_search_filters
+from ..utils import BaseSQLDB, apply_search_filters, apply_sort_constraints
 from .schema import (
-    BannedSitesQueue,
-    GridCEsQueue,
     InputData,
     JobCommands,
     JobDBBase,
     JobJDLs,
-    JobLoggingDBBase,
     Jobs,
-    JobsQueue,
-    JobTypesQueue,
-    LoggingInfo,
-    PlatformsQueue,
-    SitesQueue,
-    TagsQueue,
-    TaskQueueDBBase,
-    TaskQueues,
 )
 
 
@@ -74,7 +59,7 @@ class JobDB(BaseSQLDB):
         columns = _get_columns(Jobs.__table__, group_by)
 
         stmt = select(*columns, func.count(Jobs.JobID).label("count"))
-        stmt = apply_search_filters(Jobs.__table__, stmt, search)
+        stmt = apply_search_filters(Jobs.__table__.columns.__getitem__, stmt, search)
         stmt = stmt.group_by(*columns)
 
         # Execute the query
@@ -98,27 +83,8 @@
         columns = _get_columns(Jobs.__table__, parameters)
         stmt = select(*columns)
 
-        stmt = apply_search_filters(Jobs.__table__, stmt, search)
-
-        # Apply any sort constraints
-        sort_columns = []
-        for sort in sorts:
-            if sort["parameter"] not in Jobs.__table__.columns:
-                raise InvalidQueryError(
-                    f"Cannot sort by {sort['parameter']}: unknown column"
-                )
-            column = Jobs.__table__.columns[sort["parameter"]]
-            sorted_column = None
-            if sort["direction"] == SortDirection.ASC:
-                sorted_column = column.asc()
-            elif sort["direction"] == SortDirection.DESC:
-                sorted_column = column.desc()
-            else:
-                raise InvalidQueryError(f"Unknown sort {sort['direction']=}")
-            sort_columns.append(sorted_column)
-
-        if sort_columns:
-            stmt = stmt.order_by(*sort_columns)
+        stmt = apply_search_filters(Jobs.__table__.columns.__getitem__, stmt, search)
+        stmt = apply_sort_constraints(Jobs.__table__.columns.__getitem__, stmt, sorts)
 
         if distinct:
             stmt = stmt.distinct()
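The inline sort-building loop removed above is factored out into `apply_sort_constraints`, and both helpers now take a column-resolver callable (here `Jobs.__table__.columns.__getitem__`) rather than the table itself, so the same code can serve any table. A sketch of what `apply_sort_constraints` presumably looks like in the new `diracx/db/sql/utils/__init__.py`, reconstructed from the removed inline code (the helper itself is not shown in this diff):

```python
from diracx.core.exceptions import InvalidQueryError
from diracx.core.models import SortDirection


def apply_sort_constraints(get_column, stmt, sorts):
    """Append ORDER BY clauses to stmt; get_column maps a parameter name to a column."""
    sort_columns = []
    for sort in sorts:
        try:
            column = get_column(sort["parameter"])
        except KeyError as e:
            raise InvalidQueryError(
                f"Cannot sort by {sort['parameter']}: unknown column"
            ) from e
        if sort["direction"] == SortDirection.ASC:
            sort_columns.append(column.asc())
        elif sort["direction"] == SortDirection.DESC:
            sort_columns.append(column.desc())
        else:
            raise InvalidQueryError(f"Unknown sort {sort['direction']=}")
    if sort_columns:
        stmt = stmt.order_by(*sort_columns)
    return stmt
```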
@@ -160,9 +126,7 @@
         await self.conn.execute(stmt)
 
     async def setJobAttributes(self, job_id, jobData):
-        """
-        TODO: add myDate and force parameters
-        """
+        """TODO: add myDate and force parameters."""
         if "Status" in jobData:
             jobData = jobData | {"LastUpdateTime": datetime.now(tz=timezone.utc)}
         stmt = update(Jobs).where(Jobs.JobID == job_id).values(jobData)
@@ -178,9 +142,8 @@
         job_attrs,
         vo,
     ):
-        """
-
-        Prepare subJDL with Job Requirements
+        """Check Consistency of Submitted JDL and set some defaults
+        Prepare subJDL with Job Requirements.
         """
         from DIRAC.Core.Utilities.DErrno import EWMSSUBM, cmpError
         from DIRAC.Core.Utilities.ReturnValues import returnValueOrRaise
@@ -330,7 +293,7 @@
         }
 
     async def rescheduleJob(self, job_id) -> dict[str, Any]:
-        """Reschedule given job"""
+        """Reschedule given job."""
         from DIRAC.Core.Utilities.ClassAd.ClassAdLight import ClassAd
         from DIRAC.Core.Utilities.ReturnValues import SErrorException
 
@@ -476,7 +439,7 @@
             raise JobNotFound(job_id) from e
 
     async def set_job_command(self, job_id: int, command: str, arguments: str = ""):
-        """Store a command to be passed to the job together with the next heart beat"""
+        """Store a command to be passed to the job together with the next heart beat."""
         try:
             stmt = insert(JobCommands).values(
                 JobID=job_id,
@@ -489,9 +452,7 @@
             raise JobNotFound(job_id) from e
 
     async def delete_jobs(self, job_ids: list[int]):
-        """
-        Delete jobs from the database
-        """
+        """Delete jobs from the database."""
         stmt = delete(JobJDLs).where(JobJDLs.JobID.in_(job_ids))
         await self.conn.execute(stmt)
 
@@ -499,7 +460,7 @@
         self, properties: dict[int, dict[str, Any]], update_timestamp: bool = False
     ) -> int:
         """Update the job parameters
-        All the jobs must update the same properties
+        All the jobs must update the same properties.
 
         :param properties: {job_id : {prop1: val1, prop2:val2}
         :param update_timestamp: if True, update the LastUpdate to now
@@ -507,11 +468,8 @@
         :return rowcount
 
         """
-
         # Check that all we always update the same set of properties
-        required_parameters_set = set(
-            [tuple(sorted(k.keys())) for k in properties.values()]
-        )
+        required_parameters_set = {tuple(sorted(k.keys())) for k in properties.values()}
 
         if len(required_parameters_set) != 1:
             raise NotImplementedError(
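The three-line `set(...)` call becomes a set comprehension with identical semantics: each job's property dict is reduced to a sorted tuple of its keys, and if all jobs update the same properties the set collapses to exactly one element. A quick illustration (not part of the package):

```python
# Homogeneous key sets collapse to a single element:
properties = {
    1: {"UserPriority": 2, "RescheduleCounter": 0},
    2: {"RescheduleCounter": 1, "UserPriority": 5},
}
required = {tuple(sorted(k.keys())) for k in properties.values()}
assert required == {("RescheduleCounter", "UserPriority")}

# A job updating a different set of properties breaks the invariant,
# which the method rejects with NotImplementedError:
properties[3] = {"UserPriority": 1}
required = {tuple(sorted(k.keys())) for k in properties.values()}
assert len(required) != 1
```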
@@ -532,401 +490,3 @@
         rows = await self.conn.execute(stmt, update_parameters)
 
         return rows.rowcount
-
-
-MAGIC_EPOC_NUMBER = 1270000000
-
-
-class JobLoggingDB(BaseSQLDB):
-    """Frontend for the JobLoggingDB. Provides the ability to store changes with timestamps"""
-
-    metadata = JobLoggingDBBase.metadata
-
-    async def insert_record(
-        self,
-        job_id: int,
-        status: JobStatus,
-        minor_status: str,
-        application_status: str,
-        date: datetime,
-        source: str,
-    ):
-        """
-        Add a new entry to the JobLoggingDB table. One, two or all the three status
-        components (status, minorStatus, applicationStatus) can be specified.
-        Optionally the time stamp of the status can
-        be provided in a form of a string in a format '%Y-%m-%d %H:%M:%S' or
-        as datetime.datetime object. If the time stamp is not provided the current
-        UTC time is used.
-        """
-
-        # First, fetch the maximum SeqNum for the given job_id
-        seqnum_stmt = select(func.coalesce(func.max(LoggingInfo.SeqNum) + 1, 1)).where(
-            LoggingInfo.JobID == job_id
-        )
-        seqnum = await self.conn.scalar(seqnum_stmt)
-
-        epoc = (
-            time.mktime(date.timetuple())
-            + date.microsecond / 1000000.0
-            - MAGIC_EPOC_NUMBER
-        )
-
-        stmt = insert(LoggingInfo).values(
-            JobID=int(job_id),
-            SeqNum=seqnum,
-            Status=status,
-            MinorStatus=minor_status,
-            ApplicationStatus=application_status[:255],
-            StatusTime=date,
-            StatusTimeOrder=epoc,
-            Source=source[:32],
-        )
-        await self.conn.execute(stmt)
-
-    async def get_records(self, job_id: int) -> list[JobStatusReturn]:
-        """Returns a Status,MinorStatus,ApplicationStatus,StatusTime,Source tuple
-        for each record found for job specified by its jobID in historical order
-        """
-
-        stmt = (
-            select(
-                LoggingInfo.Status,
-                LoggingInfo.MinorStatus,
-                LoggingInfo.ApplicationStatus,
-                LoggingInfo.StatusTime,
-                LoggingInfo.Source,
-            )
-            .where(LoggingInfo.JobID == int(job_id))
-            .order_by(LoggingInfo.StatusTimeOrder, LoggingInfo.StatusTime)
-        )
-        rows = await self.conn.execute(stmt)
-
-        values = []
-        for (
-            status,
-            minor_status,
-            application_status,
-            status_time,
-            status_source,
-        ) in rows:
-            values.append(
-                [
-                    status,
-                    minor_status,
-                    application_status,
-                    status_time.replace(tzinfo=timezone.utc),
-                    status_source,
-                ]
-            )
-
-        # If no value has been set for the application status in the first place,
-        # We put this status to unknown
-        res = []
-        if values:
-            if values[0][2] == "idem":
-                values[0][2] = "Unknown"
-
-            # We replace "idem" values by the value previously stated
-            for i in range(1, len(values)):
-                for j in range(3):
-                    if values[i][j] == "idem":
-                        values[i][j] = values[i - 1][j]
-
-            # And we replace arrays with tuples
-            for (
-                status,
-                minor_status,
-                application_status,
-                status_time,
-                status_source,
-            ) in values:
-                res.append(
-                    JobStatusReturn(
-                        Status=status,
-                        MinorStatus=minor_status,
-                        ApplicationStatus=application_status,
-                        StatusTime=status_time,
-                        Source=status_source,
-                    )
-                )
-
-        return res
-
-    async def delete_records(self, job_ids: list[int]):
-        """Delete logging records for given jobs"""
-        stmt = delete(LoggingInfo).where(LoggingInfo.JobID.in_(job_ids))
-        await self.conn.execute(stmt)
-
-    async def get_wms_time_stamps(self, job_id):
-        """Get TimeStamps for job MajorState transitions
-        return a {State:timestamp} dictionary
-        """
-
-        result = {}
-        stmt = select(
-            LoggingInfo.Status,
-            LoggingInfo.StatusTimeOrder,
-        ).where(LoggingInfo.JobID == job_id)
-        rows = await self.conn.execute(stmt)
-        if not rows.rowcount:
-            raise JobNotFound(job_id) from None
-
-        for event, etime in rows:
-            result[event] = str(etime + MAGIC_EPOC_NUMBER)
-
-        return result
-
-
-class TaskQueueDB(BaseSQLDB):
-    metadata = TaskQueueDBBase.metadata
-
-    async def get_tq_infos_for_jobs(
-        self, job_ids: list[int]
-    ) -> set[tuple[int, str, str, str]]:
-        """
-        Get the task queue info for given jobs
-        """
-        stmt = (
-            select(
-                TaskQueues.TQId, TaskQueues.Owner, TaskQueues.OwnerGroup, TaskQueues.VO
-            )
-            .join(JobsQueue, TaskQueues.TQId == JobsQueue.TQId)
-            .where(JobsQueue.JobId.in_(job_ids))
-        )
-        return set(
-            (int(row[0]), str(row[1]), str(row[2]), str(row[3]))
-            for row in (await self.conn.execute(stmt)).all()
-        )
-
-    async def get_owner_for_task_queue(self, tq_id: int) -> dict[str, str]:
-        """
-        Get the owner and owner group for a task queue
-        """
-        stmt = select(TaskQueues.Owner, TaskQueues.OwnerGroup, TaskQueues.VO).where(
-            TaskQueues.TQId == tq_id
-        )
-        return dict((await self.conn.execute(stmt)).one()._mapping)
-
-    async def remove_job(self, job_id: int):
-        """
-        Remove a job from the task queues
-        """
-        stmt = delete(JobsQueue).where(JobsQueue.JobId == job_id)
-        await self.conn.execute(stmt)
-
-    async def remove_jobs(self, job_ids: list[int]):
-        """
-        Remove jobs from the task queues
-        """
-        stmt = delete(JobsQueue).where(JobsQueue.JobId.in_(job_ids))
-        await self.conn.execute(stmt)
-
-    async def delete_task_queue_if_empty(
-        self,
-        tq_id: int,
-        tq_owner: str,
-        tq_group: str,
-        job_share: int,
-        group_properties: set[SecurityProperty],
-        enable_shares_correction: bool,
-        allow_background_tqs: bool,
-    ):
-        """
-        Try to delete a task queue if it's empty
-        """
-        # Check if the task queue is empty
-        stmt = (
-            select(TaskQueues.TQId)
-            .where(TaskQueues.Enabled >= 1)
-            .where(TaskQueues.TQId == tq_id)
-            .where(~TaskQueues.TQId.in_(select(JobsQueue.TQId)))
-        )
-        rows = await self.conn.execute(stmt)
-        if not rows.rowcount:
-            return
-
-        # Deleting the task queue (the other tables will be deleted in cascade)
-        stmt = delete(TaskQueues).where(TaskQueues.TQId == tq_id)
-        await self.conn.execute(stmt)
-
-        await self.recalculate_tq_shares_for_entity(
-            tq_owner,
-            tq_group,
-            job_share,
-            group_properties,
-            enable_shares_correction,
-            allow_background_tqs,
-        )
-
-    async def recalculate_tq_shares_for_entity(
-        self,
-        owner: str,
-        group: str,
-        job_share: int,
-        group_properties: set[SecurityProperty],
-        enable_shares_correction: bool,
-        allow_background_tqs: bool,
-    ):
-        """
-        Recalculate the shares for a user/userGroup combo
-        """
-        if JOB_SHARING in group_properties:
-            # If group has JobSharing just set prio for that entry, user is irrelevant
-            return await self.__set_priorities_for_entity(
-                owner, group, job_share, group_properties, allow_background_tqs
-            )
-
-        stmt = (
-            select(TaskQueues.Owner, func.count(TaskQueues.Owner))
-            .where(TaskQueues.OwnerGroup == group)
-            .group_by(TaskQueues.Owner)
-        )
-        rows = await self.conn.execute(stmt)
-        # make the rows a list of tuples
-        # Get owners in this group and the amount of times they appear
-        # TODO: I guess the rows are already a list of tupes
-        # maybe refactor
-        data = [(r[0], r[1]) for r in rows if r]
-        numOwners = len(data)
-        # If there are no owners do now
-        if numOwners == 0:
-            return
-        # Split the share amongst the number of owners
-        entities_shares = {row[0]: job_share / numOwners for row in data}
-
-        # TODO: implement the following
-        # If corrector is enabled let it work it's magic
-        # if enable_shares_correction:
-        #     entities_shares = await self.__shares_corrector.correct_shares(
-        #         entitiesShares, group=group
-        #     )
-
-        # Keep updating
-        owners = dict(data)
-        # IF the user is already known and has more than 1 tq, the rest of the users don't need to be modified
-        # (The number of owners didn't change)
-        if owner in owners and owners[owner] > 1:
-            await self.__set_priorities_for_entity(
-                owner,
-                group,
-                entities_shares[owner],
-                group_properties,
-                allow_background_tqs,
-            )
-            return
-        # Oops the number of owners may have changed so we recalculate the prio for all owners in the group
-        for owner in owners:
-            await self.__set_priorities_for_entity(
-                owner,
-                group,
-                entities_shares[owner],
-                group_properties,
-                allow_background_tqs,
-            )
-
-    async def __set_priorities_for_entity(
-        self,
-        owner: str,
-        group: str,
-        share,
-        properties: set[SecurityProperty],
-        allow_background_tqs: bool,
-    ):
-        """
-        Set the priority for a user/userGroup combo given a splitted share
-        """
-        from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import calculate_priority
-
-        stmt = (
-            select(
-                TaskQueues.TQId,
-                func.sum(JobsQueue.RealPriority) / func.count(JobsQueue.RealPriority),
-            )
-            .join(JobsQueue, TaskQueues.TQId == JobsQueue.TQId)
-            .where(TaskQueues.OwnerGroup == group)
-            .group_by(TaskQueues.TQId)
-        )
-        if JOB_SHARING not in properties:
-            stmt = stmt.where(TaskQueues.Owner == owner)
-        rows = await self.conn.execute(stmt)
-        tq_dict: dict[int, float] = {tq_id: priority for tq_id, priority in rows}
-
-        if not tq_dict:
-            return
-
-        rows = await self.retrieve_task_queues(list(tq_dict))
-
-        prio_dict = calculate_priority(tq_dict, rows, share, allow_background_tqs)
-
-        # Execute updates
-        for prio, tqs in prio_dict.items():
-            update_stmt = (
-                update(TaskQueues).where(TaskQueues.TQId.in_(tqs)).values(Priority=prio)
-            )
-            await self.conn.execute(update_stmt)
-
-    async def retrieve_task_queues(self, tq_id_list=None):
-        """
-        Get all the task queues
-        """
-        if tq_id_list is not None and not tq_id_list:
-            # Empty list => Fast-track no matches
-            return {}
-
-        stmt = (
-            select(
-                TaskQueues.TQId,
-                TaskQueues.Priority,
-                func.count(JobsQueue.TQId).label("Jobs"),
-                TaskQueues.Owner,
-                TaskQueues.OwnerGroup,
-                TaskQueues.VO,
-                TaskQueues.CPUTime,
-            )
-            .join(JobsQueue, TaskQueues.TQId == JobsQueue.TQId)
-            .join(SitesQueue, TaskQueues.TQId == SitesQueue.TQId)
-            .join(GridCEsQueue, TaskQueues.TQId == GridCEsQueue.TQId)
-            .group_by(
-                TaskQueues.TQId,
-                TaskQueues.Priority,
-                TaskQueues.Owner,
-                TaskQueues.OwnerGroup,
-                TaskQueues.VO,
-                TaskQueues.CPUTime,
-            )
-        )
-        if tq_id_list is not None:
-            stmt = stmt.where(TaskQueues.TQId.in_(tq_id_list))
-
-        tq_data: dict[int, dict[str, list[str]]] = dict(
-            dict(row._mapping) for row in await self.conn.execute(stmt)
-        )
-        # TODO: the line above should be equivalent to the following commented code, check this is the case
-        # for record in rows:
-        #     tqId = record[0]
-        #     tqData[tqId] = {
-        #         "Priority": record[1],
-        #         "Jobs": record[2],
-        #         "Owner": record[3],
-        #         "OwnerGroup": record[4],
-        #         "VO": record[5],
-        #         "CPUTime": record[6],
-        #     }
-
-        for tq_id in tq_data:
-            # TODO: maybe factorize this handy tuple list
-            for table, field in {
-                (SitesQueue, "Sites"),
-                (GridCEsQueue, "GridCEs"),
-                (BannedSitesQueue, "BannedSites"),
-                (PlatformsQueue, "Platforms"),
-                (JobTypesQueue, "JobTypes"),
-                (TagsQueue, "Tags"),
-            }:
-                stmt = select(table.Value).where(table.TQId == tq_id)
-                tq_data[tq_id][field] = list(
-                    row[0] for row in await self.conn.execute(stmt)
-                )
-
-        return tq_data
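Note that the roughly 400 lines removed here are a move, not a deletion: per the file list at the top, `JobLoggingDB` reappears in `diracx/db/sql/job_logging/db.py` and `TaskQueueDB` in `diracx/db/sql/task_queue/db.py`. The most opaque piece of the moved code is the `StatusTimeOrder` arithmetic, which stores seconds relative to a magic offset, presumably so the value fits the legacy `DOUBLE(12,3)` column noted in the schema below. A standalone illustration, assuming the moved code keeps this logic:

```python
import time
from datetime import datetime, timezone

MAGIC_EPOC_NUMBER = 1270000000  # offset applied before storing the timestamp

date = datetime(2024, 1, 1, 12, 0, 0, 500000, tzinfo=timezone.utc)

# As in the removed insert_record: seconds (with microseconds) minus the magic epoch.
# Note that time.mktime interprets the timetuple in local time, as in the original code.
epoc = (
    time.mktime(date.timetuple())
    + date.microsecond / 1000000.0
    - MAGIC_EPOC_NUMBER
)

# get_wms_time_stamps reverses the offset when reporting transition times:
timestamp = str(epoc + MAGIC_EPOC_NUMBER)
```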
diracx/db/sql/{jobs → job}/schema.py
RENAMED
@@ -1,29 +1,22 @@
 import sqlalchemy.types as types
 from sqlalchemy import (
-    BigInteger,
-    Boolean,
     DateTime,
     Enum,
-    Float,
     ForeignKey,
     Index,
     Integer,
-    Numeric,
-    PrimaryKeyConstraint,
     String,
     Text,
 )
 from sqlalchemy.orm import declarative_base
 
-from ..utils import Column, DateNowColumn, NullColumn
+from ..utils import Column, NullColumn
 
 JobDBBase = declarative_base()
-JobLoggingDBBase = declarative_base()
-TaskQueueDBBase = declarative_base()
 
 
 class EnumBackedBool(types.TypeDecorator):
-    """Maps a ``EnumBackedBool()`` column to True/False in Python"""
+    """Maps a ``EnumBackedBool()`` column to True/False in Python."""
 
     impl = types.Enum
     cache_ok: bool = True
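Only the docstring of `EnumBackedBool` changes here. For readers unfamiliar with the pattern: a `TypeDecorator` over `Enum` like this converts between a `("True", "False")` enum in the database and Python booleans, roughly as sketched below (a sketch of the pattern, not necessarily the package's exact code):

```python
import sqlalchemy.types as types


class EnumBackedBool(types.TypeDecorator):
    """Maps a ``EnumBackedBool()`` column to True/False in Python."""

    impl = types.Enum
    cache_ok: bool = True

    def __init__(self) -> None:
        # Store booleans as the strings "True"/"False" in an Enum column
        super().__init__("True", "False")

    def process_bind_param(self, value, dialect) -> str:
        # Python -> database
        if value is True:
            return "True"
        if value is False:
            return "False"
        raise NotImplementedError(f"Unsupported value {value!r}")

    def process_result_value(self, value, dialect) -> bool:
        # Database -> Python
        if value == "True":
            return True
        if value == "False":
            return False
        raise NotImplementedError(f"Unknown {value=}")
```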
@@ -179,112 +172,3 @@ class JobCommands(JobDBBase):
     Status = Column(String(64), default="Received")
     ReceptionTime = Column(DateTime, primary_key=True)
     ExecutionTime = NullColumn(DateTime)
-
-
-class LoggingInfo(JobLoggingDBBase):
-    __tablename__ = "LoggingInfo"
-    JobID = Column(Integer)
-    SeqNum = Column(Integer)
-    Status = Column(String(32), default="")
-    MinorStatus = Column(String(128), default="")
-    ApplicationStatus = Column(String(255), default="")
-    StatusTime = DateNowColumn()
-    # TODO: Check that this corresponds to the DOUBLE(12,3) type in MySQL
-    StatusTimeOrder = Column(Numeric(precision=12, scale=3), default=0)
-    Source = Column(String(32), default="Unknown", name="StatusSource")
-    __table_args__ = (PrimaryKeyConstraint("JobID", "SeqNum"),)
-
-
-class TaskQueues(TaskQueueDBBase):
-    __tablename__ = "tq_TaskQueues"
-    TQId = Column(Integer, primary_key=True)
-    Owner = Column(String(255), nullable=False)
-    OwnerGroup = Column(String(32), nullable=False)
-    VO = Column(String(32), nullable=False)
-    CPUTime = Column(BigInteger, nullable=False)
-    Priority = Column(Float, nullable=False)
-    Enabled = Column(Boolean, nullable=False, default=0)
-    __table_args__ = (Index("TQOwner", "Owner", "OwnerGroup", "CPUTime"),)
-
-
-class JobsQueue(TaskQueueDBBase):
-    __tablename__ = "tq_Jobs"
-    TQId = Column(
-        Integer, ForeignKey("tq_TaskQueues.TQId", ondelete="CASCADE"), primary_key=True
-    )
-    JobId = Column(Integer, primary_key=True)
-    Priority = Column(Integer, nullable=False)
-    RealPriority = Column(Float, nullable=False)
-    __table_args__ = (Index("TaskIndex", "TQId"),)
-
-
-class SitesQueue(TaskQueueDBBase):
-    __tablename__ = "tq_TQToSites"
-    TQId = Column(
-        Integer, ForeignKey("tq_TaskQueues.TQId", ondelete="CASCADE"), primary_key=True
-    )
-    Value = Column(String(64), primary_key=True)
-    __table_args__ = (
-        Index("SitesTaskIndex", "TQId"),
-        Index("SitesIndex", "Value"),
-    )
-
-
-class GridCEsQueue(TaskQueueDBBase):
-    __tablename__ = "tq_TQToGridCEs"
-    TQId = Column(
-        Integer, ForeignKey("tq_TaskQueues.TQId", ondelete="CASCADE"), primary_key=True
-    )
-    Value = Column(String(64), primary_key=True)
-    __table_args__ = (
-        Index("GridCEsTaskIndex", "TQId"),
-        Index("GridCEsValueIndex", "Value"),
-    )
-
-
-class BannedSitesQueue(TaskQueueDBBase):
-    __tablename__ = "tq_TQToBannedSites"
-    TQId = Column(
-        Integer, ForeignKey("tq_TaskQueues.TQId", ondelete="CASCADE"), primary_key=True
-    )
-    Value = Column(String(64), primary_key=True)
-    __table_args__ = (
-        Index("BannedSitesTaskIndex", "TQId"),
-        Index("BannedSitesValueIndex", "Value"),
-    )
-
-
-class PlatformsQueue(TaskQueueDBBase):
-    __tablename__ = "tq_TQToPlatforms"
-    TQId = Column(
-        Integer, ForeignKey("tq_TaskQueues.TQId", ondelete="CASCADE"), primary_key=True
-    )
-    Value = Column(String(64), primary_key=True)
-    __table_args__ = (
-        Index("PlatformsTaskIndex", "TQId"),
-        Index("PlatformsValueIndex", "Value"),
-    )
-
-
-class JobTypesQueue(TaskQueueDBBase):
-    __tablename__ = "tq_TQToJobTypes"
-    TQId = Column(
-        Integer, ForeignKey("tq_TaskQueues.TQId", ondelete="CASCADE"), primary_key=True
-    )
-    Value = Column(String(64), primary_key=True)
-    __table_args__ = (
-        Index("JobTypesTaskIndex", "TQId"),
-        Index("JobTypesValueIndex", "Value"),
-    )
-
-
-class TagsQueue(TaskQueueDBBase):
-    __tablename__ = "tq_TQToTags"
-    TQId = Column(
-        Integer, ForeignKey("tq_TaskQueues.TQId", ondelete="CASCADE"), primary_key=True
-    )
-    Value = Column(String(64), primary_key=True)
-    __table_args__ = (
-        Index("TagsTaskIndex", "TQId"),
-        Index("TagsValueIndex", "Value"),
-    )