diracx-db 0.0.1a17__py3-none-any.whl → 0.0.1a19__py3-none-any.whl

@@ -1,7 +1,6 @@
  from __future__ import annotations
 
  import logging
- import time
  from datetime import datetime, timezone
  from typing import TYPE_CHECKING, Any
 
@@ -15,34 +14,20 @@ from diracx.core.exceptions import InvalidQueryError, JobNotFound
  from diracx.core.models import (
  JobMinorStatus,
  JobStatus,
- JobStatusReturn,
  LimitedJobStatusReturn,
  ScalarSearchOperator,
  ScalarSearchSpec,
  SearchSpec,
- SortDirection,
  SortSpec,
  )
- from diracx.core.properties import JOB_SHARING, SecurityProperty
 
- from ..utils import BaseSQLDB, apply_search_filters
+ from ..utils import BaseSQLDB, apply_search_filters, apply_sort_constraints
  from .schema import (
- BannedSitesQueue,
- GridCEsQueue,
  InputData,
  JobCommands,
  JobDBBase,
  JobJDLs,
- JobLoggingDBBase,
  Jobs,
- JobsQueue,
- JobTypesQueue,
- LoggingInfo,
- PlatformsQueue,
- SitesQueue,
- TagsQueue,
- TaskQueueDBBase,
- TaskQueues,
  )
 
 
@@ -74,7 +59,7 @@ class JobDB(BaseSQLDB):
  columns = _get_columns(Jobs.__table__, group_by)
 
  stmt = select(*columns, func.count(Jobs.JobID).label("count"))
- stmt = apply_search_filters(Jobs.__table__, stmt, search)
+ stmt = apply_search_filters(Jobs.__table__.columns.__getitem__, stmt, search)
  stmt = stmt.group_by(*columns)
 
  # Execute the query
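
Both call sites now pass Jobs.__table__.columns.__getitem__ rather than the table itself, so apply_search_filters resolves columns through a callable and no longer needs to know about any particular table. A minimal sketch of such a helper, assuming a single "eq" operator; only the call signature is taken from this diff, the real helper in ..utils handles the full ScalarSearchOperator set:

    from collections.abc import Callable

    from sqlalchemy import Column
    from sqlalchemy.sql import Select

    from diracx.core.exceptions import InvalidQueryError


    def apply_search_filters(
        column_getter: Callable[[str], Column],  # e.g. Jobs.__table__.columns.__getitem__
        stmt: Select,
        search: list[dict],
    ) -> Select:
        for query in search:
            try:
                column = column_getter(query["parameter"])
            except KeyError as e:
                raise InvalidQueryError(f"Unknown column {query['parameter']}") from e
            if query["operator"] == "eq":  # illustrative; other operators omitted
                stmt = stmt.where(column == query["value"])
            else:
                raise InvalidQueryError(f"Unsupported operator {query['operator']}")
        return stmt
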
@@ -98,27 +83,8 @@ class JobDB(BaseSQLDB):
  columns = _get_columns(Jobs.__table__, parameters)
  stmt = select(*columns)
 
- stmt = apply_search_filters(Jobs.__table__, stmt, search)
-
- # Apply any sort constraints
- sort_columns = []
- for sort in sorts:
- if sort["parameter"] not in Jobs.__table__.columns:
- raise InvalidQueryError(
- f"Cannot sort by {sort['parameter']}: unknown column"
- )
- column = Jobs.__table__.columns[sort["parameter"]]
- sorted_column = None
- if sort["direction"] == SortDirection.ASC:
- sorted_column = column.asc()
- elif sort["direction"] == SortDirection.DESC:
- sorted_column = column.desc()
- else:
- raise InvalidQueryError(f"Unknown sort {sort['direction']=}")
- sort_columns.append(sorted_column)
-
- if sort_columns:
- stmt = stmt.order_by(*sort_columns)
+ stmt = apply_search_filters(Jobs.__table__.columns.__getitem__, stmt, search)
+ stmt = apply_sort_constraints(Jobs.__table__.columns.__getitem__, stmt, sorts)
 
  if distinct:
  stmt = stmt.distinct()
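
The inline sort handling deleted above has been factored into apply_sort_constraints, now imported from ..utils alongside apply_search_filters. A plausible reconstruction from the deleted lines, using the same column-getter convention (the shipped helper may differ in detail):

    from collections.abc import Callable

    from sqlalchemy import Column
    from sqlalchemy.sql import Select

    from diracx.core.exceptions import InvalidQueryError
    from diracx.core.models import SortDirection, SortSpec


    def apply_sort_constraints(
        column_getter: Callable[[str], Column], stmt: Select, sorts: list[SortSpec]
    ) -> Select:
        sort_columns = []
        for sort in sorts:
            try:
                # ColumnCollection.__getitem__ raises KeyError for unknown names
                column = column_getter(sort["parameter"])
            except KeyError as e:
                raise InvalidQueryError(
                    f"Cannot sort by {sort['parameter']}: unknown column"
                ) from e
            if sort["direction"] == SortDirection.ASC:
                sort_columns.append(column.asc())
            elif sort["direction"] == SortDirection.DESC:
                sort_columns.append(column.desc())
            else:
                raise InvalidQueryError(f"Unknown sort {sort['direction']=}")
        if sort_columns:
            stmt = stmt.order_by(*sort_columns)
        return stmt
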
@@ -160,9 +126,7 @@ class JobDB(BaseSQLDB):
  await self.conn.execute(stmt)
 
  async def setJobAttributes(self, job_id, jobData):
- """
- TODO: add myDate and force parameters
- """
+ """TODO: add myDate and force parameters."""
  if "Status" in jobData:
  jobData = jobData | {"LastUpdateTime": datetime.now(tz=timezone.utc)}
  stmt = update(Jobs).where(Jobs.JobID == job_id).values(jobData)
@@ -178,9 +142,8 @@ class JobDB(BaseSQLDB):
  job_attrs,
  vo,
  ):
- """
- Check Consistency of Submitted JDL and set some defaults
- Prepare subJDL with Job Requirements
+ """Check Consistency of Submitted JDL and set some defaults
+ Prepare subJDL with Job Requirements.
  """
  from DIRAC.Core.Utilities.DErrno import EWMSSUBM, cmpError
  from DIRAC.Core.Utilities.ReturnValues import returnValueOrRaise
@@ -330,7 +293,7 @@ class JobDB(BaseSQLDB):
  }
 
  async def rescheduleJob(self, job_id) -> dict[str, Any]:
- """Reschedule given job"""
+ """Reschedule given job."""
  from DIRAC.Core.Utilities.ClassAd.ClassAdLight import ClassAd
  from DIRAC.Core.Utilities.ReturnValues import SErrorException
 
@@ -476,7 +439,7 @@ class JobDB(BaseSQLDB):
  raise JobNotFound(job_id) from e
 
  async def set_job_command(self, job_id: int, command: str, arguments: str = ""):
- """Store a command to be passed to the job together with the next heart beat"""
+ """Store a command to be passed to the job together with the next heart beat."""
  try:
  stmt = insert(JobCommands).values(
  JobID=job_id,
@@ -489,9 +452,7 @@ class JobDB(BaseSQLDB):
  raise JobNotFound(job_id) from e
 
  async def delete_jobs(self, job_ids: list[int]):
- """
- Delete jobs from the database
- """
+ """Delete jobs from the database."""
  stmt = delete(JobJDLs).where(JobJDLs.JobID.in_(job_ids))
  await self.conn.execute(stmt)
 
@@ -499,7 +460,7 @@ class JobDB(BaseSQLDB):
  self, properties: dict[int, dict[str, Any]], update_timestamp: bool = False
  ) -> int:
  """Update the job parameters
- All the jobs must update the same properties
+ All the jobs must update the same properties.
 
  :param properties: {job_id : {prop1: val1, prop2:val2}
  :param update_timestamp: if True, update the LastUpdate to now
@@ -507,11 +468,8 @@ class JobDB(BaseSQLDB):
  :return rowcount
 
  """
-
  # Check that all we always update the same set of properties
- required_parameters_set = set(
- [tuple(sorted(k.keys())) for k in properties.values()]
- )
+ required_parameters_set = {tuple(sorted(k.keys())) for k in properties.values()}
 
  if len(required_parameters_set) != 1:
  raise NotImplementedError(
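
The set() call over a list comprehension becomes an equivalent set comprehension. Either way, sorted() makes the check independent of key order, so the set collapses to a single element exactly when every job updates the same properties:

    properties = {
        1: {"Status": "Done", "MinorStatus": "ok"},
        2: {"MinorStatus": "ok", "Status": "Failed"},
    }
    # Both jobs update the same key set, so exactly one element survives:
    assert {tuple(sorted(k.keys())) for k in properties.values()} == {
        ("MinorStatus", "Status")
    }
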
@@ -532,401 +490,3 @@ class JobDB(BaseSQLDB):
  rows = await self.conn.execute(stmt, update_parameters)
 
  return rows.rowcount
-
-
- MAGIC_EPOC_NUMBER = 1270000000
-
-
- class JobLoggingDB(BaseSQLDB):
- """Frontend for the JobLoggingDB. Provides the ability to store changes with timestamps"""
-
- metadata = JobLoggingDBBase.metadata
-
- async def insert_record(
- self,
- job_id: int,
- status: JobStatus,
- minor_status: str,
- application_status: str,
- date: datetime,
- source: str,
- ):
- """
- Add a new entry to the JobLoggingDB table. One, two or all the three status
- components (status, minorStatus, applicationStatus) can be specified.
- Optionally the time stamp of the status can
- be provided in a form of a string in a format '%Y-%m-%d %H:%M:%S' or
- as datetime.datetime object. If the time stamp is not provided the current
- UTC time is used.
- """
-
- # First, fetch the maximum SeqNum for the given job_id
- seqnum_stmt = select(func.coalesce(func.max(LoggingInfo.SeqNum) + 1, 1)).where(
- LoggingInfo.JobID == job_id
- )
- seqnum = await self.conn.scalar(seqnum_stmt)
-
- epoc = (
- time.mktime(date.timetuple())
- + date.microsecond / 1000000.0
- - MAGIC_EPOC_NUMBER
- )
-
- stmt = insert(LoggingInfo).values(
- JobID=int(job_id),
- SeqNum=seqnum,
- Status=status,
- MinorStatus=minor_status,
- ApplicationStatus=application_status[:255],
- StatusTime=date,
- StatusTimeOrder=epoc,
- Source=source[:32],
- )
- await self.conn.execute(stmt)
-
- async def get_records(self, job_id: int) -> list[JobStatusReturn]:
- """Returns a Status,MinorStatus,ApplicationStatus,StatusTime,Source tuple
- for each record found for job specified by its jobID in historical order
- """
-
- stmt = (
- select(
- LoggingInfo.Status,
- LoggingInfo.MinorStatus,
- LoggingInfo.ApplicationStatus,
- LoggingInfo.StatusTime,
- LoggingInfo.Source,
- )
- .where(LoggingInfo.JobID == int(job_id))
- .order_by(LoggingInfo.StatusTimeOrder, LoggingInfo.StatusTime)
- )
- rows = await self.conn.execute(stmt)
-
- values = []
- for (
- status,
- minor_status,
- application_status,
- status_time,
- status_source,
- ) in rows:
- values.append(
- [
- status,
- minor_status,
- application_status,
- status_time.replace(tzinfo=timezone.utc),
- status_source,
- ]
- )
-
- # If no value has been set for the application status in the first place,
- # We put this status to unknown
- res = []
- if values:
- if values[0][2] == "idem":
- values[0][2] = "Unknown"
-
- # We replace "idem" values by the value previously stated
- for i in range(1, len(values)):
- for j in range(3):
- if values[i][j] == "idem":
- values[i][j] = values[i - 1][j]
-
- # And we replace arrays with tuples
- for (
- status,
- minor_status,
- application_status,
- status_time,
- status_source,
- ) in values:
- res.append(
- JobStatusReturn(
- Status=status,
- MinorStatus=minor_status,
- ApplicationStatus=application_status,
- StatusTime=status_time,
- Source=status_source,
- )
- )
-
- return res
-
- async def delete_records(self, job_ids: list[int]):
- """Delete logging records for given jobs"""
- stmt = delete(LoggingInfo).where(LoggingInfo.JobID.in_(job_ids))
- await self.conn.execute(stmt)
-
- async def get_wms_time_stamps(self, job_id):
- """Get TimeStamps for job MajorState transitions
- return a {State:timestamp} dictionary
- """
-
- result = {}
- stmt = select(
- LoggingInfo.Status,
- LoggingInfo.StatusTimeOrder,
- ).where(LoggingInfo.JobID == job_id)
- rows = await self.conn.execute(stmt)
- if not rows.rowcount:
- raise JobNotFound(job_id) from None
-
- for event, etime in rows:
- result[event] = str(etime + MAGIC_EPOC_NUMBER)
-
- return result
-
-
- class TaskQueueDB(BaseSQLDB):
- metadata = TaskQueueDBBase.metadata
-
- async def get_tq_infos_for_jobs(
- self, job_ids: list[int]
- ) -> set[tuple[int, str, str, str]]:
- """
- Get the task queue info for given jobs
- """
- stmt = (
- select(
- TaskQueues.TQId, TaskQueues.Owner, TaskQueues.OwnerGroup, TaskQueues.VO
- )
- .join(JobsQueue, TaskQueues.TQId == JobsQueue.TQId)
- .where(JobsQueue.JobId.in_(job_ids))
- )
- return set(
- (int(row[0]), str(row[1]), str(row[2]), str(row[3]))
- for row in (await self.conn.execute(stmt)).all()
- )
-
- async def get_owner_for_task_queue(self, tq_id: int) -> dict[str, str]:
- """
- Get the owner and owner group for a task queue
- """
- stmt = select(TaskQueues.Owner, TaskQueues.OwnerGroup, TaskQueues.VO).where(
- TaskQueues.TQId == tq_id
- )
- return dict((await self.conn.execute(stmt)).one()._mapping)
-
- async def remove_job(self, job_id: int):
- """
- Remove a job from the task queues
- """
- stmt = delete(JobsQueue).where(JobsQueue.JobId == job_id)
- await self.conn.execute(stmt)
-
- async def remove_jobs(self, job_ids: list[int]):
- """
- Remove jobs from the task queues
- """
- stmt = delete(JobsQueue).where(JobsQueue.JobId.in_(job_ids))
- await self.conn.execute(stmt)
-
- async def delete_task_queue_if_empty(
- self,
- tq_id: int,
- tq_owner: str,
- tq_group: str,
- job_share: int,
- group_properties: set[SecurityProperty],
- enable_shares_correction: bool,
- allow_background_tqs: bool,
- ):
- """
- Try to delete a task queue if it's empty
- """
- # Check if the task queue is empty
- stmt = (
- select(TaskQueues.TQId)
- .where(TaskQueues.Enabled >= 1)
- .where(TaskQueues.TQId == tq_id)
- .where(~TaskQueues.TQId.in_(select(JobsQueue.TQId)))
- )
- rows = await self.conn.execute(stmt)
- if not rows.rowcount:
- return
-
- # Deleting the task queue (the other tables will be deleted in cascade)
- stmt = delete(TaskQueues).where(TaskQueues.TQId == tq_id)
- await self.conn.execute(stmt)
-
- await self.recalculate_tq_shares_for_entity(
- tq_owner,
- tq_group,
- job_share,
- group_properties,
- enable_shares_correction,
- allow_background_tqs,
- )
-
- async def recalculate_tq_shares_for_entity(
- self,
- owner: str,
- group: str,
- job_share: int,
- group_properties: set[SecurityProperty],
- enable_shares_correction: bool,
- allow_background_tqs: bool,
- ):
- """
- Recalculate the shares for a user/userGroup combo
- """
- if JOB_SHARING in group_properties:
- # If group has JobSharing just set prio for that entry, user is irrelevant
- return await self.__set_priorities_for_entity(
- owner, group, job_share, group_properties, allow_background_tqs
- )
-
- stmt = (
- select(TaskQueues.Owner, func.count(TaskQueues.Owner))
- .where(TaskQueues.OwnerGroup == group)
- .group_by(TaskQueues.Owner)
- )
- rows = await self.conn.execute(stmt)
- # make the rows a list of tuples
- # Get owners in this group and the amount of times they appear
- # TODO: I guess the rows are already a list of tupes
- # maybe refactor
- data = [(r[0], r[1]) for r in rows if r]
- numOwners = len(data)
- # If there are no owners do now
- if numOwners == 0:
- return
- # Split the share amongst the number of owners
- entities_shares = {row[0]: job_share / numOwners for row in data}
-
- # TODO: implement the following
- # If corrector is enabled let it work it's magic
- # if enable_shares_correction:
- # entities_shares = await self.__shares_corrector.correct_shares(
- # entitiesShares, group=group
- # )
-
- # Keep updating
- owners = dict(data)
- # IF the user is already known and has more than 1 tq, the rest of the users don't need to be modified
- # (The number of owners didn't change)
- if owner in owners and owners[owner] > 1:
- await self.__set_priorities_for_entity(
- owner,
- group,
- entities_shares[owner],
- group_properties,
- allow_background_tqs,
- )
- return
- # Oops the number of owners may have changed so we recalculate the prio for all owners in the group
- for owner in owners:
- await self.__set_priorities_for_entity(
- owner,
- group,
- entities_shares[owner],
- group_properties,
- allow_background_tqs,
- )
-
- async def __set_priorities_for_entity(
- self,
- owner: str,
- group: str,
- share,
- properties: set[SecurityProperty],
- allow_background_tqs: bool,
- ):
- """
- Set the priority for a user/userGroup combo given a splitted share
- """
- from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import calculate_priority
-
- stmt = (
- select(
- TaskQueues.TQId,
- func.sum(JobsQueue.RealPriority) / func.count(JobsQueue.RealPriority),
- )
- .join(JobsQueue, TaskQueues.TQId == JobsQueue.TQId)
- .where(TaskQueues.OwnerGroup == group)
- .group_by(TaskQueues.TQId)
- )
- if JOB_SHARING not in properties:
- stmt = stmt.where(TaskQueues.Owner == owner)
- rows = await self.conn.execute(stmt)
- tq_dict: dict[int, float] = {tq_id: priority for tq_id, priority in rows}
-
- if not tq_dict:
- return
-
- rows = await self.retrieve_task_queues(list(tq_dict))
-
- prio_dict = calculate_priority(tq_dict, rows, share, allow_background_tqs)
-
- # Execute updates
- for prio, tqs in prio_dict.items():
- update_stmt = (
- update(TaskQueues).where(TaskQueues.TQId.in_(tqs)).values(Priority=prio)
- )
- await self.conn.execute(update_stmt)
-
- async def retrieve_task_queues(self, tq_id_list=None):
- """
- Get all the task queues
- """
- if tq_id_list is not None and not tq_id_list:
- # Empty list => Fast-track no matches
- return {}
-
- stmt = (
- select(
- TaskQueues.TQId,
- TaskQueues.Priority,
- func.count(JobsQueue.TQId).label("Jobs"),
- TaskQueues.Owner,
- TaskQueues.OwnerGroup,
- TaskQueues.VO,
- TaskQueues.CPUTime,
- )
- .join(JobsQueue, TaskQueues.TQId == JobsQueue.TQId)
- .join(SitesQueue, TaskQueues.TQId == SitesQueue.TQId)
- .join(GridCEsQueue, TaskQueues.TQId == GridCEsQueue.TQId)
- .group_by(
- TaskQueues.TQId,
- TaskQueues.Priority,
- TaskQueues.Owner,
- TaskQueues.OwnerGroup,
- TaskQueues.VO,
- TaskQueues.CPUTime,
- )
- )
- if tq_id_list is not None:
- stmt = stmt.where(TaskQueues.TQId.in_(tq_id_list))
-
- tq_data: dict[int, dict[str, list[str]]] = dict(
- dict(row._mapping) for row in await self.conn.execute(stmt)
- )
- # TODO: the line above should be equivalent to the following commented code, check this is the case
- # for record in rows:
- # tqId = record[0]
- # tqData[tqId] = {
- # "Priority": record[1],
- # "Jobs": record[2],
- # "Owner": record[3],
- # "OwnerGroup": record[4],
- # "VO": record[5],
- # "CPUTime": record[6],
- # }
-
- for tq_id in tq_data:
- # TODO: maybe factorize this handy tuple list
- for table, field in {
- (SitesQueue, "Sites"),
- (GridCEsQueue, "GridCEs"),
- (BannedSitesQueue, "BannedSites"),
- (PlatformsQueue, "Platforms"),
- (JobTypesQueue, "JobTypes"),
- (TagsQueue, "Tags"),
- }:
- stmt = select(table.Value).where(table.TQId == tq_id)
- tq_data[tq_id][field] = list(
- row[0] for row in await self.conn.execute(stmt)
- )
-
- return tq_data
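
This hunk removes JobLoggingDB and TaskQueueDB (together with the time import and the task-queue schema names dropped at the top of the file); the diff itself does not show where, or whether, they are reintroduced elsewhere in 0.0.1a19. For reference, the deleted insert_record stored StatusTimeOrder as seconds relative to MAGIC_EPOC_NUMBER (Unix time 1270000000, roughly the end of March 2010 UTC), and get_wms_time_stamps added the offset back when reporting:

    import time
    from datetime import datetime

    MAGIC_EPOC_NUMBER = 1270000000

    # Same arithmetic as the deleted insert_record; note that time.mktime
    # interprets the naive datetime in *local* time, a quirk of the original code.
    date = datetime(2024, 1, 1, 12, 0, 0, 500000)
    epoc = (
        time.mktime(date.timetuple())
        + date.microsecond / 1000000.0
        - MAGIC_EPOC_NUMBER
    )
    # get_wms_time_stamps reversed the offset when reporting timestamps:
    assert epoc + MAGIC_EPOC_NUMBER == time.mktime(date.timetuple()) + 0.5
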
@@ -0,0 +1,129 @@
+ from sqlalchemy import (
+ DateTime,
+ Enum,
+ ForeignKey,
+ Index,
+ Integer,
+ String,
+ Text,
+ )
+ from sqlalchemy.orm import declarative_base
+
+ from ..utils import Column, EnumBackedBool, NullColumn
+
+ JobDBBase = declarative_base()
+
+
+ class Jobs(JobDBBase):
+ __tablename__ = "Jobs"
+
+ JobID = Column(
+ "JobID",
+ Integer,
+ ForeignKey("JobJDLs.JobID", ondelete="CASCADE"),
+ primary_key=True,
+ default=0,
+ )
+ JobType = Column("JobType", String(32), default="user")
+ JobGroup = Column("JobGroup", String(32), default="00000000")
+ Site = Column("Site", String(100), default="ANY")
+ JobName = Column("JobName", String(128), default="Unknown")
+ Owner = Column("Owner", String(64), default="Unknown")
+ OwnerGroup = Column("OwnerGroup", String(128), default="Unknown")
+ VO = Column("VO", String(32))
+ SubmissionTime = NullColumn("SubmissionTime", DateTime)
+ RescheduleTime = NullColumn("RescheduleTime", DateTime)
+ LastUpdateTime = NullColumn("LastUpdateTime", DateTime)
+ StartExecTime = NullColumn("StartExecTime", DateTime)
+ HeartBeatTime = NullColumn("HeartBeatTime", DateTime)
+ EndExecTime = NullColumn("EndExecTime", DateTime)
+ Status = Column("Status", String(32), default="Received")
+ MinorStatus = Column("MinorStatus", String(128), default="Unknown")
+ ApplicationStatus = Column("ApplicationStatus", String(255), default="Unknown")
+ UserPriority = Column("UserPriority", Integer, default=0)
+ RescheduleCounter = Column("RescheduleCounter", Integer, default=0)
+ VerifiedFlag = Column("VerifiedFlag", EnumBackedBool(), default=False)
+ # TODO: Should this be True/False/"Failed"? Or True/False/Null?
+ AccountedFlag = Column(
+ "AccountedFlag", Enum("True", "False", "Failed"), default="False"
+ )
+
+ __table_args__ = (
+ Index("JobType", "JobType"),
+ Index("JobGroup", "JobGroup"),
+ Index("Site", "Site"),
+ Index("Owner", "Owner"),
+ Index("OwnerGroup", "OwnerGroup"),
+ Index("Status", "Status"),
+ Index("MinorStatus", "MinorStatus"),
+ Index("ApplicationStatus", "ApplicationStatus"),
+ Index("StatusSite", "Status", "Site"),
+ Index("LastUpdateTime", "LastUpdateTime"),
+ )
+
+
+ class JobJDLs(JobDBBase):
+ __tablename__ = "JobJDLs"
+ JobID = Column(Integer, autoincrement=True, primary_key=True)
+ JDL = Column(Text)
+ JobRequirements = Column(Text)
+ OriginalJDL = Column(Text)
+
+
+ class InputData(JobDBBase):
+ __tablename__ = "InputData"
+ JobID = Column(
+ Integer, ForeignKey("Jobs.JobID", ondelete="CASCADE"), primary_key=True
+ )
+ LFN = Column(String(255), default="", primary_key=True)
+ Status = Column(String(32), default="AprioriGood")
+
+
+ class JobParameters(JobDBBase):
+ __tablename__ = "JobParameters"
+ JobID = Column(
+ Integer, ForeignKey("Jobs.JobID", ondelete="CASCADE"), primary_key=True
+ )
+ Name = Column(String(100), primary_key=True)
+ Value = Column(Text)
+
+
+ class OptimizerParameters(JobDBBase):
+ __tablename__ = "OptimizerParameters"
+ JobID = Column(
+ Integer, ForeignKey("Jobs.JobID", ondelete="CASCADE"), primary_key=True
+ )
+ Name = Column(String(100), primary_key=True)
+ Value = Column(Text)
+
+
+ class AtticJobParameters(JobDBBase):
+ __tablename__ = "AtticJobParameters"
+ JobID = Column(
+ Integer, ForeignKey("Jobs.JobID", ondelete="CASCADE"), primary_key=True
+ )
+ Name = Column(String(100), primary_key=True)
+ Value = Column(Text)
+ RescheduleCycle = Column(Integer)
+
+
+ class HeartBeatLoggingInfo(JobDBBase):
+ __tablename__ = "HeartBeatLoggingInfo"
+ JobID = Column(
+ Integer, ForeignKey("Jobs.JobID", ondelete="CASCADE"), primary_key=True
+ )
+ Name = Column(String(100), primary_key=True)
+ Value = Column(Text)
+ HeartBeatTime = Column(DateTime, primary_key=True)
+
+
+ class JobCommands(JobDBBase):
+ __tablename__ = "JobCommands"
+ JobID = Column(
+ Integer, ForeignKey("Jobs.JobID", ondelete="CASCADE"), primary_key=True
+ )
+ Command = Column(String(100))
+ Arguments = Column(String(100))
+ Status = Column(String(64), default="Received")
+ ReceptionTime = Column(DateTime, primary_key=True)
+ ExecutionTime = NullColumn(DateTime)
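
The new schema module stores booleans such as VerifiedFlag through EnumBackedBool from ..utils, which, judging by its name and by the AccountedFlag TODO above, presents a Python bool over the legacy 'True'/'False' enum strings of the MySQL schema. A hedged sketch of such a type as a SQLAlchemy TypeDecorator; the actual implementation in ..utils may differ:

    from sqlalchemy.types import Enum, TypeDecorator


    class EnumBackedBool(TypeDecorator):
        """Expose a Python bool while storing 'True'/'False' enum strings."""

        impl = Enum("True", "False")
        cache_ok = True

        def process_bind_param(self, value, dialect):
            return "True" if value else "False"  # Python -> DB

        def process_result_value(self, value, dialect):
            return value == "True"  # DB -> Python

Declared this way, VerifiedFlag accepts and returns plain bools in Python while the database keeps the old enum column layout.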