flux-batch 0.0.0-py3-none-any.whl → 0.0.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- flux_batch/jobspec.py +26 -1
- flux_batch/models.py +5 -0
- flux_batch/script/__init__.py +16 -0
- flux_batch/script/save_logs.sh +16 -0
- flux_batch/service/__init__.py +1 -1
- flux_batch/service/scribe/__init__.py +1 -0
- flux_batch/service/scribe/__main__.py +113 -0
- flux_batch/service/scribe/database.py +150 -0
- flux_batch/service/scribe/models.py +94 -0
- flux_batch/service/scribe/template.py +12 -0
- flux_batch/version.py +3 -2
- {flux_batch-0.0.0.dist-info → flux_batch-0.0.1.dist-info}/METADATA +19 -5
- flux_batch-0.0.1.dist-info/RECORD +28 -0
- flux_batch-0.0.0.dist-info/RECORD +0 -21
- {flux_batch-0.0.0.dist-info → flux_batch-0.0.1.dist-info}/LICENSE +0 -0
- {flux_batch-0.0.0.dist-info → flux_batch-0.0.1.dist-info}/NOTICE +0 -0
- {flux_batch-0.0.0.dist-info → flux_batch-0.0.1.dist-info}/WHEEL +0 -0
- {flux_batch-0.0.0.dist-info → flux_batch-0.0.1.dist-info}/entry_points.txt +0 -0
- {flux_batch-0.0.0.dist-info → flux_batch-0.0.1.dist-info}/top_level.txt +0 -0
flux_batch/jobspec.py
CHANGED

@@ -2,6 +2,7 @@ import shlex
 from typing import List
 
 import flux_batch.models as models
+import flux_batch.script as scripts
 
 
 class BatchJobspecV1:
@@ -119,14 +120,38 @@ class BatchJobspecV1:
         4. Add jobs/commands
         5. Stop services
         6. And epilogs
-        """
+        7. Custom scripts
 
+        Yes, it's redundant to write them as comments but I like the organization. -v
+        """
+        # hashbang
         lines = ["#!/bin/bash"]
+
+        # prologs
         lines.extend(self.prologs)
         for s in self.services:
             lines.append(f"systemctl --user start {s}")
+
+        # commands that are derived from jobs or command
         lines.extend(self.commands)
+
+        # stop services
         for s in reversed(self.services):
             lines.append(f"systemctl --user stop {s}")
+
+        # epilogs
         lines.extend(self.epilogs)
+
+        # custom user scripts
+        if self.attributes.logs_dir is not None:
+            lines.append(self.script_save_logs())
         return "\n".join(lines)
+
+    def script_save_logs(self):
+        """
+        Custom saving of logs. This is what we wrote for our performance study!
+        """
+        script_path = scripts.get_script("save_logs.sh")
+
+        # Determine output directory (use home default if not defined)
+        return f"bash {script_path} {self.attributes.logs_dir}"
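For orientation, here is a minimal standalone sketch of what the assembled wrapper script looks like with the new `logs_dir` hook; the inline values stand in for `self.prologs`, `self.services`, `self.commands`, `self.epilogs`, and `self.attributes.logs_dir`, and the script path is hypothetical:

```python
# Hedged sketch of the assembly logic in the hunk above, not the package API.
prologs = ["echo 'Batch Wrapper Starting'"]
services = ["flux-scribe"]
commands = ["flux run hostname"]  # hypothetical job command
epilogs = ["echo 'Batch Wrapper Finished'"]
logs_dir = "./logs"  # the new attribute; None skips the save_logs step

lines = ["#!/bin/bash"]
lines.extend(prologs)
lines.extend(f"systemctl --user start {s}" for s in services)
lines.extend(commands)
lines.extend(f"systemctl --user stop {s}" for s in reversed(services))
lines.extend(epilogs)
if logs_dir is not None:
    # script_save_logs() resolves the packaged save_logs.sh via get_script()
    lines.append(f"bash /path/to/flux_batch/script/save_logs.sh {logs_dir}")
print("\n".join(lines))
```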
flux_batch/models.py
CHANGED

@@ -24,6 +24,11 @@ class BatchAttributesV1:
     Explicitly defined arguments allowed by flux batch for V1 spec
     """
 
+    # These are added / custom to our module
+    # If logs directory defined (not None) save output there
+    # We force the user to provide something.
+    logs_dir: Optional[Union[bool, str]] = None
+
     # Resources
     nslots: Optional[int] = None  # -n
     cores_per_slot: Optional[int] = None  # -c
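A quick illustration of the new attribute, assuming `BatchAttributesV1` takes keyword arguments as the dataclass-style fields suggest:

```python
from flux_batch.models import BatchAttributesV1

# A string path enables log saving in the generated batch script;
# the default of None leaves the script unchanged.
attrs = BatchAttributesV1(logs_dir="./logs")
print(attrs.logs_dir)  # ./logs
```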
flux_batch/script/__init__.py
ADDED

@@ -0,0 +1,16 @@
+import os
+
+import flux_batch
+
+
+def get_script(name):
+    """
+    Get a script by name
+    """
+    # Find the path to the installed script
+    base_path = os.path.dirname(os.path.abspath(flux_batch.__file__))
+    script_path = os.path.join(base_path, "script", name)
+    if not os.path.exists(script_path):
+        print(f"Warning: script {name} does not exist")
+        return
+    return script_path
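Resolving a packaged script is a one-liner; a small sketch:

```python
import flux_batch.script as scripts

# Returns the installed path, e.g. .../site-packages/flux_batch/script/save_logs.sh,
# or None (after printing a warning) if the name is unknown
path = scripts.get_script("save_logs.sh")
print(path)
```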
flux_batch/script/save_logs.sh
ADDED

@@ -0,0 +1,16 @@
+#!/bin/bash
+output=$1
+mkdir -p $output
+
+echo "Saving logs and job metadata to ${output}"
+
+# This will save logs, events, and jobspecs
+for jobid in $(flux jobs -a --json | jq -r .jobs[].id)
+do
+    echo "Parsing jobid ${jobid}"
+    flux job attach $jobid &> $output/${jobid}.out
+    echo "START OF JOBSPEC" >> $output/${jobid}.out
+    flux job info $jobid jobspec >> $output/${jobid}.out
+    echo "START OF EVENTLOG" >> $output/${jobid}.out
+    flux job info $jobid guest.exec.eventlog >> $output/${jobid}.out
+done
flux_batch/service/__init__.py
CHANGED

flux_batch/service/scribe/__init__.py
ADDED

@@ -0,0 +1 @@
+from .template import SERVICE_TEMPLATE
flux_batch/service/scribe/__main__.py
ADDED

@@ -0,0 +1,113 @@
+#!/usr/bin/env python3
+import errno
+import logging
+import os
+import sys
+import time
+
+import flux
+import flux.job
+
+# Not necessary, but it makes it pretty
+from rich import print
+
+# Use the synchronous version of the backend to avoid asyncio-in-thread conflicts
+from flux_batch.service.scribe.database import SQLAlchemyBackend
+
+# Setup logging to stderr (to avoid polluting stdout if run manually)
+logging.basicConfig(
+    level=logging.INFO, format="%(asctime)s %(levelname)s: %(message)s", stream=sys.stderr
+)
+logger = logging.getLogger("flux-scribe")
+
+
+class JournalScribe:
+    def __init__(self, db_url: str):
+        """
+        Initializes the Scribe with a synchronous DB backend and a Flux Journal Consumer.
+        """
+        # Setup Database
+        logger.info(f"Connecting to Database: {db_url}")
+        self.db = SQLAlchemyBackend(db_url)
+        self.db.initialize()
+
+        try:
+            self.handle = flux.Flux()
+            logger.info("Connected to Flux instance.")
+        except Exception as e:
+            logger.critical(f"Failed to connect to Flux: {e}")
+            sys.exit(1)
+
+        # Initialize Journal Consumer
+        # This consumes the global event log for the entire instance
+        self.consumer = flux.job.JournalConsumer(self.handle)
+        self.running = True
+
+    def _normalize_event(self, event) -> dict:
+        """
+        Converts a Flux event object into the dictionary format expected by record_event.
+        Matches the logic provided in your EventsEngine reference.
+        """
+        # Convert the SWIG/CFFI event object to a dictionary
+        payload = dict(event)
+
+        return {
+            "id": str(getattr(event, "jobid", "unknown")),
+            "type": getattr(event, "name", "unknown"),
+            "timestamp": getattr(event, "timestamp", time.time()),
+            "payload": payload,
+            "R": getattr(event, "R", None),
+            "jobspec": getattr(event, "jobspec", None),
+        }
+
+    def run(self):
+        """
+        Main execution loop. Polls the journal and writes to the DB.
+        """
+        try:
+            logger.info("🚀 Flux Scribe (Journal Consumer) started.")
+            self.consumer.start()
+
+            while self.running:
+                try:
+                    # Non-blocking poll (100ms timeout)
+                    # This allows the loop to check for shutdown signals regularly
+                    event = self.consumer.poll(timeout=0.1)
+
+                    if event:
+                        print(event)
+                        # We only care about events associated with a job
+                        if hasattr(event, "jobid"):
+                            clean_event = self._normalize_event(event)
+                            self.db.record_event("local", clean_event)
+                    else:
+                        # If no event, yield a tiny bit of CPU
+                        time.sleep(0.01)
+
+                except EnvironmentError as e:
+                    # Ignore timeouts (no data)
+                    if e.errno == errno.ETIMEDOUT:
+                        continue
+                    logger.error(f"Flux connection error: {e}")
+                    time.sleep(1)
+
+                except Exception as e:
+                    logger.error(f"Unexpected error in event loop: {e}")
+                    time.sleep(1)
+
+        except Exception as e:
+            logger.critical(f"EventsEngine crashed: {e}")
+        finally:
+            self.db.close()
+            logger.info("EventsEngine thread exiting.")
+
+
+def main():
+    # Retrieve DB path from environment or use a default
+    db_path = os.environ.get("FLUX_SCRIBE_DATABASE", "sqlite:///server_state.db")
+    scribe = JournalScribe(db_path)
+    scribe.run()
+
+
+if __name__ == "__main__":
+    main()
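A hedged sketch of inspecting what the scribe recorded after it has run inside a Flux instance (started with `python3 -m flux_batch.service.scribe`); the default database path and the "local" cluster label both come from the code above:

```python
from flux_batch.service.scribe.database import SQLAlchemyBackend

# main() defaults to sqlite:///server_state.db unless FLUX_SCRIBE_DATABASE is set
db = SQLAlchemyBackend("sqlite:///server_state.db")
db.initialize()

# "local" is the cluster name the scribe passes to record_event()
for job_id in db.get_unwatched_job_ids("local"):
    print(f"submitted, not yet watched: {job_id}")
db.close()
```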
flux_batch/service/scribe/database.py
ADDED

@@ -0,0 +1,150 @@
+import time
+from typing import Any, Dict, List, Optional
+
+from sqlalchemy import and_, create_engine, select, update
+from sqlalchemy.ext.asyncio import async_sessionmaker, create_async_engine
+from sqlalchemy.orm import sessionmaker
+
+from flux_batch.service.scribe.models import Base, EventModel, EventRecord, JobModel, JobRecord
+
+
+def _record_event_internal(session, cluster: str, event: Dict[str, Any]):
+    """
+    Shared synchronous logic for recording events.
+    Used by both Sync and Async backends.
+    """
+    job_id = event.get("id")
+    event_type = event.get("type")
+    data = event.get("payload", {})
+    timestamp = event.get("timestamp", time.time())
+
+    new_event = EventModel(
+        job_id=job_id,
+        cluster=cluster,
+        timestamp=timestamp,
+        event_type=event_type,
+        payload=data,
+    )
+    session.add(new_event)
+
+    if event_type == "submit":
+        stmt = select(JobModel).where(and_(JobModel.job_id == job_id, JobModel.cluster == cluster))
+        job = session.execute(stmt).scalar_one_or_none()
+
+        if not job:
+            job = JobModel(
+                job_id=job_id,
+                cluster=cluster,
+                user=str(data.get("userid", "unknown")),
+                state="submitted",
+                workdir=data.get("cwd", ""),
+                submit_time=timestamp,
+                last_updated=timestamp,
+            )
+            session.add(job)
+        else:
+            job.state = "submitted"
+            job.last_updated = timestamp
+
+    # state transitions
+    elif event_type == "state" or (event_type and event_type.endswith(".finish")):
+        state_name = data.get("state_name", event_type)
+        stmt = select(JobModel).where(and_(JobModel.job_id == job_id, JobModel.cluster == cluster))
+        job = session.execute(stmt).scalar_one_or_none()
+        if job:
+            job.state = state_name
+            job.last_updated = time.time()
+            if "status" in data:
+                job.exit_code = data["status"]
+
+
+class AsyncSQLAlchemyBackend:
+    """
+    Asynchronous backend for the MCP Gateway.
+    """
+
+    def __init__(self, db_url: str):
+        self.engine = create_async_engine(db_url, echo=False)
+        self.SessionLocal = async_sessionmaker(self.engine, expire_on_commit=False)
+
+    async def initialize(self):
+        async with self.engine.begin() as conn:
+            await conn.run_sync(Base.metadata.create_all)
+
+    async def close(self):
+        await self.engine.dispose()
+
+    async def record_event(self, cluster: str, event: Dict[str, Any]):
+        async with self.SessionLocal() as session:
+            # run_sync bridges our shared logic into the async session
+            await session.run_sync(_record_event_internal, cluster, event)
+            await session.commit()
+
+    async def get_job(self, cluster: str, job_id: int) -> Optional[JobRecord]:
+        async with self.SessionLocal() as session:
+            result = await session.execute(
+                select(JobModel).where(and_(JobModel.job_id == job_id, JobModel.cluster == cluster))
+            )
+            job = result.scalar_one_or_none()
+            return job.to_record() if job else None
+
+    async def get_event_history(self, cluster: str, job_id: int) -> List[EventRecord]:
+        async with self.SessionLocal() as session:
+            result = await session.execute(
+                select(EventModel)
+                .where(and_(EventModel.job_id == job_id, EventModel.cluster == cluster))
+                .order_by(EventModel.timestamp.asc())
+            )
+            return [e.to_record() for e in result.scalars().all()]
+
+    async def search_jobs(
+        self, cluster: str = None, state: str = None, limit: int = 10
+    ) -> List[JobRecord]:
+        async with self.SessionLocal() as session:
+            stmt = select(JobModel)
+            if cluster:
+                stmt = stmt.where(JobModel.cluster == cluster)
+            if state:
+                stmt = stmt.where(JobModel.state == state)
+            result = await session.execute(stmt.limit(limit))
+            return [j.to_record() for j in result.scalars().all()]
+
+
+class SQLAlchemyBackend:
+    """
+    Synchronous backend for the standalone Scribe daemon.
+    """
+
+    def __init__(self, db_url: str):
+        # strip 'aiosqlite+' or similar if passed from shared config
+        url = db_url.replace("+aiosqlite", "").replace("+asyncpg", "")
+        self.engine = create_engine(url, echo=False)
+        self.SessionLocal = sessionmaker(bind=self.engine, expire_on_commit=False)
+
+    def initialize(self):
+        Base.metadata.create_all(self.engine)
+
+    def close(self):
+        self.engine.dispose()
+
+    def record_event(self, cluster: str, event: Dict[str, Any]):
+        with self.SessionLocal() as session:
+            with session.begin():
+                _record_event_internal(session, cluster, event)
+
+    def get_unwatched_job_ids(self, cluster: str) -> List[int]:
+        """Specific for Scribe: find jobs that need a watcher."""
+        with self.SessionLocal() as session:
+            stmt = select(JobModel.job_id).where(
+                and_(JobModel.cluster == cluster, JobModel.state == "submitted")
+            )
+            return list(session.execute(stmt).scalars().all())
+
+    def mark_job_as_watched(self, cluster: str, job_id: int):
+        with self.SessionLocal() as session:
+            with session.begin():
+                session.execute(
+                    update(JobModel)
+                    .where(and_(JobModel.job_id == job_id, JobModel.cluster == cluster))
+                    .values(state="watching")
+                )
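Because `record_event` only needs the normalized dictionary shape, the synchronous backend can be exercised without Flux at all; a self-contained sketch against an in-memory SQLite database (requires sqlalchemy):

```python
from flux_batch.service.scribe.database import SQLAlchemyBackend

db = SQLAlchemyBackend("sqlite://")  # in-memory database
db.initialize()

# A synthetic "submit" event in the shape produced by _normalize_event
db.record_event(
    "local",
    {"id": 123, "type": "submit", "timestamp": 0.0,
     "payload": {"userid": 1000, "cwd": "/tmp"}},
)
print(db.get_unwatched_job_ids("local"))  # [123]

db.mark_job_as_watched("local", 123)
print(db.get_unwatched_job_ids("local"))  # []
db.close()
```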
flux_batch/service/scribe/models.py
ADDED

@@ -0,0 +1,94 @@
+from dataclasses import dataclass
+from typing import Any, Dict, Optional
+
+from sqlalchemy import JSON, Float, Integer, String
+from sqlalchemy.ext.asyncio import AsyncAttrs
+from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column
+
+# DTOs are "Public Data Transfer Objects" and they are used by
+# our interfaces and tools
+
+
+@dataclass
+class JobRecord:
+    """
+    Represents a snapshot of a job state.
+    Returned by get_job() and search_jobs().
+    """
+
+    job_id: int
+    cluster: str
+    state: str
+    user: str
+    workdir: Optional[str] = None
+    exit_code: Optional[int] = None
+    submit_time: float = 0.0
+    last_updated: float = 0.0
+
+
+@dataclass
+class EventRecord:
+    """
+    Represents a single historical event.
+    Returned by get_event_history().
+    """
+
+    timestamp: float
+    event_type: str
+    payload: Dict[str, Any]
+
+
+# Database models for SQLAlchemy ORM
+
+
+class Base(AsyncAttrs, DeclarativeBase):
+    pass
+
+
+class JobModel(Base):
+    __tablename__ = "jobs"
+
+    # Composite Primary Key
+    job_id: Mapped[int] = mapped_column(Integer, primary_key=True)
+    cluster: Mapped[str] = mapped_column(String(255), primary_key=True)
+
+    state: Mapped[str] = mapped_column(String(50))
+    user: Mapped[str] = mapped_column(String(255), nullable=True)
+    workdir: Mapped[Optional[str]] = mapped_column(String, nullable=True)
+    exit_code: Mapped[Optional[int]] = mapped_column(Integer, nullable=True)
+    submit_time: Mapped[float] = mapped_column(Float, default=0.0)
+    last_updated: Mapped[float] = mapped_column(Float, default=0.0)
+
+    def to_record(self) -> JobRecord:
+        """
+        Helper to convert ORM model to public DTO
+        """
+        return JobRecord(
+            job_id=self.job_id,
+            cluster=self.cluster,
+            state=self.state,
+            user=self.user,
+            workdir=self.workdir,
+            exit_code=self.exit_code,
+            submit_time=self.submit_time,
+            last_updated=self.last_updated,
+        )
+
+
+class EventModel(Base):
+    __tablename__ = "events"
+
+    id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
+    job_id: Mapped[int] = mapped_column(Integer, index=True)
+    cluster: Mapped[str] = mapped_column(String(255), index=True)
+    timestamp: Mapped[float] = mapped_column(Float)
+    event_type: Mapped[str] = mapped_column(String(50))
+    payload: Mapped[Dict[str, Any]] = mapped_column(JSON)
+
+    def to_record(self) -> EventRecord:
+        """
+        Helper to convert ORM model to public DTO
+        """
+        return EventRecord(
+            timestamp=self.timestamp, event_type=self.event_type, payload=self.payload
+        )
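The `to_record` helpers keep ORM objects out of callers' hands; a small sketch (constructing the row by hand, which the backend normally does):

```python
from flux_batch.service.scribe.models import JobModel

# Column defaults apply at insert time, so unset fields are None here
job = JobModel(job_id=1, cluster="local", state="submitted", user="1000")
record = job.to_record()  # plain dataclass, safe to expose or serialize
print(record.state, record.exit_code)  # submitted None
```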
flux_batch/service/scribe/template.py
ADDED

@@ -0,0 +1,12 @@
+# Template for the Scribe Journal Consumer
+SERVICE_TEMPLATE = """[Unit]
+Description=Flux Scribe Journal Consumer
+After=network.target
+
+[Service]
+ExecStart={python_path} -m flux_batch.service.scribe
+Restart=on-failure
+
+[Install]
+WantedBy=default.target
+"""
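Rendering the unit file only needs an interpreter path; installing it (e.g. under ~/.config/systemd/user/) is an assumed workflow, not something the package does here:

```python
import sys

from flux_batch.service.scribe import SERVICE_TEMPLATE

# Fill in the one template field with the current interpreter
print(SERVICE_TEMPLATE.format(python_path=sys.executable))
```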
flux_batch/version.py
CHANGED

@@ -1,4 +1,4 @@
-__version__ = "0.0.0"
+__version__ = "0.0.1"
 AUTHOR = "Vanessa Sochat"
 AUTHOR_EMAIL = "vsoch@users.noreply.github.com"
 NAME = "flux-batch"
@@ -13,4 +13,5 @@ INSTALL_REQUIRES = (
 )
 
 TESTS_REQUIRES = (("pytest", {"min_version": "4.6.2"}),)
-
+SCRIBE_REQUIRES = (("sqlalchemy", {"min_version": None}), ("rich", {"min_version": None}))
+INSTALL_REQUIRES_ALL = INSTALL_REQUIRES + TESTS_REQUIRES + SCRIBE_REQUIRES
{flux_batch-0.0.0.dist-info → flux_batch-0.0.1.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: flux-batch
-Version: 0.0.0
+Version: 0.0.1
 Summary: Python SDK for flux batch jobs and services
 Home-page: https://github.com/converged-computing/flux-batch
 Author: Vanessa Sochat
@@ -26,6 +26,11 @@ Provides-Extra: all
 Requires-Dist: pyyaml ; extra == 'all'
 Requires-Dist: ply ; extra == 'all'
 Requires-Dist: pytest >=4.6.2 ; extra == 'all'
+Requires-Dist: sqlalchemy ; extra == 'all'
+Requires-Dist: rich ; extra == 'all'
+Provides-Extra: scribe
+Requires-Dist: sqlalchemy ; extra == 'scribe'
+Requires-Dist: rich ; extra == 'scribe'
 
 # flux-batch
 
@@ -46,6 +51,7 @@ Requires-Dist: pytest >=4.6.2 ; extra == 'all'
 
 - **flux-scribe**: Write job events to a local sqlite database via the JournalConsumer (not added yet, written and needs testing)
 
+
 ## Usage
 
 This is a small Flux utility that makes it easy to create Flux batch jobs and services.
@@ -60,9 +66,15 @@ flux start
 pip install -e . --break-system-packages
 ```
 
-###
+### Examples
+
+We have a few simple examples:
+
+```bash
+python3 ./examples/save_logs.py
+```
 
-
+Or run the controlled example to see a batch job with prolog and epilog run and complete:
 
 ```bash
 python3 ./tests/test_flux_batch.py
@@ -117,7 +129,9 @@ jobspec = flux_batch.BatchJobspecV1.from_jobs(
     nodes=1,
     nslots=1,
     time_limit="10m",
-    job_name="test-batch"
+    job_name="test-batch",
+    # Add saving of logs, info, and metadata
+    logs_dir="./logs",
 )
 
 # Add a prolog and epilog
@@ -125,7 +139,7 @@ jobspec.add_prolog("echo 'Batch Wrapper Starting'")
 jobspec.add_epilog("echo 'Batch Wrapper Finished'")
 
 # Add a service (this assumes user level that exists)
-
+jobspec.add_service("flux-scribe")
 
 # Preview it
 print(flux_batch.submit(handle, jobspec, dry_run=True))
flux_batch-0.0.1.dist-info/RECORD
ADDED

@@ -0,0 +1,28 @@
+flux_batch/__init__.py,sha256=ZAZF-E0SbVVY2U1_WkRGZFB4rge5BGqQZJ2MdjloZhY,230
+flux_batch/jobspec.py,sha256=8eYPPgYmup37AvKQddtQax9RweZnsbPFUBe0eOb7__4,4638
+flux_batch/models.py,sha256=JjrFqi4Skrop_cyIWfgAHTdY53kotkiGD0JpTnVlByI,2200
+flux_batch/submit.py,sha256=TSDg1Dwa5HKhg7Rj9Due8hTDz5__ihyoUdGGKhtVqWo,1795
+flux_batch/version.py,sha256=EjicWR0QF-5SiGDBpWkctAT6keC05IyZrr61fi4OjQ4,642
+flux_batch/logger/__init__.py,sha256=eDdpw_uppR5mPLHE39qT_haqMxu-2wniLlJZDigRC2k,52
+flux_batch/logger/generate.py,sha256=L9JyMY2oapp0ss7f7LGuihbLomzVJsMq7sByy9NhbZI,4017
+flux_batch/logger/logger.py,sha256=HKymVBNcoPdX87QWy69er5wUzHVeriiKp9p0bIYboUo,5927
+flux_batch/script/__init__.py,sha256=ZR-NCvI42RrAJTKzSmV4AcxlL-aoYh7mlQNPxF8Vzok,400
+flux_batch/script/save_logs.sh,sha256=HeapqvL0iR8aX7LtbwlcFTy19j5WxkQ-1M2J9epu-EU,515
+flux_batch/service/__init__.py,sha256=sFJ3r7XEw4vLcmadTxsxQq715bl_tFREOoUS4erimqQ,1116
+flux_batch/service/scribe.py,sha256=dY6geiLvXYIRcIzuP_naZscKgzX4Y5dPzxoWf9Wywg0,253
+flux_batch/service/scribe/__init__.py,sha256=BW4xb-QZvdrtbo73XB26MZozIbvDdnrgLzjB4GRtofU,39
+flux_batch/service/scribe/__main__.py,sha256=3S0dyhkHk-bPT_Z0laNUg-HydrCFql4hPOV2ZNF5rO0,3777
+flux_batch/service/scribe/database.py,sha256=EB8OEMfNvfCplGaz-ZNMsfIpd305eP-mfCvSd-fg_k4,5626
+flux_batch/service/scribe/models.py,sha256=7lUrRosnQ2douFL_xD9GMYex4Z4lkN-CcBeWDhzmD8c,2668
+flux_batch/service/scribe/template.py,sha256=YwmAX5qkMwaj9FztftdgY3DjBaF9PVd7hDZebiD50fA,256
+flux_batch/utils/__init__.py,sha256=CqMhw_mBfR0HBcHwv7LtFITq0J7LBV413VQE9xrz8ks,42
+flux_batch/utils/fileio.py,sha256=Elz8WkNkJ9B6x7WmCwiIBW0GgsRSSFCcbuJh7aqu2z4,4879
+flux_batch/utils/text.py,sha256=Ci1BqHs2IbOSn2o60zhLkT4kIA7CSNuGj8mdiGaDIGk,606
+flux_batch/utils/timer.py,sha256=_Weec7Wd5hWQ1r4ZHjownG4YdoIowpVqilXhvYFmIgA,491
+flux_batch-0.0.1.dist-info/LICENSE,sha256=AlyLB1m_z0CENCx1ob0PedLTTohtH2VLZhs2kfygrfc,1108
+flux_batch-0.0.1.dist-info/METADATA,sha256=G4iEbaFF1cRs12NvjbhWF76uj63MPnG4C6zF2T00e7w,5215
+flux_batch-0.0.1.dist-info/NOTICE,sha256=9CR93geVKl_4ZrJORbXN0fzkEM2y4DglWhY1hn9ZwQw,1167
+flux_batch-0.0.1.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+flux_batch-0.0.1.dist-info/entry_points.txt,sha256=ynoKpD82xn2V2sD-aZIQoq7NnfOu9VEKqW55Y1AoPGI,67
+flux_batch-0.0.1.dist-info/top_level.txt,sha256=jj8zAsZzMmbjiBISJL7lRtA37MSEAQYfObGLUncn9Lw,11
+flux_batch-0.0.1.dist-info/RECORD,,
flux_batch-0.0.0.dist-info/RECORD
REMOVED

@@ -1,21 +0,0 @@
-flux_batch/__init__.py,sha256=ZAZF-E0SbVVY2U1_WkRGZFB4rge5BGqQZJ2MdjloZhY,230
-flux_batch/jobspec.py,sha256=f0C3ba8kR9lS-ej-AH-q3fXd8F_WarB7qCSyIDKhhgk,3891
-flux_batch/models.py,sha256=WACPnAvjEteLOq7Jx2zb8M2gKz5YVhz6V5pKjzRpPXI,1999
-flux_batch/submit.py,sha256=TSDg1Dwa5HKhg7Rj9Due8hTDz5__ihyoUdGGKhtVqWo,1795
-flux_batch/version.py,sha256=DX2m2hMpubLBjIWS8CfP-WZViMaSR2YD6eT8I_Akw8c,533
-flux_batch/logger/__init__.py,sha256=eDdpw_uppR5mPLHE39qT_haqMxu-2wniLlJZDigRC2k,52
-flux_batch/logger/generate.py,sha256=L9JyMY2oapp0ss7f7LGuihbLomzVJsMq7sByy9NhbZI,4017
-flux_batch/logger/logger.py,sha256=HKymVBNcoPdX87QWy69er5wUzHVeriiKp9p0bIYboUo,5927
-flux_batch/service/__init__.py,sha256=8IDdhIZY2B20RuJUcWsuTpwB6fm5BYxtTGot6pmn4Ag,1111
-flux_batch/service/scribe.py,sha256=dY6geiLvXYIRcIzuP_naZscKgzX4Y5dPzxoWf9Wywg0,253
-flux_batch/utils/__init__.py,sha256=CqMhw_mBfR0HBcHwv7LtFITq0J7LBV413VQE9xrz8ks,42
-flux_batch/utils/fileio.py,sha256=Elz8WkNkJ9B6x7WmCwiIBW0GgsRSSFCcbuJh7aqu2z4,4879
-flux_batch/utils/text.py,sha256=Ci1BqHs2IbOSn2o60zhLkT4kIA7CSNuGj8mdiGaDIGk,606
-flux_batch/utils/timer.py,sha256=_Weec7Wd5hWQ1r4ZHjownG4YdoIowpVqilXhvYFmIgA,491
-flux_batch-0.0.0.dist-info/LICENSE,sha256=AlyLB1m_z0CENCx1ob0PedLTTohtH2VLZhs2kfygrfc,1108
-flux_batch-0.0.0.dist-info/METADATA,sha256=JGWbOpKTZIpesJokTG29z9p-x1L-tTajMlQJ7TcjQUQ,4877
-flux_batch-0.0.0.dist-info/NOTICE,sha256=9CR93geVKl_4ZrJORbXN0fzkEM2y4DglWhY1hn9ZwQw,1167
-flux_batch-0.0.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
-flux_batch-0.0.0.dist-info/entry_points.txt,sha256=ynoKpD82xn2V2sD-aZIQoq7NnfOu9VEKqW55Y1AoPGI,67
-flux_batch-0.0.0.dist-info/top_level.txt,sha256=jj8zAsZzMmbjiBISJL7lRtA37MSEAQYfObGLUncn9Lw,11
-flux_batch-0.0.0.dist-info/RECORD,,
{flux_batch-0.0.0.dist-info → flux_batch-0.0.1.dist-info}/LICENSE
File without changes

{flux_batch-0.0.0.dist-info → flux_batch-0.0.1.dist-info}/NOTICE
File without changes

{flux_batch-0.0.0.dist-info → flux_batch-0.0.1.dist-info}/WHEEL
File without changes

{flux_batch-0.0.0.dist-info → flux_batch-0.0.1.dist-info}/entry_points.txt
File without changes

{flux_batch-0.0.0.dist-info → flux_batch-0.0.1.dist-info}/top_level.txt
File without changes