mccode-plumber 0.11.0-py3-none-any.whl → 0.12.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mccode_plumber/epics.py +53 -6
- mccode_plumber/file_writer_control/WorkerJobPool.py +1 -1
- mccode_plumber/forwarder.py +10 -5
- mccode_plumber/kafka.py +49 -10
- mccode_plumber/manage/__init__.py +26 -0
- mccode_plumber/manage/efu.py +133 -0
- mccode_plumber/manage/ensure.py +73 -0
- mccode_plumber/manage/epics.py +33 -0
- mccode_plumber/manage/forwarder.py +79 -0
- mccode_plumber/manage/manager.py +113 -0
- mccode_plumber/manage/orchestrate.py +430 -0
- mccode_plumber/manage/writer.py +60 -0
- mccode_plumber/writer.py +64 -29
- {mccode_plumber-0.11.0.dist-info → mccode_plumber-0.12.0.dist-info}/METADATA +3 -2
- {mccode_plumber-0.11.0.dist-info → mccode_plumber-0.12.0.dist-info}/RECORD +18 -10
- {mccode_plumber-0.11.0.dist-info → mccode_plumber-0.12.0.dist-info}/entry_points.txt +3 -0
- {mccode_plumber-0.11.0.dist-info → mccode_plumber-0.12.0.dist-info}/WHEEL +0 -0
- {mccode_plumber-0.11.0.dist-info → mccode_plumber-0.12.0.dist-info}/top_level.txt +0 -0
mccode_plumber/manage/orchestrate.py
ADDED

```diff
@@ -0,0 +1,430 @@
+from __future__ import annotations
+
+from pathlib import Path
+from datetime import datetime, timezone
+from mccode_antlr.common import InstrumentParameter
+from mccode_antlr.instr import Instr
+from mccode_plumber.manage import ensure_readable_file, ensure_writable_file, ensure_executable
+from mccode_plumber.manage.efu import EventFormationUnitConfig
+
+TOPICS = {
+    'parameter': 'SimulatedParameters',
+    'event': 'SimulatedEvents',
+    'config': 'ForwardConfig',
+    'status': 'ForwardStatus',
+    'command': 'WriterCommand',
+    'pool': 'WriterPool',
+}
+PREFIX = 'mcstas:'
+
+def guess_instr_config(name: str):
+    guess = f'/event-formation-unit/configs/{name}/configs/{name}.json'
+    return ensure_readable_file(Path(guess))
+
+
+def guess_instr_calibration(name: str):
+    guess = f'/event-formation-unit/configs/{name}/configs/{name}nullcalib.json'
+    return ensure_readable_file(Path(guess))
+
+
+def guess_instr_efu(name: str):
+    guess = name.split('_')[0].split('.')[0].split('-')[0].lower()
+    return ensure_executable(Path(guess))
+
+
+def register_topics(broker: str, topics: list[str]):
+    """Ensure that topics are registered in the Kafka broker."""
+    from mccode_plumber.kafka import register_kafka_topics, all_exist
+    res = register_kafka_topics(broker, topics)
+    if not all_exist(res.values()):
+        raise RuntimeError(f'Missing Kafka topics? {res}')
+
+
+def augment_structure(
+        parameters: tuple[InstrumentParameter,...],
+        structure: dict,
+        title: str,
+):
+    """Helper to add stream JSON entries for Instr parameters to a NexusStructure
+
+    Parameters
+    ----------
+    parameters : tuple[InstrumentParameter,...]
+        Instrument runtime parameters
+    structure : dict
+        NexusStructure JSON representing the instrument
+    title : str
+        Informative string about the simulation, to be inserted in structure
+    """
+    from mccode_plumber.writer import (
+        add_title_to_nexus_structure, add_pvs_to_nexus_structure,
+        construct_writer_pv_dicts_from_parameters,
+    )
+    pvs = construct_writer_pv_dicts_from_parameters(parameters, PREFIX, TOPICS['parameter'])
+    data = add_pvs_to_nexus_structure(structure, pvs)
+    data = add_title_to_nexus_structure(data, title)
+    return data
+
+
+def stop_writer(broker, job_id, timeout):
+    from time import sleep
+    from datetime import timedelta
+    from mccode_plumber.file_writer_control import WorkerJobPool
+    from mccode_plumber.file_writer_control.JobStatus import JobState
+    # The process is now told to switch to a 'control' topic, that is job-specific
+    # So we should send the stop-command there. This is the 'command_topic_url'?
+    def back_stop(job_topic, command_topic):
+        job_topic_url = f"{broker}/{job_topic}"
+        command_topic_url = f"{broker}/{command_topic}"
+        pool = WorkerJobPool(job_topic_url, command_topic_url)
+        sleep(1)
+        pool.try_send_stop_now(None, job_id)
+        state = pool.get_job_state(job_id)
+        give_up = datetime.now() + timedelta(seconds=timeout)
+        while state != JobState.DONE and state != JobState.ERROR and state != JobState.TIMEOUT and datetime.now() < give_up:
+            sleep(1)
+            state = pool.get_job_state(job_id)
+        return state
+
+    jstate = back_stop(TOPICS['pool'], TOPICS['command'])
+    if jstate != JobState.DONE:
+        print(f'Done trying to stop {job_id} -> {jstate}')
+
+
+def start_writer(start_time: datetime,
+                 structure: dict,
+                 filename: Path,
+                 broker: str,
+                 timeout: float):
+    from uuid import uuid1
+    from mccode_plumber.writer import writer_start
+    job_id = str(uuid1())
+    success = False
+    name = filename.name
+    try:
+        print(f"Starting {job_id} from {start_time} for file {name} under kafka-to-nexus' working directory")
+        start, handler = writer_start(
+            start_time.isoformat(), structure, filename=name,
+            stop_time_string=None,
+            broker=broker, job_topic=TOPICS['pool'], command_topic=TOPICS['command'],
+            control_topic=TOPICS['command'], # don't switch topics
+            timeout=timeout, job_id=job_id, wait=False
+        )
+        # success = start.is_done() # this causes an infinite hang?
+        success = True
+    except RuntimeError as e:
+        if job_id in str(e):
+            # starting the job failed, so try to kill it
+            print(f"Starting {job_id} failed! Error: {e}")
+            stop_writer(broker, job_id, timeout)
+
+    return job_id, success
+
+
+def get_topics_iter(data: list | tuple):
+    topics = set()
+    for entry in data:
+        if isinstance(entry, dict):
+            topics.update(get_topics_dict(entry))
+        elif isinstance(entry, (list, tuple)):
+            topics.update(get_topics_iter(entry))
+    return topics
+
+
+def get_topics_dict(data: dict):
+    topics = set()
+    for k, v in data.items():
+        if isinstance(v, dict):
+            topics.update(get_topics_dict(v))
+        elif isinstance(v, (list, tuple)):
+            topics.update(get_topics_iter(list(v)))
+        elif k == 'topic':
+            topics.add(v)
+    return topics
+
+
+def get_topics_json(data: dict) -> list[str]:
+    """Traverse a loaded JSON object and return the found list of topic names"""
+    return list(get_topics_dict(data))
+
+
+def load_file_json(file: str | Path):
+    from json import load
+    file = ensure_readable_file(file)
+    with file.open('r') as f:
+        return load(f)
+
+
+def get_instr_name_and_parameters(file: str | Path):
+    file = ensure_readable_file(file)
+    if file.suffix == '.h5':
+        # Shortcut loading the whole Instr:
+        import h5py
+        from mccode_antlr.io.hdf5 import HDF5IO
+        with h5py.File(file, 'r', driver='core', backing_store=False) as f:
+            name = f.attrs['name']
+            parameters = HDF5IO.load(f['parameters'])
+        return name, parameters
+    elif file.suffix == '.instr':
+        # No shortcuts
+        from mccode_antlr.loader import load_mcstas_instr
+        instr = load_mcstas_instr(file)
+        return instr.name, instr.parameters
+
+    raise ValueError('Unsupported file extension')
+
+
+def efu_parameter(s: str):
+    if ':' in s:
+        # with any ':' we require fully specified
+        # name:{name};binary:{binary};config:{config_path};calibration:{calibration_path};topic:{topic};port:{port}
+        # what about spaces? or windows-style paths with C:/...
+        return EventFormationUnitConfig.from_cli_str(s)
+    # otherwise, allow an abbreviated format utilizing guesses
+    # Expected format is now:
+    # {efu_binary}[;{calibration/file}[;{config/file}]][;{port}]
+    # That is, if you specify --efu, you must give its binary path and should
+    # give its port. The calibration/file determines pixel calculations, so is more
+    # likely to be needed. Finally, the config file can also be supplied to change, e.g.,
+    # number of pixels or rings, etc.
+    parts = s.split(';')
+    data = {'topic': TOPICS['event'], 'port': 9000, 'binary': ensure_executable(parts[0]),}
+    data['name'] = data['binary'].stem
+
+    if len(parts) > 1 and (len(parts) > 2 or not parts[1].isnumeric()):
+        data['calibration'] = parts[1]
+    else:
+        data['calibration'] = guess_instr_calibration(data['name'])
+    if len(parts) > 2 and (len(parts) > 3 or not parts[2].isnumeric()):
+        data['config'] = parts[2]
+    else:
+        data['config'] = guess_instr_config(data['name'])
+    if len(parts) > 1 and parts[-1].isnumeric():
+        data['port'] = int(parts[-1])
+
+    return EventFormationUnitConfig.from_dict(data)
+
+
+def make_services_parser():
+    from mccode_plumber import __version__
+    from argparse import ArgumentParser
+    parser = ArgumentParser('mp-nexus-services')
+    a=parser.add_argument
+    a('instrument', type=str, help='Instrument .instr or .h5 file')
+    a('-v', '--version', action='version', version=__version__)
+    # No need to specify the broker, or monitor source or topic names
+    a('-b', '--broker', type=str, default=None, help='Kafka broker for all services', metavar='address:port')
+    a('--efu', type=efu_parameter, action='append', default=None, help='Configuration of one EFU, repeatable', metavar='name;calibration;config;port')
+    a('--writer-working-dir', type=str, default=None, help='Working directory for kafka-to-nexus')
+    a('--writer-verbosity', type=str, default=None, help='Verbose output type (trace, debug, warning, error, critical)')
+    a('--forwarder-verbosity', type=str, default=None, help='Verbose output type (trace, debug, warning, error, critical)')
+    return parser
+
+
+def services():
+    args = make_services_parser().parse_args()
+    instr_name, instr_parameters = get_instr_name_and_parameters(args.instrument)
+    kwargs = {
+        'instr_name': instr_name,
+        'instr_parameters': instr_parameters,
+        'broker': args.broker or 'localhost:9092',
+        'efu': args.efu,
+        'work': args.writer_working_dir,
+        'verbosity_writer': args.writer_verbosity,
+        'verbosity_forwarder': args.forwarder_verbosity,
+    }
+    load_in_wait_load_out(**kwargs)
+
+
+def load_in_wait_load_out(
+        instr_name: str,
+        instr_parameters: tuple[InstrumentParameter, ...],
+        broker: str,
+        efu: list[EventFormationUnitConfig] | None,
+        work: str | None = None,
+        manage: bool = True,
+        verbosity_writer: str | None = None,
+        verbosity_forwarder: str | None = None,
+):
+    import signal
+    from time import sleep
+    from colorama import Fore, Back, Style
+    from mccode_plumber.manage import (
+        EventFormationUnit, EPICSMailbox, Forwarder, KafkaToNexus
+    )
+    from mccode_plumber.manage.forwarder import forwarder_verbosity
+    from mccode_plumber.manage.writer import writer_verbosity
+
+    # Start up services if they should be managed locally
+    if manage:
+        if efu is None:
+            data = {
+                'name': instr_name,
+                'binary': guess_instr_efu(instr_name),
+                'config': guess_instr_config(name=instr_name),
+                'calibration': guess_instr_calibration(name=instr_name),
+                'topic': TOPICS['event'],
+                'port': 9000
+            }
+            if any('port' in p.name for p in instr_parameters):
+                from mccode_antlr.common.expression import DataType
+                port_parameter = next(
+                    p for p in instr_parameters if 'port' in p.name)
+                if port_parameter.value.has_value and port_parameter.value.data_type == DataType.int:
+                    # the instrument parameter has a default, which is an integer
+                    data['port'] = port_parameter.value.value
+            efu = [EventFormationUnitConfig.from_dict(data)]
+        things = tuple(
+            EventFormationUnit.start(
+                style=Fore.BLUE, broker=broker, **x.to_dict()
+            ) for x in efu) + (
+            Forwarder.start(
+                name='FWD',
+                style=Fore.GREEN,
+                broker=broker,
+                config=TOPICS['config'],
+                status=TOPICS['status'],
+                verbosity=forwarder_verbosity(verbosity_forwarder),
+            ),
+            EPICSMailbox.start(
+                name='MBX',
+                style=Fore.YELLOW + Back.LIGHTCYAN_EX,
+                parameters=instr_parameters,
+                prefix=PREFIX,
+            ),
+            KafkaToNexus.start(
+                name='K2N',
+                style=Fore.RED + Style.DIM,
+                broker=broker,
+                work=work,
+                command=TOPICS['command'],
+                pool=TOPICS['pool'],
+                verbosity=writer_verbosity(verbosity_writer),
+            ),
+        )
+    else:
+        things = ()
+
+    # Ensure stream topics exist
+    register_topics(broker, list(TOPICS.values()))
+
+    def signal_handler(signum, frame):
+        if signum == signal.SIGINT:
+            print('Done waiting, following SIGINT')
+            for service in things:
+                service.stop()
+            exit(0)
+        else:
+            print(f'Received signal {signum}, ignoring')
+
+    signal.signal(signal.SIGINT, signal_handler)
+    print(
+        Fore.YELLOW+Back.LIGHTGREEN_EX+Style.BRIGHT
+        + "\tYou can now run 'mp-nexus-splitrun' in another process"
+        + " (Press CTRL+C to exit)." + Style.RESET_ALL
+    )
+    # signal.pause()
+    while all(service.poll() for service in things):
+        # Try to grab and print any updates
+        sleep(0.01)
+    # If we reach here, one or more service has _already_ stopped
+    for service in things:
+        if not service.poll():
+            print(f'{service.name} exited unexpectedly')
+        service.stop()
+
+
+def make_splitrun_nexus_parser():
+    from mccode_plumber import __version__
+    from restage.splitrun import make_splitrun_parser
+    parser = make_splitrun_parser()
+    parser.prog = 'mp-nexus-splitrun'
+    parser.add_argument('-v', '--version', action='version', version=__version__)
+    # No need to specify the broker, or monitor source or topic names
+    parser.add_argument('--structure', type=str, default=None, help='NeXus Structure JSON path')
+    parser.add_argument('--structure-out', type=str, default=None, help='Output configured structure JSON path')
+    parser.add_argument('--nexus-file', type=str, default=None, help='Output NeXus file path')
+    return parser
+
+
+def main():
+    from mccode_plumber.mccode import get_mcstas_instr
+    from restage.splitrun import parse_splitrun
+    from mccode_plumber.splitrun import monitors_to_kafka_callback_with_arguments
+    args, parameters, precision = parse_splitrun(make_splitrun_nexus_parser())
+    instr = get_mcstas_instr(args.instrument[0])
+
+    structure = load_file_json(args.structure if args.structure else Path(args.instrument[0]).with_suffix('.json'))
+    broker = 'localhost:9092'
+    monitor_source = 'mccode-to-kafka'
+    callback_topics = get_topics_json(structure) # all structure-topics might be monitor topics?
+    if len(callback_topics):
+        print(f'register {callback_topics}')
+        register_topics(broker, callback_topics) # ensure the topics are known to Kafka
+    else:
+        print('no callback topics registered')
+
+    callback, callback_args = monitors_to_kafka_callback_with_arguments(broker, monitor_source, callback_topics)
+    splitrun_kwargs = {
+        'args': args, 'parameters': parameters, 'precision': precision,
+        'callback': callback, 'callback_arguments': callback_args,
+    }
+    kwargs = {
+        'nexus_file': args.nexus_file, 'structure_out': args.structure_out
+    }
+    for k in list(kwargs.keys()) + ['structure']:
+        delattr(args, k)
+    return orchestrate(instr, structure, broker, splitrun_kwargs, **kwargs)
+
+
+def orchestrate(
+        instr: Instr,
+        structure,
+        broker: str,
+        splitrun_kwargs: dict,
+        nexus_file: str | None = None,
+        structure_out: str | None = None,
+):
+    from datetime import datetime, timezone
+    from restage.splitrun import splitrun_args
+    from mccode_plumber.forwarder import (
+        forwarder_partial_streams, configure_forwarder, reset_forwarder
+    )
+    now = datetime.now(timezone.utc)
+    title = f'{instr.name} simulation {now}: {splitrun_kwargs["args"]}'
+    # kafka-to-nexus will strip off the root part of this path and put the remaining
+    # location and filename under _its_ working directory.
+    # Since it doesn't seem to create missing folders, we need to ensure we only
+    # provide the file stem.
+    filename = ensure_writable_file(nexus_file or f'{instr.name}_{now:%y%m%dT%H%M%S}.h5')
+
+    # Tell the forwarder what to forward
+    partial_streams = forwarder_partial_streams(PREFIX, TOPICS['parameter'], instr.parameters)
+    forwarder_config = f"{broker}/{TOPICS['config']}"
+    configure_forwarder(partial_streams, forwarder_config, PREFIX, TOPICS['parameter'])
+
+    # Create a file-writer job
+    structure = augment_structure(instr.parameters, structure, title)
+    if structure_out:
+        from json import dump
+        with open(structure_out, 'w') as f:
+            dump(structure, f)
+
+    job_id, success = start_writer(now, structure, filename, broker, 30.0)
+    if success:
+        print("Writer job started -- start the simulation")
+        # Do the actual simulation, calling into restage.splitrun after parsing,
+        # Using the provided callbacks to send monitor data to Kafka
+        splitrun_args(instr, **splitrun_kwargs)
+        print("Splitrun simulation finished -- informing file-writer to stop")
+        # Wait for the file-writer to finish its job (possibly kill it)
+        stop_writer(broker, job_id, 20.0)
+    # De-register the forwarder topics
+    reset_forwarder(partial_streams, forwarder_config, PREFIX, TOPICS['parameter'])
+    # Verify that the file has been written?
+    # This only works if the filewriter was stared in the same directory :(
+    # ensure_readable_file(filename)
+    if filename.exists():
+        print(f'Finished writing {filename}')
+    else:
+        print(f'{filename} not found, check file-writer working directory')
```
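The topic-harvesting helpers above (`get_topics_iter`, `get_topics_dict`, `get_topics_json`) are plain recursive traversals, and `main` relies on them to find every stream topic named in the structure file before registering those topics with Kafka. A minimal sketch of the traversal, assuming the 0.12.0 wheel is installed; the nested dict is an illustrative stand-in, not a real NexusStructure document:

```python
from mccode_plumber.manage.orchestrate import get_topics_json

# Illustrative stand-in for a loaded NexusStructure JSON document;
# only the values stored under 'topic' keys matter to the traversal.
structure = {
    "children": [
        {"module": "ev44", "config": {"topic": "SimulatedEvents", "source": "efu"}},
        {"children": [
            {"module": "f144", "config": {"topic": "SimulatedParameters"}},
            {"module": "f144", "config": {"topic": "SimulatedParameters"}},
        ]},
    ]
}

# get_topics_dict recurses into dict values, get_topics_iter into lists and
# tuples, and every value found under a 'topic' key lands in a set, so the
# duplicated 'SimulatedParameters' collapses to a single name.
assert sorted(get_topics_json(structure)) == ['SimulatedEvents', 'SimulatedParameters']
```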
mccode_plumber/manage/writer.py
ADDED

```diff
@@ -0,0 +1,60 @@
+from __future__ import annotations
+from dataclasses import dataclass, field
+from pathlib import Path
+from .manager import Manager
+from .ensure import ensure_writable_directory, ensure_executable
+
+
+@dataclass
+class KafkaToNexus(Manager):
+    """
+    Manage the execution of a kafka-to-nexus file writer
+
+    Parameters
+    ----------
+    broker: the name or address and port of the broker containing the needed
+        command and job topics (localhost:9092)
+    work: the working directory for file output (`Path()`)
+    command: the topic used for receiving commands (WriterCommand)
+    pool: the topic used for receiving jobs as part of a pool (WriterJob)
+    verbosity: the level of output to print to STDOUT, any of
+        (trace, debug, info, warning, error, critical)
+    """
+    broker: str
+    command: str
+    pool: str
+    work: Path | None = None
+    verbosity: str | None = None
+    _command: Path = field(default_factory=lambda: Path('kafka-to-nexus'))
+
+    def __post_init__(self):
+        from mccode_plumber.kafka import register_kafka_topics, all_exist
+        self._command = ensure_executable(self._command)
+        self.work = ensure_writable_directory(self.work or Path()).resolve()
+        res = register_kafka_topics(self.broker, [self.command, self.pool])
+        if not all_exist(res.values()):
+            raise RuntimeError(f'Missing Kafka topics? {res}')
+
+    def __run_command__(self) -> list[str]:
+        args = [
+            self._command.as_posix(),
+            '--brokers', f"{self.broker},{self.broker}",
+            '--command-status-topic', self.command,
+            '--job-pool-topic', self.pool,
+            #'--service-name', 'mpw',
+            f'--hdf-output-prefix={self.work}/',
+            '--kafka-error-timeout', '10s',
+            '--kafka-metadata-max-timeout', '10s',
+            '--time-before-start', '10s',
+        ]
+        if (v := writer_verbosity(self.verbosity)) is not None:
+            args.extend(['--verbosity', v])
+        return args
+
+
+def writer_verbosity(v):
+    if isinstance(v, str):
+        for k in ('critical', 'error', 'warning', 'info', 'debug', 'trace'):
+            if k.lower() == v.lower():
+                return k
+    return None
```
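`writer_verbosity` acts as a validator rather than a converter: it case-folds the requested level onto the writer's known set and returns `None` for anything else, so an unrecognized `--writer-verbosity` value simply leaves the `--verbosity` flag off the `kafka-to-nexus` command line instead of breaking the launch. A quick sketch, assuming the installed package:

```python
from mccode_plumber.manage.writer import writer_verbosity

assert writer_verbosity('DEBUG') == 'debug'  # case-folded onto a known level
assert writer_verbosity('chatty') is None    # unknown levels are discarded
assert writer_verbosity(None) is None        # non-strings fall through to None
```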
mccode_plumber/writer.py
CHANGED

```diff
@@ -139,22 +139,23 @@ def insert_events_in_nexus_structure(ns: dict, config: dict):
 
 def get_writer_pool(broker: str = None, job: str = None, command: str = None):
     from .file_writer_control import WorkerJobPool
+    print(f'Create a Writer pool for {broker=} {job=} {command=}')
     pool = WorkerJobPool(f"{broker}/{job}", f"{broker}/{command}")
     return pool
 
 
 def make_define_nexus_structure():
-    from typing import
+    from typing import Callable
     from mccode_antlr.instr import Instr
 
     def define_nexus_structure(
-            instr:
+            instr: Path | str,
             pvs: list[dict],
             title: str = None,
             event_stream: dict[str, str] = None,
-            file:
-            func:
-            binary:
+            file: Path | None = None,
+            func: Callable[[Instr], dict] | None = None,
+            binary: Path | None = None,
             origin: str = None):
         import json
         from .mccode import get_mcstas_instr
@@ -178,13 +179,21 @@ def make_define_nexus_structure():
     return define_nexus_structure
 
 
-def
-
-
-
-
-
+def writer_start(
+        start_time_string,
+        structure,
+        filename,
+        stop_time_string,
+        broker,
+        job_topic,
+        command_topic,
+        control_topic,
+        timeout,
+        wait,
+        job_id,
+):
     from json import dumps
+    from time import sleep
     from datetime import datetime, timedelta
     from .file_writer_control import JobHandler, WriteJob, CommandState
 
@@ -201,33 +210,57 @@ def start_pool_writer(start_time_string, structure, filename=None, stop_time_str
     end_time = datetime.now() if wait else None
     if stop_time_string is not None:
         end_time = datetime.fromisoformat(stop_time_string)
-    print(f"write file from {start_time} until {end_time}")
 
-    job = WriteJob(small_string, filename, broker, start_time, end_time,
+    job = WriteJob(small_string, filename, broker, start_time, end_time,
+                   job_id=job_id or "", control_topic=control_topic)
     # start the job
     start = handler.start_job(job)
+    # Did the job handler accomplish its job of sending the start message?
+    print(f'Writer start {handler.is_done()=} {handler.get_message()=}')
     if timeout is not None:
         try:
             # ensure the start succeeds:
-
+            give_up_time = datetime.now() + timedelta(seconds=timeout)
             while not start.is_done():
-
+                state = start.get_state()
+                if give_up_time < datetime.now():
                     raise RuntimeError(f"Timed out while starting job {job.job_id}")
-                elif
+                elif state == CommandState.ERROR:
                     raise RuntimeError(f"Starting job {job.job_id} failed with message {start.get_message()}")
                 sleep(1)
         except RuntimeError as e:
-
-
-
+            raise RuntimeError(f"{e} The message was: {start.get_message()}")
+    return start, handler
+
+
+def start_pool_writer(
+        start_time_string,
+        structure,
+        filename=None,
+        stop_time_string: str | None = None,
+        broker: str | None = None,
+        job_topic: str | None = None,
+        command_topic: str | None = None,
+        control_topic: str | None = None,
+        wait: bool = False,
+        timeout: float | None = None,
+        job_id: str | None = None
+):
+    from sys import exit
+    from os import EX_OK, EX_UNAVAILABLE
+    from time import sleep
 
-
-
+    try:
+        start, handler = writer_start(
+            start_time_string, structure, filename, stop_time_string,
+            broker, job_topic, command_topic, control_topic, timeout, wait, job_id
+        )
+        if wait:
             while not handler.is_done():
                 sleep(1)
-
-
-
+    except RuntimeError as error:
+        print(str(error))
+        exit(EX_UNAVAILABLE)
     exit(EX_OK)
 
 
@@ -243,6 +276,7 @@ def get_arg_parser():
     a('-b', '--broker', type=str, help="The Kafka broker server used by the Writer")
     a('-j', '--job', type=str, help='Writer job topic')
     a('-c', '--command', type=str, help='Writer command topic')
+    a('-r', '--control', type=str, help='Active writer job control topic')
     a('--title', type=str, default='scan title for testing', help='Output file title parameter')
     a('--event-source', type=str)
     a('--event-topic', type=str)
@@ -270,7 +304,7 @@ def parameter_description(inst_param):
     return desc
 
 
-def construct_writer_pv_dicts(instr:
+def construct_writer_pv_dicts(instr: Path | str, prefix: str, topic: str):
     from .mccode import get_mccode_instr_parameters
     parameters = get_mccode_instr_parameters(instr)
     return construct_writer_pv_dicts_from_parameters(parameters, prefix, topic)
@@ -306,10 +340,11 @@ def print_time():
 
 
 def start_writer():
-
-    return start_pool_writer(
-
-
+    a, parameters, structure = parse_writer_args()
+    return start_pool_writer(
+        a.start_time, structure, a.filename, stop_time_string=a.stop_time,
+        broker=a.broker, job_topic=a.job, command_topic=a.command,
+        control_topic=a.control, wait=a.wait, timeout=a.time_out, job_id=a.job_id)
 
 
 def wait_on_writer():
```
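The refactor splits the old monolithic `start_pool_writer` in two: `writer_start` builds the `WriteJob` (now carrying an explicit `job_id` and `control_topic`) and returns the start handle plus the `JobHandler` for programmatic callers such as `manage/orchestrate.py`, while `start_pool_writer` becomes a thin CLI wrapper that keeps the process exit-code behaviour. A minimal sketch of calling `writer_start` directly, assuming a reachable broker and a running kafka-to-nexus pool using this release's topic names; the empty `structure` is a placeholder, not a valid NexusStructure:

```python
from datetime import datetime, timezone
from mccode_plumber.writer import writer_start

start, handler = writer_start(
    datetime.now(timezone.utc).isoformat(),  # start_time_string
    structure={},                    # placeholder; a real NexusStructure dict is needed
    filename='demo.h5',              # resolved under kafka-to-nexus' working directory
    stop_time_string=None,           # no scheduled stop time
    broker='localhost:9092',
    job_topic='WriterPool',
    command_topic='WriterCommand',
    control_topic='WriterCommand',   # reuse the command topic, as orchestrate.py does
    timeout=30.0,                    # RuntimeError if the start is not acknowledged
    wait=False,
    job_id=None,                     # passed to WriteJob as "" via `job_id or ""`
)
```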
{mccode_plumber-0.11.0.dist-info → mccode_plumber-0.12.0.dist-info}/METADATA
CHANGED

```diff
@@ -1,16 +1,17 @@
 Metadata-Version: 2.4
 Name: mccode-plumber
-Version: 0.11.0
+Version: 0.12.0
 Author-email: Gregory Tucker <gregory.tucker@ess.eu>
 Classifier: License :: OSI Approved :: BSD License
 Description-Content-Type: text/markdown
 Requires-Dist: p4p
 Requires-Dist: kafka-python>=2.2.11
 Requires-Dist: ess-streaming-data-types>=0.14.0
-Requires-Dist: restage>=0.7.
+Requires-Dist: restage>=0.7.2
 Requires-Dist: mccode-to-kafka>=0.2.2
 Requires-Dist: moreniius>=0.4.0
 Requires-Dist: icecream
+Requires-Dist: ephemeral-port-reserve
 
 # McCode Plumber
 Setup, run, and teardown the infrastructure for splitrun McCode scans sending data through Kafka into NeXus
```