mccode-plumber 0.6.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- /dev/null
+++ b/.github/workflows/pip.yml
@@ -0,0 +1,38 @@
+ name: Pip
+
+ on:
+   workflow_dispatch:
+   # pull_request:
+   # push:
+   #   branches:
+   #     - main
+
+ jobs:
+   build:
+     name: Build with Pip
+     runs-on: ${{ matrix.platform }}
+     strategy:
+       fail-fast: false
+       matrix:
+         platform: [windows-latest, macos-latest, ubuntu-latest]
+         python-version: ["3.9", "3.10", "3.11", "3.12"]
+
+     steps:
+       - uses: actions/checkout@v4
+
+       - uses: actions/setup-python@v4
+         with:
+           python-version: ${{ matrix.python-version }}
+
+       - name: Set min macOS version
+         if: runner.os == 'macOS'
+         run: |
+           echo "MACOSX_DEPLOYMENT_TARGET=10.14" >> $GITHUB_ENV
+
+       - name: Build and install
+         run: pip install --verbose .
+
+       - name: Test
+         run: |
+           python -m pip install pytest
+           python -m pytest
--- /dev/null
+++ b/.github/workflows/wheels.yml
@@ -0,0 +1,52 @@
+ name: Wheels
+
+ on:
+   workflow_dispatch:
+   pull_request:
+   push:
+     branches:
+       - main
+   release:
+     types:
+       - published
+
+ jobs:
+   build_sdist_and_wheel:
+     name: Build SDist and Wheel
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v4
+         with:
+           fetch-depth: 0
+           submodules: true
+
+       - name: Build SDist
+         run: pipx run build
+
+       - name: Check metadata
+         run: pipx run twine check dist/*
+
+       - uses: actions/upload-artifact@v4
+         with:
+           path: dist/*
+
+   upload_all:
+     name: Upload if release
+     needs: [build_sdist_and_wheel]
+     runs-on: ubuntu-latest
+     environment:
+       name: pypi
+       url: https://pypi.org/p/mccode-plumber
+     permissions:
+       id-token: write
+     if: github.event_name == 'release' && github.event.action == 'published'
+
+     steps:
+       - uses: actions/setup-python@v5
+
+       - uses: actions/download-artifact@v4
+         with:
+           name: artifact
+           path: dist
+
+       - uses: pypa/gh-action-pypi-publish@release/v1
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,5 @@
+ build/
+ venv/
+ __pycache__/
+ .idea/
+ *.egg-info
--- /dev/null
+++ b/PKG-INFO
@@ -0,0 +1,15 @@
+ Metadata-Version: 2.4
+ Name: mccode-plumber
+ Version: 0.6.0
+ Author-email: Gregory Tucker <gregory.tucker@ess.eu>
+ Classifier: License :: OSI Approved :: BSD License
+ Description-Content-Type: text/markdown
+ Requires-Dist: p4p
+ Requires-Dist: file-writer-control>=1.3.0
+ Requires-Dist: restage>=0.4.0
+ Requires-Dist: mccode-to-kafka>=0.2.1
+ Requires-Dist: moreniius>=0.2.3
+ Requires-Dist: icecream
+
+ # McCode Plumber
+ Set up, run, and tear down the infrastructure for splitrun McCode scans, sending data through Kafka into NeXus files.
--- /dev/null
+++ b/README.md
@@ -0,0 +1,2 @@
+ # McCode Plumber
+ Set up, run, and tear down the infrastructure for splitrun McCode scans, sending data through Kafka into NeXus files.
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,37 @@
+ [build-system]
+ requires = ['setuptools>=60', 'setuptools_scm>=8.0']
+ build-backend = 'setuptools.build_meta'
+
+ [project]
+ name = 'mccode-plumber'
+ dependencies = [
+     'p4p',
+     'file-writer-control>=1.3.0',
+     'restage>=0.4.0',
+     'mccode-to-kafka>=0.2.1',
+     'moreniius>=0.2.3',
+     'icecream',
+ ]
+ readme = "README.md"
+ authors = [
+     { name = "Gregory Tucker", email = "gregory.tucker@ess.eu" },
+ ]
+ classifiers = [
+     "License :: OSI Approved :: BSD License",
+ ]
+ dynamic = ['version']
+
+ [project.scripts]
+ mp-splitrun = 'mccode_plumber.splitrun:main'
+ mp-epics = 'mccode_plumber.epics:run'
+ mp-epics-update = 'mccode_plumber.epics:update'
+ mp-forwarder-setup = 'mccode_plumber.forwarder:setup'
+ mp-forwarder-teardown = 'mccode_plumber.forwarder:teardown'
+ mp-writer-from = 'mccode_plumber.writer:print_time'
+ mp-writer-write = 'mccode_plumber.writer:start_writer'
+ mp-writer-wait = 'mccode_plumber.writer:wait_on_writer'
+ mp-register-topics = 'mccode_plumber.kafka:register_topics'
+ mp-insert-hdf5-instr = 'mccode_plumber.mccode:insert'
+
+ [tool.setuptools_scm]
+
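Since `version` is declared `dynamic` and supplied by `setuptools_scm`, the installed distribution's version can be read back at runtime through the standard library; a minimal sketch:

```python
# Minimal sketch: read the setuptools_scm-derived version of the
# installed distribution (standard-library API only).
from importlib.metadata import version

print(version('mccode-plumber'))  # e.g. '0.6.0'
```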
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,4 @@
+ [egg_info]
+ tag_build =
+ tag_date = 0
+
File without changes
File without changes
--- /dev/null
+++ b/src/mccode_plumber/epics.py
@@ -0,0 +1,123 @@
+ #!/usr/bin/env python3
+ from p4p.nt import NTScalar
+ from p4p.server import Server, StaticProvider
+ from p4p.server.thread import SharedPV
+ from pathlib import Path
+ from typing import Union
+
+
+ def convert_instr_parameters_to_nt(parameters):
+     from mccode_antlr.common.expression import DataType, ShapeType
+     out = {}
+     for p in parameters:
+         expr = p.value
+         if expr.is_str:
+             t, d = 's', ''
+         elif expr.data_type == DataType.int:
+             t, d = 'i', 0
+         elif expr.data_type == DataType.float:
+             t, d = 'd', 0.0
+         else:
+             raise ValueError(f"Unknown parameter type {expr.data_type}")
+         if expr.shape_type == ShapeType.vector:
+             t, d = 'a' + t, [d]
+         out[p.name] = NTScalar(t).wrap(expr.value if expr.has_value else d)
+     return out
+
+
+ def parse_instr_nt_values(instr: Union[Path, str]):
+     """Get the instrument parameters from an Instr or a parseable Instr file and convert them to NTScalar values"""
+     from .mccode import get_mccode_instr_parameters
+     nts = convert_instr_parameters_to_nt(get_mccode_instr_parameters(instr))
+     if 'mcpl_filename' not in nts:
+         nts['mcpl_filename'] = NTScalar('s').wrap('')
+     return nts
+
+
+ class MailboxHandler:
+     @staticmethod
+     def put(pv, op):
+         from datetime import datetime, timezone
+         val = op.value()
+
+         if pv.nt is None:
+             # Assume that this means wrap wasn't provided ...
+             pv.nt = NTScalar(val.type()['value'])
+             pv._wrap = pv.nt.wrap
+
+         # Notify any subscribers of the new value, adding the timestamp so they know when it was set.
+         pv.post(val, timestamp=datetime.now(timezone.utc).timestamp())
+         # Notify the client making this PUT operation that it has now completed
+         op.done()
+
+
+ def get_parser():
+     from argparse import ArgumentParser
+     p = ArgumentParser()
+     p.add_argument('instr', type=str, help='The instrument file to read')
+     p.add_argument('-p', '--prefix', type=str, help='The EPICS PV prefix to use', default='mcstas:')
+     return p
+
+
+ def parse_args():
+     args = get_parser().parse_args()
+     parameters = parse_instr_nt_values(args.instr)
+     return parameters, args
+
+
+ def main(names: dict[str, NTScalar], prefix: str = None):
+     provider = StaticProvider('mailbox')  # 'mailbox' is an arbitrary name
+
+     pvs = []  # we must keep a reference in order to keep the Handler from being collected
+     for name, value in names.items():
+         pv = SharedPV(initial=value, handler=MailboxHandler())
+         provider.add(f'{prefix}{name}' if prefix else name, pv)
+         pvs.append(pv)
+
+     print(f'Start mailbox server for {len(pvs)} PVs with prefix {prefix}')
+     Server.forever(providers=[provider])
+     print('Done')
+
+
+ def run():
+     parameters, args = parse_args()
+     main(parameters, prefix=args.prefix)
+
+
+ def start(parameters, prefix: str = None):
+     from multiprocessing import Process
+     proc = Process(target=main, args=(parameters, prefix))
+     proc.start()
+     return proc
+
+
+ def stop(proc):
+     proc.terminate()
+     proc.join(1)
+     proc.close()
+
+
+ def update():
+     from argparse import ArgumentParser
+     from p4p.client.thread import Context
+     parser = ArgumentParser(description="Update the mailbox server with new values")
+     parser.add_argument('address', type=str, help='The mailbox address of the value to be updated')
+     parser.add_argument('value', type=str, help='The new value to be assigned to the mailbox')
+     args = parser.parse_args()
+
+     ctx = Context('pva')
+     pv = ctx.get(args.address, throw=False)
+     if isinstance(pv, float):
+         ctx.put(args.address, float(args.value))
+     elif isinstance(pv, int):
+         ctx.put(args.address, int(args.value))
+     elif isinstance(pv, str):
+         ctx.put(args.address, str(args.value))
+     else:
+         raise ValueError(f'Unknown type {type(pv)} (this is likely a vector that I can not handle yet?)')
+
+     ctx.disconnect()
+
+
+ if __name__ == '__main__':
+     run()
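The `start`/`stop` helpers above let the mailbox server be driven from another Python process, e.g. in a test harness; a minimal sketch (the instrument file name and `demo:` prefix are hypothetical):

```python
# Minimal sketch: run the mailbox server in a subprocess and update one PV.
# 'my_instrument.instr' and the 'demo:' prefix are hypothetical.
from mccode_plumber.epics import parse_instr_nt_values, start, stop
from p4p.client.thread import Context

pvs = parse_instr_nt_values('my_instrument.instr')
proc = start(pvs, prefix='demo:')
try:
    ctx = Context('pva')
    # 'mcpl_filename' is always present -- parse_instr_nt_values adds it if missing
    ctx.put('demo:mcpl_filename', 'run_0001.mcpl')
    print(ctx.get('demo:mcpl_filename'))
finally:
    stop(proc)
```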
--- /dev/null
+++ b/src/mccode_plumber/forwarder.py
@@ -0,0 +1,88 @@
+ """
+ Control a running Forwarder instance to send data to a Kafka broker.
+
+ Two gateway functions are exposed as system scripts to add and remove an Instr's parameters from the Forwarder's
+ list of EPICS PVs to monitor.
+
+ Alternatively, the same functionality can be accessed from Python through the configure_forwarder and reset_forwarder
+ functions, which take PV information and Forwarder/Kafka configuration as arguments.
+ """
+
+
+ def normalise_pvs(pvs: list[dict], config=None, prefix=None, topic=None):
+     if config is None:
+         config = "localhost:9092/forwarderConfig"
+     if prefix is None:
+         prefix = 'mcstas:'
+     if topic is None:
+         topic = 'mcstasParameters'
+
+     if '/' not in config:
+         raise RuntimeError('Expected / to separate broker and topic in Forwarder Kafka configuration specification')
+
+     cfg_broker, cfg_topic = config.split('/', 1)
+
+     for pv in pvs:
+         if 'source' not in pv:
+             pv['source'] = f'{prefix}{pv["name"]}'
+         if 'topic' not in pv:
+             pv['topic'] = topic
+     return cfg_broker, cfg_topic, pvs
+
+
+ def streams(pvs: list[dict]):
+     from streaming_data_types.forwarder_config_update_rf5k import StreamInfo, Protocol
+     return [StreamInfo(pv['source'], pv['module'], pv['topic'], Protocol.Protocol.PVA) for pv in pvs]
+
+
+ def configure_forwarder(pvs: list[dict], config=None, prefix=None, topic=None):
+     from confluent_kafka import Producer
+     from streaming_data_types.forwarder_config_update_rf5k import serialise_rf5k
+     from streaming_data_types.fbschemas.forwarder_config_update_rf5k.UpdateType import UpdateType
+
+     cfg_broker, cfg_topic, pvs = normalise_pvs(pvs, config, prefix, topic)
+     producer = Producer({"bootstrap.servers": cfg_broker})
+     producer.produce(cfg_topic, serialise_rf5k(UpdateType.ADD, streams(pvs)))
+     producer.flush()
+     return pvs
+
+
+ def reset_forwarder(pvs: list[dict], config=None, prefix=None, topic=None):
+     from confluent_kafka import Producer
+     from streaming_data_types.forwarder_config_update_rf5k import serialise_rf5k
+     from streaming_data_types.fbschemas.forwarder_config_update_rf5k.UpdateType import UpdateType
+
+     cfg_broker, cfg_topic, pvs = normalise_pvs(pvs, config, prefix, topic)
+     producer = Producer({"bootstrap.servers": cfg_broker})
+     producer.produce(cfg_topic, serialise_rf5k(UpdateType.REMOVE, streams(pvs)))
+     producer.flush()
+     return pvs
+
+
+ def parse_registrar_args():
+     from argparse import ArgumentParser
+     from .mccode import get_mccode_instr_parameters
+
+     parser = ArgumentParser(description="Discover EPICS PVs and inform a forwarder about them")
+     parser.add_argument('-p', '--prefix', type=str, default='mcstas:')
+     parser.add_argument('instrument', type=str, help="The mcstas instrument with EPICS PVs")
+     parser.add_argument('-c', '--config', type=str, help="The Kafka server and topic for configuring the Forwarder")
+     parser.add_argument('-t', '--topic', type=str, help="The Kafka topic to instruct the Forwarder to send data to")
+
+     args = parser.parse_args()
+     parameter_names = [p.name for p in get_mccode_instr_parameters(args.instrument)]
+     if 'mcpl_filename' not in parameter_names:
+         parameter_names.append('mcpl_filename')
+     # the forwarder only cares about: "source", "module", "topic"
+     params = [{'source': f'{args.prefix}{name}', 'module': 'f144', 'topic': args.topic} for name in parameter_names]
+     return params, args
+
+
+ def setup():
+     parameters, args = parse_registrar_args()
+     configure_forwarder(parameters, config=args.config, prefix=args.prefix, topic=args.topic)
+
+
+ def teardown():
+     parameters, args = parse_registrar_args()
+     reset_forwarder(parameters, config=args.config, prefix=args.prefix, topic=args.topic)
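As the module docstring says, the same add/remove cycle can be driven from Python; a minimal sketch (the broker address, topic names, and PV name are hypothetical):

```python
# Minimal sketch: register one PV with a running Forwarder, then remove it.
# Broker address, topic names, and the PV name are hypothetical.
from mccode_plumber.forwarder import configure_forwarder, reset_forwarder

# 'source' and 'topic' keys are filled in by normalise_pvs from prefix/topic
pvs = [{'name': 'par1', 'module': 'f144'}]
configure_forwarder(pvs, config='localhost:9092/forwarderConfig',
                    prefix='mcstas:', topic='mcstasParameters')
# ... run the measurement ...
reset_forwarder(pvs, config='localhost:9092/forwarderConfig',
                prefix='mcstas:', topic='mcstasParameters')
```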
--- /dev/null
+++ b/src/mccode_plumber/kafka.py
@@ -0,0 +1,27 @@
+ def parse_kafka_topic_args():
+     from argparse import ArgumentParser
+     parser = ArgumentParser(description="Prepare the named Kafka broker to host one or more topics")
+     parser.add_argument('-b', '--broker', type=str, help='The Kafka broker server to interact with')
+     parser.add_argument('topic', nargs="+", type=str, help='The Kafka topic(s) to register')
+     parser.add_argument('-q', '--quiet', action='store_true', help='Quiet (positive) failure')
+
+     args = parser.parse_args()
+     return args
+
+
+ def register_topics():
+     from confluent_kafka.admin import AdminClient, NewTopic
+     args = parse_kafka_topic_args()
+
+     client = AdminClient({"bootstrap.servers": args.broker})
+     topics = [NewTopic(t, num_partitions=1, replication_factor=1) for t in args.topic]
+     futures = client.create_topics(topics)
+
+     for topic, future in futures.items():
+         try:
+             future.result()
+             print(f"Topic {topic} created")
+         except Exception as e:
+             from confluent_kafka import KafkaError
+             if not (args.quiet and e.args[0].code() == KafkaError.TOPIC_ALREADY_EXISTS):
+                 print(f"Failed to create topic {topic}: {e.args[0].str()}")
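A quick way to confirm that registration took effect is to ask the broker for its topic metadata; a minimal sketch (the broker address and topic name are hypothetical):

```python
# Minimal sketch: verify a topic exists after registration.
# The broker address and topic name are hypothetical.
from confluent_kafka.admin import AdminClient

client = AdminClient({"bootstrap.servers": "localhost:9092"})
metadata = client.list_topics(timeout=10)  # returns ClusterMetadata
print('mcstasParameters' in metadata.topics)
```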
--- /dev/null
+++ b/src/mccode_plumber/mccode.py
@@ -0,0 +1,59 @@
+ from pathlib import Path
+ from typing import Union
+ from mccode_antlr.instr import Instr
+ from mccode_antlr.common import InstrumentParameter
+
+
+ def get_mcstas_instr(filename: Union[Path, str]) -> Instr:
+     from restage.instr import load_instr
+     return load_instr(filename)
+
+
+ def get_mccode_instr_parameters(filename: Union[Path, str]) -> tuple[InstrumentParameter]:
+     from mccode_antlr.loader.loader import parse_mccode_instr_parameters
+     if not isinstance(filename, Path):
+         filename = Path(filename)
+     if filename.suffix == '.instr':
+         with filename.open('r') as file:
+             contents = file.read()
+         return parse_mccode_instr_parameters(contents)
+     # otherwise:
+     return get_mcstas_instr(filename).parameters
+
+
+ def insert_mcstas_hdf5(filename: Union[Path, str], outfile: Union[Path, str], parent: str):
+     import h5py
+     from mccode_antlr.io.hdf5 import HDF5IO
+     if isinstance(filename, str):
+         filename = Path(filename)
+     with h5py.File(outfile, mode='r+') as dest_file:
+         if parent in dest_file:
+             raise RuntimeError(f'{outfile} already contains an object named {parent}')
+         dest = dest_file.create_group(parent)
+         if filename.suffix.lower() in ('.h5', '.hdf', '.hdf5', ):
+             # copy the file contents if it _is_ a serialized instrument
+             with h5py.File(filename, mode='r') as source:
+                 for obj in source.keys():
+                     source.copy(obj, dest)
+         else:
+             instr = get_mcstas_instr(filename)
+             HDF5IO.save(dest, instr)
+
+
+ def get_arg_parser():
+     from argparse import ArgumentParser
+     from .utils import is_readable, is_appendable
+     parser = ArgumentParser(description="Copy an Instr HDF5 representation to a NeXus HDF5 file")
+     a = parser.add_argument
+     a('instrument', type=is_readable, default=None, help="The mcstas instrument file")
+     a('-p', '--parent', type=str, default='mcstas')
+     a('-o', '--outfile', type=is_appendable, default=None, help='Base NeXus structure, will be extended')
+     return parser
+
+
+ def insert():
+     parser = get_arg_parser()
+     args = parser.parse_args()
+     insert_mcstas_hdf5(args.instrument, args.outfile, args.parent)
+
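A sketch of the intended round trip: open (or create) a NeXus file that can be appended to, then copy the instrument description into it under a chosen parent group. The file names are hypothetical, and parsing the `.instr` file requires the instrument's McCode dependencies to be available:

```python
# Minimal sketch: embed an instrument description in an existing NeXus file.
# 'my_instrument.instr' and 'scan_0001.nxs' are hypothetical file names.
import h5py
from mccode_plumber.mccode import insert_mcstas_hdf5

with h5py.File('scan_0001.nxs', 'w') as f:  # ensure the target file exists
    f.create_group('entry')

insert_mcstas_hdf5('my_instrument.instr', 'scan_0001.nxs', parent='mcstas')
```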
--- /dev/null
+++ b/src/mccode_plumber/splitrun.py
@@ -0,0 +1,28 @@
+ def make_parser():
+     from restage.splitrun import make_splitrun_parser
+     parser = make_splitrun_parser()
+     parser.prog = 'mp-splitrun'
+     parser.add_argument('--broker', type=str, help='The Kafka broker to send monitors to', default=None)
+     parser.add_argument('--source', type=str, help='The Kafka source name to use for monitors', default=None)
+     return parser
+
+
+ def monitors_to_kafka_callback_with_arguments(broker: str, source: str):
+     from mccode_to_kafka.sender import send_histograms
+
+     def callback(*args, **kwargs):
+         print(f'monitors to kafka callback called with {args} and {kwargs}')
+         return send_histograms(*args, broker=broker, source=source, **kwargs)
+
+     # equivalent: functools.partial(send_histograms, broker=broker, source=source), {'dir': 'root'}
+     return callback, {'dir': 'root'}
+
+
+ def main():
+     from .mccode import get_mcstas_instr
+     from restage.splitrun import splitrun_args, parse_splitrun
+     args, parameters, precision = parse_splitrun(make_parser())
+     instr = get_mcstas_instr(args.instrument[0])
+     callback, callback_args = monitors_to_kafka_callback_with_arguments(args.broker, args.source)
+     return splitrun_args(instr, parameters, precision, args, callback=callback, callback_arguments=callback_args)
--- /dev/null
+++ b/src/mccode_plumber/utils.py
@@ -0,0 +1,68 @@
+ from __future__ import annotations
+
+ from pathlib import Path
+
+
+ def is_accessible(access_type):
+     def checker(name: str | None | Path):
+         if name is None or name == '':
+             return None
+         from os import access
+         if not isinstance(name, Path):
+             name = Path(name).resolve()
+         if not name.exists():
+             raise RuntimeError(f'The specified filename {name} does not exist')
+         if not access(name, access_type):
+             raise RuntimeError(f'The specified filename {name} is not accessible with mode {access_type}')
+         return name
+
+     return checker
+
+
+ def is_readable(value: str | None | Path):
+     from os import R_OK as READABLE
+     return is_accessible(READABLE)(value)
+
+
+ def is_writable(value: str | None | Path):
+     """Determine if a provided path represents an existing writable file"""
+     from os import W_OK
+     if value is None or value == '':
+         return None
+     if not isinstance(value, Path):
+         value = Path(value).resolve()
+     # Typically we can create a new file if the containing folder is writable
+     if not value.exists():
+         return value if is_accessible(W_OK)(value.parent) else None
+     # And if the file exists we should check if we can overwrite it
+     return is_accessible(W_OK)(value)
+
+
+ def is_creatable(value: str | None | Path):
+     """Determine if a provided path represents a file that can be created"""
+     if value is None or value == '':
+         return None
+     if not isinstance(value, Path):
+         value = Path(value).resolve()
+     if value.exists():
+         raise RuntimeError(f"The specified filename {value} already exists!")
+     return value if is_writable(value.parent) else None
+
+
+ def is_appendable(value: str | None | Path):
+     from os import W_OK, R_OK
+     return is_accessible(R_OK | W_OK)(value)
+
+
+ def is_executable(value: str | None | Path):
+     from os import X_OK as EXECUTABLE
+     return is_accessible(EXECUTABLE)(value)
+
+
+ def is_callable(name: str | None):
+     if name is None:
+         return None
+     from importlib import import_module
+     module_name, func_name = name.split(':')
+     module = import_module(module_name)
+     return getattr(module, func_name)
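`is_callable` resolves the same `module:function` specification syntax that the console-script entry points use; a minimal sketch using a standard-library target:

```python
# Minimal sketch: resolve a 'module:function' specification, as accepted
# by the writer CLI's --ns-func option.
from mccode_plumber.utils import is_callable

func = is_callable('json:dumps')  # standard-library example
print(func({'a': 1}))             # -> '{"a": 1}'
```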
--- /dev/null
+++ b/src/mccode_plumber/writer.py
@@ -0,0 +1,336 @@
+ from __future__ import annotations
+
+ from pathlib import Path
+ from typing import Union, Callable
+ from mccode_antlr.instr import Instr
+
+
+ def _is_group(x, group):
+     """Is a (dict) object a (NeXus) group with the specified name?"""
+     return 'name' in x and 'type' in x and x['type'] == 'group' and x['name'] == group
+
+
+ def _is_stream(x, name):
+     """Is a (dict) object a stream with the specified name?"""
+     return 'module' in x and x['module'] == name
+
+
+ def _make_group(name: str, nx_class: str):
+     """Make a (NeXus) group dict with the specified name and class.
+
+     A group always has a name, a type, a list of children, and a list of dictionaries as attributes.
+     """
+     return dict(name=name, type='group',
+                 attributes=[dict(name='NX_class', dtype='string', values=nx_class)],
+                 children=[])
+
+
+ def _get_or_add_group(children: list, name: str, nx_class: str):
+     """Get or add a group with the specified name and class to a list of children"""
+     g = [x for x in children if _is_group(x, name)]
+     if len(g):
+         return g[0], children
+     children.append(_make_group(name, nx_class))
+     return children[-1], children
+
+
+ def _get_or_add_stream(children: list, name: str, stream_config: dict):
+     """Get or add a stream with the specified name and config to a list of children"""
+     m = [x for x in children if _is_stream(x, name)]
+     if len(m):
+         # check that the stream-config is right?
+         return m[0], children
+     children.append(dict(module=name, config=stream_config))
+     return children[-1], children
+
+
+ # def a_log(ch: dict):
+ #     """Unused, temporarily kept for debugging. May have been correct at some point, wrong now."""
+ #     attrs = dict(name='NX_class', type='string', values='NXlog')
+ #     units = dict(name='units', type='string', values=ch.get('units', 'dimensionless'))
+ #     log_child = dict(module='f144', source=ch['source'], topic=ch['topic'], type=ch['dtype'], attributes=[units])
+ #     desc_child = dict(module='dataset', config=dict(name='description', values=ch['description'], type='string'))
+ #     return dict(name=ch['name'], type='group', attributes=[attrs], children=[log_child, desc_child])
+
+
+ def a_log_as_of_20230626(ch: dict):
+     """Correct form as of June 26, 2023. Notably, source, topic, type, and unit go in a config field.
+
+     The ch dict must have the following keys:
+         - name: the name of the logged value
+         - dtype: the data type of the logged value
+         - source: the Kafka source of the logged value
+         - topic: the Kafka topic of the logged value
+         - description: a description of the logged value
+         - module: the flatbuffer module to use to log the value, e.g., 'f144'
+         - unit: the unit of the logged value, e.g., 'dimensionless'
+
+     The returned structure is:
+         {name: <name>, type: 'group', attributes: [{name: 'NX_class', type: 'string', values: 'NXlog'}],
+          children: [
+             {module: <module>, config: {type: <dtype>, source: <source>, topic: <topic>, unit: <unit>}},
+             {module: 'dataset', config: {name: 'description', type: 'string', values: <description>}}
+          ]
+         }
+     """
+     c = dict(type=ch['dtype'], topic=ch['topic'], source=ch['source'], unit=ch.get('unit', 'dimensionless'))
+     # Use f144 for most things, or f143 for more general objects -- like strings
+     log_child = dict(module=ch['module'], config=c)
+     attrs = dict(name='NX_class', type='string', values='NXlog')
+     desc_child = dict(module='dataset', config=dict(name='description', values=ch['description'], type='string'))
+     return dict(name=ch['name'], type='group', attributes=[attrs], children=[log_child, desc_child])
+
+
+ def default_nexus_structure(instr, origin: str | None = None):
+     from zenlog import log
+     import moreniius.additions  # patches the Instance class to have more translation methods
+     from moreniius import MorEniius
+     log.info('Creating NeXus structure from instrument'
+              ' -- no custom Instance to NeXus mapping is used'
+              ' -- provide a JSON object, a python module and function name, or executable to use a custom mapping')
+     return MorEniius.from_mccode(instr, origin=origin, only_nx=False, absolute_depends_on=True).to_nexus_structure()
+
+
+ def add_pvs_to_nexus_structure(ns: dict, pvs: list[dict]):
+     if 'children' not in ns:
+         raise RuntimeError('Expected a top-level NeXus structure dict with a list entry named "children".')
+     entry, ns['children'] = _get_or_add_group(ns['children'], 'entry', 'NXentry')
+     # # NXlogs isn't a NeXus base class ...
+     # logs, entry['children'] = _get_or_add_group(entry['children'], 'logs', 'NXlogs')
+     # So dump everything directly into 'children' -- but 'NXparameters' _does_ exist:
+     parameters, entry['children'] = _get_or_add_group(entry['children'], 'parameters', 'NXparameters')
+     for pv in pvs:
+         if any(x not in pv for x in ['name', 'dtype', 'source', 'topic', 'description', 'module', 'unit']):
+             raise RuntimeError(f"PV {pv['name']} is missing one or more required keys")
+         parameters['children'].append(a_log_as_of_20230626(pv))
+     return ns
+
+
+ def add_title_to_nexus_structure(ns: dict, title: str):
+     if 'children' not in ns:
+         raise RuntimeError('Expected a top-level NeXus structure dict with a list entry named "children".')
+     entry, ns['children'] = _get_or_add_group(ns['children'], 'entry', 'NXentry')
+     entry['children'].append(dict(module='dataset', config=dict(name='title', values=title, type='string')))
+     return ns
+
+
+ def insert_events_in_nexus_structure(ns: dict, config: dict):
+     if 'children' not in ns:
+         raise RuntimeError('Expected a top-level NeXus structure dict with a list entry named "children".')
+     entry, ns['children'] = _get_or_add_group(ns['children'], 'entry', 'NXentry')
+
+     # check whether 'instrument' is already a group under 'entry', and add it if not
+     instr, entry['children'] = _get_or_add_group(entry['children'], 'instrument', 'NXinstrument')
+
+     # check whether 'detector' is a group under '/entry/instrument', and add it if not
+     detector, instr['children'] = _get_or_add_group(instr['children'], 'detector', 'NXdetector')
+     # ... TODO fill in all of the required detector elements :(
+
+     # check whether 'events' is a group under '/entry/instrument/detector'
+     events, detector['children'] = _get_or_add_group(detector['children'], 'events', 'NXevent_data')
+
+     # Ensure that the events group has the correct stream-specification child
+     # {'module': 'ev44', 'config': {'source': 'source', 'topic': 'topic'}}
+     stream, events['children'] = _get_or_add_stream(events['children'], 'ev44', config)
+
+     return ns
+
+
+ def get_writer_pool(broker: str = None, job: str = None, command: str = None):
+     from file_writer_control import WorkerJobPool
+     pool = WorkerJobPool(f"{broker}/{job}", f"{broker}/{command}")
+     return pool
+
+
+ def define_nexus_structure(instr: Union[Path, str], pvs: list[dict], title: str = None,
+                            event_stream: dict[str, str] = None, file: Union[Path, None] = None,
+                            func: Union[Callable[[Instr], dict], None] = None,
+                            binary: Union[Path, None] = None, origin: str = None):
+     import json
+     from .mccode import get_mcstas_instr
+     if file is not None and file.exists():
+         with open(file, 'r') as file:
+             nexus_structure = json.load(file)
+     elif func is not None:
+         nexus_structure = func(get_mcstas_instr(instr))
+     elif binary is not None and binary.exists():
+         from subprocess import run, PIPE
+         result = run([binary, str(instr)], stdout=PIPE, stderr=PIPE)
+         if result.returncode != 0:
+             raise RuntimeError(f"Failed to execute {binary} {instr} due to error {result.stderr.decode()}")
+         nexus_structure = json.loads(result.stdout.decode())
+     else:
+         nexus_structure = default_nexus_structure(get_mcstas_instr(instr), origin=origin)
+     nexus_structure = add_pvs_to_nexus_structure(nexus_structure, pvs)
+     nexus_structure = add_title_to_nexus_structure(nexus_structure, title)
+     # nexus_structure = insert_events_in_nexus_structure(nexus_structure, event_stream)
+     return nexus_structure
+
+
+ def start_pool_writer(start_time_string, structure, filename=None, stop_time_string: str | None = None,
+                       broker: str | None = None, job_topic: str | None = None, command_topic: str | None = None,
+                       wait: bool = False, timeout: float | None = None, job_id: str | None = None):
+     from sys import exit
+     from os import EX_OK, EX_UNAVAILABLE
+     from time import sleep
+     from json import dumps
+     from datetime import datetime, timedelta
+     from file_writer_control import JobHandler, WriteJob, CommandState
+
+     start_time = datetime.fromisoformat(start_time_string)
+     if filename is None:
+         filename = f'{start_time:%Y%m%d_%H%M%S}.nxs'
+
+     pool = get_writer_pool(broker=broker, job=job_topic, command=command_topic)
+     handler_opts = {'worker_finder': pool}
+
+     handler = JobHandler(**handler_opts)
+     small_string = dumps(structure, indent=None, separators=(',', ':'))
+
+     end_time = datetime.now() if wait else None
+     if stop_time_string is not None:
+         end_time = datetime.fromisoformat(stop_time_string)
+     print(f"write file from {start_time} until {end_time}")
+
+     job = WriteJob(small_string, filename, broker, start_time, end_time, job_id=job_id or "")
+     # start the job
+     start = handler.start_job(job)
+     if timeout is not None:
+         try:
+             # ensure the start succeeds:
+             zero_time = datetime.now()
+             while not start.is_done():
+                 if zero_time + timedelta(seconds=timeout) < datetime.now():
+                     raise RuntimeError(f"Timed out while starting job {job.job_id}")
+                 elif start.get_state() == CommandState.ERROR:
+                     raise RuntimeError(f"Starting job {job.job_id} failed with message {start.get_message()}")
+                 sleep(1)
+         except RuntimeError as e:
+             print(f"{e} The message was: {start.get_message()}")
+             exit(EX_UNAVAILABLE)
+
+     if wait:
+         try:
+             while not handler.is_done():
+                 sleep(1)
+         except RuntimeError as error:
+             print(f'{error} Writer failed, producing message:\n{handler.get_message()}')
+             exit(EX_UNAVAILABLE)
+     exit(EX_OK)
+
+
+ def get_arg_parser():
+     from argparse import ArgumentParser
+     from .utils import is_callable, is_readable, is_executable, is_writable
+     parser = ArgumentParser(description="Control writing Kafka stream(s) to a NeXus file")
+     a = parser.add_argument
+     a('instrument', type=str, default=None, help="The mcstas instrument with EPICS PVs")
+     a('-p', '--prefix', type=str, default='mcstas:')
+     a('-t', '--topic', type=str, help="The Kafka broker topic to instruct the Forwarder to use")
+     a('-b', '--broker', type=str, help="The Kafka broker server used by the Writer")
+     a('-j', '--job', type=str, help='Writer job topic')
+     a('-c', '--command', type=str, help='Writer command topic')
+     a('--title', type=str, default='scan title for testing', help='Output file title parameter')
+     a('--event-source', type=str)
+     a('--event-topic', type=str)
+     a('-f', '--filename', type=str, default=None)
+     a('--ns-func', type=is_callable, default=None, help='Python module:function to produce NeXus structure')
+     a('--ns-file', type=is_readable, default=None, help='Base NeXus structure, will be extended')
+     a('--ns-exec', type=is_executable, default=None, help='Executable to produce NeXus structure')
+     a('--ns-save', type=is_writable, default=None, help='Path at which to save (overwrite) extended NeXus structure')
+     a('--start-time', type=str)
+     a('--stop-time', type=str, default=None)
+     a('--origin', type=str, default=None, help='component name used for the origin of the NeXus file')
+     a('--wait', action='store_true', help='If provided, wait for the writer to finish before exiting')
+     a('--time-out', type=float, default=120., help='Wait up to the timeout for writing to start')
+     a('--job-id', type=str, default=None, help='Unique Job identifier for this write-job')
+
+     return parser
+
+
+ def parameter_description(inst_param):
+     desc = f"{inst_param.value.data_type} valued McStas parameter '{inst_param.name}', "
+     desc += f"default: {inst_param.value}" if inst_param.value.has_value else "no default"
+     if inst_param.unit is not None:
+         desc += f" and expected units of {inst_param.unit}"
+     return desc
+
+
+ def construct_writer_pv_dicts(instr: Union[Path, str], prefix: str, topic: str):
+     from .mccode import get_mccode_instr_parameters
+     parameters = get_mccode_instr_parameters(instr)
+     return construct_writer_pv_dicts_from_parameters(parameters, prefix, topic)
+
+
+ def construct_writer_pv_dicts_from_parameters(parameters, prefix: str, topic: str):
+     def strip_quotes(s):
+         return s[1:-1] if s is not None and len(s) > 2 and (s[0] == s[-1] == '"' or s[0] == s[-1] == "'") else s
+     return [dict(name=p.name, dtype=p.value.data_type.name, source=f'{prefix}{p.name}', topic=topic,
+                  description=parameter_description(p), module='f144', unit=strip_quotes(p.unit)) for p in parameters]
+
+
+ def parse_writer_args():
+     args = get_arg_parser().parse_args()
+     params = construct_writer_pv_dicts(args.instrument, args.prefix, args.topic)
+     structure = define_nexus_structure(args.instrument, params, title=args.title, origin=args.origin,
+                                        file=args.ns_file, func=args.ns_func, binary=args.ns_exec,
+                                        event_stream={'source': args.event_source, 'topic': args.event_topic})
+     if args.ns_save is not None:
+         from json import dump
+         with open(args.ns_save, 'w') as file:
+             dump(structure, file, indent=2)
+
+     return args, params, structure
+
+
+ def print_time():
+     from datetime import datetime
+     print(datetime.now())
+
+
+ def start_writer():
+     args, parameters, structure = parse_writer_args()
+     return start_pool_writer(args.start_time, structure, args.filename, stop_time_string=args.stop_time,
+                              broker=args.broker, job_topic=args.job, command_topic=args.command,
+                              wait=args.wait, timeout=args.time_out, job_id=args.job_id)
+
+
+ def wait_on_writer():
+     from sys import exit
+     from os import EX_OK, EX_UNAVAILABLE
+     from time import sleep
+     from datetime import datetime, timedelta
+     from file_writer_control import JobHandler, CommandState
+
+     from argparse import ArgumentParser
+     parser = ArgumentParser()
+     a = parser.add_argument
+     a('-b', '--broker', type=str, help="The Kafka broker server used by the Writer")
+     a('-j', '--job', type=str, help='Writer job topic')
+     a('-c', '--command', type=str, help='Writer command topic')
+     a('id', type=str, help='Job id to wait on')
+     a('-s', '--stop-after', type=float, help='Stop after time, seconds', default=1)
+     a('-t', '--time-out', type=float, help='Time out after, seconds', default=24*60*60*30)
+     args = parser.parse_args()
+
+     pool = get_writer_pool(broker=args.broker, job=args.job, command=args.command)
+     job = JobHandler(worker_finder=pool, job_id=args.id)
+     stop_time = datetime.now() + timedelta(seconds=args.stop_after)
+     stop = job.set_stop_time(stop_time)
+
+     try:
+         timeout = args.time_out
+         zero_time = datetime.now()
+         while not stop.is_done() and not job.is_done():
+             if zero_time + timedelta(seconds=timeout) < datetime.now():
+                 raise RuntimeError(f"Timed out while stopping job {job.job_id}")
+             elif stop.get_state() == CommandState.ERROR:
+                 raise RuntimeError(f"Stopping job {job.job_id} failed with message {stop.get_message()}")
+             sleep(0.5)
+     except RuntimeError as e:
+         print(f"{e} The message was: {stop.get_message()}")
+         exit(EX_UNAVAILABLE)
+     exit(EX_OK)
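The pieces above compose as follows when driven from Python rather than through the `mp-writer-*` scripts; a minimal sketch (the instrument file, prefix, and topic names are hypothetical, and building the default structure requires the moreniius dependencies):

```python
# Minimal sketch: build the writer's PV descriptions and the NeXus structure
# for an instrument file. File and topic names are hypothetical.
from mccode_plumber.writer import construct_writer_pv_dicts, define_nexus_structure

pvs = construct_writer_pv_dicts('my_instrument.instr',
                                prefix='mcstas:', topic='mcstasParameters')
structure = define_nexus_structure('my_instrument.instr', pvs, title='demo scan',
                                   event_stream={'source': 'detector', 'topic': 'events'})
print(structure['children'][0]['name'])  # -> 'entry'
```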
--- /dev/null
+++ b/src/mccode_plumber.egg-info/PKG-INFO
@@ -0,0 +1,15 @@
+ Metadata-Version: 2.4
+ Name: mccode-plumber
+ Version: 0.6.0
+ Author-email: Gregory Tucker <gregory.tucker@ess.eu>
+ Classifier: License :: OSI Approved :: BSD License
+ Description-Content-Type: text/markdown
+ Requires-Dist: p4p
+ Requires-Dist: file-writer-control>=1.3.0
+ Requires-Dist: restage>=0.4.0
+ Requires-Dist: mccode-to-kafka>=0.2.1
+ Requires-Dist: moreniius>=0.2.3
+ Requires-Dist: icecream
+
+ # McCode Plumber
+ Set up, run, and tear down the infrastructure for splitrun McCode scans, sending data through Kafka into NeXus files.
--- /dev/null
+++ b/src/mccode_plumber.egg-info/SOURCES.txt
@@ -0,0 +1,23 @@
+ .gitignore
+ README.md
+ pyproject.toml
+ .github/workflows/pip.yml
+ .github/workflows/wheels.yml
+ src/mccode_plumber/__init__.py
+ src/mccode_plumber/conductor.py
+ src/mccode_plumber/epics.py
+ src/mccode_plumber/forwarder.py
+ src/mccode_plumber/kafka.py
+ src/mccode_plumber/mccode.py
+ src/mccode_plumber/splitrun.py
+ src/mccode_plumber/utils.py
+ src/mccode_plumber/writer.py
+ src/mccode_plumber.egg-info/PKG-INFO
+ src/mccode_plumber.egg-info/SOURCES.txt
+ src/mccode_plumber.egg-info/dependency_links.txt
+ src/mccode_plumber.egg-info/entry_points.txt
+ src/mccode_plumber.egg-info/requires.txt
+ src/mccode_plumber.egg-info/top_level.txt
+ tests/test_epics.py
+ tests/test_splitrun.py
+ tests/test_writer.py
--- /dev/null
+++ b/src/mccode_plumber.egg-info/entry_points.txt
@@ -0,0 +1,11 @@
+ [console_scripts]
+ mp-epics = mccode_plumber.epics:run
+ mp-epics-update = mccode_plumber.epics:update
+ mp-forwarder-setup = mccode_plumber.forwarder:setup
+ mp-forwarder-teardown = mccode_plumber.forwarder:teardown
+ mp-insert-hdf5-instr = mccode_plumber.mccode:insert
+ mp-register-topics = mccode_plumber.kafka:register_topics
+ mp-splitrun = mccode_plumber.splitrun:main
+ mp-writer-from = mccode_plumber.writer:print_time
+ mp-writer-wait = mccode_plumber.writer:wait_on_writer
+ mp-writer-write = mccode_plumber.writer:start_writer
--- /dev/null
+++ b/src/mccode_plumber.egg-info/requires.txt
@@ -0,0 +1,6 @@
+ p4p
+ file-writer-control>=1.3.0
+ restage>=0.4.0
+ mccode-to-kafka>=0.2.1
+ moreniius>=0.2.3
+ icecream
--- /dev/null
+++ b/src/mccode_plumber.egg-info/top_level.txt
@@ -0,0 +1 @@
+ mccode_plumber
--- /dev/null
+++ b/tests/test_epics.py
@@ -0,0 +1,48 @@
+ import unittest
+
+
+ class EPICSTestCase(unittest.TestCase):
+     def setUp(self):
+         from uuid import uuid4
+         from multiprocessing import Process
+         from mccode_plumber.epics import main, convert_instr_parameters_to_nt
+         from mccode_antlr.loader.loader import parse_mccode_instr_parameters, parse_mcstas_instr
+         instr = 'define instrument blah(par1, double par2, int par3=1, string par4="string", double par5=5.5) trace end'
+         self.pars = parse_mccode_instr_parameters(instr)
+         self.pvs = convert_instr_parameters_to_nt(parse_mcstas_instr(instr).parameters)
+         self.prefix = f"test{str(uuid4()).replace('-', '')}:"
+         self.proc = Process(target=main, args=(self.pvs, self.prefix))
+         self.proc.start()
+
+     def tearDown(self):
+         self.proc.terminate()
+         self.proc.join(1)
+         self.proc.close()
+
+     def test_server_runs(self):
+         from p4p.client.thread import Context
+         providers = Context.providers()
+         self.assertTrue('pva' in providers)
+         ctx = Context('pva')
+
+         for par in self.pars:
+             pv = ctx.get(f"{self.prefix}{par.name}")
+             self.assertTrue(pv is not None)
+             if par.value.has_value:
+                 self.assertEqual(pv, par.value.value)
+
+     def test_update_pvs(self):
+         from p4p.client.thread import Context
+         ctx = Context('pva')
+         values = {'par1': 1.1, 'par2': 2.2, 'par3': 3, 'par4': 'four', 'par5': 55.555}
+         for name, value in values.items():
+             ctx.put(f"{self.prefix}{name}", value)
+
+         for name, value in values.items():
+             pv = ctx.get(f"{self.prefix}{name}")
+             self.assertTrue(pv is not None)
+             self.assertEqual(pv, value)
+
+
+ if __name__ == '__main__':
+     unittest.main()
--- /dev/null
+++ b/tests/test_splitrun.py
@@ -0,0 +1,46 @@
+ import unittest
+
+
+ class SplitrunTestCase(unittest.TestCase):
+     def test_parsing(self):
+         from mccode_plumber.splitrun import make_parser
+         parser = make_parser()
+         args = parser.parse_args(['--broker', 'l:9092', '--source', 'm', '-n', '10000', 'inst.h5', '--', 'a=1:4', 'b=2:5'])
+         self.assertEqual(args.instrument, ['inst.h5'])
+         self.assertEqual(args.broker, 'l:9092')
+         self.assertEqual(args.source, 'm')
+         self.assertEqual(args.ncount, [10000])
+         self.assertEqual(args.parameters, ['a=1:4', 'b=2:5'])
+         self.assertFalse(args.parallel)
+
+     def test_mixed_order_throws(self):
+         from mccode_plumber.splitrun import make_parser
+         parser = make_parser()
+         parser.prog = "{{This is supposed to fail, don't be dismayed by this output!}}"
+         # These also output usage information to stdout -- don't be surprised by the 'extra' test output.
+         with self.assertRaises(SystemExit):
+             parser.parse_args(['inst.h5', '--broker', 'l:9092', '--source', 'm', '-n', '10000', 'a=1:4', 'b=2:5'])
+         with self.assertRaises(SystemExit):
+             parser.parse_args(['--broker', 'l:9092', '--source', 'm', 'inst.h5', '-n', '10000', 'a=1:4', 'b=2:5'])
+
+     def test_sort_args(self):
+         from mccode_antlr.run.runner import sort_args
+         self.assertEqual(sort_args(['-n', '10000', 'inst.h5', 'a=1:4', 'b=2:5']), ['-n', '10000', 'inst.h5', 'a=1:4', 'b=2:5'])
+         self.assertEqual(sort_args(['inst.h5', '-n', '10000', 'a=1:4', 'b=2:5']), ['-n', '10000', 'inst.h5', 'a=1:4', 'b=2:5'])
+
+     def test_sorted_mixed_order_does_not_throw(self):
+         from mccode_plumber.splitrun import make_parser
+         from mccode_antlr.run.runner import sort_args
+         parser = make_parser()
+         args = parser.parse_args(sort_args(['inst.h5', '--broker', 'www.github.com:9093', '--source', 'dev/null',
+                                             '-n', '123', '--parallel', '--', 'a=1:4', 'b=2:5']))
+         self.assertEqual(args.instrument, ['inst.h5'])
+         self.assertEqual(args.broker, 'www.github.com:9093')
+         self.assertEqual(args.source, 'dev/null')
+         self.assertEqual(args.ncount, [123])
+         self.assertEqual(args.parameters, ['a=1:4', 'b=2:5'])
+         self.assertTrue(args.parallel)
+
+
+ if __name__ == '__main__':
+     unittest.main()
--- /dev/null
+++ b/tests/test_writer.py
@@ -0,0 +1,71 @@
+ import unittest
+
+
+ class WriterTestCase(unittest.TestCase):
+     def setUp(self):
+         from json import dumps
+         from mccode_antlr.loader import parse_mcstas_instr
+         from mccode_to_kafka.writer import da00_dataarray_config, da00_variable_config
+         t = da00_variable_config(name='t', label='monitor', unit='usec', axes=['t'], shape=[10], data_type='float64')
+         ns = da00_dataarray_config(topic='monitor', source='mccode-to-kafka', variables=[t])
+         instr = f"""DEFINE INSTRUMENT this_IS_NOT_BIFROST()
+         TRACE
+         COMPONENT origin = Arm() AT (0, 0, 0) ABSOLUTE
+         COMPONENT source = Source_simple() AT (0, 0, 1) RELATIVE PREVIOUS
+         COMPONENT monitor = TOF_monitor() AT (0, 0, 1) RELATIVE source
+         METADATA "application/json" "nexus_structure_stream_data" %{{{dumps(ns)}%}}
+         COMPONENT sample = Arm() AT (0, 0, 80) RELATIVE source
+         END
+         """
+         self.instr = parse_mcstas_instr(instr)
+
+     def test_parse(self):
+         from mccode_plumber.writer import construct_writer_pv_dicts_from_parameters
+         from mccode_plumber.writer import default_nexus_structure
+         params = construct_writer_pv_dicts_from_parameters(self.instr.parameters, 'mcstas:', 'topic')
+         self.assertEqual(len(params), 0)
+         struct = default_nexus_structure(self.instr)
+
+         self.assertEqual(len(struct['children']), 1)
+         self.assertEqual(struct['children'][0]['name'], 'entry')
+         self.assertEqual(struct['children'][0]['children'][0]['name'], 'instrument')
+         self.assertEqual(struct['children'][0]['children'][0]['children'][1]['name'], '0_origin')
+         self.assertEqual(struct['children'][0]['children'][0]['children'][2]['name'], '1_source')
+         self.assertEqual(struct['children'][0]['children'][0]['children'][3]['name'], '2_monitor')
+         mon = struct['children'][0]['children'][0]['children'][3]
+         self.assertEqual(len(mon['children']), 4)  # removed 'mccode' property 5->4
+         idx = [i for i, ch in enumerate(mon['children']) if 'name' in ch and 'data' == ch['name']]
+         self.assertEqual(len(idx), 1)
+         data = mon['children'][idx[0]]
+         idx = [i for i, ch in enumerate(data['children']) if 'module' in ch and 'da00' == ch['module']]
+         self.assertEqual(len(idx), 1)
+         da00 = data['children'][idx[0]]
+         self.assertEqual(len(da00.keys()), 2)
+         self.assertEqual(da00['module'], 'da00')
+         self.assertEqual(da00['config']['topic'], 'monitor')
+         self.assertEqual(da00['config']['source'], 'mccode-to-kafka')
+
+
+ class WriterUnitsTestCase(unittest.TestCase):
+     def setUp(self):
+         from mccode_antlr.loader import parse_mcstas_instr
+         instr = """DEFINE INSTRUMENT with_logs(double a/"Hz", b/"m", int c, string d)
+         TRACE
+         COMPONENT origin = Arm() AT (0, 0, 0) ABSOLUTE
+         COMPONENT source = Source_simple() AT (0, 0, 1) RELATIVE PREVIOUS
+         COMPONENT sample = Arm() AT (0, 0, 80) RELATIVE source
+         END
+         """
+         self.instr = parse_mcstas_instr(instr)
+
+     def test_parse(self):
+         from mccode_plumber.writer import construct_writer_pv_dicts_from_parameters
+         params = construct_writer_pv_dicts_from_parameters(self.instr.parameters, 'mcstas:', 'topic')
+         self.assertEqual(len(params), 4)
+         for p, x in zip(params, [('a', 'Hz'), ('b', 'm'), ('c', None), ('d', None)]):
+             self.assertEqual(p['name'], x[0])
+             self.assertEqual(p['unit'], x[1])
+
+
+ if __name__ == '__main__':
+     unittest.main()