mccode-plumber 0.7.1__tar.gz → 0.8.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. {mccode_plumber-0.7.1/src/mccode_plumber.egg-info → mccode_plumber-0.8.1}/PKG-INFO +2 -2
  2. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/pyproject.toml +4 -1
  3. mccode_plumber-0.8.1/src/mccode_plumber/__init__.py +6 -0
  4. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber/epics.py +2 -0
  5. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber/file_writer_control/CommandChannel.py +5 -5
  6. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber/file_writer_control/CommandHandler.py +2 -2
  7. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber/file_writer_control/InThreadStatusTracker.py +5 -5
  8. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber/file_writer_control/JobHandler.py +4 -4
  9. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber/file_writer_control/StateExtractor.py +3 -3
  10. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber/file_writer_control/WorkerFinder.py +7 -7
  11. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber/file_writer_control/WorkerJobPool.py +5 -5
  12. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber/forwarder.py +2 -0
  13. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber/kafka.py +2 -0
  14. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber/mccode.py +4 -4
  15. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber/splitrun.py +10 -6
  16. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber/writer.py +154 -27
  17. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1/src/mccode_plumber.egg-info}/PKG-INFO +2 -2
  18. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber.egg-info/entry_points.txt +3 -0
  19. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber.egg-info/requires.txt +1 -1
  20. mccode_plumber-0.7.1/src/mccode_plumber/__init__.py +0 -0
  21. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/.github/workflows/pip.yml +0 -0
  22. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/.github/workflows/wheels.yml +0 -0
  23. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/.gitignore +0 -0
  24. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/README.md +0 -0
  25. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/setup.cfg +0 -0
  26. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber/conductor.py +0 -0
  27. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber/file_writer_control/CommandStatus.py +0 -0
  28. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber/file_writer_control/JobStatus.py +0 -0
  29. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber/file_writer_control/KafkaTopicUrl.py +0 -0
  30. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber/file_writer_control/WorkerStatus.py +0 -0
  31. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber/file_writer_control/WriteJob.py +0 -0
  32. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber/file_writer_control/__init__.py +0 -0
  33. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber/utils.py +0 -0
  34. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber.egg-info/SOURCES.txt +0 -0
  35. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber.egg-info/dependency_links.txt +0 -0
  36. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/src/mccode_plumber.egg-info/top_level.txt +0 -0
  37. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/tests/test_epics.py +0 -0
  38. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/tests/test_splitrun.py +0 -0
  39. {mccode_plumber-0.7.1 → mccode_plumber-0.8.1}/tests/test_writer.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mccode-plumber
-Version: 0.7.1
+Version: 0.8.1
 Author-email: Gregory Tucker <gregory.tucker@ess.eu>
 Classifier: License :: OSI Approved :: BSD License
 Description-Content-Type: text/markdown
@@ -8,7 +8,7 @@ Requires-Dist: p4p
 Requires-Dist: kafka-python>=2.0
 Requires-Dist: ess-streaming-data-types>=0.14.0
 Requires-Dist: restage>=0.4.0
-Requires-Dist: mccode-to-kafka>=0.2.1
+Requires-Dist: mccode-to-kafka>=0.2.2
 Requires-Dist: moreniius>=0.2.3
 Requires-Dist: icecream
 
@@ -9,7 +9,7 @@ dependencies = [
     'kafka-python>=2.0',
     'ess-streaming-data-types>=0.14.0',
     'restage>=0.4.0',
-    'mccode-to-kafka>=0.2.1',
+    'mccode-to-kafka>=0.2.2',
     'moreniius>=0.2.3',
     'icecream',
 ]
@@ -31,6 +31,9 @@ mp-forwarder-teardown = 'mccode_plumber.forwarder:teardown'
 mp-writer-from = 'mccode_plumber.writer:print_time'
 mp-writer-write = 'mccode_plumber.writer:start_writer'
 mp-writer-wait = 'mccode_plumber.writer:wait_on_writer'
+mp-writer-list = 'mccode_plumber.writer:list_status'
+mp-writer-kill = 'mccode_plumber.writer:kill_job'
+mp-writer-killall = 'mccode_plumber.writer:kill_all'
 mp-register-topics = 'mccode_plumber.kafka:register_topics'
 mp-insert-hdf5-instr = 'mccode_plumber.mccode:insert'
 
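The three writer-control entry points registered here correspond to the new list_status, kill_job, and kill_all functions added to writer.py further down. For each console-script line, pip generates a small executable wrapper at install time; a minimal sketch of what the generated mp-writer-list command amounts to (the wrapper shape is illustrative, not the literal generated file):

    # Sketch: the effect of mp-writer-list = 'mccode_plumber.writer:list_status'
    import sys
    from mccode_plumber.writer import list_status

    if __name__ == '__main__':
        sys.exit(list_status())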
@@ -0,0 +1,6 @@
+from importlib.metadata import version, PackageNotFoundError
+
+try:
+    __version__ = version('mccode-plumber')
+except PackageNotFoundError:
+    pass
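The new package __init__.py reads the version from the installed distribution metadata, so pyproject.toml remains the single source of truth; when the package is not installed (for example, running from a raw checkout), __version__ is simply left undefined. Every argument parser touched below then exposes it through argparse's built-in version action. A minimal sketch of that pattern (the mp-example program name is illustrative):

    from argparse import ArgumentParser
    from mccode_plumber import __version__

    parser = ArgumentParser(prog='mp-example')  # illustrative program name
    parser.add_argument('-v', '--version', action='version', version=__version__)
    # `mp-example --version` prints the installed version and exits immediately.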
@@ -53,9 +53,11 @@ class MailboxHandler:
 
 def get_parser():
     from argparse import ArgumentParser
+    from mccode_plumber import __version__
     p = ArgumentParser()
     p.add_argument('instr', type=str, help='The instrument file to read')
     p.add_argument('-p', '--prefix', type=str, help='The EPICS PV prefix to use', default='mcstas:')
+    p.add_argument('-v', '--version', action='version', version=__version__)
     return p
 
 
@@ -7,14 +7,14 @@ from typing import Dict, List, Optional, Union
 from kafka import KafkaConsumer
 from kafka.errors import NoBrokersAvailable
 
-from file_writer_control.CommandStatus import CommandState, CommandStatus
-from file_writer_control.InThreadStatusTracker import (
+from .CommandStatus import CommandState, CommandStatus
+from .InThreadStatusTracker import (
     DEAD_ENTITY_TIME_LIMIT,
     InThreadStatusTracker,
 )
-from file_writer_control.JobStatus import JobStatus
-from file_writer_control.KafkaTopicUrl import KafkaTopicUrl
-from file_writer_control.WorkerStatus import WorkerStatus
+from .JobStatus import JobStatus
+from .KafkaTopicUrl import KafkaTopicUrl
+from .WorkerStatus import WorkerStatus
 
 
 def thread_function(
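The same mechanical change appears across all seven file_writer_control modules below (these appear to be vendored from the ESS file-writer-control project): they stop importing each other through a top-level file_writer_control package, which only resolves when that distribution is installed separately, and instead use relative imports that resolve within mccode_plumber.file_writer_control. Illustrated with one import from this hunk (module-level code, meaningful only inside the package):

    # Before: absolute import; needs a top-level 'file_writer_control' on sys.path.
    # from file_writer_control.JobStatus import JobStatus

    # After: relative import; resolves to mccode_plumber.file_writer_control.JobStatus
    # no matter where the containing package is installed or renamed.
    from .JobStatus import JobStatus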
@@ -1,7 +1,7 @@
 from datetime import timedelta
 
-from file_writer_control.CommandChannel import CommandChannel
-from file_writer_control.CommandStatus import CommandState
+from .CommandChannel import CommandChannel
+from .CommandStatus import CommandState
 
 
 class CommandHandler:
@@ -22,14 +22,14 @@ from streaming_data_types.status_x5f2 import FILE_IDENTIFIER as STAT_IDENTIFIER
 from streaming_data_types.status_x5f2 import StatusMessage
 from streaming_data_types.utils import get_schema
 
-from file_writer_control.CommandStatus import CommandState, CommandStatus
-from file_writer_control.JobStatus import JobState, JobStatus
-from file_writer_control.StateExtractor import (
+from .CommandStatus import CommandState, CommandStatus
+from .JobStatus import JobState, JobStatus
+from .StateExtractor import (
     extract_job_state_from_answer,
     extract_state_from_command_answer,
     extract_worker_state_from_status,
 )
-from file_writer_control.WorkerStatus import WorkerState, WorkerStatus
+from .WorkerStatus import WorkerState, WorkerStatus
 
 DEAD_ENTITY_TIME_LIMIT = timedelta(hours=1)
 
@@ -223,6 +223,6 @@ class InThreadStatusTracker:
             current_job.state = JobState.ERROR
         else:
             current_job.state = JobState.DONE
-        current_job.metadata = json.loads(stopped.metadata)
+        current_job.metadata = json.loads(stopped.metadata) if stopped.metadata is not None else None
         current_job.message = stopped.message
         self.known_workers[stopped.service_id].state = WorkerState.IDLE
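The guard matters because the metadata field of a stopped-job status message is optional, and json.loads(None) raises TypeError, which previously could take down the status-tracker thread. A minimal sketch of the failure mode and the fix (the value is illustrative):

    import json

    stopped_metadata = None  # illustrative: a stop message carrying no metadata

    # json.loads(None) raises TypeError ("the JSON object must be str, bytes
    # or bytearray, not NoneType"), hence the explicit None check:
    metadata = json.loads(stopped_metadata) if stopped_metadata is not None else None
    assert metadata is None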
@@ -1,9 +1,9 @@
 from datetime import datetime
 
-from file_writer_control.CommandHandler import CommandHandler
-from file_writer_control.JobStatus import JobState
-from file_writer_control.WorkerFinder import WorkerFinder
-from file_writer_control.WriteJob import WriteJob
+from .CommandHandler import CommandHandler
+from .JobStatus import JobState
+from .WorkerFinder import WorkerFinder
+from .WriteJob import WriteJob
 
 
 class JobHandler:
@@ -8,9 +8,9 @@ from streaming_data_types.action_response_answ import (
 )
 from streaming_data_types.status_x5f2 import StatusMessage
 
-from file_writer_control.CommandStatus import CommandState
-from file_writer_control.JobStatus import JobState
-from file_writer_control.WorkerStatus import WorkerState
+from .CommandStatus import CommandState
+from .JobStatus import JobState
+from .WorkerStatus import WorkerState
 
 
 def extract_worker_state_from_status(status: StatusMessage) -> WorkerState:
@@ -6,13 +6,13 @@ from kafka import KafkaProducer
 from kafka.errors import NoBrokersAvailable
 from streaming_data_types.run_stop_6s4t import serialise_6s4t as serialise_stop
 
-from file_writer_control.CommandChannel import CommandChannel
-from file_writer_control.CommandHandler import CommandHandler
-from file_writer_control.CommandStatus import CommandStatus
-from file_writer_control.JobStatus import JobState, JobStatus
-from file_writer_control.KafkaTopicUrl import KafkaTopicUrl
-from file_writer_control.WorkerStatus import WorkerStatus
-from file_writer_control.WriteJob import WriteJob
+from .CommandChannel import CommandChannel
+from .CommandHandler import CommandHandler
+from .CommandStatus import CommandStatus
+from .JobStatus import JobState, JobStatus
+from .KafkaTopicUrl import KafkaTopicUrl
+from .WorkerStatus import WorkerStatus
+from .WriteJob import WriteJob
 
 
 class WorkerFinderBase:
@@ -3,11 +3,11 @@ from typing import Dict
 from kafka import KafkaProducer
 from kafka.errors import NoBrokersAvailable
 
-from file_writer_control.CommandHandler import CommandHandler
-from file_writer_control.CommandStatus import CommandState
-from file_writer_control.KafkaTopicUrl import KafkaTopicUrl
-from file_writer_control.WorkerFinder import WorkerFinder
-from file_writer_control.WriteJob import WriteJob
+from .CommandHandler import CommandHandler
+from .CommandStatus import CommandState
+from .KafkaTopicUrl import KafkaTopicUrl
+from .WorkerFinder import WorkerFinder
+from .WriteJob import WriteJob
 
 
 class WorkerJobPool(WorkerFinder):
@@ -62,12 +62,14 @@ def reset_forwarder(pvs: list[dict], config=None, prefix=None, topic=None):
 def parse_registrar_args():
     from argparse import ArgumentParser
     from .mccode import get_mccode_instr_parameters
+    from mccode_plumber import __version__
 
     parser = ArgumentParser(description="Discover EPICS PVs and inform a forwarder about them")
     parser.add_argument('-p', '--prefix', type=str, default='mcstas:')
     parser.add_argument('instrument', type=str, help="The mcstas instrument with EPICS PVs")
     parser.add_argument('-c', '--config', type=str, help="The Kafka server and topic for configuring the Forwarder")
     parser.add_argument('-t', '--topic', type=str, help="The Kafka topic to instruct the Forwarder to send data to")
+    parser.add_argument('-v', '--version', action='version', version=__version__)
 
     args = parser.parse_args()
     parameter_names = [p.name for p in get_mccode_instr_parameters(args.instrument)]
@@ -1,9 +1,11 @@
 def parse_kafka_topic_args():
     from argparse import ArgumentParser
+    from mccode_plumber import __version__
     parser = ArgumentParser(description="Prepare the named Kafka broker to host one or more topics")
     parser.add_argument('-b', '--broker', type=str, help='The Kafka broker server to interact with')
     parser.add_argument('topic', nargs="+", type=str, help='The Kafka topic(s) to register')
     parser.add_argument('-q', '--quiet', action='store_true', help='Quiet (positive) failure')
+    parser.add_argument('-v', '--version', action='version', version=__version__)
 
     args = parser.parse_args()
     return args
@@ -1,15 +1,13 @@
 from pathlib import Path
 from typing import Union
-from mccode_antlr.instr import Instr
-from mccode_antlr.common import InstrumentParameter
 
 
-def get_mcstas_instr(filename: Union[Path, str]) -> Instr:
+def get_mcstas_instr(filename: Union[Path, str]):
     from restage.instr import load_instr
     return load_instr(filename)
 
 
-def get_mccode_instr_parameters(filename: Union[Path, str]) -> tuple[InstrumentParameter]:
+def get_mccode_instr_parameters(filename: Union[Path, str]):
     from mccode_antlr.loader.loader import parse_mccode_instr_parameters
     if not isinstance(filename, Path):
         filename = Path(filename)
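Removing the module-level mccode_antlr imports means the return annotations that referenced Instr and InstrumentParameter had to go too, since annotations on def lines are evaluated at import time by default. An alternative that would keep the hints without the import cost is postponed evaluation plus a type-checking-only import; a hedged sketch for contrast, not what mccode.py actually does:

    from __future__ import annotations  # annotations no longer evaluated at runtime

    from pathlib import Path
    from typing import TYPE_CHECKING, Union

    if TYPE_CHECKING:  # seen by type checkers only, never executed
        from mccode_antlr.instr import Instr


    def get_mcstas_instr(filename: Union[Path, str]) -> Instr:
        from restage.instr import load_instr  # heavy import deferred to call time
        return load_instr(filename)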
@@ -42,12 +40,14 @@ def insert_mcstas_hdf5(filename: Union[Path, str], outfile: Union[Path, str], pa
 
 def get_arg_parser():
     from argparse import ArgumentParser
+    from mccode_plumber import __version__
     from .utils import is_readable, is_appendable
     parser = ArgumentParser(description="Copy a Instr HDF5 representation to a NeXus HDF5 file")
     a = parser.add_argument
     a('instrument', type=is_readable, default=None, help="The mcstas instrument file")
     a('-p', '--parent', type=str, default='mcstas')
     a('-o', '--outfile', type=is_appendable, default=None, help='Base NeXus structure, will be extended')
+    a('-v', '--version', action='version', version=__version__)
     return parser
 
 
@@ -1,21 +1,25 @@
 def make_parser():
+    from mccode_plumber import __version__
     from restage.splitrun import make_splitrun_parser
     parser = make_splitrun_parser()
     parser.prog = 'mp-splitrun'
     parser.add_argument('--broker', type=str, help='The Kafka broker to send monitors to', default=None)
     parser.add_argument('--source', type=str, help='The Kafka source name to use for monitors', default=None)
+    parser.add_argument('--topic', type=str, help='The Kafka topic name(s) to use for monitors', default=None, action='append')
+    parser.add_argument('-v', '--version', action='version', version=__version__)
     return parser
 
 
-def monitors_to_kafka_callback_with_arguments(broker: str, source: str):
-    from functools import partial
+def monitors_to_kafka_callback_with_arguments(broker: str, source: str, topics: list[str]):
     from mccode_to_kafka.sender import send_histograms
 
+    partial_kwargs = {'broker': broker, 'source': source}
+    if topics is not None and len(topics) > 0:
+        partial_kwargs['names'] = topics
+
     def callback(*args, **kwargs):
-        print(f'monitors to kafka callback called with {args} and {kwargs}')
-        return send_histograms(*args, broker=broker, source=source, **kwargs)
+        return send_histograms(*args, **partial_kwargs, **kwargs)
 
-    # return partial(send_histograms, broker=broker, source=source), {'dir': 'root'}
     return callback, {'dir': 'root'}
 
 
@@ -24,5 +28,5 @@ def main():
     from restage.splitrun import splitrun_args, parse_splitrun
     args, parameters, precision = parse_splitrun(make_parser())
     instr = get_mcstas_instr(args.instrument[0])
-    callback, callback_args = monitors_to_kafka_callback_with_arguments(args.broker, args.source)
+    callback, callback_args = monitors_to_kafka_callback_with_arguments(args.broker, args.source, args.topic)
     return splitrun_args(instr, parameters, precision, args, callback=callback, callback_arguments=callback_args)
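Because the new --topic flag uses action='append', it can be repeated to name one topic per monitor; args.topic stays None when the flag is absent, and the callback only passes a names keyword to send_histograms when topics were actually supplied, leaving that function's default topic naming untouched otherwise. A minimal sketch of the flag behaviour (the parser is a stand-in for make_parser()):

    from argparse import ArgumentParser

    parser = ArgumentParser(prog='mp-splitrun')  # stand-in for make_parser()
    parser.add_argument('--topic', type=str, default=None, action='append')

    print(parser.parse_args([]).topic)                           # None
    print(parser.parse_args(['--topic', 'mon1']).topic)          # ['mon1']
    print(parser.parse_args(['--topic', 'mon1', '--topic', 'mon2']).topic)
    # -> ['mon1', 'mon2']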
@@ -1,8 +1,8 @@
 from __future__ import annotations
 
 from pathlib import Path
-from typing import Union, Callable
-from mccode_antlr.instr import Instr
+
+from .file_writer_control import WorkerJobPool
 
 
 def _is_group(x, group):
@@ -143,28 +143,39 @@ def get_writer_pool(broker: str = None, job: str = None, command: str = None):
     return pool
 
 
-def define_nexus_structure(instr: Union[Path, str], pvs: list[dict], title: str = None, event_stream: dict[str, str] = None,
-                           file: Union[Path, None] = None, func: Union[Callable[[Instr], dict], None] = None,
-                           binary: Union[Path, None] = None, origin: str = None):
-    import json
-    from .mccode import get_mcstas_instr
-    if file is not None and file.exists():
-        with open(file, 'r') as file:
-            nexus_structure = json.load(file)
-    elif func is not None:
-        nexus_structure = func(get_mcstas_instr(instr))
-    elif binary is not None and binary.exists():
-        from subprocess import run, PIPE
-        result = run([binary, str(instr)], stdout=PIPE, stderr=PIPE)
-        if result.returncode != 0:
-            raise RuntimeError(f"Failed to execute {binary} {instr} due to error {result.stderr.decode()}")
-        nexus_structure = json.loads(result.stdout.decode())
-    else:
-        nexus_structure = default_nexus_structure(get_mcstas_instr(instr), origin=origin)
-    nexus_structure = add_pvs_to_nexus_structure(nexus_structure, pvs)
-    nexus_structure = add_title_to_nexus_structure(nexus_structure, title)
-    # nexus_structure = insert_events_in_nexus_structure(nexus_structure, event_stream)
-    return nexus_structure
+def make_define_nexus_structure():
+    from typing import Union, Callable
+    from mccode_antlr.instr import Instr
+
+    def define_nexus_structure(
+            instr: Union[Path, str],
+            pvs: list[dict],
+            title: str = None,
+            event_stream: dict[str, str] = None,
+            file: Union[Path, None] = None,
+            func: Union[Callable[[Instr], dict], None] = None,
+            binary: Union[Path, None] = None,
+            origin: str = None):
+        import json
+        from .mccode import get_mcstas_instr
+        if file is not None and file.exists():
+            with open(file, 'r') as file:
+                nexus_structure = json.load(file)
+        elif func is not None:
+            nexus_structure = func(get_mcstas_instr(instr))
+        elif binary is not None and binary.exists():
+            from subprocess import run, PIPE
+            result = run([binary, str(instr)], stdout=PIPE, stderr=PIPE)
+            if result.returncode != 0:
+                raise RuntimeError(f"Failed to execute {binary} {instr} due to error {result.stderr.decode()}")
+            nexus_structure = json.loads(result.stdout.decode())
+        else:
+            nexus_structure = default_nexus_structure(get_mcstas_instr(instr), origin=origin)
+        nexus_structure = add_pvs_to_nexus_structure(nexus_structure, pvs)
+        nexus_structure = add_title_to_nexus_structure(nexus_structure, title)
+        # nexus_structure = insert_events_in_nexus_structure(nexus_structure, event_stream)
+        return nexus_structure
+    return define_nexus_structure
 
 
 def start_pool_writer(start_time_string, structure, filename=None, stop_time_string: str | None = None,
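Wrapping define_nexus_structure in a factory defers the mccode_antlr import from writer.py's import time to the moment a NeXus structure is first built, so lightweight entry points in the same module (the status and kill commands below) start without paying for the instrument parser. A minimal sketch of the deferred-import factory pattern (names and the stand-in import are illustrative):

    def make_builder():
        import json  # stand-in for a heavy dependency such as mccode_antlr

        def build(payload: str) -> dict:
            # The enclosed function uses the module imported by the factory.
            return json.loads(payload)

        return build


    build = make_builder()             # the import cost is paid here, on demand
    print(build('{"title": "demo"}'))  # -> {'title': 'demo'}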
@@ -222,6 +233,7 @@ def start_pool_writer(start_time_string, structure, filename=None, stop_time_str
 
 def get_arg_parser():
     from argparse import ArgumentParser
+    from mccode_plumber import __version__
     from .utils import is_callable, is_readable, is_executable, is_writable
     parser = ArgumentParser(description="Control writing Kafka stream(s) to a NeXus file")
     a = parser.add_argument
@@ -245,6 +257,7 @@ def get_arg_parser():
     a('--wait', action='store_true', help='If provided, wait for the writer to finish before exiting')
     a('--time-out', type=float, default=120., help='Wait up to the timeout for writing to start')
     a('--job-id', type=str, default=None, help='Unique Job identifier for this write-job')
+    a('-v', '--version', action='version', version=__version__)
 
     return parser
 
@@ -273,9 +286,12 @@ def construct_writer_pv_dicts_from_parameters(parameters, prefix: str, topic: st
 def parse_writer_args():
     args = get_arg_parser().parse_args()
     params = construct_writer_pv_dicts(args.instrument, args.prefix, args.topic)
-    structure = define_nexus_structure(args.instrument, params, title=args.title, origin=args.origin,
-                                       file=args.ns_file, func=args.ns_func, binary=args.ns_exec,
-                                       event_stream={'source': args.event_source, 'topic': args.event_topic})
+    define_nexus_structure = make_define_nexus_structure()
+    structure = define_nexus_structure(
+        args.instrument, params, title=args.title, origin=args.origin,
+        file=args.ns_file, func=args.ns_func, binary=args.ns_exec,
+        event_stream={'source': args.event_source, 'topic': args.event_topic}
+    )
     if args.ns_save is not None:
         from json import dump
         with open(args.ns_save, 'w') as file:
@@ -301,6 +317,7 @@ def wait_on_writer():
     from os import EX_OK, EX_UNAVAILABLE
     from time import sleep
     from datetime import datetime, timedelta
+    from mccode_plumber import __version__
     from .file_writer_control import JobHandler, CommandState
 
     from argparse import ArgumentParser
@@ -312,6 +329,7 @@ def wait_on_writer():
     a('id', type=str, help='Job id to wait on')
     a('-s', '--stop-after', type=float, help='Stop after time, seconds', default=1)
     a('-t', '--time-out', type=float, help='Time out after, seconds', default=24*60*60*30)
+    a('-v', '--version', action='version', version=__version__)
     args = parser.parse_args()
 
     pool = get_writer_pool(broker=args.broker, job=args.job, command=args.command)
@@ -334,3 +352,112 @@ def wait_on_writer():
         # raise RuntimeError(e.__str__() + f" The message was: {stop.get_message()}")
         exit(EX_UNAVAILABLE)
     exit(EX_OK)
+
+
+def kill_list_parser():
+    from argparse import ArgumentParser
+    from mccode_plumber import __version__
+    parser = ArgumentParser()
+    a = parser.add_argument
+    a('-b', '--broker', help="Kafka broker", default='localhost:9092', type=str)
+    a('-c', '--command', help="Writer command topic", default="WriterCommand", type=str)
+    a('-t', '--topic', help='Writer job topic', default='WriterJobs', type=str)
+    a('-s', '--sleep', help='Post pool creation sleep time (s)', default=1, type=int)
+    a('-v', '--version', action='version', version=__version__)
+    return parser
+
+
+def kill_job():
+    import time
+    from .file_writer_control import WorkerJobPool
+    parser = kill_list_parser()
+    parser.add_argument('service_id', type=str, help='Writer service id to stop')
+    parser.add_argument('job_id', type=str, help='Writer job id to stop')
+    args = parser.parse_args()
+    pool = WorkerJobPool(f'{args.broker}/{args.topic}', f'{args.broker}/{args.command}')
+    time.sleep(args.sleep)
+    pool.try_send_stop_now(args.service_id, args.job_id)
+
+
+def print_columns(titles: list | tuple, values: list[list | tuple] | tuple[list | tuple, ...]):
+    if not len(values) or not len(titles):
+        return
+    widths = [len(str(x)) for x in titles]
+    for row in values:
+        for i, v in enumerate(row):
+            n = len(str(v))
+            if n > widths[i]:
+                widths[i] = n
+    w_format = ''.join([f'{{:{n + 1:d}s}}' for n in widths])
+    print(w_format.format(*[str(x) for x in titles]))
+    print(w_format.format(*['-' * n for n in widths]))
+    for row in values:
+        print(w_format.format(*[str(x) for x in row]))
+    print()
+
+
+def print_workers(workers):
+    if len(workers):
+        print("Known workers")
+        print_columns(("Service id", "Current state"),
+                      [(w.service_id, w.state) for w in workers])
+    else:
+        print("No workers")
+
+
+def print_jobs(jobs):
+    if len(jobs):
+        print("Known jobs")
+        job_info = [(j.service_id, j.job_id, j.state,
+                     j.file_name if j.file_name else j.message) for j in jobs]
+        print_columns(("Service id", "Job id", "Current state", "File name or message"),
+                      job_info)
+    else:
+        print("No jobs")
+
+
+def print_commands(commands):
+    if len(commands):
+        print("Known commands")
+        print_columns(("Job id", "Command id", "Current state", "Message"),
+                      [(c.job_id, c.command_id, c.state, c.message) for c in
+                       commands])
+    else:
+        print("No commands")
+
+
+def print_current_state(channel: WorkerJobPool):
+    print_workers(channel.list_known_workers())
+    print_jobs(channel.list_known_jobs())
+    print_commands(channel.list_known_commands())
+
+
+def kill_all():
+    import time
+    from .file_writer_control import WorkerJobPool
+    parser = kill_list_parser()
+    parser.add_argument('--verbose', help='Verbose output', action='store_true')
+    args = parser.parse_args()
+    pool = WorkerJobPool(f'{args.broker}/{args.topic}', f'{args.broker}/{args.command}')
+    time.sleep(args.sleep)
+    if args.verbose:
+        print_current_state(pool)
+    jobs = pool.list_known_jobs()
+    for job in jobs:
+        print(f'Kill {job.service_id} {job.job_id}')
+        pool.try_send_stop_now(job.service_id, job.job_id)
+        time.sleep(args.sleep)
+    if len(jobs) == 0:
+        print("No jobs")
+
+    if args.verbose:
+        print_current_state(pool)
+
+
+def list_status():
+    import time
+    parser = kill_list_parser()
+    args = parser.parse_args()
+    pool = WorkerJobPool(f'{args.broker}/{args.topic}', f'{args.broker}/{args.command}')
+    time.sleep(args.sleep)
+    print_current_state(pool)
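These functions back the three new console scripts: mp-writer-list prints the worker, job, and command tables; mp-writer-kill SERVICE_ID JOB_ID sends a stop-now to one job; mp-writer-killall stops every known job. The same status listing can also be driven programmatically; a minimal sketch using the defaults from kill_list_parser (the broker and topic names are just those defaults):

    # Sketch: roughly what mp-writer-list does, minus the argument parsing.
    import time
    from mccode_plumber.file_writer_control import WorkerJobPool

    pool = WorkerJobPool('localhost:9092/WriterJobs', 'localhost:9092/WriterCommand')
    time.sleep(1)  # give the background status tracker time to consume messages
    for worker in pool.list_known_workers():
        print(worker.service_id, worker.state)
    for job in pool.list_known_jobs():
        print(job.service_id, job.job_id, job.state)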
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mccode-plumber
-Version: 0.7.1
+Version: 0.8.1
 Author-email: Gregory Tucker <gregory.tucker@ess.eu>
 Classifier: License :: OSI Approved :: BSD License
 Description-Content-Type: text/markdown
@@ -8,7 +8,7 @@ Requires-Dist: p4p
 Requires-Dist: kafka-python>=2.0
 Requires-Dist: ess-streaming-data-types>=0.14.0
 Requires-Dist: restage>=0.4.0
-Requires-Dist: mccode-to-kafka>=0.2.1
+Requires-Dist: mccode-to-kafka>=0.2.2
 Requires-Dist: moreniius>=0.2.3
 Requires-Dist: icecream
 
@@ -7,5 +7,8 @@ mp-insert-hdf5-instr = mccode_plumber.mccode:insert
 mp-register-topics = mccode_plumber.kafka:register_topics
 mp-splitrun = mccode_plumber.splitrun:main
 mp-writer-from = mccode_plumber.writer:print_time
+mp-writer-kill = mccode_plumber.writer:kill_job
+mp-writer-killall = mccode_plumber.writer:kill_all
+mp-writer-list = mccode_plumber.writer:list_status
 mp-writer-wait = mccode_plumber.writer:wait_on_writer
 mp-writer-write = mccode_plumber.writer:start_writer
@@ -2,6 +2,6 @@ p4p
 kafka-python>=2.0
 ess-streaming-data-types>=0.14.0
 restage>=0.4.0
-mccode-to-kafka>=0.2.1
+mccode-to-kafka>=0.2.2
 moreniius>=0.2.3
 icecream