mplugin 2.0.0a0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,88 @@
1
+ Metadata-Version: 2.3
2
+ Name: mplugin
3
+ Version: 2.0.0a0
4
+ Summary: Class library for writing Nagios (Icinga) plugins
5
+ Keywords: Nagios,Icinga,plugin,check,monitoring
6
+ Author: Christian Kauhaus, Matthew Pounsett, Josef Friedrich
7
+ Author-email: Christian Kauhaus <kc@flyingcircus.io>, Matthew Pounsett <matt@conundrum.com>, Josef Friedrich <josef@friedrich.rocks>
8
+ License: ZPL-2.1
9
+ Classifier: Development Status :: 5 - Production/Stable
10
+ Classifier: Environment :: Plugins
11
+ Classifier: Intended Audience :: Developers
12
+ Classifier: Intended Audience :: System Administrators
13
+ Classifier: Operating System :: Microsoft :: Windows
14
+ Classifier: Operating System :: POSIX
15
+ Classifier: Programming Language :: Python :: 3.9
16
+ Classifier: Programming Language :: Python :: 3.10
17
+ Classifier: Programming Language :: Python :: 3.11
18
+ Classifier: Programming Language :: Python :: 3.12
19
+ Classifier: Programming Language :: Python :: 3.13
20
+ Classifier: Programming Language :: Python :: 3.14
21
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
22
+ Classifier: Topic :: System :: Monitoring
23
+ Requires-Dist: typing-extensions>=4.15.0
24
+ Requires-Python: >=3.10
25
+ Project-URL: Documentation, https://mplugin.readthedocs.io/
26
+ Project-URL: Download, https://pypi.org/project/mplugin/
27
+ Project-URL: Source, https://github.com/Josef-Friedrich/mplugin
28
+ Project-URL: Issues, https://github.com/Josef-Friedrich/mplugin/issues
29
+ Project-URL: Changelog, https://github.com/Josef-Friedrich/mplugin/blob/main/HISTORY.txt
30
+ Description-Content-Type: text/x-rst
31
+
32
+ The mplugin library
33
+ ============================
34
+
35
+ About
36
+ -----
37
+
38
+ **mplugin** is a Python class library which helps writing Nagios (or Icinga)
39
+ compatible plugins easily in Python. It cares for much of the boilerplate code
40
+ and default logic commonly found in Nagios checks, including:
41
+
42
+ - Nagios 3 Plugin API compliant parameters and output formatting
43
+ - Full Nagios range syntax support
44
+ - Automatic threshold checking
45
+ - Multiple independent measures
46
+ - Custom status line to communicate the main point quickly
47
+ - Long output and performance data
48
+ - Timeout handling
49
+ - Persistent "cookies" to retain state information between check runs
50
+ - Resume log file processing at the point where the last run left off
51
+ - No dependencies beyond the Python standard library (except for Python 2.6).
52
+
53
+ **mplugin** runs on POSIX and Windows systems. It is compatible with
54
+ Python 3.9 and later.
55
+
56
+ Feedback and Suggestions
57
+ ------------------------
58
+
59
+ mplugin is currently maintained by Josef Friedrich <josef@friedrich.rocks>. A
60
+ public issue tracker can be found at
61
+ <https://github.com/Josef-Friedrich/mplugin/issues> for bugs, suggestions, and
62
+ patches.
63
+
64
+ License
65
+ -------
66
+
67
+ The mplugin package is released under the Zope Public License 2.1 (ZPL), a
68
+ BSD-style Open Source license.
69
+
70
+
71
+ Documentation
72
+ -------------
73
+
74
+ Comprehensive documentation is `available online`_. The examples mentioned in
75
+ the `tutorials`_ can also be found in the `mplugin/examples` directory of
76
+ the source distribution.
77
+
78
+ .. _available online: https://mplugin.readthedocs.io/
79
+ .. _tutorials: https://mplugin.readthedocs.io/en/stable/tutorial/
80
+
81
+ Acknowledgements
82
+ ----------------
83
+
84
+ mplugin was originally written and maintained by Christian Kauhaus
85
+ <kc@flyingcircus.io>. Additional contributions from the community are
86
+ acknowledged in the file CONTRIBUTORS.txt
87
+
88
+ .. vim: set ft=rst:
@@ -0,0 +1,57 @@
1
+ The mplugin library
2
+ ============================
3
+
4
+ About
5
+ -----
6
+
7
+ **mplugin** is a Python class library which helps writing Nagios (or Icinga)
8
+ compatible plugins easily in Python. It cares for much of the boilerplate code
9
+ and default logic commonly found in Nagios checks, including:
10
+
11
+ - Nagios 3 Plugin API compliant parameters and output formatting
12
+ - Full Nagios range syntax support
13
+ - Automatic threshold checking
14
+ - Multiple independent measures
15
+ - Custom status line to communicate the main point quickly
16
+ - Long output and performance data
17
+ - Timeout handling
18
+ - Persistent "cookies" to retain state information between check runs
19
+ - Resume log file processing at the point where the last run left off
20
+ - No dependencies beyond the Python standard library (except for Python 2.6).
21
+
22
+ **mplugin** runs on POSIX and Windows systems. It is compatible with
23
+ Python 3.9 and later.
24
+
25
+ Feedback and Suggestions
26
+ ------------------------
27
+
28
+ mplugin is currently maintained by Josef Friedrich <josef@friedrich.rocks>. A
29
+ public issue tracker can be found at
30
+ <https://github.com/Josef-Friedrich/mplugin/issues> for bugs, suggestions, and
31
+ patches.
32
+
33
+ License
34
+ -------
35
+
36
+ The mplugin package is released under the Zope Public License 2.1 (ZPL), a
37
+ BSD-style Open Source license.
38
+
39
+
40
+ Documentation
41
+ -------------
42
+
43
+ Comprehensive documentation is `available online`_. The examples mentioned in
44
+ the `tutorials`_ can also be found in the `mplugin/examples` directory of
45
+ the source distribution.
46
+
47
+ .. _available online: https://mplugin.readthedocs.io/
48
+ .. _tutorials: https://mplugin.readthedocs.io/en/stable/tutorial/
49
+
50
+ Acknowledgements
51
+ ----------------
52
+
53
+ mplugin was originally written and maintained by Christian Kauhaus
54
+ <kc@flyingcircus.io>. Additional contributions from the community are
55
+ acknowledged in the file CONTRIBUTORS.txt
56
+
57
+ .. vim: set ft=rst:
@@ -0,0 +1,53 @@
1
+
2
+ [project]
3
+ name = "mplugin"
4
+ version = "2.0.0a0"
5
+ description = "Class library for writing Nagios (Icinga) plugins"
6
+ authors = [
7
+ { name = "Christian Kauhaus", email = "kc@flyingcircus.io" },
8
+ { name = "Matthew Pounsett", email = "matt@conundrum.com" },
9
+ { name = "Josef Friedrich", email = "josef@friedrich.rocks" }
10
+ ]
11
+ maintainers = [
12
+
13
+ ]
14
+ readme = "README.rst"
15
+ license = { text = "ZPL-2.1" }
16
+ keywords = ["Nagios", "Icinga", "plugin", "check", "monitoring"]
17
+ requires-python = ">= 3.10"
18
+ classifiers = [
19
+ 'Development Status :: 5 - Production/Stable',
20
+ 'Environment :: Plugins',
21
+ 'Intended Audience :: Developers',
22
+ 'Intended Audience :: System Administrators',
23
+ 'Operating System :: Microsoft :: Windows',
24
+ 'Operating System :: POSIX',
25
+ 'Programming Language :: Python :: 3.9',
26
+ 'Programming Language :: Python :: 3.10',
27
+ 'Programming Language :: Python :: 3.11',
28
+ 'Programming Language :: Python :: 3.12',
29
+ 'Programming Language :: Python :: 3.13',
30
+ 'Programming Language :: Python :: 3.14',
31
+ 'Topic :: Software Development :: Libraries :: Python Modules',
32
+ 'Topic :: System :: Monitoring',
33
+ ]
34
+ dependencies = [
35
+ "typing-extensions>=4.15.0",
36
+ ]
37
+
38
+ [build-system]
39
+ requires = ["uv_build>=0.9.26,<0.10.0"]
40
+ build-backend = "uv_build"
41
+
42
+ [dependency-groups]
43
+ dev = [
44
+ "mypy>=1.19.1",
45
+ "pytest>=8.4.2",
46
+ ]
47
+
48
+ [project.urls]
49
+ Documentation = "https://mplugin.readthedocs.io/"
50
+ Download = "https://pypi.org/project/mplugin/"
51
+ Source = "https://github.com/Josef-Friedrich/mplugin"
52
+ Issues = "https://github.com/Josef-Friedrich/mplugin/issues"
53
+ Changelog = "https://github.com/Josef-Friedrich/mplugin/blob/main/HISTORY.txt"
@@ -0,0 +1,17 @@
1
+ from .check import Check # noqa: F401
2
+ from .context import Context, ScalarContext # noqa: F401
3
+ from .cookie import Cookie # noqa: F401
4
+ from .error import CheckError, Timeout # noqa: F401
5
+ from .logtail import LogTail # noqa: F401
6
+ from .metric import Metric # noqa: F401
7
+ from .multiarg import MultiArg # noqa: F401
8
+ from .performance import Performance # noqa: F401
9
+ from .range import Range # noqa: F401
10
+ from .resource import Resource # noqa: F401
11
+ from .result import Result, Results # noqa: F401
12
+ from .runtime import Runtime, guarded # noqa: F401
13
+ from .state import critical, ok, unknown, warn # noqa: F401
14
+ from .summary import Summary # noqa: F401
15
+ from .version import __VERSION__
16
+
17
+ __version__ = __VERSION__
@@ -0,0 +1,185 @@
1
+ """Controller logic for check execution.
2
+
3
+ This module contains the :class:`Check` class which orchestrates
4
+ the various stages of check execution. Interfacing with the
5
+ outside system is done via a separate :class:`Runtime` object.
6
+
7
+ When a check is called (using :meth:`Check.main` or
8
+ :meth:`Check.__call__`), it probes all resources and evaluates the
9
+ returned metrics to results and performance data. A typical usage
10
+ pattern would be to populate a check with domain objects and then
11
+ delegate control to it.
12
+ """
13
+
14
+ import logging
15
+ from typing import Any, NoReturn
16
+
17
+ from .context import Context, Contexts
18
+ from .error import CheckError
19
+ from .metric import Metric
20
+ from .resource import Resource
21
+ from .result import Result, Results
22
+ from .runtime import Runtime
23
+ from .state import ServiceState, ok, unknown
24
+ from .summary import Summary
25
+
26
+ _log = logging.getLogger(__name__)
27
+
28
+
29
+ class Check(object):
30
+ resources: list[Resource]
31
+ contexts: Contexts
32
+ summary: Summary
33
+ results: Results
34
+ perfdata: list[str]
35
+ name: str
36
+
37
+ def __init__(self, *objects: Resource | Context | Summary | Results, **kwargs):
38
+ """Creates and configures a check.
39
+
40
+ Specialized *objects* representing resources, contexts,
41
+ summary, or results are passed to the :meth:`add` method.
42
+ Alternatively, objects can be added later manually.
43
+ If no *name* is given, the output prefix is set to the first
44
+ resource's name. If *name* is None, no prefix is set at all.
45
+ """
46
+ self.resources = []
47
+ self.contexts = Contexts()
48
+ self.summary = Summary()
49
+ self.results = Results()
50
+ self.perfdata = []
51
+ if "name" in kwargs and kwargs["name"] != "":
52
+ self.name = kwargs["name"]
53
+ else:
54
+ self.name = ""
55
+ self.add(*objects)
56
+
57
+ def add(self, *objects: Resource | Context | Summary | Results):
58
+ """Adds domain objects to a check.
59
+
60
+ :param objects: one or more objects that are descendants from
61
+ :class:`~mplugin.resource.Resource`,
62
+ :class:`~mplugin.context.Context`,
63
+ :class:`~mplugin.summary.Summary`, or
64
+ :class:`~mplugin.result.Results`.
65
+ """
66
+ for obj in objects:
67
+ if isinstance(obj, Resource):
68
+ self.resources.append(obj)
69
+ if self.name is None:
70
+ self.name = ""
71
+ elif self.name == "":
72
+ self.name = self.resources[0].name
73
+ elif isinstance(obj, Context):
74
+ self.contexts.add(obj)
75
+ elif isinstance(obj, Summary):
76
+ self.summary = obj
77
+ elif isinstance(obj, Results):
78
+ self.results = obj
79
+ else:
80
+ raise TypeError("cannot add type {0} to check".format(type(obj)), obj)
81
+ return self
82
+
83
+ def _evaluate_resource(self, resource: Resource) -> None:
84
+ try:
85
+ metric = None
86
+ metrics = resource.probe()
87
+ if not metrics:
88
+ _log.warning("resource %s did not produce any metric", resource.name)
89
+ if isinstance(metrics, Metric):
90
+ # resource returned a bare metric instead of list/generator
91
+ metrics = [metrics]
92
+ for metric in metrics:
93
+ context = self.contexts[metric.context]
94
+ metric = metric.replace(contextobj=context, resource=resource)
95
+ result = metric.evaluate()
96
+ if isinstance(result, Result):
97
+ self.results.add(result)
98
+ elif isinstance(result, ServiceState):
99
+ self.results.add(Result(result, metric=metric))
100
+ else:
101
+ raise ValueError(
102
+ "evaluate() returned neither Result nor ServiceState object",
103
+ metric.name,
104
+ result,
105
+ )
106
+ self.perfdata.append(str(metric.performance() or ""))
107
+ except CheckError as e:
108
+ self.results.add(Result(unknown, str(e), metric))
109
+
110
+ def __call__(self):
111
+ """Actually run the check.
112
+
113
+ After a check has been called, the :attr:`results` and
114
+ :attr:`perfdata` attributes are populated with the outcomes. In
115
+ most cases, you should not use __call__ directly but invoke
116
+ :meth:`main`, which delegates check execution to the
117
+ :class:`Runtime` environment.
118
+ """
119
+ for resource in self.resources:
120
+ self._evaluate_resource(resource)
121
+ self.perfdata = sorted([p for p in self.perfdata if p])
122
+
123
+ def main(self, verbose: Any = None, timeout: Any = None) -> NoReturn:
124
+ """All-in-one control delegation to the runtime environment.
125
+
126
+ Get a :class:`~mplugin.runtime.Runtime` instance and
127
+ perform all phases: run the check (via :meth:`__call__`), print
128
+ results and exit the program with an appropriate status code.
129
+
130
+ :param verbose: output verbosity level between 0 and 3
131
+ :param timeout: abort check execution with a :exc:`Timeout`
132
+ exception after so many seconds (use 0 for no timeout)
133
+ """
134
+ runtime = Runtime()
135
+ runtime.execute(self, verbose, timeout)
136
+
137
+ @property
138
+ def state(self) -> ServiceState:
139
+ """Overall check state.
140
+
141
+ The most significant (=worst) state seen in :attr:`results` so
142
+ far. :obj:`~mplugin.state.Unknown` if no results have been
143
+ collected yet. Corresponds with :attr:`exitcode`. Read-only
144
+ property.
145
+ """
146
+ try:
147
+ return self.results.most_significant_state
148
+ except ValueError:
149
+ return unknown
150
+
151
+ @property
152
+ def summary_str(self) -> str:
153
+ """Status line summary string.
154
+
155
+ The first line of output that summarizes that situation as
156
+ perceived by the check. The string is usually queried from a
157
+ :class:`Summary` object. Read-only property.
158
+ """
159
+ if not self.results:
160
+ return self.summary.empty() or ""
161
+
162
+ if self.state == ok:
163
+ return self.summary.ok(self.results) or ""
164
+
165
+ return self.summary.problem(self.results) or ""
166
+
167
+ @property
168
+ def verbose_str(self):
169
+ """Additional lines of output.
170
+
171
+ Long text output if check runs in verbose mode. Also queried
172
+ from :class:`~mplugin.summary.Summary`. Read-only property.
173
+ """
174
+ return self.summary.verbose(self.results) or ""
175
+
176
+ @property
177
+ def exitcode(self) -> int:
178
+ """Overall check exit code according to the Nagios API.
179
+
180
+ Corresponds with :attr:`state`. Read-only property.
181
+ """
182
+ try:
183
+ return int(self.results.most_significant_state)
184
+ except ValueError:
185
+ return 3
@@ -0,0 +1,223 @@
1
+ """Metadata about metrics to perform data :term:`evaluation`.
2
+
3
+ This module contains the :class:`Context` class, which is the base for
4
+ all contexts. :class:`ScalarContext` is an important specialization to
5
+ cover numeric contexts with warning and critical thresholds. The
6
+ :class:`~.check.Check` controller selects a context for each
7
+ :class:`~.metric.Metric` by matching the metric's `context` attribute with the
8
+ context's `name`. The same context may be used for several metrics.
9
+
10
+ Plugin authors may just use :class:`ScalarContext` in the majority of cases.
11
+ Sometimes it is better to subclass :class:`Context` instead to implement custom
12
+ evaluation or performance data logic.
13
+ """
14
+
15
+ import typing
16
+ from typing import Callable, Optional
17
+
18
+ from .performance import Performance
19
+ from .range import Range
20
+ from .result import Result
21
+ from .state import critical, ok, warn
22
+
23
+ if typing.TYPE_CHECKING:
24
+ from .context import Context
25
+ from .metric import Metric
26
+ from .resource import Resource
27
+
28
+ FmtMetric = str | Callable[["Metric", "Context"], str]
29
+
30
+
31
+ class Context(object):
32
+ name: str
33
+ fmt_metric: Optional[FmtMetric]
34
+ result_cls: type[Result]
35
+
36
+ def __init__(
37
+ self,
38
+ name: str,
39
+ fmt_metric: Optional[FmtMetric] = None,
40
+ result_cls: type[Result] = Result,
41
+ ) -> None:
42
+ """Creates generic context identified by `name`.
43
+
44
+ Generic contexts just format associated metrics and evaluate
45
+ always to :obj:`~mplugin.state.Ok`. Metric formatting is
46
+ controlled with the :attr:`fmt_metric` attribute. It can either
47
+ be a string or a callable. See the :meth:`describe` method for
48
+ how formatting is done.
49
+
50
+ :param name: context name that is matched by the context
51
+ attribute of :class:`~mplugin.metric.Metric`
52
+ :param fmt_metric: string or callable to convert
53
+ context and associated metric to a human readable string
54
+ :param result_cls: use this class (usually a
55
+ :class:`~.result.Result` subclass) to represent the
56
+ evaluation outcome
57
+ """
58
+ self.name = name
59
+ self.fmt_metric = fmt_metric
60
+ self.result_cls = result_cls
61
+
62
+ def evaluate(self, metric: "Metric", resource: "Resource") -> Result:
63
+ """Determines state of a given metric.
64
+
65
+ This base implementation returns :class:`~mplugin.state.Ok`
66
+ in all cases. Plugin authors may override this method in
67
+ subclasses to specialize behaviour.
68
+
69
+ :param metric: associated metric that is to be evaluated
70
+ :param resource: resource that produced the associated metric
71
+ (may optionally be consulted)
72
+ :returns: :class:`~.result.Result` or
73
+ :class:`~.state.ServiceState` object
74
+ """
75
+ return self.result_cls(ok, metric=metric)
76
+
77
+ # This could be corrected by re-implementing this class as a proper ABC.
78
+ # See issue #43
79
+ # pylint: disable-next=no-self-use
80
+ def performance(
81
+ self, metric: "Metric", resource: "Resource"
82
+ ) -> Optional[Performance]:
83
+ """Derives performance data from a given metric.
84
+
85
+ This base implementation just returns None. Plugin authors may
86
+ override this method in subclasses to specialize behaviour.
87
+
88
+ :param metric: associated metric from which performance data are
89
+ derived
90
+ :param resource: resource that produced the associated metric
91
+ (may optionally be consulted)
92
+ :returns: :class:`~.performance.Performance` object or `None`
93
+ """
94
+ return None
95
+
96
+ def describe(self, metric: "Metric") -> Optional[str]:
97
+ """Provides human-readable metric description.
98
+
99
+ Formats the metric according to the :attr:`fmt_metric`
100
+ attribute. If :attr:`fmt_metric` is a string, it is evaluated as
101
+ format string with all metric attributes in the root namespace.
102
+ If :attr:`fmt_metric` is callable, it is called with the metric
103
+ and this context as arguments. If :attr:`fmt_metric` is not set,
104
+ this default implementation does not return a description.
105
+
106
+ Plugin authors may override this method in subclasses to control
107
+ text output more tightly.
108
+
109
+ :param metric: associated metric
110
+ :returns: description string or None
111
+ """
112
+ if not self.fmt_metric:
113
+ return None
114
+
115
+ if isinstance(self.fmt_metric, str):
116
+ return self.fmt_metric.format(
117
+ name=metric.name,
118
+ value=metric.value,
119
+ uom=metric.uom,
120
+ valueunit=metric.valueunit,
121
+ min=metric.min,
122
+ max=metric.max,
123
+ )
124
+
125
+ return self.fmt_metric(metric, self)
126
+
127
+
128
+ class ScalarContext(Context):
129
+ def __init__(
130
+ self,
131
+ name: str,
132
+ warning=None,
133
+ critical=None,
134
+ fmt_metric: FmtMetric = "{name} is {valueunit}",
135
+ result_cls: type[Result] = Result,
136
+ ):
137
+ """Ready-to-use :class:`Context` subclass for scalar values.
138
+
139
+ ScalarContext models the common case where a single scalar is to
140
+ be evaluated against a pair of warning and critical thresholds.
141
+
142
+ :attr:`name`, :attr:`fmt_metric`, and :attr:`result_cls`,
143
+ are described in the :class:`Context` base class.
144
+
145
+ :param warning: Warning threshold as
146
+ :class:`~mplugin.range.Range` object or range string.
147
+ :param critical: Critical threshold as
148
+ :class:`~mplugin.range.Range` object or range string.
149
+ """
150
+ super(ScalarContext, self).__init__(name, fmt_metric, result_cls)
151
+ self.warning = Range(warning)
152
+ self.critical = Range(critical)
153
+
154
+ def evaluate(self, metric, resource):
155
+ """Compares metric with ranges and determines result state.
156
+
157
+ The metric's value is compared to the instance's :attr:`warning`
158
+ and :attr:`critical` ranges, yielding an appropriate state
159
+ depending on how the metric fits in the ranges. Plugin authors
160
+ may override this method in subclasses to provide custom
161
+ evaluation logic.
162
+
163
+ :param metric: metric that is to be evaluated
164
+ :param resource: not used
165
+ :returns: :class:`~mplugin.result.Result` object
166
+ """
167
+ if not self.critical.match(metric.value):
168
+ return self.result_cls(critical, self.critical.violation, metric)
169
+ if not self.warning.match(metric.value):
170
+ return self.result_cls(warn, self.warning.violation, metric)
171
+ return self.result_cls(ok, None, metric)
172
+
173
+ def performance(self, metric, resource):
174
+ """Derives performance data.
175
+
176
+ The metric's attributes are combined with the local
177
+ :attr:`warning` and :attr:`critical` ranges to get a
178
+ fully populated :class:`~mplugin.performance.Performance`
179
+ object.
180
+
181
+ :param metric: metric from which performance data are derived
182
+ :param resource: not used
183
+ :returns: :class:`~mplugin.performance.Performance` object
184
+ """
185
+ return Performance(
186
+ metric.name,
187
+ metric.value,
188
+ metric.uom,
189
+ self.warning,
190
+ self.critical,
191
+ metric.min,
192
+ metric.max,
193
+ )
194
+
195
+
196
+ class Contexts:
197
+ """Container for collecting all generated contexts."""
198
+
199
+ by_name: dict[str, Context]
200
+
201
+ def __init__(self):
202
+ self.by_name = dict(
203
+ default=ScalarContext("default", "", ""), null=Context("null")
204
+ )
205
+
206
+ def add(self, context: Context) -> None:
207
+ self.by_name[context.name] = context
208
+
209
+ def __getitem__(self, context_name: str) -> Context:
210
+ try:
211
+ return self.by_name[context_name]
212
+ except KeyError:
213
+ raise KeyError(
214
+ "cannot find context",
215
+ context_name,
216
+ "known contexts: {0}".format(", ".join(self.by_name.keys())),
217
+ )
218
+
219
+ def __contains__(self, context_name: str) -> bool:
220
+ return context_name in self.by_name
221
+
222
+ def __iter__(self) -> typing.Iterator[str]:
223
+ return iter(self.by_name)