webchanges 3.24.0__tar.gz → 3.25.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. {webchanges-3.24.0/webchanges.egg-info → webchanges-3.25.0}/PKG-INFO +1 -1
  2. {webchanges-3.24.0 → webchanges-3.25.0}/webchanges/__init__.py +1 -1
  3. webchanges-3.25.0/webchanges/_vendored/headers.py +319 -0
  4. {webchanges-3.24.0 → webchanges-3.25.0}/webchanges/cli.py +12 -2
  5. {webchanges-3.24.0 → webchanges-3.25.0}/webchanges/command.py +28 -21
  6. {webchanges-3.24.0 → webchanges-3.25.0}/webchanges/config.py +19 -17
  7. {webchanges-3.24.0 → webchanges-3.25.0}/webchanges/differs.py +92 -64
  8. {webchanges-3.24.0 → webchanges-3.25.0}/webchanges/filters.py +18 -21
  9. {webchanges-3.24.0 → webchanges-3.25.0}/webchanges/handler.py +14 -4
  10. {webchanges-3.24.0 → webchanges-3.25.0}/webchanges/jobs.py +65 -46
  11. {webchanges-3.24.0 → webchanges-3.25.0}/webchanges/mailer.py +5 -5
  12. {webchanges-3.24.0 → webchanges-3.25.0}/webchanges/reporters.py +72 -34
  13. {webchanges-3.24.0 → webchanges-3.25.0}/webchanges/storage.py +83 -10
  14. {webchanges-3.24.0 → webchanges-3.25.0}/webchanges/storage_minidb.py +1 -1
  15. {webchanges-3.24.0 → webchanges-3.25.0}/webchanges/util.py +23 -7
  16. {webchanges-3.24.0 → webchanges-3.25.0}/webchanges/worker.py +3 -3
  17. {webchanges-3.24.0 → webchanges-3.25.0/webchanges.egg-info}/PKG-INFO +1 -1
  18. {webchanges-3.24.0 → webchanges-3.25.0}/webchanges.egg-info/SOURCES.txt +1 -1
  19. webchanges-3.24.0/webchanges/_vendored/case_insensitive_dict.py +0 -101
  20. {webchanges-3.24.0 → webchanges-3.25.0}/LICENSE +0 -0
  21. {webchanges-3.24.0 → webchanges-3.25.0}/MANIFEST.in +0 -0
  22. {webchanges-3.24.0 → webchanges-3.25.0}/README.rst +0 -0
  23. {webchanges-3.24.0 → webchanges-3.25.0}/pyproject.toml +0 -0
  24. {webchanges-3.24.0 → webchanges-3.25.0}/requirements.txt +0 -0
  25. {webchanges-3.24.0 → webchanges-3.25.0}/setup.cfg +0 -0
  26. {webchanges-3.24.0 → webchanges-3.25.0}/webchanges/_vendored/__init__.py +0 -0
  27. {webchanges-3.24.0 → webchanges-3.25.0}/webchanges/_vendored/packaging_version.py +0 -0
  28. {webchanges-3.24.0 → webchanges-3.25.0}/webchanges/main.py +0 -0
  29. {webchanges-3.24.0 → webchanges-3.25.0}/webchanges/py.typed +0 -0
  30. {webchanges-3.24.0 → webchanges-3.25.0}/webchanges.egg-info/dependency_links.txt +0 -0
  31. {webchanges-3.24.0 → webchanges-3.25.0}/webchanges.egg-info/entry_points.txt +0 -0
  32. {webchanges-3.24.0 → webchanges-3.25.0}/webchanges.egg-info/requires.txt +0 -0
  33. {webchanges-3.24.0 → webchanges-3.25.0}/webchanges.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: webchanges
3
- Version: 3.24.0
3
+ Version: 3.25.0
4
4
  Summary: Check web (or command output) for changes since last run and notify. Anonymously alerts you of web changes, with
5
5
  Author-email: Mike Borsetti <mike+webchanges@borsetti.com>
6
6
  Maintainer-email: Mike Borsetti <mike+webchanges@borsetti.com>
@@ -22,7 +22,7 @@ __project_name__ = __package__
22
22
  # * MINOR version when you add functionality in a backwards compatible manner, and
23
23
  # * MICRO or PATCH version when you make backwards compatible bug fixes. We no longer use '0'
24
24
  # If unsure on increments, use pkg_resources.parse_version to parse
25
- __version__ = '3.24.0'
25
+ __version__ = '3.25.0'
26
26
  __description__ = (
27
27
  'Check web (or command output) for changes since last run and notify.\n'
28
28
  '\n'
@@ -0,0 +1,319 @@
1
+ """
2
+ Vendored version of httpx.Headers class from httpx v0.27.0 released on 21-Feb-24
3
+ https://github.com/encode/httpx/releases/tag/0.27.0.
4
+
5
+ Allows us to load this class in case httpx isn't installed.
6
+
7
+ See https://github.com/psf/requests and https://github.com/encode/httpx/blob/master/httpx/_models.py
8
+ """
9
+
10
+ from __future__ import annotations
11
+
12
+ from typing import (
13
+ Any,
14
+ AnyStr,
15
+ ItemsView,
16
+ Iterable,
17
+ Iterator,
18
+ KeysView,
19
+ List,
20
+ Mapping,
21
+ MutableMapping,
22
+ Sequence,
23
+ Tuple,
24
+ Union,
25
+ ValuesView,
26
+ )
27
+
28
+ HeaderTypes = Union[
29
+ 'Headers',
30
+ Mapping[str, str],
31
+ Mapping[bytes, bytes],
32
+ Sequence[Tuple[str, str]],
33
+ Sequence[Tuple[bytes, bytes]],
34
+ ]
35
+
36
+
37
+ def normalize_header_key(
38
+ value: str | bytes,
39
+ lower: bool,
40
+ encoding: str | None = None,
41
+ ) -> bytes:
42
+ """
43
+ Coerce str/bytes into a strictly byte-wise HTTP header key.
44
+ """
45
+ if isinstance(value, bytes):
46
+ bytes_value = value
47
+ else:
48
+ bytes_value = value.encode(encoding or 'ascii')
49
+
50
+ return bytes_value.lower() if lower else bytes_value
51
+
52
+
53
+ def normalize_header_value(value: str | bytes, encoding: str | None = None) -> bytes:
54
+ """
55
+ Coerce str/bytes into a strictly byte-wise HTTP header value.
56
+ """
57
+ if isinstance(value, bytes):
58
+ return value
59
+ return value.encode(encoding or 'ascii')
60
+
61
+
62
+ SENSITIVE_HEADERS = {'authorization', 'proxy-authorization'}
63
+
64
+
65
+ def obfuscate_sensitive_headers(
66
+ items: Iterable[tuple[AnyStr, AnyStr]],
67
+ ) -> Iterator[tuple[AnyStr, AnyStr]]:
68
+ for k, v in items:
69
+ if to_str(k.lower()) in SENSITIVE_HEADERS:
70
+ v = to_bytes_or_str('[secure]', match_type_of=v)
71
+ yield k, v
72
+
73
+
74
+ def to_str(value: str | bytes, encoding: str = 'utf-8') -> str:
75
+ return value if isinstance(value, str) else value.decode(encoding)
76
+
77
+
78
+ def to_bytes_or_str(value: str, match_type_of: AnyStr) -> AnyStr:
79
+ return value if isinstance(match_type_of, str) else value.encode()
80
+
81
+
82
+ class Headers(MutableMapping[str, str]):
83
+ """
84
+ HTTP headers, as a case-insensitive multi-dict.
85
+ """
86
+
87
+ def __init__(
88
+ self,
89
+ headers: HeaderTypes | None = None,
90
+ encoding: str | None = None,
91
+ ) -> None:
92
+ if headers is None:
93
+ self._list: List[Tuple[bytes, bytes, bytes]] = []
94
+ elif isinstance(headers, Headers):
95
+ self._list = list(headers._list)
96
+ elif isinstance(headers, Mapping):
97
+ self._list = [
98
+ (
99
+ normalize_header_key(k, lower=False, encoding=encoding),
100
+ normalize_header_key(k, lower=True, encoding=encoding),
101
+ normalize_header_value(v, encoding),
102
+ )
103
+ for k, v in headers.items()
104
+ ]
105
+ else:
106
+ self._list = [
107
+ (
108
+ normalize_header_key(k, lower=False, encoding=encoding),
109
+ normalize_header_key(k, lower=True, encoding=encoding),
110
+ normalize_header_value(v, encoding),
111
+ )
112
+ for k, v in headers
113
+ ]
114
+
115
+ self._encoding = encoding
116
+
117
+ @property
118
+ def encoding(self) -> str:
119
+ """
120
+ Header encoding is mandated as ascii, but we allow fallbacks to utf-8
121
+ or iso-8859-1.
122
+ """
123
+ if self._encoding is None:
124
+ for encoding in ['ascii', 'utf-8']:
125
+ for key, value in self.raw:
126
+ try:
127
+ key.decode(encoding)
128
+ value.decode(encoding)
129
+ except UnicodeDecodeError:
130
+ break
131
+ else:
132
+ # The else block runs if 'break' did not occur, meaning
133
+ # all values fitted the encoding.
134
+ self._encoding = encoding
135
+ break
136
+ else:
137
+ # The ISO-8859-1 encoding covers all 256 code points in a byte,
138
+ # so will never raise decode errors.
139
+ self._encoding = 'iso-8859-1'
140
+ return self._encoding
141
+
142
+ @encoding.setter
143
+ def encoding(self, value: str) -> None:
144
+ self._encoding = value
145
+
146
+ @property
147
+ def raw(self) -> list[tuple[bytes, bytes]]:
148
+ """
149
+ Returns a list of the raw header items, as byte pairs.
150
+ """
151
+ return [(raw_key, value) for raw_key, _, value in self._list]
152
+
153
+ def keys(self) -> KeysView[str]:
154
+ return {key.decode(self.encoding): None for _, key, value in self._list}.keys()
155
+
156
+ def values(self) -> ValuesView[str]:
157
+ values_dict: dict[str, str] = {}
158
+ for _, key, value in self._list:
159
+ str_key = key.decode(self.encoding)
160
+ str_value = value.decode(self.encoding)
161
+ if str_key in values_dict:
162
+ values_dict[str_key] += f', {str_value}'
163
+ else:
164
+ values_dict[str_key] = str_value
165
+ return values_dict.values()
166
+
167
+ def items(self) -> ItemsView[str, str]:
168
+ """
169
+ Return `(key, value)` items of headers. Concatenate headers
170
+ into a single comma separated value when a key occurs multiple times.
171
+ """
172
+ values_dict: dict[str, str] = {}
173
+ for _, key, value in self._list:
174
+ str_key = key.decode(self.encoding)
175
+ str_value = value.decode(self.encoding)
176
+ if str_key in values_dict:
177
+ values_dict[str_key] += f', {str_value}'
178
+ else:
179
+ values_dict[str_key] = str_value
180
+ return values_dict.items()
181
+
182
+ def multi_items(self) -> list[tuple[str, str]]:
183
+ """
184
+ Return a list of `(key, value)` pairs of headers. Allow multiple
185
+ occurrences of the same key without concatenating into a single
186
+ comma separated value.
187
+ """
188
+ return [(key.decode(self.encoding), value.decode(self.encoding)) for _, key, value in self._list]
189
+
190
+ def get(self, key: str, default: Any = None) -> Any:
191
+ """
192
+ Return a header value. If multiple occurrences of the header occur
193
+ then concatenate them together with commas.
194
+ """
195
+ try:
196
+ return self[key]
197
+ except KeyError:
198
+ return default
199
+
200
+ def get_list(self, key: str, split_commas: bool = False) -> list[str]:
201
+ """
202
+ Return a list of all header values for a given key.
203
+ If `split_commas=True` is passed, then any comma separated header
204
+ values are split into multiple return strings.
205
+ """
206
+ get_header_key = key.lower().encode(self.encoding)
207
+
208
+ values = [
209
+ item_value.decode(self.encoding)
210
+ for _, item_key, item_value in self._list
211
+ if item_key.lower() == get_header_key
212
+ ]
213
+
214
+ if not split_commas:
215
+ return values
216
+
217
+ split_values = []
218
+ for value in values:
219
+ split_values.extend([item.strip() for item in value.split(',')])
220
+ return split_values
221
+
222
+ def update(self, headers: HeaderTypes | None = None) -> None: # type: ignore[override]
223
+ headers = Headers(headers)
224
+ for key in headers.keys():
225
+ if key in self:
226
+ self.pop(key)
227
+ self._list.extend(headers._list)
228
+
229
+ def copy(self) -> Headers:
230
+ return Headers(self, encoding=self.encoding)
231
+
232
+ def __getitem__(self, key: str) -> str:
233
+ """
234
+ Return a single header value.
235
+
236
+ If there are multiple headers with the same key, then we concatenate
237
+ them with commas. See: https://tools.ietf.org/html/rfc7230#section-3.2.2
238
+ """
239
+ normalized_key = key.lower().encode(self.encoding)
240
+
241
+ items = [
242
+ header_value.decode(self.encoding)
243
+ for _, header_key, header_value in self._list
244
+ if header_key == normalized_key
245
+ ]
246
+
247
+ if items:
248
+ return ', '.join(items)
249
+
250
+ raise KeyError(key)
251
+
252
+ def __setitem__(self, key: str, value: str) -> None:
253
+ """
254
+ Set the header `key` to `value`, removing any duplicate entries.
255
+ Retains insertion order.
256
+ """
257
+ set_key = key.encode(self._encoding or 'utf-8')
258
+ set_value = value.encode(self._encoding or 'utf-8')
259
+ lookup_key = set_key.lower()
260
+
261
+ found_indexes = [idx for idx, (_, item_key, _) in enumerate(self._list) if item_key == lookup_key]
262
+
263
+ for idx in reversed(found_indexes[1:]):
264
+ del self._list[idx]
265
+
266
+ if found_indexes:
267
+ idx = found_indexes[0]
268
+ self._list[idx] = (set_key, lookup_key, set_value)
269
+ else:
270
+ self._list.append((set_key, lookup_key, set_value))
271
+
272
+ def __delitem__(self, key: str) -> None:
273
+ """
274
+ Remove the header `key`.
275
+ """
276
+ del_key = key.lower().encode(self.encoding)
277
+
278
+ pop_indexes = [idx for idx, (_, item_key, _) in enumerate(self._list) if item_key.lower() == del_key]
279
+
280
+ if not pop_indexes:
281
+ raise KeyError(key)
282
+
283
+ for idx in reversed(pop_indexes):
284
+ del self._list[idx]
285
+
286
+ def __contains__(self, key: Any) -> bool:
287
+ header_key = key.lower().encode(self.encoding)
288
+ return header_key in [key for _, key, _ in self._list]
289
+
290
+ def __iter__(self) -> Iterator[Any]:
291
+ return iter(self.keys())
292
+
293
+ def __len__(self) -> int:
294
+ return len(self._list)
295
+
296
+ def __eq__(self, other: Any) -> bool:
297
+ try:
298
+ other_headers = Headers(other)
299
+ except ValueError:
300
+ return False
301
+
302
+ self_list = [(key, value) for _, key, value in self._list]
303
+ other_list = [(key, value) for _, key, value in other_headers._list]
304
+ return sorted(self_list) == sorted(other_list)
305
+
306
+ def __repr__(self) -> str:
307
+ class_name = self.__class__.__name__
308
+
309
+ encoding_str = ''
310
+ if self.encoding != 'ascii':
311
+ encoding_str = f', encoding={self.encoding!r}'
312
+
313
+ as_list = list(obfuscate_sensitive_headers(self.multi_items()))
314
+ as_dict = dict(as_list)
315
+
316
+ no_duplicate_keys = len(as_dict) == len(as_list)
317
+ if no_duplicate_keys:
318
+ return f'{class_name}({as_dict!r}{encoding_str})'
319
+ return f'{class_name}({as_list!r}{encoding_str})'
@@ -120,7 +120,7 @@ def teardown_logger(verbose: Optional[int] = None) -> None:
120
120
  os.environ.pop('DEBUG', None)
121
121
 
122
122
 
123
- def locate_jobs_files(filename: Path, default_path: Path, ext: Optional[str] = None) -> list[Path]:
123
+ def _expand_jobs_files(filename: Path, default_path: Path, ext: Optional[str] = None) -> list[Path]:
124
124
  """Searches for file both as specified and in the default directory, then retries with 'ext' extension if defined.
125
125
 
126
126
  :param filename: The filename.
@@ -134,6 +134,8 @@ def locate_jobs_files(filename: Path, default_path: Path, ext: Optional[str] = N
134
134
  # if ext is given, iterate both on raw filename and the filename with ext if different
135
135
  if ext and filename.suffix != ext:
136
136
  search_filenames.append(filename.with_suffix(ext))
137
+ # also iterate on file pre-pended with 'jobs-'
138
+ search_filenames.append(filename.with_stem(f'jobs-{filename.stem}').with_suffix(ext))
137
139
 
138
140
  # try as given
139
141
  for file in search_filenames:
@@ -152,6 +154,14 @@ def locate_jobs_files(filename: Path, default_path: Path, ext: Optional[str] = N
152
154
  return [filename]
153
155
 
154
156
 
157
+ def locate_jobs_files(filenames: list[Path], default_path: Path, ext: Optional[str] = None) -> list[Path]:
158
+ job_files = set()
159
+ for filename in filenames:
160
+ for file in _expand_jobs_files(filename, default_path, ext):
161
+ job_files.add(file)
162
+ return list(job_files)
163
+
164
+
155
165
  def locate_storage_file(filename: Path, default_path: Path, ext: Optional[str] = None) -> Path:
156
166
  """Searches for file both as specified and in the default directory, then retries with 'ext' extension if defined.
157
167
 
@@ -353,7 +363,7 @@ def main() -> None: # pragma: no cover
353
363
 
354
364
  # Locate config, job and hooks files
355
365
  command_config.config_file = locate_storage_file(command_config.config_file, command_config.config_path, '.yaml')
356
- command_config.jobs_files = locate_jobs_files(command_config.jobs_def_file, command_config.config_path, '.yaml')
366
+ command_config.jobs_files = locate_jobs_files(command_config.jobs_files, command_config.config_path, '.yaml')
357
367
  command_config.hooks_file = locate_storage_file(command_config.hooks_file, command_config.config_path, '.py')
358
368
 
359
369
  # Check for first run
@@ -11,6 +11,7 @@ import importlib.metadata
11
11
  import logging
12
12
  import os
13
13
  import platform
14
+ import re
14
15
  import shutil
15
16
  import sqlite3
16
17
  import subprocess # noqa: S404 Consider possible security implications associated with the subprocess module.
@@ -21,6 +22,7 @@ from concurrent.futures import ThreadPoolExecutor
21
22
  from datetime import datetime
22
23
  from pathlib import Path
23
24
  from typing import Iterable, Iterator, Optional, TYPE_CHECKING, Union
25
+ from urllib.parse import unquote_plus
24
26
  from zoneinfo import ZoneInfo
25
27
 
26
28
  from webchanges import __docs_url__, __project_name__, __version__
@@ -345,22 +347,29 @@ class UrlwatchCommand:
345
347
  pass
346
348
  return 0
347
349
 
348
- def list_jobs(self) -> None:
350
+ def list_jobs(self, regex: Union[bool, str]) -> None:
349
351
  """
350
352
  Lists the job and their respective _index_number.
351
353
 
352
354
  :return: None.
353
355
  """
356
+ if isinstance(regex, str):
357
+ print(f"List of jobs matching the RegEx '{regex}':")
358
+ else:
359
+ print('List of jobs:')
354
360
  for job in self.urlwatcher.jobs:
355
361
  if self.urlwatch_config.verbose:
356
- print(f'{job.index_number:3}: {job!r}')
362
+ job_desc = f'{job.index_number:3}: {job!r}'
357
363
  else:
358
364
  pretty_name = job.pretty_name()
359
365
  location = job.get_location()
360
366
  if pretty_name != location:
361
- print(f'{job.index_number:3}: {pretty_name} ({location})')
367
+ job_desc = f'{job.index_number:3}: {pretty_name} ({location})'
362
368
  else:
363
- print(f'{job.index_number:3}: {pretty_name}')
369
+ job_desc = f'{job.index_number:3}: {pretty_name}'
370
+ if isinstance(regex, bool) or re.findall(regex, job_desc):
371
+ print(job_desc)
372
+
364
373
  if len(self.urlwatch_config.jobs_files) > 1:
365
374
  jobs_files = ['Jobs files concatenated:'] + [f'• {file}' for file in self.urlwatch_config.jobs_files]
366
375
  elif len(self.urlwatch_config.jobs_files) == 1:
@@ -377,13 +386,17 @@ class UrlwatchCommand:
377
386
  :return: The matching JobBase.
378
387
  :raises IndexError: If job is not found.
379
388
  """
380
- try:
381
- index = int(query)
382
- except ValueError:
389
+ if isinstance(query, int):
390
+ index = query
391
+ else:
383
392
  try:
384
- return next((job for job in self.urlwatcher.jobs if job.get_location() == query))
385
- except StopIteration as e:
386
- raise ValueError(f"Job {query} does not match any job's url/user_visible_url or command.") from e
393
+ index = int(query)
394
+ except ValueError:
395
+ query = unquote_plus(query)
396
+ try:
397
+ return next((job for job in self.urlwatcher.jobs if unquote_plus(job.get_location()) == query))
398
+ except StopIteration:
399
+ raise ValueError(f"Job {query} does not match any job's url/user_visible_url or command.") from None
387
400
 
388
401
  if index == 0:
389
402
  raise ValueError(f'Job index {index} out of range.')
@@ -422,15 +435,15 @@ class UrlwatchCommand:
422
435
  message = [f'No syntax errors in config file {self.urlwatch_config.config_file}']
423
436
  conj = ',\n' if 'hooks' in sys.modules else '\nand '
424
437
  if len(self.urlwatch_config.jobs_files) == 1:
425
- message.append(f'{conj}jobs file {self.urlwatch_config.jobs_files[0]}')
438
+ message.append(f'{conj}jobs file {self.urlwatch_config.jobs_files[0]},')
426
439
  else:
427
440
  message.append(
428
441
  '\n '.join(
429
- [f'{conj}jobs files'] + [f'• {file}' for file in sorted(self.urlwatch_config.jobs_files)]
442
+ [f'{conj}jobs files'] + [f'• {file},' for file in sorted(self.urlwatch_config.jobs_files)]
430
443
  )
431
444
  )
432
445
  if 'hooks' in sys.modules:
433
- message.append(f",\nand hooks file {sys.modules['hooks'].__file__}")
446
+ message.append(f"\nand hooks file {sys.modules['hooks'].__file__}")
434
447
  print(f"{''.join(message)}.")
435
448
  return
436
449
 
@@ -491,10 +504,6 @@ class UrlwatchCommand:
491
504
 
492
505
  job = self._find_job_with_defaults(job_id)
493
506
 
494
- # TODO: The below is a hack; must find whether data is markdown programmatically (e.g. save it in database)
495
- if job.filter:
496
- job.is_markdown = any('html2text' in filter_type for filter_type in job.filter)
497
-
498
507
  history_data = self.urlwatcher.ssdb_storage.get_history_snapshots(job.get_guid())
499
508
 
500
509
  num_snapshots = len(history_data)
@@ -531,8 +540,6 @@ class UrlwatchCommand:
531
540
  job_state.old_etag = history_dic_snapshots[close_matches[0]].etag
532
541
  job_state.old_mime_type = history_dic_snapshots[close_matches[0]].mime_type
533
542
 
534
- # TODO: setting of job_state.job.is_markdown = True when it had been set by a filter.
535
- # Ideally it should be saved as an attribute when saving "data".
536
543
  if self.urlwatch_config.test_reporter is None:
537
544
  self.urlwatch_config.test_reporter = 'stdout' # default
538
545
  report.job_states = [] # required
@@ -647,7 +654,7 @@ class UrlwatchCommand:
647
654
  jobs_files = [f'in jobs file {self.urlwatch_config.jobs_files[0]}:']
648
655
  else:
649
656
  jobs_files = ['in the concatenation of the jobs files'] + [
650
- f'• {file}' for file in self.urlwatch_config.jobs_files
657
+ f'• {file},' for file in self.urlwatch_config.jobs_files
651
658
  ]
652
659
  header = '\n '.join(['Jobs with errors or returning no data (after unmodified filters, if any)'] + jobs_files)
653
660
 
@@ -1038,7 +1045,7 @@ class UrlwatchCommand:
1038
1045
  def handle_actions(self) -> None:
1039
1046
  """Handles the actions for command line arguments and exits."""
1040
1047
  if self.urlwatch_config.list_jobs:
1041
- self.list_jobs()
1048
+ self.list_jobs(self.urlwatch_config.list_jobs)
1042
1049
  self._exit(0)
1043
1050
 
1044
1051
  if self.urlwatch_config.errors:
@@ -33,10 +33,10 @@ class CommandConfig(BaseConfig):
33
33
  """Command line arguments configuration; the arguments are stored as class attributes."""
34
34
 
35
35
  add: Optional[str]
36
- change_location: tuple[Union[int, str], str]
36
+ change_location: Optional[tuple[Union[int, str], str]]
37
37
  check_new: bool
38
- clean_database: int
39
- database_engine: str
38
+ clean_database: Optional[int]
39
+ database_engine: Optional[str]
40
40
  delete: Optional[str]
41
41
  delete_snapshot: Optional[str]
42
42
  detailed_versions: bool
@@ -44,21 +44,22 @@ class CommandConfig(BaseConfig):
44
44
  edit: bool
45
45
  edit_config: bool
46
46
  edit_hooks: bool
47
- errors: str
47
+ errors: Optional[str]
48
48
  features: bool
49
49
  footnote: Optional[str]
50
- gc_database: int
50
+ gc_database: Optional[int]
51
51
  install_chrome: bool
52
52
  joblist: list[str]
53
- list_jobs: bool
54
- max_snapshots: int
53
+ jobs_files: list[Path]
54
+ list_jobs: Optional[Union[bool, str]]
55
+ max_snapshots: Optional[int]
55
56
  max_workers: Optional[int]
56
57
  no_headless: bool
57
58
  rollback_database: Optional[str]
58
59
  smtp_login: bool
59
60
  telegram_chats: bool
60
61
  test_differ: Optional[list[str]]
61
- test_job: Union[bool, Optional[str]]
62
+ test_job: Optional[Union[bool, str]]
62
63
  test_reporter: Optional[str]
63
64
  verbose: Optional[int]
64
65
  xmpp_login: bool
@@ -82,8 +83,8 @@ class CommandConfig(BaseConfig):
82
83
  snapshots are stored.
83
84
  """
84
85
  super().__init__(config_path, config_file, jobs_def_file, hooks_file, ssdb_file)
85
- self.jobs_files = [jobs_def_file]
86
86
  self.parse_args(args)
87
+ self.jobs_files = self.jobs_files or [jobs_def_file]
87
88
 
88
89
  class CustomHelpFormatter(argparse.RawDescriptionHelpFormatter):
89
90
  def __init__(self, prog: str) -> None:
@@ -114,10 +115,7 @@ class CommandConfig(BaseConfig):
114
115
  parser.add_argument(
115
116
  'joblist',
116
117
  nargs='*',
117
- help=(
118
- 'JOB(S) to run (if one, index as per --list or URL/command, if multiple, by index) (default: run all '
119
- 'jobs)'
120
- ),
118
+ help=('JOB(S) to run (index number(s) as per --list; if one also URL/command) (default: run all jobs)'),
121
119
  metavar='JOB(S)',
122
120
  )
123
121
  parser.add_argument(
@@ -135,11 +133,12 @@ class CommandConfig(BaseConfig):
135
133
  group.add_argument(
136
134
  '--jobs',
137
135
  '--urls',
138
- default=self.jobs_def_file,
136
+ action='append',
137
+ # default=[self.jobs_def_file],
139
138
  type=Path,
140
139
  help='read job list (URLs/commands) from FILE or files matching a glob pattern',
141
140
  metavar='FILE',
142
- dest='jobs_def_file',
141
+ dest='jobs_files',
143
142
  )
144
143
  group.add_argument(
145
144
  '--config',
@@ -170,8 +169,11 @@ class CommandConfig(BaseConfig):
170
169
  group = parser.add_argument_group('job management')
171
170
  group.add_argument(
172
171
  '--list-jobs',
173
- action='store_true',
174
- help='list jobs and their index number',
172
+ nargs='?',
173
+ const=True,
174
+ help='list jobs and their index number (optional: only those who match REGEX)',
175
+ metavar='REGEX',
176
+ dest='list_jobs',
175
177
  )
176
178
  group.add_argument(
177
179
  '--errors',