jupyter-analysis-tools 1.2.0__tar.gz → 1.2.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/.cookiecutterrc +1 -1
  2. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/CHANGELOG.md +36 -0
  3. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/PKG-INFO +41 -5
  4. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/README.rst +4 -4
  5. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/docs/conf.py +1 -1
  6. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/src/jupyter_analysis_tools/__init__.py +3 -2
  7. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/src/jupyter_analysis_tools/readdata.py +38 -24
  8. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/src/jupyter_analysis_tools/utils.py +2 -1
  9. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/src/jupyter_analysis_tools.egg-info/PKG-INFO +41 -5
  10. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/testdata/S2842 water.json +7 -17
  11. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/tests/readdata.py +26 -15
  12. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/tests/utils.py +3 -2
  13. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/.editorconfig +0 -0
  14. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/.pre-commit-config.yaml +0 -0
  15. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/AUTHORS.rst +0 -0
  16. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/CONTRIBUTING.rst +0 -0
  17. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/LICENSE +0 -0
  18. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/MANIFEST.in +0 -0
  19. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/ci/requirements.txt +0 -0
  20. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/ci/templates/.github/workflows/build.yml +0 -0
  21. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/ci/templates/.github/workflows/ci-cd.yml +0 -0
  22. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/ci/templates/.github/workflows/coverage.yml +0 -0
  23. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/ci/templates/.github/workflows/docs.yml +0 -0
  24. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/ci/templates/.github/workflows/release.yml +0 -0
  25. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/ci/templates/.github/workflows/tests.yml +0 -0
  26. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/ci/update.py +0 -0
  27. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/docs/_templates/class.rst +0 -0
  28. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/docs/_templates/module.rst +0 -0
  29. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/docs/authors.rst +0 -0
  30. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/docs/changelog.rst +0 -0
  31. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/docs/contributing.rst +0 -0
  32. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/docs/index.rst +0 -0
  33. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/docs/installation.rst +0 -0
  34. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/docs/readme.rst +0 -0
  35. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/docs/reference/index.rst +0 -0
  36. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/docs/requirements.txt +0 -0
  37. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/docs/spelling_wordlist.txt +0 -0
  38. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/docs/usage.rst +0 -0
  39. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/pyproject.toml +0 -0
  40. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/setup.cfg +0 -0
  41. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/src/jupyter_analysis_tools/analysis.py +0 -0
  42. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/src/jupyter_analysis_tools/binning.py +0 -0
  43. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/src/jupyter_analysis_tools/datalocations.py +0 -0
  44. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/src/jupyter_analysis_tools/distrib.py +0 -0
  45. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/src/jupyter_analysis_tools/git.py +0 -0
  46. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/src/jupyter_analysis_tools/plotting.py +0 -0
  47. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/src/jupyter_analysis_tools/widgets.py +0 -0
  48. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/src/jupyter_analysis_tools.egg-info/SOURCES.txt +0 -0
  49. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/src/jupyter_analysis_tools.egg-info/dependency_links.txt +0 -0
  50. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/src/jupyter_analysis_tools.egg-info/requires.txt +0 -0
  51. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/src/jupyter_analysis_tools.egg-info/top_level.txt +0 -0
  52. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/templates/CHANGELOG.md.j2 +0 -0
  53. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/testdata/2015-03-20-Silica.ssf.json +0 -0
  54. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/testdata/2015-03-20-Silica.ssfz +0 -0
  55. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/testdata/S2842 water.pdh +0 -0
  56. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/testdata/S2843[9].pdh +0 -0
  57. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/tests/requirements.txt +0 -0
  58. {jupyter_analysis_tools-1.2.0 → jupyter_analysis_tools-1.2.2}/tox.ini +0 -0
@@ -20,7 +20,7 @@ default_context:
  release_date: 'today'
  year_from: '2018'
  year_to: '2023'
- version: '1.2.0'
+ version: '1.2.2'
  pypi_host: 'pypi.org'
  license: 'MIT license'
  sphinx_theme: 'furo'
@@ -1,5 +1,41 @@
  # CHANGELOG
 
+ ## v1.2.2 (2025-07-15)
+
+ ### Bug fixes
+
+ * readPDHmeta: use unique dict keys, the xml *key* can occur in multiple groups in PDH ([`ef41c81`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/ef41c81d40d801b5baf86f56cf9012ca35d2ccde))
+
+ ### Documentation
+
+ * pyproject: revert specify readme+changelog document types ([`1baa762`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/1baa762d441fe0a1b7b663b9d0589de857277426))
+
+ * pyproject: specify readme+changelog document types to render overview on pypi correctly ([`6e4d1e5`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/6e4d1e56640b604f971ddca8dabd8d1aff5c9bf1))
+
+ * ghpages: make sure .nojekyll exists after purging old html docs ([`4847845`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/4847845cc06884b6e589b26897e83411d649ef4d))
+
+ ## v1.2.1 (2025-07-11)
+
+ ### Bug fixes
+
+ * readme: trigger new version after style changes ([`8b2b5e9`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/8b2b5e93c0f24ae59afaa764abdc508e994946b4))
+
+ ### Code style
+
+ * __init__: imports format ([`6f07790`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/6f07790a04e43736b1c0fbce0eac54d0b661a7cf))
+
+ * utils: satisfy flake8 ([`9657474`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/9657474e94a4d9887c4a6b653e0ffa403e666d02))
+
+ * readdata: satisfy flake8 ([`36bf6e8`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/36bf6e8be67a2ebd345c5557c2352710bcebed82))
+
+ ### Continuous integration
+
+ * workflow: publish only if the docs are good ([`a663ed3`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/a663ed3d1fd87079a4fd7cf419132a129280a562))
+
+ ### Testing
+
+ * utils: fix imports ([`ddd5369`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/ddd5369b8037f583c6900aea25522a56126c9d32))
+
  ## v1.2.0 (2025-07-11)
 
  ### Features
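
The v1.2.2 bug fix is easiest to see in miniature: when several `<group>` elements carry the same `key` attribute, a dict keyed on it silently keeps only the last group, so the parser now prefers the unique `id` attribute instead (compare the readdata.py and testdata hunks below). A minimal sketch of the failure mode, with an illustrative element structure rather than the exact PDH schema:

```python
# Minimal sketch of the key collision fixed in v1.2.2; the XML below is
# illustrative, only the "key"/"id" attributes mirror the PDH format.
import xml.etree.ElementTree as et

xml = """<parameter>
    <group key="device" id="generator::custom_generator#0"/>
    <group key="device" id="camera::AP_SAXSess#0"/>
</parameter>"""
root = et.fromstring(xml)

naive = {g.get("key"): g.attrib for g in root}                # second group overwrites the first
unique = {g.get("id", g.get("key")): g.attrib for g in root}  # the fix: prefer the unique "id"
assert len(naive) == 1 and len(unique) == 2
```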
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: jupyter-analysis-tools
- Version: 1.2.0
+ Version: 1.2.2
  Summary: Yet another Python library with helpers and utilities for data analysis and processing.
  Author-email: Ingo Breßler <dev@ingobressler.net>
  License: MIT license
@@ -53,8 +53,8 @@ Yet another Python library with helpers and utilities for data analysis and proc
      :target: https://pypi.org/project/jupyter-analysis-tools
      :alt: PyPI Package latest release
 
- .. |commits-since| image:: https://img.shields.io/github/commits-since/BAMresearch/jupyter-analysis-tools/v1.2.0.svg
-     :target: https://github.com/BAMresearch/jupyter-analysis-tools/compare/v1.2.0...main
+ .. |commits-since| image:: https://img.shields.io/github/commits-since/BAMresearch/jupyter-analysis-tools/v1.2.2.svg
+     :target: https://github.com/BAMresearch/jupyter-analysis-tools/compare/v1.2.2...main
      :alt: Commits since latest release
 
  .. |license| image:: https://img.shields.io/pypi/l/jupyter-analysis-tools.svg
@@ -104,9 +104,9 @@ https://BAMresearch.github.io/jupyter-analysis-tools
  Development
  ===========
 
- To run all the tests run::
+ Run all tests with::
 
-     tox
+     tox -e py
 
  Note, to combine the coverage data from all the tox environments run:
 
@@ -127,6 +127,42 @@ Note, to combine the coverage data from all the tox environments run:
 
  # CHANGELOG
 
+ ## v1.2.2 (2025-07-15)
+
+ ### Bug fixes
+
+ * readPDHmeta: use unique dict keys, the xml *key* can occur in multiple groups in PDH ([`ef41c81`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/ef41c81d40d801b5baf86f56cf9012ca35d2ccde))
+
+ ### Documentation
+
+ * pyproject: revert specify readme+changelog document types ([`1baa762`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/1baa762d441fe0a1b7b663b9d0589de857277426))
+
+ * pyproject: specify readme+changelog document types to render overview on pypi correctly ([`6e4d1e5`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/6e4d1e56640b604f971ddca8dabd8d1aff5c9bf1))
+
+ * ghpages: make sure .nojekyll exists after purging old html docs ([`4847845`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/4847845cc06884b6e589b26897e83411d649ef4d))
+
+ ## v1.2.1 (2025-07-11)
+
+ ### Bug fixes
+
+ * readme: trigger new version after style changes ([`8b2b5e9`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/8b2b5e93c0f24ae59afaa764abdc508e994946b4))
+
+ ### Code style
+
+ * __init__: imports format ([`6f07790`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/6f07790a04e43736b1c0fbce0eac54d0b661a7cf))
+
+ * utils: satisfy flake8 ([`9657474`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/9657474e94a4d9887c4a6b653e0ffa403e666d02))
+
+ * readdata: satisfy flake8 ([`36bf6e8`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/36bf6e8be67a2ebd345c5557c2352710bcebed82))
+
+ ### Continuous integration
+
+ * workflow: publish only if the docs are good ([`a663ed3`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/a663ed3d1fd87079a4fd7cf419132a129280a562))
+
+ ### Testing
+
+ * utils: fix imports ([`ddd5369`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/ddd5369b8037f583c6900aea25522a56126c9d32))
+
  ## v1.2.0 (2025-07-11)
 
  ### Features
@@ -14,8 +14,8 @@ Yet another Python library with helpers and utilities for data analysis and proc
      :target: https://pypi.org/project/jupyter-analysis-tools
      :alt: PyPI Package latest release
 
- .. |commits-since| image:: https://img.shields.io/github/commits-since/BAMresearch/jupyter-analysis-tools/v1.2.0.svg
-     :target: https://github.com/BAMresearch/jupyter-analysis-tools/compare/v1.2.0...main
+ .. |commits-since| image:: https://img.shields.io/github/commits-since/BAMresearch/jupyter-analysis-tools/v1.2.2.svg
+     :target: https://github.com/BAMresearch/jupyter-analysis-tools/compare/v1.2.2...main
      :alt: Commits since latest release
 
  .. |license| image:: https://img.shields.io/pypi/l/jupyter-analysis-tools.svg
@@ -65,9 +65,9 @@ https://BAMresearch.github.io/jupyter-analysis-tools
  Development
  ===========
 
- To run all the tests run::
+ Run all tests with::
 
-     tox
+     tox -e py
 
  Note, to combine the coverage data from all the tox environments run:
 
@@ -29,7 +29,7 @@ project = "Jupyter Analysis Tools"
  year = "2018-2023"
  author = "Ingo Breßler"
  copyright = "{0}, {1}".format(year, author)
- version = "1.2.0"
+ version = "1.2.2"
  release = version
  commit_id = None
  try:
@@ -1,12 +1,13 @@
  # -*- coding: utf-8 -*-
  # __init__.py
 
- __version__ = "1.2.0"
+ __version__ = "1.2.2"
 
  from .binning import reBin
  from .git import checkRepo, isNBstripoutActivated, isNBstripoutInstalled, isRepo
- from .readdata import readdata, readPDHmeta, readSSF
+ from .readdata import readdata
  from .readdata import readdata as readPDH
+ from .readdata import readPDHmeta, readSSF
  from .utils import setLocaleUTF8
  from .widgets import PathSelector, showBoolStatus
 
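
The reordered imports above define the package's public surface; `readPDH` is just an alias of `readdata`. A hedged usage sketch — the return-value unpacking is inferred from the tests/readdata.py hunks below, not from documented API:

```python
# Hedged usage sketch; readPDH is an alias of readdata per the imports above.
# The (name, DataFrame) unpacking is inferred from tests/readdata.py below.
from jupyter_analysis_tools import readPDH, readPDHmeta

fn, df = readPDH("testdata/S2842 water.pdh")    # assumed return shape
meta = readPDHmeta("testdata/S2842 water.pdh")  # nested dict of the XML metadata
print(fn, df.columns.tolist(), sorted(meta.keys()))
```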
@@ -1,7 +1,6 @@
  # -*- coding: utf-8 -*-
  # readdata.py
 
- import json
  import warnings
  import xml.etree.ElementTree as et
  from pathlib import Path
@@ -62,20 +61,28 @@ def convertValue(val):
          pass
      return val
 
+
  def xmlPDHToDict(root):
      result = {}
      stack = [(root, result)]
      while stack:
          elem, parentCont = stack.pop()
          elemCont = {}
-         key = elem.attrib.pop("key", None)
          idx = -1
-         if (not len(list(elem)) and not len(elem.attrib)
-                 and not (elem.text and len(elem.text.strip()))):
-             continue # skip empty elements with a key only early
+         key = elem.attrib.pop("key", None)
+         if (  # get a unique key, the key can occur in multiple groups in PDH
+             key is not None and elem.tag == "group" and elem.attrib.get("id", None) is not None
+         ):
+             key = elem.attrib.pop("id")
+         if (  # skip empty elements with a key only early
+             not len(list(elem))
+             and not len(elem.attrib)
+             and not (elem.text and len(elem.text.strip()))
+         ):
+             continue
          if elem.tag == "list":
              elemCont = []
-         else: # add attributes & values to dict
+         else:  # add attributes & values to dict
              # Attach text, if any
              if elem.text and len(elem.text.strip()):
                  if elem.tag in ("value", "reference"):
@@ -84,17 +91,20 @@ def xmlPDHToDict(root):
                      elemCont["#text"] = convertValue(elem.text)
              # Attach attributes, if any
              if elem.attrib:
-                 elemCont.update({k: convertValue(v) for k, v in elem.attrib.items() if len(v.strip())})
-         if key == "unit" and "value" in elemCont: # fix some units
+                 elemCont.update(
+                     {k: convertValue(v) for k, v in elem.attrib.items() if len(v.strip())}
+                 )
+         if key == "unit" and "value" in elemCont:  # fix some units
              elemCont["value"] = elemCont["value"].replace("_", "")
              if "unit" in elemCont:
                  elemCont["unit"] = elemCont["unit"].replace("_", "")
          # reduce the extracted dict&attributes
-         idx = elemCont.get("index", -1) # insert last/append if no index given
+         idx = elemCont.get("index", -1)  # insert last/append if no index given
          value = elemCont.get("value", None)
-         if value is not None and (len(elemCont) == 1
-                 or (len(elemCont) == 2 and "index" in elemCont)):
-             elemCont = value # contains value only
+         if value is not None and (
+             len(elemCont) == 1 or (len(elemCont) == 2 and "index" in elemCont)
+         ):
+             elemCont = value  # contains value only
          parentKey = elem.tag
          if key is not None and parentKey in ("list", "value", "group"):
              # skip one level in hierarchy for these generic containers
@@ -103,21 +113,22 @@ def xmlPDHToDict(root):
          try:
              if isinstance(parentCont, list):
                  parentCont.insert(idx, elemCont)
-             elif parentKey not in parentCont: # add as new list
-                 if key is None: # make a list
+             elif parentKey not in parentCont:  # add as new list
+                 if key is None:  # make a list
                      parentCont[parentKey] = elemCont
-                 else: # have a key
+                 else:  # have a key
                      parentCont[parentKey] = {key: elemCont}
-             else: # parentKey exists already
-                 if (not isinstance(parentCont[parentKey], list) and
-                         not isinstance(parentCont[parentKey], dict)):
+             else:  # parentKey exists already
+                 if not isinstance(parentCont[parentKey], list) and not isinstance(
+                     parentCont[parentKey], dict
+                 ):
                      # if its a plain value before, make a list out of it and append in next step
                      parentCont[parentKey] = [parentCont[parentKey]]
                  if isinstance(parentCont[parentKey], list):
                      parentCont[parentKey].append(elemCont)
                  elif key is not None:
                      parentCont[parentKey].update({key: elemCont})
-                 else: # key is None
+                 else:  # key is None
                      parentCont[parentKey].update(elemCont)
          except AttributeError:
              raise
@@ -126,31 +137,34 @@ def xmlPDHToDict(root):
      # fix some entry values, weird Anton Paar PDH format
      try:
          oldts = result["fileinfo"]["parameter"]["DateTime"]["value"]
-         delta = (39*365+10)*24*3600 # timestamp seems to be based on around 2009-01-01 (a day give or take)
+         # timestamp seems to be based on around 2009-01-01 (a day give or take)
+         delta = (39 * 365 + 10) * 24 * 3600
          # make it compatible to datetime.datetime routines
-         result["fileinfo"]["parameter"]["DateTime"]["value"] = oldts+delta
+         result["fileinfo"]["parameter"]["DateTime"]["value"] = oldts + delta
      except KeyError:
          pass
      return result
 
+
  def readPDHmeta(fp):
      fp = Path(fp)
      if fp.suffix.lower() != ".pdh":
          warnings.warn("readPDHmeta() supports .pdh files only!")
-         return # for PDH files
+         return  # for PDH files
      lines = ""
      with open(fp) as fd:
          lines = fd.readlines()
      nrows = int(lines[2].split()[0])
-     xml = "".join(lines[nrows+5:])
+     xml = "".join(lines[nrows + 5 :])
      return xmlPDHToDict(et.fromstring(xml))
 
+
  def readSSF(fp):
      fp = Path(fp)
      if fp.suffix.lower() != ".ssf":
          warnings.warn("readSession() supports .ssf files only!")
          return # for PDH files
      data = ""
-     with open(fp, encoding='utf-8-sig') as fd:
+     with open(fp, encoding="utf-8-sig") as fd:
          data = fd.read()
      return xmlPDHToDict(et.fromstring(data))
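
As a reading aid for the slicing in `readPDHmeta()` above (row count from line 3, XML from line `nrows + 5` onward), this is the PDH layout the function appears to assume; all header and data values here are made up:

```python
# Illustrative PDH layout assumed by readPDHmeta(); all values are made up.
pdh_lines = [
    "sample title\n",                  # line 0: description
    "SAXS\n",                          # line 1: keywords
    "      3  0  0  0  0  0  0  0\n",  # line 2: first token = number of data rows
    "  0.0  1.0  0.0  0.0  0.0\n",     # lines 3-4: further header values
    "  0.0  0.0  0.0  1.0  0.0\n",
    "  1.0e-02  5.0e-08  2.0e-08\n",   # then <nrows> data rows (q, I, e)
    "  2.0e-02  4.0e-07  6.0e-08\n",
    "  3.0e-02  3.0e-07  6.0e-08\n",
    "<fileinfo></fileinfo>\n",         # XML metadata block starts at index nrows + 5
]
nrows = int(pdh_lines[2].split()[0])   # -> 3
assert "".join(pdh_lines[nrows + 5:]).startswith("<")
```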
@@ -89,7 +89,8 @@ def addEnvScriptsToPATH():
      """Prepends the *Scripts* directory of the current Python environment base directory to systems
      PATH variable.
 
-     It is intended for Conda (Miniforge) environments on Windows that do not have this in their PATH environment variable, causing them to miss many commands provided from this location.
+     It is intended for Conda (Miniforge) environments on Windows that do not have this in their PATH
+     environment variable, causing them to miss many commands provided from this location.
      """
      envPath = [p for p in sys.path if p.endswith("Lib")]
      if not envPath:
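
Reduced to a sketch, the behavior the docstring describes looks roughly like this; only the `sys.path` filter is taken from the visible code, the `Scripts` directory derivation is an assumption about the rest of the function:

```python
# Hedged sketch of addEnvScriptsToPATH()'s intent; only the sys.path filter
# mirrors the visible code, the rest is an assumption for illustration.
import os
import sys

libDirs = [p for p in sys.path if p.endswith("Lib")]  # environment base dir candidates
if libDirs:
    scripts = os.path.join(os.path.dirname(libDirs[0]), "Scripts")
    os.environ["PATH"] = scripts + os.pathsep + os.environ.get("PATH", "")
```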
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: jupyter-analysis-tools
- Version: 1.2.0
+ Version: 1.2.2
  Summary: Yet another Python library with helpers and utilities for data analysis and processing.
  Author-email: Ingo Breßler <dev@ingobressler.net>
  License: MIT license
@@ -53,8 +53,8 @@ Yet another Python library with helpers and utilities for data analysis and proc
      :target: https://pypi.org/project/jupyter-analysis-tools
      :alt: PyPI Package latest release
 
- .. |commits-since| image:: https://img.shields.io/github/commits-since/BAMresearch/jupyter-analysis-tools/v1.2.0.svg
-     :target: https://github.com/BAMresearch/jupyter-analysis-tools/compare/v1.2.0...main
+ .. |commits-since| image:: https://img.shields.io/github/commits-since/BAMresearch/jupyter-analysis-tools/v1.2.2.svg
+     :target: https://github.com/BAMresearch/jupyter-analysis-tools/compare/v1.2.2...main
      :alt: Commits since latest release
 
  .. |license| image:: https://img.shields.io/pypi/l/jupyter-analysis-tools.svg
@@ -104,9 +104,9 @@ https://BAMresearch.github.io/jupyter-analysis-tools
  Development
  ===========
 
- To run all the tests run::
+ Run all tests with::
 
-     tox
+     tox -e py
 
  Note, to combine the coverage data from all the tox environments run:
 
@@ -127,6 +127,42 @@ Note, to combine the coverage data from all the tox environments run:
 
  # CHANGELOG
 
+ ## v1.2.2 (2025-07-15)
+
+ ### Bug fixes
+
+ * readPDHmeta: use unique dict keys, the xml *key* can occur in multiple groups in PDH ([`ef41c81`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/ef41c81d40d801b5baf86f56cf9012ca35d2ccde))
+
+ ### Documentation
+
+ * pyproject: revert specify readme+changelog document types ([`1baa762`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/1baa762d441fe0a1b7b663b9d0589de857277426))
+
+ * pyproject: specify readme+changelog document types to render overview on pypi correctly ([`6e4d1e5`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/6e4d1e56640b604f971ddca8dabd8d1aff5c9bf1))
+
+ * ghpages: make sure .nojekyll exists after purging old html docs ([`4847845`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/4847845cc06884b6e589b26897e83411d649ef4d))
+
+ ## v1.2.1 (2025-07-11)
+
+ ### Bug fixes
+
+ * readme: trigger new version after style changes ([`8b2b5e9`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/8b2b5e93c0f24ae59afaa764abdc508e994946b4))
+
+ ### Code style
+
+ * __init__: imports format ([`6f07790`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/6f07790a04e43736b1c0fbce0eac54d0b661a7cf))
+
+ * utils: satisfy flake8 ([`9657474`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/9657474e94a4d9887c4a6b653e0ffa403e666d02))
+
+ * readdata: satisfy flake8 ([`36bf6e8`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/36bf6e8be67a2ebd345c5557c2352710bcebed82))
+
+ ### Continuous integration
+
+ * workflow: publish only if the docs are good ([`a663ed3`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/a663ed3d1fd87079a4fd7cf419132a129280a562))
+
+ ### Testing
+
+ * utils: fix imports ([`ddd5369`](https://github.com/BAMresearch/jupyter-analysis-tools/commit/ddd5369b8037f583c6900aea25522a56126c9d32))
+
  ## v1.2.0 (2025-07-11)
 
  ### Features
@@ -165,7 +165,6 @@
  "isdevice": "false",
  "category": "exp",
  "sub": "sample",
- "id": "exp:sample:Default",
  "ParameterCount": {
  "value": 0,
  "counts": "ParameterKey|ParameterName|ParameterValue",
@@ -208,7 +207,6 @@
  "isdevice": "false",
  "category": "exp",
  "sub": "file",
- "id": "exp:file:Default",
  "Filename": {
  "value": "S2842",
  "type": "String",
@@ -261,7 +259,6 @@
  "isdevice": "false",
  "category": "exp",
  "sub": "vari",
- "id": "exp:vari:Default",
  "VariationCount": {
  "value": 0,
  "counts": "Parameter|Count|Start|Increment|Positions|Prefix|Digits|NamingOption",
@@ -316,10 +313,9 @@
  "db": "D"
  }
  },
- "custom_generator": {
+ "generator::custom_generator#0": {
  "isdevice": "true",
  "category": "generator",
- "id": "generator::custom_generator#0",
  "isactive": "true",
  "Name": {
  "value": "PW1830",
@@ -453,10 +449,9 @@
  "type": "String"
  }
  },
- "AP_SAXSess": {
+ "camera::AP_SAXSess#0": {
  "isdevice": "true",
  "category": "camera",
- "id": "camera::AP_SAXSess#0",
  "Name": {
  "value": "SAXSess",
  "type": "String",
@@ -573,11 +568,10 @@
  "type": "String"
  }
  },
- "PI_SCX4300": {
+ "detector:ccd:PI_SCX4300#0": {
  "isdevice": "true",
  "category": "detector",
  "sub": "ccd",
- "id": "detector:ccd:PI_SCX4300#0",
  "Name": {
  "value": "CCD",
  "type": "String",
@@ -897,10 +891,9 @@
  "Y": 1
  }
  },
- "AP_TCS120": {
+ "holder::AP_TCS120#0": {
  "isdevice": "true",
  "category": "holder",
- "id": "holder::AP_TCS120#0",
  "Name": {
  "value": "TCS 120",
  "type": "String",
@@ -957,11 +950,10 @@
  "type": "String"
  }
  },
- "AP_TCU50": {
+ "secu:temp:AP_TCU50#0": {
  "isdevice": "true",
  "category": "secu",
  "sub": "temp",
- "id": "secu:temp:AP_TCU50#0",
  "Name": {
  "value": "TCU 50",
  "type": "String",
@@ -1158,11 +1150,10 @@
  "type": "String"
  }
  },
- "Dectris_Mythen1K": {
+ "detector:ccd:Dectris_Mythen1K#0": {
  "isdevice": "true",
  "category": "detector",
  "sub": "ccd",
- "id": "detector:ccd:Dectris_Mythen1K#0",
  "Name": {
  "value": "1D diode array",
  "type": "String",
@@ -1506,11 +1497,10 @@
  "Y": 1
  }
  },
- "Spark_ASXc": {
+ "sampler:auto:Spark_ASXc#0": {
  "isdevice": "true",
  "category": "sampler",
  "sub": "auto",
- "id": "sampler:auto:Spark_ASXc#0",
  "Name": {
  "value": "ASX-c",
  "type": "String",
@@ -26,17 +26,28 @@ def test_readdata1(capsys):
      assert df.columns.tolist() == ["q", "I", "e"]
      assert df.dtypes.tolist() == [numpy.float64, numpy.float64, numpy.float64]
      # checking the first data values
-     assert numpy.all(df.loc[:2].values == numpy.array(
-         [[-1.005583e00, 5.555556e-08, 2.754402e-08],
-         [-9.989474e-01, 3.611111e-07, 6.568830e-08],
-         [-9.923112e-01, 3.055556e-07, 6.120415e-08]]))
+     assert numpy.all(
+         df.loc[:2].values
+         == numpy.array(
+             [
+                 [-1.005583e00, 5.555556e-08, 2.754402e-08],
+                 [-9.989474e-01, 3.611111e-07, 6.568830e-08],
+                 [-9.923112e-01, 3.055556e-07, 6.120415e-08],
+             ]
+         )
+     )
      # and checking the last data values
-     assert numpy.all(df.loc[df.shape[0] - 3 :].values == numpy.array(
-         [[7.381979e00, 2.972222e-06, 1.792166e-07],
-         [7.388376e00, 2.944444e-06, 1.436040e-07],
-         [7.394774e00, 2.388889e-06, 1.548690e-07]]))
-     assert numpy.all(df.median().values == numpy.array(
-         [3.233221e00, 5.826389e-05, 8.835466e-07]))
+     assert numpy.all(
+         df.loc[df.shape[0] - 3 :].values
+         == numpy.array(
+             [
+                 [7.381979e00, 2.972222e-06, 1.792166e-07],
+                 [7.388376e00, 2.944444e-06, 1.436040e-07],
+                 [7.394774e00, 2.388889e-06, 1.548690e-07],
+             ]
+         )
+     )
+     assert numpy.all(df.median().values == numpy.array([3.233221e00, 5.826389e-05, 8.835466e-07]))
 
 
  def test_readdata2(capsys):
@@ -48,8 +59,7 @@ def test_readdata2(capsys):
      assert fn == "S2843"
      assert df.shape == (427, 3)
      assert df.columns.tolist() == ["q", "I", "e"]
-     assert numpy.all(df.median().values == numpy.array(
-         [1.470428, 0.01907878, 0.01353293]))
+     assert numpy.all(df.median().values == numpy.array([1.470428, 0.01907878, 0.01353293]))
 
 
  def test_readPDHmeta1():
@@ -62,7 +72,7 @@ def test_readPDHmeta1():
 
      # writing the test JSON for comparisons on updates
      # with open(pathPDH1.with_suffix(".json"), "w") as fd:
-     #    json.dump(data, fd, indent=4)
+     #     json.dump(data, fd, indent=4)
 
      # write the JSON formatted metadata to disk, read it back in and compare
      # it with the expected reference from testdata dir
@@ -97,8 +107,9 @@ def test_readSSF():
      # json.dump(data, fd, indent=4)
      # write the JSON formatted session data to disk
      # and compare it with the expected JSON file from testdata dir
-     with open(pathSSFZ.with_suffix(".ssf.json")) as fdRef, \
-             tempfile.TemporaryFile("w+") as fdNew:
+     with open(pathSSFZ.with_suffix(".ssf.json")) as fdRef, tempfile.TemporaryFile(
+         "w+"
+     ) as fdNew:
          json.dump(data, fdNew, indent=4)
          fdNew.seek(0)
          assert fdRef.read() == fdNew.read()
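
The reformatted test_readSSF keeps the same round-trip pattern: dump to a temporary file, rewind, compare with a reference. A self-contained sketch with placeholder data:

```python
# Self-contained sketch of the round-trip comparison used in test_readSSF();
# the data dict is a placeholder, not actual PDH/SSF content.
import json
import tempfile

data = {"fileinfo": {"DateTime": {"value": 0}}}
with tempfile.TemporaryFile("w+") as fdNew:
    json.dump(data, fdNew, indent=4)
    fdNew.seek(0)  # rewind before reading back
    assert json.loads(fdNew.read()) == data
```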
@@ -1,13 +1,14 @@
  # -*- coding: utf-8 -*-
  # tests/utils.py
 
+
+ import os
+
  from jupyter_analysis_tools.utils import appendToPATH, isWindows
 
 
  def test_appendToPATH(capsys):
      # Setting up a PATH for testing first (platform dependent).
-     import os
-     import unittest
      testpath = "/usr/local/sbin:/usr/local/bin:/sbin:/usr/games:/usr/local/games:/snap/bin"
      if isWindows():
          testpath = "something else"