diffpy.utils 3.6.0rc3__py3-none-any.whl → 3.6.1rc0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -47,16 +47,19 @@ def serialize_data(
  data_table: list or ndarray
  Data table.
  dt_colnames: list
- Names of each column in data_table. Every name in data_table_cols will be put into the Dictionary
- as a key with a value of that column in data_table (stored as a List). Put None for columns
- without names. If dt_cols has less non-None entries than columns in data_table,
- the pair {'data table': data_table} will be put in the dictionary.
- (Default None: only entry {'data table': data_table} will be added to dictionary.)
+ Names of each column in data_table. Every name in data_table_cols
+ will be put into the Dictionary as a key with a value of that column
+ in data_table (stored as a List). Put None for columns without names.
+ If dt_cols has less non-None entries than columns in data_table, the
+ pair {'data table': data_table} will be put in the dictionary.
+ (Default None: only entry {'data table': data_table} will be added to
+ dictionary.)
  show_path: bool
- include a path element in the database entry (default True). If 'path' is not included in hddata,
- extract path from filename.
+ include a path element in the database entry (default True). If
+ 'path' is not included in hddata, extract path from filename.
  serial_file
- Serial language file to dump dictionary into. If None (default), no dumping will occur.
+ Serial language file to dump dictionary into. If None (default), no
+ dumping will occur.

  Returns
  -------
@@ -79,32 +82,44 @@ def serialize_data(
  data.update(hdata)

  # second add named columns in dt_cols
- # performed second to prioritize overwriting hdata entries with data_table column entries
+ # performed second to prioritize overwriting hdata entries with data_
+ # table column entries
  named_columns = 0 # initial value
  max_columns = 1 # higher than named_columns to trigger 'data table' entry
  if dt_colnames is not None:
  num_columns = [len(row) for row in data_table]
  max_columns = max(num_columns)
  num_col_names = len(dt_colnames)
- if max_columns < num_col_names: # assume numpy.loadtxt gives non-irregular array
- raise ImproperSizeError("More entries in dt_colnames than columns in data_table.")
+ if (
+ max_columns < num_col_names
+ ): # assume numpy.loadtxt gives non-irregular array
+ raise ImproperSizeError(
+ "More entries in dt_colnames than columns in data_table."
+ )
  named_columns = 0
  for idx in range(num_col_names):
  colname = dt_colnames[idx]
  if colname is not None:
  if colname in hdata.keys():
  warnings.warn(
- f"Entry '{colname}' in hdata has been overwritten by a data_table entry.",
+ (
+ f"Entry '{colname}' in hdata has been "
+ "overwritten by a data_table entry."
+ ),
  RuntimeWarning,
  )
  data.update({colname: list(data_table[:, idx])})
  named_columns += 1

- # finally add data_table as an entry named 'data table' if not all columns were parsed
+ # finally add data_table as an entry named 'data table' if not all
+ # columns were parsed
  if named_columns < max_columns:
  if "data table" in data.keys():
  warnings.warn(
- "Entry 'data table' in hdata has been overwritten by data_table.",
+ (
+ "Entry 'data table' in hdata has been "
+ "overwritten by data_table."
+ ),
  RuntimeWarning,
  )
  data.update({"data table": data_table})
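For readers following the serialize_data hunks above, the snippet below sketches the documented column-naming behaviour with made-up data. It only illustrates the logic shown in this diff; it is not a call to serialize_data itself, whose full signature is not visible here.

    import numpy as np

    data_table = np.array([[1.0, 10.0, 0.1],
                           [2.0, 20.0, 0.2]])
    dt_colnames = ["r", None, "dr"]  # None marks an unnamed column

    data = {}
    for idx, colname in enumerate(dt_colnames):
        if colname is not None:
            # each named column becomes a key holding that column as a list
            data[colname] = list(data_table[:, idx])
    # fewer named columns than total columns, so the whole table is kept too
    if sum(c is not None for c in dt_colnames) < data_table.shape[1]:
        data["data table"] = data_table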
diffpy/utils/resampler.py CHANGED
@@ -22,11 +22,11 @@ import numpy as np
  def wsinterp(x, xp, fp, left=None, right=None):
  """One-dimensional Whittaker-Shannon interpolation.

- Reconstruct a continuous signal from discrete data points by utilizing sinc functions
- as interpolation kernels. This function interpolates the values of fp (array),
- which are defined over xp (array), at new points x (array or float).
- The implementation is based on E. T. Whittaker's 1915 paper
- (https://doi.org/10.1017/S0370164600017806).
+ Reconstruct a continuous signal from discrete data points by utilizing
+ sinc functions as interpolation kernels. This function interpolates the
+ values of fp (array), which are defined over xp (array), at new points x
+ (array or float). The implementation is based on E. T. Whittaker's 1915
+ paper (https://doi.org/10.1017/S0370164600017806).

  Parameters
  ----------
@@ -37,17 +37,18 @@ def wsinterp(x, xp, fp, left=None, right=None):
  fp: ndarray
  The array of y values associated with xp.
  left: float
- If given, set fp for x < xp[0] to left. Otherwise, if left is None (default) or not given,
- set fp for x < xp[0] to fp evaluated at xp[-1].
+ If given, set fp for x < xp[0] to left. Otherwise, if left is None
+ (default) or not given, set fp for x < xp[0] to fp evaluated at xp[-1].
  right: float
- If given, set fp for x > xp[-1] to right. Otherwise, if right is None (default) or not given, set fp for
- x > xp[-1] to fp evaluated at xp[-1].
+ If given, set fp for x > xp[-1] to right. Otherwise, if right is None
+ (default) or not given, set fp for x > xp[-1] to fp evaluated at
+ xp[-1].

  Returns
  -------
  ndarray or float
- The interpolated values at points x. Returns a single float if x is a scalar,
- otherwise returns a numpy.ndarray.
+ The interpolated values at points x. Returns a single float if x is a
+ scalar, otherwise returns a numpy.ndarray.
  """
  scalar = np.isscalar(x)
  if scalar:
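A minimal usage sketch of wsinterp as documented above; the sample arrays are made up, and the import path follows the file header (diffpy/utils/resampler.py).

    import numpy as np
    from diffpy.utils.resampler import wsinterp

    xp = np.linspace(0, 10, 21)   # original sample grid
    fp = np.sin(xp)               # values defined on xp
    x = np.linspace(0, 10, 101)   # finer grid to interpolate onto
    fx = wsinterp(x, xp, fp)      # sinc-kernel (Whittaker-Shannon) interpolation
    print(wsinterp(2.5, xp, fp))  # a scalar x returns a single float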
@@ -82,10 +83,11 @@ def nsinterp(xp, fp, qmin=0, qmax=25, left=None, right=None):
  """One-dimensional Whittaker-Shannon interpolation onto the Nyquist-Shannon
  grid.

- Takes a band-limited function fp and original grid xp and resamples fp on the NS grid.
- Uses the minimum number of points N required by the Nyquist sampling theorem.
- N = (qmax-qmin)(rmax-rmin)/pi, where rmin and rmax are the ends of the real-space ranges.
- fp must be finite, and the user inputs qmin and qmax of the frequency-domain.
+ Takes a band-limited function fp and original grid xp and resamples fp on
+ the NS grid. Uses the minimum number of points N required by the Nyquist
+ sampling theorem. N = (qmax-qmin)(rmax-rmin)/pi, where rmin and rmax are
+ the ends of the real-space ranges. fp must be finite, and the user inputs
+ qmin and qmax of the frequency-domain.

  Parameters
  ----------
@@ -103,8 +105,8 @@ def nsinterp(xp, fp, qmin=0, qmax=25, left=None, right=None):
  x: ndarray
  The Nyquist-Shannon grid computed for the given qmin and qmax.
  fp_at_x: ndarray
- The interpolated values at points x. Returns a single float if x is a scalar,
- otherwise returns a numpy.ndarray.
+ The interpolated values at points x. Returns a single float if x is a
+ scalar, otherwise returns a numpy.ndarray.
  """
  # Ensure numpy array
  xp = np.array(xp)
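A sketch of the grid-size rule quoted in the nsinterp docstring, N = (qmax-qmin)(rmax-rmin)/pi, using made-up data; the call follows the signature shown in this diff, and the exact rounding used internally is an assumption.

    import numpy as np
    from diffpy.utils.resampler import nsinterp

    rgrid = np.linspace(0, 30, 3001)               # real-space grid, rmin=0, rmax=30
    gr = np.exp(-0.1 * rgrid) * np.sin(5 * rgrid)  # finite test signal
    qmin, qmax = 0, 25
    n_expected = (qmax - qmin) * (rgrid.max() - rgrid.min()) / np.pi
    x, gr_at_x = nsinterp(rgrid, gr, qmin=qmin, qmax=qmax)
    print(round(n_expected), len(x))               # NS grid size should be close to N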
@@ -122,8 +124,9 @@ def nsinterp(xp, fp, qmin=0, qmax=25, left=None, right=None):
  def resample(r, s, dr):
  """Resample a PDF on a new grid.

- This uses the Whittaker-Shannon interpolation formula to put s1 on a new grid if dr is less than the sampling
- interval of r1, or linear interpolation if dr is greater than the sampling interval of r1.
+ This uses the Whittaker-Shannon interpolation formula to put s1 on a new
+ grid if dr is less than the sampling interval of r1, or linear
+ interpolation if dr is greater than the sampling interval of r1.

  Parameters
  ----------
@@ -140,8 +143,12 @@ def resample(r, s, dr):
  """

  warnings.warn(
- "The 'resample' function is deprecated and will be removed in a future release (3.8.0). \n"
- "'resample' has been renamed 'wsinterp' to better reflect functionality. Please use 'wsinterp' instead.",
+ (
+ "The 'resample' function is deprecated and will be removed "
+ "in a future release (3.8.0). \n"
+ "'resample' has been renamed 'wsinterp' to better reflect "
+ "functionality. Please use 'wsinterp' instead."
+ ),
  DeprecationWarning,
  stacklevel=2,
  )
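Since the warning above says resample has been renamed wsinterp, a hedged migration sketch follows. The equivalence between resample(r, s, dr) and a wsinterp call on an np.arange grid is an assumption based on the two signatures in this diff, not a documented recipe.

    import numpy as np
    from diffpy.utils.resampler import wsinterp

    r = np.linspace(0, 20, 201)
    s = np.sin(r) / (r + 1.0)
    dr = 0.05
    r_new = np.arange(r.min(), r.max(), dr)  # target grid with spacing dr
    s_new = wsinterp(r_new, r, s)            # instead of the deprecated resample(r, s, dr)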
diffpy/utils/tools.py CHANGED
@@ -22,7 +22,8 @@ def _stringify(string_value):
  Returns
  -------
  str
- The original string if string_value is not None, otherwise an empty string.
+ The original string if string_value is not None, otherwise an empty
+ string.
  """
  return string_value if string_value is not None else ""

@@ -53,38 +54,46 @@ def get_user_info(owner_name=None, owner_email=None, owner_orcid=None):
  """Get name, email, and orcid of the owner/user from various sources and
  return it as a metadata dictionary.

- The function looks for the information in json format configuration files with the name 'diffpyconfig.json'.
- These can be in the user's home directory and in the current working directory. The information in the
- config files are combined, with the local config overriding the home-directory one. Values for
- owner_name, owner_email, and owner_orcid may be passed in to the function and these override the values
- in the config files.
-
- A template for the config file is below. Create a text file called 'diffpyconfig.json' in your home directory
- and copy-paste the template into it, editing it with your real information.
+ The function looks for the information in json format configuration files
+ with the name 'diffpyconfig.json'. These can be in the user's home
+ directory and in the current working directory. The information in the
+ config files are combined, with the local config overriding the
+ home- directory one. Values for owner_name, owner_email, and owner_orcid
+ may be passed in to the function and these override the values in the
+ config files.
+
+ A template for the config file is below. Create a text file called '
+ diffpyconfig.json' in your home directory and copy-paste the template
+ into it, editing it with your real information.
  {
  "owner_name": "<your name as you would like it stored with your data>>",
  "owner_email": "<your_associated_email>>@email.com",
- "owner_orcid": "<your_associated_orcid if you would like this stored with your data>>"
+ "owner_orcid": "<your_associated_orcid if you would like this stored with your data>>" # noqa: E501
  }
- You may also store any other global-level information that you would like associated with your
- diffraction data in this file
+ You may also store any other global-level information that you would like
+ associated with your diffraction data in this file

  Parameters
  ----------
- owner_name : str, optional, default is the value stored in the global or local config file.
- The name of the user who will show as owner in the metadata that is stored with the data
- owner_email : str, optional, default is the value stored in the global or local config file.
- The email of the user/owner
- owner_orcid : str, optional, default is the value stored in the global or local config file.
- The ORCID id of the user/owner
+ owner_name : str, optional, default is the value stored in the global or
+ local config file. The name of the user who will show as owner in the
+ metadata that is stored with the data
+ owner_email : str, optional, default is the value stored in the global or
+ local config file. The email of the user/owner
+ owner_orcid : str, optional, default is the value stored in the global or
+ local config file. The ORCID id of the user/owner

  Returns
  -------
  user_info : dict
- The dictionary containing username, email and orcid of the user/owner, and any other information
- stored in the global or local config files.
+ The dictionary containing username, email and orcid of the user/owner
+ , and any other information stored in the global or local config files.
  """
- runtime_info = {"owner_name": owner_name, "owner_email": owner_email, "owner_orcid": owner_orcid}
+ runtime_info = {
+ "owner_name": owner_name,
+ "owner_email": owner_email,
+ "owner_orcid": owner_orcid,
+ }
  for key, value in copy(runtime_info).items():
  if value is None or value == "":
  del runtime_info[key]
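A usage sketch of get_user_info based on the docstring above; the config values are placeholders.

    from diffpy.utils.tools import get_user_info

    # ~/diffpyconfig.json, following the template in the docstring:
    # {
    #     "owner_name": "Jane Doe",
    #     "owner_email": "jane.doe@example.com",
    #     "owner_orcid": "0000-0000-0000-0000"
    # }
    user_info = get_user_info(owner_name="J. Doe")  # arguments override config values
    print(user_info.get("owner_email"))             # falls back to the config entry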
@@ -100,24 +109,27 @@ def check_and_build_global_config(skip_config_creation=False):
  """Check for a global diffpu config file in user's home directory and
  creates one if it is missing.

- The file it looks for is called diffpyconfig.json. This can contain anything in json format, but
- minimally contains information about the computer owner. The information is used
- when diffpy objects are created and saved to files or databases to retain ownership information
- of datasets. For example, it is used by diffpy.utils.tools.get_user_info().
+ The file it looks for is called diffpyconfig.json. This can contain
+ anything in json format, but minimally contains information about the
+ computer owner. The information is used when diffpy objects are created
+ and saved to files or databases to retain ownership information of
+ datasets. For example, it is used by diffpy.utils.tools.get_user_info().

- If the function finds no config file in the user's home directory it interrupts execution
- and prompts the user for name, email, and orcid information. It then creates the config file
- with this information inside it.
+ If the function finds no config file in the user's home directory it
+ interrupts execution and prompts the user for name, email, and orcid
+ information. It then creates the config file with this information
+ inside it.

  The function returns True if the file exists and False otherwise.

- If you would like to check for a file but not run the file creation workflow you can set
- the optional argument skip_config_creation to True.
+ If you would like to check for a file but not run the file creation
+ workflow you can set the optional argument skip_config_creation to True.

  Parameters
  ----------
  skip_config_creation : bool, optional, default is False.
- The boolean that will override the creation workflow even if no config file exists.
+ The boolean that will override the creation workflow even if no
+ config file exists.

  Returns
  -------
@@ -132,16 +144,19 @@ def check_and_build_global_config(skip_config_creation=False):
  if skip_config_creation:
  return config_exists
  intro_text = (
- "No global configuration file was found containing information about the user to "
- "associate with the data.\n By following the prompts below you can add your name "
- "and email to this file on the current "
- "computer and your name will be automatically associated with subsequent diffpy data by default.\n"
- "This is not recommended on a shared or public computer. "
- "You will only have to do that once.\n"
- "For more information, please refer to www.diffpy.org/diffpy.utils/examples/toolsexample.html"
+ "No global configuration file was found containing information about "
+ "the user to associate with the data.\n By following the prompts "
+ "below you can add your name and email to this file on the current "
+ "computer and your name will be automatically associated with "
+ "subsequent diffpy data by default.\n This is not recommended on a "
+ "shared or public computer. You will only have to do that once.\n "
+ "For more information, please refer to www.diffpy.org/diffpy.utils/ "
+ "examples/toolsexample.html "
  )
  print(intro_text)
- username = input("Please enter the name you would want future work to be credited to: ").strip()
+ username = input(
+ "Please enter the name you would want future work to be credited to: "
+ ).strip()
  email = input("Please enter your email: ").strip()
  orcid = input("Please enter your orcid ID if you know it: ").strip()
  config = {
@@ -154,14 +169,15 @@ def check_and_build_global_config(skip_config_creation=False):
  with open(config_path, "w") as f:
  f.write(json.dumps(config))
  outro_text = (
- f"The config file at {Path().home() / 'diffpyconfig.json'} has been created. "
- f"The values {config} were entered.\n"
- f"These values will be inserted as metadata with your data in apps that use "
- f"diffpy.get_user_info(). If you would like to update these values, either "
- f"delete the config file and this workflow will rerun next time you run this "
- f"program. Or you may open the config file in a text editor and manually edit the"
- f"entries. For more information, see: "
- f"https://diffpy.github.io/diffpy.utils/examples/tools_example.html"
+ f"The config file at {Path().home() / 'diffpyconfig.json'} has "
+ f"been created. The values {config} were entered.\n These values "
+ "will be inserted as metadata with your data in apps that use "
+ "diffpy.get_user_info(). If you would like to update these values "
+ ", either delete the config file and this workflow will rerun "
+ "next time you run this program. Or you may open the config "
+ "file in a text editor and manually edit the entries. For more "
+ "information, see: "
+ "https://diffpy.github.io/diffpy.utils/examples/tools_example.html"
  )
  print(outro_text)
  config_exists = True
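A short sketch of the skip_config_creation option documented above: it checks for the global config file without starting the interactive prompts.

    from diffpy.utils.tools import check_and_build_global_config

    config_exists = check_and_build_global_config(skip_config_creation=True)
    print(config_exists)  # True if ~/diffpyconfig.json already exists, else False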
@@ -171,13 +187,15 @@ def check_and_build_global_config(skip_config_creation=False):
  def get_package_info(package_names, metadata=None):
  """Fetch package version and updates it into (given) metadata.

- Package info stored in metadata as {'package_info': {'package_name': 'version_number'}}.
+ Package info stored in metadata as
+ {'package_info': {'package_name': 'version_number'}}.

  ----------
  package_name : str or list
  The name of the package(s) to retrieve the version number for.
  metadata : dict
- The dictionary to store the package info. If not provided, a new dictionary will be created.
+ The dictionary to store the package info. If not provided, a new
+ dictionary will be created.

  Returns
  -------
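A usage sketch of get_package_info; the version numbers in the comment are illustrative placeholders, not actual output.

    from diffpy.utils.tools import get_package_info

    metadata = get_package_info(["numpy", "diffpy.utils"])
    # e.g. {'package_info': {'numpy': '<version>', 'diffpy.utils': '<version>'}}
    print(metadata["package_info"])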
@@ -202,18 +220,22 @@ def get_density_from_cloud(sample_composition, mp_token=""):
  It is not implemented yet.
  """
  raise NotImplementedError(
- "So sorry, density computation from composition is not implemented right now. "
+ "So sorry, density computation from composition is not implemented "
+ "right now. "
  "We hope to have this implemented in the next release. "
  "Please rerun specifying a sample mass density."
  )


- def compute_mu_using_xraydb(sample_composition, energy, sample_mass_density=None, packing_fraction=None):
+ def compute_mu_using_xraydb(
+ sample_composition, energy, sample_mass_density=None, packing_fraction=None
+ ):
  """Compute the attenuation coefficient (mu) using the XrayDB database.

  Computes mu based on the sample composition and energy.
  User should provide a sample mass density or a packing fraction.
- If neither density nor packing fraction is specified, or if both are specified, a ValueError will be raised.
+ If neither density nor packing fraction is specified,
+ or if both are specified, a ValueError will be raised.
  Reference: https://xraypy.github.io/XrayDB/python.html#xraydb.material_mu.

  Parameters
@@ -237,13 +259,24 @@ def compute_mu_using_xraydb(sample_composition, energy, sample_mass_density=None
  sample_mass_density is not None and packing_fraction is not None
  ):
  raise ValueError(
- "You must specify either sample_mass_density or packing_fraction, but not both. "
+ "You must specify either sample_mass_density or packing_fraction, "
+ "but not both. "
  "Please rerun specifying only one."
  )
  if packing_fraction is not None:
- sample_mass_density = get_density_from_cloud(sample_composition) * packing_fraction
+ sample_mass_density = (
+ get_density_from_cloud(sample_composition) * packing_fraction
+ )
  energy_eV = energy * 1000
- mu = material_mu(sample_composition, energy_eV, density=sample_mass_density, kind="total") / 10
+ mu = (
+ material_mu(
+ sample_composition,
+ energy_eV,
+ density=sample_mass_density,
+ kind="total",
+ )
+ / 10
+ )
  return mu
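A hedged usage sketch of compute_mu_using_xraydb. The energy unit (keV, converted to eV via energy * 1000) follows the code above; the composition string, the density value, and its unit are assumptions chosen for illustration.

    from diffpy.utils.tools import compute_mu_using_xraydb

    # supply exactly one of sample_mass_density or packing_fraction
    mu = compute_mu_using_xraydb("ZrO2", energy=17.45, sample_mass_density=1.20)
    print(mu)  # attenuation coefficient for the given composition and energy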
@@ -257,8 +290,10 @@ def _model_function(z, diameter, z0, I0, mud, slope):
  """
  Compute the model function with the following steps:
  1. Let dz = z-z0, so that dz is centered at 0
- 2. Compute length l that is the effective length for computing intensity I = I0 * e^{-mu * l}:
- - For dz within the capillary diameter, l is the chord length of the circle at position dz
+ 2. Compute length l that is the effective length for computing intensity
+ I = I0 * e^{-mu * l}:
+ - For dz within the capillary diameter, l is the chord length of
+ the circle at position dz
  - For dz outside this range, l = 0
  3. Apply a linear adjustment to I0 by taking I0 as I0 - slope * z
  """
@@ -267,7 +302,11 @@ def _model_function(z, diameter, z0, I0, mud, slope):
  dz = z - z0
  length = np.piecewise(
  dz,
- [dz < min_radius, (min_radius <= dz) & (dz <= max_radius), dz > max_radius],
+ [
+ dz < min_radius,
+ (min_radius <= dz) & (dz <= max_radius),
+ dz > max_radius,
+ ],
  [0, lambda dz: 2 * np.sqrt((diameter / 2) ** 2 - dz**2), 0],
  )
  return (I0 - slope * z) * np.exp(-mud / diameter * length)
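A standalone sketch of the chord-length model that _model_function implements, with made-up parameter values; np.where with a clamped square root stands in for the np.piecewise construction above.

    import numpy as np

    diameter, z0, I0, mud, slope = 1.0, 0.0, 100.0, 2.5, 0.1
    z = np.linspace(-1.5, 1.5, 301)
    dz = z - z0
    # chord length of the capillary cross-section at dz; zero outside the capillary
    length = np.where(
        np.abs(dz) <= diameter / 2,
        2 * np.sqrt(np.maximum((diameter / 2) ** 2 - dz**2, 0.0)),
        0.0,
    )
    intensity = (I0 - slope * z) * np.exp(-mud / diameter * length)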
@@ -278,12 +317,16 @@ def _extend_z_and_convolve(z, diameter, half_slit_width, z0, I0, mud, slope):
  convolution), then perform convolution (note that the convolved I values
  are the same as modeled I values if slit width is close to 0)"""
  n_points = len(z)
- z_left_pad = np.linspace(z.min() - n_points * (z[1] - z[0]), z.min(), n_points)
- z_right_pad = np.linspace(z.max(), z.max() + n_points * (z[1] - z[0]), n_points)
+ z_left_pad = np.linspace(
+ z.min() - n_points * (z[1] - z[0]), z.min(), n_points
+ )
+ z_right_pad = np.linspace(
+ z.max(), z.max() + n_points * (z[1] - z[0]), n_points
+ )
  z_extended = np.concatenate([z_left_pad, z, z_right_pad])
  I_extended = _model_function(z_extended, diameter, z0, I0, mud, slope)
  kernel = _top_hat(z_extended - z_extended.mean(), half_slit_width)
- I_convolved = I_extended # this takes care of the case where slit width is close to 0
+ I_convolved = I_extended # this takes care of the case where slit width is close to 0 # noqa: E501
  if kernel.sum() != 0:
  kernel /= kernel.sum()
  I_convolved = convolve(I_extended, kernel, mode="same")
@@ -296,7 +339,9 @@ def _objective_function(params, z, observed_data):
  observed/experimental data by minimizing the sum of squared residuals
  between the observed data and the convolved model data."""
  diameter, half_slit_width, z0, I0, mud, slope = params
- convolved_model_data = _extend_z_and_convolve(z, diameter, half_slit_width, z0, I0, mud, slope)
+ convolved_model_data = _extend_z_and_convolve(
+ z, diameter, half_slit_width, z0, I0, mud, slope
+ )
  residuals = observed_data - convolved_model_data
  return np.sum(residuals**2)

@@ -304,16 +349,27 @@ def _objective_function(params, z, observed_data):
  def _compute_single_mud(z_data, I_data):
  """Perform dual annealing optimization and extract the parameters."""
  bounds = [
- (1e-5, z_data.max() - z_data.min()), # diameter: [small positive value, upper bound]
- (0, (z_data.max() - z_data.min()) / 2), # half slit width: [0, upper bound]
+ (
+ 1e-5,
+ z_data.max() - z_data.min(),
+ ), # diameter: [small positive value, upper bound]
+ (
+ 0,
+ (z_data.max() - z_data.min()) / 2,
+ ), # half slit width: [0, upper bound]
  (z_data.min(), z_data.max()), # z0: [min z, max z]
- (1e-5, I_data.max()), # I0: [small positive value, max observed intensity]
+ (
+ 1e-5,
+ I_data.max(),
+ ), # I0: [small positive value, max observed intensity]
  (1e-5, 20), # muD: [small positive value, upper bound]
  (-100000, 100000), # slope: [lower bound, upper bound]
  ]
  result = dual_annealing(_objective_function, bounds, args=(z_data, I_data))
  diameter, half_slit_width, z0, I0, mud, slope = result.x
- convolved_fitted_signal = _extend_z_and_convolve(z_data, diameter, half_slit_width, z0, I0, mud, slope)
+ convolved_fitted_signal = _extend_z_and_convolve(
+ z_data, diameter, half_slit_width, z0, I0, mud, slope
+ )
  residuals = I_data - convolved_fitted_signal
  rmse = np.sqrt(np.mean(residuals**2))
  return mud, rmse
@@ -325,11 +381,13 @@ def compute_mud(filepath):

  This function loads z-scan data and fits it to a model
  that convolves a top-hat function with I = I0 * e^{-mu * l}.
- The fitting procedure is run multiple times, and we return the best-fit parameters based on the lowest rmse.
+ The fitting procedure is run multiple times, and we return the best-fit
+ parameters based on the lowest rmse.

  The full mathematical details are described in the paper:
- An ad hoc Absorption Correction for Reliable Pair-Distribution Functions from Low Energy x-ray Sources,
- Yucong Chen, Till Schertenleib, Andrew Yang, Pascal Schouwink, Wendy L. Queen and Simon J. L. Billinge,
+ An ad hoc Absorption Correction for Reliable Pair-Distribution Functions
+ from Low Energy x-ray Sources, Yucong Chen, Till Schertenleib, Andrew Yang
+ , Pascal Schouwink, Wendy L. Queen and Simon J. L. Billinge,
  in preparation.

  Parameters
@@ -343,5 +401,8 @@ def compute_mud(filepath):
  The best-fit mu*D value.
  """
  z_data, I_data = loadData(filepath, unpack=True)
- best_mud, _ = min((_compute_single_mud(z_data, I_data) for _ in range(20)), key=lambda pair: pair[1])
+ best_mud, _ = min(
+ (_compute_single_mud(z_data, I_data) for _ in range(20)),
+ key=lambda pair: pair[1],
+ )
  return best_mud
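A usage sketch of compute_mud; the file path is a placeholder for a two-column z-scan data file.

    from diffpy.utils.tools import compute_mud

    mud = compute_mud("zscan.xy")        # placeholder path to z-scan data
    print(f"best-fit muD: {mud:.3f}")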
@@ -4,18 +4,23 @@ from copy import copy
  import numpy as np

  wavelength_warning_emsg = (
- "No wavelength has been specified. You can continue to use the DiffractionObject, but "
- "some of its powerful features will not be available. "
- "To specify a wavelength, if you have do = DiffractionObject(xarray, yarray, 'tth'), "
- "you may set do.wavelength = 1.54 for a wavelength of 1.54 angstroms."
+ "No wavelength has been specified. You can continue to use the "
+ "DiffractionObject, but some of its powerful features will not be "
+ "available. To specify a wavelength, if you have "
+ "do = DiffractionObject(xarray, yarray, 'tth'), you may set "
+ "do.wavelength = 1.54 for a wavelength of 1.54 angstroms. "
+ )
+ invalid_tth_emsg = (
+ "Two theta exceeds 180 degrees. Please check the input values for errors."
  )
- invalid_tth_emsg = "Two theta exceeds 180 degrees. Please check the input values for errors."
  invalid_q_or_d_or_wavelength_emsg = (
- "The supplied input array and wavelength will result in an impossible two-theta. "
- "Please check these values and re-instantiate the DiffractionObject with correct values."
+ "The supplied input array and wavelength will result in an impossible "
+ "two-theta. Please check these values and re-instantiate the "
+ "DiffractionObject with correct values. "
  )
  inf_output_imsg = (
- "INFO: The largest output value in the array is infinite. This is allowed, but it will not be plotted."
+ "INFO: The largest output value in the array is infinite. "
+ "This is allowed, but it will not be plotted."
  )

@@ -73,7 +78,8 @@ def q_to_tth(q, wavelength):
  def tth_to_q(tth, wavelength):
  r"""Helper function to convert two-theta to q on independent variable axis.

- If wavelength is missing, returns independent variable axis as integer indexes.
+ If wavelength is missing, returns independent variable axis as integer
+ indexes.

  By definition the relationship is:

@@ -100,7 +106,8 @@ def tth_to_q(tth, wavelength):
  -------
  q : ndarray
  The 1D array of :math:`q` values np.array([qs]).
- The units for the q-values are the inverse of the units of the provided wavelength.
+ The units for the q-values are the inverse of the units of the
+ provided wavelength.
  """
  tth.astype(float)
  if np.any(np.deg2rad(tth) > np.pi):
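A sketch of the tth-to-q relation this docstring refers to, q = 4*pi*sin(tth/2)/wavelength, which is consistent with the tth_to_d formula later in this diff. The module path in the import is an assumption, since the file name is not shown in this hunk.

    import numpy as np
    from diffpy.utils.transforms import tth_to_q  # assumed module location

    tth = np.array([0.0, 30.0, 60.0, 90.0])  # degrees
    wavelength = 1.54                        # angstroms
    q_manual = 4 * np.pi * np.sin(np.deg2rad(tth) / 2) / wavelength
    q = tth_to_q(tth, wavelength)            # expected to agree with q_manual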
@@ -139,7 +146,8 @@ def q_to_d(q):
  def tth_to_d(tth, wavelength):
  r"""Helper function to convert two-theta to d on independent variable axis.

- The formula is .. math:: d = \frac{\lambda}{2 \sin\left(\frac{2\theta}{2}\right)}.
+ The formula is ..
+ math:: d = \frac{\lambda}{2 \sin\left(\frac{2\theta}{2}\right)}.

  Here we convert tth to q first, then to d.

@@ -191,7 +199,8 @@ def d_to_q(d):
  def d_to_tth(d, wavelength):
  r"""Helper function to convert d to two-theta on independent variable axis.

- The formula is .. math:: 2\theta = 2 \arcsin\left(\frac{\lambda}{2d}\right).
+ The formula is ..
+ math:: 2\theta = 2 \arcsin\left(\frac{\lambda}{2d}\right).

  Here we convert d to q first, then to tth.
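A round-trip sketch of the two formulas above; the import path is again an assumption since the file name is not shown in this diff.

    import numpy as np
    from diffpy.utils.transforms import d_to_tth, tth_to_d  # assumed module location

    wavelength = 1.54
    tth = np.array([10.0, 45.0, 90.0])
    d = tth_to_d(tth, wavelength)        # d = lambda / (2 sin(tth / 2))
    tth_back = d_to_tth(d, wavelength)   # 2theta = 2 arcsin(lambda / (2 d))
    print(np.allclose(tth, tth_back))    # should be True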
@@ -1,8 +1,9 @@
  def is_number(string):
  """Check if the provided string can be converted to a float.

- Since integers can be converted to floats, this function will return True for integers as well.
- Hence, we can use this function to check if a string is a number.
+ Since integers can be converted to floats, this function will return True
+ for integers as well. Hence, we can use this function to check if a
+ string is a number.

  Parameters
  ----------
diffpy/utils/version.py CHANGED
@@ -1,10 +1,10 @@
  #!/usr/bin/env python
  ##############################################################################
  #
- # (c) 2024 The Trustees of Columbia University in the City of New York.
+ # (c) 2025 The Trustees of Columbia University in the City of New York.
  # All rights reserved.
  #
- # File coded by: Billinge Group members and community contributors.
+ # File coded by: Simon Billinge, Billinge Group members.
  #
  # See GitHub contributions for a more detailed list of contributors.
  # https://github.com/diffpy/diffpy.utils/graphs/contributors
@@ -21,3 +21,5 @@
  from importlib.metadata import version

  __version__ = version("diffpy.utils")
+
+ # End of file