pydvma 0.9.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pydvma-0.9.7/LICENSE ADDED
@@ -0,0 +1,31 @@
1
+ BSD 3-Clause License
2
+
3
+ Copyright (c) 2020, Tore Butlin
4
+ Contributors: Tore Butlin, Jim Woodhouse, Areeg Emarah, En Yi Tee, Theo Brown, Rick Lupton
5
+
6
+ All rights reserved.
7
+
8
+ Redistribution and use in source and binary forms, with or without
9
+ modification, are permitted provided that the following conditions are met:
10
+
11
+ * Redistributions of source code must retain the above copyright notice, this
12
+ list of conditions and the following disclaimer.
13
+
14
+ * Redistributions in binary form must reproduce the above copyright notice,
15
+ this list of conditions and the following disclaimer in the documentation
16
+ and/or other materials provided with the distribution.
17
+
18
+ * Neither the name of the copyright holder nor the names of its
19
+ contributors may be used to endorse or promote products derived from
20
+ this software without specific prior written permission.
21
+
22
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
26
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
28
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
pydvma-0.9.7/PKG-INFO ADDED
@@ -0,0 +1,101 @@
1
+ Metadata-Version: 2.1
2
+ Name: pydvma
3
+ Version: 0.9.7
4
+ Home-page: https://github.com/torebutlin/pydvma
5
+ Author: Tore Butlin
6
+ Author-email: tb267@cam.ac.uk
7
+ License: BSD 3-Clause License
8
+ License-File: LICENSE
9
+
10
+ # pydvma
11
+
12
+ A Python package for dynamics and vibration measurements and analysis.
13
+
14
+
15
+ ## About pydvma
16
+
17
+ This is a modular library for data measurement and analysis in the context of dynamics and vibration, for use in student laboratory experiments as well as for research projects, developed at Cambridge University Engineering Department.
18
+
19
+ A high-level interface allows straightforward application for common use-cases and a low-level interface provides more control when needed.
20
+
21
+ The aim is for a library that is simple to use and simple to maintain. It is not a full-featured GUI, but when used in conjunction with Jupyter Notebooks it is intended to provide the best of both worlds: interactive tools for common tasks and a command line interface for customisation.
22
+
23
+
24
+ ## Getting started
25
+
26
+ ### Installation
27
+
28
+ The logger is recommended for use with Python 3.10.
29
+
30
+ ```
31
+ pip install pydvma
32
+ ```
33
+
34
+ If you would like soundcard acquisition then also install sounddevice:
35
+ ```
36
+ pip install sounddevice
37
+ ```
38
+
39
+ Or clone this repository and install using:
40
+ ```
41
+ python setup.py install
42
+ ```
43
+
44
+ Alternatively you can use the environment yml file provided:
45
+ ```
46
+ conda env create --name logger --file logger.yml
47
+ conda activate logger
48
+ ```
49
+
50
+ ### Running the logger
51
+
52
+ To get started, open the file:
53
+ ```
54
+ pydvma_template.ipynb
55
+ ```
56
+
57
+ or within a Jupyter Notebook or Python console:
58
+ ```python
59
+ %gui qt
60
+ import pydvma as dvma
61
+ settings = dvma.MySettings()
62
+ osc = dvma.Oscilloscope(settings)
63
+ logger = dvma.Logger(settings)
64
+ ```
65
+
66
+ ## Roadmap
67
+
68
+ At present the library has basic functionality for:
69
+
70
+ - logging data using soundcards or National Instrument DAQs (requires NiDAQmx to be installed from NI, windows only)
71
+ - logging with pre-trigger for impulse response measurements
72
+ - logging with pc generated output (soundcard and NIDAQ)
73
+ - computing frequency domain data
74
+ - computing transfer function data
75
+ - computing sonograms/spectrograms
76
+ - basic modal analysis tools (mode-fitting)
77
+ - saving and plotting data
78
+ - export to Matlab
79
+ - interactive tools for standard acquisition and analysis
80
+ - oscilloscope view of input signals
81
+
82
+ The plan is to include the following functionality:
83
+
84
+ - wider support for import/export
85
+ - more advanced modal analysis tools (e.g. global fitting)
86
+ - extend the range of hardware that can be accessed from this library
87
+
88
+
89
+ ## Contributor guidelines
90
+
91
+ Contributions to this project are welcomed, keeping in mind the project aims above:
92
+
93
+ - If you find a bug, please report using GitHub's issue tracker
94
+
95
+ - For bug-fixes and refinements: please feel free to clone the repository, make edits and create a pull request with a clear description of changes made.
96
+
97
+ - If you would like to make a more significant contribution or change, then please be in contact to outline your suggestion.
98
+
99
+ Please see the documentation for details of the code structure and templates for anticipated applications.
100
+ <!-- pip install git+https://github.com/torebutlin/pydvma.git -->
101
+ <!-- pip install git+https://github.com/js2597/pydvma.git -->
pydvma-0.9.7/README.md ADDED
@@ -0,0 +1,92 @@
1
+ # pydvma
2
+
3
+ A Python package for dynamics and vibration measurements and analysis.
4
+
5
+
6
+ ## About pydvma
7
+
8
+ This is a modular library for data measurement and analysis in the context of dynamics and vibration, for use in student laboratory experiments as well as for research projects, developed at Cambridge University Engineering Department.
9
+
10
+ A high-level interface allows straightforward application for common use-cases and a low-level interface provides more control when needed.
11
+
12
+ The aim is for a library that is simple to use and simple to maintain. It is not a full-featured GUI, but when used in conjunction with Jupyter Notebooks it is intended to provide the best of both worlds: interactive tools for common tasks and a command line interface for customisation.
13
+
14
+
15
+ ## Getting started
16
+
17
+ ### Installation
18
+
19
+ The logger is recommended for use with Python 3.10.
20
+
21
+ ```
22
+ pip install pydvma
23
+ ```
24
+
25
+ If you would like soundcard acquisition then also install sounddevice:
26
+ ```
27
+ pip install sounddevice
28
+ ```
29
+
30
+ Or clone this repository and install using:
31
+ ```
32
+ python setup.py install
33
+ ```
34
+
35
+ Alternatively you can use the environment yml file provided:
36
+ ```
37
+ conda env create --name logger --file logger.yml
38
+ conda activate logger
39
+ ```
40
+
41
+ ### Running the logger
42
+
43
+ To get started, open the file:
44
+ ```
45
+ pydvma_template.ipynb
46
+ ```
47
+
48
+ or within a Jupyter Notebook or Python console:
49
+ ```python
50
+ %gui qt
51
+ import pydvma as dvma
52
+ settings = dvma.MySettings()
53
+ osc = dvma.Oscilloscope(settings)
54
+ logger = dvma.Logger(settings)
55
+ ```
56
+
57
+ ## Roadmap
58
+
59
+ At present the library has basic functionality for:
60
+
61
+ - logging data using soundcards or National Instrument DAQs (requires NiDAQmx to be installed from NI, windows only)
62
+ - logging with pre-trigger for impulse response measurements
63
+ - logging with pc generated output (soundcard and NIDAQ)
64
+ - computing frequency domain data
65
+ - computing transfer function data
66
+ - computing sonograms/spectrograms
67
+ - basic modal analysis tools (mode-fitting)
68
+ - saving and plotting data
69
+ - export to Matlab
70
+ - interactive tools for standard acquisition and analysis
71
+ - oscilloscope view of input signals
72
+
73
+ The plan is to include the following functionality:
74
+
75
+ - wider support for import/export
76
+ - more advanced modal analysis tools (e.g. global fitting)
77
+ - extend the range of hardware that can be accessed from this library
78
+
79
+
80
+ ## Contributor guidelines
81
+
82
+ Contributions to this project are welcomed, keeping in mind the project aims above:
83
+
84
+ - If you find a bug, please report using GitHub's issue tracker
85
+
86
+ - For bug-fixes and refinements: please feel free to clone the repository, make edits and create a pull request with a clear description of changes made.
87
+
88
+ - If you would like to make a more significant contribution or change, then please be in contact to outline your suggestion.
89
+
90
+ Please see the documentation for details of the code structure and templates for anticipated applications.
91
+ <!-- pip install git+https://github.com/torebutlin/pydvma.git -->
92
+ <!-- pip install git+https://github.com/js2597/pydvma.git -->
@@ -0,0 +1,16 @@
1
+ from .gui import Logger, Oscilloscope
2
+ from .options import MySettings, Output_Signal_Settings ,set_plot_colours
3
+ from .file import load_data, save_data, save_fig, export_to_matlab_jwlogger, export_to_matlab, export_to_csv
4
+ # from .oscilloscope import Oscilloscope
5
+ from .acquisition import log_data, output_signal, signal_generator, stream_snapshot
6
+ from .datastructure import DataSet, TimeData, FreqData, CrossSpecData, TfData, SonoData, MetaData, ModalData, update_dataset
7
+ from .testdata import create_test_impulse_data, create_test_impulse_ensemble, create_test_noise_data
8
+ from .plotting import PlotData
9
+ from .analysis import calculate_fft, calculate_cross_spectrum_matrix, calculate_cross_spectra_averaged, clean_impulse
10
+ from .analysis import calculate_tf, calculate_tf_averaged, multiply_by_power_of_iw, best_match, calculate_sonogram
11
+ from .streams import Recorder, Recorder_NI, start_stream, REC, setup_output_NI, setup_output_soundcard, list_available_devices, get_devices_NI, get_devices_soundcard
12
+ from .modal import modal_fit_single_channel, modal_fit_all_channels
13
+ # import faulthandler
14
+ # faulthandler.enable()
15
+ # from .gui_tk_test import Logger
16
+
@@ -0,0 +1,280 @@
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+ Created on Mon Aug 27 17:08:42 2018
4
+
5
+ @author: tb267
6
+ """
7
+
8
+ from . import datastructure
9
+ from . import streams
10
+
11
+ import numpy as np
12
+ import scipy.signal as signal
13
+ import scipy.stats as stats
14
+ import datetime
15
+ import time
16
+
17
+ MESSAGE = ''
18
+
19
#%% Main data acquisition function
def log_data(settings,test_name=None,rec=None, output=None):
    '''
    Logs data according to settings and returns DataSet class.

    Two modes, selected by settings.pretrig_samples:
      * None  -> free-run: sleep for settings.stored_time, then copy the
                 most recent samples from the stream buffer.
      * int   -> pre-triggered: poll until the pretrig channel crosses
                 settings.pretrig_threshold (or timeout), then slice the
                 buffer around the detected sample.

    Args:
        settings: acquisition settings object; fields read here include fs,
            chunk_size, stored_time, device_driver, output_device_driver,
            pretrig_samples/_channel/_threshold/_timeout, use_output_as_ch0.
        test_name: optional label stored in the resulting TimeData.
        rec: optional already-running recorder; when None a new stream is
            started via streams.start_stream.
        output: optional output signal array (samples x channels) to play
            while logging (see output_signal).

    Returns:
        datastructure.DataSet containing one TimeData object.
    '''
    global MESSAGE

    if rec is None:
        streams.start_stream(settings)
        rec = streams.REC
        # NOTE(review): 'rec' is not referenced again below; the code reads
        # the module-level streams.REC throughout, so a caller-supplied
        # recorder is effectively ignored - confirm intended.

    streams.REC.trigger_detected = False

    # Stream is slightly longer than settings.stored_time, so need to add delay
    # from initialisation to allow stream to fill up and prevent zeros at start
    # of logged data.
    time.sleep(2*settings.chunk_size/settings.fs)
    t = datetime.datetime.now()
    timestring = '_'+str(t.year)+'_'+str(t.month)+'_'+str(t.day)+'_at_'+str(t.hour)+'_'+str(t.minute)+'_'+str(t.second)

    # NOTE(review): '== None' should be 'is None' (PEP 8); kept unchanged here.
    if settings.pretrig_samples == None:

        # --- free-run logging: no trigger, just wait out stored_time ---
        MESSAGE = 'Logging data for {} seconds.\n'.format(settings.stored_time)
        print(MESSAGE)

        # basic way to control logging time: won't be precise time from calling function
        # also won't be exactly synced to output signal
        if output is not None:
            s = output_signal(settings,output)
            # rec.write(output.astype('float32'))

        if (settings.device_driver == 'nidaq') or (output is None): # nidaq output is nonblocking, but soundcard is blocking
            time.sleep(settings.stored_time)

        # make copy of data
        stored_time_data_copy = np.copy(streams.REC.stored_time_data)
        number_samples = int(streams.REC.settings.stored_time * streams.REC.settings.fs)

        # keep only the most recent stored_time worth of samples
        stored_time_data_copy = stored_time_data_copy[-number_samples:,:]
        MESSAGE += 'Logging complete.\n'
        print(MESSAGE)

        if output is not None:
            # shut down the output stream/task now that logging is done
            if settings.output_device_driver == 'soundcard':
                s.stop()
                s.close()
            if settings.output_device_driver == 'nidaq':
                s.WaitUntilTaskDone(settings.stored_time+5)
                s.StopTask()

    else:
        # --- pre-triggered logging: wait for threshold crossing ---
        streams.REC.__init__(settings) # zeros buffers so looks for trigger in fresh data
        streams.REC.trigger_detected = False # somehow this can be true even after previous call
        streams.REC.trigger_first_detected_message = True

        MESSAGE = 'Waiting for trigger on channel {}.\n'.format(settings.pretrig_channel)
        print(MESSAGE)

        start_output_flag = True
        t0 = time.time()
        while (time.time()-t0 < settings.pretrig_timeout) and not streams.REC.trigger_detected:
            if (output is not None) and (start_output_flag == True): # start output within loop, but only once!
                time.sleep(1)
                MESSAGE = 'Starting output signal.\n'
                print(MESSAGE)
                s = output_signal(settings,output)
                # rec.write(output.astype('float32'))
                start_output_flag = False

            time.sleep(0.2)
        if (time.time()-t0 > settings.pretrig_timeout):
            MESSAGE = 'Trigger not detected within timeout of {} seconds.\n'.format(settings.pretrig_timeout)
            print(MESSAGE)

        # make copy of data
        stored_time_data_copy = np.copy(streams.REC.stored_time_data)
        # look for the first sample above threshold within the second chunk
        trigger_check = stored_time_data_copy[(settings.chunk_size):(2*settings.chunk_size),settings.pretrig_channel]
        # NOTE(review): if the timeout expired with no trigger, np.where(...)[0]
        # is empty and the [0] index below raises IndexError - confirm intended.
        detected_sample = settings.chunk_size + np.where(np.abs(trigger_check) > settings.pretrig_threshold)[0][0]
        number_samples = int(settings.stored_time * settings.fs)
        start_index = detected_sample - settings.pretrig_samples
        end_index = start_index + number_samples
        streams.REC.trigger_detected = False # don't start stream again until sorted out trigger detection

        stored_time_data_copy = stored_time_data_copy[start_index:end_index,:]

        MESSAGE = 'Logging complete.\n'
        print(MESSAGE)

        if output is not None:
            if settings.output_device_driver == 'soundcard':
                s.stop()
                s.close()
            if settings.output_device_driver == 'nidaq':
                s.WaitUntilTaskDone(settings.stored_time+5)
                s.StopTask()

    # make into dataset
    fs = settings.fs
    n_samp = len(stored_time_data_copy[:,0])
    dt = 1/fs
    t_samp = n_samp*dt  # NOTE(review): unused local
    time_axis = np.linspace(0,(n_samp-1)/n_samp * settings.stored_time,n_samp)

    if (output is not None) and (settings.use_output_as_ch0 == True):
        # prepend the (zero-padded) output signal as the first channel(s),
        # aligned so it starts at the pretrig sample (or 0 in free-run mode)
        stored_output = np.zeros((n_samp,len(output[0,:])))
        n_start = settings.pretrig_samples
        if n_start is None:
            n_start = 0
        n_end = np.copy(n_samp)
        if len(output[:,0]) >= (n_end-n_start):
            stored_output[n_start:n_end,:] = output[:n_end-n_start,:]
        elif len(output[:,0]) < (n_end-n_start):
            stored_output[n_start:len(output[:,0])+n_start,:] = output[:,:]

        stored_time_data_copy = np.concatenate((stored_output,stored_time_data_copy),axis=1)

    timedata = datastructure.TimeData(time_axis,stored_time_data_copy,settings,timestamp=t,timestring=timestring,test_name=test_name)

    dataset = datastructure.DataSet()
    dataset.add_to_dataset(timedata)

    # check for clipping (assumes data normalised to +/-1 full scale - TODO confirm)
    if np.any(np.abs(stored_time_data_copy) > 0.95):
        MESSAGE += 'WARNING: Data may be clipped'
        print(MESSAGE)

    return dataset
153
+
154
+
155
+
156
+ #def log_data_with_output(settings, output,test_name=None, rec=None):
157
+ #
158
+
159
+ # # call log_data function
160
+ # dataset = log_data(settings, test_name, rec)
161
+
162
+ # # call output_signal function
163
+ # output_signal(settings,output)
164
+
165
+ # return dataset
166
+
167
+
168
+
169
def output_signal(settings,output):
    """
    Start playback of 'output' on the configured output device.

    Parameters
    ----------
    settings : object
        Must provide 'output_device_driver' ('soundcard' or 'nidaq'); other
        fields are consumed by the streams setup helpers.
    output : ndarray
        Signal to play, shape (samples, channels).

    Returns
    -------
    The started stream/task object so the caller can stop/close it,
    or None when the driver name is not recognised.
    """
    # Fix: removed unused locals 'sh' and 'T' (computed but never read).
    if settings.output_device_driver == 'soundcard':
        # soundcard playback: write() blocks until the data has been played
        s = streams.setup_output_soundcard(settings)
        data = output.astype('float32')
        s.write(data)
        return s

    elif settings.output_device_driver == 'nidaq':
        # NI-DAQ playback: StartTask() is non-blocking, so the caller is
        # responsible for WaitUntilTaskDone/StopTask later.
        s = streams.setup_output_NI(settings,output)
        s.StartTask()
        return s
    else:
        print('device_driver not recognised')
        return None
188
+
189
+
190
def signal_generator(settings,sig='gaussian',T=1,amplitude=0.1,f=None,selected_channels='all'):
    """
    Creates a signal ready for output to a chosen device.

    Parameters
    ----------
    settings : object
        Must provide 'output_fs' (output sample rate, Hz) and
        'output_channels' (number of output channels).
    sig : str
        One of {'gaussian','uniform','sweep'}.
    T : float
        Signal duration in seconds.
    amplitude : float
        Target level on the normalised 0-1 output scale (rms for 'gaussian',
        peak for 'uniform' and 'sweep').
    f : None, scalar or [f_low, f_high]
        Band-pass filter edges for the random signals, or start/end
        frequencies for the sweep. None means full bandwidth.
    selected_channels : 'all' or sequence of int
        Channels that carry the signal; all other channels stay zero.

    Returns
    -------
    (t, y) : time axis of shape (N,) and signal array of shape
        (N, settings.output_channels).
    """
    global MESSAGE
    if selected_channels == 'all':
        selected_channels = np.arange(0,settings.output_channels)

    # initiate variables
    t = np.arange(0,T,1/settings.output_fs)
    N_per_channel = np.size(t)
    y = np.zeros((N_per_channel,settings.output_channels))

    # raised-cosine fade-in/out window to avoid clicks at start and end
    win = np.ones((N_per_channel,1))
    T_ramp = np.min([T/10,0.1])
    N_ramp = int(T_ramp*settings.output_fs)
    win[0:N_ramp,0] = 0.5*(1-np.cos(np.arange(0,N_ramp)/N_ramp*np.pi))
    win[-N_ramp:,0] = 0.5*(1+np.cos(np.arange(0,N_ramp)/N_ramp*np.pi))

    limit = 1 # generate signals normalised with amplitudes 0-1

    # Create sig. Note 'sig' is choice of signal, while 'signal' is scipy.signal
    if sig == 'gaussian':
        # Fix: removed a redundant np.random.randn(...) assignment that was
        # immediately overwritten by the truncated-normal draw below.
        y[:,selected_channels] = stats.truncnorm.rvs(-limit/amplitude, limit/amplitude, loc=0,scale=amplitude, size=(N_per_channel,np.size(selected_channels)))
        if f is not None:
            # band-limit the noise, then restore the requested rms level
            b,a = signal.butter(3,f,btype='bandpass',fs=settings.output_fs)
            y = signal.filtfilt(b,a,y,axis=0,padtype=None)
            y = amplitude * y / np.sqrt(np.mean(y**2))
        if np.max(np.abs(y)) > limit:
            y = limit * y / np.max(np.abs(y))
            MESSAGE = 'Actual rms output after scaling to avoid clipping is {0:1.3f}'.format(np.sqrt(np.mean(y**2)))
        else:
            MESSAGE = 'Actual rms output is {0:1.3f}'.format(np.sqrt(np.mean(y**2)))

    elif sig == 'uniform':
        y[:,selected_channels] = np.random.uniform(low=-amplitude,high=amplitude,size=(N_per_channel,np.size(selected_channels)))
        if f is not None:
            b,a = signal.butter(3,f,btype='bandpass',fs=settings.output_fs)
            y = signal.filtfilt(b,a,y,axis=0,padtype=None)
            y = amplitude * y / np.sqrt(np.mean(y**2))
    elif sig == 'sweep':
        if f is None:
            f = [0,settings.output_fs/2]

        for ch in selected_channels:
            y[:,ch] = amplitude*signal.chirp(t,f[0],T,f[1])
    else:
        # Fix: quote the option names properly (the original doubled quotes
        # collapsed to unquoted words when printed).
        print("signal type must be one of {'gaussian','uniform','sweep'}")
        y = np.zeros((N_per_channel,settings.output_channels))

    # apply the fade window to every channel
    y = win * y

    # final correction to ensure all signals limited to 0-1
    if np.max(np.abs(y)) > limit:
        y = limit * y / np.max(np.abs(y))
        MESSAGE = 'Actual rms output after scaling to avoid clipping is {0:1.3f}'.format(np.sqrt(np.mean(y**2)))

    return t,y
267
+
268
+
269
def stream_snapshot(rec=None):
    """
    Take a snapshot of a recorder's oscilloscope buffers.

    Parameters
    ----------
    rec : recorder object or None
        The recorder whose osc_time_data / osc_time_axis buffers are copied.
        Defaults to the module-level streams.REC (which is what the original
        code always read).

    Returns
    -------
    datastructure.TimeData built from copies of the oscilloscope buffers,
    tagged with test_name='stream_snapshot'.
    """
    # Fix: honour the 'rec' argument; previously the global streams.REC was
    # always read even when a different recorder was passed in. Defaulting to
    # None keeps existing callers (who pass streams.REC) behaving identically.
    if rec is None:
        rec = streams.REC

    # copy so later stream writes cannot mutate the snapshot
    time_data_copy = np.copy(rec.osc_time_data)
    time_axis_copy = np.copy(rec.osc_time_axis)

    t = datetime.datetime.now()
    timestring = '_'+str(t.year)+'_'+str(t.month)+'_'+str(t.day)+'_at_'+str(t.hour)+'_'+str(t.minute)+'_'+str(t.second)

    time_data = datastructure.TimeData(time_axis_copy,time_data_copy,rec.settings,timestamp=t,timestring=timestring,test_name='stream_snapshot')

    return time_data