DiadFit-1.0.10-py3-none-any.whl → DiadFit-1.0.12-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
DiadFit/_version.py CHANGED
@@ -5,4 +5,4 @@
  # 1) we don't load dependencies by storing it in __init__.py
  # 2) we can import it in setup.py for the same reason
  # 3) we can import it into your module
- __version__ = '1.0.10'
+ __version__ = '1.0.12'
DiadFit/argon_lines.py CHANGED
@@ -623,5 +623,324 @@ def calculate_Ar_line_positions(wavelength=532.05, cut_off_intensity=2000):
  return df_Ar_r


+ ## Wrappers around the Ne-line functions: identical logic, with 'Ne' renamed to 'Ar'.
+
+ def loop_Ar_lines(*, files, spectra_path, filetype, config_ID_peaks, config, df_fit_params, prefix, plot_figure=True, print_df=False, const_params=True):
+     # Call the Neon line processing function
+     df_ne = loop_Ne_lines(
+         files=files,
+         spectra_path=spectra_path,
+         filetype=filetype,
+         config_ID_peaks=config_ID_peaks,
+         config=config,
+         df_fit_params=df_fit_params,
+         prefix=prefix,
+         plot_figure=plot_figure,
+         print_df=print_df,
+         const_params=const_params
+     )
+
+     # Rename columns by replacing 'Ne' with 'Ar'
+     df_ar = df_ne.rename(columns=lambda col: col.replace('Ne', 'Ar'))
+
+     return df_ar
+
+
+ def Argon_id_config(*, height, distance, prominence, width, threshold,
+                     peak1_cent, peak2_cent, n_peaks,
+                     exclude_range_1=None, exclude_range_2=None):
+     return Neon_id_config(
+         height=height,
+         distance=distance,
+         prominence=prominence,
+         width=width,
+         threshold=threshold,
+         peak1_cent=peak1_cent,
+         peak2_cent=peak2_cent,
+         n_peaks=n_peaks,
+         exclude_range_1=exclude_range_1,
+         exclude_range_2=exclude_range_2
+     )
+
+ def identify_Ar_lines(*, path, filename, filetype, config, print_df=False):
+     return identify_Ne_lines(
+         path=path,
+         filename=filename,
+         filetype=filetype,
+         config=config,
+         print_df=print_df
+     )
+
+ def Ar_peak_config(**kwargs):
+     return Ne_peak_config(**kwargs)
+
+
+ def fit_Ar_lines(*, Ar, filename, path, prefix=False, config,
+                  Ar_center_1, Ar_center_2,
+                  Ar_prom_1, Ar_prom_2,
+                  const_params=False):
+     # Call the Neon version internally
+     df_ne = fit_Ne_lines(
+         Ne=Ar,
+         filename=filename,
+         path=path,
+         prefix=prefix,
+         config=config,
+         Ne_center_1=Ar_center_1,
+         Ne_center_2=Ar_center_2,
+         Ne_prom_1=Ar_prom_1,
+         Ne_prom_2=Ar_prom_2,
+         const_params=const_params
+     )
+
+     # Rename output columns from 'Ne' to 'Ar'
+     df_ar = df_ne.rename(columns=lambda col: col.replace('Ne', 'Ar'))
+
+     return df_ar
+
+
+ import matplotlib.pyplot as plt
+
+ def plot_Ar_corrections(df=None, x_axis=None, x_label='index', marker='o', mec='k', mfc='r'):
+     """
+     Plot correction-related information for Ar spectra.
+     Assumes column names include 'Ar_Corr', '1σ_Ar_Corr', etc.
+     """
+     if x_axis is not None:
+         x = x_axis
+     else:
+         x = df.index
+
+     fig, ((ax5, ax6), (ax3, ax4), (ax1, ax2)) = plt.subplots(3, 2, figsize=(10, 12))
+
+     # Peak 1
+     ax5.errorbar(x, df['pk1_peak_cent'], xerr=0, yerr=df['error_pk1'].fillna(0).infer_objects(),
+                  fmt='o', ecolor='k', elinewidth=0.8, mfc='b', ms=5, mec='k', capsize=3)
+     ax5.set_xlabel(x_label)
+     ax5.set_ylabel('Peak 1 center')
+
+     # Peak 2
+     ax6.plot(x, df['pk2_peak_cent'], marker, mec=mec, mfc=mfc)
+     ax6.errorbar(x, df['pk2_peak_cent'], xerr=0, yerr=df['error_pk2'].fillna(0).infer_objects(),
+                  fmt='o', ecolor='k', elinewidth=0.8, mfc=mfc, ms=5, mec=mec, capsize=3)
+     ax6.set_xlabel(x_label)
+     ax6.set_ylabel('Peak 2 center')
+
+     # Correction vs. Peak 2
+     ax3.errorbar(df['Ar_Corr'], df['pk2_peak_cent'],
+                  xerr=df['1σ_Ar_Corr'].fillna(0).infer_objects(),
+                  yerr=df['error_pk2'].fillna(0).infer_objects(),
+                  fmt='o', ecolor='k', elinewidth=0.8, mfc='b', ms=5, mec='k', capsize=3)
+     ax3.set_xlabel('Ar Correction factor')
+     ax3.set_ylabel('Peak 2 center')
+
+     # Correction vs. Peak 1
+     ax4.errorbar(df['Ar_Corr'], df['pk1_peak_cent'],
+                  xerr=df['1σ_Ar_Corr'].fillna(0).infer_objects(),
+                  yerr=df['error_pk1'].fillna(0).infer_objects(),
+                  fmt='o', ecolor='k', elinewidth=0.8, mfc='b', ms=5, mec='k', capsize=3)
+     ax4.set_xlabel('Ar Correction factor')
+     ax4.set_ylabel('Peak 1 center')
+
+     # Ar Correction vs. x
+     ax1.errorbar(x, df['Ar_Corr'], xerr=0, yerr=df['1σ_Ar_Corr'].fillna(0).infer_objects(),
+                  fmt='o', ecolor='k', elinewidth=0.8, mfc='grey', ms=5, mec='k', capsize=3)
+     ax1.set_ylabel('Ar Correction factor')
+     ax1.set_xlabel(x_label)
+
+     # Ar Correction vs. residuals (only if the column is present)
+     if 'residual_sum' in df.columns:
+         ax2.errorbar(df['residual_sum'], df['Ar_Corr'],
+                      xerr=0, yerr=df['1σ_Ar_Corr'].fillna(0).infer_objects(),
+                      fmt='o', ecolor='k', elinewidth=0.8, mfc='grey', ms=5, mec='k', capsize=3)
+
+     ax2.set_xlabel('Sum of pk1 and pk2 residual')
+     ax2.set_ylabel('Ar Correction factor')
+
+     for ax in [ax1, ax2, ax3, ax4, ax5, ax6]:
+         ax.ticklabel_format(useOffset=False)
+
+     plt.setp(ax3.get_xticklabels(), rotation=30, ha='right')
+     plt.setp(ax4.get_xticklabels(), rotation=30, ha='right')
+
+     fig.tight_layout()
+     return fig
+
+
+ def filter_Ar_Line_neighbours(*, df_combo=None, Corr_factor=None, number_av=6, offset=0.00005, file_name_filt=None):
+     """
+     Filters out Ar correction factors that deviate by more than `offset` from the
+     local median of neighbouring values (window size set by `number_av`).
+     Optionally excludes specified filenames via `file_name_filt`.
+     """
+     if df_combo is not None:
+         Corr_factor = df_combo['Ar_Corr']
+
+     Corr_factor_Filt = np.zeros(len(Corr_factor), dtype=float)
+     median_loop = np.zeros(len(Corr_factor), dtype=float)
+
+     for i in range(len(Corr_factor)):
+         if i < len(Corr_factor) / 2:
+             median_loop[i] = np.nanmedian(Corr_factor[i:i+number_av])
+         else:
+             median_loop[i] = np.nanmedian(Corr_factor[i-number_av:i])
+
+         if (
+             Corr_factor[i] > (median_loop[i] + offset)
+             or Corr_factor[i] < (median_loop[i] - offset)
+         ):
+             Corr_factor_Filt[i] = np.nan
+         else:
+             Corr_factor_Filt[i] = Corr_factor[i]
+
+     ds = pd.Series(Corr_factor_Filt)
+
+     if file_name_filt is not None:
+         pattern = '|'.join(file_name_filt)
+         mask = df_combo['filename_x'].str.contains(pattern)
+         ds = ds.where(~mask, np.nan)
+
+     return ds
+
+
+ def generate_Ar_corr_model(*, time, Ar_corr, N_poly=3, CI=0.67, bootstrap=False,
+                            std_error=True, N_bootstrap=500,
+                            save_fig=False, pkl_name='polyfit_data_Ar.pkl'):
+     """Generates a polynomial correction model for Ar correction data."""
+
+     x_all = np.array([time])
+
+     if isinstance(Ar_corr, pd.DataFrame):
+         y_all = np.array([Ar_corr['Ar_Corr']])
+         y_err = Ar_corr['1σ_Ar_Corr']
+     else:
+         y_all = Ar_corr
+         y_err = 0 * Ar_corr
+
+     non_nan_indices = ~np.isnan(x_all) & ~np.isnan(y_all)
+     x = x_all[non_nan_indices]
+     y = y_all[non_nan_indices]
+
+     coefficients = np.polyfit(x, y, N_poly)
+     Pf = np.poly1d(coefficients)
+
+     data = {'model': Pf, 'x': x, 'y': y}
+     with open(pkl_name, 'wb') as f:
+         pickle.dump(data, f)
+
+     new_x_plot = np.linspace(np.min(x), np.max(x), 100)
+
+     if bootstrap:
+         Ar_corr2 = calculate_Ar_corr_bootstrap_values(
+             pickle_str=pkl_name,
+             new_x=pd.Series(new_x_plot),
+             N_poly=N_poly,
+             CI=CI,
+             N_bootstrap=N_bootstrap
+         )
+     elif std_error:
+         Ar_corr2 = calculate_Ar_corr_std_err_values(
+             pickle_str=pkl_name,
+             new_x=pd.Series(new_x_plot),
+             CI=CI
+         )
+
+     fig, ax1 = plt.subplots(1, 1, figsize=(10, 5))
+     ax1.errorbar(x, y, xerr=0, yerr=y_err, fmt='o', ecolor='k',
+                  elinewidth=0.8, mfc='grey', ms=5, mec='k', capsize=3)
+
+     ax1.plot(new_x_plot, Ar_corr2['preferred_values'], '-k', label='best fit')
+     ax1.plot(new_x_plot, Ar_corr2['lower_values'], ':k', label='lower bound')
+     ax1.plot(new_x_plot, Ar_corr2['upper_values'], ':k', label='upper bound')
+     ax1.plot(x, y, '+r', label='Ar lines')
+
+     ax1.set_xlabel('sec after midnight')
+     ax1.set_ylabel('Ar Corr factor')
+     ax1.ticklabel_format(useOffset=False)
+     ax1.legend()
+
+     suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(N_poly, 'th')
+     ax1.set_title(f"{N_poly}$^{{{suffix}}}$ degree polynomial: {int(CI * 100)}% prediction interval")
+
+     if save_fig:
+         fig.savefig('Ar_line_correction.png')
+
+     return Pf, fig
+
+
+ def calculate_Ar_corr_std_err_values(*, pickle_str, new_x, CI=0.67):
+     with open(pickle_str, 'rb') as f:
+         data = pickle.load(f)
+
+     model = data['model']
+     N_poly = model.order - 1
+     x = data['x']
+     y = data['y']
+
+     new_x_array = np.asarray(new_x)
+     residuals = y - model(x)
+     residual_std = np.std(residuals)
+
+     mean_x = np.mean(x)
+     n = len(x)
+     standard_errors = residual_std * np.sqrt(1 + 1/n + (new_x_array - mean_x)**2 / np.sum((x - mean_x)**2))
+
+     df_dof = len(x) - (N_poly + 1)
+     t_value = t.ppf((1 + CI) / 2, df_dof)
+
+     preferred_values = model(new_x_array)
+     lower_values = preferred_values - t_value * standard_errors
+     upper_values = preferred_values + t_value * standard_errors
+
+     return pd.DataFrame({
+         'time': new_x_array,
+         'preferred_values': preferred_values,
+         'lower_values': lower_values,
+         'upper_values': upper_values
+     })
+
+
+ def calculate_Ar_corr_bootstrap_values(*, pickle_str, new_x, N_poly=3, CI=0.67, N_bootstrap=500):
+     with open(pickle_str, 'rb') as f:
+         data = pickle.load(f)
+
+     Pf = data['model']
+     x = data['x']
+     y = data['y']
+
+     x_values = new_x
+     preferred_values = []
+     lower_values = []
+     upper_values = []
+
+     for xi in x_values:
+         bootstrap_predictions = []
+         for _ in range(N_bootstrap):
+             bootstrap_indices = np.random.choice(len(x), size=len(x), replace=True)
+             bootstrap_x = x[bootstrap_indices]
+             bootstrap_y = y[bootstrap_indices]
+             bootstrap_coefficients = np.polyfit(bootstrap_x, bootstrap_y, N_poly)
+             bootstrap_Pf = np.poly1d(bootstrap_coefficients)
+             bootstrap_predictions.append(bootstrap_Pf(xi))
+
+         bootstrap_predictions_sorted = np.sort(bootstrap_predictions)
+         lower_idx = int(((1 - CI) / 2) * N_bootstrap)
+         upper_idx = int((1 - (1 - CI) / 2) * N_bootstrap)
+
+         preferred_values.append(Pf(xi))
+         lower_values.append(bootstrap_predictions_sorted[lower_idx])
+         upper_values.append(bootstrap_predictions_sorted[upper_idx])
+
+     return pd.DataFrame({
+         'time': x_values,
+         'preferred_values': preferred_values,
+         'lower_values': lower_values,
+         'upper_values': upper_values
+     })
+
+

 
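Taken together, these additions give Ar spectra the same drift-correction workflow the package already has for Ne. The sketch below is a minimal, hypothetical usage example rather than code from the package: the function names come from the diff above, but the path, filename, filetype string, and peak parameters are placeholders to replace with values for your own instrument.

    import DiadFit.argon_lines as ar

    # Placeholder peak-identification settings (illustrative values only)
    config_id = ar.Argon_id_config(height=100, distance=1, prominence=10, width=1,
                                   threshold=0.1, peak1_cent=1117.1, peak2_cent=1447.5,
                                   n_peaks=6)

    # Argon_id_config and identify_Ar_lines delegate directly to their Ne
    # counterparts; loop_Ar_lines and fit_Ar_lines additionally rename
    # 'Ne' to 'Ar' in the returned columns.
    df_peaks = ar.identify_Ar_lines(path='spectra', filename='Ar_001.txt',
                                    filetype='headless_txt', config=config_id)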
DiadFit/densimeters.py CHANGED
@@ -976,7 +976,7 @@ def merge_fit_files(path):
  ## New UC Berkeley using 1220

  def calculate_density_ucb(*, Ne_line_combo='1117_1447', df_combo=None, temp='SupCrit',
-                           CI_split=0.67, CI_neon=0.67, Ne_pickle_str=None, pref_Ne=None, Ne_err=None, corrected_split=None, split_err=None):
+                           CI_split=0.67, CI_neon=0.67, Ne_pickle_str=None, Ar_pickle_str=None, pref_Ne=None, Ne_err=None, corrected_split=None, split_err=None):
      """ This function converts Diad Splitting into CO$_2$ density using the UC Berkeley calibration line
      developed by DeVitre and Wieser in 2023.

@@ -1339,28 +1339,41 @@ CI_split=0.67, CI_neon=0.67, Ne_pickle_str=None, pref_Ne=None, Ne_err=None, cor
      df_merge = df_merge.rename(columns={'filename_x': 'filename'})


- #
- #
-
-     if Ne_pickle_str is not None: # If its not none, have all the columns for Ne
-         cols_to_move = ['filename', 'Density g/cm3', 'σ Density g/cm3','σ Density g/cm3 (from Ne+peakfit)', 'σ Density g/cm3 (from densimeter)',
-                         'Corrected_Splitting', 'Corrected_Splitting_σ',
-                         'Corrected_Splitting_σ_Ne', 'Corrected_Splitting_σ_peak_fit', 'power (mW)', 'Spectral Center']
-         df_merge = df_merge[cols_to_move + [
-             col for col in df_merge.columns if col not in cols_to_move]]
-     elif pref_Ne is not None and df_combo is not None: #If Pref Ne,
-         cols_to_move = ['filename', 'Density g/cm3', 'σ Density g/cm3','σ Density g/cm3 (from Ne+peakfit)', 'σ Density g/cm3 (from densimeter)',
-                         'Corrected_Splitting', 'Corrected_Splitting_σ',
-                         'Corrected_Splitting_σ_Ne', 'Corrected_Splitting_σ_peak_fit']
-         df_merge = df_merge[cols_to_move + [
-             col for col in df_merge.columns if col not in cols_to_move]]
-
+     # Column order depends on whether a Ne or an Ar correction was used.
+
+     if Ne_pickle_str is not None:
+         cols_to_move = [
+             'filename', 'Density g/cm3', 'σ Density g/cm3',
+             'σ Density g/cm3 (from Ne+peakfit)', 'σ Density g/cm3 (from densimeter)',
+             'Corrected_Splitting', 'Corrected_Splitting_σ',
+             'Corrected_Splitting_σ_Ne', 'Corrected_Splitting_σ_peak_fit',
+             'power (mW)', 'Spectral Center'
+         ]
+     elif Ar_pickle_str is not None:
+         cols_to_move = [
+             'filename', 'Density g/cm3', 'σ Density g/cm3',
+             'σ Density g/cm3 (from Ar+peakfit)', 'σ Density g/cm3 (from densimeter)',
+             'Corrected_Splitting', 'Corrected_Splitting_σ',
+             'Corrected_Splitting_σ_Ar', 'Corrected_Splitting_σ_peak_fit',
+             'power (mW)', 'Spectral Center'
+         ]
+     elif pref_Ne is not None and df_combo is not None:
+         cols_to_move = [
+             'filename', 'Density g/cm3', 'σ Density g/cm3',
+             'σ Density g/cm3 (from Ne+peakfit)', 'σ Density g/cm3 (from densimeter)',
+             'Corrected_Splitting', 'Corrected_Splitting_σ',
+             'Corrected_Splitting_σ_Ne', 'Corrected_Splitting_σ_peak_fit'
+         ]
      elif df_combo is None:

          cols_to_move = ['Density g/cm3', 'σ Density g/cm3','σ Density g/cm3 (from Ne+peakfit)', 'σ Density g/cm3 (from densimeter)',
                          'Corrected_Splitting']
-         df_merge = df_merge[cols_to_move + [
-             col for col in df_merge.columns if col not in cols_to_move]]
+
+
+     # Move only the columns that actually exist to the front
+     cols_existing = [col for col in cols_to_move if col in df_merge.columns]
+     df_merge = df_merge[cols_existing + [col for col in df_merge.columns if col not in cols_existing]]
+


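The reordering step added above is worth a note: it collapses three copies of the reindexing logic into one, and it no longer raises a KeyError when a preferred column is absent from df_merge. A self-contained illustration of the idiom, with made-up column names:

    import pandas as pd

    df = pd.DataFrame({'b': [1], 'filename': ['f1'], 'a': [2]})
    cols_to_move = ['filename', 'Density g/cm3']  # 'Density g/cm3' is absent here

    # Keep only the preferred columns that exist, then put them first.
    cols_existing = [c for c in cols_to_move if c in df.columns]
    df = df[cols_existing + [c for c in df.columns if c not in cols_existing]]
    print(df.columns.tolist())  # ['filename', 'b', 'a']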
@@ -2050,4 +2063,123 @@ def apply_and_save_vertical_shift_to_ucb_densimeter(new_x, new_y):
 
 
      return shift
+
+
+ ## Corrected splitting calculator for Argon
+ def propagate_error_split_argon_peakfit(*, df_fits, Ar_corr=None, Ar_err=None, pref_Ar=None):
+     """ This function propagates the errors in your Ar correction model and peak fits by quadrature.
+
+     Parameters
+     -----------------
+     df_fits: pd.DataFrame
+         Dataframe of peak fitting parameters. Must contain columns 'Diad1_cent_err', 'Diad2_cent_err', 'Splitting'
+
+     Choose either:
+
+     Ar_corr: pd.DataFrame (Optional)
+         Dataframe with columns 'upper_values' and 'lower_values', i.e. the upper and lower bounds of the error on the Ar correction model
+
+     Or
+
+     pref_Ar and Ar_err: float, int, pd.Series, np.array
+         A preferred value of the Ar correction factor and its error (e.g. pref_Ar=0.998, Ar_err=0.001). Used for
+         rapid peak fitting before an Ar-line correction model has been developed.
+
+     Returns
+     -----------------
+     two pd.Series: the combined error from the Ar correction model and the peak fits, and the error from the peak fits alone.
+
+     """
+     # Get the error on the Ar correction
+     if isinstance(Ar_corr, pd.DataFrame):
+         Ar_err = (Ar_corr['upper_values'] - Ar_corr['lower_values']) / 2
+         print(np.mean(Ar_err))
+         pref_Ar = Ar_corr['preferred_values']
+
+     elif pref_Ar is not None and Ar_err is not None:
+         print('using fixed values for Ar error and Ar factor')
+     else:
+         raise TypeError('You must either provide Ar_corr as a dataframe, or provide pref_Ar and Ar_err as values.')
+
+     # Get the peak fit errors
+     Diad1_err = df_fits['Diad1_cent_err'].fillna(0).infer_objects()
+     Diad2_err = df_fits['Diad2_cent_err'].fillna(0).infer_objects()
+     split_err = (Diad1_err**2 + Diad2_err**2)**0.5
+
+     # Propagate uncertainty from the Ar correction and the peak fits in quadrature
+     Combo_err = (((df_fits['Splitting'] * Ar_err)**2) + (pref_Ar * split_err)**2)**0.5
+
+     return Combo_err, split_err
+
+ def calculate_Ar_corr_std_err_values(*, pickle_str, new_x, CI=0.67):
+     # Load the model and the data from the pickle file
+     with open(pickle_str, 'rb') as f:
+         data = pickle.load(f)
+
+     Pf = data['model']
+     N_poly = Pf.order - 1
+     x = data['x']
+     y = data['y']
+
+     # Convert new_x to a plain numpy array
+     new_x_array = np.asarray(new_x)
+
+     # Calculate the residuals and their standard deviation
+     residuals = y - Pf(x)
+     residual_std = np.std(residuals)
+
+     # Calculate the standard errors for the new x values
+     mean_x = np.mean(x)
+     n = len(x)
+     standard_errors = residual_std * np.sqrt(1 + 1/n + (new_x_array - mean_x)**2 / np.sum((x - mean_x)**2))
+
+     # Degrees of freedom, and the t value for the given confidence level
+     df_dof = len(x) - (N_poly + 1)
+     t_value = t.ppf((1 + CI) / 2, df_dof)
+
+     # Calculate the prediction intervals
+     preferred_values = Pf(new_x_array)
+     lower_values = preferred_values - t_value * standard_errors
+     upper_values = preferred_values + t_value * standard_errors
+
+     df_out = pd.DataFrame(data={
+         'time': new_x_array,
+         'preferred_values': preferred_values,
+         'lower_values': lower_values,
+         'upper_values': upper_values
+     })
+
+     return df_out
+
+
+ def calculate_corrected_splitting_argon(*, df_combo_c, Ar_pickle_str, CI):
+
+     time = df_combo_c['sec since midnight']
+     Ar_corr = calculate_Ar_corr_std_err_values(pickle_str=Ar_pickle_str, new_x=time, CI=CI)
+
+     Split = df_combo_c['Splitting'] * Ar_corr['preferred_values']
+     df_combo_c['Corrected_Splitting'] = Split
+
+     # Propagate errors from the correction model and the peak fits
+     Split_err, pk_err = propagate_error_split_argon_peakfit(Ar_corr=Ar_corr, df_fits=df_combo_c)
+
+     # Add Ar-specific columns to the DataFrame
+     df_combo_c['Corrected_Splitting_σ'] = Split_err
+     df_combo_c['Corrected_Splitting_σ_Ar'] = (
+         (Ar_corr['upper_values'] * df_combo_c['Splitting'] -
+          Ar_corr['lower_values'] * df_combo_c['Splitting']) / 2
+     )
+     df_combo_c['Corrected_Splitting_σ_peak_fit'] = pk_err
+
+     return df_combo_c
+
 
DiadFit/importing_data_files.py CHANGED
@@ -496,85 +496,176 @@ creation=creation, modification=modification)
  ## Functions to extract things for HORIBA

  ## HORIBA acquisition time
- encode="ISO-8859-1"
- def extract_duration_horiba(*, path, filename):
-     """ This function extracts the duration from a HORIBA file by finding the line starting with #Acq. """
-     fr = open(path+'/'+filename, 'r', encoding=encode)
+ # encode="ISO-8859-1"
+ # def extract_duration_horiba(*, path, filename):
+ #     """ This function extracts the duration from a HORIBA file by finding the line starting with #Acq. """
+ #     fr = open(path+'/'+filename, 'r', encoding=encode)
+ #
+ #     while True:
+ #         l=fr.readline()
+ #         if l.startswith('#Acq.'):
+ #             line=l
+ #             break
+ #     return line
+ #
+ # def extract_accumulations_horiba(*, path, filename):
+ #     """ This function extracts the accumulations from a HORIBA file by finding the line starting with #Accumu. """
+ #     fr = open(path+'/'+filename, 'r', encoding=encode)
+ #
+ #     while True:
+ #         l=fr.readline()
+ #         if l.startswith('#Accumu'):
+ #             line=l
+ #             break
+ #     return line
+ #
+ # def extract_objective_horiba(*, path, filename):
+ #     """ This function extracts the objective used from a HORIBA file by finding the line starting with #Object. """
+ #     fr = open(path+'/'+filename, 'r', encoding=encode)
+ #
+ #     while True:
+ #         l=fr.readline()
+ #         if l.startswith('#Object'):
+ #             line=l
+ #             break
+ #     return line
+ #
+ # def extract_date_horiba(*, path, filename):
+ #     """ This function extracts the date used from a HORIBA file by finding the line starting with #Date. """
+ #     fr = open(path+'/'+filename, 'r', encoding=encode)
+ #
+ #     while True:
+ #         l=fr.readline()
+ #         if l.startswith('#Date'):
+ #             line=l
+ #             break
+ #     return line
+ #
+ # def extract_spectral_center_horiba(*, path, filename):
+ #     """ This function extracts the spectral center used from a HORIBA file by finding the line starting with #Spectro (cm-¹). """
+ #     fr = open(path+'/'+filename, 'r', encoding=encode)
+ #
+ #     while True:
+ #         l=fr.readline()
+ #         if l.startswith('#Spectro (cm-¹)'):
+ #             line=l
+ #             break
+ #     return line
+ #
+ # def extract_24hr_time_horiba(*, path, filename):
+ #     """ This function extracts the 24 hr time from a HORIBA file by finding the line starting with #Acquired. """
+ #     fr = open(path+'/'+filename, 'r', encoding=encode)
+ #
+ #     while True:
+ #         l=fr.readline()
+ #         if l.startswith('#Acquired'):
+ #             line=l
+ #             break
+ #     return line
+ #
+ # def extract_spectraname_horiba(*, path, filename):
+ #     """
+ #     This function extracts the spectral name from HORIBA files
+ #     """
+ #     fr = open(path+'/'+filename, 'r', encoding=encode)
+ #
+ #     while True:
+ #         l=fr.readline()
+ #         if l.startswith('#Title'):
+ #             line=l
+ #             break
+ #     return line
+ #
+ #

-     while True:
-         l=fr.readline()
-         if l.startswith('#Acq.'):
-             line=l
-             break
-     return line
+ #
+ #
+ #
+ #
+ import numpy as np

- def extract_accumulations_horiba(*, path, filename):
-     """ This function extracts the accumulations from a HORIBA file by finding the line starting with #Accumu. """
-     fr = open(path+'/'+filename, 'r', encoding=encode)
+ encode = "ISO-8859-1"

-     while True:
-         l=fr.readline()
-         if l.startswith('#Accumu'):
-             line=l
-             break
-     return line
+ def extract_duration_horiba(*, path, filename):
+     with open(path + '/' + filename, 'r', encoding=encode) as fr:
+         for l in fr:
+             if l.startswith('#Acq.'):
+                 return l
+     return np.nan

- def extract_objective_horiba(*, path, filename):
-     """ This function extracts the objective used from a HORIBA file by finding the line starting with #Object. """
-     fr = open(path+'/'+filename, 'r', encoding=encode)
+ def extract_accumulations_horiba(*, path, filename):
+     with open(path + '/' + filename, 'r', encoding=encode) as fr:
+         for l in fr:
+             if l.startswith('#Accumulations'):
+                 return l
+     return np.nan

-     while True:
-         l=fr.readline()
-         if l.startswith('#Object'):
-             line=l
-             break
-     return line
+ def extract_objective_horiba(*, path, filename):
+     with open(path + '/' + filename, 'r', encoding=encode) as fr:
+         for l in fr:
+             if l.startswith('#Objective'):
+                 return l
+     return np.nan

  def extract_date_horiba(*, path, filename):
-     """ This function extracts the date used from a HORIBA file by finding the line starting with #Date. """
-     fr = open(path+'/'+filename, 'r', encoding=encode)
-
-     while True:
-         l=fr.readline()
-         if l.startswith('#Date'):
-             line=l
-             break
-     return line
+     with open(path + '/' + filename, 'r', encoding=encode) as fr:
+         for l in fr:
+             if l.startswith('#Date'):
+                 return l
+     return np.nan

  def extract_spectral_center_horiba(*, path, filename):
-     """ This function extracts the spectral center used from a HORIBA file by finding the line starting with #Spectro (cm-¹). """
-     fr = open(path+'/'+filename, 'r', encoding=encode)
+     with open(path + '/' + filename, 'r', encoding=encode) as fr:
+         for l in fr:
+             if l.startswith('#Spectro (cm-¹)'):
+                 try:
+                     return float(l.split('=')[1].strip())
+                 except (IndexError, ValueError):
+                     return np.nan
+     return np.nan

-     while True:
-         l=fr.readline()
-         if l.startswith('#Spectro (cm-¹)'):
-             line=l
-             break
-     return line

  def extract_24hr_time_horiba(*, path, filename):
-     """ This function extracts the 24 hr time from a HORIBA file by finding the line starting with #Acquired. """
-     fr = open(path+'/'+filename, 'r', encoding=encode)
-
-     while True:
-         l=fr.readline()
-         if l.startswith('#Acquired'):
-             line=l
-             break
-     return line
+     with open(path + '/' + filename, 'r', encoding=encode) as fr:
+         for l in fr:
+             if l.startswith('#Acquired'):
+                 return l
+     return np.nan

  def extract_spectraname_horiba(*, path, filename):
+     with open(path + '/' + filename, 'r', encoding=encode) as fr:
+         for l in fr:
+             if l.startswith('#Title'):
+                 return l
+     return np.nan
+
+
+ def stitch_metadata_in_loop_horiba(Allfiles, path=None):
+
+     """ Stitches acquisition parameters together from the function extract_acq_params_horiba for multiple files
+     Parameters
+     -------------
+     Allfiles: list
+         List of all file names
+
+     path: str
+         Path where the files are found
+
+     Returns
+     -------------
+     df of acquisition parameters
      """
-     This function extracts the spectral name from HORIBA files
-     """
-     fr = open(path+'/'+filename, 'r', encoding=encode)
+     if path is None:
+         path=os.getcwd()
+
+     df=pd.DataFrame([])
+     for i in tqdm(range(0, len(Allfiles))):
+         file=Allfiles[i]
+         one_file=extract_acq_params_horiba(path=path, filename=file)
+         df=pd.concat([df, one_file], axis=0)
+     df_out=df.reset_index(drop=True)
+     return df_out

-     while True:
-         l=fr.readline()
-         if l.startswith('#Title'):
-             line=l
-             break
-     return line


  def extract_acq_params_horiba(path, filename):
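The rewritten extractors scan the header once and return np.nan when a tag is missing, where the old while True versions would loop forever on such a file. The sketch below shows the '#tag=value' header shape implied by the new extract_spectral_center_horiba parsing; the header content itself is invented for illustration:

    import numpy as np

    encode = "ISO-8859-1"
    header = "#Acq. time (s)=\t30\n#Accumulations=\t3\n#Spectro (cm-¹)=\t1325.0\n"
    with open('example_horiba.txt', 'w', encoding=encode) as f:
        f.write(header)

    def extract_spectral_center(path_to_file):
        # Same scan-and-parse pattern as extract_spectral_center_horiba above
        with open(path_to_file, 'r', encoding=encode) as fr:
            for l in fr:
                if l.startswith('#Spectro (cm-¹)'):
                    try:
                        return float(l.split('=')[1].strip())
                    except (IndexError, ValueError):
                        return np.nan
        return np.nan

    print(extract_spectral_center('example_horiba.txt'))  # 1325.0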
@@ -604,14 +695,10 @@ def extract_acq_params_horiba(path, filename):
      year=int(date.split('.')[2])
      month_name=calendar.month_name[month]
      Day=datetime.strptime(date, "%d.%m.%Y")
-     spec_cen=extract_spectral_center_horiba(path=path, filename=filename)
-     spec=float(spec_cen.split('=')[1])

-     spec_str=extract_spectral_center_horiba(path=path,
+     spec=extract_spectral_center_horiba(path=path,
                                              filename=filename)

-     spec=spec_str.split('\t')[1].split('\n')[0]
-

      time_str=extract_24hr_time_horiba(path=path, filename=filename)
      time=time_str.split(' ')[1].split('\n')[0]
@@ -641,36 +728,8 @@ def extract_acq_params_horiba(path, filename):
 

      return df
-
-
-
-
- def stitch_metadata_in_loop_horiba(AllFiles, path=None):
-
-     """ Stitches acquisition parameters together from the function extract_acq_params_horiba for multiple files
-     Parameters
-     -------------
-     AllFiles: list
-         List of all file names
-
-     path: str
-         Path where files are found
-
-     Returns
-     -------------
-     df of aquisitoin parameters
-     """
-     if path is None:
-         path=os.getcwd()
-
-     df=pd.DataFrame([])
-     for i in tqdm(range(0, len(AllFiles))):
-         file=AllFiles[i]
-         one_file=extract_acq_params_horiba(path=path, filename=file)
-         df=pd.concat([df, one_file], axis=0)
-     df_out=df.reset_index(drop=True)
-     return df_out
-
+
+


  ## Functions to extract metadata from WITEC files (v instrument specific)
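stitch_metadata_in_loop_horiba, relocated above extract_acq_params_horiba in this release, simply concatenates the per-file metadata frames. A hedged usage sketch, assuming the import path below and placeholder file names:

    import DiadFit.importing_data_files as imp

    files = ['spectrum_001.txt', 'spectrum_002.txt']  # placeholder names
    df_meta = imp.stitch_metadata_in_loop_horiba(files, path='my_horiba_data')
    print(df_meta.head())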
DiadFit-1.0.10.dist-info/LICENSE.txt → DiadFit-1.0.12.dist-info/LICENSE.txt CHANGED
@@ -6,6 +6,10 @@ Permission is hereby granted, free of charge, to any person obtaining a copy of
  The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

+ Citing in Main Text
+
+ If you use DiadFit in your study, you must cite it in the main text of your paper. This code took me years to write, and it is presumably saving you a lot of time, so it deserves to actually receive a citation rather than just a description of its use in the supplement. If I do not get credit for my open-source work in a way that counts for promotion/tenure, I will not be able to continue building and maintaining these packages. If you only cite DiadFit in the supplement, you must pay for your use of it at a rate of $500/study. This will support the spare time I spend maintaining these packages.
+
  Special Clause on GUI Usage

  Use of DiadFit in graphical user interfaces (GUIs), including but not limited to applications that provide point-and-click access to DiadFit’s functions, requires prior written permission from the author.
DiadFit-1.0.10.dist-info/METADATA → DiadFit-1.0.12.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: DiadFit
- Version: 1.0.10
+ Version: 1.0.12
  Summary: DiadFit
  Home-page: https://github.com/PennyWieser/DiadFit
  Author: Penny Wieser
DiadFit-1.0.10.dist-info/RECORD → DiadFit-1.0.12.dist-info/RECORD CHANGED
@@ -29,15 +29,15 @@ DiadFit/Mediumrho_polyfit_data_CCMR.pkl,sha256=U6ODSdurqS0-lynm1MG1zktg8NuhYRbrY
  DiadFit/Mediumrho_polyfit_data_CMASS.pkl,sha256=SBy1pIdqCAF9UtB9FLNTuD0-tFyD7swwJppdE2U_FsY,1557
  DiadFit/Psensor.py,sha256=C2xSlgxhUJIKIBDvUp02QaYRs5QsIqjGGRMP25ZLRZ0,10435
  DiadFit/__init__.py,sha256=F-HjhCYKL_U8PfiH8tZ9DUCkxPvo6lAslJS4fyvxkbY,1148
- DiadFit/_version.py,sha256=yrzFCh_DgcJ9iS_fo4-jCpQ0dTsU1x004zEAZPOJWu4,296
- DiadFit/argon_lines.py,sha256=vtzsuDdEgrAmEF9xwpejpFqKV9hKPS1JUYhIl4AfXZ0,7675
+ DiadFit/_version.py,sha256=H5d8goZbkob4s3WN42m3x_S_cdOTG_7oVqE8MEDVSs4,296
+ DiadFit/argon_lines.py,sha256=yMPmNS2b93t6PeULX72QBnNmam-TZUJRQP0lozYl4pw,18305
  DiadFit/cosmicray_filter.py,sha256=a45x2_nmpi9Qcjc_L39UA9JOd1NMorIjtTRGnCdG3MU,23634
  DiadFit/densimeter_fitting.py,sha256=AV5jWHSuIuN-e61chwMiTETa26pQo5drEGorYTkceHo,8308
- DiadFit/densimeters.py,sha256=1eT6XVYtRPbY4WmEgIvbvEYdDkJTvFlIjoqhypdpDKk,76511
+ DiadFit/densimeters.py,sha256=L7q9SmRm1exkCPsSTsbGwACo2afRsReJKyOeaR-FiAw,80982
  DiadFit/density_depth_crustal_profiles.py,sha256=Vvtw3-_xuWIYEuhuDzXstkprluXyBkUcdm9iP7qBwyQ,19754
  DiadFit/diads.py,sha256=gwHWTquJeoJaBYEYjJcJct38j6Bi-GUUsFCPsFgCFzU,179483
  DiadFit/error_propagation.py,sha256=Evka2vsHQmgekO1xft-AkjxvkMdwAVawNVbWSe7SZIQ,50734
- DiadFit/importing_data_files.py,sha256=j7cSEPZ6iKmYnSqYEIcCl7YNdqqkCD56W-4V9T2oWOE,52010
+ DiadFit/importing_data_files.py,sha256=r8Lezr0eXcO6P0nmWlb7K_H5yw9Se54rz-tUaNqJpPY,53775
  DiadFit/lookup_table.csv,sha256=Hs1tmSQ9ArTUDv3ymEXbvnLlPBxYUP0P51dz7xAKk-Q,2946857
  DiadFit/lookup_table_noneg.csv,sha256=HelvewKbBy4cqT2GAqsMo-1ps1lBYqZ-8hCJZWPGfhI,3330249
  DiadFit/molar_gas_proportions.py,sha256=3zc5t037L11w_hCYJqV4Xp4NwVCmGb3gMp1McAhV0TM,9315
@@ -45,8 +45,8 @@ DiadFit/ne_lines.py,sha256=kY_6ThigSc21ONyo38yq1gBhzlyUPYcfbYC50nIoiRs,64091
  DiadFit/relaxfi_PW.py,sha256=vXXW9JjEBRf0UR9p-DJLx8j4Z2ePpUDweoAok-2nMJ0,32119
  DiadFit/relaxifi.py,sha256=DSHAUP0tnkiMrHQgQPBK-9P3cWYmegURKzYOUgdAlos,38569
  DiadFit/smoothed_polyfit_June25_UCB.pkl,sha256=I_AHj8uJVv1lPfir6QZa-EmOzSp8yDAWUZH2ZTE-0KI,2596
- DiadFit-1.0.10.dist-info/LICENSE.txt,sha256=wM112Xn2cOj94pg0LUBgB-7Vs2y8KZ4bzrhIK-R72WE,1707
- DiadFit-1.0.10.dist-info/METADATA,sha256=oA2cVHNgcUtEC8uwK875WtNcdgXHquyZssF0az4591o,1172
- DiadFit-1.0.10.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
- DiadFit-1.0.10.dist-info/top_level.txt,sha256=yZC6OFLVznaFA5kcPlFPkvhKotcVd-YO4bKxZZw3LQE,8
- DiadFit-1.0.10.dist-info/RECORD,,
+ DiadFit-1.0.12.dist-info/LICENSE.txt,sha256=GzU7ouxObfYUKRAXOjoLZtZwb1fAZaiRDLmAz8puSeg,2308
+ DiadFit-1.0.12.dist-info/METADATA,sha256=KmDlQB5Ch3o_vFzlauAFebYtOcGnvHfOk41FrDOesfo,1172
+ DiadFit-1.0.12.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+ DiadFit-1.0.12.dist-info/top_level.txt,sha256=yZC6OFLVznaFA5kcPlFPkvhKotcVd-YO4bKxZZw3LQE,8
+ DiadFit-1.0.12.dist-info/RECORD,,