DiadFit 0.0.78__py3-none-any.whl → 0.0.80__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -12,7 +12,8 @@ plot_figure=True, fig_i=0, neg_values=True):
 
 
     """ This function propagates uncertainty in reconstruction of melt inclusion CO2 contents
-    by feeding each row into propagate_CO2_in_bubble_ind
+    by feeding each row into propagate_CO2_in_bubble_ind. The returned standard deviation uses the 84th-16th percentile
+    rather than the true standard deviation, as this is better for skewed distributions.
 
     Parameters
     ----------------
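The percentile-based spread described in the new docstring can be illustrated with a minimal standalone sketch (not DiadFit's internal code; the lognormal distribution is just one example of a skewed Monte Carlo output):

```python
import numpy as np

rng = np.random.default_rng(0)
mc_samples = rng.lognormal(mean=0.0, sigma=0.7, size=10_000)  # skewed MC output

# Half the 16th-84th percentile range: equals the true standard deviation
# for a normal distribution, but is far less sensitive to long tails.
sd_percentile = (np.percentile(mc_samples, 84) - np.percentile(mc_samples, 16)) / 2
sd_true = np.std(mc_samples)
print(sd_percentile, sd_true)  # the percentile estimate is smaller for skewed data
```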
@@ -287,6 +288,8 @@ error_melt_dens_kgm3=0, error_type_melt_dens_kgm3='Abs', error_dist_melt_dens_kg
     neg_values: bool
         Default True - whether negative values are removed from the MC simulations. If False, all negative values are replaced with zeros.
 
+
+
     Returns
     ------------------
     pd.DataFrame:
Binary file
Binary file
Binary file
DiadFit/__init__.py CHANGED
@@ -36,6 +36,7 @@ from DiadFit.error_propagation import *
 
 from DiadFit.density_depth_crustal_profiles import *
 from DiadFit.CO2_EOS import *
+from DiadFit.CO2_H2O_EOS import *
 
 from DiadFit.CO2_in_bubble_error import *
 
DiadFit/_version.py CHANGED
@@ -5,4 +5,4 @@
 # 1) we don't load dependencies by storing it in __init__.py
 # 2) we can import it in setup.py for the same reason
 # 3) we can import it into your module
-__version__ = '0.0.78'
+__version__ = '0.0.80'
@@ -236,6 +236,8 @@ def filter_singleray(*,path=None,Diad_files=None,i=None,diad_peaks=None, exclude
         pxdf_filt_pass2_4export[['Wavenumber','Intensity']].to_csv(path+'/'+file.replace('.txt', '')+'_CRR_DiadFit.txt', sep='\t', header=False, index=False)
     if second_pass==False:
         pxdf[['Wavenumber','Intensity']].to_csv(path+'/'+file.replace('.txt', '')+'_CRR_DiadFit.txt', sep='\t', header=False, index=False)
+        if filetype!='headless_txt':
+            pxdf[['Wavenumber','Intensity']].to_csv(path+'/'+file.replace('.txt', '')+'_CRR_DiadFit.txt', sep='\t', header=False, index=False)
 
     # This plots the results if True
     if plot_rays=='rays_only':
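A standalone sketch of the export step this hunk extends (a hypothetical helper, not the package's function; `pxdf` stands for the cosmic-ray-filtered spectrum, and the exact indentation of the added guard is not fully recoverable from the diff):

```python
from pathlib import Path
import pandas as pd

def export_crr_spectrum(pxdf: pd.DataFrame, path: str, file: str, filetype: str) -> None:
    # Output name mirrors the input file, with a '_CRR_DiadFit' suffix
    out = Path(path) / (file.replace('.txt', '') + '_CRR_DiadFit.txt')
    # The new guard skips the export for 'headless_txt' inputs
    if filetype != 'headless_txt':
        pxdf[['Wavenumber', 'Intensity']].to_csv(out, sep='\t', header=False, index=False)
```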
DiadFit/densimeters.py CHANGED
@@ -407,17 +407,26 @@ def calculate_errors_no_densimeter(*, df_combo, Ne_pickle_str='polyfit_data.pkl'
 
 
 
-def calculate_density_cornell(*, df_combo, Ne_pickle_str='polyfit_data.pkl', temp='SupCrit',
-CI_split=0.67, CI_neon=0.67, pref_Ne=None, Ne_err=None):
-    """ This function converts Diad Splitting into CO$_2$ density using the Raman at CCMR, Cornell, run by E. Gazel.
-    It is currently only supported for measurements performed at 37C (e.g. SupCrit).
-    However, if you need the lower T 24C version, we just need to hunt down the calibration data to save the densimeter pkl.
-    It fully propagates uncertainty from the densimeter, the peak fitting and the Ne correction model
+def calculate_density_cornell(*, lab='CMASS', df_combo=None, temp='SupCrit',
+CI_split=0.67, CI_neon=0.67, Ne_pickle_str=None, pref_Ne=None, Ne_err=None, corrected_split=None, split_err=None):
+    """ This function converts Diad Splitting into CO$_2$ density using the Cornell densimeters. Use lab='CCMR' for CCMR and lab='CMASS' for Esteban Gazel's lab.
 
     Parameters
     -------------
+
+    lab: str. 'CMASS' or 'CCMR'
+        Name of the lab where the analysis was performed.
+    Either:
+
     df_combo: pandas DataFrame
         data frame of peak fitting information
+
+    Or:
+    corrected_split: pd.Series
+        Corrected splitting (cm-1)
+
+    split_err: float, int
+        Error on corrected splitting
 
     temp: str
         'SupCrit' if measurements done at 37C
@@ -430,6 +439,8 @@ CI_split=0.67, CI_neon=0.67, pref_Ne=None, Ne_err=None):
     CI_split: float
         Default 0.67. Confidence interval to use, e.g. 0.67 returns 1 sigma uncertainties. If you use another number,
         note the column headings will still say sigma.
+
+
 
 
     Either
@@ -450,43 +461,41 @@ CI_split=0.67, CI_neon=0.67, pref_Ne=None, Ne_err=None):
     --------------
     pd.DataFrame
         Preferred Density (based on different equations being merged), and intermediate calculations
-
-
-
-
     """
-    df_combo_c=df_combo.copy()
-    time=df_combo_c['sec since midnight']
-
-    if Ne_pickle_str is not None:
-
-        # Calculating the upper and lower values for Ne to get that error
-        Ne_corr=calculate_Ne_corr_std_err_values(pickle_str=Ne_pickle_str,
-        new_x=time, CI=CI_neon)
-        # Extracting preferred correction values
-        pref_Ne=Ne_corr['preferred_values']
-        Split_err, pk_err=propagate_error_split_neon_peakfit(Ne_corr=Ne_corr, df_fits=df_combo_c)
+    if corrected_split is not None:
+        Split=corrected_split
+    if df_combo is not None:
+        df_combo_c=df_combo.copy()
+        time=df_combo_c['sec since midnight']
 
-    df_combo_c['Corrected_Splitting_σ']=Split_err
-    df_combo_c['Corrected_Splitting_σ_Ne']=(Ne_corr['upper_values']*df_combo_c['Splitting']-Ne_corr['lower_values']*df_combo_c['Splitting'])/2
-    df_combo_c['Corrected_Splitting_σ_peak_fit']=pk_err
+        if Ne_pickle_str is not None:
 
-    # If using a single value for quick dirty fitting
-    else:
-        Split_err, pk_err=propagate_error_split_neon_peakfit(df_fits=df_combo_c, Ne_err=Ne_err, pref_Ne=pref_Ne)
+            # Calculating the upper and lower values for Ne to get that error
+            Ne_corr=calculate_Ne_corr_std_err_values(pickle_str=Ne_pickle_str,
+            new_x=time, CI=CI_neon)
+            # Extracting preferred correction values
+            pref_Ne=Ne_corr['preferred_values']
+            Split_err, pk_err=propagate_error_split_neon_peakfit(Ne_corr=Ne_corr, df_fits=df_combo_c)
 
+            df_combo_c['Corrected_Splitting_σ']=Split_err
+            df_combo_c['Corrected_Splitting_σ_Ne']=(Ne_corr['upper_values']*df_combo_c['Splitting']-Ne_corr['lower_values']*df_combo_c['Splitting'])/2
+            df_combo_c['Corrected_Splitting_σ_peak_fit']=pk_err
 
-    df_combo_c['Corrected_Splitting_σ']=Split_err
+        # If using a single value for quick and dirty fitting
+        else:
+            Split_err, pk_err=propagate_error_split_neon_peakfit(df_fits=df_combo_c, Ne_err=Ne_err, pref_Ne=pref_Ne)
 
-    df_combo_c['Corrected_Splitting_σ_Ne']=((Ne_err+pref_Ne)*df_combo_c['Splitting']-(Ne_err-pref_Ne)*df_combo_c['Splitting'])/2
-    df_combo_c['Corrected_Splitting_σ_peak_fit']=pk_err
 
 
-    Split=df_combo_c['Splitting']*pref_Ne
+            df_combo_c['Corrected_Splitting_σ']=Split_err
 
-    # This propgates the uncertainty in the splitting from peak fitting, and the Ne correction model
+            df_combo_c['Corrected_Splitting_σ_Ne']=((Ne_err+pref_Ne)*df_combo_c['Splitting']-(Ne_err-pref_Ne)*df_combo_c['Splitting'])/2
+            df_combo_c['Corrected_Splitting_σ_peak_fit']=pk_err
 
+        Split=df_combo_c['Splitting']*pref_Ne
 
+    else:
+        Split_err=(split_err*Split).astype(float)
 
 
     if temp=='RoomT':
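In the Ne-model branch above, the new Corrected_Splitting_σ_Ne column is half the width of the Ne correction interval applied to the raw splitting. Writing S for Splitting and c_upper, c_lower for the upper/lower Ne correction values:

    σ_Ne = (c_upper·S - c_lower·S) / 2 = S·(c_upper - c_lower) / 2

With the default CI_neon=0.67 this approximates a 1-sigma contribution from the drift-correction model alone, consistent with the docstring's note about the sigma column headings.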
@@ -500,19 +509,37 @@ CI_split=0.67, CI_neon=0.67, pref_Ne=None, Ne_err=None):
         HighD_RT=-41.64784 + 0.4058777*Split- 0.1460339*(Split-104.653)**2
 
     # IF temp is 37
-    # This gets the densimeter at low density
-    pickle_str_lowr='Lowrho_polyfit_data_CMASS.pkl'
-    with open(DiadFit_dir/pickle_str_lowr, 'rb') as f:
-        lowrho_pickle_data = pickle.load(f)
-
-    # This gets the densimeter at medium density
-    pickle_str_medr='Mediumrho_polyfit_data_CMASS.pkl'
-    with open(DiadFit_dir/pickle_str_medr, 'rb') as f:
-        medrho_pickle_data = pickle.load(f)
-    # This gets the densimeter at high density.
-    pickle_str_highr='Highrho_polyfit_data_CMASS.pkl'
-    with open(DiadFit_dir/pickle_str_highr, 'rb') as f:
-        highrho_pickle_data = pickle.load(f)
+    if lab=='CMASS':
+        # This gets the densimeter at low density
+        pickle_str_lowr='Lowrho_polyfit_data_CMASS.pkl'
+        with open(DiadFit_dir/pickle_str_lowr, 'rb') as f:
+            lowrho_pickle_data = pickle.load(f)
+
+        # This gets the densimeter at medium density
+        pickle_str_medr='Mediumrho_polyfit_data_CMASS.pkl'
+        with open(DiadFit_dir/pickle_str_medr, 'rb') as f:
+            medrho_pickle_data = pickle.load(f)
+        # This gets the densimeter at high density.
+        pickle_str_highr='Highrho_polyfit_data_CMASS.pkl'
+        with open(DiadFit_dir/pickle_str_highr, 'rb') as f:
+            highrho_pickle_data = pickle.load(f)
+    elif lab=='CCMR':
+        pickle_str_lowr='Lowrho_polyfit_data_CCMR.pkl'
+        with open(DiadFit_dir/pickle_str_lowr, 'rb') as f:
+            lowrho_pickle_data = pickle.load(f)
+
+        # This gets the densimeter at medium density
+        pickle_str_medr='Mediumrho_polyfit_data_CCMR.pkl'
+        with open(DiadFit_dir/pickle_str_medr, 'rb') as f:
+            medrho_pickle_data = pickle.load(f)
+        # This gets the densimeter at high density.
+        pickle_str_highr='Highrho_polyfit_data_CCMR.pkl'
+        with open(DiadFit_dir/pickle_str_highr, 'rb') as f:
+            highrho_pickle_data = pickle.load(f)
+
+
+    else:
+        raise TypeError('Lab name not recognised. Enter CCMR or CMASS')
 
     # this allocates the model
     lowrho_model = lowrho_pickle_data['model']
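The two branches differ only in the lab suffix of the pickle filenames, so the same logic could be written as a lookup. A minimal sketch, assuming only the `<Density>rho_polyfit_data_<LAB>.pkl` naming pattern shown above (`DiadFit_dir` here is a stand-in for the package's data directory):

```python
import pickle
from pathlib import Path

DiadFit_dir = Path('.')  # stand-in; the real package resolves its own data dir

def load_densimeter_models(lab='CMASS'):
    """Load the low/medium/high density densimeter pickles for one lab.
    Equivalent to the if/elif branches above, written as a loop."""
    if lab not in ('CMASS', 'CCMR'):
        raise TypeError('Lab name not recognised. Enter CCMR or CMASS')
    models = {}
    for density in ('Low', 'Medium', 'High'):
        pkl_path = DiadFit_dir / f'{density}rho_polyfit_data_{lab}.pkl'
        with open(pkl_path, 'rb') as f:
            models[density.lower()] = pickle.load(f)
    return models
```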
@@ -663,10 +690,17 @@ CI_split=0.67, CI_neon=0.67, pref_Ne=None, Ne_err=None):
     df.loc[SupCrit&Upper_Cal_SC, 'Notes']='Above upper Cali Limit'
     df.loc[SupCrit&Upper_Cal_SC, 'in range']='N'
 
+
+
     if Ne_pickle_str is not None:
         df_merge1=pd.concat([df_combo_c, Ne_corr], axis=1).reset_index(drop=True)
     else:
-        df_merge1=df_combo_c
+        df_merge1=df
+
+    df_merge=pd.concat([df, df_merge1], axis=1).reset_index(drop=True)
+
+
+
 
     df_merge=pd.concat([df, df_merge1], axis=1).reset_index(drop=True)
 
@@ -686,7 +720,7 @@ CI_split=0.67, CI_neon=0.67, pref_Ne=None, Ne_err=None):
             'Corrected_Splitting_σ_Ne', 'Corrected_Splitting_σ_peak_fit', 'power (mW)', 'Spectral Center']
         df_merge = df_merge[cols_to_move + [
             col for col in df_merge.columns if col not in cols_to_move]]
-    else:
+    elif pref_Ne is not None:
        cols_to_move = ['filename', 'Density g/cm3', 'σ Density g/cm3','σ Density g/cm3 (from Ne+peakfit)', 'σ Density g/cm3 (from densimeter)',
            'Corrected_Splitting', 'Corrected_Splitting_σ',
            'Corrected_Splitting_σ_Ne', 'Corrected_Splitting_σ_peak_fit']
@@ -694,11 +728,6 @@ CI_split=0.67, CI_neon=0.67, pref_Ne=None, Ne_err=None):
        col for col in df_merge.columns if col not in cols_to_move]]
 
 
-
-
-
-
-
     return df_merge
 
 
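Based on the new signature, calculate_density_cornell now accepts two mutually exclusive input modes. A hedged usage sketch (`df_combo` and `split_series` are placeholder variables, and `import DiadFit as pf` is the assumed alias):

```python
import DiadFit as pf

# Mode 1: full error propagation from a peak-fit dataframe plus a Ne
# drift-correction model, now selecting the Cornell lab explicitly:
dens_ccmr = pf.calculate_density_cornell(lab='CCMR', df_combo=df_combo,
                                         Ne_pickle_str='polyfit_data.pkl',
                                         temp='SupCrit')

# Mode 2: quick conversion from already-corrected splittings; per the
# Split_err=(split_err*Split) line above, split_err is applied as a
# fractional error on the splitting:
dens_cmass = pf.calculate_density_cornell(lab='CMASS',
                                          corrected_split=split_series,
                                          split_err=0.005,
                                          temp='SupCrit')
```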
@@ -850,7 +879,7 @@ def merge_fit_files(path):
 ## New UC Berkeley using 1220
 
 def calculate_density_ucb(*, Ne_line_combo='1117_1447', df_combo=None, temp='SupCrit',
-CI_split=0.67, CI_neon=0.67, Ne_pickle_str=None, pref_Ne=None, Ne_err=None):
+CI_split=0.67, CI_neon=0.67, Ne_pickle_str=None, pref_Ne=None, Ne_err=None, corrected_split=None, split_err=None):
     """ This function converts Diad Splitting into CO$_2$ density using the UC Berkeley calibration line
     developed by DeVitre and Wieser in 2023.
 
@@ -858,9 +887,18 @@ CI_split=0.67, CI_neon=0.67, Ne_pickle_str=None, pref_Ne=None, Ne_err=None):
     -------------
     Ne_line_combo: str, '1117_1447', '1220_1447', '1220_1400'
         Combination of Ne lines used for drift correction
+
+    Either:
 
     df_combo: pandas DataFrame
         data frame of peak fitting information
+
+    Or:
+    corrected_split: pd.Series
+        Corrected splitting (cm-1)
+
+    split_err: float, int
+        Error on corrected splitting
 
     temp: str
         'SupCrit' if measurements done at 37C
@@ -873,6 +911,8 @@ CI_split=0.67, CI_neon=0.67, Ne_pickle_str=None, pref_Ne=None, Ne_err=None):
     CI_split: float
         Default 0.67. Confidence interval to use, e.g. 0.67 returns 1 sigma uncertainties. If you use another number,
         note the column headings will still say sigma.
+
+
 
 
     Either
@@ -895,6 +935,8 @@ CI_split=0.67, CI_neon=0.67, Ne_pickle_str=None, pref_Ne=None, Ne_err=None):
         Preferred Density (based on different equations being merged), and intermediate calculations
 
     """
+    if corrected_split is not None:
+        Split=corrected_split
     if df_combo is not None:
         df_combo_c=df_combo.copy()
         time=df_combo_c['sec since midnight']
@@ -978,6 +1020,21 @@ CI_split=0.67, CI_neon=0.67, Ne_pickle_str=None, pref_Ne=None, Ne_err=None):
         pickle_str_highr='Highrho_polyfit_dataUCB_1220_1400.pkl'
         with open(DiadFit_dir/pickle_str_highr, 'rb') as f:
             highrho_pickle_data = pickle.load(f)
+
+    if Ne_line_combo=='1117_1400':
+        pickle_str_lowr='Lowrho_polyfit_dataUCB_1117_1400.pkl'
+        with open(DiadFit_dir/pickle_str_lowr, 'rb') as f:
+            lowrho_pickle_data = pickle.load(f)
+
+        # This gets the densimeter at medium density
+        pickle_str_medr='Mediumrho_polyfit_dataUCB_1117_1400.pkl'
+        with open(DiadFit_dir/pickle_str_medr, 'rb') as f:
+            medrho_pickle_data = pickle.load(f)
+        # This gets the densimeter at high density.
+        pickle_str_highr='Highrho_polyfit_dataUCB_1117_1400.pkl'
+        with open(DiadFit_dir/pickle_str_highr, 'rb') as f:
+            highrho_pickle_data = pickle.load(f)
+
 
     if Ne_line_combo=='1117_1447':
         # This gets the densimeter at low density
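With this hunk, '1117_1400' joins the supported Ne line combinations. A hypothetical call (assuming the usual `import DiadFit as pf` alias; `df_combo` is a placeholder for a peak-fit dataframe as described in the docstring):

```python
import DiadFit as pf

# Drift-correct and convert using the newly supported 1117/1400 Ne lines
dens = pf.calculate_density_ucb(Ne_line_combo='1117_1400',
                                df_combo=df_combo,
                                Ne_pickle_str='polyfit_data.pkl',
                                temp='SupCrit')
```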
@@ -1145,10 +1202,13 @@ CI_split=0.67, CI_neon=0.67, Ne_pickle_str=None, pref_Ne=None, Ne_err=None):
 
     if Ne_pickle_str is not None:
         df_merge1=pd.concat([df_combo_c, Ne_corr], axis=1).reset_index(drop=True)
+        df_merge=pd.concat([df, df_merge1], axis=1).reset_index(drop=True)
+    elif Ne_pickle_str is None and df_combo is not None:
+        df_merge=pd.concat([df, df_combo_c], axis=1).reset_index(drop=True)
     else:
-        df_merge1=df
+        df_merge=df
 
-    df_merge=pd.concat([df, df_merge1], axis=1).reset_index(drop=True)
+
 
     df_merge = df_merge.rename(columns={'Preferred D': 'Density g/cm3'})
     df_merge = df_merge.rename(columns={'Preferred D_σ': 'σ Density g/cm3'})
@@ -1160,19 +1220,25 @@ CI_split=0.67, CI_neon=0.67, Ne_pickle_str=None, pref_Ne=None, Ne_err=None):
     #
     #
 
-    if Ne_pickle_str is not None:
+    if Ne_pickle_str is not None: # If it's not None, we have all the columns for Ne
         cols_to_move = ['filename', 'Density g/cm3', 'σ Density g/cm3','σ Density g/cm3 (from Ne+peakfit)', 'σ Density g/cm3 (from densimeter)',
             'Corrected_Splitting', 'Corrected_Splitting_σ',
             'Corrected_Splitting_σ_Ne', 'Corrected_Splitting_σ_peak_fit', 'power (mW)', 'Spectral Center']
         df_merge = df_merge[cols_to_move + [
             col for col in df_merge.columns if col not in cols_to_move]]
-    elif pref_Ne is not None:
+    elif pref_Ne is not None and df_combo is not None: # If pref_Ne is given alongside a df_combo
         cols_to_move = ['filename', 'Density g/cm3', 'σ Density g/cm3','σ Density g/cm3 (from Ne+peakfit)', 'σ Density g/cm3 (from densimeter)',
             'Corrected_Splitting', 'Corrected_Splitting_σ',
             'Corrected_Splitting_σ_Ne', 'Corrected_Splitting_σ_peak_fit']
         df_merge = df_merge[cols_to_move + [
             col for col in df_merge.columns if col not in cols_to_move]]
-
+
+    elif df_combo is None:
+
+        cols_to_move = ['Density g/cm3', 'σ Density g/cm3','σ Density g/cm3 (from Ne+peakfit)', 'σ Density g/cm3 (from densimeter)',
+            'Corrected_Splitting']
+        df_merge = df_merge[cols_to_move + [
+            col for col in df_merge.columns if col not in cols_to_move]]
 
 
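Putting the new pieces together, a hypothetical quick call that converts pre-corrected splittings straight to density, with no peak-fit dataframe or Ne model (the fractional treatment of `split_err` is inferred by analogy with the `Split_err=(split_err*Split)` line in the Cornell function above; the ucb handling is not shown in this diff):

```python
import pandas as pd
import DiadFit as pf  # assuming the usual import alias

splits = pd.Series([103.10, 103.45, 103.80])  # corrected splitting, cm-1

dens = pf.calculate_density_ucb(corrected_split=splits,
                                split_err=0.005,  # assumed ~0.5% fractional error
                                temp='SupCrit')
print(dens[['Density g/cm3', 'Corrected_Splitting']])
```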