DiadFit-1.0.9-py3-none-any.whl → DiadFit-1.0.11-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
DiadFit/_version.py CHANGED
@@ -5,4 +5,4 @@
  # 1) we don't load dependencies by storing it in __init__.py
  # 2) we can import it in setup.py for the same reason
  # 3) we can import it into your module
- __version__ = '1.0.9'
+ __version__ = '1.0.11'
DiadFit/densimeters.py CHANGED
@@ -1370,6 +1370,9 @@ CI_split=0.67, CI_neon=0.67, Ne_pickle_str=None, pref_Ne=None, Ne_err=None, cor
 
     return df_merge
 
+
+
+
 ## Method from FLuids laboratory from FRANCIS Program
 
 
@@ -1433,3 +1436,618 @@ def Francis_pureCO2(FDS, FDS_std, uncer_FDS, uncer_FDS_std=0):
     return df
 
 
+ ## Shifted polynomial
+
+ import pickle
+
+ def blend_weights(x, x_min, x_max):
+     """Cosine smooth blend between x_min and x_max."""
+     t = np.clip((x - x_min) / (x_max - x_min), 0, 1)
+     return 0.5 * (1 - np.cos(np.pi * t))
+
+ def build_piecewise_poly_by_density(x, y, y_bounds=(0.17, 0.65), degrees=(1, 3, 2), blend_width=0.05, save_path=None):
+     """
+     Fits and optionally saves a smoothed piecewise polynomial model.
+
+     Returns:
+         f_base : callable
+         model_data : dict (can be pickled)
+     """
+     x = np.asarray(x)
+     y = np.asarray(y)
+
+     mask_low = y < y_bounds[0]
+     mask_mid = (y >= y_bounds[0]) & (y <= y_bounds[1])
+     mask_high = y > y_bounds[1]
+
+     polys = []
+     coeffs = []
+     for mask, deg in zip([mask_low, mask_mid, mask_high], degrees):
+         c = np.polyfit(x[mask], y[mask], deg)
+         coeffs.append(c)
+         polys.append(np.poly1d(c))
+
+     x_low_med = x[np.abs(y - y_bounds[0]).argmin()]
+     x_med_high = x[np.abs(y - y_bounds[1]).argmin()]
+
+     def f_base(x_input):
+         x_arr = np.asarray(x_input)
+         result = np.full_like(x_arr, np.nan, dtype=float)
+
+         low_mask = x_arr < (x_low_med - blend_width)
+         mid_mask = (x_arr > (x_low_med + blend_width)) & (x_arr < (x_med_high - blend_width))
+         high_mask = x_arr > (x_med_high + blend_width)
+
+         result[low_mask] = polys[0](x_arr[low_mask])
+         result[mid_mask] = polys[1](x_arr[mid_mask])
+         result[high_mask] = polys[2](x_arr[high_mask])
+
+         blend_lm = (x_arr >= (x_low_med - blend_width)) & (x_arr <= (x_low_med + blend_width))
+         w_lm = blend_weights(x_arr[blend_lm], x_low_med - blend_width, x_low_med + blend_width)
+         result[blend_lm] = (1 - w_lm) * polys[0](x_arr[blend_lm]) + w_lm * polys[1](x_arr[blend_lm])
+
+         blend_mh = (x_arr >= (x_med_high - blend_width)) & (x_arr <= (x_med_high + blend_width))
+         w_mh = blend_weights(x_arr[blend_mh], x_med_high - blend_width, x_med_high + blend_width)
+         result[blend_mh] = (1 - w_mh) * polys[1](x_arr[blend_mh]) + w_mh * polys[2](x_arr[blend_mh])
+
+         return result
+
+     model_data = {
+         'coeffs': coeffs,
+         'y_bounds': y_bounds,
+         'degrees': degrees,
+         'blend_width': blend_width,
+         'x_low_med': x_low_med,
+         'x_med_high': x_med_high,
+         'x': x,
+         'y': y
+     }
+     if save_path:
+         with open(save_path, 'wb') as f:
+             pickle.dump(model_data, f)
+
+     return f_base, model_data
+
+
+ def blend_weights(x, x_min, x_max):
+     t = np.clip((x - x_min) / (x_max - x_min), 0, 1)
+     return 0.5 * (1 - np.cos(np.pi * t))
+
+ def load_piecewise_model(model_data):
+     coeffs = model_data['coeffs']
+     blend_width = model_data['blend_width']
+     x_low_med = model_data['x_low_med']
+     x_med_high = model_data['x_med_high']
+     polys = [np.poly1d(c) for c in coeffs]
+
+     vertical_shift = model_data.get('vertical_shift', 0)
+
+     def f_base(x_input):
+         x_arr = np.asarray(x_input)
+         result = np.full_like(x_arr, np.nan, dtype=float)
+
+         low_mask = x_arr < (x_low_med - blend_width)
+         mid_mask = (x_arr > (x_low_med + blend_width)) & (x_arr < (x_med_high - blend_width))
+         high_mask = x_arr > (x_med_high + blend_width)
+
+         result[low_mask] = polys[0](x_arr[low_mask])
+         result[mid_mask] = polys[1](x_arr[mid_mask])
+         result[high_mask] = polys[2](x_arr[high_mask])
+
+         blend_lm = (x_arr >= (x_low_med - blend_width)) & (x_arr <= (x_low_med + blend_width))
+         w_lm = blend_weights(x_arr[blend_lm], x_low_med - blend_width, x_low_med + blend_width)
+         result[blend_lm] = (1 - w_lm) * polys[0](x_arr[blend_lm]) + w_lm * polys[1](x_arr[blend_lm])
+
+         blend_mh = (x_arr >= (x_med_high - blend_width)) & (x_arr <= (x_med_high + blend_width))
+         w_mh = blend_weights(x_arr[blend_mh], x_med_high - blend_width, x_med_high + blend_width)
+         result[blend_mh] = (1 - w_mh) * polys[1](x_arr[blend_mh]) + w_mh * polys[2](x_arr[blend_mh])
+
+         return result + vertical_shift
+
+     return f_base
+
+
+
+
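The two helpers above are the core of the new shifted-densimeter machinery: build_piecewise_poly_by_density fits separate polynomials over the low/mid/high density ranges, cosine-blends them near the break points, and returns both a callable and a picklable dictionary; load_piecewise_model rebuilds the callable, applying any stored vertical_shift. A minimal usage sketch with purely illustrative synthetic calibration data (the save path is hypothetical):

    import numpy as np
    from DiadFit.densimeters import build_piecewise_poly_by_density, load_piecewise_model

    # Synthetic calibration: corrected splitting (cm-1) vs CO2 density (g/cm3)
    rng = np.random.default_rng(1)
    split = np.linspace(102.7, 104.0, 80)
    dens = 0.05 + 0.70 * (split - split.min()) / (split.max() - split.min()) + rng.normal(0, 0.01, split.size)

    # Fit the three-segment model and save it for later reuse
    f_base, model_data = build_piecewise_poly_by_density(split, dens, save_path='my_densimeter.pkl')

    # Rebuild the callable from the stored dictionary and evaluate it at new splittings
    model = load_piecewise_model(model_data)
    print(model(np.array([103.2, 103.6])))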
+ ## New function that is much simpler
+
+ def calculate_density_ucb_new(*, df_combo=None, temp='SupCrit',
+     CI_split=0.67, CI_neon=0.67, Ne_pickle_str=None, pref_Ne=None, Ne_err=None, corrected_split=None, split_err=None, shift=0):
+     """ This function converts Diad Splitting into CO$_2$ density using the UC Berkeley calibration line
+     developed by DeVitre and Wieser in 2023.
+
+     Parameters
+     -------------
+     Ne_line_combo: str, '1117_1447', '1117_1400', '1220_1447', '1220_1400', '1220_1567'
+         Combination of Ne lines used for drift correction
+
+     Either:
+
+     df_combo: pandas DataFrame
+         data frame of peak fitting information
+
+     Or:
+     corrected_split: pd.Series
+         Corrected splitting (cm-1)
+
+     split_err: float, int
+         Error on corrected splitting
+
+     temp: str
+         'SupCrit' if measurements done at 37C
+         'RoomT' if measurements done at 24C - Not supported yet but could be added if needed.
+
+     CI_neon: float
+         Default 0.67. Confidence interval to use, e.g. 0.67 returns 1 sigma uncertainties. If you use another number,
+         note the column headings will still say sigma.
+
+     CI_split: float
+         Default 0.67. Confidence interval to use, e.g. 0.67 returns 1 sigma uncertainties. If you use another number,
+         note the column headings will still say sigma.
+
+     Either:
+
+     Ne_pickle_str: str
+         Name of Ne correction model
+
+     Or:
+
+     pref_Ne, Ne_err: float, int
+         For quick and dirty fitting, can pass a preferred value for your instrument before you have a chance to
+         regress the Ne lines (useful when first analysing new samples).
+
+     Returns
+     --------------
+     pd.DataFrame
+         Preferred Density (based on different equations being merged), and intermediate calculations
+
+     """
+     if corrected_split is not None:
+         Split=corrected_split
+     if df_combo is not None:
+         df_combo_c=df_combo.copy()
+         time=df_combo_c['sec since midnight']
+
+         if Ne_pickle_str is not None:
+
+             # Calculating the upper and lower values for Ne to get that error
+             Ne_corr=calculate_Ne_corr_std_err_values(pickle_str=Ne_pickle_str,
+                 new_x=time, CI=CI_neon)
+             # Extracting preferred correction values
+             pref_Ne=Ne_corr['preferred_values']
+             Split_err, pk_err=propagate_error_split_neon_peakfit(Ne_corr=Ne_corr, df_fits=df_combo_c)
+
+             df_combo_c['Corrected_Splitting_σ']=Split_err
+             df_combo_c['Corrected_Splitting_σ_Ne']=(Ne_corr['upper_values']*df_combo_c['Splitting']-Ne_corr['lower_values']*df_combo_c['Splitting'])/2
+             df_combo_c['Corrected_Splitting_σ_peak_fit']=pk_err
+
+         # If using a single value for quick dirty fitting
+         else:
+             Split_err, pk_err=propagate_error_split_neon_peakfit(df_fits=df_combo_c, Ne_err=Ne_err, pref_Ne=pref_Ne)
+
+             df_combo_c['Corrected_Splitting_σ']=Split_err
+             df_combo_c['Corrected_Splitting_σ_Ne']=((Ne_err+pref_Ne)*df_combo_c['Splitting']-(Ne_err-pref_Ne)*df_combo_c['Splitting'])/2
+             df_combo_c['Corrected_Splitting_σ_peak_fit']=pk_err
+
+         Split=df_combo_c['Splitting']*pref_Ne
+
+     else:
+         Split_err=split_err
+
+     # This is for if you just have splitting
+
+     # This propagates the uncertainty in the splitting from peak fitting, and the Ne correction model
+
+     if temp=='RoomT':
+         raise TypeError('Sorry, no UC Berkeley calibration at 24C, please enter temp=SupCrit')
+     if isinstance(Split, float) or isinstance(Split, int):
+         Split=pd.Series(Split)
+
+     DiadFit_dir=Path(__file__).parent
+
+     # load the new smoothed model
+     with open(DiadFit_dir / "smoothed_polyfit_June25_UCB.pkl", 'rb') as f:
+         smoothed_model_data = pickle.load(f)
+
+     smoothed_model = load_piecewise_model(smoothed_model_data)
+
+     # Evaluate model
+     Density = pd.Series(smoothed_model(Split), index=Split.index)
+
+     # Let's get the error
+     err_df = calculate_Densimeter_std_err_values_smooth(
+         model_data=smoothed_model_data,
+         corrected_split=Split,
+         corrected_split_err=Split_err,
+         CI_dens=0.67,
+         CI_split=0.67,
+         str_d='Smoothed'
+     )
+
+     df=pd.DataFrame(data={'Density g/cm3': Density+shift,
+         'σ Density g/cm3': err_df['Smoothed_Density_σ'],
+         'σ Density g/cm3 (from Ne+peakfit)': err_df['Smoothed_Density_σ_split'],
+         'σ Density g/cm3 (from densimeter)': err_df['Smoothed_Density_σ_dens'],
+         'Corrected_Splitting': Split,
+         'Preferred D_σ_Ne': 0,
+         'in range': 'in progress',
+         'Temperature': temp})
+
+     if Ne_pickle_str is not None:
+         df_merge1=pd.concat([df_combo_c, Ne_corr], axis=1).reset_index(drop=True)
+         df_merge=pd.concat([df, df_merge1], axis=1).reset_index(drop=True)
+     elif Ne_pickle_str is None and df_combo is not None:
+         df_merge=pd.concat([df, df_combo_c], axis=1).reset_index(drop=True)
+     else:
+         df_merge=df
+
+     if Ne_pickle_str is not None: # If it's not None, have all the columns for Ne
+         cols_to_move = ['filename', 'Density g/cm3', 'σ Density g/cm3','σ Density g/cm3 (from Ne+peakfit)', 'σ Density g/cm3 (from densimeter)',
+             'Corrected_Splitting', 'Corrected_Splitting_σ',
+             'Corrected_Splitting_σ_Ne', 'Corrected_Splitting_σ_peak_fit', 'power (mW)', 'Spectral Center']
+         df_merge = df_merge[cols_to_move + [
+             col for col in df_merge.columns if col not in cols_to_move]]
+
+     elif pref_Ne is not None and df_combo is not None: # If pref_Ne
+         cols_to_move = ['filename', 'Density g/cm3', 'σ Density g/cm3','σ Density g/cm3 (from Ne+peakfit)', 'σ Density g/cm3 (from densimeter)',
+             'Corrected_Splitting', 'Corrected_Splitting_σ',
+             'Corrected_Splitting_σ_Ne', 'Corrected_Splitting_σ_peak_fit']
+         df_merge = df_merge[cols_to_move + [
+             col for col in df_merge.columns if col not in cols_to_move]]
+
+     elif df_combo is None:
+         cols_to_move = ['Density g/cm3', 'σ Density g/cm3','σ Density g/cm3 (from Ne+peakfit)', 'σ Density g/cm3 (from densimeter)',
+             'Corrected_Splitting']
+         df_merge = df_merge[cols_to_move + [
+             col for col in df_merge.columns if col not in cols_to_move]]
+
+     return df_merge
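calculate_density_ucb_new is the streamlined entry point: it propagates the Ne-correction and peak-fit errors, evaluates the packaged smoothed UCB model, and returns a merged DataFrame. A usage sketch for the simplest case, where corrected splittings are already available (values illustrative; the full workflow would pass df_combo and Ne_pickle_str instead):

    import pandas as pd
    from DiadFit.densimeters import calculate_density_ucb_new

    corrected = pd.Series([103.10, 103.45, 103.72])   # corrected splitting, cm-1
    out = calculate_density_ucb_new(corrected_split=corrected, split_err=0.005, temp='SupCrit')
    print(out[['Corrected_Splitting', 'Density g/cm3', 'σ Density g/cm3']])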
+
+ from scipy.stats import t
+ import numpy as np
+ import pandas as pd
+
+
+ ##
+ def calculate_Densimeter_std_err_values_smooth(
+     *,
+     model_data,
+     corrected_split,
+     corrected_split_err,
+     CI_dens=0.67,
+     CI_split=0.67,
+     str_d='Smoothed',
+     x=None,
+     y=None
+ ):
+     """
+     Calculates propagated uncertainty for a smoothed polynomial model.
+
+     Parameters
+     ----------
+     model_data : dict
+         Dictionary from build_piecewise_poly_by_density including coeffs, blend_width, etc.
+     corrected_split : pd.Series or np.ndarray
+         Corrected splitting values
+     corrected_split_err : float or pd.Series
+         Uncertainty on splitting
+     CI_dens : float
+         Confidence interval for uncertainty in the fit
+     CI_split : float
+         Confidence interval for splitting uncertainty
+     str_d : str
+         Prefix for column names
+     x, y : array-like (optional)
+         Original data used to fit the model, if not included in model_data
+
+     Returns
+     -------
+     pd.DataFrame
+         DataFrame of predicted values and propagated uncertainties
+     """
+     from scipy.stats import t
+     import numpy as np
+     import pandas as pd
+
+     # === Rebuild model ===
+     def load_piecewise_model(model_data):
+         coeffs = model_data['coeffs']
+         blend_width = model_data['blend_width']
+         x_low_med = model_data['x_low_med']
+         x_med_high = model_data['x_med_high']
+         polys = [np.poly1d(c) for c in coeffs]
+
+         def f_base(x_input):
+             x_arr = np.asarray(x_input)
+             result = np.full_like(x_arr, np.nan, dtype=float)
+
+             low_mask = x_arr < (x_low_med - blend_width)
+             mid_mask = (x_arr > (x_low_med + blend_width)) & (x_arr < (x_med_high - blend_width))
+             high_mask = x_arr > (x_med_high + blend_width)
+
+             result[low_mask] = polys[0](x_arr[low_mask])
+             result[mid_mask] = polys[1](x_arr[mid_mask])
+             result[high_mask] = polys[2](x_arr[high_mask])
+
+             blend_lm = (x_arr >= (x_low_med - blend_width)) & (x_arr <= (x_low_med + blend_width))
+             w_lm = 0.5 * (1 - np.cos(np.pi * (x_arr[blend_lm] - (x_low_med - blend_width)) / (2 * blend_width)))
+             result[blend_lm] = (1 - w_lm) * polys[0](x_arr[blend_lm]) + w_lm * polys[1](x_arr[blend_lm])
+
+             blend_mh = (x_arr >= (x_med_high - blend_width)) & (x_arr <= (x_med_high + blend_width))
+             w_mh = 0.5 * (1 - np.cos(np.pi * (x_arr[blend_mh] - (x_med_high - blend_width)) / (2 * blend_width)))
+             result[blend_mh] = (1 - w_mh) * polys[1](x_arr[blend_mh]) + w_mh * polys[2](x_arr[blend_mh])
+
+             return result
+
+         return f_base
+
+     Pf = load_piecewise_model(model_data)
+
+     # Use x/y from model_data if available, else require them as args
+     if 'x' in model_data and 'y' in model_data:
+         x = model_data['x']
+         y = model_data['y']
+     elif x is None or y is None:
+         raise ValueError("You must supply x and y arrays if not included in model_data.")
+
+     residuals = y - Pf(x)
+     residual_std = np.std(residuals)
+
+     mean_x = np.nanmean(x)
+     n = len(x)
+     N_poly = max(len(c) - 1 for c in model_data['coeffs'])
+
+     # Standard error on predictions
+     standard_errors = residual_std * np.sqrt(1 + 1/n + (corrected_split - mean_x)**2 / np.sum((x - mean_x)**2))
+     dfree = n - (N_poly + 1)
+
+     t_value_split = t.ppf((1 + CI_split) / 2, dfree)
+     t_value_dens = t.ppf((1 + CI_dens) / 2, dfree)
+
+     # Central prediction
+     preferred_values = Pf(corrected_split)
+     lower_values = preferred_values - t_value_dens * standard_errors
+     upper_values = preferred_values + t_value_dens * standard_errors
+     uncertainty_from_dens = (upper_values - lower_values) / 2
+
+     # Splitting propagation
+     max_split = corrected_split + corrected_split_err
+     min_split = corrected_split - corrected_split_err
+     max_density = Pf(max_split)
+     min_density = Pf(min_split)
+     uncertainty_split = (max_density - min_density) / 2
+
+     total_uncertainty = np.sqrt(uncertainty_split ** 2 + uncertainty_from_dens ** 2)
+
+     return pd.DataFrame({
+         f'{str_d}_Density': preferred_values,
+         f'{str_d}_Density_σ': total_uncertainty,
+         f'{str_d}_Density+1σ': preferred_values - total_uncertainty,
+         f'{str_d}_Density-1σ': preferred_values + total_uncertainty,
+         f'{str_d}_Density_σ_dens': uncertainty_from_dens,
+         f'{str_d}_Density_σ_split': uncertainty_split
+     })
+
+
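The σ_split and σ_dens contributions above are combined in quadrature; a quick numerical check of that step, with hypothetical values:

    import numpy as np
    sigma_split = 0.004   # density error from Ne correction + peak fit, g/cm3 (hypothetical)
    sigma_dens = 0.003    # density error from the densimeter fit itself, g/cm3 (hypothetical)
    sigma_total = np.sqrt(sigma_split**2 + sigma_dens**2)   # = 0.005 g/cm3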
+
+ from pathlib import Path
+ import pickle
+
+ def calculate_density_labx(
+     *,
+     df_combo=None,
+     temp='SupCrit',
+     CI_split=0.67,
+     CI_neon=0.67,
+     Ne_pickle_str=None,
+     pref_Ne=None,
+     Ne_err=None,
+     corrected_split=None,
+     split_err=None,
+     model_pickle_path=None
+ ):
+     import pandas as pd
+     import numpy as np
+     from DiadFit.densimeters import calculate_Ne_corr_std_err_values, propagate_error_split_neon_peakfit
+     from DiadFit.densimeters import calculate_Densimeter_std_err_values_smooth, load_piecewise_model
+
+     if corrected_split is not None:
+         Split = corrected_split
+
+     if df_combo is not None:
+         df_combo_c = df_combo.copy()
+         time = df_combo_c['sec since midnight']
+
+         if Ne_pickle_str is not None:
+             Ne_corr = calculate_Ne_corr_std_err_values(pickle_str=Ne_pickle_str, new_x=time, CI=CI_neon)
+             pref_Ne = Ne_corr['preferred_values']
+             Split_err, pk_err = propagate_error_split_neon_peakfit(Ne_corr=Ne_corr, df_fits=df_combo_c)
+             df_combo_c['Corrected_Splitting_σ'] = Split_err
+             df_combo_c['Corrected_Splitting_σ_Ne'] = (
+                 (Ne_corr['upper_values'] * df_combo_c['Splitting'] -
+                  Ne_corr['lower_values'] * df_combo_c['Splitting']) / 2
+             )
+             df_combo_c['Corrected_Splitting_σ_peak_fit'] = pk_err
+         else:
+             Split_err, pk_err = propagate_error_split_neon_peakfit(
+                 df_fits=df_combo_c, Ne_err=Ne_err, pref_Ne=pref_Ne
+             )
+             df_combo_c['Corrected_Splitting_σ'] = Split_err
+             df_combo_c['Corrected_Splitting_σ_Ne'] = (
+                 ((Ne_err + pref_Ne) * df_combo_c['Splitting'] -
+                  (Ne_err - pref_Ne) * df_combo_c['Splitting']) / 2
+             )
+             df_combo_c['Corrected_Splitting_σ_peak_fit'] = pk_err
+
+         Split = df_combo_c['Splitting'] * pref_Ne
+     else:
+         Split_err = split_err
+
+     if temp == 'RoomT':
+         raise TypeError('No calibration available at 24C, please use temp="SupCrit"')
+     if isinstance(Split, (float, int)):
+         import pandas as pd
+         Split = pd.Series(Split)
+
+     if model_pickle_path is None:
+         raise ValueError("You must provide a path to the LabX model pickle using `model_pickle_path`.")
+
+     with open(Path(model_pickle_path), 'rb') as f:
+         model_data = pickle.load(f)
+
+     model = load_piecewise_model(model_data)
+     Density = pd.Series(model(Split), index=Split.index)
+
+     err_df = calculate_Densimeter_std_err_values_smooth(
+         model_data=model_data,
+         corrected_split=Split,
+         corrected_split_err=Split_err,
+         CI_dens=CI_split,
+         CI_split=CI_split,
+         str_d='LabX'
+     )
+
+     df = pd.DataFrame(data={
+         'Density g/cm3': Density,
+         'σ Density g/cm3': err_df['LabX_Density_σ'],
+         'σ Density g/cm3 (from Ne+peakfit)': err_df['LabX_Density_σ_split'],
+         'σ Density g/cm3 (from densimeter)': err_df['LabX_Density_σ_dens'],
+         'Corrected_Splitting': Split,
+         'Preferred D_σ_Ne': 0,
+         'in range': 'in progress',
+         'Temperature': temp
+     })
+
+     if Ne_pickle_str is not None:
+         df_merge1 = pd.concat([df_combo_c, Ne_corr], axis=1).reset_index(drop=True)
+         df_merge = pd.concat([df, df_merge1], axis=1).reset_index(drop=True)
+     elif df_combo is not None:
+         df_merge = pd.concat([df, df_combo_c], axis=1).reset_index(drop=True)
+     else:
+         df_merge = df
+
+     return df_merge
+
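calculate_density_labx mirrors calculate_density_ucb_new, but evaluates any lab's piecewise model supplied as a pickle rather than the packaged UCB one. A sketch, using a hypothetical model file produced earlier by build_piecewise_poly_by_density:

    import pandas as pd
    from DiadFit.densimeters import calculate_density_labx

    out = calculate_density_labx(
        corrected_split=pd.Series([103.2, 103.6]),
        split_err=0.005,
        model_pickle_path='my_densimeter.pkl')   # hypothetical pickle from build_piecewise_poly_by_density
    print(out['Density g/cm3'])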
+ ## Way to actually shift densimeter
+
+ # This general model works for any pickle you load in.
+ def apply_and_save_vertical_shift_to_model(*, pickle_in_path, new_x, new_y, pickle_out_path=None):
+     """
+     Applies a vertical shift to a saved piecewise model based on new_x and new_y,
+     then saves the shifted model to a new .pkl file.
+
+     Parameters
+     ----------
+     pickle_in_path : str
+         Path to the original .pkl file (output from build_piecewise_poly_by_density).
+     new_x : array-like
+         Corrected splitting values (x).
+     new_y : array-like
+         Measured density values (y).
+     pickle_out_path : str, optional
+         Where to save the new model. If None, appends '_shifted.pkl' to the input path.
+
+     Returns
+     -------
+     shift : float
+         Vertical shift applied to the model.
+     """
+     import pickle
+     import numpy as np
+
+     # Load the model
+     with open(pickle_in_path, 'rb') as f:
+         model_data = pickle.load(f)
+
+     # Rebuild the base function
+     base_model = pf.load_piecewise_model(model_data)
+     f_vals = base_model(new_x)
+
+     # Calculate vertical shift
+     shift = np.nanmean(new_y - f_vals)
+
+     # Store the shift
+     model_data['vertical_shift'] = shift
+
+     # Save new .pkl
+     if pickle_out_path is None:
+         pickle_out_path = pickle_in_path.replace('.pkl', '_shifted.pkl')
+
+     with open(pickle_out_path, 'wb') as f:
+         pickle.dump(model_data, f)
+
+     return shift
+
+
+
+ def apply_and_save_vertical_shift_to_ucb_densimeter(new_x, new_y):
+     """
+     Calculates a vertical shift for the packaged UC Berkeley piecewise model
+     ("smoothed_polyfit_June25_UCB.pkl") based on new_x and new_y.
+
+     Parameters
+     ----------
+     new_x : array-like
+         Corrected splitting values (x).
+     new_y : array-like
+         Measured density values (y).
+
+     Returns
+     -------
+     shift : float
+         Vertical shift between the measured densities and the model.
+     """
+
+     DiadFit_dir = Path(__file__).parent
+
+     with open(DiadFit_dir / "smoothed_polyfit_June25_UCB.pkl", 'rb') as f:
+         model_data = pickle.load(f)
+
+     base_model = load_piecewise_model(model_data)
+     f_vals = base_model(new_x)
+
+     shift = np.nanmean(new_y - f_vals)
+     model_data['vertical_shift'] = shift
+
+     return shift
+
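To close the loop on the shift workflow: a lab re-measures a set of standards, computes the offset from the packaged UCB model, and then passes that offset to calculate_density_ucb_new via its shift argument. A sketch with illustrative numbers:

    import numpy as np
    import pandas as pd
    from DiadFit.densimeters import (apply_and_save_vertical_shift_to_ucb_densimeter,
                                     calculate_density_ucb_new)

    # Hypothetical in-house standards: corrected splitting vs known CO2 density
    split_std = np.array([103.0, 103.3, 103.6])
    dens_std = np.array([0.21, 0.38, 0.55])

    shift = apply_and_save_vertical_shift_to_ucb_densimeter(split_std, dens_std)
    out = calculate_density_ucb_new(corrected_split=pd.Series([103.2]), split_err=0.005, shift=shift)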
@@ -884,7 +884,7 @@ def convert_co2_dens_press_depth(EOS='SW96', T_K=None,
     CO2_dens_gcm3=None,
     crust_dens_kgm3=None, output='kbar',
     g=9.81, model=None, XH2O=None, Hloss=True,
-     d1=None, d2=None, rho1=None, rho2=None, rho3=None,T_K_ambient=37+273.15 ):
+     d1=None, d2=None,d3=None, rho1=None, rho2=None, rho3=None, rho4=None, T_K_ambient=37+273.15 ):
 
     """ This function calculates pressure and depth based on input CO2 densities,
     temperatures, and crustal density information from the user
@@ -921,12 +921,13 @@ def convert_co2_dens_press_depth(EOS='SW96', T_K=None,
 
     if model is three-step:
     If three step, must also define:
-     d1: Depth to first transition in km
     rho1: Density between surface and 1st transition
-     d2: Depth to second transition in km (from surface)
+     d1: Depth to first transition in km
     rho2: Density between 1st and 2nd transition
-     d3: Depth to third transition in km (from surface)
+     d2: Depth to second transition in km (from surface)
     rho3: Density between 2nd and 3rd transition depth.
+     d3: Depth to third transition in km (from surface)
+     rho4: Density below d3
 
     Returns
     ---------------------
@@ -997,9 +998,11 @@ def convert_co2_dens_press_depth(EOS='SW96', T_K=None,
     P_kbar_calc=pf.calculate_entrapment_P_XH2O(XH2O=XH2O, CO2_dens_gcm3=CO2_dens_gcm3, T_K=T_K, T_K_ambient=T_K_ambient, fast_calcs=True, Hloss=Hloss )
 
 
-     Depth_km=convert_pressure_to_depth(P_kbar_calc,
+     Depth_km=convert_pressure_to_depth(P_kbar=P_kbar_calc,
         crust_dens_kgm3=crust_dens_kgm3, g=9.81, model=model,
-         d1=d1, d2=d2, rho1=rho1, rho2=rho2, rho3=rho3)
+         d1=d1, d2=d2, d3=d3, rho1=rho1, rho2=rho2, rho3=rho3, rho4=rho4)
+
+
 
     if type(Depth_km) is float:
         # Crustal density, using P=rho g H
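The signature and docstring changes above extend the layered crustal model from three to four density layers (rho4 below d3). A hedged sketch of a call using the new arguments (numbers are illustrative; the 'three-step' keyword spelling and package-level export are assumed from the docstring wording and DiadFit's usual import style):

    import DiadFit as pf

    out = pf.convert_co2_dens_press_depth(
        EOS='SW96', T_K=1400, CO2_dens_gcm3=0.45,
        model='three-step',                            # assumed keyword, per the docstring
        d1=5, d2=15, d3=25,                            # transition depths, km
        rho1=2400, rho2=2700, rho3=2900, rho4=3100)    # layer densities, kg/m3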
DiadFit/ne_lines.py CHANGED
@@ -1894,7 +1894,6 @@ def generate_Ne_corr_model(*, time, Ne_corr, N_poly=3, CI=0.67, bootstrap=False,
 
 from scipy.stats import t
 
-
 def calculate_Ne_corr_std_err_values(*, pickle_str, new_x, CI=0.67):
     # Load the model and the data from the pickle file
     with open(pickle_str, 'rb') as f:
@@ -1902,11 +1901,14 @@ def calculate_Ne_corr_std_err_values(*, pickle_str, new_x, CI=0.67):
 
     model = data['model']
     N_poly = model.order - 1
-
+
     Pf = data['model']
     x = data['x']
     y = data['y']
 
+     # Convert new_x to plain numpy array
+     new_x_array = np.asarray(new_x)
+
     # Calculate the residuals
     residuals = y - Pf(x)
 
@@ -1916,27 +1918,29 @@ def calculate_Ne_corr_std_err_values(*, pickle_str, new_x, CI=0.67):
     # Calculate the standard errors for the new x values
     mean_x = np.mean(x)
     n = len(x)
-     standard_errors = residual_std * np.sqrt(1 + 1/n + (new_x - mean_x)**2 / np.sum((x - mean_x)**2))
+     standard_errors = residual_std * np.sqrt(1 + 1/n + (new_x_array - mean_x)**2 / np.sum((x - mean_x)**2))
 
     # Calculate the degrees of freedom
-     df = len(x) - (N_poly + 1)
+     df_dof = len(x) - (N_poly + 1)
 
     # Calculate the t value for the given confidence level
-     t_value = t.ppf((1 + CI) / 2, df)
+     t_value = t.ppf((1 + CI) / 2, df_dof)
 
     # Calculate the prediction intervals
-     preferred_values = Pf(new_x)
+     preferred_values = Pf(new_x_array)
     lower_values = preferred_values - t_value * standard_errors
     upper_values = preferred_values + t_value * standard_errors
 
-     df=pd.DataFrame(data={
-         'time': new_x,
+     df_out = pd.DataFrame(data={
+         'time': new_x_array,
         'preferred_values': preferred_values,
         'lower_values': lower_values,
         'upper_values': upper_values
     })
 
-     return df
+     return df_out
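The renamed variables above implement a standard polynomial prediction interval (df_dof degrees of freedom, Student's t). A self-contained sketch of the same logic on toy data, not DiadFit's Ne model (the 0.67 confidence level mirrors the default):

    import numpy as np
    from scipy.stats import t

    rng = np.random.default_rng(2)
    x = np.linspace(0, 10, 25)                              # e.g. seconds since midnight (arbitrary units)
    y = 1.0005 - 1e-4 * x + rng.normal(0, 5e-5, x.size)     # e.g. Ne correction factor
    N_poly = 3
    Pf = np.poly1d(np.polyfit(x, y, N_poly))

    new_x = np.array([2.5, 7.5])
    resid_std = np.std(y - Pf(x))
    se = resid_std * np.sqrt(1 + 1/len(x) + (new_x - x.mean())**2 / np.sum((x - x.mean())**2))
    t_val = t.ppf((1 + 0.67) / 2, len(x) - (N_poly + 1))
    lower, upper = Pf(new_x) - t_val * se, Pf(new_x) + t_val * se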
+
+
 
 
 
Binary file
@@ -0,0 +1,16 @@
+ LICENSE
+
+ Copyright (c) [2025] [Penny Wieser]
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+ Special Clause on GUI Usage
+
+ Use of DiadFit in graphical user interfaces (GUIs), including but not limited to applications that provide point-and-click access to DiadFit’s functions, requires prior written permission from the author.
+
+ This requirement exists because of a broader issue in the geoscience community: tools that wrap or interface with core scientific software often receive the bulk of citations, while foundational packages like DiadFit go unrecognized—particularly when journals impose citation limits. DiadFit represents a significant and ongoing labor investment. Responsible citation and acknowledgment are necessary to support its continued development and maintenance.
+
+ THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: DiadFit
- Version: 1.0.9
+ Version: 1.0.11
 Summary: DiadFit
 Home-page: https://github.com/PennyWieser/DiadFit
 Author: Penny Wieser
@@ -29,22 +29,24 @@ DiadFit/Mediumrho_polyfit_data_CCMR.pkl,sha256=U6ODSdurqS0-lynm1MG1zktg8NuhYRbrY
 DiadFit/Mediumrho_polyfit_data_CMASS.pkl,sha256=SBy1pIdqCAF9UtB9FLNTuD0-tFyD7swwJppdE2U_FsY,1557
 DiadFit/Psensor.py,sha256=C2xSlgxhUJIKIBDvUp02QaYRs5QsIqjGGRMP25ZLRZ0,10435
 DiadFit/__init__.py,sha256=F-HjhCYKL_U8PfiH8tZ9DUCkxPvo6lAslJS4fyvxkbY,1148
- DiadFit/_version.py,sha256=8wkWdIefAjvOQecKda3-TYv9_XiCz7EIQMvxj1kOrJA,295
+ DiadFit/_version.py,sha256=yz8r6H20z1GetC4Zn4tV21xqy4lJucguWWWyVy-HmNA,296
 DiadFit/argon_lines.py,sha256=vtzsuDdEgrAmEF9xwpejpFqKV9hKPS1JUYhIl4AfXZ0,7675
 DiadFit/cosmicray_filter.py,sha256=a45x2_nmpi9Qcjc_L39UA9JOd1NMorIjtTRGnCdG3MU,23634
 DiadFit/densimeter_fitting.py,sha256=AV5jWHSuIuN-e61chwMiTETa26pQo5drEGorYTkceHo,8308
- DiadFit/densimeters.py,sha256=J4DnQgavhkDKOaBTQqqShepZVeH5jxJiT1FmebmLY88,55282
+ DiadFit/densimeters.py,sha256=1eT6XVYtRPbY4WmEgIvbvEYdDkJTvFlIjoqhypdpDKk,76511
 DiadFit/density_depth_crustal_profiles.py,sha256=Vvtw3-_xuWIYEuhuDzXstkprluXyBkUcdm9iP7qBwyQ,19754
 DiadFit/diads.py,sha256=gwHWTquJeoJaBYEYjJcJct38j6Bi-GUUsFCPsFgCFzU,179483
- DiadFit/error_propagation.py,sha256=ZN9EspONh_vUGxBHxxWNkYskKqFMRvJMNr2h2RXv-54,50624
+ DiadFit/error_propagation.py,sha256=Evka2vsHQmgekO1xft-AkjxvkMdwAVawNVbWSe7SZIQ,50734
 DiadFit/importing_data_files.py,sha256=j7cSEPZ6iKmYnSqYEIcCl7YNdqqkCD56W-4V9T2oWOE,52010
 DiadFit/lookup_table.csv,sha256=Hs1tmSQ9ArTUDv3ymEXbvnLlPBxYUP0P51dz7xAKk-Q,2946857
 DiadFit/lookup_table_noneg.csv,sha256=HelvewKbBy4cqT2GAqsMo-1ps1lBYqZ-8hCJZWPGfhI,3330249
 DiadFit/molar_gas_proportions.py,sha256=3zc5t037L11w_hCYJqV4Xp4NwVCmGb3gMp1McAhV0TM,9315
- DiadFit/ne_lines.py,sha256=KR1s33pZB_O8e3irtDLdBpepGwUjxWudjJNyHyXqfbg,63980
+ DiadFit/ne_lines.py,sha256=kY_6ThigSc21ONyo38yq1gBhzlyUPYcfbYC50nIoiRs,64091
 DiadFit/relaxfi_PW.py,sha256=vXXW9JjEBRf0UR9p-DJLx8j4Z2ePpUDweoAok-2nMJ0,32119
 DiadFit/relaxifi.py,sha256=DSHAUP0tnkiMrHQgQPBK-9P3cWYmegURKzYOUgdAlos,38569
- DiadFit-1.0.9.dist-info/METADATA,sha256=8cAHxsD398JkFantbqsbVF1BMvHOes3KgsTkvCBjRWE,1171
- DiadFit-1.0.9.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
- DiadFit-1.0.9.dist-info/top_level.txt,sha256=yZC6OFLVznaFA5kcPlFPkvhKotcVd-YO4bKxZZw3LQE,8
- DiadFit-1.0.9.dist-info/RECORD,,
+ DiadFit/smoothed_polyfit_June25_UCB.pkl,sha256=I_AHj8uJVv1lPfir6QZa-EmOzSp8yDAWUZH2ZTE-0KI,2596
+ DiadFit-1.0.11.dist-info/LICENSE.txt,sha256=wM112Xn2cOj94pg0LUBgB-7Vs2y8KZ4bzrhIK-R72WE,1707
+ DiadFit-1.0.11.dist-info/METADATA,sha256=kyVNmvm8Kx6fbPb-PsYKxFn8Byh1o2jhxoQMccqD0Bo,1172
+ DiadFit-1.0.11.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+ DiadFit-1.0.11.dist-info/top_level.txt,sha256=yZC6OFLVznaFA5kcPlFPkvhKotcVd-YO4bKxZZw3LQE,8
+ DiadFit-1.0.11.dist-info/RECORD,,