DiadFit 1.0.8.tar.gz → 1.0.10.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. DiadFit-1.0.10/LICENSE.txt +16 -0
  2. {DiadFit-1.0.8 → DiadFit-1.0.10}/PKG-INFO +1 -1
  3. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/_version.py +1 -1
  4. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/densimeter_fitting.py +1 -0
  5. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/densimeters.py +619 -1
  6. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/diads.py +0 -47
  7. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/error_propagation.py +9 -6
  8. DiadFit-1.0.10/src/DiadFit/molar_gas_proportions.py +231 -0
  9. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/ne_lines.py +13 -9
  10. DiadFit-1.0.10/src/DiadFit/smoothed_polyfit_June25_UCB.pkl +0 -0
  11. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit.egg-info/PKG-INFO +1 -1
  12. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit.egg-info/SOURCES.txt +2 -0
  13. DiadFit-1.0.8/src/DiadFit/molar_gas_proportions.py +0 -183
  14. {DiadFit-1.0.8 → DiadFit-1.0.10}/README.md +0 -0
  15. {DiadFit-1.0.8 → DiadFit-1.0.10}/setup.cfg +0 -0
  16. {DiadFit-1.0.8 → DiadFit-1.0.10}/setup.py +0 -0
  17. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/CO2_EOS.py +0 -0
  18. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/CO2_in_bubble_error.py +0 -0
  19. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/H2O_fitting.py +0 -0
  20. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Highrho_polyfit_data.pkl +0 -0
  21. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Highrho_polyfit_dataUCB_1117_1400.pkl +0 -0
  22. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Highrho_polyfit_dataUCB_1117_1447.pkl +0 -0
  23. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Highrho_polyfit_dataUCB_1220_1400.pkl +0 -0
  24. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Highrho_polyfit_dataUCB_1220_1447.pkl +0 -0
  25. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Highrho_polyfit_dataUCB_1220_1567.pkl +0 -0
  26. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Highrho_polyfit_data_CCMR.pkl +0 -0
  27. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Highrho_polyfit_data_CMASS.pkl +0 -0
  28. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Highrho_polyfit_data_CMASS_24C.pkl +0 -0
  29. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Lowrho_polyfit_data.pkl +0 -0
  30. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Lowrho_polyfit_dataUCB_1117_1400.pkl +0 -0
  31. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Lowrho_polyfit_dataUCB_1117_1447.pkl +0 -0
  32. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Lowrho_polyfit_dataUCB_1220_1400.pkl +0 -0
  33. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Lowrho_polyfit_dataUCB_1220_1447.pkl +0 -0
  34. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Lowrho_polyfit_dataUCB_1220_1567.pkl +0 -0
  35. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Lowrho_polyfit_data_CCMR.pkl +0 -0
  36. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Lowrho_polyfit_data_CMASS.pkl +0 -0
  37. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Lowrho_polyfit_data_CMASS_24C.pkl +0 -0
  38. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Mediumrho_polyfit_data.pkl +0 -0
  39. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Mediumrho_polyfit_dataUCB_1117_1400.pkl +0 -0
  40. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Mediumrho_polyfit_dataUCB_1117_1447.pkl +0 -0
  41. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Mediumrho_polyfit_dataUCB_1220_1400.pkl +0 -0
  42. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Mediumrho_polyfit_dataUCB_1220_1447.pkl +0 -0
  43. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Mediumrho_polyfit_dataUCB_1220_1567.pkl +0 -0
  44. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Mediumrho_polyfit_data_CCMR.pkl +0 -0
  45. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Mediumrho_polyfit_data_CMASS.pkl +0 -0
  46. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/Psensor.py +0 -0
  47. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/__init__.py +0 -0
  48. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/argon_lines.py +0 -0
  49. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/cosmicray_filter.py +0 -0
  50. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/density_depth_crustal_profiles.py +0 -0
  51. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/importing_data_files.py +0 -0
  52. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/lookup_table.csv +0 -0
  53. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/lookup_table_noneg.csv +0 -0
  54. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/relaxfi_PW.py +0 -0
  55. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit/relaxifi.py +0 -0
  56. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit.egg-info/dependency_links.txt +0 -0
  57. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit.egg-info/requires.txt +0 -0
  58. {DiadFit-1.0.8 → DiadFit-1.0.10}/src/DiadFit.egg-info/top_level.txt +0 -0
@@ -0,0 +1,16 @@
+LICENSE
+
+Copyright (c) [2025] [Penny Wieser]
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+Special Clause on GUI Usage
+
+Use of DiadFit in graphical user interfaces (GUIs), including but not limited to applications that provide point-and-click access to DiadFit’s functions, requires prior written permission from the author.
+
+This requirement exists because of a broader issue in the geoscience community: tools that wrap or interface with core scientific software often receive the bulk of citations, while foundational packages like DiadFit go unrecognized—particularly when journals impose citation limits. DiadFit represents a significant and ongoing labor investment. Responsible citation and acknowledgment are necessary to support its continued development and maintenance.
+
+THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: DiadFit
-Version: 1.0.8
+Version: 1.0.10
 Summary: DiadFit
 Home-page: https://github.com/PennyWieser/DiadFit
 Author: Penny Wieser
@@ -5,4 +5,4 @@
 # 1) we don't load dependencies by storing it in __init__.py
 # 2) we can import it in setup.py for the same reason
 # 3) we can import it into your module
-__version__ = '1.0.8'
+__version__ = '1.0.10'
@@ -15,6 +15,7 @@ from scipy import stats
 
 encode="ISO-8859-1"
 
+## Files are fit using the folder UCB_cali_fitting_1117_1447
 
 def calculate_generic_std_err_values(*, pickle_str, new_x, CI=0.67):
 
@@ -994,7 +994,7 @@ CI_split=0.67, CI_neon=0.67, Ne_pickle_str=None, pref_Ne=None, Ne_err=None, cor
     corrected_split: pd.Series
         Corrected splitting (cm-1)
 
-    Split_err: float, int
+    split_err: float, int
         Error on corrected splitting
 
     temp: str
@@ -1370,6 +1370,9 @@ CI_split=0.67, CI_neon=0.67, Ne_pickle_str=None, pref_Ne=None, Ne_err=None, cor
 
     return df_merge
 
+
+
+
 ## Method from FLuids laboratory from FRANCIS Program
 
 
@@ -1433,3 +1436,618 @@ def Francis_pureCO2(FDS, FDS_std, uncer_FDS, uncer_FDS_std=0):
     return df
 
 
+## Shifted polynomial
+
+import pickle
+
+def blend_weights(x, x_min, x_max):
+    """Cosine smooth blend between x_min and x_max."""
+    t = np.clip((x - x_min) / (x_max - x_min), 0, 1)
+    return 0.5 * (1 - np.cos(np.pi * t))
+
+def build_piecewise_poly_by_density(x, y, y_bounds=(0.17, 0.65), degrees=(1, 3, 2), blend_width=0.05, save_path=None):
+    """
+    Fits and optionally saves a smoothed piecewise polynomial model.
+
+    Returns:
+        f_base : callable
+        model_data : dict (can be pickled)
+    """
+    x = np.asarray(x)
+    y = np.asarray(y)
+
+    mask_low = y < y_bounds[0]
+    mask_mid = (y >= y_bounds[0]) & (y <= y_bounds[1])
+    mask_high = y > y_bounds[1]
+
+    polys = []
+    coeffs = []
+    for mask, deg in zip([mask_low, mask_mid, mask_high], degrees):
+        c = np.polyfit(x[mask], y[mask], deg)
+        coeffs.append(c)
+        polys.append(np.poly1d(c))
+
+    x_low_med = x[np.abs(y - y_bounds[0]).argmin()]
+    x_med_high = x[np.abs(y - y_bounds[1]).argmin()]
+
+    def f_base(x_input):
+        x_arr = np.asarray(x_input)
+        result = np.full_like(x_arr, np.nan, dtype=float)
+
+        low_mask = x_arr < (x_low_med - blend_width)
+        mid_mask = (x_arr > (x_low_med + blend_width)) & (x_arr < (x_med_high - blend_width))
+        high_mask = x_arr > (x_med_high + blend_width)
+
+        result[low_mask] = polys[0](x_arr[low_mask])
+        result[mid_mask] = polys[1](x_arr[mid_mask])
+        result[high_mask] = polys[2](x_arr[high_mask])
+
+        blend_lm = (x_arr >= (x_low_med - blend_width)) & (x_arr <= (x_low_med + blend_width))
+        w_lm = blend_weights(x_arr[blend_lm], x_low_med - blend_width, x_low_med + blend_width)
+        result[blend_lm] = (1 - w_lm) * polys[0](x_arr[blend_lm]) + w_lm * polys[1](x_arr[blend_lm])
+
+        blend_mh = (x_arr >= (x_med_high - blend_width)) & (x_arr <= (x_med_high + blend_width))
+        w_mh = blend_weights(x_arr[blend_mh], x_med_high - blend_width, x_med_high + blend_width)
+        result[blend_mh] = (1 - w_mh) * polys[1](x_arr[blend_mh]) + w_mh * polys[2](x_arr[blend_mh])
+
+        return result
+
+    model_data = {
+        'coeffs': coeffs,
+        'y_bounds': y_bounds,
+        'degrees': degrees,
+        'blend_width': blend_width,
+        'x_low_med': x_low_med,
+        'x_med_high': x_med_high,
+        'x': x,
+        'y': y
+    }
+    if save_path:
+        with open(save_path, 'wb') as f:
+            pickle.dump(model_data, f)
+
+    return f_base, model_data
+
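For reviewers, a minimal usage sketch of the new piecewise densimeter fit (not part of the diff; the synthetic calibration data and the file name my_densimeter.pkl are illustrative assumptions):

import numpy as np
from DiadFit.densimeters import build_piecewise_poly_by_density

# Toy calibration: corrected splitting (cm-1) vs. CO2 density (g/cm3),
# monotonic so the low/mid/high density masks are all populated
split = np.linspace(102.7, 104.9, 200)
rho = 0.05 + 0.4 * (split - split.min())

# Degree-1/3/2 polynomials below 0.17, between 0.17 and 0.65, and above
# 0.65 g/cm3, cosine-blended over +/-0.05 cm-1 around the breakpoints
f_base, model_data = build_piecewise_poly_by_density(
    split, rho, y_bounds=(0.17, 0.65), degrees=(1, 3, 2),
    blend_width=0.05, save_path='my_densimeter.pkl')

print(f_base(np.array([103.2, 104.1])))  # predicted densities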
+def blend_weights(x, x_min, x_max):
+    t = np.clip((x - x_min) / (x_max - x_min), 0, 1)
+    return 0.5 * (1 - np.cos(np.pi * t))
+
+def load_piecewise_model(model_data):
+    coeffs = model_data['coeffs']
+    blend_width = model_data['blend_width']
+    x_low_med = model_data['x_low_med']
+    x_med_high = model_data['x_med_high']
+    polys = [np.poly1d(c) for c in coeffs]
+
+    vertical_shift = model_data.get('vertical_shift', 0)
+
+    def f_base(x_input):
+        x_arr = np.asarray(x_input)
+        result = np.full_like(x_arr, np.nan, dtype=float)
+
+        low_mask = x_arr < (x_low_med - blend_width)
+        mid_mask = (x_arr > (x_low_med + blend_width)) & (x_arr < (x_med_high - blend_width))
+        high_mask = x_arr > (x_med_high + blend_width)
+
+        result[low_mask] = polys[0](x_arr[low_mask])
+        result[mid_mask] = polys[1](x_arr[mid_mask])
+        result[high_mask] = polys[2](x_arr[high_mask])
+
+        blend_lm = (x_arr >= (x_low_med - blend_width)) & (x_arr <= (x_low_med + blend_width))
+        w_lm = blend_weights(x_arr[blend_lm], x_low_med - blend_width, x_low_med + blend_width)
+        result[blend_lm] = (1 - w_lm) * polys[0](x_arr[blend_lm]) + w_lm * polys[1](x_arr[blend_lm])
+
+        blend_mh = (x_arr >= (x_med_high - blend_width)) & (x_arr <= (x_med_high + blend_width))
+        w_mh = blend_weights(x_arr[blend_mh], x_med_high - blend_width, x_med_high + blend_width)
+        result[blend_mh] = (1 - w_mh) * polys[1](x_arr[blend_mh]) + w_mh * polys[2](x_arr[blend_mh])
+
+        return result + vertical_shift
+
+    return f_base
+
+
+## New function that is much simpler
+
+def calculate_density_ucb_new(*, df_combo=None, temp='SupCrit',
+    CI_split=0.67, CI_neon=0.67, Ne_pickle_str=None, pref_Ne=None, Ne_err=None, corrected_split=None, split_err=None, shift=0):
+    """ This function converts diad splitting into CO$_2$ density using the UC Berkeley calibration line
+    developed by DeVitre and Wieser in 2023.
+
+    Parameters
+    -------------
+    Either:
+
+    df_combo: pandas DataFrame
+        Data frame of peak fitting information
+
+    Or:
+
+    corrected_split: pd.Series
+        Corrected splitting (cm-1)
+
+    split_err: float, int
+        Error on corrected splitting
+
+    temp: str
+        'SupCrit' if measurements done at 37C
+        'RoomT' if measurements done at 24C - not supported yet, but could be added if needed.
+
+    CI_neon: float
+        Default 0.67. Confidence interval to use, e.g. 0.67 returns 1 sigma uncertainties. If you use another number,
+        note the column headings will still say sigma.
+
+    CI_split: float
+        Default 0.67. Confidence interval to use, e.g. 0.67 returns 1 sigma uncertainties. If you use another number,
+        note the column headings will still say sigma.
+
+    Either:
+
+    Ne_pickle_str: str
+        Name of Ne correction model
+
+    Or:
+
+    pref_Ne, Ne_err: float, int
+        For quick and dirty fitting, you can pass a preferred value for your instrument before you have a chance to
+        regress the Ne lines (useful when first analysing new samples).
+
+    Returns
+    --------------
+    pd.DataFrame
+        Preferred density (based on different equations being merged), and intermediate calculations
+
+    """
+    if corrected_split is not None:
+        Split=corrected_split
+    if df_combo is not None:
+        df_combo_c=df_combo.copy()
+        time=df_combo_c['sec since midnight']
+
+        if Ne_pickle_str is not None:
+            # Calculating the upper and lower values for Ne to get that error
+            Ne_corr=calculate_Ne_corr_std_err_values(pickle_str=Ne_pickle_str,
+                new_x=time, CI=CI_neon)
+            # Extracting preferred correction values
+            pref_Ne=Ne_corr['preferred_values']
+            Split_err, pk_err=propagate_error_split_neon_peakfit(Ne_corr=Ne_corr, df_fits=df_combo_c)
+
+            df_combo_c['Corrected_Splitting_σ']=Split_err
+            df_combo_c['Corrected_Splitting_σ_Ne']=(Ne_corr['upper_values']*df_combo_c['Splitting']-Ne_corr['lower_values']*df_combo_c['Splitting'])/2
+            df_combo_c['Corrected_Splitting_σ_peak_fit']=pk_err
+
+        # If using a single value for quick, dirty fitting
+        else:
+            Split_err, pk_err=propagate_error_split_neon_peakfit(df_fits=df_combo_c, Ne_err=Ne_err, pref_Ne=pref_Ne)
+
+            df_combo_c['Corrected_Splitting_σ']=Split_err
+            df_combo_c['Corrected_Splitting_σ_Ne']=((Ne_err+pref_Ne)*df_combo_c['Splitting']-(Ne_err-pref_Ne)*df_combo_c['Splitting'])/2
+            df_combo_c['Corrected_Splitting_σ_peak_fit']=pk_err
+
+        Split=df_combo_c['Splitting']*pref_Ne
+
+    # This is for when you just have the splitting
+    else:
+        Split_err=split_err
+
+    # This propagates the uncertainty in the splitting from peak fitting and the Ne correction model
+    if temp=='RoomT':
+        raise TypeError('Sorry, no UC Berkeley calibration at 24C, please enter temp=SupCrit')
+    if isinstance(Split, float) or isinstance(Split, int):
+        Split=pd.Series(Split)
+
+    DiadFit_dir=Path(__file__).parent
+
+    # Load the new smoothed model
+    with open(DiadFit_dir / "smoothed_polyfit_June25_UCB.pkl", 'rb') as f:
+        smoothed_model_data = pickle.load(f)
+
+    smoothed_model = load_piecewise_model(smoothed_model_data)
+
+    # Evaluate the model
+    Density = pd.Series(smoothed_model(Split), index=Split.index)
+
+    # Now get the error
+    err_df = calculate_Densimeter_std_err_values_smooth(
+        model_data=smoothed_model_data,
+        corrected_split=Split,
+        corrected_split_err=Split_err,
+        CI_dens=0.67,
+        CI_split=0.67,
+        str_d='Smoothed'
+    )
+
+    df=pd.DataFrame(data={'Density g/cm3': Density+shift,
+        'σ Density g/cm3': err_df['Smoothed_Density_σ'],
+        'σ Density g/cm3 (from Ne+peakfit)': err_df['Smoothed_Density_σ_split'],
+        'σ Density g/cm3 (from densimeter)': err_df['Smoothed_Density_σ_dens'],
+        'Corrected_Splitting': Split,
+        'Preferred D_σ_Ne': 0,
+        'in range': 'in progress',
+        'Temperature': temp})
+
+    if Ne_pickle_str is not None:
+        df_merge1=pd.concat([df_combo_c, Ne_corr], axis=1).reset_index(drop=True)
+        df_merge=pd.concat([df, df_merge1], axis=1).reset_index(drop=True)
+    elif Ne_pickle_str is None and df_combo is not None:
+        df_merge=pd.concat([df, df_combo_c], axis=1).reset_index(drop=True)
+    else:
+        df_merge=df
+
+    if Ne_pickle_str is not None:  # If a Ne model was used, we have all the columns for Ne
+        cols_to_move = ['filename', 'Density g/cm3', 'σ Density g/cm3', 'σ Density g/cm3 (from Ne+peakfit)', 'σ Density g/cm3 (from densimeter)',
+            'Corrected_Splitting', 'Corrected_Splitting_σ',
+            'Corrected_Splitting_σ_Ne', 'Corrected_Splitting_σ_peak_fit', 'power (mW)', 'Spectral Center']
+        df_merge = df_merge[cols_to_move + [
+            col for col in df_merge.columns if col not in cols_to_move]]
+
+    elif pref_Ne is not None and df_combo is not None:  # If a fixed pref_Ne was used instead
+        cols_to_move = ['filename', 'Density g/cm3', 'σ Density g/cm3', 'σ Density g/cm3 (from Ne+peakfit)', 'σ Density g/cm3 (from densimeter)',
+            'Corrected_Splitting', 'Corrected_Splitting_σ',
+            'Corrected_Splitting_σ_Ne', 'Corrected_Splitting_σ_peak_fit']
+        df_merge = df_merge[cols_to_move + [
+            col for col in df_merge.columns if col not in cols_to_move]]
+
+    elif df_combo is None:
+        cols_to_move = ['Density g/cm3', 'σ Density g/cm3', 'σ Density g/cm3 (from Ne+peakfit)', 'σ Density g/cm3 (from densimeter)',
+            'Corrected_Splitting']
+        df_merge = df_merge[cols_to_move + [
+            col for col in df_merge.columns if col not in cols_to_move]]
+
+    return df_merge
+
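A quick usage sketch of calculate_density_ucb_new (not part of the diff; the splitting values are illustrative). Passing corrected_split and split_err skips the Ne correction branch and evaluates the bundled smoothed_polyfit_June25_UCB.pkl model (file 10 in the list above) directly:

import pandas as pd
from DiadFit.densimeters import calculate_density_ucb_new

# Quick-and-dirty route: already-corrected splittings and a single error value
split = pd.Series([103.10, 103.55, 104.20])  # corrected splitting (cm-1)
dens_df = calculate_density_ucb_new(
    corrected_split=split, split_err=0.01, temp='SupCrit')

print(dens_df[['Density g/cm3', 'σ Density g/cm3', 'Corrected_Splitting']])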
+from scipy.stats import t
+import numpy as np
+import pandas as pd
+
+
+##
+def calculate_Densimeter_std_err_values_smooth(
+    *,
+    model_data,
+    corrected_split,
+    corrected_split_err,
+    CI_dens=0.67,
+    CI_split=0.67,
+    str_d='Smoothed',
+    x=None,
+    y=None
+):
+    """
+    Calculates propagated uncertainty for a smoothed polynomial model.
+
+    Parameters
+    ----------
+    model_data : dict
+        Dictionary from build_piecewise_poly_by_density including coeffs, blend_width, etc.
+    corrected_split : pd.Series or np.ndarray
+        Corrected splitting values
+    corrected_split_err : float or pd.Series
+        Uncertainty on splitting
+    CI_dens : float
+        Confidence interval for uncertainty in the fit
+    CI_split : float
+        Confidence interval for splitting uncertainty
+    str_d : str
+        Prefix for column names
+    x, y : array-like (optional)
+        Original data used to fit the model, if not included in model_data
+
+    Returns
+    -------
+    pd.DataFrame
+        DataFrame of predicted values and propagated uncertainties
+    """
+    from scipy.stats import t
+    import numpy as np
+    import pandas as pd
+
+    # === Rebuild model ===
+    def load_piecewise_model(model_data):
+        coeffs = model_data['coeffs']
+        blend_width = model_data['blend_width']
+        x_low_med = model_data['x_low_med']
+        x_med_high = model_data['x_med_high']
+        polys = [np.poly1d(c) for c in coeffs]
+
+        def f_base(x_input):
+            x_arr = np.asarray(x_input)
+            result = np.full_like(x_arr, np.nan, dtype=float)
+
+            low_mask = x_arr < (x_low_med - blend_width)
+            mid_mask = (x_arr > (x_low_med + blend_width)) & (x_arr < (x_med_high - blend_width))
+            high_mask = x_arr > (x_med_high + blend_width)
+
+            result[low_mask] = polys[0](x_arr[low_mask])
+            result[mid_mask] = polys[1](x_arr[mid_mask])
+            result[high_mask] = polys[2](x_arr[high_mask])
+
+            blend_lm = (x_arr >= (x_low_med - blend_width)) & (x_arr <= (x_low_med + blend_width))
+            w_lm = 0.5 * (1 - np.cos(np.pi * (x_arr[blend_lm] - (x_low_med - blend_width)) / (2 * blend_width)))
+            result[blend_lm] = (1 - w_lm) * polys[0](x_arr[blend_lm]) + w_lm * polys[1](x_arr[blend_lm])
+
+            blend_mh = (x_arr >= (x_med_high - blend_width)) & (x_arr <= (x_med_high + blend_width))
+            w_mh = 0.5 * (1 - np.cos(np.pi * (x_arr[blend_mh] - (x_med_high - blend_width)) / (2 * blend_width)))
+            result[blend_mh] = (1 - w_mh) * polys[1](x_arr[blend_mh]) + w_mh * polys[2](x_arr[blend_mh])
+
+            return result
+
+        return f_base
+
+    Pf = load_piecewise_model(model_data)
+
+    # Use x/y from model_data if available, else require them as args
+    if 'x' in model_data and 'y' in model_data:
+        x = model_data['x']
+        y = model_data['y']
+    elif x is None or y is None:
+        raise ValueError("You must supply x and y arrays if not included in model_data.")
+
+    residuals = y - Pf(x)
+    residual_std = np.std(residuals)
+
+    mean_x = np.nanmean(x)
+    n = len(x)
+    N_poly = max(len(c) - 1 for c in model_data['coeffs'])
+
+    # Standard error on predictions
+    standard_errors = residual_std * np.sqrt(1 + 1/n + (corrected_split - mean_x)**2 / np.sum((x - mean_x)**2))
+    dfree = n - (N_poly + 1)
+
+    t_value_split = t.ppf((1 + CI_split) / 2, dfree)
+    t_value_dens = t.ppf((1 + CI_dens) / 2, dfree)
+
+    # Central prediction
+    preferred_values = Pf(corrected_split)
+    lower_values = preferred_values - t_value_dens * standard_errors
+    upper_values = preferred_values + t_value_dens * standard_errors
+    uncertainty_from_dens = (upper_values - lower_values) / 2
+
+    # Splitting propagation
+    max_split = corrected_split + corrected_split_err
+    min_split = corrected_split - corrected_split_err
+    max_density = Pf(max_split)
+    min_density = Pf(min_split)
+    uncertainty_split = (max_density - min_density) / 2
+
+    total_uncertainty = np.sqrt(uncertainty_split ** 2 + uncertainty_from_dens ** 2)
+
+    return pd.DataFrame({
+        f'{str_d}_Density': preferred_values,
+        f'{str_d}_Density_σ': total_uncertainty,
+        f'{str_d}_Density+1σ': preferred_values + total_uncertainty,
+        f'{str_d}_Density-1σ': preferred_values - total_uncertainty,
+        f'{str_d}_Density_σ_dens': uncertainty_from_dens,
+        f'{str_d}_Density_σ_split': uncertainty_split
+    })
+
+
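The standard_errors expression above is the usual out-of-sample prediction standard error for a regression, s * sqrt(1 + 1/n + (x0 - mean(x))^2 / sum((xi - mean(x))^2)), scaled by a Student-t quantile with n - (N_poly + 1) degrees of freedom. A standalone numeric check (illustrative numbers only, not part of the diff):

import numpy as np
from scipy.stats import t

x = np.linspace(102.7, 104.9, 50)   # calibration splittings
s, x0, CI, N_poly = 0.004, 103.2, 0.67, 3

# Prediction SE at a new point x0, and the CI half-width in density units
se = s * np.sqrt(1 + 1/len(x) + (x0 - x.mean())**2 / np.sum((x - x.mean())**2))
half_width = t.ppf((1 + CI) / 2, len(x) - (N_poly + 1)) * se
print(se, half_width)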
1859
+ from pathlib import Path
1860
+ import pickle
1861
+
1862
+ def calculate_density_labx(
1863
+ *,
1864
+ df_combo=None,
1865
+ temp='SupCrit',
1866
+ CI_split=0.67,
1867
+ CI_neon=0.67,
1868
+ Ne_pickle_str=None,
1869
+ pref_Ne=None,
1870
+ Ne_err=None,
1871
+ corrected_split=None,
1872
+ split_err=None,
1873
+ model_pickle_path=None
1874
+ ):
1875
+ import pandas as pd
1876
+ import numpy as np
1877
+ from DiadFit.densimeters import calculate_Ne_corr_std_err_values, propagate_error_split_neon_peakfit
1878
+ from DiadFit.densimeters import calculate_Densimeter_std_err_values_smooth, load_piecewise_model
1879
+
1880
+ if corrected_split is not None:
1881
+ Split = corrected_split
1882
+
1883
+ if df_combo is not None:
1884
+ df_combo_c = df_combo.copy()
1885
+ time = df_combo_c['sec since midnight']
1886
+
1887
+ if Ne_pickle_str is not None:
1888
+ Ne_corr = calculate_Ne_corr_std_err_values(pickle_str=Ne_pickle_str, new_x=time, CI=CI_neon)
1889
+ pref_Ne = Ne_corr['preferred_values']
1890
+ Split_err, pk_err = propagate_error_split_neon_peakfit(Ne_corr=Ne_corr, df_fits=df_combo_c)
1891
+ df_combo_c['Corrected_Splitting_σ'] = Split_err
1892
+ df_combo_c['Corrected_Splitting_σ_Ne'] = (
1893
+ (Ne_corr['upper_values'] * df_combo_c['Splitting'] -
1894
+ Ne_corr['lower_values'] * df_combo_c['Splitting']) / 2
1895
+ )
1896
+ df_combo_c['Corrected_Splitting_σ_peak_fit'] = pk_err
1897
+ else:
1898
+ Split_err, pk_err = propagate_error_split_neon_peakfit(
1899
+ df_fits=df_combo_c, Ne_err=Ne_err, pref_Ne=pref_Ne
1900
+ )
1901
+ df_combo_c['Corrected_Splitting_σ'] = Split_err
1902
+ df_combo_c['Corrected_Splitting_σ_Ne'] = (
1903
+ ((Ne_err + pref_Ne) * df_combo_c['Splitting'] -
1904
+ (Ne_err - pref_Ne) * df_combo_c['Splitting']) / 2
1905
+ )
1906
+ df_combo_c['Corrected_Splitting_σ_peak_fit'] = pk_err
1907
+
1908
+ Split = df_combo_c['Splitting'] * pref_Ne
1909
+ else:
1910
+ Split_err = split_err
1911
+
1912
+ if temp == 'RoomT':
1913
+ raise TypeError('No calibration available at 24C, please use temp="SupCrit"')
1914
+ if isinstance(Split, (float, int)):
1915
+ import pandas as pd
1916
+ Split = pd.Series(Split)
1917
+
1918
+ if model_pickle_path is None:
1919
+ raise ValueError("You must provide a path to the LabX model pickle using `model_pickle_path`.")
1920
+
1921
+ with open(Path(model_pickle_path), 'rb') as f:
1922
+ model_data = pickle.load(f)
1923
+
1924
+ model = load_piecewise_model(model_data)
1925
+ Density = pd.Series(model(Split), index=Split.index)
1926
+
1927
+ err_df = calculate_Densimeter_std_err_values_smooth(
1928
+ model_data=model_data,
1929
+ corrected_split=Split,
1930
+ corrected_split_err=Split_err,
1931
+ CI_dens=CI_split,
1932
+ CI_split=CI_split,
1933
+ str_d='LabX'
1934
+ )
1935
+
1936
+ df = pd.DataFrame(data={
1937
+ 'Density g/cm3': Density,
1938
+ 'σ Density g/cm3': err_df['LabX_Density_σ'],
1939
+ 'σ Density g/cm3 (from Ne+peakfit)': err_df['LabX_Density_σ_split'],
1940
+ 'σ Density g/cm3 (from densimeter)': err_df['LabX_Density_σ_dens'],
1941
+ 'Corrected_Splitting': Split,
1942
+ 'Preferred D_σ_Ne': 0,
1943
+ 'in range': 'in progress',
1944
+ 'Temperature': temp
1945
+ })
1946
+
1947
+ if Ne_pickle_str is not None:
1948
+ df_merge1 = pd.concat([df_combo_c, Ne_corr], axis=1).reset_index(drop=True)
1949
+ df_merge = pd.concat([df, df_merge1], axis=1).reset_index(drop=True)
1950
+ elif df_combo is not None:
1951
+ df_merge = pd.concat([df, df_combo_c], axis=1).reset_index(drop=True)
1952
+ else:
1953
+ df_merge = df
1954
+
1955
+ return df_merge
1956
+
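The same quick route works for calculate_density_labx, pointing model_pickle_path at a lab-specific pickle (here the illustrative my_densimeter.pkl from the first sketch; not part of the diff):

import pandas as pd
from DiadFit.densimeters import calculate_density_labx

split = pd.Series([103.10, 103.55, 104.20])
labx_df = calculate_density_labx(
    corrected_split=split, split_err=0.01,
    temp='SupCrit', model_pickle_path='my_densimeter.pkl')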
+## Way to actually shift the densimeter
+
+# This general version works for any pickle you load in.
+def apply_and_save_vertical_shift_to_model(*, pickle_in_path, new_x, new_y, pickle_out_path=None):
+    """
+    Applies a vertical shift to a saved piecewise model based on new_x and new_y,
+    then saves the shifted model to a new .pkl file.
+
+    Parameters
+    ----------
+    pickle_in_path : str
+        Path to the original .pkl file (output from build_piecewise_poly_by_density).
+    new_x : array-like
+        Corrected splitting values (x).
+    new_y : array-like
+        Measured density values (y).
+    pickle_out_path : str, optional
+        Where to save the new model. If None, appends '_shifted.pkl' to the input path.
+
+    Returns
+    -------
+    shift : float
+        Vertical shift applied to the model.
+    """
+    import pickle
+    import numpy as np
+
+    # Load the model
+    with open(pickle_in_path, 'rb') as f:
+        model_data = pickle.load(f)
+
+    # Rebuild the base function
+    base_model = load_piecewise_model(model_data)
+    f_vals = base_model(new_x)
+
+    # Calculate the vertical shift
+    shift = np.nanmean(new_y - f_vals)
+
+    # Store the shift
+    model_data['vertical_shift'] = shift
+
+    # Save the new .pkl
+    if pickle_out_path is None:
+        pickle_out_path = pickle_in_path.replace('.pkl', '_shifted.pkl')
+
+    with open(pickle_out_path, 'wb') as f:
+        pickle.dump(model_data, f)
+
+    return shift
+
+
+def apply_and_save_vertical_shift_to_ucb_densimeter(new_x, new_y):
+    """
+    Computes the vertical shift between the bundled UC Berkeley model
+    (smoothed_polyfit_June25_UCB.pkl) and new calibration data.
+
+    Parameters
+    ----------
+    new_x : array-like
+        Corrected splitting values (x).
+    new_y : array-like
+        Measured density values (y).
+
+    Returns
+    -------
+    shift : float
+        Vertical shift between the new data and the bundled model. Unlike
+        apply_and_save_vertical_shift_to_model, this version does not write a
+        new .pkl file.
+    """
+    DiadFit_dir = Path(__file__).parent
+
+    with open(DiadFit_dir / "smoothed_polyfit_June25_UCB.pkl", 'rb') as f:
+        model_data = pickle.load(f)
+
+    base_model = load_piecewise_model(model_data)
+    f_vals = base_model(new_x)
+
+    shift = np.nanmean(new_y - f_vals)
+    model_data['vertical_shift'] = shift
+
+    return shift
+
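Finally, a sketch of the vertical-shift round trip (file names illustrative, carried over from the earlier sketches; not part of the diff). apply_and_save_vertical_shift_to_model writes a *_shifted.pkl whose stored vertical_shift is then applied automatically by load_piecewise_model:

import pickle
import numpy as np
from DiadFit.densimeters import (apply_and_save_vertical_shift_to_model,
                                 load_piecewise_model)

# Recalibration data for this instrument (illustrative values)
new_split = np.array([103.0, 103.6, 104.3])
new_rho = np.array([0.21, 0.47, 0.80])

# The shift is the mean offset between the measured densities and the
# unshifted model; the shifted model is saved next to the input pickle
shift = apply_and_save_vertical_shift_to_model(
    pickle_in_path='my_densimeter.pkl', new_x=new_split, new_y=new_rho)

# load_piecewise_model picks up 'vertical_shift' from the dict automatically
with open('my_densimeter_shifted.pkl', 'rb') as f:
    shifted = load_piecewise_model(pickle.load(f))
print(shift, shifted(new_split))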