AeroViz 0.1.5__py3-none-any.whl → 0.1.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of AeroViz might be problematic. Click here for more details.

Files changed (59)
  1. AeroViz/dataProcess/Chemistry/_mass_volume.py +4 -3
  2. AeroViz/dataProcess/Chemistry/_ocec.py +20 -7
  3. AeroViz/dataProcess/Optical/_IMPROVE.py +2 -3
  4. AeroViz/dataProcess/SizeDistr/__init__.py +6 -10
  5. AeroViz/plot/__init__.py +1 -0
  6. AeroViz/plot/meteorology/meteorology.py +2 -0
  7. AeroViz/plot/optical/optical.py +1 -1
  8. AeroViz/plot/pie.py +14 -2
  9. AeroViz/plot/radar.py +184 -0
  10. AeroViz/plot/scatter.py +16 -7
  11. AeroViz/plot/templates/koschmieder.py +11 -8
  12. AeroViz/plot/timeseries/timeseries.py +0 -1
  13. AeroViz/rawDataReader/__init__.py +75 -70
  14. AeroViz/rawDataReader/config/supported_instruments.py +70 -38
  15. AeroViz/rawDataReader/core/__init__.py +208 -178
  16. AeroViz/rawDataReader/script/AE33.py +1 -1
  17. AeroViz/rawDataReader/script/AE43.py +1 -1
  18. AeroViz/rawDataReader/script/APS_3321.py +2 -2
  19. AeroViz/rawDataReader/script/Aurora.py +1 -1
  20. AeroViz/rawDataReader/script/BC1054.py +1 -1
  21. AeroViz/rawDataReader/script/EPA.py +39 -0
  22. AeroViz/rawDataReader/script/GRIMM.py +1 -1
  23. AeroViz/rawDataReader/script/IGAC.py +6 -23
  24. AeroViz/rawDataReader/script/MA350.py +1 -1
  25. AeroViz/rawDataReader/script/Minion.py +102 -30
  26. AeroViz/rawDataReader/script/NEPH.py +1 -1
  27. AeroViz/rawDataReader/script/{Sunset_OCEC.py → OCEC.py} +2 -2
  28. AeroViz/rawDataReader/script/SMPS.py +77 -0
  29. AeroViz/rawDataReader/script/TEOM.py +2 -2
  30. AeroViz/rawDataReader/script/VOC.py +2 -2
  31. AeroViz/rawDataReader/script/XRF.py +11 -0
  32. AeroViz/rawDataReader/script/__init__.py +4 -6
  33. {AeroViz-0.1.5.dist-info → AeroViz-0.1.7.dist-info}/METADATA +57 -32
  34. {AeroViz-0.1.5.dist-info → AeroViz-0.1.7.dist-info}/RECORD +37 -55
  35. AeroViz/process/__init__.py +0 -31
  36. AeroViz/process/core/DataProc.py +0 -19
  37. AeroViz/process/core/SizeDist.py +0 -90
  38. AeroViz/process/core/__init__.py +0 -4
  39. AeroViz/process/method/PyMieScatt_update.py +0 -567
  40. AeroViz/process/method/__init__.py +0 -2
  41. AeroViz/process/method/mie_theory.py +0 -260
  42. AeroViz/process/method/prop.py +0 -62
  43. AeroViz/process/script/AbstractDistCalc.py +0 -143
  44. AeroViz/process/script/Chemical.py +0 -177
  45. AeroViz/process/script/IMPACT.py +0 -49
  46. AeroViz/process/script/IMPROVE.py +0 -161
  47. AeroViz/process/script/Others.py +0 -65
  48. AeroViz/process/script/PSD.py +0 -103
  49. AeroViz/process/script/PSD_dry.py +0 -93
  50. AeroViz/process/script/__init__.py +0 -5
  51. AeroViz/process/script/retrieve_RI.py +0 -69
  52. AeroViz/rawDataReader/script/EPA_vertical.py +0 -46
  53. AeroViz/rawDataReader/script/SMPS_TH.py +0 -41
  54. AeroViz/rawDataReader/script/SMPS_aim11.py +0 -51
  55. AeroViz/rawDataReader/script/SMPS_genr.py +0 -51
  56. AeroViz/rawDataReader/script/Table.py +0 -27
  57. {AeroViz-0.1.5.dist-info → AeroViz-0.1.7.dist-info}/LICENSE +0 -0
  58. {AeroViz-0.1.5.dist-info → AeroViz-0.1.7.dist-info}/WHEEL +0 -0
  59. {AeroViz-0.1.5.dist-info → AeroViz-0.1.7.dist-info}/top_level.txt +0 -0
@@ -1,161 +0,0 @@
1
- from pathlib import Path
2
- from typing import Literal
3
-
4
- from pandas import read_csv, concat, read_json
5
-
6
- from AeroViz.process.core import DataProc
7
- from AeroViz.tools.datareader import DataReader
8
-
9
-
10
- class ImproveProc(DataProc):
11
- """
12
- A class for process improved chemical data.
13
-
14
- Parameters:
15
- -----------
16
- reset : bool, optional
17
- If True, resets the process. Default is False.
18
- filename : str, optional
19
- The name of the file to process. Default is None.
20
- version : str, optional
21
- The version of the data process. Should be one of 'revised' or 'modified'.
22
- Default is None.
23
-
24
- Methods:
25
- --------
26
- revised(_df):
27
- Calculate revised version of particle contribution.
28
-
29
- modified(_df):
30
- Calculate modified version of particle contribution.
31
-
32
- gas(_df):
33
- Calculate gas contribution.
34
-
35
- frh(_RH, version=None):
36
- Helper function to get frh values based on relative humidity (RH) and version.
37
-
38
- process_data():
39
- Process data and save the result.
40
-
41
- Attributes:
42
- -----------
43
- DEFAULT_PATH : Path
44
- The default path for data files.
45
-
46
- Examples:
47
- ---------
48
- >>> df = ImproveProc(reset=True, filename='revised_IMPROVE.csv', version='revised').process_data()
49
-
50
- """
51
-
52
- def __init__(self, file_paths: list[Path | str] = None):
53
- super().__init__()
54
- self.file_paths = [Path(fp) for fp in file_paths]
55
-
56
- @staticmethod
57
- def frh(_RH):
58
- _frh = read_json(Path(__file__).parent.parent.parent / 'plot' / 'utils' / 'fRH.json')
59
- if _RH is not None:
60
- if _RH > 95:
61
- _RH = 95
62
- _RH = round(_RH)
63
- return _frh.loc[_RH].values.T
64
-
65
- return 1, 1, 1, 1
66
-
67
- def revised(self, _df):
68
- def mode(Mass):
69
- L_mode = Mass ** 2 / 20 if Mass < 20 else Mass
70
- S_mode = Mass - L_mode if Mass < 20 else 0
71
-
72
- return L_mode, S_mode
73
-
74
- _frh, _frhss, _frhs, _frhl = self.frh(_df['RH'])
75
-
76
- L_AS, S_AS = mode(_df['AS'])
77
- L_AN, S_AN = mode(_df['AN'])
78
- L_OM, S_OM = mode(_df['OM'])
79
-
80
- _df['AS_ext_dry'] = 2.2 * 1 * S_AS + 4.8 * 1 * L_AS
81
- _df['AN_ext_dry'] = 2.4 * 1 * S_AN + 5.1 * 1 * L_AN
82
- _df['OM_ext_dry'] = 2.8 * S_OM + 6.1 * L_OM
83
- _df['Soil_ext_dry'] = 1 * _df['Soil']
84
- _df['SS_ext_dry'] = 1.7 * 1 * _df['SS']
85
- _df['EC_ext_dry'] = 10 * _df['EC']
86
- _df['total_ext_dry'] = sum(_df['AS_ext_dry':'EC_ext_dry'])
87
-
88
- _df['AS_ext'] = (2.2 * _frhs * S_AS) + (4.8 * _frhl * L_AS)
89
- _df['AN_ext'] = (2.4 * _frhs * S_AN) + (5.1 * _frhl * L_AN)
90
- _df['OM_ext'] = (2.8 * S_OM) + (6.1 * L_OM)
91
- _df['Soil_ext'] = (1 * _df['Soil'])
92
- _df['SS_ext'] = (1.7 * _frhss * _df['SS'])
93
- _df['EC_ext'] = (10 * _df['EC'])
94
- _df['total_ext'] = sum(_df['AS_ext':'EC_ext'])
95
-
96
- _df['ALWC_AS_ext'] = _df['AS_ext'] - _df['AS_ext_dry']
97
- _df['ALWC_AN_ext'] = _df['AN_ext'] - _df['AN_ext_dry']
98
- _df['ALWC_SS_ext'] = _df['SS_ext'] - _df['SS_ext_dry']
99
- _df['ALWC_ext'] = _df['total_ext'] - _df['total_ext_dry']
100
-
101
- _df['fRH_IMPR'] = _df['total_ext'] / _df['total_ext_dry']
102
-
103
- return _df['AS_ext_dry':]
104
-
105
- def modified(self, _df):
106
- _frh, _frhss, _frhs, _frhl = self.frh(_df['RH'])
107
-
108
- _df['AS_ext_dry'] = 3 * 1 * _df['AS']
109
- _df['AN_ext_dry'] = 3 * 1 * _df['AN']
110
- _df['OM_ext_dry'] = 4 * _df['OM']
111
- _df['Soil_ext_dry'] = 1 * _df['Soil']
112
- _df['SS_ext_dry'] = 1.7 * 1 * _df['SS']
113
- _df['EC_ext_dry'] = 10 * _df['EC']
114
- _df['total_ext_dry'] = sum(_df['AS_ext_dry':'EC_ext_dry'])
115
-
116
- _df['AS_ext'] = (3 * _frh * _df['AS'])
117
- _df['AN_ext'] = (3 * _frh * _df['AN'])
118
- _df['OM_ext'] = (4 * _df['OM'])
119
- _df['Soil_ext'] = (1 * _df['Soil'])
120
- _df['SS_ext'] = (1.7 * _frhss * _df['SS'])
121
- _df['EC_ext'] = (10 * _df['EC'])
122
- _df['total_ext'] = sum(_df['AS_ext':'EC_ext'])
123
-
124
- _df['ALWC_AS_ext'] = _df['AS_ext'] - _df['AS_ext_dry']
125
- _df['ALWC_AN_ext'] = _df['AN_ext'] - _df['AN_ext_dry']
126
- _df['ALWC_SS_ext'] = _df['SS_ext'] - _df['SS_ext_dry']
127
- _df['ALWC_ext'] = _df['total_ext'] - _df['total_ext_dry']
128
-
129
- _df['fRH_IMPR'] = _df['total_ext'] / _df['total_ext_dry']
130
-
131
- return _df['AS_ext_dry':]
132
-
133
- @staticmethod
134
- def gas(_df):
135
- _df['ScatteringByGas'] = (11.4 * 293 / (273 + _df['AT']))
136
- _df['AbsorptionByGas'] = (0.33 * _df['NO2'])
137
- _df['ExtinctionByGas'] = _df['ScatteringByGas'] + _df['AbsorptionByGas']
138
- return _df['ScatteringByGas':]
139
-
140
- def process_data(self, reset: bool = False, save_file: Path | str = None,
141
- version: Literal["revised", "modified"] = "revised"):
142
- save_file = Path(save_file)
143
- if save_file.exists() and not reset:
144
- return read_csv(save_file, parse_dates=['Time'], index_col='Time')
145
- else:
146
- # data_files = ['EPB.csv', 'IMPACT.csv', 'chemical.csv']
147
- df = concat([DataReader(file) for file in self.file_paths], axis=1)
148
-
149
- # particle contribution '銨不足不納入計算'
150
- improve_input_df = df.loc[df['NH4_status'] != 'Deficiency', ['AS', 'AN', 'OM', 'Soil', 'SS', 'EC', 'RH']]
151
-
152
- df_improve = improve_input_df.dropna().copy().apply(self.revised if version == 'revised' else self.modified,
153
- axis=1)
154
-
155
- # gas contribution
156
- df_ext_gas = df[['NO2', 'AT']].dropna().copy().apply(self.gas, axis=1)
157
-
158
- _df = concat([df_improve, df_ext_gas], axis=1).reindex(df.index.copy())
159
- _df.to_csv(save_file)
160
-
161
- return _df
@@ -1,65 +0,0 @@
1
- from pathlib import Path
2
-
3
- import numpy as np
4
- from pandas import read_csv, concat, DataFrame
5
-
6
- from AeroViz.process.core import DataProc
7
- from AeroViz.tools.datareader import DataReader
8
-
9
-
10
- class OthersProc(DataProc):
11
- """
12
- A class for process impact data.
13
-
14
- Parameters:
15
- -----------
16
- reset : bool, optional
17
- If True, resets the process. Default is False.
18
- filename : str, optional
19
- The name of the file to process. Default is None.
20
-
21
- Methods:
22
- --------
23
- process_data():
24
- Process data and save the result.
25
-
26
- Attributes:
27
- -----------
28
- DEFAULT_PATH : Path
29
- The default path for data files.
30
-
31
- Examples:
32
- ---------
33
- >>> df = OthersProc().process_data(reset=True, filename=None)
34
-
35
- """
36
-
37
- def __init__(self, file_paths: Path | list[Path | str] = None):
38
- super().__init__()
39
- self.file_paths = [Path(fp) for fp in file_paths]
40
-
41
- def process_data(self, reset: bool = False, save_file: Path | str = None) -> DataFrame:
42
- save_file = Path(save_file)
43
- if save_file.exists() and not reset:
44
- return read_csv(save_file, parse_dates=['Time'], index_col='Time')
45
- else:
46
- df = concat([DataReader(file) for file in self.file_paths], axis=1)
47
-
48
- results = DataFrame(index=df.index)
49
-
50
- results['PG'] = df[
51
- ['Scattering', 'Absorption', 'ScatteringByGas', 'AbsorptionByGas']].dropna().copy().apply(np.sum,
52
- axis=1)
53
- results['MAC'] = df['Absorption'] / df['T_EC']
54
- results['Ox'] = df['NO2'] + df['O3']
55
- results['N2O5_tracer'] = df['NO2'] * df['O3']
56
- results['Vis_cal'] = 1096 / df['Extinction']
57
- # results['fRH_Mix'] = df['Bext'] / df['Extinction']
58
- # results['fRH_PNSD'] = df['Bext_internal'] / df['Bext_dry']
59
- results['fRH_IMPR'] = df['total_ext'] / df['total_ext_dry']
60
- results['OCEC_ratio'] = df['O_OC'] / df['O_EC']
61
- results['PM1/PM25'] = np.where(df['PM1'] / df['PM2.5'] < 1, df['PM1'] / df['PM2.5'], np.nan)
62
- # results['MEE_PNSD'] = df['Bext_internal'] / df['PM25']
63
- # results['MEE_dry_PNSD'] = df['Bext_dry'] / df['PM25']
64
-
65
- return results
@@ -1,103 +0,0 @@
1
- from pathlib import Path
2
-
3
- from pandas import concat, read_csv, DataFrame
4
-
5
- from AeroViz.process.core import DataProc
6
- from AeroViz.process.core.SizeDist import SizeDist
7
- from AeroViz.process.script.AbstractDistCalc import DistributionCalculator
8
-
9
-
10
- class ParticleSizeDistProc(DataProc):
11
- """
12
- A class for process particle size distribution (PSD) data.
13
-
14
- Parameters
15
- ----------
16
- filename : str, optional
17
- The name of the PSD data file.
18
- Defaults to 'PNSD_dNdlogdp.csv' in the default path.
19
-
20
- Attributes
21
- ----------
22
- file_path : Path
23
- The directory path where the PSD data file is located.
24
-
25
- psd : SizeDist
26
- The SizeDist object.
27
-
28
- Methods
29
- -------
30
- process_data(filename='PSD.csv')
31
- Process and save overall PSD properties.
32
-
33
- Examples
34
- --------
35
- Example 1: Use default path and filename
36
- >>> psd_data = ParticleSizeDistProc(filename='PNSD_dNdlogdp.csv').process_data(reset=True)
37
- """
38
-
39
- def __init__(self, file_path: Path | str = None):
40
- super().__init__()
41
- self.file_path = Path(file_path)
42
-
43
- self.psd = SizeDist(read_csv(file_path, parse_dates=['Time'], index_col='Time'))
44
-
45
- def process_data(self, reset: bool = False, save_file: Path | str = None) -> DataFrame:
46
- save_file = Path(save_file)
47
- if save_file.exists() and not reset:
48
- return read_csv(save_file, parse_dates=['Time'], index_col='Time')
49
-
50
- number = DistributionCalculator('number', self.psd).useApply()
51
- surface = DistributionCalculator('surface', self.psd).useApply()
52
- volume = DistributionCalculator('volume', self.psd).useApply()
53
-
54
- surface.to_csv(save_file.parent / 'PSSD_dSdlogdp.csv')
55
- volume.to_csv(save_file.parent / 'PVSD_dVdlogdp.csv')
56
-
57
- result_df = concat(
58
- [DistributionCalculator('property', SizeDist(data=number, weighting='n')).useApply(),
59
- DistributionCalculator('property', SizeDist(data=surface, weighting='s')).useApply(),
60
- DistributionCalculator('property', SizeDist(data=volume, weighting='v')).useApply()
61
- ], axis=1)
62
-
63
- result_df.to_csv(save_file)
64
- return result_df
65
-
66
-
67
- class ExtinctionDistProc(DataProc):
68
-
69
- def __init__(self, file_path: Path | str = 'PNSD_dNdlogdp.csv', file_path_chem: Path | str = 'chemical.csv'):
70
- super().__init__()
71
- self.file_path = Path(file_path)
72
- self.file_path_chem = Path(file_path_chem)
73
-
74
- self.psd = SizeDist(read_csv(file_path, parse_dates=['Time'], index_col='Time'))
75
- self.RI = read_csv(file_path_chem, parse_dates=['Time'], index_col='Time')[['n_dry', 'n_amb', 'k_dry', 'k_amb',
76
- 'AS_volume_ratio',
77
- 'AN_volume_ratio',
78
- 'OM_volume_ratio',
79
- 'Soil_volume_ratio',
80
- 'SS_volume_ratio',
81
- 'EC_volume_ratio',
82
- 'ALWC_volume_ratio']]
83
-
84
- def process_data(self, reset: bool = False, save_file: Path | str = 'PESD.csv'):
85
- save_file = Path(save_file)
86
- if save_file.exists() and not reset:
87
- return read_csv(save_file, parse_dates=['Time']).set_index('Time')
88
-
89
- ext_internal = DistributionCalculator('extinction', self.psd, self.RI, method='internal',
90
- result_type='extinction').useApply()
91
- ext_external = DistributionCalculator('extinction', self.psd, self.RI, method='external',
92
- result_type='extinction').useApply()
93
-
94
- ext_internal.to_csv(save_file.parent / 'PESD_dextdlogdp_internal.csv')
95
- ext_external.to_csv(save_file.parent / 'PESD_dextdlogdp_external.csv')
96
-
97
- result_df = concat([
98
- DistributionCalculator('property', SizeDist(data=ext_internal, weighting='ext_in')).useApply(),
99
- DistributionCalculator('property', SizeDist(data=ext_internal, weighting='ext_ex')).useApply(),
100
- ], axis=1)
101
-
102
- result_df.to_csv(save_file)
103
- return result_df
@@ -1,93 +0,0 @@
1
- from pathlib import Path
2
-
3
- import numpy as np
4
- from pandas import DataFrame, read_csv, concat
5
-
6
- from AeroViz.process.core import DataProc
7
- from AeroViz.process.core.SizeDist import SizeDist
8
-
9
-
10
- class DryPSDProc(DataProc):
11
- """
12
- A class for process impact data.
13
-
14
- Parameters
15
- ----------
16
- reset : bool, optional
17
- If True, resets the process. Default is False.
18
- filename : str, optional
19
- The name of the file to process. Default is None.
20
-
21
- Methods
22
- -------
23
- process_data():
24
- Process data and save the result.
25
-
26
- Attributes
27
- ----------
28
- DEFAULT_PATH : Path
29
- The default path for data files.
30
-
31
-
32
- Examples
33
- --------
34
- >>> df = DryPSDProc(reset=True, filename='PNSD_dNdlogdp_dry.csv').process_data()
35
- """
36
-
37
- def __init__(self, file_path: Path | str = 'PNSD_dNdlogdp.csv', file_path_chem: Path | str = 'chemical.csv'):
38
- super().__init__()
39
- self.file_path = Path(file_path)
40
- self.file_path_chem = Path(file_path_chem)
41
-
42
- self.psd = SizeDist(read_csv(file_path, parse_dates=['Time'], index_col='Time'))
43
- self.RI = read_csv(file_path_chem, parse_dates=['Time'], index_col='Time')[['n_dry', 'n_amb', 'k_dry', 'k_amb',
44
- 'AS_volume_ratio',
45
- 'AN_volume_ratio',
46
- 'OM_volume_ratio',
47
- 'Soil_volume_ratio',
48
- 'SS_volume_ratio',
49
- 'EC_volume_ratio',
50
- 'ALWC_volume_ratio']]
51
-
52
- def process_data(self, reset: bool = False, save_filename: Path | str = None) -> DataFrame:
53
- save_filename = Path(save_filename)
54
- if save_filename.exists() and not reset:
55
- return read_csv(save_filename, parse_dates=['Time']).set_index('Time')
56
- _df = concat([self.psd, self.RI], axis=1)
57
- _df.to_csv(save_filename)
58
- return _df
59
-
60
-
61
- def dry_PNSD_process(dist, dp, **kwargs):
62
- ndp = np.array(dist[:np.size(dp)])
63
- gRH = resolved_gRH(dp, dist['gRH'], uniform=True)
64
-
65
- dry_dp = dp / gRH
66
- belong_which_ibin = np.digitize(dry_dp, dp) - 1
67
-
68
- result = {}
69
- for i, (ibin, dn) in enumerate(zip(belong_which_ibin, ndp)):
70
- if dp[ibin] not in result:
71
- result[dp[ibin]] = []
72
- result[dp[ibin]].append(ndp[i])
73
-
74
- dry_ndp = []
75
- for key, val in result.items():
76
- dry_ndp.append(sum(val) / len(val))
77
-
78
- return np.array(dry_ndp)
79
-
80
-
81
- def resolved_gRH(dp, gRH=1.31, uniform=True):
82
- if uniform:
83
- return np.array([gRH] * dp.size)
84
-
85
- else:
86
- lognorm_dist = lambda x, geoMean, geoStd: (gRH / (np.log10(geoStd) * np.sqrt(2 * np.pi))) * np.exp(
87
- -(x - np.log10(geoMean)) ** 2 / (2 * np.log10(geoStd) ** 2))
88
- abc = lognorm_dist(np.log10(dp), 200, 2.0)
89
- return np.where(abc < 1, 1, abc)
90
-
91
-
92
- if __name__ == '__main__':
93
- pass
@@ -1,5 +0,0 @@
1
- from .Chemical import ChemicalProc
2
- from .IMPACT import ImpactProc
3
- from .IMPROVE import ImproveProc
4
- from .Others import OthersProc
5
- from .PSD import ParticleSizeDistProc, ExtinctionDistProc
@@ -1,69 +0,0 @@
1
- import numpy as np
2
- from pandas import DataFrame
3
-
4
- from AeroViz.process.core.SizeDist import SizeDist
5
- from AeroViz.process.method import Mie_PESD
6
-
7
-
8
- def retrieve_RI(_df: DataFrame,
9
- _PNSD: DataFrame,
10
- nMin: float = 1.33,
11
- nMax: float = 1.60,
12
- kMin: float = 0.00,
13
- kMax: float = 0.60,
14
- spaceSize: int = 31,
15
- dlogdp: float = 0.014
16
- ) -> DataFrame:
17
- nRange = np.linspace(nMin, nMax, num=spaceSize)
18
- kRange = np.linspace(kMin, kMax, spaceSize)
19
- Delta_array = np.zeros((spaceSize, spaceSize))
20
- # 同一時間除了折射率其餘數據皆相同 因此在折射率的迴圈外
21
- bext_mea, bsca_mea, babs_mea = _df['Extinction'], _df['Scattering'], _df['Absorption']
22
-
23
- dp = SizeDist(data=_PNSD).dp
24
- for ki, k in enumerate(kRange):
25
- for ni, n in enumerate(nRange):
26
- m = n + (1j * k)
27
- ndp = np.array(_df[3:])
28
-
29
- ext_dist, sca_dist, abs_dist = Mie_PESD(m, 550, dp, ndp)
30
-
31
- bext_cal = sum(ext_dist) * dlogdp
32
- bsca_cal = sum(sca_dist) * dlogdp
33
- babs_cal = sum(abs_dist) * dlogdp
34
-
35
- Delta_array[ni][ki] = ((babs_mea - babs_cal) / 18.23) ** 2 + ((bsca_mea - bsca_cal) / 83.67) ** 2
36
-
37
- min_delta = Delta_array.argmin()
38
- next_n = nRange[(min_delta // spaceSize)]
39
- next_k = kRange[(min_delta % spaceSize)]
40
-
41
- # 將網格變小
42
- nMin_small = next_n - 0.02 if next_n > 1.33 else 1.33
43
- nMax_small = next_n + 0.02
44
- kMin_small = next_k - 0.04 if next_k > 0.04 else 0
45
- kMax_small = next_k + 0.04
46
- spaceSize_small = 41
47
-
48
- nRange_small = np.linspace(nMin_small, nMax_small, spaceSize_small)
49
- kRange_small = np.linspace(kMin_small, kMax_small, spaceSize_small)
50
- Delta_array_small = np.zeros((spaceSize_small, spaceSize_small))
51
- # 所有數據與大網格一致所以使用上方便數即可
52
- for ki, k in enumerate(kRange_small):
53
- for ni, n in enumerate(nRange_small):
54
- m = n + (1j * k)
55
- ndp = np.array(_df[3:])
56
- ext_dist, sca_dist, abs_dist = Mie_PESD(m, 550, dp, ndp)
57
-
58
- bext_cal = sum(ext_dist) * dlogdp
59
- bsca_cal = sum(sca_dist) * dlogdp
60
- babs_cal = sum(abs_dist) * dlogdp
61
-
62
- Delta_array_small[ni][ki] = ((bext_mea - bext_cal) / 18.23) ** 2 + ((bsca_mea - bsca_cal) / 83.67) ** 2
63
-
64
- min_delta_small = Delta_array_small.argmin()
65
- _df['re_real'] = (nRange_small[(min_delta_small // spaceSize_small)])
66
- _df['re_imaginary'] = (kRange_small[(min_delta_small % spaceSize_small)])
67
-
68
- print(f'\t\tReal part:{_df['re_real']}\tIm part:{_df['re_imaginary']}', end='')
69
- return _df['re_real':]
@@ -1,46 +0,0 @@
1
- import numpy as np
2
- from pandas import read_csv, to_numeric
3
-
4
- from AeroViz.rawDataReader.core import AbstractReader
5
-
6
-
7
- class Reader(AbstractReader):
8
- nam = 'EPA_vertical'
9
-
10
- def _raw_reader(self, file):
11
- with file.open('r', encoding='ascii', errors='ignore') as f:
12
- # 有、無輸出有效值都可以
13
- # read 查詢小時值(測項).csv
14
- df = read_csv(f, encoding='ascii', encoding_errors='ignore', index_col=0, parse_dates=True,
15
- usecols=lambda col: col != 'Unnamed: 1')
16
-
17
- df.index.name = 'Time'
18
- df.rename(columns={'AMB_TEMP': 'AT', 'WIND_SPEED': 'WS', 'WIND_DIREC': 'WD'}, inplace=True)
19
-
20
- # 欄位排序
21
- desired_order = ['SO2', 'NO', 'NOx', 'NO2', 'CO', 'O3', 'THC', 'NMHC', 'CH4', 'PM10', 'PM2.5', 'WS', 'WD',
22
- 'AT', 'RH']
23
-
24
- missing_columns = []
25
-
26
- for col in desired_order:
27
- if col not in df.columns:
28
- df[col] = np.nan
29
- missing_columns.append(col)
30
-
31
- if missing_columns:
32
- self.logger.info(f"{'=' * 60}")
33
- self.logger.info(f"Missing columns: {missing_columns}")
34
- self.logger.info(f"{'=' * 60}")
35
- print(f"Missing columns: {missing_columns}")
36
-
37
- df = df[desired_order]
38
-
39
- # 如果沒有將無效值拿掉就輸出 請將包含 #、L、O 的字串替換成 *
40
- df.replace(to_replace=r'\d*[#LO]\b', value='*', regex=True, inplace=True)
41
- df = df.apply(to_numeric, errors='coerce')
42
-
43
- return df
44
-
45
- def _QC(self, _df):
46
- return _df
@@ -1,41 +0,0 @@
1
- from pandas import to_datetime, read_table
2
-
3
- from AeroViz.rawDataReader.core import AbstractReader
4
-
5
-
6
- class Reader(AbstractReader):
7
- nam = 'SMPS_TH'
8
-
9
- def _raw_reader(self, file):
10
- with open(file, 'r', encoding='utf-8', errors='ignore') as f:
11
- _df = read_table(f, skiprows=18, parse_dates={'Time': ['Date', 'Start Time']}).set_index('Time')
12
- _key = list(_df.keys()[6:-26])
13
-
14
- _newkey = {}
15
- for _k in _key:
16
- _newkey[_k] = float(_k).__round__(4)
17
-
18
- # _newkey['Total Conc.(#/cm)'] = 'total'
19
- # _newkey['Mode(nm)'] = 'mode'
20
-
21
- _df_idx = to_datetime(_df.index, errors='coerce')
22
- return _df[_newkey.keys()].rename(_newkey, axis=1).set_index(_df_idx).loc[_df_idx.dropna()]
23
-
24
- # QC data
25
- def _QC(self, _df):
26
- import numpy as n
27
-
28
- # mask out the data size lower than 7
29
- _df['total'] = _df.sum(axis=1, min_count=1) * (n.diff(n.log(_df.keys().to_numpy(float)))).mean()
30
- _df_size = _df['total'].dropna().resample('1h').size().resample(_df.index.freq).ffill()
31
- _df = _df.mask(_df_size < 7)
32
-
33
- # remove total conc. lower than 2000
34
- _df = _df.mask(_df['total'] < 2000)
35
-
36
- # remove the bin over 400 nm which num. conc. larger than 4000
37
- _df_remv_ky = _df.keys()[:-2][_df.keys()[:-2] >= 400.]
38
-
39
- _df[_df_remv_ky] = _df[_df_remv_ky].copy().mask(_df[_df_remv_ky] > 4000.)
40
-
41
- return _df[_df.keys()[:-1]]
@@ -1,51 +0,0 @@
1
- from pandas import to_datetime, read_csv, to_numeric
2
-
3
- from AeroViz.rawDataReader.core import AbstractReader
4
-
5
-
6
- class Reader(AbstractReader):
7
- nam = 'SMPS_aim11'
8
-
9
- def _raw_reader(self, file):
10
- with open(file, 'r', encoding='utf-8', errors='ignore') as f:
11
-
12
- skiprows = 0
13
- for _line in f:
14
-
15
- if _line.split(',')[0] == 'Scan Number':
16
- f.seek(0)
17
- break
18
-
19
- skiprows += 1
20
- # breakpoint()
21
- _df = read_csv(f, skiprows=skiprows)
22
- _tm_idx = to_datetime(_df['DateTime Sample Start'], format='%d/%m/%Y %X', errors='coerce')
23
-
24
- # index
25
- _df = _df.set_index(_tm_idx).loc[_tm_idx.dropna()]
26
-
27
- # keys
28
- _key = to_numeric(_df.keys(), errors='coerce')
29
- _df.columns = _key
30
- _df = _df.loc[:, ~_key.isna()]
31
-
32
- return _df.apply(to_numeric, errors='coerce')
33
-
34
- # QC data
35
- def _QC(self, _df):
36
- import numpy as n
37
-
38
- # mask out the data size lower than 7
39
- _df['total'] = _df.sum(axis=1, min_count=1) * (n.diff(n.log(_df.keys().to_numpy(float)))).mean()
40
- _df_size = _df['total'].dropna().resample('1h').size().resample(_df.index.freq).ffill()
41
- _df = _df.mask(_df_size < 7)
42
-
43
- # remove total conc. lower than 2000
44
- _df = _df.mask(_df['total'] < 2000)
45
-
46
- # remove the bin over 400 nm which num. conc. larger than 4000
47
- _df_remv_ky = _df.keys()[:-2][_df.keys()[:-2] >= 400.]
48
-
49
- _df[_df_remv_ky] = _df[_df_remv_ky].copy().mask(_df[_df_remv_ky] > 4000.)
50
-
51
- return _df[_df.keys()[:-1]]