ccfx 0.2.0__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ccfx/ccfx.py CHANGED
@@ -26,6 +26,7 @@ import geopandas, pandas
 from osgeo import gdal, ogr, osr
 import py7zr
 import subprocess
+import multiprocessing
 
 
 
@@ -56,8 +57,10 @@ def getExtension(filePath:str) -> str:
     '''
     Get the extension of a file
     filePath: file path
+
+    return: file extension without the dot
     '''
-    return os.path.splitext(filePath)[1]
+    return os.path.splitext(filePath)[1].lstrip('.')
 
 
 def deleteFile(filePath:str, v:bool = False) -> bool:
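A quick illustration of the behaviour change above, using a hypothetical file name:

    getExtension('data/rainfall.csv')   # 0.2.0 returned '.csv'; 0.4.0 returns 'csv'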
@@ -86,6 +89,47 @@ def deleteFile(filePath:str, v:bool = False) -> bool:
 
     return deleted
 
+def deletePath(path:str, v:bool = False) -> bool:
+    '''
+    Delete a directory
+
+    path: directory
+    v: verbose (default is False)
+
+    return: True if the directory is deleted, False otherwise
+    '''
+    deleted = False
+    if os.path.exists(path):
+        try:
+            shutil.rmtree(path)
+            deleted = True
+        except:
+            print(f'! Could not delete {path}')
+            deleted = False
+        if v:
+            print(f'> {path} deleted')
+    else:
+        if v:
+            print(f'! {path} does not exist')
+        deleted = False
+
+    return deleted
+
+
+def mergeRasterTiles(tileList:list, outFile:str) -> str:
+    '''
+    Merge raster tiles into one raster file
+    tileList: list of raster files
+    outFile: output raster file
+    '''
+    gdal.Warp(outFile, tileList)
+    return outFile
+
+def mergeRasterFiles(tileList:list, outFile:str) -> str:
+    '''
+    this function is an alias for mergeRasterTiles
+    '''
+    return mergeRasterTiles(tileList, outFile)
+
+
 def systemPlatform() -> str:
     '''
     Get the system platform
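A minimal usage sketch for the new mergeRasterTiles helper (hypothetical tile paths; assumes GDAL can read the inputs):

    tiles = ['dem_tile_1.tif', 'dem_tile_2.tif', 'dem_tile_3.tif']
    mergeRasterTiles(tiles, 'dem_merged.tif')   # thin wrapper around gdal.Warp over the tile list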
@@ -112,6 +156,55 @@ def fileCount(path:str = "./", extension:str = ".*", v:bool = True) -> int:
         print(f'> there are {count} {extension if not extension ==".*" else ""} files in {path}')
     return count
 
+def resampleRaster(inFile:str, outFile:str, resolution:float, dstSRS = None, resamplingMethod = 'bilinear', replaceOutput:bool = True, v:bool = True) -> str:
+    '''
+    Resample a raster file
+    inFile: input raster file
+    outFile: output raster file
+    resolution: resolution in the same units as the input raster
+    v: verbose (default is True)
+    available resample types:
+        'nearest', 'bilinear', 'cubic', 'cubicspline', 'lanczos', 'average', 'mode', 'max', 'min', 'med', 'q1', 'q3'
+
+    return: output raster file path
+    '''
+
+    resampleTypes = {
+        'nearest': gdal.GRA_NearestNeighbour,
+        'bilinear': gdal.GRA_Bilinear,
+        'cubic': gdal.GRA_Cubic,
+        'cubicspline': gdal.GRA_CubicSpline,
+        'lanczos': gdal.GRA_Lanczos,
+        'average': gdal.GRA_Average,
+        'mode': gdal.GRA_Mode,
+        'max': gdal.GRA_Max,
+        'min': gdal.GRA_Min,
+        'med': gdal.GRA_Med,
+        'q1': gdal.GRA_Q1,
+        'q3': gdal.GRA_Q3
+    }
+
+    if not os.path.exists(inFile):
+        print(f'! {inFile} does not exist')
+        return None
+
+    if os.path.exists(outFile):
+        if replaceOutput:
+            os.remove(outFile)
+        else:
+            print(f'! {outFile} already exists')
+            return None
+
+    if v:
+        print(f'> resampling {inFile} to {outFile} at {resolution}')
+
+    ds = gdal.Open(inFile)
+    if dstSRS is None: gdal.Warp(outFile, ds, xRes=resolution, yRes=resolution, resampleAlg=resampleTypes[resamplingMethod])
+    else: gdal.Warp(outFile, ds, xRes=resolution, yRes=resolution, resampleAlg=resampleTypes[resamplingMethod], dstSRS=dstSRS)
+
+    ds = None
+    return outFile
+
 def watchFileCount(path:str="./", extension:str = ".*", interval:float = 0.2, duration = 3, v:bool = True) -> None:
     '''
     Watch the number of files in a directory with a specific extension
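A short sketch of calling the new resampleRaster (hypothetical file names, resolution and EPSG code; the resolution is interpreted in the raster's own units):

    resampleRaster('dem_10m.tif', 'dem_30m.tif', resolution = 30, resamplingMethod = 'average')
    # optionally reproject while resampling
    resampleRaster('dem_10m.tif', 'dem_30m_utm.tif', 30, dstSRS = 'EPSG:32633', resamplingMethod = 'bilinear')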
@@ -639,7 +732,7 @@ def netcdfExportTif(ncFile: str, variable: str, outputFile: str = None, band: in
     return dataset
 
 
-def getFileBaseName(filePath:str, extension:bool = False) -> str:
+def getFileBaseName(filePath:str, extension:bool = True) -> str:
     '''
     Get the base name of a file
     filePath: file path
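The default flip means getFileBaseName now keeps the extension unless told otherwise; a quick illustration with a hypothetical path, assuming the usual basename behaviour:

    getFileBaseName('results/flow_out.csv')                      # 0.2.0: 'flow_out'  →  0.4.0: 'flow_out.csv'
    getFileBaseName('results/flow_out.csv', extension = False)   # 'flow_out' in both versions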
@@ -710,12 +803,64 @@ def copyFile(source:str, destination:str, v:bool = True) -> None:
     source: source file
     destination: destination file
     '''
+    if not exists(os.path.dirname(destination)): createPath(f"{os.path.dirname(destination)}/")
     with open(source, 'rb') as src:
         with open(destination, 'wb') as dest: dest.write(src.read())
 
     if v: print(f'> {source} copied to \t - {destination}')
 
 
+def copyDirectory(source:str, destination:str, recursive = True, v:bool = True, filter = []) -> None:
+    '''
+    Copy a directory from source to destination
+    source: source directory
+    destination: destination directory
+    recursive: copy subdirectories (default is True)
+    v: verbose (default is True)
+    filter: list of file extensions to filter out
+    '''
+    if not exists(destination): os.makedirs(destination)
+
+    itemCount = None
+    counter = 1
+
+    if recursive:
+        if len(filter) > 0:
+            itemCount = len([fn for fn in listAllFiles(source) if not getExtension(fn) in filter])
+        else:
+            itemCount = len(listAllFiles(source))
+    else:
+        if len(filter) > 0:
+            itemCount = len([fn for fn in listFiles(source) if not getExtension(fn) in filter])
+        else:
+            itemCount = len(listFiles(source))
+
+
+    for item in os.listdir(source):
+        s = os.path.join(source, item)
+        d = os.path.join(destination, item)
+        if os.path.isdir(s):
+            if recursive: copyDirectory(s, d, recursive, v, filter)
+        else:
+            if len(filter) > 0:
+                if not getExtension(s) in filter:
+                    copyFile(s, d, v = False)
+                    counter += 1
+                    if v: showProgress(counter, itemCount, f'copying {getFileBaseName(item)}\t\t', barLength=50)
+            else:
+                copyFile(s, d, v = False)
+                if v: showProgress(counter, itemCount, f'copying {getFileBaseName(item)}\t\t', barLength=50)
+                counter += 1
+    print()
+
+
+def copyFolder(source:str, destination:str, v:bool = True) -> None:
+    '''
+    this function is an alias for copyDirectory
+    '''
+    copyDirectory(source, destination, v = v)
+
+
 def convertCoordinates(lon, lat, srcEPSG, dstCRS) -> tuple:
     """
     this function converts coordinates from one CRS to another
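A usage sketch for the new copyDirectory / copyFolder helpers (hypothetical directories; filter entries are given without the leading dot, to match the new getExtension return value):

    copyDirectory('model_runs/', 'backup/model_runs/', recursive = True, filter = ['tmp', 'log'])
    copyFolder('model_runs/', 'backup/model_runs_full/')   # alias, copies everything recursively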
@@ -806,7 +951,9 @@ def showProgress(count: int, end: int, message: str, barLength: int = 100) -> No
     filled = int(barLength * count / end)
     bar = '█' * filled + '░' * (barLength - filled)
     print(f'\r{bar}| {percentStr}% [{count}/{end}] | {message} ', end='', flush=True)
-    if count == end: print()
+    if count == end:
+        print(f'\r{bar}| {percentStr}% [{count}/{end}] ', end='', flush=True)
+        print()
 
 
 def listAllFiles(folder, extension="*"):
@@ -854,4 +1001,317 @@ def createPointGeometry(coords: list, proj: str = "EPSG:4326") -> geopandas.GeoD
     gdf.reset_index(inplace=True)
     return gdf
 
+def calculateTimeseriesStats(data:pandas.DataFrame, observed:str = None, simulated:str = None, resample:str = None ) -> dict:
+    '''
+    Calculate statistics for a timeseries
+
+    the assumed dataframe structure is:
+        date      - DateTime
+        observed  - float
+        simulated - float
+
+    Parameters:
+        data: pandas.DataFrame
+            DataFrame containing the timeseries data
+        observed: str
+            name of the observed column
+        simulated: str
+            name of the simulated column
+        resample: str
+            if specified, the data will be resampled to the specified frequency
+            available options: 'H' (hourly), 'D' (daily), 'M' (monthly), 'Y' (yearly)
+
+    Returns:
+        dict: Dictionary containing the following statistics:
+            NSE: Nash-Sutcliffe Efficiency
+            KGE: Kling-Gupta Efficiency
+            PBIAS: Percent Bias
+            LNSE: Log Nash-Sutcliffe Efficiency
+            R2: R-squared
+            RMSE: Root Mean Square Error
+            MAE: Mean Absolute Error
+            MSE: Mean Square Error
+            MAPE: Mean Absolute Percentage Error
+            alpha: Ratio of standard deviations
+            beta: Ratio of means
+    '''
+
+    options = {'H': '1H', 'D': '1D', 'M': '1M', 'Y': '1Y'}
+
+    if resample:
+        if resample not in options:
+            raise ValueError(f"Invalid resample option. Choose from {list(options.keys())}")
+        if not 'date' in data.columns:
+            for col in data.columns:
+                if data[col].dtype == 'datetime64[ns]':
+                    data = data.set_index(col).resample(options[resample]).mean()
+                    break
+            else:
+                raise ValueError("No datetime column found for resampling.")
+        else:
+            data = data.set_index('date').resample(options[resample]).mean()
+
+    # Auto-detect columns if not specified
+    if not observed and not simulated:
+        datetime_cols = [col for col in data.columns if data[col].dtype == 'datetime64[ns]']
+        if datetime_cols:
+            data = data.drop(datetime_cols, axis=1)
+
+        if len(data.columns) == 2:
+            observed = data.columns[0]
+            simulated = data.columns[1]
+        else:
+            raise ValueError("Could not automatically detect observed and simulated columns")
+    elif not observed or not simulated:
+        raise ValueError("Both observed and simulated columns must be specified if one is specified")
+
+    # Extract data
+    obs = data[observed].values
+    sim = data[simulated].values
+
+    # Remove any rows where either observed or simulated is NaN
+    mask = ~(numpy.isnan(obs) | numpy.isnan(sim))
+    obs = obs[mask]
+    sim = sim[mask]
+
+    if len(obs) == 0:
+        raise ValueError("No valid data points after removing NaN values")
+
+    # Calculate statistics with error handling
+    try:
+        # Nash-Sutcliffe Efficiency (NSE)
+        denominator = numpy.sum((obs - numpy.mean(obs)) ** 2)
+        nse = 1 - numpy.sum((obs - sim) ** 2) / denominator if denominator != 0 else numpy.nan
+
+        # Kling-Gupta Efficiency (KGE) components
+        r = numpy.corrcoef(obs, sim)[0, 1]
+        obs_std = numpy.std(obs)
+        sim_std = numpy.std(sim)
+        obs_mean = numpy.mean(obs)
+        sim_mean = numpy.mean(sim)
+
+        alpha = sim_std / obs_std if obs_std != 0 else numpy.nan
+        beta = sim_mean / obs_mean if obs_mean != 0 else numpy.nan
+
+        # KGE calculation
+        if not any(numpy.isnan([r, alpha, beta])):
+            kge = 1 - numpy.sqrt((r - 1) ** 2 + (alpha - 1) ** 2 + (beta - 1) ** 2)
+        else:
+            kge = numpy.nan
+
+        # Percent Bias (PBIAS)
+        pbias = 100 * numpy.sum(sim - obs) / numpy.sum(obs) if numpy.sum(obs) != 0 else numpy.nan
+
+        # Log Nash-Sutcliffe Efficiency (LNSE)
+        eps = 0.0001
+        log_obs = numpy.log(obs + eps)
+        log_sim = numpy.log(sim + eps)
+        log_denominator = numpy.sum((log_obs - numpy.mean(log_obs)) ** 2)
+        lnse = 1 - numpy.sum((log_obs - log_sim) ** 2) / log_denominator if log_denominator != 0 else numpy.nan
+
+        # R-squared (R2)
+        r2 = r ** 2 if not numpy.isnan(r) else numpy.nan
+
+        # Root Mean Square Error (RMSE)
+        rmse = numpy.sqrt(numpy.mean((obs - sim) ** 2))
+
+        # Mean Absolute Error (MAE)
+        mae = numpy.mean(numpy.abs(obs - sim))
+
+        # Mean Square Error (MSE)
+        mse = numpy.mean((obs - sim) ** 2)
+
+        # Mean Absolute Percentage Error (MAPE)
+        with numpy.errstate(divide='ignore', invalid='ignore'):
+            mape = numpy.mean(numpy.abs((obs - sim) / obs) * 100)
+            mape = numpy.nan if numpy.isinf(mape) else mape
+
+    except Exception as e:
+        print(f"Warning: Error in statistical calculations: {str(e)}")
+        return {stat: numpy.nan for stat in ['NSE', 'KGE', 'PBIAS', 'LNSE', 'R2', 'RMSE', 'MAE', 'MSE', 'MAPE', 'alpha', 'beta']}
+
+    return {
+        'NSE': nse,
+        'KGE': kge,
+        'PBIAS': pbias,
+        'LNSE': lnse,
+        'R2': r2,
+        'RMSE': rmse,
+        'MAE': mae,
+        'MSE': mse,
+        'MAPE': mape,
+        'alpha': alpha,
+        'beta': beta
+    }
+
+
+def getNSE(data:pandas.DataFrame, observed:str = None, simulated:str = None, resample:str = None ) -> float:
+    '''
+    this function is a wrapper for calculateTimeseriesStats specifically to return the NSE
+
+    data: pandas.DataFrame
+        DataFrame containing the timeseries data
+    observed: str
+        name of the observed column
+    simulated: str
+        name of the simulated column
+    resample: str
+        if specified, the data will be resampled to the specified frequency
+        available options: 'H' (hourly), 'D' (daily), 'M' (monthly), 'Y' (yearly)
+
+    return: float
+        NSE value
+    '''
+    stats = calculateTimeseriesStats(data, observed, simulated, resample)
+
+    return stats['NSE']
+
+def getKGE(data:pandas.DataFrame, observed:str = None, simulated:str = None, resample:str = None ) -> float:
+    '''
+    this function is a wrapper for calculateTimeseriesStats specifically to return the KGE
+
+    data: pandas.DataFrame
+        DataFrame containing the timeseries data
+    observed: str
+        name of the observed column
+    simulated: str
+        name of the simulated column
+    resample: str
+        if specified, the data will be resampled to the specified frequency
+        available options: 'H' (hourly), 'D' (daily), 'M' (monthly), 'Y' (yearly)
+
+    return: float
+        KGE value
+    '''
+    stats = calculateTimeseriesStats(data, observed, simulated, resample)
+
+    return stats['KGE']
+
+def getPBIAS(data:pandas.DataFrame, observed:str = None, simulated:str = None, resample:str = None ) -> float:
+    '''
+    this function is a wrapper for calculateTimeseriesStats specifically to return the PBIAS
+
+    data: pandas.DataFrame
+        DataFrame containing the timeseries data
+    observed: str
+        name of the observed column
+    simulated: str
+        name of the simulated column
+    resample: str
+        if specified, the data will be resampled to the specified frequency
+        available options: 'H' (hourly), 'D' (daily), 'M' (monthly), 'Y' (yearly)
+
+    return: float
+        PBIAS value
+    '''
+    stats = calculateTimeseriesStats(data, observed, simulated, resample)
+
+    return stats['PBIAS']
+
+
+def getLNSE(data:pandas.DataFrame, observed:str = None, simulated:str = None, resample:str = None ) -> float:
+    '''
+    this function is a wrapper for calculateTimeseriesStats specifically to return the LNSE
+
+    data: pandas.DataFrame
+        DataFrame containing the timeseries data
+    observed: str
+        name of the observed column
+    simulated: str
+        name of the simulated column
+    resample: str
+        if specified, the data will be resampled to the specified frequency
+        available options: 'H' (hourly), 'D' (daily), 'M' (monthly), 'Y' (yearly)
+
+    return: float
+        LNSE value
+    '''
+    stats = calculateTimeseriesStats(data, observed, simulated, resample)
+
+    return stats['LNSE']
+
+def getR2(data:pandas.DataFrame, observed:str = None, simulated:str = None, resample:str = None ) -> float:
+    '''
+    this function is a wrapper for calculateTimeseriesStats specifically to return the R2
+
+    data: pandas.DataFrame
+        DataFrame containing the timeseries data
+    observed: str
+        name of the observed column
+    simulated: str
+        name of the simulated column
+    resample: str
+        if specified, the data will be resampled to the specified frequency
+        available options: 'H' (hourly), 'D' (daily), 'M' (monthly), 'Y' (yearly)
+
+    return: float
+        R2 value
+    '''
+    stats = calculateTimeseriesStats(data, observed, simulated, resample)
+
+    return stats['R2']
+
+def getRMSE(data:pandas.DataFrame, observed:str = None, simulated:str = None, resample:str = None ) -> float:
+    '''
+    this function is a wrapper for calculateTimeseriesStats specifically to return the RMSE
+
+    data: pandas.DataFrame
+        DataFrame containing the timeseries data
+    observed: str
+        name of the observed column
+    simulated: str
+        name of the simulated column
+    resample: str
+        if specified, the data will be resampled to the specified frequency
+        available options: 'H' (hourly), 'D' (daily), 'M' (monthly), 'Y' (yearly)
+
+    return: float
+        RMSE value
+    '''
+    stats = calculateTimeseriesStats(data, observed, simulated, resample)
+
+    return stats['RMSE']
+
+def getMAE(data:pandas.DataFrame, observed:str = None, simulated:str = None, resample:str = None ) -> float:
+    '''
+    this function is a wrapper for calculateTimeseriesStats specifically to return the MAE
+
+    data: pandas.DataFrame
+        DataFrame containing the timeseries data
+    observed: str
+        name of the observed column
+    simulated: str
+        name of the simulated column
+    resample: str
+        if specified, the data will be resampled to the specified frequency
+        available options: 'H' (hourly), 'D' (daily), 'M' (monthly), 'Y' (yearly)
+
+    return: float
+        MAE value
+    '''
+    stats = calculateTimeseriesStats(data, observed, simulated, resample)
+
+    return stats['MAE']
+
+def getMSE(data:pandas.DataFrame, observed:str = None, simulated:str = None, resample:str = None ) -> float:
+    '''
+    this function is a wrapper for calculateTimeseriesStats specifically to return the MSE
+
+    data: pandas.DataFrame
+        DataFrame containing the timeseries data
+    observed: str
+        name of the observed column
+    simulated: str
+        name of the simulated column
+    resample: str
+        if specified, the data will be resampled to the specified frequency
+        available options: 'H' (hourly), 'D' (daily), 'M' (monthly), 'Y' (yearly)
+
+    return: float
+        MSE value
+    '''
+    stats = calculateTimeseriesStats(data, observed, simulated, resample)
+
+    return stats['MSE']
+
 ignoreWarnings()
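A minimal sketch of the new statistics helpers, assuming the ccfx functions are in scope (hypothetical column names and values):

    import pandas
    df = pandas.DataFrame({
        'date': pandas.date_range('2020-01-01', periods = 5, freq = 'D'),
        'observed': [1.0, 2.0, 3.0, 4.0, 5.0],
        'simulated': [1.1, 1.9, 3.2, 3.8, 5.1],
    })
    stats = calculateTimeseriesStats(df, observed = 'observed', simulated = 'simulated')
    print(stats['NSE'], stats['KGE'])
    getNSE(df, 'observed', 'simulated')    # same NSE via the convenience wrapper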
ccfx/sqliteConnection.py CHANGED
@@ -49,9 +49,8 @@ class sqliteConnection:
             self.cursor.execute(query, (new_value, val_1))
 
             if v:
-                self.report(f"\t -> updated value in {self.db_name.split('/')[-1].split('\\')[-1]} table: {table_name}")
+                self.report(f"\t -> updated value in {self.db_name} table: {table_name}")
 
-            # Commit the transaction if needed (add self.conn.commit() if not in a transaction)
 
         except Exception as e:
             raise Exception(f"Error updating value: {str(e)}")
@@ -1,14 +1,14 @@
 Metadata-Version: 2.1
 Name: ccfx
-Version: 0.2.0
-Summary: Your package description here
+Version: 0.4.0
+Summary: This package simplifies regular common actions for quick prototyping in a user-friendly way
 Author-email: Celray James CHAWANDA <celray@chawanda.com>
 License: MIT
 Project-URL: Homepage, https://github.com/celray/ccfx
 Classifier: Programming Language :: Python :: 3
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
-Requires-Python: >=3.9
+Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: netCDF4
@@ -0,0 +1,11 @@
+ccfx/__init__.py,sha256=VmBeF3oj6JTJ_793d4i8PvhyF8_FxaxA1L_FmHWqitc,142
+ccfx/ccfx.py,sha256=TcGb46QY57295iRfRnMwdm_fjjQ1Z-VDDlzveahRoEw,44547
+ccfx/excel.py,sha256=cQ4TQW49XqbMB3sSS0IOhO3-WArIolEBIrvOvhFyPtI,4757
+ccfx/mssqlConnection.py,sha256=TwyZXhHHI7zy6BSfH1pszuHVJ5cmndRC5dVxvEtSTks,7904
+ccfx/sqliteConnection.py,sha256=jEJ94D5ySt84N7AeDpa27Rclt1NaKhkX6nYzidwApIg,11104
+ccfx/word.py,sha256=AGa64jX5Zl5qotZh5L0QmrsjTnktIBhmj_ByRKZ88vw,3061
+ccfx-0.4.0.dist-info/LICENSE,sha256=2-M3fBUS3FmrSIrqd3cZDmxXxojWVJtZY-SHSRE6RxM,1098
+ccfx-0.4.0.dist-info/METADATA,sha256=3llLy7xyGpG1tpvmB9LZc4csbq-R41UoGMlR95ajT-s,5482
+ccfx-0.4.0.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+ccfx-0.4.0.dist-info/top_level.txt,sha256=_cSvSA1WX2K8TgoV3iBJUdUZZqMKJbOPLNnKLYSLHaw,5
+ccfx-0.4.0.dist-info/RECORD,,
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (75.3.0)
+Generator: setuptools (75.6.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
 
@@ -1,11 +0,0 @@
-ccfx/__init__.py,sha256=VmBeF3oj6JTJ_793d4i8PvhyF8_FxaxA1L_FmHWqitc,142
-ccfx/ccfx.py,sha256=lB6hTCywvbMpHH6lMgw0UJuoiU-7vA22yvttvKFG5Gc,28450
-ccfx/excel.py,sha256=cQ4TQW49XqbMB3sSS0IOhO3-WArIolEBIrvOvhFyPtI,4757
-ccfx/mssqlConnection.py,sha256=TwyZXhHHI7zy6BSfH1pszuHVJ5cmndRC5dVxvEtSTks,7904
-ccfx/sqliteConnection.py,sha256=BsS3jzHSevXLDmtPIFPVuzKqB981su0VpYadTk4xFEQ,11231
-ccfx/word.py,sha256=AGa64jX5Zl5qotZh5L0QmrsjTnktIBhmj_ByRKZ88vw,3061
-ccfx-0.2.0.dist-info/LICENSE,sha256=2-M3fBUS3FmrSIrqd3cZDmxXxojWVJtZY-SHSRE6RxM,1098
-ccfx-0.2.0.dist-info/METADATA,sha256=2GD2kIkcVUo6FG4Th3IEAucmYmhZ4DvdbtGPTI328-c,5419
-ccfx-0.2.0.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
-ccfx-0.2.0.dist-info/top_level.txt,sha256=_cSvSA1WX2K8TgoV3iBJUdUZZqMKJbOPLNnKLYSLHaw,5
-ccfx-0.2.0.dist-info/RECORD,,
File without changes