ccfx 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ccfx/__init__.py +5 -0
- ccfx/ccfx.py +694 -0
- ccfx/excel.py +143 -0
- ccfx/mssqlConnection.py +228 -0
- ccfx/sqliteConnection.py +302 -0
- ccfx/word.py +96 -0
- ccfx-0.1.0.dist-info/LICENSE +21 -0
- ccfx-0.1.0.dist-info/METADATA +145 -0
- ccfx-0.1.0.dist-info/RECORD +11 -0
- ccfx-0.1.0.dist-info/WHEEL +5 -0
- ccfx-0.1.0.dist-info/top_level.txt +1 -0
ccfx/excel.py
ADDED
@@ -0,0 +1,143 @@
|
|
1
|
+
#!/bin/python3
|
2
|
+
|
3
|
+
'''
|
4
|
+
a module to easily create microsoft excel documents from python
|
5
|
+
|
6
|
+
Author : Celray James CHAWANDA
|
7
|
+
Email : celray.chawanda@outlook.com
|
8
|
+
Licence : MIT 2023
|
9
|
+
Repo : https://github.com/celray
|
10
|
+
|
11
|
+
Date : 2023-07-20
|
12
|
+
'''
|
13
|
+
|
14
|
+
# imports
|
15
|
+
|
16
|
+
import os
|
17
|
+
import platform
|
18
|
+
import xml.etree.ElementTree as ET
|
19
|
+
import xlsxwriter
|
20
|
+
from xlsxwriter.utility import xl_rowcol_to_cell
|
21
|
+
|
22
|
+
# classes
|
23
|
+
class excel:
    """Thin wrapper around xlsxwriter for quickly building .xlsx workbooks.

    Sheets are tracked by name in ``self.sheet_names``; the workbook itself is
    created lazily on the first ``addSheet()`` call.
    """

    def __init__(self, path):
        self.path = path            # destination .xlsx file path
        self.sheet_names = {}       # sheet name -> xlsxwriter worksheet object
        self.book = None            # lazily created xlsxwriter.Workbook
        self.chart_names = []
        self.date_format = None     # cached cell format used by writeDate()

    def create(self):
        """Create the destination directory (if needed) and the workbook."""
        # createPath() itself strips the file component, so hand it the full
        # path; the original passed os.path.dirname(self.path), which removed
        # one directory level too many and also targeted a method name
        # (create_path) that does not exist.
        self.createPath(self.path)
        self.book = xlsxwriter.Workbook(self.path)

    def addSheet(self, sheet_name):
        """Add a worksheet, creating the workbook first if necessary."""
        if self.book is None:
            self.create()
        self.sheet_names[sheet_name] = self.book.add_worksheet(sheet_name)

    def setDateFormat(self, format_string='dd/mm/yyyy'):
        """Register the number format applied by writeDate()."""
        if self.book is None:
            # guard: add_format needs an open workbook
            self.create()
        self.date_format = self.book.add_format({'num_format': format_string})

    def writeDate(self, sheet_name, row, column, datetime_obj):
        """Write a datetime value at (row, column) using the date format."""
        if self.date_format is None:
            # original called the non-existent self.set_date_format()
            self.setDateFormat()

        self.sheet_names[sheet_name].write_datetime(
            row, column, datetime_obj, self.date_format)

    def write(self, sheet_name, row, column, value):
        """Write a single value at 0-based (row, column) on the given sheet."""
        self.sheet_names[sheet_name].write(row, column, value)

    def setColumnWidth(self, sheet_name, column_names, width=12):
        '''
        column_names: a single column letter (str, e.g. "B") or a list of them
        '''
        if isinstance(column_names, str):
            column_names = [column_names]
        for column in column_names:
            self.sheet_names[sheet_name].set_column(
                "{col}:{col}".format(col=column), width)

    def addFigure(self, sheet_name, x_src_sheet_name, x_start, x_end, y_src_sheet_name, y_start, y_end, position_cell="E2", chart_type='subtype',
                  subtype='straight', title='-', size=None, width=720, height=576, marker_type='automatic', x_axis_name="", y_axis_name="",
                  gridlines_visible=False, xmin=0, ymin=0, xmax=None, ymax=None):
        '''
        Insert a scatter chart plotting y-range against x-range.

        x_start example : "E3"
        marker_type : automatic, none, square, diamond, triangle, x, star, short_dash, long_dash, circle, plus
        size        : [y_scale, x_scale]; defaults to [1, 1]

        NOTE(review): chart_type/subtype are accepted for interface
        compatibility but the chart is always created as a scatter chart,
        matching the original behaviour.
        '''
        # avoid a shared mutable default; behaviour is unchanged for callers
        if size is None:
            size = [1, 1]

        chart = self.book.add_chart({'type': 'scatter', })
        chart.set_size({'width': width, 'height': height})

        # axis options
        chart.set_x_axis({
            'name': x_axis_name,
            'min': xmin, 'max': xmax,
            'major_gridlines': {
                'visible': gridlines_visible,
            },
        })

        chart.set_y_axis({
            'name': y_axis_name,
            'min': ymin, 'max': ymax,
            'major_gridlines': {
                'visible': gridlines_visible,
            },
        })

        self.sheet_names[sheet_name].insert_chart(
            position_cell, chart, {'x_scale': size[1], 'y_scale': size[0]})
        chart.add_series({
            'categories': '={sht}!{strt_x}:{end_x}'.format(sht=x_src_sheet_name, strt_x=x_start, end_x=x_end),
            'values': '={sht}!{strt_y}:{end_y}'.format(sht=y_src_sheet_name, strt_y=y_start, end_y=y_end),
            'name': title,
            'marker': {'type': marker_type},
            # 'trendline': {'type': 'linear'},
        })
        chart.set_legend({'position': 'bottom'})

    def writeColumn(self, sheet_name, target_cell, content_list):
        '''Write content_list downward starting at target_cell (eg "A1").'''
        self.sheet_names[sheet_name].write_column(target_cell, content_list)

    def getPlatform(self):
        """Returns: Windows, Linux or Darwin"""
        # original was defined without `self`, so self.getPlatform() raised
        # TypeError at every call site
        return platform.system()

    def open(self):
        """Open the written workbook with the OS default application."""
        system = self.getPlatform()
        if system == "Windows":
            os.startfile(os.path.abspath(self.path))
        elif system == "Darwin":
            # `open` is the macOS launcher
            os.system(f"open {self.path}")
        else:
            # Linux desktops use xdg-open; the original ran macOS's `open`
            # here, which fails on Linux
            os.system(f"xdg-open {self.path}")

    def toAlphaNumeric(self, row, column):
        """Convert 0-based (row, column) to an A1-style cell reference."""
        return xl_rowcol_to_cell(row, column)

    def save(self):
        """Close (flush) the workbook, prompting to retry while it is locked."""
        continue_ = True
        while continue_:
            try:
                self.book.close()
                continue_ = False
            except Exception:
                # typically xlsxwriter.exceptions.FileCreateError when the
                # file is open in Excel; ask the user rather than crash
                print("\t! Error writing the Excel file, make sure it is closed")
                answer = input("\t> retry? (y/n): ")
                continue_ = answer == "y"

    def createPath(self, path_name, v=False):
        """Ensure the directory part of path_name exists and return it.

        path_name may be a file path; its dirname is created. Returns './'
        when path_name has no directory component.
        """
        path_name = os.path.dirname(path_name)
        if path_name == '':
            path_name = './'
        if not os.path.isdir(path_name):
            os.makedirs(path_name)
            if v:
                print(f"\t> created path: {path_name}")

        return path_name
ccfx/mssqlConnection.py
ADDED
@@ -0,0 +1,228 @@
|
|
1
|
+
'''
|
2
|
+
a module that helps ease the management of MSSQL databases
|
3
|
+
|
4
|
+
Author : Celray James CHAWANDA
|
5
|
+
Email : celray.chawanda@outlook.com
|
6
|
+
Licence : MIT 2023
|
7
|
+
Repo : https://github.com/celray
|
8
|
+
|
9
|
+
Date : 2023-07-20
|
10
|
+
'''
|
11
|
+
# imports
import sys
import urllib
import urllib.parse

import pandas
import pyodbc
import geopandas
from shapely import wkt
from sqlalchemy import MetaData, Table, create_engine, func, select
22
|
+
|
23
|
+
# classes
|
24
|
+
class mssql_connection:
    """Convenience wrapper around pyodbc / SQLAlchemy for MSSQL databases.

    Call connect() (or any method that triggers it) before issuing queries.
    """

    def __init__(self, server, username, password, driver, trust_server_ssl=True) -> None:
        self.server = server
        self.username = username
        self.password = password
        self.driver = driver                    # e.g. '{ODBC Driver 17 for SQL Server}'

        self.trust_server_ssl = trust_server_ssl

        self.connection = None                  # active pyodbc connection, None until connect()
        self.cursor = None
        self.db_name = "TMP_CJames"             # default active database

        self.databases = []                     # cached result of listDatabases()

    def connect(self):
        """Connect to the SQL Server instance; errors are printed, not raised."""
        connection_string = f'DRIVER={self.driver};SERVER={self.server};UID={self.username};PWD={self.password};TrustServerCertificate={"yes" if self.trust_server_ssl else "no"}'
        try:
            self.connection = pyodbc.connect(connection_string)
            self.cursor = self.connection.cursor()
            print(f"> connection to {self.server} established...")
        except pyodbc.Error as e:
            print("! error occurred while connecting to the SQL Server instance:")
            print(e)

    def listDatabases(self) -> list:
        """Return (and print) the names of all databases on the server.

        Exits the process when no connection has been established.
        """
        query = "SELECT name FROM sys.databases"

        if self.connection is None:
            print("! there is no connection to the MSSQL server instance")
            sys.exit(1)

        try:
            self.cursor = self.connection.cursor()
            self.cursor.execute(query)
            self.databases = [row[0] for row in self.cursor.fetchall()]
            print("\n> List of available databases:")
            for db in self.databases:
                print(f"\t- {db}")
        except pyodbc.Error as e:
            print("! error occurred while fetching the list of databases:")
            print(e)

        return self.databases

    def listTables(self, db_name=None) -> list:
        """Return (and print) base-table names of the active database.

        db_name: optionally switch the active database first.
        """
        if db_name is not None:
            # original called the non-existent self.connect_db()
            self.connectDB(db_name)

        query = """
        SELECT TABLE_NAME
        FROM INFORMATION_SCHEMA.TABLES
        WHERE TABLE_TYPE = 'BASE TABLE'
        """

        tables = []  # defined even when the query fails, so the return is safe
        try:
            self.cursor = self.connection.cursor()
            self.cursor.execute(query)
            tables = [row[0] for row in self.cursor.fetchall()]
            print("> list of tables in the active database:")
            for table in tables:
                print(f"\t- {table}")
        except pyodbc.Error as e:
            print("Error occurred while fetching the list of tables:")
            print(e)

        return tables

    def readTable(self, table_name: str, db_name: str = None, columns: list = None, geom_col: str = None, v=True):
        """Read a table into a pandas DataFrame (GeoDataFrame when geom_col given).

        columns : optional subset of column names to select
        geom_col: name of a SQL Server geometry column; it is fetched as WKT
                  and converted back into shapely geometry.

        NOTE(review): table/column names are interpolated directly into SQL —
        only pass trusted identifiers.
        """
        if db_name is not None:
            # original called the non-existent self.connect_db()
            self.connectDB(db_name)

        if columns is not None and geom_col is not None:
            columns.append(f"{geom_col}.STAsText() as {geom_col}_wkt")
            query = f"SELECT {','.join(columns)} FROM {table_name}"
        elif columns is not None:
            query = f"SELECT {','.join(columns)} FROM {table_name}"
        elif geom_col is not None:
            query = f"SELECT *, {geom_col}.STAsText() as {geom_col}_wkt FROM {table_name}"
        else:
            query = f"SELECT * FROM {table_name}"

        # Load as a regular DataFrame
        if v: print(f"> reading table: {table_name} from {self.db_name}")
        df = pandas.read_sql(query, self.connection)

        # Convert WKT column to a GeoPandas geometry column if needed
        if geom_col is not None:
            df[geom_col] = df[geom_col + "_wkt"].apply(wkt.loads)
            df = geopandas.GeoDataFrame(df, geometry=geom_col)

        return df

    # Function to change the active database
    def connectDB(self, db_name=None, v=True):
        """Switch the active database (defaults to self.db_name), connecting first if needed."""
        if not self.connection:
            self.connect()
        try:
            self.cursor = self.connection.cursor()
            self.cursor.execute(f"USE {db_name if db_name is not None else self.db_name}")
            self.db_name = db_name if db_name is not None else self.db_name
            self.cursor.commit()

            if v: print(f"> changed active database to: {db_name if db_name is not None else self.db_name}")
        except pyodbc.Error as e:
            print("! error occurred while changing the active database:")
            print(e)

    def dataframeToSql(self, df, table_name, if_exists='fail', geom_col='geometry', v=True):
        """
        Write records stored in a DataFrame to a SQL database.

        GeoDataFrames are written via an intermediate WKT column which is then
        converted back to a SQL Server geometry column (SRID 4326).
        """
        print(f"> saving data to table: {table_name}...")

        # Create SQLAlchemy engine
        params = urllib.parse.quote_plus(f'DRIVER={self.driver};SERVER={self.server};DATABASE={self.db_name};UID={self.username};PWD={self.password};TrustServerCertificate=yes')
        engine = create_engine(f"mssql+pyodbc:///?odbc_connect={params}")

        # Check if dataframe is a GeoDataFrame and has a geometry column
        if isinstance(df, geopandas.GeoDataFrame) and geom_col in df.columns:
            # Create a new column for WKT format
            df[geom_col + '_wkt'] = df[geom_col].apply(lambda x: x.wkt)

            # Drop the original geometry column
            df = df.drop(columns=[geom_col])

            # Write DataFrame to SQL table
            df.to_sql(table_name, engine, if_exists=if_exists, index=False)

            # Create a new connection and cursor
            conn = engine.raw_connection()
            cursor = conn.cursor()

            # Convert the WKT column back to a geometry column in SQL Server
            cursor.execute(f"ALTER TABLE [{table_name}] ADD [{geom_col}] geometry")
            cursor.execute(f"UPDATE [{table_name}] SET [{geom_col}] = geometry::STGeomFromText([{geom_col}_wkt], 4326)")

            # Drop the WKT column
            cursor.execute(f"ALTER TABLE [{table_name}] DROP COLUMN [{geom_col}_wkt]")

            conn.commit()

            # Close the connection and cursor
            cursor.close()
            conn.close()

        else:
            # If dataframe is not a GeoDataFrame or doesn't have a geometry column, write it to SQL as usual
            df.to_sql(table_name, engine, if_exists=if_exists, index=False)

        if v:
            print(f"> saved data to table: {table_name}...")

    def modifySqlTable(self, df, table_name):
        """
        Replace an existing SQL table with a new one based on a DataFrame.

        Parameters:
        df : DataFrame
        table_name : string
            Name of SQL table
        """
        # original called the non-existent self.dataframe_to_sql()
        self.dataframeToSql(df, table_name, if_exists='replace')

    def dropTable(self, table_name, v=True):
        """
        Drops a table from the database.

        Parameters:
        table_name : string
            Name of SQL table
        """

        if not self.connection:
            self.connect()

        # Drop the table
        self.cursor.execute(f"DROP TABLE IF EXISTS {table_name}")

        # Commit the transaction
        self.connection.commit()

        if v: print(f"> deleted table {table_name}")

    def deleteTable(self, table_name, v=True):
        """Alias for dropTable()."""
        self.dropTable(table_name, v=v)

    def close(self, v=True):
        """Close the connection (safe to call when already closed)."""
        if self.connection:
            self.connection.close()
            self.connection = None
            self.cursor = None
            if v: print("> connection closed...")

    def disconnect(self, v=True):
        """Alias for close()."""
        self.close(v=v)
ccfx/sqliteConnection.py
ADDED
@@ -0,0 +1,302 @@
|
|
1
|
+
'''
|
2
|
+
a module that helps ease the management of sqlite databases
|
3
|
+
|
4
|
+
Author : Celray James CHAWANDA
|
5
|
+
Email : celray.chawanda@outlook.com
|
6
|
+
Licence : MIT 2023
|
7
|
+
Repo : https://github.com/celray
|
8
|
+
|
9
|
+
Date : 2023-07-20
|
10
|
+
'''
|
11
|
+
|
12
|
+
# imports
|
13
|
+
import sys
|
14
|
+
import sqlite3
|
15
|
+
import sys
|
16
|
+
import pandas
|
17
|
+
|
18
|
+
# classes
|
19
|
+
class sqliteConnection:
    """Convenience wrapper around sqlite3 for simple table management.

    NOTE(review): most methods build SQL by string concatenation of
    identifiers and values — only pass trusted table/column names.
    """

    def __init__(self, sqlite_database, connect=False):
        self.db_name = sqlite_database   # path to the sqlite file (or ':memory:')
        self.connection = None
        self.cursor = None

        if connect:
            self.connect()

    def connect(self, v=True):
        """Open the database connection and create a cursor."""
        self.connection = sqlite3.connect(self.db_name)
        self.cursor = self.connection.cursor()
        if v:
            self.report("\t-> connection to " + self.db_name + " established...")

    def updateValue(self, table_name, col_name, new_value, col_where1, val_1, v=False):
        """
        does not work yet!
        """
        # NOTE(review): the non-None branch concatenates val_1 directly into
        # the SQL, so it must already be a valid SQL literal (e.g. "5").
        if new_value is not None:
            new_value = str(new_value)
            self.cursor.execute("UPDATE " + table_name + " SET " + col_name +
                                " = '" + new_value + "' WHERE " + col_where1 + " = " + val_1 + ";")
        if new_value is None:
            self.cursor.execute("UPDATE " + table_name + " SET " + col_name +
                                " = ? " + " WHERE " + col_where1 + " = ?", (new_value, val_1))
        if v:
            self.report("\t -> updated {1} value in {0}".format(
                self.db_name.split("/")[-1].split("\\")[-1], table_name))

    def createTable(self, table_name, initial_field_name, data_type):
        '''
        Create a table with a single initial column.

        data_type: can be text, real, etc
        '''
        try:
            self.cursor.execute('''CREATE TABLE ''' + table_name +
                                '(' + initial_field_name + ' ' + data_type + ')')
            self.report("\t-> created table " + table_name + " in " + self.db_name)
        except sqlite3.OperationalError:
            # narrowed from a bare except: "table already exists" is the
            # expected failure mode here
            self.report("\t! table exists")

    def renameTable(self, old_table_name, new_table_name, v=False):
        """
        this function gives a new name to an existing table and saves the changes
        """
        self.cursor.execute("ALTER TABLE " + old_table_name +
                            " RENAME TO " + new_table_name)
        if v:
            self.report("\t-> renamed " + old_table_name + " to " + new_table_name)
        self.commitChanges()

    def tableExists(self, table_name):
        """Return True when a table with the given name exists."""
        self.cursor.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='{table_name}'".format(
            table_name=table_name))
        return self.cursor.fetchone()[0] == 1

    def deleteRows(self, table_to_clean, col_where=None, col_where_value=None, v=False):
        """
        Delete all rows, or only rows where col_where equals col_where_value.

        Raises ValueError when only one of col_where / col_where_value is given.
        """

        if (col_where is None) and (col_where_value is None):
            self.connection.execute("DELETE FROM " + table_to_clean)

        elif (col_where is not None) and (col_where_value is not None):
            self.connection.execute(
                "DELETE FROM " + table_to_clean + " WHERE " + col_where + " = " + col_where_value + ";")

        else:
            # original did `raise ("...")`, which raises a str and fails with
            # "exceptions must derive from BaseException" in Python 3
            raise ValueError("\t! not all arguments were provided for selective row deletion")

        if v:
            self.report("\t-> removed all rows from " + table_to_clean)

    def deleteTable(self, table_name):
        """
        this function deletes the specified table
        """
        self.cursor.execute('''DROP TABLE ''' + table_name)
        self.report("\t-> deleted table " + table_name + " from " + self.db_name)

    def dropTable(self, table_name):
        """Alias for deleteTable()."""
        self.deleteTable(table_name)

    def undoChanges(self):
        """
        This function reverts the database to status before last commit
        """
        self.report("\t-> undoing changes to " + self.db_name + " then saving")
        self.connection.rollback()
        self.commitChanges()

    def readTableAsDict(self, table_name, key_column='id'):
        """Return {row[key_column]: {column: value, ...}, ...} for the table."""
        # Execute a SQL query to fetch all rows from the table
        self.cursor = self.connection.execute(f"SELECT * FROM {table_name}")

        # Fetch all rows as dictionaries keyed by column name
        rows = [dict(zip([column[0] for column in self.cursor.description], row)) for row in self.cursor.fetchall()]

        # Index the row dicts by the requested key column
        data = {row[key_column]: row for row in rows}

        return data

    def getColumnsWithTypes(self, table_name):
        """Return {column_name: declared_type} via PRAGMA table_info."""
        c = self.cursor

        c.execute(f'PRAGMA table_info({table_name})')

        # row[1] is the column name, row[2] its declared type
        columns_with_types = {row[1]: row[2] for row in c.fetchall()}

        return columns_with_types

    def insertDictPartial(self, table_name, data_dict):
        """Insert only the keys of data_dict that match existing columns."""
        c = self.cursor

        # Get the column names from the table
        c.execute(f"PRAGMA table_info({table_name})")
        columns = [row[1] for row in c.fetchall()]

        # Filter the dictionary keys to match the column names
        filtered_data = {k: v for k, v in data_dict.items() if k in columns}

        # Prepare and execute a parameterised INSERT statement
        fields = ', '.join(filtered_data.keys())
        placeholders = ', '.join('?' for _ in filtered_data)
        values = list(filtered_data.values())
        sql = f'INSERT INTO {table_name} ({fields}) VALUES ({placeholders})'

        c.execute(sql, values)

        self.commitChanges()

    def report(self, string, printing=False):
        """Print a progress string; printing=False writes in-place with \\r."""
        if printing:
            print(f"\t> {string}")
        else:
            sys.stdout.write("\r" + string)
            sys.stdout.flush()

    def createTableFromDict(self, table_name, columns_with_types):
        """Create a table from {column_name: sql_type} if it does not exist."""
        fields = ', '.join(f'{column} {data_type}' for column, data_type in columns_with_types.items())
        sql = f'CREATE TABLE IF NOT EXISTS {table_name} ({fields})'

        self.connection.execute(sql)
        self.commitChanges()

    def insertDict(self, table_name, data):
        """Insert every row dict of {key: row_dict} produced by readTableAsDict()."""
        for id, row in data.items():
            fields = ', '.join(row.keys())
            placeholders = ', '.join('?' for _ in row)
            values = list(row.values())
            sql = f'INSERT INTO {table_name} ({fields}) VALUES ({placeholders})'

            self.cursor.execute(sql, values)

        self.connection.commit()

    def readTableColumns(self, table_name, column_list="all"):
        """
        this function takes a list to be a string separated by commas and
        a table and puts the columns in the table into a variable

        "all" to select all columns
        """
        if column_list == "all":
            self.cursor = self.connection.execute(
                "SELECT * from " + table_name)
        else:
            self.cursor = self.connection.execute(
                "SELECT " + ",".join(column_list) + " from " + table_name)

        list_of_tuples = [row for row in self.cursor]
        self.cursor = self.connection.cursor()
        self.report("\t-> read selected table columns from " + table_name)
        return list_of_tuples

    def insertField(self, table_name, field_name, data_type, to_new_line=False, messages=True):
        """
        This will insert a new field into your sqlite database

        table_name: an existing table
        field_name: the field you want to add
        data_type : text, integer, float or real
        """
        self.cursor.execute("alter table " + table_name +
                            " add column " + field_name + " " + data_type)
        if messages:
            if to_new_line:
                self.report(
                    "\t-> inserted into table {0} field {1}".format(table_name, field_name))
            else:
                sys.stdout.write(
                    "\r\t-> inserted into table {0} field {1} ".format(table_name, field_name))
                sys.stdout.flush()

    def insertRow(self, table_name, ordered_content_list=None, dictionary_obj=None, messages=False):
        """
        ordered_content_list such as ['ha','he','hi'] — positional values for
        every column; dictionary_obj maps column names to values.
        """
        # None defaults instead of the original mutable [] / {} defaults
        if ordered_content_list is None:
            ordered_content_list = []
        if dictionary_obj is None:
            dictionary_obj = {}

        if len(ordered_content_list) > 0:
            # original built these with no-op comprehensions; parameterised
            # placeholders are generated directly here
            values_placeholder = ','.join('?' for _ in ordered_content_list)
            values = list(ordered_content_list)
            self.cursor.execute("INSERT INTO " + table_name + " VALUES(" + values_placeholder + ")", values)

        elif len(dictionary_obj) > 0:
            question_marks = ','.join(list('?' * len(dictionary_obj)))
            keys = ','.join(dictionary_obj.keys())
            values = tuple(dictionary_obj.values())
            self.cursor.execute('INSERT INTO ' + table_name + ' (' + keys + ') VALUES (' + question_marks + ')', values)

        if messages:
            self.report("\t-> inserted row into " + table_name)

    def insertRows(self, table_name, list_of_tuples, messages=False):
        """
        list_of_tuples such as [('ha','he','hi')'
                                ('ha','he','hi')]
        not limited to string data
        """
        self.cursor.executemany('INSERT INTO ' + table_name + ' VALUES (?{qmarks})'.format(
            qmarks=",?" * (len(list_of_tuples[0]) - 1)), list_of_tuples)
        if messages:
            self.report("\t-> inserted rows into " + table_name)

    def dumpCSV(self, table_name, file_name, index=False, v=False):
        '''
        save table to csv
        '''
        # a fresh connection so pandas reads committed state independently
        tmp_conn = sqlite3.connect(self.db_name)
        df = pandas.read_sql_query(
            "SELECT * FROM {tn}".format(tn=table_name), tmp_conn)
        if index:
            df.to_csv(file_name)
        else:
            df.to_csv(file_name, index=False)

        if v:
            self.report(
                "\t-> dumped table {0} to {1}".format(table_name, file_name))

    def commitChanges(self, v=False):
        '''
        save changes to the database.
        '''
        self.connection.commit()
        number_of_changes = self.connection.total_changes
        if v:
            self.report(
                "\t-> saved {0} changes to ".format(number_of_changes) + self.db_name)

    def closeConnection(self, commit=True):
        '''
        disconnects from the database
        '''
        if commit:
            self.commitChanges()
        self.connection.close()
        self.report("\t-> closed connection to " + self.db_name)