rda-python-common 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rda_python_common/PgCMD.py +603 -0
- rda_python_common/PgDBI.py +2306 -0
- rda_python_common/PgFile.py +3118 -0
- rda_python_common/PgLOG.py +1689 -0
- rda_python_common/PgLock.py +640 -0
- rda_python_common/PgOPT.py +1740 -0
- rda_python_common/PgSIG.py +1164 -0
- rda_python_common/PgSplit.py +299 -0
- rda_python_common/PgUtil.py +1854 -0
- rda_python_common/__init__.py +0 -0
- rda_python_common/pg_cmd.py +493 -0
- rda_python_common/pg_dbi.py +1885 -0
- rda_python_common/pg_file.py +2462 -0
- rda_python_common/pg_lock.py +533 -0
- rda_python_common/pg_log.py +1352 -0
- rda_python_common/pg_opt.py +1447 -0
- rda_python_common/pg_pass.py +92 -0
- rda_python_common/pg_sig.py +879 -0
- rda_python_common/pg_split.py +260 -0
- rda_python_common/pg_util.py +1534 -0
- rda_python_common/pgpassword.py +92 -0
- rda_python_common-2.0.0.dist-info/METADATA +20 -0
- rda_python_common-2.0.0.dist-info/RECORD +27 -0
- rda_python_common-2.0.0.dist-info/WHEEL +5 -0
- rda_python_common-2.0.0.dist-info/entry_points.txt +3 -0
- rda_python_common-2.0.0.dist-info/licenses/LICENSE +21 -0
- rda_python_common-2.0.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,1885 @@
|
|
|
1
|
+
#
|
|
2
|
+
###############################################################################
|
|
3
|
+
#
|
|
4
|
+
# Title : pg_dbi.py -- PostgreSQL DataBase Interface
|
|
5
|
+
# Author : Zaihua Ji, zji@ucar.edu
|
|
6
|
+
# Date : 06/07/2022
|
|
7
|
+
# 2025-01-10 transferred to package rda_python_common from
|
|
8
|
+
# https://github.com/NCAR/rda-shared-libraries.git
|
|
9
|
+
# 2025-11-24 convert to class PgDBI
|
|
10
|
+
# Purpose : Python library module to handle query and manipulate PostgreSQL database
|
|
11
|
+
#
|
|
12
|
+
# Github : https://github.com/NCAR/rda-python-common.git
|
|
13
|
+
#
|
|
14
|
+
###############################################################################
|
|
15
|
+
|
|
16
|
+
import os
|
|
17
|
+
import re
|
|
18
|
+
import time
|
|
19
|
+
import hvac
|
|
20
|
+
from datetime import datetime
|
|
21
|
+
import psycopg2 as PgSQL
|
|
22
|
+
from psycopg2.extras import execute_values
|
|
23
|
+
from psycopg2.extras import execute_batch
|
|
24
|
+
from os import path as op
|
|
25
|
+
from .pg_log import PgLOG
|
|
26
|
+
|
|
27
|
+
class PgDBI(PgLOG):
|
|
28
|
+
|
|
29
|
+
# PostgreSQL specified query timestamp format
|
|
30
|
+
fmtyr = lambda fn: "extract(year from {})::int".format(fn)
|
|
31
|
+
fmtqt = lambda fn: "extract(quarter from {})::int".format(fn)
|
|
32
|
+
fmtmn = lambda fn: "extract(month from {})::int".format(fn)
|
|
33
|
+
fmtdt = lambda fn: "date({})".format(fn)
|
|
34
|
+
fmtym = lambda fn: "to_char({}, 'yyyy-mm')".format(fn)
|
|
35
|
+
fmthr = lambda fn: "extract(hour from {})::int".format(fn)
|
|
36
|
+
|
|
37
|
+
def __init__(self):
    """Initialize the PgDBI database-interface state on top of PgLOG.

    Sets up connection bookkeeping, per-table metadata caches, hard-coded
    db/port/socket maps, and the PGDBI configuration dict (each entry may be
    overridden by the environment via SETPGDBI).
    """
    super().__init__()       # initialize parent class (PgLOG)
    self.pgdb = None         # reference to a connected database object
    self.curtran = 0         # 0 - no transaction, >0 - in transaction (counts changes)
    self.NMISSES = []        # array of missing userno
    self.LMISSES = []        # array of missing logname
    self.TABLES = {}         # cache of table field default information
    self.SEQUENCES = {}      # cache of table sequence field names
    self.SPECIALIST = {}     # hash references to specialist info of dsids
    self.SYSDOWN = {}        # fixed: was 'self.self.SYSDOWN', an AttributeError at runtime
    self.PGDBI = {}          # configuration dict, filled via SETPGDBI below
    self.ADDTBLS = []        # table names already added/validated in this process
    self.PGSIGNS = ['!', '<', '>', '<>']
    self.CHCODE = 1042       # psycopg2 type code for CHAR (bpchar)
    # hard coded db ports for dbnames
    self.DBPORTS = {'default' : 0}
    self.DBPASS = {}
    self.DBBAOS = {}
    # hard coded db names for given schema names
    self.DBNAMES = {
        'ivaddb' : 'ivaddb',
        'cntldb' : 'ivaddb',
        'cdmsdb' : 'ivaddb',
        'ispddb' : 'ispddb',
        'obsua' : 'upadb',
        'default' : 'rdadb',
    }
    # hard coded socket paths for machine_dbnames
    self.DBSOCKS = {'default' : ''}
    # home path for check db on alternate host
    self.VIEWHOMES = {'default' : self.PGLOG['DSSDBHM']}
    # reserved words; add more to the list if used for names
    self.PGRES = ['end', 'window']
    self.SETPGDBI('DEFDB', 'rdadb')
    self.SETPGDBI("DEFSC", 'dssdb')
    self.SETPGDBI('DEFHOST', self.PGLOG['PSQLHOST'])
    self.SETPGDBI("DEFPORT", 0)
    self.SETPGDBI("DEFSOCK", '')
    self.SETPGDBI("DBNAME", self.PGDBI['DEFDB'])
    self.SETPGDBI("SCNAME", self.PGDBI['DEFSC'])
    self.SETPGDBI("LNNAME", self.PGDBI['DEFSC'])
    self.SETPGDBI("PWNAME", None)
    self.SETPGDBI("DBHOST", (os.environ['DSSDBHOST'] if os.environ.get('DSSDBHOST') else self.PGDBI['DEFHOST']))
    self.SETPGDBI("DBPORT", 0)
    self.SETPGDBI("ERRLOG", self.LOGERR)    # default error logact
    self.SETPGDBI("EXITLG", self.LGEREX)    # default exit logact
    self.SETPGDBI("DBSOCK", '')
    self.SETPGDBI("DATADIR", self.PGLOG['DSDHOME'])
    self.SETPGDBI("BCKPATH", self.PGLOG['DSSDBHM'] + "/backup")
    self.SETPGDBI("SQLPATH", self.PGLOG['DSSDBHM'] + "/sql")
    self.SETPGDBI("VWNAME", self.PGDBI['DEFSC'])
    self.SETPGDBI("VWPORT", 0)
    self.SETPGDBI("VWSOCK", '')
    self.SETPGDBI("BAOURL", 'https://bao.k8s.ucar.edu/')

    # derived host/path settings
    self.PGDBI['DBSHOST'] = self.get_short_host(self.PGDBI['DBHOST'])
    self.PGDBI['DEFSHOST'] = self.get_short_host(self.PGDBI['DEFHOST'])
    self.PGDBI['VWHOST'] = self.PGLOG['PVIEWHOST']
    self.PGDBI['MSHOST'] = self.PGLOG['PMISCHOST']
    self.PGDBI['VWSHOST'] = self.get_short_host(self.PGDBI['VWHOST'])
    self.PGDBI['MSSHOST'] = self.get_short_host(self.PGDBI['MSHOST'])
    self.PGDBI['VWHOME'] = (self.VIEWHOMES[self.PGLOG['HOSTNAME']] if self.PGLOG['HOSTNAME'] in self.VIEWHOMES else self.VIEWHOMES['default'])
    self.PGDBI['SCPATH'] = None      # additional schema path for set search_path
    self.PGDBI['VHSET'] = 0
    self.PGDBI['PGSIZE'] = 1000      # number of records for page_size
    self.PGDBI['MTRANS'] = 5000      # max number of changes in one transaction
    self.PGDBI['MAXICNT'] = 6000000  # maximum number of records in each table
|
|
104
|
+
|
|
105
|
+
# set environments and defaults
|
|
106
|
+
def SETPGDBI(self, name, value):
    """Store a PGDBI configuration entry, letting an environment override
    (resolved by get_environment) take precedence over the given default."""
    resolved = self.get_environment(name, value)
    self.PGDBI[name] = resolved
|
|
108
|
+
|
|
109
|
+
# create a pgddl command string with
|
|
110
|
+
# table name (tname), prefix (pre) and suffix (suf)
|
|
111
|
+
def get_pgddl_command(self, tname, pre = None, suf = None, scname = None):
    """Build a 'pgddl' shell command string for table tname.

    When scname is not given, a 'schema.table' tname is split into schema and
    table parts; otherwise the configured default schema is used.  Optional
    prefix (pre, -y) and suffix (suf, -x) are appended as pgddl options.
    """
    if not scname:
        dotted = re.match(r'^(.+)\.(.+)$', tname)
        if dotted:
            scname = dotted.group(1)
            tname = dotted.group(2)
        else:
            scname = self.PGDBI['SCNAME']
    opts = ''
    if suf: opts = opts + ' -x ' + suf
    if pre: opts = opts + ' -y ' + pre
    return "pgddl {} -aa -h {} -d {} -c {} -u {}{}".format(tname, self.PGDBI['DBHOST'], self.PGDBI['DBNAME'], scname, self.PGDBI['LNNAME'], opts)
|
|
123
|
+
|
|
124
|
+
# set default connection for dssdb PostgreSQL Server
|
|
125
|
+
# Point this object at the default dssdb PostgreSQL server
# (default database/schema on PSQLHOST).
def dssdb_dbname(self):
    """Select the default rdadb/dssdb connection on the PSQL host."""
    self.default_scinfo(self.PGDBI['DEFDB'], self.PGDBI['DEFSC'], self.PGLOG['PSQLHOST'])
dssdb_scname = dssdb_dbname   # backward-compatible alias
|
|
128
|
+
|
|
129
|
+
# set default connection for obsua PostgreSQL Server
|
|
130
|
+
# Point this object at the obsua schema in the upadb database on the misc host.
def obsua_dbname(self):
    """Select the upadb/obsua connection on the PMISC host."""
    self.default_scinfo('upadb', 'obsua', self.PGLOG['PMISCHOST'])
obsua_scname = obsua_dbname   # backward-compatible alias
|
|
133
|
+
|
|
134
|
+
# set default connection for ivaddb PostgreSQL Server
|
|
135
|
+
# Point this object at the ivaddb database/schema on the misc host.
def ivaddb_dbname(self):
    """Select the ivaddb/ivaddb connection on the PMISC host."""
    self.default_scinfo('ivaddb', 'ivaddb', self.PGLOG['PMISCHOST'])
ivaddb_scname = ivaddb_dbname   # backward-compatible alias
|
|
138
|
+
|
|
139
|
+
# set default connection for ispddb PostgreSQL Server
|
|
140
|
+
# Point this object at the ispddb database/schema on the misc host.
def ispddb_dbname(self):
    """Select the ispddb/ispddb connection on the PMISC host."""
    self.default_scinfo('ispddb', 'ispddb', self.PGLOG['PMISCHOST'])
ispddb_scname = ispddb_dbname   # backward-compatible alias
|
|
143
|
+
|
|
144
|
+
# set a default schema info with hard coded info
|
|
145
|
+
# Set a default schema connection, deriving the database name from the
# schema name via the hard-coded DBNAMES map.
def default_dbinfo(self, scname = None, dbhost = None, lnname = None, pwname = None, dbport = None, socket = None):
    """Delegate to default_scinfo with the dbname looked up from scname."""
    return self.default_scinfo(self.get_dbname(scname), scname, dbhost, lnname, pwname, dbport, socket)
|
|
147
|
+
|
|
148
|
+
# set default database/schema info with hard coded info
|
|
149
|
+
# Set default database/schema connection info, filling unspecified arguments
# from the hard-coded PGDBI defaults before applying them via set_scname.
def default_scinfo(self, dbname = None, scname = None, dbhost = None, lnname = None, pwname = None, dbport = None, socket = None):
    """Apply connection settings, defaulting dbname/scname/dbhost/dbport/socket
    from PGDBI['DEF*'] entries.  dbport/socket use 'is None' checks so 0 / ''
    are respected as explicit values."""
    if not dbname: dbname = self.PGDBI['DEFDB']
    if not scname: scname = self.PGDBI['DEFSC']
    if not dbhost: dbhost = self.PGDBI['DEFHOST']
    if dbport is None: dbport = self.PGDBI['DEFPORT']
    if socket is None: socket = self.PGDBI['DEFSOCK']
    self.set_scname(dbname, scname, lnname, pwname, dbhost, dbport, socket)
|
|
156
|
+
|
|
157
|
+
# get the datbase sock file name of a given dbname for local connection
|
|
158
|
+
def get_dbsock(self, dbname):
    """Return the Unix-socket path configured for dbname (local connections),
    falling back to the 'default' entry of DBSOCKS."""
    return self.DBSOCKS.get(dbname, self.DBSOCKS['default'])
|
|
160
|
+
|
|
161
|
+
# get the datbase port number of a given dbname for remote connection
|
|
162
|
+
def get_dbport(self, dbname):
    """Return the port number configured for dbname (remote connections),
    falling back to the 'default' entry of DBPORTS."""
    return self.DBPORTS.get(dbname, self.DBPORTS['default'])
|
|
164
|
+
|
|
165
|
+
# get the datbase name of a given schema name for remote connection
|
|
166
|
+
def get_dbname(self, scname):
    """Map a schema name to its database name via DBNAMES.

    Returns None for an empty/None scname; unknown schemas map to the
    'default' database."""
    if not scname:
        return None
    return self.DBNAMES.get(scname, self.DBNAMES['default'])
|
|
171
|
+
|
|
172
|
+
# set connection for viewing database information
|
|
173
|
+
# Set the connection for viewing database information, deriving the database
# name from the schema name.
def view_dbinfo(self, scname = None, lnname = None, pwname = None):
    """Delegate to view_scinfo with the dbname looked up from scname."""
    self.view_scinfo(self.get_dbname(scname), scname, lnname, pwname)
|
|
175
|
+
|
|
176
|
+
# set connection for viewing database/schema information
|
|
177
|
+
# Set the connection for viewing database/schema information on the
# dedicated view host (PVIEWHOST/VWPORT).
def view_scinfo(self, dbname = None, scname = None, lnname = None, pwname = None):
    """Apply view-host connection settings, defaulting dbname/scname."""
    if not dbname: dbname = self.PGDBI['DEFDB']
    if not scname: scname = self.PGDBI['DEFSC']
    self.set_scname(dbname, scname, lnname, pwname, self.PGLOG['PVIEWHOST'], self.PGDBI['VWPORT'])
|
|
181
|
+
|
|
182
|
+
# set connection for given scname
|
|
183
|
+
# Set the connection for a given schema name, deriving the database name
# from the DBNAMES map.
def set_dbname(self, scname = None, lnname = None, pwname = None, dbhost = None, dbport = None, socket = None):
    """Delegate to set_scname with the dbname looked up from scname."""
    if not scname: scname = self.PGDBI['DEFSC']
    self.set_scname(self.get_dbname(scname), scname, lnname, pwname, dbhost, dbport, socket)
|
|
186
|
+
|
|
187
|
+
# set connection for given database & schema names
|
|
188
|
+
# Apply connection settings for the given database & schema names; if any
# effective setting changed and a connection is open, drop it so the next
# query reconnects with the new settings.
def set_scname(self, dbname = None, scname = None, lnname = None, pwname = None, dbhost = None, dbport = None, socket = None):
    """Update PGDBI connection settings in place.

    Note the ordering: setting scname also resets LNNAME, so an explicit
    lnname must be (and is) applied afterwards.  Local connections (short
    host matches this machine) use a socket; remote ones use a port.
    """
    changed = 0
    if dbname and dbname != self.PGDBI['DBNAME']:
        self.PGDBI['DBNAME'] = dbname
        changed = 1
    if scname and scname != self.PGDBI['SCNAME']:
        # schema change implies the login name defaults to the schema name
        self.PGDBI['LNNAME'] = self.PGDBI['SCNAME'] = scname
        changed = 1
    if lnname and lnname != self.PGDBI['LNNAME']:
        self.PGDBI['LNNAME'] = lnname
        changed = 1
    if pwname != self.PGDBI['PWNAME']:
        self.PGDBI['PWNAME'] = pwname
        changed = 1
    if dbhost and dbhost != self.PGDBI['DBHOST']:
        self.PGDBI['DBHOST'] = dbhost
        self.PGDBI['DBSHOST'] = self.get_short_host(dbhost)
        changed = 1
    if self.PGDBI['DBSHOST'] == self.PGLOG['HOSTNAME']:
        # local server: connect via Unix socket
        if socket is None: socket = self.get_dbsock(dbname)
        if socket != self.PGDBI['DBSOCK']:
            self.PGDBI['DBSOCK'] = socket
            changed = 1
    else:
        # remote server: connect via TCP port
        if not dbport: dbport = self.get_dbport(dbname)
        if dbport != self.PGDBI['DBPORT']:
            self.PGDBI['DBPORT'] = dbport
            changed = 1
    # force a reconnect with the new settings on next use
    if changed and self.pgdb is not None: self.pgdisconnect(1)
|
|
217
|
+
|
|
218
|
+
# start a database transaction and exit if fails
|
|
219
|
+
# start a database transaction and exit if it fails
def starttran(self):
    """Begin a transaction: ensure a live connection, turn autocommit off,
    and mark curtran so later changes are counted against MTRANS."""
    if self.curtran == 1: self.endtran()   # try to end previous transaction
    if not self.pgdb:
        self.pgconnect(0, 0, False)        # fresh connection, autocommit off
    else:
        try:
            # cheap attribute access to probe that the connection is alive
            self.pgdb.isolation_level
        except PgSQL.OperationalError as e:
            self.pgconnect(0, 0, False)
        if self.pgdb.closed:
            self.pgconnect(0, 0, False)
        elif self.pgdb.autocommit:
            self.pgdb.autocommit = False   # enter transaction mode
    self.curtran = 1
|
|
233
|
+
|
|
234
|
+
# end a transaction with changes committed and exit if fails
|
|
235
|
+
def endtran(self, autocommit = True):
    """Commit the open transaction (if any) and restore the requested
    autocommit mode; curtran stays 1 when autocommit remains off."""
    if not (self.curtran and self.pgdb):
        return
    if not self.pgdb.closed:
        self.pgdb.commit()
    self.pgdb.autocommit = autocommit
    self.curtran = 0 if autocommit else 1
|
|
240
|
+
|
|
241
|
+
# end a transaction without changes committed and exit inside if fails
|
|
242
|
+
def aborttran(self, autocommit = True):
    """Roll back the open transaction (if any) without committing changes,
    then restore the requested autocommit mode."""
    if not (self.curtran and self.pgdb):
        return
    if not self.pgdb.closed:
        self.pgdb.rollback()
    self.pgdb.autocommit = autocommit
    self.curtran = 0 if autocommit else 1
|
|
247
|
+
|
|
248
|
+
# record error message to dscheck record and clean the lock
|
|
249
|
+
# record an error message to the current dscheck record and clean the lock
def record_dscheck_error(self, errmsg, logact = None):
    """Attach errmsg to the dscheck row identified by PGLOG['DSCHECK'],
    but only if that row is still locked by this host/pid.

    Returns 0 when there is nothing to update, otherwise the pgupdt result.
    """
    if logact is None: logact = self.PGDBI['EXITLG']
    cnd = self.PGLOG['DSCHECK']['chkcnd']
    if self.PGLOG['NOQUIT']: self.PGLOG['NOQUIT'] = 0
    dflags = self.PGLOG['DSCHECK']['dflags']
    pgrec = self.pgget("dscheck", "mcount, tcount, lockhost, pid", cnd, logact)
    if not pgrec: return 0
    if not pgrec['pid'] and not pgrec['lockhost']: return 0
    (chost, cpid) = self.current_process_info()
    # update the dscheck record only if still locked by the current process
    if pgrec['pid'] != cpid or pgrec['lockhost'] != chost: return 0
    record = {}
    record['chktime'] = int(time.time())
    if logact&self.EXITLG:
        record['status'] = "E"
        record['pid'] = 0   # release lock on fatal errors
    # NOTE(review): reconstructed as top-level (not nested under the EXITLG
    # branch) — confirm against the original indentation
    if dflags:
        record['dflags'] = dflags
        record['mcount'] = pgrec['mcount'] + 1
    else:
        record['dflags'] = ''
    if errmsg:
        # trim/condense very long messages before storing (512-char limit)
        errmsg = self.break_long_string(errmsg, 512, None, 50, None, 50, 25)
        if pgrec['tcount'] > 1: errmsg = "Try {}: {}".format(pgrec['tcount'], errmsg)
        record['errmsg'] = errmsg
    return self.pgupdt("dscheck", record, cnd, logact)
|
|
275
|
+
|
|
276
|
+
# local function to log query error
|
|
277
|
+
# local function to log a query error, optionally sleeping before a retry
def qelog(self, dberror, sleep, sqlstr, vals, pgcnt, logact = None):
    """Format and log a database error message.

    A leading retry note is composed from sleep/pgcnt; 'Retry ...'-prefixed
    sqlstr strings get the attempt ordinal appended.  On fatal logacts the
    message is also recorded to the active dscheck record.  Sleeps 'sleep'
    seconds (retry backoff) before returning FAILURE.
    """
    if logact is None: logact = self.PGDBI['ERRLOG']
    retry = " Sleep {}(sec) & ".format(sleep) if sleep else " "
    if sqlstr:
        if sqlstr.find("Retry ") == 0:
            retry += "the {} ".format(self.int2order(pgcnt+1))
        elif sleep:
            retry += "the {} Retry: \n".format(self.int2order(pgcnt+1))
        elif pgcnt:
            retry = " Error the {} Retry: \n".format(self.int2order(pgcnt))
        else:
            retry = "\n"
        sqlstr = retry + sqlstr
    else:
        sqlstr = ''
    if vals: sqlstr += " with values: " + str(vals)
    if dberror: sqlstr = "{}\n{}".format(dberror, sqlstr)
    # fatal errors are also recorded on the dscheck record before logging
    if logact&self.EXITLG and self.PGLOG['DSCHECK']: self.record_dscheck_error(sqlstr, logact)
    self.pglog(sqlstr, logact)
    if sleep: time.sleep(sleep)
    return self.FAILURE   # reached only if self.pglog() did not exit
|
|
298
|
+
|
|
299
|
+
# try to add a new table according the table not exist error
|
|
300
|
+
def try_add_table(self, dberror, logact):
    """Create a missing table named in a PostgreSQL 42P01
    ('relation ... does not exist') error message; no-op otherwise."""
    missing = re.match(r'^42P01 ERROR: relation "(.+)" does not exist', dberror)
    if not missing:
        return
    self.add_new_table(missing.group(1), logact = logact)
|
|
305
|
+
|
|
306
|
+
# add a table for given table name
|
|
307
|
+
# add a table for a given table name (thin wrapper over add_new_table)
def add_a_table(self, tname, logact):
    """Create table tname with no prefix/suffix."""
    self.add_new_table(tname, logact = logact)
|
|
309
|
+
|
|
310
|
+
# add a new table for given table name
|
|
311
|
+
def add_new_table(self, tname, pre = None, suf = None, logact = 0):
    """Create a new table via pgddl, skipping tables already created in this
    process.  The effective name is '<pre>_<tname>' or '<tname>_<suf>' when a
    prefix/suffix is given (prefix wins when both are supplied)."""
    if pre:
        tbname = pre + '_' + tname
    elif suf:
        tbname = tname + '_' + suf
    else:
        tbname = tname
    if tbname in self.ADDTBLS:
        return   # already added in this process
    self.pgsystem(self.get_pgddl_command(tname, pre, suf), logact)
    self.ADDTBLS.append(tbname)
|
|
321
|
+
|
|
322
|
+
# validate a table for given table name (tname), prefix (pre) and suffix (suf),
|
|
323
|
+
# and add it if not existing
|
|
324
|
+
def valid_table(self, tname, pre = None, suf = None, logact = 0):
    """Ensure a table exists (creating it via pgddl when absent) and return
    its effective name '<pre>_<tname>' / '<tname>_<suf>' / tname."""
    if pre:
        tbname = pre + '_' + tname
    elif suf:
        tbname = tname + '_' + suf
    else:
        tbname = tname
    if tbname not in self.ADDTBLS:
        # not validated yet in this process: check the server, create if missing
        if not self.pgcheck(tbname, logact):
            self.pgsystem(self.get_pgddl_command(tname, pre, suf), logact)
        self.ADDTBLS.append(tbname)
    return tbname
|
|
335
|
+
|
|
336
|
+
# local function to log query error
|
|
337
|
+
# decide whether a psycopg2 error is retryable; log it either way
def check_dberror(self, pgerr, pgcnt, sqlstr, ary, logact = None):
    """Classify a psycopg2 error after attempt pgcnt.

    Returns SUCCESS when the caller should retry (connection loss, lock
    contention, aborted transaction, missing table that was auto-added),
    otherwise logs the error and returns qelog()'s FAILURE (which may exit,
    depending on logact).
    """
    if logact is None: logact = self.PGDBI['ERRLOG']
    ret = self.FAILURE   # NOTE(review): never read afterwards; kept for byte-compatibility
    pgcode = pgerr.pgcode
    pgerror = pgerr.pgerror
    dberror = "{} {}".format(pgcode, pgerror) if pgcode and pgerror else str(pgerr)
    if pgcnt < self.PGLOG['DBRETRY']:
        if not pgcode:
            # no SQLSTATE: likely a failed connect; fall back to default db first
            if self.PGDBI['DBNAME'] == self.PGDBI['DEFDB'] and self.PGDBI['DBSHOST'] != self.PGDBI['DEFSHOST']:
                self.default_dbinfo()
                self.qelog(dberror, 0, "Retry Connecting to {} on {}".format(self.PGDBI['DBNAME'], self.PGDBI['DBHOST']), ary, pgcnt, self.MSGLOG)
            else:
                # back off progressively: 5, 10, 15... seconds
                self.qelog(dberror, 5+5*pgcnt, "Retry Connecting", ary, pgcnt, self.LOGWRN)
            return self.SUCCESS
        elif re.match(r'^(08|57)', pgcode):
            # class 08 (connection exception) / 57 (operator intervention): reconnect
            self.qelog(dberror, 0, "Retry Connecting", ary, pgcnt, self.LOGWRN)
            self.pgconnect(1, pgcnt + 1)
            return (self.FAILURE if not self.pgdb else self.SUCCESS)
        elif re.match(r'^55', pgcode):
            # class 55 (object not in prerequisite state): try to lock again
            self.qelog(dberror, 10, "Retry Locking", ary, pgcnt, self.LOGWRN)
            return self.SUCCESS
        elif pgcode == '25P02':
            # in failed SQL transaction: roll back and retry
            self.qelog(dberror, 0, "Rollback transaction", ary, pgcnt, self.LOGWRN)
            self.pgdb.rollback()
            return self.SUCCESS
        elif pgcode == '42P01' and logact&self.ADDTBL:
            # undefined table and auto-add allowed: create it and retry
            self.qelog(dberror, 0, "Retry after adding a table", ary, pgcnt, self.LOGWRN)
            self.try_add_table(dberror, logact)
            return self.SUCCESS
    if logact&self.DOLOCK and pgcode and re.match(r'^55\w\w\w$', pgcode):
        logact &= ~self.EXITLG   # no exit for lock errors
    return self.qelog(dberror, 0, sqlstr, ary, pgcnt, logact)
|
|
369
|
+
|
|
370
|
+
# return hash reference to postgresql batch mode command and output file name
|
|
371
|
+
def pgbatch(self, sqlfile, foreground = 0):
    """Build a psql batch-mode command for sqlfile.

    Returns the bare psql option string when sqlfile is empty; a pipe-suffixed
    command string when foreground is true; otherwise a dict with 'cmd' (the
    background command with redirected output) and 'out' (the output file,
    sqlfile with its .sql extension replaced by .out, or .out appended).
    Side effect: sets PGPASSWORD in the process environment for psql.
    """
    dbhost = 'localhost' if self.PGDBI['DBSHOST'] == self.PGLOG['HOSTNAME'] else self.PGDBI['DBHOST']
    options = "-h {} -p {}".format(dbhost, self.PGDBI['DBPORT'])
    os.environ['PGPASSWORD'] = self.get_pgpass_password()
    options += " -U {} {}".format(self.PGDBI['LNNAME'], self.PGDBI['DBNAME'])
    if not sqlfile: return options
    if foreground:
        batch = "psql {} < {} |".format(options, sqlfile)
    else:
        batch = {}   # fix: dict was never initialized before item assignment
        if re.search(r'\.sql$', sqlfile):
            batch['out'] = re.sub(r'\.sql$', '.out', sqlfile)
        else:
            batch['out'] = sqlfile + ".out"
        batch['cmd'] = "psql {} < {} > {} 2>&1".format(options, sqlfile, batch['out'])
    return batch
|
|
388
|
+
|
|
389
|
+
# start a connection to dssdb database and return a DBI object; None if error
|
|
390
|
+
# force connect if connect > 0
|
|
391
|
+
# start a connection to the configured database and return the DBI object
# (also cached in self.pgdb / PGLOG['PGDBBUF']); FAILURE on error.
# reconnect > 0 means reuse the existing open connection when possible.
def pgconnect(self, reconnect = 0, pgcnt = 0, autocommit = True):
    """Connect (or reconnect) to PostgreSQL per the PGDBI settings, retrying
    through check_dberror() on failure starting at attempt pgcnt."""
    if self.pgdb:
        if reconnect and not self.pgdb.closed: return self.pgdb   # no need to reconnect
    elif reconnect:
        reconnect = 0   # no existing handle: this is an initial connection
    while True:
        config = {'database' : self.PGDBI['DBNAME'],
                  'user' : self.PGDBI['LNNAME']}
        if self.PGDBI['DBSHOST'] == self.PGLOG['HOSTNAME']:
            config['host'] = 'localhost'
        else:
            config['host'] = self.PGDBI['DBHOST'] if self.PGDBI['DBHOST'] else self.PGDBI['DEFHOST']
        if not self.PGDBI['DBPORT']: self.PGDBI['DBPORT'] = self.get_dbport(self.PGDBI['DBNAME'])
        if self.PGDBI['DBPORT']: config['port'] = self.PGDBI['DBPORT']
        # build the loggable string with the password masked, then fill it in
        config['password'] = '***'
        sqlstr = "psycopg2.connect(**{})".format(config)
        config['password'] = self.get_pgpass_password()
        if self.PGLOG['DBGLEVEL']: self.pgdbg(1000, sqlstr)
        try:
            self.PGLOG['PGDBBUF'] = self.pgdb = PgSQL.connect(**config)
            if reconnect: self.pglog("{} Reconnected at {}".format(sqlstr, self.current_datetime()), self.MSGLOG|self.FRCLOG)
            if autocommit: self.pgdb.autocommit = autocommit
            return self.pgdb
        except PgSQL.Error as pgerr:
            if not self.check_dberror(pgerr, pgcnt, sqlstr, None, self.PGDBI['EXITLG']): return self.FAILURE
        pgcnt += 1
|
|
417
|
+
|
|
418
|
+
# return a PostgreSQL cursor upon success
|
|
419
|
+
# return a PostgreSQL cursor upon success, FAILURE otherwise
def pgcursor(self):
    """Open a cursor on the cached connection (connecting first if needed)
    and set search_path to SCNAME (plus SCPATH when configured), retrying
    via check_dberror() on errors."""
    pgcur = None
    if not self.pgdb:
        self.pgconnect()
        if not self.pgdb: return self.FAILURE
    pgcnt = 0
    while True:
        try:
            pgcur = self.pgdb.cursor()
            spath = "SET search_path = '{}'".format(self.PGDBI['SCNAME'])
            if self.PGDBI['SCPATH'] and self.PGDBI['SCPATH'] != self.PGDBI['SCNAME']:
                spath += ", '{}'".format(self.PGDBI['SCPATH'])
            pgcur.execute(spath)
        except PgSQL.Error as pgerr:
            if pgcnt == 0 and self.pgdb.closed:
                self.pgconnect(1)   # stale connection: reconnect once first
            elif not self.check_dberror(pgerr, pgcnt, '', None, self.PGDBI['EXITLG']):
                return self.FAILURE
        else:
            break
        pgcnt += 1
    return pgcur
|
|
441
|
+
|
|
442
|
+
# disconnect to dssdb database
|
|
443
|
+
def pgdisconnect(self, stopit = 1):
    """Drop the cached database connection; physically close it only when
    stopit is true."""
    if not self.pgdb:
        return
    if stopit:
        self.pgdb.close()
    self.pgdb = None
    self.PGLOG['PGDBBUF'] = None
|
|
447
|
+
|
|
448
|
+
# gather table field default information as hash array with field names as keys
|
|
449
|
+
# and default values as values
|
|
450
|
+
# the whole table information is cached to a hash array with table names as keys
|
|
451
|
+
def pgtable(self, tablename, logact = None):
    """Return {field name: default value} for tablename, cached in TABLES.

    Defaults come from information_schema.columns: sequence-backed columns
    map to 0, nullable columns to None, integers to 0, others to '' or the
    normalized declared default.  With ADDTBL in logact a missing table is
    created once and the query retried; otherwise logs and returns pglog()'s
    result.
    """
    if logact is None: logact = self.PGDBI['ERRLOG']
    if tablename in self.TABLES: return self.TABLES[tablename].copy()   # cached already
    # fix: was '^(smallint||bigint|integer)$' — the doubled '|' created an
    # empty alternative matching the empty string
    intms = r'^(smallint|bigint|integer)$'
    fields = "column_name col, data_type typ, is_nullable nil, column_default def"
    condition = self.table_condition(tablename)
    pgcnt = 0
    while True:
        pgrecs = self.pgmget('information_schema.columns', fields, condition, logact)
        cnt = len(pgrecs['col']) if pgrecs else 0
        if cnt: break
        if pgcnt == 0 and logact&self.ADDTBL:
            self.add_new_table(tablename, logact = logact)   # create, then retry once
        else:
            return self.pglog(tablename + ": Table not exists", logact)
        pgcnt += 1
    pgdefs = {}
    for i in range(cnt):
        name = pgrecs['col'][i]
        isint = re.match(intms, pgrecs['typ'][i])
        dflt = pgrecs['def'][i]
        if dflt is not None:
            if re.match(r'^nextval\(', dflt):
                dflt = 0   # sequence-backed column
            else:
                dflt = self.check_default_value(dflt, isint)
        elif pgrecs['nil'][i] == 'YES':
            dflt = None
        elif isint:
            dflt = 0
        else:
            dflt = ''
        pgdefs[name] = dflt
    # cache a copy so callers cannot mutate the cached dict
    self.TABLES[tablename] = pgdefs.copy()
    return pgdefs
|
|
486
|
+
|
|
487
|
+
# get sequence field name for given table name
|
|
488
|
+
def pgsequence(self, tablename, logact = None):
    """Return the auto-sequence column name for tablename, or None when the
    table has no nextval()-defaulted column.  Results are cached."""
    if logact is None: logact = self.PGDBI['ERRLOG']
    try:
        return self.SEQUENCES[tablename]   # cached already
    except KeyError:
        pass
    cnd = self.table_condition(tablename) + " AND column_default LIKE 'nextval(%'"
    row = self.pgget('information_schema.columns', 'column_name', cnd, logact)
    name = row['column_name'] if row else None
    self.SEQUENCES[tablename] = name
    return name
|
|
496
|
+
|
|
497
|
+
# check default value for integer & string
|
|
498
|
+
@staticmethod
|
|
499
|
+
def check_default_value(dflt, isint):
|
|
500
|
+
if isint:
|
|
501
|
+
ms = re.match(r"^'{0,1}(\d+)", dflt)
|
|
502
|
+
if ms: dflt = int(ms.group(1))
|
|
503
|
+
elif dflt[0] == "'":
|
|
504
|
+
ms = re.match(r"^(.+)::", dflt)
|
|
505
|
+
if ms: dflt = ms.group(1)
|
|
506
|
+
elif dflt != 'NULL':
|
|
507
|
+
dflt = "'{}'".format(dflt)
|
|
508
|
+
return dflt
|
|
509
|
+
|
|
510
|
+
# local fucntion: insert prepare pgadd()/pgmadd() for given table and field names
|
|
511
|
+
# according to options of multiple place holds and returning sequence id
|
|
512
|
+
def prepare_insert(self, tablename, fields, multi = True, getid = None):
    """Compose an INSERT statement for pgadd()/pgmadd().

    multi=True emits a full '(%s,...,%s)' placeholder tuple for per-row
    execute(); multi=False emits a single '%s' for execute_values().  When
    getid is set a RETURNING clause is appended to fetch the sequence id.
    """
    cols = self.pgnames(fields, '.', ',')
    placeholders = "({})".format(','.join(['%s'] * len(fields))) if multi else '%s'
    sqlstr = "INSERT INTO {} ({}) VALUES {}".format(tablename, cols, placeholders)
    if getid:
        sqlstr += " RETURNING " + getid
    if self.PGLOG['DBGLEVEL']:
        self.pgdbg(1000, sqlstr)
    return sqlstr
|
|
522
|
+
|
|
523
|
+
# local fucntion: prepare default value for single record
|
|
524
|
+
def prepare_default(self, tablename, record, logact = 0):
    """Replace missing values (None or empty string) in a single record dict
    with the table's column defaults from pgtable()."""
    defaults = self.pgtable(tablename, logact)
    for fld, val in record.items():
        # None and '' count as missing; 0/False are kept as explicit values
        if val is None or (isinstance(val, str) and not val):
            record[fld] = defaults[fld]
|
|
535
|
+
|
|
536
|
+
# local fucntion: prepare default value for multiple records
|
|
537
|
+
def prepare_defaults(self, tablename, records, logact = 0):
    """Replace missing values (None or empty string) in a multi-record dict
    (field name -> list of values) with the table's column defaults."""
    defaults = self.pgtable(tablename, logact)
    for fld, vals in records.items():
        for i, val in enumerate(vals):
            # None and '' count as missing; 0/False are kept as explicit values
            if val is None or (isinstance(val, str) and not val):
                records[fld][i] = defaults[fld]
|
|
550
|
+
|
|
551
|
+
# insert one record into tablename
|
|
552
|
+
# tablename: add record for one table name each call
|
|
553
|
+
# record: hash reference with keys as field names and hash values as field values
|
|
554
|
+
# return self.SUCCESS or self.FAILURE
|
|
555
|
+
# insert one record into tablename
# record: dict with field names as keys and field values as values
# returns SUCCESS/FAILURE, or the new sequence id when getid/AUTOID is used
def pgadd(self, tablename, record, logact = None, getid = None):
    """Insert a single record, retrying via check_dberror() on errors.

    DODFLT fills missing values with table defaults; AUTOID resolves the
    sequence column and returns its value; ENDLCK commits the transaction;
    otherwise an open transaction's change count is advanced and rolled over
    at MTRANS.
    """
    if logact is None: logact = self.PGDBI['ERRLOG']
    if not record: return self.pglog("Nothing adds to " + tablename, logact)
    if logact&self.DODFLT: self.prepare_default(tablename, record, logact)
    if logact&self.AUTOID and not getid: getid = self.pgsequence(tablename, logact)
    sqlstr = self.prepare_insert(tablename, list(record), True, getid)
    values = tuple(record.values())
    if self.PGLOG['DBGLEVEL']: self.pgdbg(1000, "Insert: " + str(values))
    ret = acnt = pgcnt = 0
    while True:
        pgcur = self.pgcursor()
        if not pgcur: return self.FAILURE
        try:
            pgcur.execute(sqlstr, values)
            acnt = 1
            if getid:
                ret = pgcur.fetchone()[0]   # value of the RETURNING column
            else:
                ret = self.SUCCESS
            pgcur.close()
        except PgSQL.Error as pgerr:
            if not self.check_dberror(pgerr, pgcnt, sqlstr, values, logact): return self.FAILURE
        else:
            break
        pgcnt += 1
    if self.PGLOG['DBGLEVEL']: self.pgdbg(1000, "pgadd: 1 record added to " + tablename + ", return " + str(ret))
    if(logact&self.ENDLCK):
        self.endtran()   # commit and release lock
    elif self.curtran:
        self.curtran += acnt
        # restart the transaction when the change count exceeds MTRANS
        if self.curtran > self.PGDBI['MTRANS']: self.starttran()
    return ret
|
|
587
|
+
|
|
588
|
+
# insert multiple records into tablename
|
|
589
|
+
# tablename: add records for one table name each call
|
|
590
|
+
# records: dict with field names as keys and each value is a list of field values
|
|
591
|
+
# return self.SUCCESS or self.FAILURE
|
|
592
|
+
# insert multiple records into tablename
# records: dict with field names as keys and a list of field values per key
# returns the inserted count, or the list of sequence ids when getid is used
def pgmadd(self, tablename, records, logact = None, getid = None):
    """Bulk-insert records, retrying via check_dberror() on errors.

    With getid each row is executed individually (full placeholder tuple +
    RETURNING) to collect ids; without it psycopg2's execute_values() inserts
    in PGSIZE pages.  DODFLT/ENDLCK/MTRANS behave as in pgadd().
    """
    if logact is None: logact = self.PGDBI['ERRLOG']
    if not records: return self.pglog("Nothing to insert to table " + tablename, logact)
    if logact&self.DODFLT: self.prepare_defaults(tablename, records, logact)
    if logact&self.AUTOID and not getid: getid = self.pgsequence(tablename, logact)
    # per-row execute needs the full '(%s,...)' tuple; execute_values needs '%s'
    multi = True if getid else False
    sqlstr = self.prepare_insert(tablename, list(records), multi, getid)
    v = records.values()
    values = list(zip(*v))   # column lists -> row tuples
    cntrow = len(values)
    ids = [] if getid else None
    if self.PGLOG['DBGLEVEL']:
        for row in values: self.pgdbg(1000, "Insert: " + str(row))
    count = pgcnt = 0
    while True:
        pgcur = self.pgcursor()
        if not pgcur: return self.FAILURE
        if getid:
            # row-at-a-time so each RETURNING id can be collected; 'count'
            # persists across retries so completed rows are not re-inserted
            while count < cntrow:
                record = values[count]
                try:
                    pgcur.execute(sqlstr, record)
                    ids.append(pgcur.fetchone()[0])
                    count += 1
                except PgSQL.Error as pgerr:
                    if not self.check_dberror(pgerr, pgcnt, sqlstr, record, logact): return self.FAILURE
                    break   # retryable: leave inner loop, retry from 'count'
        else:
            try:
                execute_values(pgcur, sqlstr, values, page_size=self.PGDBI['PGSIZE'])
                count = cntrow
            except PgSQL.Error as pgerr:
                if not self.check_dberror(pgerr, pgcnt, sqlstr, values[0], logact): return self.FAILURE
        if count >= cntrow: break
        pgcnt += 1
    pgcur.close()
    if(self.PGLOG['DBGLEVEL']): self.pgdbg(1000, "pgmadd: {} of {} record(s) added to {}".format(count, cntrow, tablename))
    if(logact&self.ENDLCK):
        self.endtran()   # commit and release lock
    elif self.curtran:
        self.curtran += count
        if self.curtran > self.PGDBI['MTRANS']: self.starttran()
    return (ids if ids else count)
|
|
635
|
+
|
|
636
|
+
# local helper: build the SELECT statement used by pgget() and pgmget()
def prepare_select(self, tablenames, fields = None, condition = None, cndflds = None, logact = 0):
    """Assemble and return a SELECT statement string.

    tablenames: comma-delimited table name(s); when empty, return a bare
                "SELECT fields" or the raw condition string as given.
    fields: comma-delimited field list; when empty a count(*) query is built.
    condition: literal WHERE-clause text, or a clause starting with
               ORDER/GROUP/HAVING/OFFSET/LIMIT appended without WHERE.
    cndflds: field names turned into "fld=%s" placeholders when no condition.
    logact: when the DOLOCK bit is set, open a transaction and lock rows.
    """
    if not tablenames:
        # no table given: degenerate forms used by some callers
        if fields:
            sqlstr = "SELECT " + fields
        elif condition:
            sqlstr = condition          # caller supplied a full statement
        else:
            sqlstr = ''
        if self.PGLOG['DBGLEVEL']: self.pgdbg(1000, sqlstr)
        return sqlstr
    head = ("SELECT " + fields) if fields else "SELECT count(*) cntrec"
    sqlstr = head + " FROM " + tablenames
    if condition:
        if re.match(r'^\s*(ORDER|GROUP|HAVING|OFFSET|LIMIT)\s', condition, re.I):
            sqlstr += " " + condition   # clause-only condition; no WHERE keyword
        else:
            sqlstr += " WHERE " + condition
    elif cndflds:
        # build a placeholder condition: WHERE f1=%s AND f2=%s ...
        keyword = 'WHERE'
        for name in cndflds:
            sqlstr += " {} {}=%s".format(keyword, name)
            keyword = 'AND'
    if logact&self.DOLOCK:
        self.starttran()
        sqlstr += " FOR UPDATE"
    if self.PGLOG['DBGLEVEL']: self.pgdbg(1000, sqlstr)
    return sqlstr
|
|
665
|
+
|
|
666
|
+
# tablenames: comma-delimited string of one or more tables; more than one table for joining
# fields: comma-delimited string of one or more field names
# condition: query conditions for the WHERE clause
# return a dict with keys as field names upon success
def pgget(self, tablenames, fields, condition = None, logact = 0):
    """Fetch at most one record; return a {column: value} dict.

    When fields is empty a count(*) query is run and the count is returned
    instead of a dict. Retries on DB errors via check_dberror(); returns
    self.FAILURE when no cursor can be obtained or the error is fatal.
    """
    if not logact: logact = self.PGDBI['ERRLOG']
    # force a single-row fetch unless the caller already capped it
    if fields and condition and not re.search(r'limit 1$', condition, re.I): condition += " LIMIT 1"
    sqlstr = self.prepare_select(tablenames, fields, condition, None, logact)
    # prepare_select may have rebuilt the statement; re-check for LIMIT 1
    if fields and not re.search(r'(^|\s)limit 1($|\s)', sqlstr, re.I): sqlstr += " LIMIT 1"
    ucname = True if logact&self.UCNAME else False  # upper-case column keys on request
    pgcnt = 0   # retry counter passed to check_dberror()
    record = {}
    while True:
        pgcur = self.pgcursor()
        if not pgcur: return self.FAILURE
        try:
            pgcur.execute(sqlstr)
            vals = pgcur.fetchone()
            if vals:
                colcnt = len(pgcur.description)
                for i in range(colcnt):
                    col = pgcur.description[i]
                    colname = col[0].upper() if ucname else col[0]
                    val = vals[i]
                    # CHCODE marks blank-padded char columns: strip trailing spaces
                    if col[1] == self.CHCODE and val and val[-1] == ' ': val = val.rstrip()
                    record[colname] = val
            pgcur.close()
        except PgSQL.Error as pgerr:
            # check_dberror() decides whether the error is retryable
            if not self.check_dberror(pgerr, pgcnt, sqlstr, None, logact): return self.FAILURE
        else:
            break
        pgcnt += 1
    if record and tablenames and not fields:
        # count(*) query: return the bare count instead of a dict
        if self.PGLOG['DBGLEVEL']:
            self.pgdbg(1000, "pgget: {} record(s) found from {}".format(record['cntrec'], tablenames))
        return record['cntrec']
    elif self.PGLOG['DBGLEVEL']:
        cnt = 1 if record else 0
        self.pgdbg(1000, "pgget: {} record retrieved from {}".format(cnt, tablenames))
    return record
|
|
706
|
+
|
|
707
|
+
# tablenames: comma-delimited string of one or more tables; more than one table for joining
# fields: comma-delimited string of one or more field names
# condition: query conditions for the WHERE clause
# return a dict with keys as field names upon success; values for each field name
# are in a list. All lists are the same length with missing values set to None
def pgmget(self, tablenames, fields, condition = None, logact = None):
    """Fetch all matching records; return a column-oriented {column: [values]} dict.

    Retries on DB errors via check_dberror(); returns self.FAILURE when no
    cursor can be obtained or the error is fatal. An empty result yields {}.
    """
    if logact is None: logact = self.PGDBI['ERRLOG']
    sqlstr = self.prepare_select(tablenames, fields, condition, None, logact)
    ucname = True if logact&self.UCNAME else False  # upper-case column keys on request
    count = pgcnt = 0
    records = {}
    while True:
        pgcur = self.pgcursor()
        if not pgcur: return self.FAILURE
        try:
            pgcur.execute(sqlstr)
            rowvals = pgcur.fetchall()
            if rowvals:
                colcnt = len(pgcur.description)
                count = len(rowvals)
                # transpose row tuples into per-column value lists
                colvals = list(zip(*rowvals))
                for i in range(colcnt):
                    col = pgcur.description[i]
                    colname = col[0].upper() if ucname else col[0]
                    vals = list(colvals[i])
                    if col[1] == self.CHCODE:
                        # blank-padded char column: strip trailing spaces per value
                        for j in range(count):
                            if vals[j] and vals[j][-1] == ' ': vals[j] = vals[j].rstrip()
                    records[colname] = vals
            pgcur.close()
        except PgSQL.Error as pgerr:
            if not self.check_dberror(pgerr, pgcnt, sqlstr, None, logact): return self.FAILURE
        else:
            break
        pgcnt += 1
    if self.PGLOG['DBGLEVEL']:
        self.pgdbg(1000, "pgmget: {} record(s) retrieved from {}".format(count, tablenames))
    return records
|
|
745
|
+
|
|
746
|
+
# tablenames: comma-delimited string of one or more tables
# fields: comma-delimited string of one or more field names
# cnddict: condition dict of field names : values
# return a dict (field names : values) upon success
# retrieve one record from tablenames for the condition dict
def pghget(self, tablenames, fields, cnddict, logact = None):
    """Fetch at most one record matching the placeholder conditions in cnddict.

    The condition is built as "fld=%s AND ..." from cnddict's keys and executed
    with its values, so values are passed safely as query parameters.
    Returns a {column: value} dict ({} when nothing matched), or self.FAILURE.
    """
    if logact is None: logact = self.PGDBI['ERRLOG']
    if not tablenames: return self.pglog("Miss Table name to query", logact)
    if not fields: return self.pglog("Nothing to query " + tablenames, logact)
    if not cnddict: return self.pglog("Miss condition dict values to query " + tablenames, logact)
    sqlstr = self.prepare_select(tablenames, fields, None, list(cnddict), logact)
    if fields and not re.search(r'limit 1$', sqlstr, re.I): sqlstr += " LIMIT 1"
    ucname = True if logact&self.UCNAME else False
    values = tuple(cnddict.values())
    if self.PGLOG['DBGLEVEL']: self.pgdbg(1000, "Query from {} for {}".format(tablenames, values))
    pgcnt = 0
    record = {}
    while True:
        pgcur = self.pgcursor()
        if not pgcur: return self.FAILURE
        try:
            pgcur.execute(sqlstr, values)
            vals = pgcur.fetchone()
            if vals:
                colcnt = len(pgcur.description)
                for i in range(colcnt):
                    col = pgcur.description[i]
                    colname = col[0].upper() if ucname else col[0]
                    val = vals[i]
                    # CHCODE marks blank-padded char columns: strip trailing spaces
                    if col[1] == self.CHCODE and val and val[-1] == ' ': val = val.rstrip()
                    record[colname] = val
            pgcur.close()
        except PgSQL.Error as pgerr:
            if not self.check_dberror(pgerr, pgcnt, sqlstr, values, logact): return self.FAILURE
        else:
            break
        pgcnt += 1
    # NOTE(review): fields is guaranteed truthy by the guard above, so this
    # count(*) branch looks unreachable here (kept for parity with pgget)
    if record and tablenames and not fields:
        if self.PGLOG['DBGLEVEL']:
            self.pgdbg(1000, "pghget: {} record(s) found from {}".format(record['cntrec'], tablenames))
        return record['cntrec']
    elif self.PGLOG['DBGLEVEL']:
        cnt = 1 if record else 0
        self.pgdbg(1000, "pghget: {} record retrieved from {}".format(cnt, tablenames))
    return record
|
|
791
|
+
|
|
792
|
+
# tablenames: comma-delimited string of one or more tables
# fields: comma-delimited string of one or more field names
# cnddicts: condition dict of field names : value lists
# return a dict (field names : value lists) upon success
# retrieve multiple records from tablenames, one query per condition row
def pgmhget(self, tablenames, fields, cnddicts, logact = None):
    """Run the same parameterized SELECT once per condition row and merge results.

    cnddicts maps condition field names to parallel value lists; each zipped
    tuple is executed separately and all result rows are appended into one
    column-oriented {column: [values]} dict. Returns self.FAILURE on fatal error.
    """
    if logact is None: logact = self.PGDBI['ERRLOG']
    if not tablenames: return self.pglog("Miss Table name to query", logact)
    if not fields: return self.pglog("Nothing to query " + tablenames, logact)
    if not cnddicts: return self.pglog("Miss condition dict values to query " + tablenames, logact)
    sqlstr = self.prepare_select(tablenames, fields, None, list(cnddicts), logact)
    ucname = True if logact&self.UCNAME else False
    # transpose the per-field value lists into per-row condition tuples
    v = cnddicts.values()
    values = list(zip(*v))
    cndcnt = len(values)
    if self.PGLOG['DBGLEVEL']:
        for row in values:
            self.pgdbg(1000, "Query from {} for {}".format(tablenames, row))
    colcnt = ccnt = count = pgcnt = 0
    cols = []   # column names, captured from the first non-empty result
    chrs = []   # names of blank-padded char columns needing rstrip
    records = {}
    while True:
        pgcur = self.pgcursor()
        if not pgcur: return self.FAILURE
        while ccnt < cndcnt:
            cndvals = values[ccnt]
            try:
                pgcur.execute(sqlstr, cndvals)
                ccnt += 1
                rowvals = pgcur.fetchall()
                if rowvals:
                    if colcnt == 0:
                        # first result: record column names and char columns once
                        for col in pgcur.description:
                            colname = col[0].upper() if ucname else col[0]
                            if col[1] == self.CHCODE: chrs.append(colname)
                            cols.append(colname)
                            records[colname] = []
                        colcnt = len(cols)
                    rcnt = len(rowvals)
                    count += rcnt
                    colvals = list(zip(*rowvals))
                    for i in range(colcnt):
                        vals = list(colvals[i])
                        colname = cols[i]
                        if chrs and colname in chrs:
                            for j in range(rcnt):
                                if vals[j] and vals[j][-1] == ' ': vals[j] = vals[j].rstrip()
                        records[colname].extend(vals)
            except PgSQL.Error as pgerr:
                if not self.check_dberror(pgerr, pgcnt, sqlstr, cndvals, logact): return self.FAILURE
                # retryable error: get a fresh cursor and resume at this row
                break
        if ccnt >= cndcnt: break
        pgcnt += 1
    pgcur.close()
    if self.PGLOG['DBGLEVEL']:
        self.pgdbg(1000, "pgmhget: {} record(s) retrieved from {}".format(count, tablenames))
    return records
|
|
850
|
+
|
|
851
|
+
# local helper: build the UPDATE statement used by pgupdt, pghupdt and pgmupdt
def prepare_update(self, tablename, fields, condition = None, cndflds = None):
    """Return "UPDATE tablename SET f1=%s,... WHERE condition".

    fields: names for the SET clause, each becoming a %s placeholder.
    condition: literal WHERE text; when empty, cndflds is turned into
               "c1=%s AND c2=%s ..." placeholder conditions.
    """
    setclause = ",".join("{}=%s".format(self.pgname(fld, '.')) for fld in fields)
    if not condition:
        condition = " AND ".join("{}=%s".format(self.pgname(fld, '.')) for fld in cndflds)
    sqlstr = "UPDATE {} SET {} WHERE {}".format(tablename, setclause, condition)
    if self.PGLOG['DBGLEVEL']: self.pgdbg(1000, sqlstr)
    return sqlstr
|
|
867
|
+
|
|
868
|
+
# update one or multiple rows in tablename
# tablename: update for one table name each call
# record: dict of field names : values
# condition: update conditions for the WHERE clause
# return number of rows updated upon success
def pgupdt(self, tablename, record, condition, logact = None):
    """Update rows of tablename matching condition with the values in record.

    Returns the affected-row count, or self.FAILURE on fatal DB error.
    Honors DODFLT (fill defaults), ENDLCK (end transaction) and the running
    transaction counter like the sibling pghupdt/pgmupdt methods.
    """
    if logact is None: logact = self.PGDBI['ERRLOG']
    if not record: self.pglog("Nothing updates to " + tablename, logact)
    if not condition or isinstance(condition, int): self.pglog("Miss condition to update " + tablename, logact)
    # BUGFIX: fill default values BEFORE building the SET clause so the
    # placeholder list matches record.values(); also call prepare_defaults
    # (plural) — prepare_default did not exist and raised AttributeError
    if logact&self.DODFLT: self.prepare_defaults(tablename, record, logact)
    sqlstr = self.prepare_update(tablename, list(record), condition)
    values = tuple(record.values())
    if self.PGLOG['DBGLEVEL']: self.pgdbg(1000, "Update {} for {}".format(tablename, values))
    ucnt = pgcnt = 0
    while True:
        pgcur = self.pgcursor()
        if not pgcur: return self.FAILURE
        try:
            pgcur.execute(sqlstr, values)
            ucnt = pgcur.rowcount
            pgcur.close()
        except PgSQL.Error as pgerr:
            if not self.check_dberror(pgerr, pgcnt, sqlstr, values, logact): return self.FAILURE
        else:
            break
        pgcnt += 1
    if self.PGLOG['DBGLEVEL']: self.pgdbg(1000, "pgupdt: {} record(s) updated to {}".format(ucnt, tablename))
    if(logact&self.ENDLCK):
        self.endtran()
    elif self.curtran:
        self.curtran += ucnt
        if self.curtran > self.PGDBI['MTRANS']: self.starttran()
    return ucnt
|
|
901
|
+
|
|
902
|
+
# update one or multiple records in tablename
# tablename: update for one table name each call
# record: update values, dict of field names : values
# cnddict: condition dict of field names : values
# return number of records updated upon success
def pghupdt(self, tablename, record, cnddict, logact = None):
    """Update rows matching the placeholder conditions in cnddict.

    Both the SET values (record) and WHERE values (cnddict) are passed as
    query parameters. Returns the affected-row count, or self.FAILURE.
    """
    if logact is None: logact = self.PGDBI['ERRLOG']
    if not record: self.pglog("Nothing updates to " + tablename, logact)
    if not cnddict or isinstance(cnddict, int): self.pglog("Miss condition to update to " + tablename, logact)
    if logact&self.DODFLT: self.prepare_defaults(tablename, record, logact)
    sqlstr = self.prepare_update(tablename, list(record), None, list(cnddict))
    values = tuple(record.values()) + tuple(cnddict.values())
    if self.PGLOG['DBGLEVEL']: self.pgdbg(1000, "Update {} for {}".format(tablename, values))
    ucnt = count = pgcnt = 0
    while True:
        pgcur = self.pgcursor()
        if not pgcur: return self.FAILURE
        try:
            pgcur.execute(sqlstr, values)
            count += 1
            ucnt = pgcur.rowcount
            pgcur.close()
        except PgSQL.Error as pgerr:
            if not self.check_dberror(pgerr, pgcnt, sqlstr, values, logact): return self.FAILURE
        else:
            break
        pgcnt += 1
    # BUGFIX: the "{}/{}" format had three placeholders but only two args
    # (ucnt, tablename), raising IndexError whenever DBGLEVEL was on;
    # supply the execution count as the second argument
    if self.PGLOG['DBGLEVEL']: self.pgdbg(1000, "pghupdt: {}/{} record(s) updated to {}".format(ucnt, count, tablename))
    if(logact&self.ENDLCK):
        self.endtran()
    elif self.curtran:
        self.curtran += ucnt
        if self.curtran > self.PGDBI['MTRANS']: self.starttran()
    return ucnt
|
|
936
|
+
|
|
937
|
+
# update multiple records in tablename
# tablename: update for one table name each call
# records: update values, dict of field names : value lists
# cnddicts: condition dict of field names : value lists
# return number of records updated upon success
def pgmupdt(self, tablename, records, cnddicts, logact = None):
    """Batch-update: one UPDATE per zipped (record values + condition values) row.

    records and cnddicts hold parallel value lists of equal length; rows are
    executed via psycopg2's execute_batch for speed. Returns the number of
    rows submitted (cntrow), or self.FAILURE on fatal DB error.
    """
    if logact is None: logact = self.PGDBI['ERRLOG']
    if not records: self.pglog("Nothing updates to " + tablename, logact)
    if not cnddicts or isinstance(cnddicts, int): self.pglog("Miss condition to update to " + tablename, logact)
    if logact&self.DODFLT: self.prepare_defaults(tablename, records, logact)
    sqlstr = self.prepare_update(tablename, list(records), None, list(cnddicts))
    fldvals = tuple(records.values())
    cntrow = len(fldvals[0])
    cndvals = tuple(cnddicts.values())
    cntcnd = len(cndvals[0])
    if cntcnd != cntrow: return self.pglog("Field/Condition value counts Miss match {}/{} to update {}".format(cntrow, cntcnd, tablename), logact)
    # each execution row is (set values..., condition values...)
    v = fldvals + cndvals
    values = list(zip(*v))
    if self.PGLOG['DBGLEVEL']:
        for row in values: self.pgdbg(1000, "Update {} for {}".format(tablename, row))
    ucnt = pgcnt = 0
    while True:
        pgcur = self.pgcursor()
        if not pgcur: return self.FAILURE
        try:
            execute_batch(pgcur, sqlstr, values, page_size=self.PGDBI['PGSIZE'])
            # execute_batch reports no reliable rowcount; assume all rows updated
            ucnt = cntrow
        except PgSQL.Error as pgerr:
            if not self.check_dberror(pgerr, pgcnt, sqlstr, values[0], logact): return self.FAILURE
        else:
            break
        pgcnt += 1
    pgcur.close()
    if self.PGLOG['DBGLEVEL']: self.pgdbg(1000, "pgmupdt: {} record(s) updated to {}".format(ucnt, tablename))
    if(logact&self.ENDLCK):
        self.endtran()
    elif self.curtran:
        self.curtran += ucnt
        if self.curtran > self.PGDBI['MTRANS']: self.starttran()
    return ucnt
|
|
977
|
+
|
|
978
|
+
# local helper: build the DELETE statement used by pgdel, pghdel and pgmdel
def prepare_delete(self, tablename, condition = None, cndflds = None):
    """Return "DELETE FROM tablename WHERE condition".

    condition: literal WHERE text; when empty, cndflds is expanded into
               "c1=%s AND c2=%s ..." placeholder conditions.
    """
    if not condition:
        condition = " AND ".join("{}=%s".format(fld) for fld in cndflds)
    sqlstr = "DELETE FROM {} WHERE {}".format(tablename, condition)
    if self.PGLOG['DBGLEVEL']: self.pgdbg(1000, sqlstr)
    return sqlstr
|
|
989
|
+
|
|
990
|
+
# delete one or multiple records in tablename matching condition
# tablename: delete for one table name each call
# condition: delete conditions for the WHERE clause
# return number of records deleted upon success
def pgdel(self, tablename, condition, logact = None):
    """Delete rows of tablename matching the literal condition string.

    Returns the deleted-row count, or self.FAILURE on fatal DB error.
    """
    if logact is None: logact = self.PGDBI['ERRLOG']
    if not condition or isinstance(condition, int): self.pglog("Miss condition to delete from " + tablename, logact)
    sqlstr = self.prepare_delete(tablename, condition)
    dcnt = pgcnt = 0
    while True:
        pgcur = self.pgcursor()
        if not pgcur: return self.FAILURE
        try:
            pgcur.execute(sqlstr)
            dcnt = pgcur.rowcount
            pgcur.close()
        except PgSQL.Error as pgerr:
            if not self.check_dberror(pgerr, pgcnt, sqlstr, None, logact): return self.FAILURE
        else:
            break
        pgcnt += 1
    if self.PGLOG['DBGLEVEL']: self.pgdbg(1000, "pgdel: {} record(s) deleted from {}".format(dcnt, tablename))
    if logact&self.ENDLCK:
        self.endtran()
    elif self.curtran:
        self.curtran += dcnt
        if self.curtran > self.PGDBI['MTRANS']: self.starttran()
    return dcnt
|
|
1018
|
+
|
|
1019
|
+
# delete one or multiple records in tablename matching condition
# tablename: delete for one table name each call
# cnddict: delete condition dict of names : values
# return number of records deleted upon success
def pghdel(self, tablename, cnddict, logact = None):
    """Delete rows matching the placeholder conditions in cnddict.

    Condition values are passed as query parameters. Returns the deleted-row
    count, or self.FAILURE on fatal DB error.
    """
    if logact is None: logact = self.PGDBI['ERRLOG']
    if not cnddict or isinstance(cnddict, int): self.pglog("Miss condition dict to delete from " + tablename, logact)
    sqlstr = self.prepare_delete(tablename, None, list(cnddict))
    values = tuple(cnddict.values())
    if self.PGLOG['DBGLEVEL']: self.pgdbg(1000, "Delete from {} for {}".format(tablename, values))
    dcnt = pgcnt = 0
    while True:
        pgcur = self.pgcursor()
        if not pgcur: return self.FAILURE
        try:
            pgcur.execute(sqlstr, values)
            dcnt = pgcur.rowcount
            pgcur.close()
        except PgSQL.Error as pgerr:
            if not self.check_dberror(pgerr, pgcnt, sqlstr, values, logact): return self.FAILURE
        else:
            break
        pgcnt += 1
    if self.PGLOG['DBGLEVEL']: self.pgdbg(1000, "pghdel: {} record(s) deleted from {}".format(dcnt, tablename))
    if logact&self.ENDLCK:
        self.endtran()
    elif self.curtran:
        self.curtran += dcnt
        if self.curtran > self.PGDBI['MTRANS']: self.starttran()
    return dcnt
|
|
1049
|
+
|
|
1050
|
+
# delete multiple records in tablename matching condition rows
# tablename: delete for one table name each call
# cnddicts: delete condition dict of names : value lists
# return number of records deleted upon success
def pgmdel(self, tablename, cnddicts, logact = None):
    """Batch-delete: one DELETE per zipped condition row via execute_batch.

    Returns the number of condition rows submitted (not DB-reported rowcount),
    or self.FAILURE on fatal DB error.
    """
    if logact is None: logact = self.PGDBI['ERRLOG']
    if not cnddicts or isinstance(cnddicts, int): self.pglog("Miss condition dict to delete from " + tablename, logact)
    sqlstr = self.prepare_delete(tablename, None, list(cnddicts))
    # transpose per-field value lists into per-row condition tuples
    v = cnddicts.values()
    values = list(zip(*v))
    if self.PGLOG['DBGLEVEL']:
        for row in values:
            self.pgdbg(1000, "Delete from {} for {}".format(tablename, row))
    dcnt = pgcnt = 0
    while True:
        pgcur = self.pgcursor()
        if not pgcur: return self.FAILURE
        try:
            execute_batch(pgcur, sqlstr, values, page_size=self.PGDBI['PGSIZE'])
            # execute_batch reports no reliable rowcount; count submitted rows
            dcnt = len(values)
        except PgSQL.Error as pgerr:
            if not self.check_dberror(pgerr, pgcnt, sqlstr, values[0], logact): return self.FAILURE
        else:
            break
        pgcnt += 1
    pgcur.close()
    if self.PGLOG['DBGLEVEL']: self.pgdbg(1000, "pgmdel: {} record(s) deleted from {}".format(dcnt, tablename))
    if logact&self.ENDLCK:
        self.endtran()
    elif self.curtran:
        self.curtran += dcnt
        if self.curtran > self.PGDBI['MTRANS']: self.starttran()
    return dcnt
|
|
1083
|
+
|
|
1084
|
+
# sqlstr: a complete sql statement string
# return number of records affected upon success
def pgexec(self, sqlstr, logact = None):
    """Execute an arbitrary SQL statement and return its affected-row count.

    Returns self.FAILURE on fatal DB error. NOTE(review): sqlstr is executed
    verbatim — callers are responsible for not passing untrusted input.
    """
    if logact is None: logact = self.PGDBI['ERRLOG']
    if self.PGLOG['DBGLEVEL']: self.pgdbg(100, sqlstr)
    ret = pgcnt = 0
    while True:
        pgcur = self.pgcursor()
        if not pgcur: return self.FAILURE
        try:
            pgcur.execute(sqlstr)
            ret = pgcur.rowcount
            pgcur.close()
        except PgSQL.Error as pgerr:
            if not self.check_dberror(pgerr, pgcnt, sqlstr, None, logact): return self.FAILURE
        else:
            break
        pgcnt += 1
    if self.PGLOG['DBGLEVEL']: self.pgdbg(1000, "pgexec: {} record(s) affected for {}".format(ret, sqlstr))
    if logact&self.ENDLCK:
        self.endtran()
    elif self.curtran:
        self.curtran += ret
        if self.curtran > self.PGDBI['MTRANS']: self.starttran()
    return ret
|
|
1109
|
+
|
|
1110
|
+
# tablename: name of the temporary table to create
# fromtable: table name data is gathered from
# fields: comma-delimited field list to copy
# condition: query conditions for the WHERE clause
# return number of records created upon success
def pgtemp(self, tablename, fromtable, fields, condition = None, logact = 0):
    """Create a temporary table populated from a SELECT on fromtable.

    Returns pgexec()'s affected-row count, or self.FAILURE on error.
    """
    # BUGFIX: PostgreSQL requires 'AS' between the table name and SELECT;
    # the MySQL-style "CREATE TEMPORARY TABLE t SELECT ..." is a syntax error
    sqlstr = "CREATE TEMPORARY TABLE {} AS SELECT {} FROM {}".format(tablename, fields, fromtable)
    if condition: sqlstr += " WHERE " + condition
    return self.pgexec(sqlstr, logact)
|
|
1119
|
+
|
|
1120
|
+
# build the information_schema lookup condition for a table name
def table_condition(self, tablename):
    """Return a WHERE clause matching tablename in information_schema.

    A "schema.table" name is split on its last dot; an unqualified name
    uses the configured default schema self.PGDBI['SCNAME'].
    """
    ms = re.match(r'(.+)\.(.+)', tablename)
    if ms:
        scname, tbname = ms.groups()
    else:
        scname, tbname = self.PGDBI['SCNAME'], tablename
    return "table_schema = '{}' AND table_name = '{}'".format(scname, tbname)
|
|
1130
|
+
|
|
1131
|
+
# check whether a given table name exists
# tablename: one table name to check
def pgcheck(self, tablename, logact = 0):
    """Return self.SUCCESS if tablename exists in information_schema.tables,
    self.FAILURE otherwise."""
    found = self.pgget('information_schema.tables', None, self.table_condition(tablename), logact)
    return self.SUCCESS if found else self.FAILURE
|
|
1137
|
+
|
|
1138
|
+
# group of functions to check parent records, adding an empty one if missed
# return user.uid upon success, 0 otherwise
def check_user_uid(self, userno, date = None):
    """Resolve a UCAR scientist number to dssdb.user.uid, creating the
    user record (via pgperson lookup) if it is missing.

    date: limit the match to records valid on this date; default matches
    only open-ended records (until_date IS NULL).
    """
    if not userno: return 0
    if type(userno) is str: userno = int(userno)
    if date is None:
        datecond = "until_date IS NULL"
        date = 'today'   # only used in the warning message below
    else:
        datecond = "(start_date IS NULL OR start_date <= '{}') AND (until_date IS NULL OR until_date >= '{}')".format(date, date)
    pgrec = self.pgget("dssdb.user", "uid", "userno = {} AND {}".format(userno, datecond), self.PGDBI['ERRLOG'])
    if pgrec: return pgrec['uid']
    # warn only once per process for each unknown scientist number
    if userno not in self.NMISSES:
        self.pglog("{}: Scientist ID NOT on file for {}".format(userno, date), self.LGWNEM)
        self.NMISSES.append(userno)
    # check again if a user is on file with a different date range
    pgrec = self.pgget("dssdb.user", "uid", "userno = {}".format(userno), self.PGDBI['ERRLOG'])
    if pgrec: return pgrec['uid']
    # not on file at all: pull info from the UCAR directory and add a record
    pgrec = self.ucar_user_info(userno)
    if not pgrec: pgrec = {'userno' : userno, 'stat_flag' : 'M'}   # 'M' = missing
    uid = self.pgadd("dssdb.user", pgrec, (self.PGDBI['EXITLG']|self.AUTOID))
    if uid: self.pglog("{}: Scientist ID Added as user.uid = {}".format(userno, uid), self.LGWNEM)
    return uid
|
|
1161
|
+
|
|
1162
|
+
# return user.uid upon success, 0 otherwise
def get_user_uid(self, logname, date = None):
    """Resolve a UCAR login name to dssdb.user.uid, creating the user
    record (via pgperson lookup) if it is missing.

    date: limit the match to records valid on this date; default matches
    only open-ended records (until_date IS NULL).
    """
    if not logname: return 0
    if not date:
        date = 'today'   # only used in the warning message below
        datecond = "until_date IS NULL"
    else:
        datecond = "(start_date IS NULL OR start_date <= '{}') AND (until_date IS NULL OR until_date >= '{}')".format(date, date)
    pgrec = self.pgget("dssdb.user", "uid", "logname = '{}' AND {}".format(logname, datecond), self.PGDBI['ERRLOG'])
    if pgrec: return pgrec['uid']
    # warn only once per process for each unknown login name
    if logname not in self.LMISSES:
        self.pglog("{}: UCAR Login Name NOT on file for {}".format(logname, date), self.LGWNEM)
        self.LMISSES.append(logname)
    # check again if a user is on file with a different date range
    pgrec = self.pgget("dssdb.user", "uid", "logname = '{}'".format(logname), self.PGDBI['ERRLOG'])
    if pgrec: return pgrec['uid']
    # not on file at all: pull info from the UCAR directory and add a record
    pgrec = self.ucar_user_info(0, logname)
    if not pgrec: pgrec = {'logname' : logname, 'stat_flag' : 'M'}   # 'M' = missing
    uid = self.pgadd("dssdb.user", pgrec, (self.PGDBI['EXITLG']|self.AUTOID))
    if uid: self.pglog("{}: UCAR Login Name Added as user.uid = {}".format(logname, uid), self.LGWNEM)
    return uid
|
|
1183
|
+
|
|
1184
|
+
# get ucar user info for given userno (scientist number) or logname (UCAR login)
def ucar_user_info(self, userno, logname = None):
    """Query the UCAR people directory (external 'pgperson'/'pgusername'
    commands) and return a dssdb.user-shaped dict, or None if not found.

    The commands emit "key<=>value" lines; keys are mapped to dssdb.user
    column names via the matches table below.
    """
    # pgperson output key -> dssdb.user column name
    matches = {
        'upid' : "upid",
        'uid' : "userno",
        'username' : "logname",
        'lastName' : "lstname",
        'firstName' : "fstname",
        'active' : "stat_flag",
        'internalOrg' : "division",
        'externalOrg' : "org_name",
        'country' : "country",
        'forwardEmail' : "email",
        'email' : "ucaremail",
        'phone' : "phoneno"
    }
    buf = self.pgsystem("pgperson " + ("-uid {}".format(userno) if userno else "-username {}".format(logname)), self.LOGWRN, 20)
    if not buf: return None
    pgrec = {}
    for line in buf.split('\n'):
        ms = re.match(r'^(.+)<=>(.*)$', line)
        if ms:
            (key, val) = ms.groups()
            if key in matches:
                if key == 'upid' and 'upid' in pgrec: break # get one record only
                pgrec[matches[key]] = val
    if not pgrec: return None
    # NOTE(review): the lookups below assume pgperson emitted userno/upid/
    # stat_flag/email/division keys; a partial record would raise KeyError
    if userno:
        pgrec['userno'] = userno
    elif pgrec['userno']:
        pgrec['userno'] = userno = int(pgrec['userno'])
    if pgrec['upid']: pgrec['upid'] = int(pgrec['upid'])
    # pgperson reports active as "True"/"False"; map to 'A'ctive / 'C'losed
    if pgrec['stat_flag']: pgrec['stat_flag'] = 'A' if pgrec['stat_flag'] == "True" else 'C'
    if pgrec['email'] and re.search(r'(@|\.)ucar\.edu$', pgrec['email'], re.I):
        # internal address: prefer the canonical UCAR email and org
        pgrec['email'] = pgrec['ucaremail']
        pgrec['org_name'] = 'NCAR'
    country = pgrec['country'] if 'country' in pgrec else None
    pgrec['country'] = self.set_country_code(pgrec['email'], country)
    if pgrec['division']:
        val = "NCAR"
    else:
        val = None
    pgrec['org_type'] = self.get_org_type(val, pgrec['email'])
    # second directory call for employment start/end dates
    buf = self.pgsystem("pgusername {}".format(pgrec['logname']), self.LOGWRN, 20)
    if not buf: return pgrec
    for line in buf.split('\n'):
        ms = re.match(r'^(.+)<=>(.*)$', line)
        if ms:
            (key, val) = ms.groups()
            if key == 'startDate':
                # keep only the date part of a "YYYY-MM-DD hh:mm:ss" stamp
                m = re.match(r'^(\d+-\d+-\d+)\s', val)
                if m:
                    pgrec['start_date'] = m.group(1)
                else:
                    pgrec['start_date'] = val
            if key == 'endDate':
                m = re.match(r'^(\d+-\d+-\d+)\s', val)
                if m:
                    pgrec['until_date'] = m.group(1)
                else:
                    pgrec['until_date'] = val
    return pgrec
|
|
1246
|
+
|
|
1247
|
+
# set country code for given country name or email address
def set_country_code(self, email, country = None):
    """Normalize a country name to the dotted upper-case form used in
    dssdb user records; fall back to deriving it from the email domain
    when no country is given.
    """
    # common aliases -> canonical dotted names
    codes = {
        'CHINA' : "P.R.CHINA",
        'ENGLAND' : "UNITED.KINGDOM",
        'FR' : "FRANCE",
        'KOREA' : "SOUTH.KOREA",
        'USSR' : "RUSSIA",
        'US' : "UNITED.STATES",
        'U.S.A.' : "UNITED.STATES"
    }
    if country:
        country = country.upper()
        # two-word names become dotted: "NEW ZEALAND" -> "NEW.ZEALAND"
        ms = re.match(r'^(\w+)\s(\w+)$', country)
        if ms:
            country = ms.group(1) + '.' + ms.group(2)
        elif country in codes:
            country = codes[country]
    else:
        # no country supplied: infer from the email's top-level domain
        # NOTE(review): source indentation was ambiguous here; this reads the
        # final else as the no-country fallback — confirm against upstream
        country = self.email_to_country(email)
    return country
|
|
1268
|
+
|
|
1269
|
+
# return wuser.wuid upon success, 0 otherwise
def check_wuser_wuid(self, email, date = None):
    """Find (or create) the wuser record for an email; return its wuid or 0.

    Lookup order: wuser restricted by date, wuser by email only, then a new
    record is inserted, seeded from a matching ruser row when one exists.
    """
    if not email: return 0
    emcond = "email = '{}'".format(email)
    if not date:
        date = 'today'
        datecond = "until_date IS NULL"
    else:
        datecond = "(start_date IS NULL OR start_date <= '{}') AND (until_date IS NULL OR until_date >= '{}')".format(date, date)
    pgrec = self.pgget("wuser", "wuid", "{} AND {}".format(emcond, datecond), self.PGDBI['ERRLOG'])
    if pgrec: return pgrec['wuid']
    # check again if a user is on file with different date range
    pgrec = self.pgget("wuser", "wuid", emcond, self.LOGERR)
    if pgrec: return pgrec['wuid']
    # not on file: build a new wuser record
    record = {'email' : email}
    # check again if a ruser is on file; prefer the currently-active row
    pgrec = self.pgget("ruser", "*", emcond + " AND end_date IS NULL", self.PGDBI['ERRLOG'])
    if not pgrec: pgrec = self.pgget("ruser", "*", emcond, self.PGDBI['ERRLOG'])
    if pgrec:
        # seed the wuser record from the ruser row
        record['ruid'] = pgrec['id']
        record['fstname'] = pgrec['fname']
        record['lstname'] = pgrec['lname']
        record['country'] = pgrec['country']
        record['org_type'] = self.get_org_type(pgrec['org_type'], pgrec['email'])
        record['start_date'] = str(pgrec['rdate'])
        if pgrec['end_date']:
            # ruser account already closed
            record['until_date'] = str(pgrec['end_date'])
            record['stat_flag'] = 'C'
        else:
            record['stat_flag'] = 'A'
        if pgrec['title']: record['utitle'] = pgrec['title']
        if pgrec['mname']: record['midinit'] = pgrec['mname'][0]
        if pgrec['org']: record['org_name'] = pgrec['org']
    else:
        # no ruser row: derive type/country from the email address alone
        record['stat_flag'] = 'M'
        record['org_type'] = self.get_org_type('', email)
        record['country'] = self.email_to_country(email)
    wuid = self.pgadd("wuser", record, self.LOGERR|self.AUTOID)
    if wuid:
        if pgrec:
            self.pglog("{}({}, {}) Added as wuid({})".format(email, pgrec['lname'], pgrec['fname'], wuid), self.LGWNEM)
        else:
            self.pglog("{} Added as wuid({})".format(email, wuid), self.LGWNEM)
        return wuid
    return 0
|
|
1315
|
+
|
|
1316
|
+
# return wuser.wuid upon success, 0 otherwise
def check_cdp_wuser(self, username):
    """Find or register the wuser record associated with a CDP user name."""
    pgrec = self.pgget("wuser", "wuid", "cdpname = '{}'".format(username), self.PGDBI['EXITLG'])
    if pgrec: return pgrec['wuid']
    # NOTE(review): pgrec is falsy past this point, so pgrec['email'],
    # pgrec['cdpid'] etc. below would raise TypeError. The first query
    # presumably should fetch the full CDP user record (more fields, or
    # from a CDP source table) — confirm against the original PgDBI.py.
    idrec = self.pgget("wuser", "wuid", "email = '{}'".format(pgrec['email']), self.PGDBI['EXITLG'])
    wuid = idrec['wuid'] if idrec else 0
    if wuid > 0:
        # wuser exists by email: attach the CDP identifiers to it
        idrec = {}
        idrec['cdpid'] = pgrec['cdpid']
        idrec['cdpname'] = pgrec['cdpname']
        self.pgupdt("wuser", idrec, "wuid = {}".format(wuid) , self.PGDBI['EXITLG'])
    else:
        # no wuser at all: register a new active one from the CDP record
        pgrec['stat_flag'] = 'A'
        pgrec['org_type'] = self.get_org_type(pgrec['org_type'], pgrec['email'])
        pgrec['country'] = self.email_to_country(pgrec['email'])
        wuid = self.pgadd("wuser", pgrec, self.PGDBI['EXITLG']|self.AUTOID)
        if wuid > 0:
            self.pglog("CDP User {} added as wuid = {} in RDADB".format(username, wuid), self.LGWNEM)
    return wuid
|
|
1335
|
+
|
|
1336
|
+
# for given email to get long country name
def email_to_country(self, email):
    """Return the country token implied by an email address domain.

    Two-letter top-level domains are resolved through the countries table;
    common US domains map to UNITED.STATES; anything unresolvable returns
    "UNKNOWN" (fix: previously a failed countries lookup fell off the end
    and returned None instead of "UNKNOWN").
    """
    ms = re.search(r'\.(\w\w)$', email)
    if ms:
        pgrec = self.pgget("countries", "token", "domain_id = '{}'".format(ms.group(1)), self.PGDBI['EXITLG'])
        if pgrec: return pgrec['token']
    elif re.search(r'\.(gov|edu|mil|org|com|net)$', email):
        return "UNITED.STATES"
    # unresolved two-letter domain, or no recognizable domain at all
    return "UNKNOWN"
|
|
1346
|
+
|
|
1347
|
+
# bump the version counter in table dataset for the given dataset
# (the previous comment about resetting filelists to 'P' did not match
# the code; the statement below only increments dataset.version)
def reset_rdadb_version(self, dsid):
    """Increment dataset.version by one for dsid."""
    self.pgexec("UPDATE dataset SET version = version + 1 WHERE dsid = '{}'".format(dsid), self.PGDBI['ERRLOG'])
|
|
1350
|
+
|
|
1351
|
+
# check the use_rdadb flag in table dataset for a given dataset and given values
def use_rdadb(self, dsid, logact = 0, vals = None):
    """Return the dataset's use_rdadb flag when it is in the accepted set.

    Returns '' when the dataset is not in RDADB (logging via logact if
    given), and 'N' when it is on file but its flag is empty or not in
    vals (default accepted set "IPYMW").
    """
    if not dsid:
        return ''
    pgrec = self.pgget("dataset", "use_rdadb", "dsid = '{}'".format(dsid), self.PGDBI['EXITLG'])
    if not pgrec:
        if logact:
            self.pglog("Dataset '{}' is not in RDADB!".format(dsid), logact)
        return ''
    flag = pgrec['use_rdadb']
    accepted = vals if vals else "IPYMW"  # Internal; Publishable; Yes RDADB
    if flag and accepted.find(flag) > -1:
        return flag
    return 'N'
|
|
1365
|
+
|
|
1366
|
+
# fld: field name for query condition
# vals: reference to array of values
# isstr: 1 for string values, requires quotes and supports wildcards
# noand: 1 for skipping the leading ' AND ' for condition
# return a condition string for a given field
def get_field_condition(self, fld, vals, isstr = 0, noand = 0):
    """Build a SQL condition for fld from a value list.

    The value list is a tiny language: a leading PGSIGNS[0] negates the
    whole condition; a comparison sign from PGSIGNS[1:] applies to the
    value(s) that follow ('<>' consumes two values as a BETWEEN range).
    Plain values accumulate into an IN/equality clause; empty strings
    become an IS NULL test; '%' selects LIKE and regex-ish characters
    select SIMILAR TO.
    """
    cnd = wcnd = negative = ''
    sign = "="
    logic = " OR "   # joins sub-conditions; flips to AND when negated
    count = len(vals) if vals else 0
    if count == 0: return ''
    # ncnt: null values seen; scnt: values still owed to a pending sign;
    # wcnt: sign-based sub-conditions; cnt: plain equality values
    ncnt = scnt = wcnt = cnt = 0
    for i in range(count):
        val = vals[i]
        if val is None or (i > 0 and val == vals[i-1]): continue  # skip dups
        if i == 0 and val == self.PGSIGNS[0]:
            # leading negation token
            negative = "NOT "
            logic = " AND "
            continue
        if scnt == 0 and isinstance(val, str):
            ms = re.match(r'^({})$'.format('|'.join(self.PGSIGNS[1:])), val)
            if ms:
                # a comparison sign: remember it for the following value(s)
                osign = sign = ms.group(1)
                scnt += 1
                if sign == "<>":
                    scnt += 1  # BETWEEN needs two values
                    sign = negative + "BETWEEN"
                elif negative:
                    sign = "<=" if (sign == ">") else ">="  # negate compare
                continue
        if isstr:
            if not isinstance(val, str): val = str(val)
            if sign == "=":
                if not val:
                    ncnt += 1 # found null string
                elif val.find('%') > -1:
                    sign = negative + "LIKE"
                elif re.search(r'[\[\(\?\.]', val):
                    sign = negative + "SIMILAR TO"
            if val.find("'") != 0:
                val = "'{}'".format(val)
        elif isinstance(val, str):
            # numeric field passed as string: convert for unquoted SQL
            if val.find('.') > -1:
                val = float(val)
            else:
                val = int(val)
        if sign == "=":
            # plain value: accumulate for IN (...)
            if cnt > 0: cnd += ", "
            cnd += str(val)
            cnt += 1
        else:
            if sign == "AND":
                # second half of a BETWEEN range
                wcnd += " {} {}".format(sign, val)
            else:
                if wcnt > 0: wcnd += logic
                wcnd += "{} {} {}".format(fld, sign, val)
                wcnt += 1
            if re.search(r'BETWEEN$', sign):
                sign = "AND"   # next value completes the range
            else:
                sign = "="
                scnt = 0
    if scnt > 0:
        # a sign was given without its value(s)
        s = 's' if scnt > 1 else ''
        self.pglog("Need {} value{} after sign '{}'".format(scnt, s, osign), self.LGEREX)
    if wcnt > 1: wcnd = "({})".format(wcnd)
    if cnt > 0:
        if cnt > 1:
            cnd = "{} {}IN ({})".format(fld, negative, cnd)
        else:
            cnd = "{} {} {}".format(fld, ("<>" if negative else "="), cnd)
        if ncnt > 0:
            # NOTE(review): the IS NULL clause is only attached when plain
            # values are present too — confirm that null-only input should
            # yield no condition
            ncnd = "{} IS {}NULL".format(fld, negative)
            cnd = "({}{}{})".format(cnd, logic, ncnd)
        if wcnt > 0: cnd = "({}{}{})".format(cnd, logic, wcnd)
    elif wcnt > 0:
        cnd = wcnd
    if cnd and not noand: cnd = " AND " + cnd
    return cnd
|
|
1445
|
+
|
|
1446
|
+
# build up fieldname string for given or default condition
def fieldname_string(self, fnames, dnames = None, anames = None, wflds = None):
    """Resolve a field-name string and merge in required with-fields.

    Empty fnames means the defaults (dnames); the literal 'all' (any case)
    means anames. Each with-field in wflds, if absent, is inserted just
    before its anchor field (Q before R, Y before X, G before I) or
    prepended when no anchor is found.
    """
    if not fnames:
        fnames = dnames           # default field names
    elif re.match(r'^all$', fnames, re.I):
        fnames = anames           # all field names
    if not wflds:
        return fnames
    anchors = {"Q": "R", "Y": "X", "G": "I"}
    for wfld in wflds:
        if not wfld or wfld in fnames:
            continue              # empty field, or included already
        anchor = anchors.get(wfld)
        pos = fnames.find(anchor) if anchor else -1
        if pos < 0:
            fnames = wfld + fnames                     # prepend with-field
        else:
            fnames = fnames[:pos] + wfld + fnames[pos:]  # insert with-field
    return fnames
|
|
1468
|
+
|
|
1469
|
+
# Function get_group_field_path(gindex: group index
#                               dsid: dataset id
#                               field: path field name: webpath or savedpath)
# go through group tree upward to find a non-empty path, return it or None
def get_group_field_path(self, gindex, dsid, field):
    """Return the first non-empty `field` value walking up the group tree.

    gindex == 0 reads the dataset-level record; otherwise the dsgroup row
    is read and, when its path is empty, the parent group is tried.
    """
    if gindex:
        pgrec = self.pgget("dsgroup", f"pindex, {field}",
                           f"dsid = '{dsid}' AND gindex = {gindex}", self.PGDBI['EXITLG'])
    else:
        pgrec = self.pgget("dataset", field, f"dsid = '{dsid}'", self.PGDBI['EXITLG'])
    if not pgrec:
        return None
    if pgrec[field]:
        return pgrec[field]
    if gindex:
        # empty at this level: recurse to the parent group
        return self.get_group_field_path(pgrec['pindex'], dsid, field)
    return None
|
|
1486
|
+
|
|
1487
|
+
# get the specialist info for a given dataset
def get_specialist(self, dsid, logact = None):
    """Return the primary specialist record for dsid (cached per dataset).

    Fix: the cache was read and written with the literal key 'dsid'
    instead of the dsid value, so every dataset shared one cache slot and
    cached lookups could return another dataset's specialist.
    """
    if logact is None: logact = self.PGDBI['ERRLOG']
    if dsid in self.SPECIALIST: return self.SPECIALIST[dsid]

    pgrec = self.pgget("dsowner, dssgrp", "specialist, lstname, fstname",
                       "specialist = logname AND dsid = '{}' AND priority = 1".format(dsid), logact)
    if pgrec:
        # NOTE(review): both branches force the Data Help display name;
        # only the login differs — presumably intentional routing, confirm
        if pgrec['specialist'] == "datahelp" or pgrec['specialist'] == "dss":
            pgrec['lstname'] = "Help"
            pgrec['fstname'] = "Data"
        else:
            pgrec['specialist'] = "datahelp"
            pgrec['lstname'] = "Help"
            pgrec['fstname'] = "Data"
        self.SPECIALIST[dsid] = pgrec # cache specialist info for dsowner of dsid
    return pgrec
|
|
1504
|
+
|
|
1505
|
+
# build customized email from get_email()
def build_customized_email(self, table, field, condition, subject, logact = 0):
    """Send the pending email buffer; on failure cache it in table.field.

    Returns SUCCESS when delivered or cached, FAILURE otherwise.
    """
    msg = self.get_email()
    if not msg:
        return self.FAILURE
    sender = self.PGLOG['CURUID'] + "@ucar.edu"
    receiver = self.PGLOG['EMLADDR'] if self.PGLOG['EMLADDR'] else (self.PGLOG['CURUID'] + "@ucar.edu")
    if receiver.find(sender) < 0:
        self.add_carbon_copy(sender, 1)  # keep the sender in the loop
    cc = self.PGLOG['CCDADDR']
    if not subject:
        subject = "Message from {}-{}".format(self.PGLOG['HOSTNAME'], self.get_command())
    estat = self.send_python_email(subject, receiver, msg, sender, cc, logact)
    if estat == self.SUCCESS:
        return estat
    # direct send failed: assemble a raw message buffer for fallback paths
    parts = ["From: {}\nTo: {}\n".format(sender, receiver)]
    if cc:
        parts.append("Cc: {}\n".format(cc))
    parts.append("Subject: {}!\n\n{}\n".format(subject, msg))
    ebuf = ''.join(parts)
    if self.PGLOG['EMLSEND']:
        estat = self.send_customized_email(f"{table}.{condition}", ebuf, logact)
    if estat != self.SUCCESS:
        estat = self.cache_customized_email(table, field, condition, ebuf, 0)
        if estat and logact:
            self.pglog("Email {} cached to '{}.{}' for {}, Subject: {}".format(receiver, table, field, condition, subject), logact)
    return estat
|
|
1527
|
+
|
|
1528
|
+
# email: full user email address
# get user real name from table ruser for a given email address
# opts == 1 : include email
# opts == 2 : include org_type
# opts == 4 : include country
# opts == 8 : include valid_email
# opts == 16 : include org
def get_ruser_names(self, email, opts = 0, date = None):
    """Return a dict with the user's display 'name' plus optional fields.

    Falls back from date-restricted ruser, to any ruser row, to
    dssdb.user, and finally synthesizes a name from the email local part.
    """
    fields = "lname lstname, fname fstname"
    if opts&1: fields += ", email"
    if opts&2: fields += ", org_type"
    if opts&4: fields += ", country"
    if opts&8: fields += ", valid_email"
    if opts&16: fields += ", org"
    if date:
        datecond = "rdate <= '{}' AND (end_date IS NULL OR end_date >= '{}')".format(date, date)
    else:
        datecond = "end_date IS NULL"
        # date is only used for the warning message below
        date = time.strftime("%Y-%m-%d", (time.gmtime() if self.PGLOG['GMTZ'] else time.localtime()))
    emcnd = "email = '{}'".format(email)
    pgrec = self.pgget("ruser", fields, "{} AND {}".format(emcnd, datecond), self.LGEREX)
    if not pgrec: # no active ruser record for that date
        self.pglog("{}: email not in ruser for {}".format(email, date), self.LOGWRN)
        # check again if a user is on file with different date range
        pgrec = self.pgget("ruser", fields, emcnd, self.LGEREX)
        if not pgrec and self.pgget("dssdb.user", '', emcnd):
            # legacy dssdb.user table uses different column names
            fields = "lstname, fstname"
            if opts&1: fields += ", email"
            if opts&2: fields += ", org_type"
            if opts&4: fields += ", country"
            if opts&8: fields += ", email valid_email"
            if opts&16: fields += ", org_name org"
            pgrec = self.pgget("dssdb.user", fields, emcnd, self.LGEREX)
    if pgrec and pgrec['lstname']:
        pgrec['name'] = (pgrec['fstname'].capitalize() + ' ') if pgrec['fstname'] else ''
        pgrec['name'] += pgrec['lstname'].capitalize()
    else:
        # no usable record: synthesize the name from the email local part
        if not pgrec: pgrec = {}
        pgrec['name'] = email.split('@')[0]
        if opts&1: pgrec['email'] = email
    return pgrec
|
|
1569
|
+
|
|
1570
|
+
# cache a customized email for sending it later
def cache_customized_email(self, table, field, condition, emlmsg, logact = 0):
    """Store emlmsg into table.field for the row matching condition.

    Falls back to sending the message directly when the update fails.
    """
    record = {field: emlmsg}
    if not self.pgupdt(table, record, condition, logact|self.ERRLOG):
        # could not cache: try immediate delivery instead
        msg = "cache email to '{}.{}' for {}".format(table, field, condition)
        self.pglog(f"Error {msg}, try to send directly now", logact|self.ERRLOG)
        return self.send_customized_email(msg, emlmsg, logact)
    if logact:
        self.pglog("Email cached to '{}.{}' for {}".format(table, field, condition), logact&(~self.EXITLG))
    return self.SUCCESS
|
|
1580
|
+
|
|
1581
|
+
# otype: user organization type
# email: user email address
# return: organization type like DSS, NCAR, UNIV...
def get_org_type(self, otype, email):
    """Derive the organization type from the given type and email domain."""
    if not otype:
        otype = "OTHER"
    if not email:
        return otype
    ucar = re.search(r'(@|\.)ucar\.edu$', email)
    if ucar:
        if otype == 'UCAR' or otype == 'OTHER':
            otype = 'NCAR'
        if otype == 'NCAR' and ucar.group(1) == '@':
            # direct ucar.edu logins that are in the DECS group count as DSS
            mu = re.match(r'^(.+)@', email)
            if mu and self.pgget("dssgrp", "", "logname = '{}'".format(mu.group(1))):
                otype = 'DSS'
    else:
        md = re.search(r'\.(mil|org|gov|edu|com|net)(\.\w\w|$)', email)
        if md:
            otype = md.group(1).upper()
            if otype == 'EDU':
                otype = "UNIV"
    return otype
|
|
1600
|
+
|
|
1601
|
+
# join values and handle the null values
|
|
1602
|
+
@staticmethod
|
|
1603
|
+
def join_values(vstr, vals):
|
|
1604
|
+
if vstr:
|
|
1605
|
+
vstr += "\n"
|
|
1606
|
+
elif vstr is None:
|
|
1607
|
+
vstr = ''
|
|
1608
|
+
return "{}Value{}({})".format(vstr, ('s' if len(vals) > 1 else ''), ', '.join(map(str, vals)))
|
|
1609
|
+
|
|
1610
|
+
# check table hostname to find the system down times. Cache the result for 10 minutes
def get_system_downs(self, hostname, logact = 0):
    """Return the cached downtime info dict for hostname.

    Keys: start/end (epoch seconds, 0 when not down), active (0 when the
    host is out of service), path (restricted path prefixes or None),
    curtime and chktime (epoch seconds). Refreshed from the hostname
    table at most every 600 seconds.
    """
    curtime = int(time.time())
    newhost = 0
    if hostname not in self.SYSDOWN:
        self.SYSDOWN[hostname] = {}
        newhost = 1
    # refresh when first seen or the cached entry is older than 10 minutes
    if newhost or (curtime - self.SYSDOWN[hostname]['chktime']) > 600:
        self.SYSDOWN[hostname]['chktime'] = curtime
        self.SYSDOWN[hostname]['start'] = 0
        self.SYSDOWN[hostname]['end'] = 0
        self.SYSDOWN[hostname]['active'] = 1
        self.SYSDOWN[hostname]['path'] = None
        pgrec = self.pgget('hostname', 'service, domain, downstart, downend',
                           "hostname = '{}'".format(hostname), logact)
        if pgrec:
            if pgrec['service'] == 'N':
                # host permanently out of service: treat as down starting now
                self.SYSDOWN[hostname]['start'] = curtime
                self.SYSDOWN[hostname]['active'] = 0
            else:
                start = int(datetime.timestamp(pgrec['downstart'])) if pgrec['downstart'] else 0
                end = int(datetime.timestamp(pgrec['downend'])) if pgrec['downend'] else 0
                # record only current/future windows, not ones already over
                if start > 0 and (end == 0 or end > curtime):
                    self.SYSDOWN[hostname]['start'] = start
                    self.SYSDOWN[hostname]['end'] = end
            # service 'S' restricts the downtime to path prefixes in domain
            if pgrec['service'] == 'S' and pgrec['domain'] and re.match(r'^/', pgrec['domain']):
                self.SYSDOWN[hostname]['path'] = pgrec['domain']
    self.SYSDOWN[hostname]['curtime'] = curtime
    return self.SYSDOWN[hostname]
|
|
1639
|
+
|
|
1640
|
+
# return seconds for how long the system will continue to be down
def system_down_time(self, hostname, offset, logact = 0):
    """Return remaining downtime in seconds (0 when not down).

    offset widens the window so callers can back off shortly before the
    scheduled start.
    """
    down = self.get_system_downs(hostname, logact)
    in_window = down['start'] and down['curtime'] >= (down['start'] - offset)
    if not in_window:
        return 0  # the system is not down
    if down['end']:
        remaining = down['end'] - down['curtime']
        return remaining if remaining >= 0 else 0
    # open-ended downtime: under PBS batch assume the full PBS wall time
    if self.PGLOG['PGBATCH'] == self.PGLOG['PBSNAME']:
        return self.PGLOG['PBSTIME']
    return 0
|
|
1650
|
+
|
|
1651
|
+
# return string message if the system is down
def system_down_message(self, hostname, path, offset, logact = 0):
    """Return a human-readable downtime message, or None when up.

    Fix: the "will end by" branch used 'msg =', discarding the header and
    'Planned down' text already built; it now appends like its sibling
    branch does.
    """
    down = self.get_system_downs(hostname, logact)
    msg = None
    if down['start'] and down['curtime'] >= (down['start'] - offset):
        match = self.match_down_path(path, down['path'])
        if match:
            msg = "{}{}:".format(hostname, ('-' + path) if match > 0 else '')
            if not down['active']:
                msg += " Not in Service"
            else:
                msg += " Planned down, started at " + self.current_datetime(down['start'])
                if not down['end']:
                    msg += " And no end time specified"
                elif down['curtime'] <= down['end']:
                    msg += " And will end by " + self.current_datetime(down['end'])
    return msg
|
|
1668
|
+
|
|
1669
|
+
# return 1 if given path match daemon paths, 0 if not; -1 if cannot compare
|
|
1670
|
+
@staticmethod
|
|
1671
|
+
def match_down_path(path, dpaths):
|
|
1672
|
+
if not (path and dpaths): return -1
|
|
1673
|
+
paths = re.split(':', dpaths)
|
|
1674
|
+
for p in paths:
|
|
1675
|
+
if re.match(r'^{}'.format(p), path): return 1
|
|
1676
|
+
return 0
|
|
1677
|
+
|
|
1678
|
+
# validate that the login user is in the DECS group
# check all nodes if skpdsg is false, otherwise skip DSG nodes
def validate_decs_group(self, cmdname, logname, skpdsg):
    """Abort (via pglog LGEREX) unless logname is in the DECS group.

    Fix: the default-user fallback assigned 'lgname' (a typo), leaving
    logname empty in the membership query and the error message; it now
    assigns logname.
    """
    if skpdsg and self.PGLOG['DSGHOSTS'] and re.search(r'(^|:){}'.format(self.PGLOG['HOSTNAME']), self.PGLOG['DSGHOSTS']): return
    if not logname: logname = self.PGLOG['CURUID']
    if not self.pgget("dssgrp", '', "logname = '{}'".format(logname), self.LGEREX):
        self.pglog("{}: Must be in DECS Group to run '{}' on {}".format(logname, cmdname, self.PGLOG['HOSTNAME']), self.LGEREX)
|
|
1685
|
+
|
|
1686
|
+
# add an allusage record into yearly table; create a new yearly table if it does not exist
# year -- year to identify the yearly table, evaluated if missing
# records -- hash to hold one or multiple records.
#  Dict keys: email -- user email address,
#             org_type -- organization type
#             country -- country code
#             dsid -- dataset ID
#             date -- date data accessed
#             time -- time data accessed
#             quarter -- quarter of the year data accessed
#             size -- bytes of data accessed
#             method -- delivery methods: MSS,Web,Ftp,Tape,Cd,Disk,Paper,cArt,Micro
#             source -- usage source flag: W - wusage, O - ordusage
#             midx -- refer to mbr2loc.midx if not 0
#             ip -- user IP address
#             region -- user region name; for example, Colorado
# isarray -- if true, multiple records provided via arrays for each hash key
# docheck -- if 1, check and add only if record is not on file
# docheck -- if 2, check and add if record is not on file, and update if exists
# docheck -- if 4, check and add if record is not on file, and update if exists,
#            and also checking NULL email value too
def add_yearly_allusage(self, year, records, isarray = 0, docheck = 0):
    """Insert (and optionally upsert) allusage rows into allusage_<year>.

    Returns the number of rows added/updated.
    """
    acnt = 0
    if not year:
        # derive the year from the (first) access date
        ms = re.match(r'^(\d\d\d\d)', str(records['date'][0] if isarray else records['date']))
        if ms: year = ms.group(1)
    tname = "allusage_{}".format(year)
    if isarray:
        cnt = len(records['email'])
        if 'quarter' not in records: records['quarter'] = [0]*cnt
        for i in range(cnt):
            if not records['quarter'][i]:
                # derive quarter (1-4) from the month in the date string
                ms = re.search(r'-(\d+)-', str(records['date'][i]))
                if ms: records['quarter'][i] = int((int(ms.group(1))-1)/3)+1
        if docheck:
            # row-by-row duplicate check; slower but avoids double counting
            for i in range(cnt):
                record = {}
                for key in records:
                    record[key] = records[key][i]
                cnd = "email = '{}' AND dsid = '{}' AND method = '{}' AND date = '{}' AND time = '{}'".format(
                    record['email'], record['dsid'], record['method'], record['date'], record['time'])
                pgrec = self.pgget(tname, 'aidx', cnd, self.LOGERR|self.ADDTBL)
                if docheck == 4 and not pgrec:
                    # also match rows recorded without an email
                    cnd = "email IS NULL AND dsid = '{}' AND method = '{}' AND date = '{}' AND time = '{}'".format(
                        record['dsid'], record['method'], record['date'], record['time'])
                    pgrec = self.pgget(tname, 'aidx', cnd, self.LOGERR|self.ADDTBL)
                if pgrec:
                    if docheck > 1: acnt += self.pgupdt(tname, record, "aidx = {}".format(pgrec['aidx']), self.LGEREX)
                else:
                    acnt += self.pgadd(tname, record, self.LGEREX|self.ADDTBL)
        else:
            # bulk insert without duplicate checking
            acnt = self.pgmadd(tname, records, self.LGEREX|self.ADDTBL)
    else:
        record = records
        if not ('quarter' in record and record['quarter']):
            ms = re.search(r'-(\d+)-', str(record['date']))
            if ms: record['quarter'] = int((int(ms.group(1))-1)/3)+1
        if docheck:
            cnd = "email = '{}' AND dsid = '{}' AND method = '{}' AND date = '{}' AND time = '{}'".format(
                record['email'], record['dsid'], record['method'], record['date'], record['time'])
            pgrec = self.pgget(tname, 'aidx', cnd, self.LOGERR|self.ADDTBL)
            if docheck == 4 and not pgrec:
                cnd = "email IS NULL AND dsid = '{}' AND method = '{}' AND date = '{}' AND time = '{}'".format(
                    record['dsid'], record['method'], record['date'], record['time'])
                pgrec = self.pgget(tname, 'aidx', cnd, self.LOGERR|self.ADDTBL)
            if pgrec:
                if docheck > 1: acnt = self.pgupdt(tname, record, "aidx = {}".format(pgrec['aidx']), self.LGEREX)
                return acnt
        acnt = self.pgadd(tname, record, self.LGEREX|self.ADDTBL)
    return acnt
|
|
1756
|
+
|
|
1757
|
+
# add a wusage record into yearly table; create a new yearly table if it does not exist
# year -- year to identify the yearly table, evaluated if missing
# records -- hash to hold one or multiple records.
#  Dict keys: wid - reference to wfile.wid
#             wuid_read - reference to wuser.wuid, 0 if missing email
#             dsid - reference to dataset.dsid at the time of read
#             date_read - date file read
#             time_read - time file read
#             quarter - quarter of the year data accessed
#             size_read - bytes of data read
#             method - download methods: WEB, CURL, MGET, FTP and MGET
#             locflag - location flag: Glade or Object
#             ip - IP address
# isarray -- if true, multiple records provided via arrays for each hash key
def add_yearly_wusage(self, year, records, isarray = 0):
    """Insert wusage record(s) into wusage_<year>; return rows added."""
    def _quarter(dstr):
        # month -> quarter (1-4); 0 when no month can be parsed
        mm = re.search(r'-(\d+)-', str(dstr))
        return (int((int(mm.group(1))-1)/3)+1) if mm else 0
    if not year:
        first = records['date_read'][0] if isarray else records['date_read']
        ym = re.match(r'^(\d\d\d\d)', str(first))
        if ym: year = ym.group(1)
    tname = "wusage_{}".format(year)
    if isarray:
        if 'quarter' not in records:
            records['quarter'] = [_quarter(records['date_read'][i]) for i in range(len(records['wid']))]
        return self.pgmadd(tname, records, self.LGEREX|self.ADDTBL)
    if 'quarter' not in records:
        q = _quarter(records['date_read'])
        if q: records['quarter'] = q
    return self.pgadd(tname, records, self.LGEREX|self.ADDTBL)
|
|
1792
|
+
|
|
1793
|
+
# double quote an array of single or sign-delimited strings
def pgnames(self, ary, sign = None, joinstr = None):
    """Quote each name in ary via pgname().

    Returns the list of quoted names, or a single string joined with
    joinstr when joinstr is given.
    """
    quoted = [self.pgname(name, sign) for name in ary]
    return quoted if joinstr is None else joinstr.join(quoted)
|
|
1802
|
+
|
|
1803
|
+
# double quote a single or sign-delimited string
def pgname(self, str, sign = None):
    """Double-quote an identifier when PostgreSQL requires it.

    When sign is given, the value is split on sign[0] and each piece is
    processed recursively with the remaining delimiters. (The parameter
    name shadows the builtin str; kept for caller compatibility.)
    """
    if sign:
        pieces = [self.pgname(part, sign[1:]) for part in str.split(sign[0])]
        return sign[0].join(pieces)
    nstr = str.strip()
    if nstr and nstr.find('"') < 0:
        # quote anything not plain lower-case identifier, or a reserved word
        if not re.match(r'^[a-z_][a-z0-9_]*$', nstr) or nstr in self.PGRES:
            nstr = '"{}"'.format(nstr)
    return nstr
|
|
1817
|
+
|
|
1818
|
+
# get a postgres password for given host, port, dbname, usname
def get_pgpass_password(self):
    """Return the connection password: configured, OpenBao, then .pgpass."""
    if self.PGDBI['PWNAME']:
        return self.PGDBI['PWNAME']
    password = self.get_baopassword()
    if not password:
        password = self.get_pgpassword()
    return password
|
|
1824
|
+
|
|
1825
|
+
# get the pg passwords from file .pgpass
def get_pgpassword(self):
    """Look up the password from parsed .pgpass, trying DBSHOST then DBHOST."""
    if not self.DBPASS:
        self.read_pgpass()
    dbport = str(self.PGDBI['DBPORT']) if self.PGDBI['DBPORT'] else '5432'
    for host in (self.PGDBI['DBSHOST'], self.PGDBI['DBHOST']):
        pwname = self.DBPASS.get((host, dbport, self.PGDBI['DBNAME'], self.PGDBI['LNNAME']))
        if pwname:
            return pwname
    return None
|
|
1832
|
+
|
|
1833
|
+
# get the pg passwords from OpenBao
def get_baopassword(self):
    """Return the OpenBao-stored password for the current db/login.

    Loads (and caches) the database's secrets on first use; returns None
    when no entry exists for the login name.
    """
    dbname = self.PGDBI['DBNAME']
    if dbname not in self.DBBAOS: self.read_openbao()
    return self.DBBAOS[dbname].get(self.PGDBI['LNNAME'])
|
|
1838
|
+
|
|
1839
|
+
# Reads the .pgpass file and returns a dictionary of credentials.
def read_pgpass(self):
    """Parse .pgpass into self.DBPASS keyed by (host, port, db, user).

    Tries DSSHOME first, then GDEXHOME; errors are logged, not raised.
    """
    path = self.PGLOG['DSSHOME'] + '/.pgpass'
    if not op.isfile(path):
        path = self.PGLOG['GDEXHOME'] + '/.pgpass'
    try:
        with open(path, "r") as fh:
            for raw in fh:
                entry = raw.strip()
                if not entry or entry.startswith("#"):
                    continue  # skip blanks and comments
                host, port, db, user, password = entry.split(":")
                self.DBPASS[(host, port, db, user)] = password
    except Exception as e:
        self.pglog(str(e), self.PGDBI['ERRLOG'])
|
|
1852
|
+
|
|
1853
|
+
# Reads OpenBao secrets and returns a dictionary of credentials.
def read_openbao(self):
    """Load the OpenBao KV secrets for the current database into DBBAOS.

    Secret keys of the form '<name>pass' map login '<name>' ('meta' is
    expanded to 'metadata'); the bare 'password' key maps to 'postgres'.
    """
    dbname = self.PGDBI['DBNAME']
    self.DBBAOS[dbname] = {}
    # NOTE(review): this hard-coded url is never used; the client below
    # reads PGDBI['BAOURL'] instead — confirm which one is intended
    url = 'https://bao.k8s.ucar.edu/'
    baopath = {
        'ivaddb' : 'gdex/pgdb03',
        'ispddb' : 'gdex/pgdb03',
        'default' : 'gdex/pgdb01'
    }
    dbpath = baopath[dbname] if dbname in baopath else baopath['default']
    client = hvac.Client(url=self.PGDBI.get('BAOURL'))
    client.token = self.PGLOG.get('BAOTOKEN')
    try:
        read_response = client.secrets.kv.v2.read_secret_version(
            path=dbpath,
            mount_point='kv',
            raise_on_deleted_version=False
        )
    except Exception as e:
        return self.pglog(str(e), self.PGDBI['ERRLOG'])
    baos = read_response['data']['data']
    for key in baos:
        ms = re.match(r'^(\w*)pass(\w*)$', key)
        if not ms: continue
        baoname = None
        pre = ms.group(1)
        suf = ms.group(2)
        if pre:
            baoname = 'metadata' if pre == 'meta' else pre
        elif suf == 'word':
            baoname = 'postgres'
        if baoname: self.DBBAOS[dbname][baoname] = baos[key]
|