rda-python-common 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rda_python_common/PgCMD.py +603 -0
- rda_python_common/PgDBI.py +2306 -0
- rda_python_common/PgFile.py +3118 -0
- rda_python_common/PgLOG.py +1689 -0
- rda_python_common/PgLock.py +640 -0
- rda_python_common/PgOPT.py +1740 -0
- rda_python_common/PgSIG.py +1164 -0
- rda_python_common/PgSplit.py +299 -0
- rda_python_common/PgUtil.py +1854 -0
- rda_python_common/__init__.py +0 -0
- rda_python_common/pg_cmd.py +493 -0
- rda_python_common/pg_dbi.py +1885 -0
- rda_python_common/pg_file.py +2462 -0
- rda_python_common/pg_lock.py +533 -0
- rda_python_common/pg_log.py +1352 -0
- rda_python_common/pg_opt.py +1447 -0
- rda_python_common/pg_pass.py +92 -0
- rda_python_common/pg_sig.py +879 -0
- rda_python_common/pg_split.py +260 -0
- rda_python_common/pg_util.py +1534 -0
- rda_python_common/pgpassword.py +92 -0
- rda_python_common-2.0.0.dist-info/METADATA +20 -0
- rda_python_common-2.0.0.dist-info/RECORD +27 -0
- rda_python_common-2.0.0.dist-info/WHEEL +5 -0
- rda_python_common-2.0.0.dist-info/entry_points.txt +3 -0
- rda_python_common-2.0.0.dist-info/licenses/LICENSE +21 -0
- rda_python_common-2.0.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,2306 @@
|
|
|
1
|
+
#
|
|
2
|
+
###############################################################################
|
|
3
|
+
#
|
|
4
|
+
# Title : PgDBI.py -- PostgreSQL DataBase Interface
|
|
5
|
+
# Author : Zaihua Ji, zji@ucar.edu
|
|
6
|
+
# Date : 06/07/2022
|
|
7
|
+
# 2025-01-10 transferred to package rda_python_common from
|
|
8
|
+
# https://github.com/NCAR/rda-shared-libraries.git
|
|
9
|
+
# Purpose : Python library module to handle query and manipulate PostgreSQL database
|
|
10
|
+
#
|
|
11
|
+
# Github : https://github.com/NCAR/rda-python-common.git
|
|
12
|
+
#
|
|
13
|
+
###############################################################################
|
|
14
|
+
|
|
15
|
+
import os
|
|
16
|
+
import re
|
|
17
|
+
import time
|
|
18
|
+
import hvac
|
|
19
|
+
from datetime import datetime
|
|
20
|
+
import psycopg2 as PgSQL
|
|
21
|
+
from psycopg2.extras import execute_values
|
|
22
|
+
from psycopg2.extras import execute_batch
|
|
23
|
+
from os import path as op
|
|
24
|
+
from . import PgLOG
|
|
25
|
+
|
|
26
|
+
pgdb = None # reference to a connected database object
curtran = 0 # 0 - no transaction, 1 - in transaction (counts changes while > 0)
NMISSES = [] # array of missing userno
LMISSES = [] # array of missing logname
TABLES = {} # record table field information
SEQUENCES = {} # record table sequence fieldnames
SPECIALIST = {} # hash array references to specialist info of dsids
SYSDOWN = {} # NOTE(review): populated elsewhere; presumably system-down flags — confirm
PGDBI = {} # module-wide connection/configuration settings (filled via SETPGDBI below)
ADDTBLS = [] # table names already created/validated in this process
PGSIGNS = ['!', '<', '>', '<>'] # comparison signs recognized when building conditions
CHCODE = 1042 # psycopg2 type code for CHAR (bpchar) columns
|
|
38
|
+
|
|
39
|
+
# hard coded db ports for dbnames; 0 means use the server default port
DBPORTS = {
   'default' : 0
}

DBPASS = {} # cached database passwords — presumably keyed by login/db; confirm against get_pgpass_password()
DBBAOS = {} # cached hvac (OpenBao/Vault) client objects — TODO confirm usage further down the file
|
|
46
|
+
|
|
47
|
+
# hard coded db names for given schema names; unknown schemas fall back to 'default'
DBNAMES = {
   'ivaddb' : 'ivaddb',
   'cntldb' : 'ivaddb',
   'ivaddb1' : 'ivaddb',
   'cntldb1' : 'ivaddb',
   'cdmsdb' : 'ivaddb',
   'ispddb' : 'ispddb',
   'obsua' : 'upadb',
   'default' : 'rdadb',
}
|
|
58
|
+
|
|
59
|
+
# hard coded socket paths for machine_dbnames; empty string means no unix socket override
DBSOCKS = {
   'default' : '',
}
|
|
63
|
+
|
|
64
|
+
# home path for check db on alter host, keyed by hostname with a catch-all default
VIEWHOMES = {
   'default' : PgLOG.PGLOG['DSSDBHM']
}
|
|
68
|
+
|
|
69
|
+
# add more to the list if used for names
# NOTE(review): these look like PostgreSQL reserved words that need quoting when
# used as identifiers — confirm against the helper that consumes this list
PGRES = ['end', 'window']
|
|
71
|
+
|
|
72
|
+
#
# PostgreSQL specific query timestamp/date formatting snippets.
# Each helper takes a field name and returns the SQL expression string.
#
def fmtyr(fn):
    """SQL expression: integer year extracted from timestamp field *fn*."""
    return "extract(year from {})::int".format(fn)

def fmtqt(fn):
    """SQL expression: integer quarter extracted from timestamp field *fn*."""
    return "extract(quarter from {})::int".format(fn)

def fmtmn(fn):
    """SQL expression: integer month extracted from timestamp field *fn*."""
    return "extract(month from {})::int".format(fn)

def fmtdt(fn):
    """SQL expression: date part of timestamp field *fn*."""
    return "date({})".format(fn)

def fmtym(fn):
    """SQL expression: 'yyyy-mm' string for timestamp field *fn*."""
    return "to_char({}, 'yyyy-mm')".format(fn)

def fmthr(fn):
    """SQL expression: integer hour extracted from timestamp field *fn*."""
    return "extract(hour from {})::int".format(fn)
|
|
81
|
+
|
|
82
|
+
#
|
|
83
|
+
# set environments and defaults
|
|
84
|
+
#
|
|
85
|
+
def SETPGDBI(name, value):
    """Set PGDBI[name] from the environment variable *name*, falling back to *value*."""
    PGDBI[name] = PgLOG.get_environment(name, value)
|
|
87
|
+
|
|
88
|
+
# environment-overridable connection defaults
SETPGDBI('DEFDB', 'rdadb')
SETPGDBI("DEFSC", 'dssdb')
SETPGDBI('DEFHOST', PgLOG.PGLOG['PSQLHOST'])
SETPGDBI("DEFPORT", 0)
SETPGDBI("DEFSOCK", '')
SETPGDBI("DBNAME", PGDBI['DEFDB'])
SETPGDBI("SCNAME", PGDBI['DEFSC'])
SETPGDBI("LNNAME", PGDBI['DEFSC'])
SETPGDBI("PWNAME", None)
SETPGDBI("DBHOST", (os.environ['DSSDBHOST'] if os.environ.get('DSSDBHOST') else PGDBI['DEFHOST']))
SETPGDBI("DBPORT", 0)
SETPGDBI("ERRLOG", PgLOG.LOGERR) # default error logact
SETPGDBI("EXITLG", PgLOG.LGEREX) # default exit logact
SETPGDBI("DBSOCK", '')
SETPGDBI("DATADIR", PgLOG.PGLOG['DSDHOME'])
SETPGDBI("BCKPATH", PgLOG.PGLOG['DSSDBHM'] + "/backup")
SETPGDBI("SQLPATH", PgLOG.PGLOG['DSSDBHM'] + "/sql")
SETPGDBI("VWNAME", PGDBI['DEFSC'])
SETPGDBI("VWPORT", 0)
SETPGDBI("VWSOCK", '')
SETPGDBI("BAOURL", 'https://bao.k8s.ucar.edu/')

# values derived from the settings above (not environment-overridable)
PGDBI['DBSHOST'] = PgLOG.get_short_host(PGDBI['DBHOST'])
PGDBI['DEFSHOST'] = PgLOG.get_short_host(PGDBI['DEFHOST'])
PGDBI['VWHOST'] = PgLOG.PGLOG['PVIEWHOST']
PGDBI['MSHOST'] = PgLOG.PGLOG['PMISCHOST']
PGDBI['VWSHOST'] = PgLOG.get_short_host(PGDBI['VWHOST'])
PGDBI['MSSHOST'] = PgLOG.get_short_host(PGDBI['MSHOST'])
PGDBI['VWHOME'] = (VIEWHOMES[PgLOG.PGLOG['HOSTNAME']] if PgLOG.PGLOG['HOSTNAME'] in VIEWHOMES else VIEWHOMES['default'])
PGDBI['SCPATH'] = None # additional schema path for set search_path
PGDBI['VHSET'] = 0
PGDBI['PGSIZE'] = 1000 # number of records for page_size
PGDBI['MTRANS'] = 5000 # max number of changes in one transactions
PGDBI['MAXICNT'] = 6000000 # maximum number of records in each table
|
|
122
|
+
|
|
123
|
+
#
|
|
124
|
+
# create a pgddl command string with
|
|
125
|
+
# table name (tname), prefix (pre) and suffix (suf)
|
|
126
|
+
#
|
|
127
|
+
def get_pgddl_command(tname, pre = None, suf = None, scname = None):
    """Build a pgddl shell command string for table *tname*.

    When *scname* is not supplied it is taken from a 'schema.table' prefix
    in *tname* (stripping the prefix off), or falls back to PGDBI['SCNAME'].
    *pre*/*suf* become the -y/-x options respectively.
    """
    if not scname:
        dotted = re.match(r'^(.+)\.(.+)$', tname)
        if dotted:
            scname = dotted.group(1)
            tname = dotted.group(2)
        else:
            scname = PGDBI['SCNAME']

    opts = ''
    if suf: opts += ' -x ' + suf
    if pre: opts += ' -y ' + pre

    return "pgddl {} -aa -h {} -d {} -c {} -u {}{}".format(tname, PGDBI['DBHOST'], PGDBI['DBNAME'], scname, PGDBI['LNNAME'], opts)
|
|
140
|
+
|
|
141
|
+
#
|
|
142
|
+
# set default connection for dssdb PostgreSQL Server
|
|
143
|
+
#
|
|
144
|
+
def dssdb_dbname():
    """Point the module at the default dssdb schema on the main PostgreSQL host."""
    default_scinfo(PGDBI['DEFDB'], PGDBI['DEFSC'], PgLOG.PGLOG['PSQLHOST'])

dssdb_scname = dssdb_dbname  # backward-compatible alias
|
|
148
|
+
|
|
149
|
+
#
|
|
150
|
+
# set default connection for obsua PostgreSQL Server
|
|
151
|
+
#
|
|
152
|
+
def obsua_dbname():
    """Point the module at the obsua schema of upadb on the misc PostgreSQL host."""
    default_scinfo('upadb', 'obsua', PgLOG.PGLOG['PMISCHOST'])

obsua_scname = obsua_dbname  # backward-compatible alias
|
|
156
|
+
|
|
157
|
+
#
|
|
158
|
+
# set default connection for ivaddb PostgreSQL Server
|
|
159
|
+
#
|
|
160
|
+
def ivaddb_dbname():
    """Point the module at the ivaddb schema/database on the misc PostgreSQL host."""
    default_scinfo('ivaddb', 'ivaddb', PgLOG.PGLOG['PMISCHOST'])

ivaddb_scname = ivaddb_dbname  # backward-compatible alias
|
|
164
|
+
|
|
165
|
+
#
|
|
166
|
+
# set default connection for ispddb PostgreSQL Server
|
|
167
|
+
#
|
|
168
|
+
def ispddb_dbname():
    """Point the module at the ispddb schema/database on the misc PostgreSQL host."""
    default_scinfo('ispddb', 'ispddb', PgLOG.PGLOG['PMISCHOST'])

ispddb_scname = ispddb_dbname  # backward-compatible alias
|
|
172
|
+
|
|
173
|
+
#
|
|
174
|
+
# set a default schema info with hard coded info
|
|
175
|
+
#
|
|
176
|
+
def default_dbinfo(scname = None, dbhost = None, lnname = None, pwname = None, dbport = None, socket = None):
    """Set default connection info, deriving the database name from *scname*."""
    dbname = get_dbname(scname)
    return default_scinfo(dbname, scname, dbhost, lnname, pwname, dbport, socket)
|
|
179
|
+
|
|
180
|
+
#
|
|
181
|
+
# set default database/schema info with hard coded info
|
|
182
|
+
#
|
|
183
|
+
def default_scinfo(dbname = None, scname = None, dbhost = None, lnname = None, pwname = None, dbport = None, socket = None):
    """Set default database/schema connection info, filling unset values from PGDBI defaults."""
    set_scname(dbname or PGDBI['DEFDB'],
               scname or PGDBI['DEFSC'],
               lnname, pwname,
               dbhost or PGDBI['DEFHOST'],
               PGDBI['DEFPORT'] if dbport is None else dbport,
               PGDBI['DEFSOCK'] if socket is None else socket)
|
|
192
|
+
|
|
193
|
+
#
|
|
194
|
+
# get the datbase sock file name of a given dbname for local connection
|
|
195
|
+
#
|
|
196
|
+
def get_dbsock(dbname):
    """Return the unix socket path for *dbname* (DBSOCKS entry, or the default)."""
    return DBSOCKS.get(dbname, DBSOCKS['default'])
|
|
199
|
+
|
|
200
|
+
#
|
|
201
|
+
# get the datbase port number of a given dbname for remote connection
|
|
202
|
+
#
|
|
203
|
+
def get_dbport(dbname):
    """Return the port number for *dbname* (DBPORTS entry, or the default)."""
    return DBPORTS.get(dbname, DBPORTS['default'])
|
|
206
|
+
|
|
207
|
+
#
|
|
208
|
+
# get the datbase name of a given schema name for remote connection
|
|
209
|
+
#
|
|
210
|
+
def get_dbname(scname):
    """Return the database name mapped to schema *scname*; None for an empty schema name."""
    if not scname: return None
    return DBNAMES.get(scname, DBNAMES['default'])
|
|
216
|
+
|
|
217
|
+
#
|
|
218
|
+
# set connection for viewing database information
|
|
219
|
+
#
|
|
220
|
+
def view_dbinfo(scname = None, lnname = None, pwname = None):
    """Set the view-host connection, deriving the database name from *scname*."""
    dbname = get_dbname(scname)
    return view_scinfo(dbname, scname, lnname, pwname)
|
|
223
|
+
|
|
224
|
+
#
|
|
225
|
+
# set connection for viewing database/schema information
|
|
226
|
+
#
|
|
227
|
+
def view_scinfo(dbname = None, scname = None, lnname = None, pwname = None):
    """Set connection info for viewing database/schema information on the view host."""
    set_scname(dbname or PGDBI['DEFDB'],
               scname or PGDBI['DEFSC'],
               lnname, pwname,
               PgLOG.PGLOG['PVIEWHOST'], PGDBI['VWPORT'])
|
|
233
|
+
|
|
234
|
+
#
|
|
235
|
+
# set connection for given scname
|
|
236
|
+
#
|
|
237
|
+
def set_dbname(scname = None, lnname = None, pwname = None, dbhost = None, dbport = None, socket = None):
    """Set connection info for schema *scname*, deriving its database name."""
    scname = scname or PGDBI['DEFSC']
    return set_scname(get_dbname(scname), scname, lnname, pwname, dbhost, dbport, socket)
|
|
241
|
+
|
|
242
|
+
#
|
|
243
|
+
# set connection for given database & schema names
|
|
244
|
+
#
|
|
245
|
+
def set_scname(dbname = None, scname = None, lnname = None, pwname = None, dbhost = None, dbport = None, socket = None):
    """Update the PGDBI connection settings for the given database/schema.

    Each non-empty argument that differs from the current setting is applied.
    If anything changed and a connection is open, it is dropped so the next
    query reconnects with the new settings.
    NOTE: scname also resets LNNAME (login name defaults to the schema name);
    an explicit lnname, checked afterwards, overrides that.
    """
    changed = 0

    if dbname and dbname != PGDBI['DBNAME']:
        PGDBI['DBNAME'] = dbname
        changed = 1
    if scname and scname != PGDBI['SCNAME']:
        PGDBI['LNNAME'] = PGDBI['SCNAME'] = scname
        changed = 1
    if lnname and lnname != PGDBI['LNNAME']:
        PGDBI['LNNAME'] = lnname
        changed = 1
    if pwname != PGDBI['PWNAME']:
        PGDBI['PWNAME'] = pwname
        changed = 1
    if dbhost and dbhost != PGDBI['DBHOST']:
        PGDBI['DBHOST'] = dbhost
        PGDBI['DBSHOST'] = PgLOG.get_short_host(dbhost)
        changed = 1
    # local host uses a unix socket; remote hosts use a TCP port
    if PGDBI['DBSHOST'] == PgLOG.PGLOG['HOSTNAME']:
        if socket is None: socket = get_dbsock(dbname)
        if socket != PGDBI['DBSOCK']:
            PGDBI['DBSOCK'] = socket
            changed = 1
    else:
        if not dbport: dbport = get_dbport(dbname)
        if dbport != PGDBI['DBPORT']:
            PGDBI['DBPORT'] = dbport
            changed = 1

    # force a reconnect with the new settings on next use
    if changed and pgdb is not None: pgdisconnect(1)
|
|
277
|
+
|
|
278
|
+
#
|
|
279
|
+
# start a database transaction and exit if fails
|
|
280
|
+
#
|
|
281
|
+
def starttran():
    """Start a database transaction, (re)connecting if needed; exits via logact if that fails.

    Turns autocommit off so subsequent statements accumulate until endtran()
    or aborttran() is called.
    """
    global curtran

    if curtran == 1: endtran() # try to end previous transaction
    if not pgdb:
        pgconnect(0, 0, False)   # fresh connection with autocommit off
    else:
        try:
            # cheap attribute access to probe whether the connection is still alive
            pgdb.isolation_level
        except PgSQL.OperationalError as e:
            pgconnect(0, 0, False)
        if pgdb.closed:
            pgconnect(0, 0, False)
        elif pgdb.autocommit:
            pgdb.autocommit = False
    curtran = 1
|
|
298
|
+
|
|
299
|
+
#
|
|
300
|
+
# end a transaction with changes committed and exit if fails
|
|
301
|
+
#
|
|
302
|
+
def endtran(autocommit = True):
    """Commit the current transaction; restore *autocommit* on the connection.

    curtran stays 1 when autocommit remains off (a new transaction continues).
    """
    global curtran
    if not (curtran and pgdb): return
    if not pgdb.closed: pgdb.commit()
    pgdb.autocommit = autocommit
    curtran = 0 if autocommit else 1
|
|
309
|
+
|
|
310
|
+
#
|
|
311
|
+
# end a transaction without changes committed and exit inside if fails
|
|
312
|
+
#
|
|
313
|
+
def aborttran(autocommit = True):
    """Roll back the current transaction; restore *autocommit* on the connection.

    curtran stays 1 when autocommit remains off (a new transaction continues).
    """
    global curtran
    if not (curtran and pgdb): return
    if not pgdb.closed: pgdb.rollback()
    pgdb.autocommit = autocommit
    curtran = 0 if autocommit else 1
|
|
320
|
+
|
|
321
|
+
#
|
|
322
|
+
# record error message to dscheck record and clean the lock
|
|
323
|
+
#
|
|
324
|
+
def record_dscheck_error(errmsg, logact = PGDBI['EXITLG']):
    """Record *errmsg* on the current dscheck record and release its lock on exit.

    Returns 0 when the dscheck record is missing or not locked by this
    process; otherwise returns the pgupdt() result.
    NOTE: the default logact is bound at import time from PGDBI['EXITLG'].
    """
    cnd = PgLOG.PGLOG['DSCHECK']['chkcnd']
    if PgLOG.PGLOG['NOQUIT']: PgLOG.PGLOG['NOQUIT'] = 0
    dflags = PgLOG.PGLOG['DSCHECK']['dflags']

    pgrec = pgget("dscheck", "mcount, tcount, lockhost, pid", cnd, logact)
    if not pgrec: return 0
    if not pgrec['pid'] and not pgrec['lockhost']: return 0
    (chost, cpid) = PgLOG.current_process_info()
    if pgrec['pid'] != cpid or pgrec['lockhost'] != chost: return 0

    # update dscheck record only if it is still locked by the current process
    record = {}
    record['chktime'] = int(time.time())
    if logact&PgLOG.EXITLG:
        record['status'] = "E"
        record['pid'] = 0 # release lock
    if dflags:
        record['dflags'] = dflags
        record['mcount'] = pgrec['mcount'] + 1
    else:
        record['dflags'] = ''

    if errmsg:
        # clip very long messages before storing them in the 512-char column
        errmsg = PgLOG.break_long_string(errmsg, 512, None, 50, None, 50, 25)
        if pgrec['tcount'] > 1: errmsg = "Try {}: {}".format(pgrec['tcount'], errmsg)
        record['errmsg'] = errmsg

    return pgupdt("dscheck", record, cnd, logact)
|
|
354
|
+
|
|
355
|
+
#
|
|
356
|
+
# local function to log query error
|
|
357
|
+
#
|
|
358
|
+
def qelog(dberror, sleep, sqlstr, vals, pgcnt, logact = PGDBI['ERRLOG']):
    """Log a query error (optionally sleeping before a retry) and return FAILURE.

    dberror - database error text, prepended to the message when present
    sleep   - seconds to sleep after logging (0 for none)
    sqlstr  - the SQL or a "Retry ..." action description
    vals    - bound values to include in the message, if any
    pgcnt   - current retry count (used for ordinal wording)
    """
    retry = " Sleep {}(sec) & ".format(sleep) if sleep else " "
    if sqlstr:
        if sqlstr.find("Retry ") == 0:
            retry += "the {} ".format(PgLOG.int2order(pgcnt+1))
        elif sleep:
            retry += "the {} Retry: \n".format(PgLOG.int2order(pgcnt+1))
        elif pgcnt:
            retry = " Error the {} Retry: \n".format(PgLOG.int2order(pgcnt))
        else:
            retry = "\n"
        sqlstr = retry + sqlstr
    else:
        sqlstr = ''

    if vals: sqlstr += " with values: " + str(vals)

    if dberror: sqlstr = "{}\n{}".format(dberror, sqlstr)
    # when about to exit under a dscheck, persist the error on the dscheck record first
    if logact&PgLOG.EXITLG and PgLOG.PGLOG['DSCHECK']: record_dscheck_error(sqlstr, logact)
    PgLOG.pglog(sqlstr, logact)
    if sleep: time.sleep(sleep)

    return PgLOG.FAILURE # if not exit in PgLOG.pglog()
|
|
382
|
+
|
|
383
|
+
#
|
|
384
|
+
# try to add a new table according the table not exist error
|
|
385
|
+
#
|
|
386
|
+
def try_add_table(dberror, logact):
    """Create the missing table named in a 42P01 'relation does not exist' error."""
    missing = re.match(r'^42P01 ERROR: relation "(.+)" does not exist', dberror)
    if missing:
        add_new_table(missing.group(1), logact = logact)
|
|
392
|
+
|
|
393
|
+
#
|
|
394
|
+
# add a table for given table name
|
|
395
|
+
#
|
|
396
|
+
def add_a_table(tname, logact):
    """Create table *tname* (no prefix/suffix); thin wrapper over add_new_table()."""
    add_new_table(tname, logact = logact)
|
|
399
|
+
|
|
400
|
+
#
|
|
401
|
+
# add a new table for given table name
|
|
402
|
+
#
|
|
403
|
+
def add_new_table(tname, pre = None, suf = None, logact = 0):
    """Create table pre_tname / tname_suf / tname via pgddl, once per process."""
    if pre:
        full = pre + '_' + tname
    elif suf:
        full = tname + '_' + suf
    else:
        full = tname

    if full not in ADDTBLS:
        PgLOG.pgsystem(get_pgddl_command(tname, pre, suf), logact)
        ADDTBLS.append(full)   # remember so we do not recreate it this run
|
|
415
|
+
|
|
416
|
+
#
|
|
417
|
+
# validate a table for given table name (tname), prefix (pre) and suffix (suf),
|
|
418
|
+
# and add it if not existing
|
|
419
|
+
#
|
|
420
|
+
def valid_table(tname, pre = None, suf = None, logact = 0):
    """Validate table pre_tname / tname_suf / tname, creating it if missing.

    Returns the full table name; results are cached in ADDTBLS per process.
    """
    if pre:
        full = pre + '_' + tname
    elif suf:
        full = tname + '_' + suf
    else:
        full = tname

    if full not in ADDTBLS:
        # create the table only when it is not present in the database
        if not pgcheck(full, logact):
            PgLOG.pgsystem(get_pgddl_command(tname, pre, suf), logact)
        ADDTBLS.append(full)
    return full
|
|
433
|
+
|
|
434
|
+
#
|
|
435
|
+
# local function to log query error
|
|
436
|
+
#
|
|
437
|
+
def check_dberror(pgerr, pgcnt, sqlstr, ary, logact = PGDBI['ERRLOG']):
    """Classify a psycopg2 error and decide whether the caller should retry.

    Returns PgLOG.SUCCESS when the operation should be retried (connection
    loss, lock contention, aborted transaction, missing table that was just
    created), otherwise logs via qelog() and returns its FAILURE result.
    pgcnt is the number of retries already attempted.
    """
    ret = PgLOG.FAILURE

    pgcode = pgerr.pgcode
    pgerror = pgerr.pgerror
    dberror = "{} {}".format(pgcode, pgerror) if pgcode and pgerror else str(pgerr)
    if pgcnt < PgLOG.PGLOG['DBRETRY']:
        if not pgcode:
            # no SQLSTATE: treat as a connection-level failure
            if PGDBI['DBNAME'] == PGDBI['DEFDB'] and PGDBI['DBSHOST'] != PGDBI['DEFSHOST']:
                # fall back to the default host for the default database
                default_dbinfo()
                qelog(dberror, 0, "Retry Connecting to {} on {}".format(PGDBI['DBNAME'], PGDBI['DBHOST']), ary, pgcnt, PgLOG.MSGLOG)
            else:
                # back off progressively before reconnecting
                qelog(dberror, 5+5*pgcnt, "Retry Connecting", ary, pgcnt, PgLOG.LOGWRN)
            return PgLOG.SUCCESS
        elif re.match(r'^(08|57)', pgcode):
            # SQLSTATE classes 08 (connection exception) / 57 (operator intervention)
            qelog(dberror, 0, "Retry Connecting", ary, pgcnt, PgLOG.LOGWRN)
            pgconnect(1, pgcnt + 1)
            return (PgLOG.FAILURE if not pgdb else PgLOG.SUCCESS)
        elif re.match(r'^55', pgcode): # try to lock again
            qelog(dberror, 10, "Retry Locking", ary, pgcnt, PgLOG.LOGWRN)
            return PgLOG.SUCCESS
        elif pgcode == '25P02': # in failed transaction; roll back and retry
            qelog(dberror, 0, "Rollback transaction", ary, pgcnt, PgLOG.LOGWRN)
            pgdb.rollback()
            return PgLOG.SUCCESS
        elif pgcode == '42P01' and logact&PgLOG.ADDTBL: # try to add table
            qelog(dberror, 0, "Retry after adding a table", ary, pgcnt, PgLOG.LOGWRN)
            try_add_table(dberror, logact)
            return PgLOG.SUCCESS

    # retries exhausted (or unhandled code): log and fail; lock errors never exit
    if logact&PgLOG.DOLOCK and pgcode and re.match(r'^55\w\w\w$', pgcode):
        logact &= ~PgLOG.EXITLG # no exit for lock error
    return qelog(dberror, 0, sqlstr, ary, pgcnt, logact)
|
|
471
|
+
|
|
472
|
+
#
|
|
473
|
+
# return hash reference to postgresql batch mode command and output file name
|
|
474
|
+
#
|
|
475
|
+
def pgbatch(sqlfile, foreground = 0):
    """Build the psql batch-mode invocation for *sqlfile*.

    Returns:
      - just the psql option string when sqlfile is empty;
      - a pipe-open command string ("psql ... < sqlfile |") when foreground;
      - otherwise a dict with 'cmd' (background shell command) and 'out'
        (the output file name derived from sqlfile).

    Side effect: sets os.environ['PGPASSWORD'] for the psql child process.
    """
    dbhost = 'localhost' if PGDBI['DBSHOST'] == PgLOG.PGLOG['HOSTNAME'] else PGDBI['DBHOST']
    options = "-h {} -p {}".format(dbhost, PGDBI['DBPORT'])
    pwname = get_pgpass_password()
    os.environ['PGPASSWORD'] = pwname
    options += " -U {} {}".format(PGDBI['LNNAME'], PGDBI['DBNAME'])

    if not sqlfile: return options

    if foreground:
        # pipe-open command string for the caller to read from
        batch = "psql {} < {} |".format(options, sqlfile)
    else:
        batch = {}   # BUGFIX: dict was never initialized before batch['out'] assignment
        batch['out'] = sqlfile
        if re.search(r'\.sql$', batch['out']):
            batch['out'] = re.sub(r'\.sql$', '.out', batch['out'])
        else:
            batch['out'] += ".out"
        batch['cmd'] = "psql {} < {} > {} 2>&1".format(options, sqlfile, batch['out'])

    return batch
|
|
502
|
+
|
|
503
|
+
#
|
|
504
|
+
# start a connection to dssdb database and return a DBI object; None if error
|
|
505
|
+
# force connect if connect > 0
|
|
506
|
+
#
|
|
507
|
+
def pgconnect(reconnect = 0, pgcnt = 0, autocommit = True):
    """Connect to the configured PostgreSQL database and return the connection.

    reconnect  - when set and an open connection exists, reuse it
    pgcnt      - starting retry count passed to check_dberror()
    autocommit - set autocommit mode on the new connection when True
    Returns the psycopg2 connection, or PgLOG.FAILURE when retries give up.
    """
    global pgdb

    if pgdb:
        if reconnect and not pgdb.closed: return pgdb # no need reconnect
        elif reconnect:
            reconnect = 0 # initial connection

    while True:
        config = {'database' : PGDBI['DBNAME'],
                  'user' : PGDBI['LNNAME']}
        if PGDBI['DBSHOST'] == PgLOG.PGLOG['HOSTNAME']:
            config['host'] = 'localhost'
        else:
            config['host'] = PGDBI['DBHOST'] if PGDBI['DBHOST'] else PGDBI['DEFHOST']
        if not PGDBI['DBPORT']: PGDBI['DBPORT'] = get_dbport(PGDBI['DBNAME'])
        if PGDBI['DBPORT']: config['port'] = PGDBI['DBPORT']
        # build the loggable command string with the password masked
        config['password'] = '***'
        sqlstr = "psycopg2.connect(**{})".format(config)
        config['password'] = get_pgpass_password()
        if PgLOG.PGLOG['DBGLEVEL']: PgLOG.pgdbg(1000, sqlstr)
        try:
            PgLOG.PGLOG['PGDBBUF'] = pgdb = PgSQL.connect(**config)
            if reconnect: PgLOG.pglog("{} Reconnected at {}".format(sqlstr, PgLOG.current_datetime()), PgLOG.MSGLOG|PgLOG.FRCLOG)
            if autocommit: pgdb.autocommit = autocommit
            return pgdb
        except PgSQL.Error as pgerr:
            if not check_dberror(pgerr, pgcnt, sqlstr, None, PGDBI['EXITLG']): return PgLOG.FAILURE
        pgcnt += 1
|
|
537
|
+
|
|
538
|
+
#
|
|
539
|
+
# return a PostgreSQL cursor upon success
|
|
540
|
+
#
|
|
541
|
+
def pgcursor():
    """Return a cursor on the current connection with search_path set.

    Connects (or reconnects once) as needed; retries via check_dberror().
    Returns PgLOG.FAILURE when a cursor cannot be obtained.
    """
    pgcur = None

    if not pgdb:
        pgconnect()
        if not pgdb: return PgLOG.FAILURE

    pgcnt = 0
    while True:
        try:
            pgcur = pgdb.cursor()
            # schema search path: SCNAME first, optionally followed by SCPATH
            spath = "SET search_path = '{}'".format(PGDBI['SCNAME'])
            if PGDBI['SCPATH'] and PGDBI['SCPATH'] != PGDBI['SCNAME']:
                spath += ", '{}'".format(PGDBI['SCPATH'])
            pgcur.execute(spath)
        except PgSQL.Error as pgerr:
            if pgcnt == 0 and pgdb.closed:
                pgconnect(1)   # one transparent reconnect on a dropped connection
            elif not check_dberror(pgerr, pgcnt, '', None, PGDBI['EXITLG']):
                return PgLOG.FAILURE
        else:
            break
        pgcnt += 1

    return pgcur
|
|
567
|
+
|
|
568
|
+
#
|
|
569
|
+
# disconnect to dssdb database
|
|
570
|
+
#
|
|
571
|
+
def pgdisconnect(stopit = 1):
    """Drop the cached connection; close it first when *stopit* is truthy."""
    global pgdb
    if not pgdb: return
    if stopit: pgdb.close()
    PgLOG.PGLOG['PGDBBUF'] = pgdb = None
|
|
577
|
+
|
|
578
|
+
#
|
|
579
|
+
# gather table field default information as hash array with field names as keys
|
|
580
|
+
# and default values as values
|
|
581
|
+
# the whole table information is cached to a hash array with table names as keys
|
|
582
|
+
#
|
|
583
|
+
def pgtable(tablename, logact = PGDBI['ERRLOG']):
    """Return {field: default} for *tablename*, from information_schema.

    Defaults: 0 for serial/integer fields, None for nullable fields without a
    default, '' for other fields without a default; explicit defaults are
    normalized via check_default_value(). Results are cached in TABLES and a
    copy is returned so callers may mutate it.
    """
    if tablename in TABLES: return TABLES[tablename].copy() # cached already
    # BUGFIX: pattern previously contained an empty alternative ('smallint||bigint')
    intms = r'^(smallint|bigint|integer)$'
    fields = "column_name col, data_type typ, is_nullable nil, column_default def"
    condition = table_condition(tablename)
    pgcnt = 0
    while True:
        pgrecs = pgmget('information_schema.columns', fields, condition, logact)
        cnt = len(pgrecs['col']) if pgrecs else 0
        if cnt: break
        if pgcnt == 0 and logact&PgLOG.ADDTBL:
            add_new_table(tablename, logact = logact)  # create it and look again
        else:
            return PgLOG.pglog(tablename + ": Table not exists", logact)
        pgcnt += 1

    pgdefs = {}
    for i in range(cnt):
        name = pgrecs['col'][i]
        isint = re.match(intms, pgrecs['typ'][i])
        dflt = pgrecs['def'][i]
        if dflt != None:
            if re.match(r'^nextval\(', dflt):
                dflt = 0   # sequence-backed (serial) field
            else:
                dflt = check_default_value(dflt, isint)
        elif pgrecs['nil'][i] == 'YES':
            dflt = None
        elif isint:
            dflt = 0
        else:
            dflt = ''
        pgdefs[name] = dflt

    TABLES[tablename] = pgdefs.copy()
    return pgdefs
|
|
620
|
+
|
|
621
|
+
#
|
|
622
|
+
# get sequence field name for given table name
|
|
623
|
+
#
|
|
624
|
+
def pgsequence(tablename, logact = PGDBI['ERRLOG']):
    """Return the sequence-backed (serial) column name of *tablename*, or None.

    Results are cached per table name in SEQUENCES.
    """
    if tablename in SEQUENCES: return SEQUENCES[tablename] # cached already

    condition = table_condition(tablename) + " AND column_default LIKE 'nextval(%'"
    pgrec = pgget('information_schema.columns', 'column_name', condition, logact)
    SEQUENCES[tablename] = pgrec['column_name'] if pgrec else None
    return SEQUENCES[tablename]
|
|
633
|
+
|
|
634
|
+
#
|
|
635
|
+
# check default value for integer & string
|
|
636
|
+
#
|
|
637
|
+
def check_default_value(dflt, isint):
    """Normalize a column_default string from information_schema.

    Integer fields yield an int; quoted defaults are stripped of their type
    cast; other non-NULL defaults are wrapped in single quotes.
    """
    if isint:
        m = re.match(r"^'{0,1}(\d+)", dflt)
        return int(m.group(1)) if m else dflt
    if dflt[0] == "'":
        m = re.match(r"^(.+)::", dflt)
        return m.group(1) if m else dflt
    if dflt == 'NULL':
        return dflt
    return "'{}'".format(dflt)
|
|
648
|
+
|
|
649
|
+
#
|
|
650
|
+
# local fucntion: insert prepare pgadd()/pgmadd() for given table and field names
|
|
651
|
+
# according to options of multiple place holds and returning sequence id
|
|
652
|
+
#
|
|
653
|
+
def prepare_insert(tablename, fields, multi = True, getid = None):
    """Build an INSERT statement for *tablename* over *fields*.

    multi=True emits a '(%s,...,%s)' placeholder tuple for cursor.execute();
    multi=False emits a single '%s' for psycopg2 execute_values().
    getid appends 'RETURNING <getid>' to fetch the new sequence id.
    """
    strfld = pgnames(fields, '.', ',')
    strplc = "(" + ','.join(['%s'] * len(fields)) + ")" if multi else '%s'
    sqlstr = "INSERT INTO {} ({}) VALUES {}".format(tablename, strfld, strplc)
    if getid: sqlstr += " RETURNING " + getid

    if PgLOG.PGLOG['DBGLEVEL']: PgLOG.pgdbg(1000, sqlstr)
    return sqlstr
|
|
666
|
+
|
|
667
|
+
#
|
|
668
|
+
# local fucntion: prepare default value for single record
|
|
669
|
+
#
|
|
670
|
+
def prepare_default(tablename, record, logact = 0):
    """Replace None/empty-string values in *record* with the table's column defaults (in place)."""
    table = pgtable(tablename, logact)

    for fld, val in record.items():
        # only None and empty strings are considered "unset"
        if val is None or (isinstance(val, str) and len(val) == 0):
            record[fld] = table[fld]
|
|
683
|
+
|
|
684
|
+
#
|
|
685
|
+
# local fucntion: prepare default value for multiple records
|
|
686
|
+
#
|
|
687
|
+
def prepare_defaults(tablename, records, logact = 0):
    """Replace None/empty-string entries in each value list of *records* with column defaults (in place)."""
    table = pgtable(tablename, logact)

    for fld, vals in records.items():
        for i, val in enumerate(vals):
            # only None and empty strings are considered "unset"
            if val is None or (isinstance(val, str) and len(val) == 0):
                records[fld][i] = table[fld]
|
|
702
|
+
|
|
703
|
+
#
|
|
704
|
+
# insert one record into tablename
|
|
705
|
+
# tablename: add record for one table name each call
|
|
706
|
+
# record: hash reference with keys as field names and hash values as field values
|
|
707
|
+
# return PgLOG.SUCCESS or PgLOG.FAILURE
|
|
708
|
+
#
|
|
709
|
+
def pgadd(tablename, record, logact = PGDBI['ERRLOG'], getid = None):
    """Insert one record into *tablename*.

    record - {field: value} dict for the new row
    getid  - column to RETURN (the new sequence id); with PgLOG.AUTOID it is
             looked up automatically via pgsequence()
    Returns the new id when getid is set, else PgLOG.SUCCESS / PgLOG.FAILURE.
    """
    global curtran
    if not record: return PgLOG.pglog("Nothing adds to " + tablename, logact)
    if logact&PgLOG.DODFLT: prepare_default(tablename, record, logact)
    if logact&PgLOG.AUTOID and not getid: getid = pgsequence(tablename, logact)
    sqlstr = prepare_insert(tablename, list(record), True, getid)
    values = tuple(record.values())

    if PgLOG.PGLOG['DBGLEVEL']: PgLOG.pgdbg(1000, "Insert: " + str(values))

    ret = acnt = pgcnt = 0
    while True:
        pgcur = pgcursor()
        if not pgcur: return PgLOG.FAILURE
        try:
            pgcur.execute(sqlstr, values)
            acnt = 1
            if getid:
                ret = pgcur.fetchone()[0]   # value of the RETURNING column
            else:
                ret = PgLOG.SUCCESS
            pgcur.close()
        except PgSQL.Error as pgerr:
            # check_dberror() decides whether this error is retryable
            if not check_dberror(pgerr, pgcnt, sqlstr, values, logact): return PgLOG.FAILURE
        else:
            break
        pgcnt += 1

    if PgLOG.PGLOG['DBGLEVEL']: PgLOG.pgdbg(1000, "pgadd: 1 record added to " + tablename + ", return " + str(ret))
    if(logact&PgLOG.ENDLCK):
        endtran()
    elif curtran:
        # commit and restart the transaction once it grows past MTRANS changes
        curtran += acnt
        if curtran > PGDBI['MTRANS']: starttran()

    return ret
|
|
746
|
+
|
|
747
|
+
#
# insert multiple records into tablename
# tablename: add records for one table name each call
# records: dict with field names as keys and each value is a list of field values
# return count of rows added; a list of new ids when getid is set;
# PgLOG.FAILURE on error
#
def pgmadd(tablename, records, logact = PGDBI['ERRLOG'], getid = None):
    """Bulk-insert records (dict of field -> list of values) into tablename.

    Two paths: with getid each row is inserted individually so its generated
    id can be collected; without getid all rows go through one batched
    psycopg2 execute_values() call.
    """
    global curtran
    if not records: return PgLOG.pglog("Nothing to insert to table " + tablename, logact)
    if logact&PgLOG.DODFLT: prepare_defaults(tablename, records, logact)
    if logact&PgLOG.AUTOID and not getid: getid = pgsequence(tablename, logact)
    # multi=True builds a per-row statement (RETURNING id); False builds a
    # VALUES %s template for execute_values()
    multi = True if getid else False
    sqlstr = prepare_insert(tablename, list(records), multi, getid)

    # transpose column-oriented input into row tuples
    v = records.values()
    values = list(zip(*v))
    cntrow = len(values)
    ids = [] if getid else None

    if PgLOG.PGLOG['DBGLEVEL']:
        for row in values: PgLOG.pgdbg(1000, "Insert: " + str(row))

    count = pgcnt = 0
    while True:
        pgcur = pgcursor()
        if not pgcur: return PgLOG.FAILURE

        if getid:
            # row-at-a-time so each generated id can be fetched; `count`
            # survives retries, resuming after the last successful row
            while count < cntrow:
                record = values[count]
                try:
                    pgcur.execute(sqlstr, record)
                    ids.append(pgcur.fetchone()[0])
                    count += 1
                except PgSQL.Error as pgerr:
                    if not check_dberror(pgerr, pgcnt, sqlstr, record, logact): return PgLOG.FAILURE
                    break
        else:
            try:
                execute_values(pgcur, sqlstr, values, page_size=PGDBI['PGSIZE'])
                count = cntrow
            except PgSQL.Error as pgerr:
                if not check_dberror(pgerr, pgcnt, sqlstr, values[0], logact): return PgLOG.FAILURE
        if count >= cntrow: break
        pgcnt += 1

    pgcur.close()
    if(PgLOG.PGLOG['DBGLEVEL']): PgLOG.pgdbg(1000, "pgmadd: {} of {} record(s) added to {}".format(count, cntrow, tablename))

    if(logact&PgLOG.ENDLCK):
        endtran()
    elif curtran:
        curtran += count
        if curtran > PGDBI['MTRANS']: starttran()

    return (ids if ids else count)
|
|
804
|
+
|
|
805
|
+
#
# local function: select prepare for pgget() and pgmget()
#
def prepare_select(tablenames, fields = None, condition = None, cndflds = None, logact = 0):
    """Build a SELECT statement string.

    With tablenames: selects `fields` (or count(*) cntrec when fields is
    empty) FROM tablenames, appending `condition` as a WHERE clause — unless
    it already starts with ORDER/GROUP/HAVING/OFFSET/LIMIT, in which case it
    is appended verbatim. Alternatively cndflds builds a parameterized
    WHERE fld=%s AND ... clause. PgLOG.DOLOCK opens a transaction and adds
    FOR UPDATE. Without tablenames, returns "SELECT fields" or the raw
    condition string (caller-supplied full SQL).
    """
    sqlstr = ''
    if tablenames:
        if fields:
            sqlstr = "SELECT " + fields
        else:
            # no field list: degrade to a row count query
            sqlstr = "SELECT count(*) cntrec"

        sqlstr += " FROM " + tablenames
        if condition:
            if re.match(r'^\s*(ORDER|GROUP|HAVING|OFFSET|LIMIT)\s', condition, re.I):
                sqlstr += " " + condition # no where clause, append directly
            else:
                sqlstr += " WHERE " + condition
        elif cndflds:
            # parameterized condition; values are supplied at execute() time
            sep = 'WHERE'
            for fld in cndflds:
                sqlstr += " {} {}=%s".format(sep, fld)
                sep = 'AND'
        if logact&PgLOG.DOLOCK:
            # row locking only makes sense inside a transaction
            starttran()
            sqlstr += " FOR UPDATE"
    elif fields:
        sqlstr = "SELECT " + fields
    elif condition:
        # caller passed a complete SQL statement in `condition`
        sqlstr = condition

    if PgLOG.PGLOG['DBGLEVEL']: PgLOG.pgdbg(1000, sqlstr)

    return sqlstr
|
|
839
|
+
|
|
840
|
+
#
# tablenames: comma deliminated string of one or more tables (joined when several),
# fields: comma deliminated string of one or more field names,
# condition: query condition for the where clause
# return a dict with keys as field names upon success; when fields is empty,
# returns the matching row count instead
#
def pgget(tablenames, fields, condition = None, logact = 0):
    """Fetch a single record (LIMIT 1 is forced) as a dict, or a row count
    when no fields are requested. Returns PgLOG.FAILURE on error."""
    if not logact: logact = PGDBI['ERRLOG']
    # force single-row semantics; second check covers conditions built by
    # prepare_select() from scratch
    if fields and condition and not re.search(r'limit 1$', condition, re.I): condition += " LIMIT 1"
    sqlstr = prepare_select(tablenames, fields, condition, None, logact)
    if fields and not re.search(r'(^|\s)limit 1($|\s)', sqlstr, re.I): sqlstr += " LIMIT 1"
    ucname = True if logact&PgLOG.UCNAME else False
    pgcnt = 0
    record = {}
    while True:
        pgcur = pgcursor()
        if not pgcur: return PgLOG.FAILURE
        try:
            pgcur.execute(sqlstr)
            vals = pgcur.fetchone()
            if vals:
                colcnt = len(pgcur.description)
                for i in range(colcnt):
                    col = pgcur.description[i]
                    colname = col[0].upper() if ucname else col[0]
                    val = vals[i]
                    # CHAR columns come back blank-padded; strip trailing spaces
                    if col[1] == CHCODE and val and val[-1] == ' ': val = val.rstrip()
                    record[colname] = val
            pgcur.close()
        except PgSQL.Error as pgerr:
            if not check_dberror(pgerr, pgcnt, sqlstr, None, logact): return PgLOG.FAILURE
        else:
            break
        pgcnt += 1

    if record and tablenames and not fields:
        # count(*) path: unwrap the single cntrec value
        if PgLOG.PGLOG['DBGLEVEL']:
            PgLOG.pgdbg(1000, "pgget: {} record(s) found from {}".format(record['cntrec'], tablenames))
        return record['cntrec']
    elif PgLOG.PGLOG['DBGLEVEL']:
        cnt = 1 if record else 0
        PgLOG.pgdbg(1000, "pgget: {} record retrieved from {}".format(cnt, tablenames))

    return record
|
|
885
|
+
|
|
886
|
+
#
# tablenames: comma deliminated string of one or more tables (joined when several),
# fields: comma deliminated string of one or more field names,
# condition: query condition for the where clause
# return a dict with keys as field names upon success; values for each field name
# are in a list. All lists are the same length with missing values set to None
#
def pgmget(tablenames, fields, condition = None, logact = PGDBI['ERRLOG']):
    """Fetch all matching rows, column-oriented: dict of field -> list of values.
    Returns an empty dict when nothing matches, PgLOG.FAILURE on error."""
    sqlstr = prepare_select(tablenames, fields, condition, None, logact)
    ucname = True if logact&PgLOG.UCNAME else False
    count = pgcnt = 0
    records = {}
    while True:
        pgcur = pgcursor()
        if not pgcur: return PgLOG.FAILURE
        try:
            pgcur.execute(sqlstr)
            rowvals = pgcur.fetchall()
            if rowvals:
                colcnt = len(pgcur.description)
                count = len(rowvals)
                # transpose row tuples into per-column value lists
                colvals = list(zip(*rowvals))
                for i in range(colcnt):
                    col = pgcur.description[i]
                    colname = col[0].upper() if ucname else col[0]
                    vals = list(colvals[i])
                    if col[1] == CHCODE:
                        # strip blank padding from CHAR columns
                        for j in range(count):
                            if vals[j] and vals[j][-1] == ' ': vals[j] = vals[j].rstrip()
                    records[colname] = vals
            pgcur.close()
        except PgSQL.Error as pgerr:
            if not check_dberror(pgerr, pgcnt, sqlstr, None, logact): return PgLOG.FAILURE
        else:
            break
        pgcnt += 1

    if PgLOG.PGLOG['DBGLEVEL']:
        PgLOG.pgdbg(1000, "pgmget: {} record(s) retrieved from {}".format(count, tablenames))

    return records
|
|
928
|
+
|
|
929
|
+
#
# tablenames: comma deliminated string of one or more tables
# fields: comma deliminated string of one or more field names,
# cnddict: condition dict of field name -> value (ANDed, parameterized)
# return a dict (field name -> value) upon success
#
# retrieve one record from tablenames for the given condition dict
#
def pghget(tablenames, fields, cnddict, logact = PGDBI['ERRLOG']):
    """Fetch a single record using a parameterized equality-condition dict.
    Returns a dict of field -> value, {} when no match, PgLOG.FAILURE on error."""
    if not tablenames: return PgLOG.pglog("Miss Table name to query", logact)
    if not fields: return PgLOG.pglog("Nothing to query " + tablenames, logact)
    if not cnddict: return PgLOG.pglog("Miss condition dict values to query " + tablenames, logact)
    sqlstr = prepare_select(tablenames, fields, None, list(cnddict), logact)
    if fields and not re.search(r'limit 1$', sqlstr, re.I): sqlstr += " LIMIT 1"
    ucname = True if logact&PgLOG.UCNAME else False

    # condition values bind to the %s placeholders built from cnddict's keys;
    # dict preserves insertion order so keys and values stay aligned
    values = tuple(cnddict.values())
    if PgLOG.PGLOG['DBGLEVEL']: PgLOG.pgdbg(1000, "Query from {} for {}".format(tablenames, values))

    pgcnt = 0
    record = {}
    while True:
        pgcur = pgcursor()
        if not pgcur: return PgLOG.FAILURE
        try:
            pgcur.execute(sqlstr, values)
            vals = pgcur.fetchone()
            if vals:
                colcnt = len(pgcur.description)
                for i in range(colcnt):
                    col = pgcur.description[i]
                    colname = col[0].upper() if ucname else col[0]
                    val = vals[i]
                    # strip blank padding from CHAR columns
                    if col[1] == CHCODE and val and val[-1] == ' ': val = val.rstrip()
                    record[colname] = val
            pgcur.close()
        except PgSQL.Error as pgerr:
            if not check_dberror(pgerr, pgcnt, sqlstr, values, logact): return PgLOG.FAILURE
        else:
            break
        pgcnt += 1

    if record and tablenames and not fields:
        # count(*) path (unreachable in practice: empty fields returns above)
        if PgLOG.PGLOG['DBGLEVEL']:
            PgLOG.pgdbg(1000, "pghget: {} record(s) found from {}".format(record['cntrec'], tablenames))
        return record['cntrec']
    elif PgLOG.PGLOG['DBGLEVEL']:
        cnt = 1 if record else 0
        PgLOG.pgdbg(1000, "pghget: {} record retrieved from {}".format(cnt, tablenames))

    return record
|
|
981
|
+
|
|
982
|
+
#
# tablenames: comma deliminated string of one or more tables
# fields: comma deliminated string of one or more field names,
# cnddicts: condition dict of field name -> list of values; one query is run
# per row of condition values and the results are concatenated
# return a dict (field name -> list of values) upon success
#
def pgmhget(tablenames, fields, cnddicts, logact = PGDBI['ERRLOG']):
    """Fetch records for each row of parameterized condition values and merge
    the results column-wise. Returns PgLOG.FAILURE on error."""
    if not tablenames: return PgLOG.pglog("Miss Table name to query", logact)
    if not fields: return PgLOG.pglog("Nothing to query " + tablenames, logact)
    if not cnddicts: return PgLOG.pglog("Miss condition dict values to query " + tablenames, logact)
    sqlstr = prepare_select(tablenames, fields, None, list(cnddicts), logact)
    ucname = True if logact&PgLOG.UCNAME else False

    # transpose column-oriented condition values into per-query tuples
    v = cnddicts.values()
    values = list(zip(*v))
    cndcnt = len(values)

    if PgLOG.PGLOG['DBGLEVEL']:
        for row in values:
            PgLOG.pgdbg(1000, "Query from {} for {}".format(tablenames, row))

    colcnt = ccnt = count = pgcnt = 0
    cols = []   # column names, captured from the first non-empty result
    chrs = []   # names of CHAR columns needing rstrip of blank padding
    records = {}
    while True:
        pgcur = pgcursor()
        if not pgcur: return PgLOG.FAILURE
        # ccnt survives retries: resume with the first unprocessed condition row
        while ccnt < cndcnt:
            cndvals = values[ccnt]
            try:
                pgcur.execute(sqlstr, cndvals)
                ccnt += 1
                rowvals = pgcur.fetchall()
                if rowvals:
                    if colcnt == 0:
                        # first batch: record column metadata once
                        for col in pgcur.description:
                            colname = col[0].upper() if ucname else col[0]
                            if col[1] == CHCODE: chrs.append(colname)
                            cols.append(colname)
                            records[colname] = []
                        colcnt = len(cols)
                    rcnt = len(rowvals)
                    count += rcnt
                    colvals = list(zip(*rowvals))
                    for i in range(colcnt):
                        vals = list(colvals[i])
                        colname = cols[i]
                        if chrs and colname in chrs:
                            for j in range(rcnt):
                                if vals[j] and vals[j][-1] == ' ': vals[j] = vals[j].rstrip()
                        records[colname].extend(vals)
            except PgSQL.Error as pgerr:
                if not check_dberror(pgerr, pgcnt, sqlstr, cndvals, logact): return PgLOG.FAILURE
                break   # recoverable: get a fresh cursor and retry
        if ccnt >= cndcnt: break
        pgcnt += 1
    pgcur.close()

    if PgLOG.PGLOG['DBGLEVEL']:
        PgLOG.pgdbg(1000, "pgmhget: {} record(s) retrieved from {}".format(count, tablenames))

    return records
|
|
1048
|
+
|
|
1049
|
+
#
# local function: build the UPDATE statement for pgupdt, pghupdt and pgmupdt
#
def prepare_update(tablename, fields, condition = None, cndflds = None):
    """Build 'UPDATE table SET f1=%s,... WHERE ...'.

    The SET clause is one %s placeholder per name in `fields`. When no
    literal `condition` is given, a parameterized 'f=%s AND ...' condition is
    built from `cndflds`. Field names pass through pgname() for quoting.
    """
    # SET clause: one placeholder per field
    setclause = ",".join("{}=%s".format(pgname(name, '.')) for name in fields)

    # WHERE clause: explicit condition wins; otherwise parameterize cndflds
    if not condition:
        condition = " AND ".join("{}=%s".format(pgname(name, '.')) for name in cndflds)

    sqlstr = "UPDATE {} SET {} WHERE {}".format(tablename, setclause, condition)
    if PgLOG.PGLOG['DBGLEVEL']: PgLOG.pgdbg(1000, sqlstr)

    return sqlstr
|
|
1071
|
+
|
|
1072
|
+
#
# update one or multiple rows in tablename
# tablename: update for one table name each call
# record: dict of field name -> new value
# condition: update condition for the where clause
# return number of rows updated upon success, PgLOG.FAILURE on error
#
def pgupdt(tablename, record, condition, logact = PGDBI['ERRLOG']):
    """Update rows of tablename matching `condition` with the values in `record`.

    Fix: apply prepare_default() BEFORE building the SQL from record's keys,
    matching pgadd()/pghupdt()/pgmupdt(). Previously defaults were filled in
    after prepare_update(), so any key added by prepare_default() would make
    the bound values disagree with the generated placeholders.
    """
    global curtran
    if not record: PgLOG.pglog("Nothing updates to " + tablename, logact)
    if not condition or isinstance(condition, int): PgLOG.pglog("Miss condition to update " + tablename, logact)
    # defaults first so the SET clause and the value tuple stay in sync
    if logact&PgLOG.DODFLT: prepare_default(tablename, record, logact)
    sqlstr = prepare_update(tablename, list(record), condition)

    values = tuple(record.values())
    if PgLOG.PGLOG['DBGLEVEL']: PgLOG.pgdbg(1000, "Update {} for {}".format(tablename, values))

    ucnt = pgcnt = 0
    # retry loop: check_dberror() decides whether an error is recoverable
    while True:
        pgcur = pgcursor()
        if not pgcur: return PgLOG.FAILURE
        try:
            pgcur.execute(sqlstr, values)
            ucnt = pgcur.rowcount
            pgcur.close()
        except PgSQL.Error as pgerr:
            if not check_dberror(pgerr, pgcnt, sqlstr, values, logact): return PgLOG.FAILURE
        else:
            break
        pgcnt += 1

    if PgLOG.PGLOG['DBGLEVEL']: PgLOG.pgdbg(1000, "pgupdt: {} record(s) updated to {}".format(ucnt, tablename))
    if(logact&PgLOG.ENDLCK):
        endtran()   # commit and release any record lock
    elif curtran:
        curtran += ucnt
        if curtran > PGDBI['MTRANS']: starttran()

    return ucnt
|
|
1112
|
+
|
|
1113
|
+
#
# update one or multiple records in tablename
# tablename: update for one table name each call
# record: update values, dict of field name -> value
# cnddict: condition dict of field name -> value
# return number of records updated upon success, PgLOG.FAILURE on error
#
def pghupdt(tablename, record, cnddict, logact = PGDBI['ERRLOG']):
    """Update rows matching the parameterized condition dict `cnddict` with
    the values in `record`.

    Fix: the debug message used "pghupdt: {}/{} record(s) updated to {}" — three
    placeholders for two arguments — raising IndexError whenever DBGLEVEL was
    enabled. Also dropped the `count` accumulator, which was never read.
    """
    global curtran
    if not record: PgLOG.pglog("Nothing updates to " + tablename, logact)
    if not cnddict or isinstance(cnddict, int): PgLOG.pglog("Miss condition to update to " + tablename, logact)
    if logact&PgLOG.DODFLT: prepare_defaults(tablename, record, logact)
    sqlstr = prepare_update(tablename, list(record), None, list(cnddict))

    # SET values first, then WHERE values, matching placeholder order
    values = tuple(record.values()) + tuple(cnddict.values())

    if PgLOG.PGLOG['DBGLEVEL']: PgLOG.pgdbg(1000, "Update {} for {}".format(tablename, values))

    ucnt = pgcnt = 0
    # retry loop: check_dberror() decides whether an error is recoverable
    while True:
        pgcur = pgcursor()
        if not pgcur: return PgLOG.FAILURE
        try:
            pgcur.execute(sqlstr, values)
            ucnt = pgcur.rowcount
            pgcur.close()
        except PgSQL.Error as pgerr:
            if not check_dberror(pgerr, pgcnt, sqlstr, values, logact): return PgLOG.FAILURE
        else:
            break
        pgcnt += 1

    if PgLOG.PGLOG['DBGLEVEL']: PgLOG.pgdbg(1000, "pghupdt: {} record(s) updated to {}".format(ucnt, tablename))
    if(logact&PgLOG.ENDLCK):
        endtran()   # commit and release any record lock
    elif curtran:
        curtran += ucnt
        if curtran > PGDBI['MTRANS']: starttran()

    return ucnt
|
|
1155
|
+
|
|
1156
|
+
#
# update multiple records in tablename
# tablename: update for one table name each call
# records: update values, dict of field name -> list of values
# cnddicts: condition dict of field name -> list of values
# return number of records updated upon success
#
def pgmupdt(tablename, records, cnddicts, logact = PGDBI['ERRLOG']):
    """Batch-update: one UPDATE per row of (records, cnddicts) values, run
    through psycopg2 execute_batch(). Row i of records is applied under row i
    of cnddicts; both must have the same number of value rows."""
    global curtran
    if not records: PgLOG.pglog("Nothing updates to " + tablename, logact)
    if not cnddicts or isinstance(cnddicts, int): PgLOG.pglog("Miss condition to update to " + tablename, logact)
    if logact&PgLOG.DODFLT: prepare_defaults(tablename, records, logact)
    sqlstr = prepare_update(tablename, list(records), None, list(cnddicts))

    fldvals = tuple(records.values())
    cntrow = len(fldvals[0])
    cndvals = tuple(cnddicts.values())
    cntcnd = len(cndvals[0])
    if cntcnd != cntrow: return PgLOG.pglog("Field/Condition value counts Miss match {}/{} to update {}".format(cntrow, cntcnd, tablename), logact)
    # concatenate SET columns and WHERE columns, then transpose into
    # per-statement parameter tuples (SET values first, as placeholders expect)
    v = fldvals + cndvals
    values = list(zip(*v))

    if PgLOG.PGLOG['DBGLEVEL']:
        for row in values: PgLOG.pgdbg(1000, "Update {} for {}".format(tablename, row))

    ucnt = pgcnt = 0
    while True:
        pgcur = pgcursor()
        if not pgcur: return PgLOG.FAILURE
        try:
            execute_batch(pgcur, sqlstr, values, page_size=PGDBI['PGSIZE'])
            # NOTE(review): ucnt counts submitted statements, not rows actually
            # matched by each WHERE clause
            ucnt = cntrow
        except PgSQL.Error as pgerr:
            if not check_dberror(pgerr, pgcnt, sqlstr, values[0], logact): return PgLOG.FAILURE
        else:
            break
        pgcnt += 1

    pgcur.close()

    if PgLOG.PGLOG['DBGLEVEL']: PgLOG.pgdbg(1000, "pgmupdt: {} record(s) updated to {}".format(ucnt, tablename))
    if(logact&PgLOG.ENDLCK):
        endtran()
    elif curtran:
        curtran += ucnt
        if curtran > PGDBI['MTRANS']: starttran()

    return ucnt
|
|
1205
|
+
|
|
1206
|
+
#
# local function: build the DELETE statement for pgdel, pghdel and pgmdel
#
def prepare_delete(tablename, condition = None, cndflds = None):
    """Build 'DELETE FROM table WHERE ...'.

    An explicit `condition` string is used as-is; otherwise a parameterized
    'fld=%s AND ...' condition is assembled from `cndflds`.
    """
    if not condition:
        condition = " AND ".join("{}=%s".format(name) for name in cndflds)

    sqlstr = "DELETE FROM {} WHERE {}".format(tablename, condition)
    if PgLOG.PGLOG['DBGLEVEL']: PgLOG.pgdbg(1000, sqlstr)

    return sqlstr
|
|
1222
|
+
|
|
1223
|
+
#
# delete one or multiple records in tablename according to condition
# tablename: delete for one table name each call
# condition: delete condition for the where clause
# return number of records deleted upon success
#
def pgdel(tablename, condition, logact = PGDBI['ERRLOG']):
    """Delete rows of tablename matching the literal `condition` string.
    Returns the deleted row count, PgLOG.FAILURE on error."""
    global curtran
    if not condition or isinstance(condition, int): PgLOG.pglog("Miss condition to delete from " + tablename, logact)
    sqlstr = prepare_delete(tablename, condition)

    dcnt = pgcnt = 0
    # retry loop: check_dberror() decides whether an error is recoverable
    while True:
        pgcur = pgcursor()
        if not pgcur: return PgLOG.FAILURE
        try:
            pgcur.execute(sqlstr)
            dcnt = pgcur.rowcount
            pgcur.close()
        except PgSQL.Error as pgerr:
            if not check_dberror(pgerr, pgcnt, sqlstr, None, logact): return PgLOG.FAILURE
        else:
            break
        pgcnt += 1

    if PgLOG.PGLOG['DBGLEVEL']: PgLOG.pgdbg(1000, "pgdel: {} record(s) deleted from {}".format(dcnt, tablename))
    if logact&PgLOG.ENDLCK:
        endtran()   # commit and release any record lock
    elif curtran:
        curtran += dcnt
        if curtran > PGDBI['MTRANS']: starttran()

    return dcnt
|
|
1257
|
+
|
|
1258
|
+
#
# delete one or multiple records in tablename according to condition
# tablename: delete for one table name each call
# cnddict: delete condition dict of field name -> value (ANDed, parameterized)
# return number of records deleted upon success
#
def pghdel(tablename, cnddict, logact = PGDBI['ERRLOG']):
    """Delete rows matching the parameterized equality-condition dict.
    Returns the deleted row count, PgLOG.FAILURE on error."""
    global curtran
    if not cnddict or isinstance(cnddict, int): PgLOG.pglog("Miss condition dict to delete from " + tablename, logact)
    sqlstr = prepare_delete(tablename, None, list(cnddict))

    # values bind to the %s placeholders built from cnddict's keys
    values = tuple(cnddict.values())
    if PgLOG.PGLOG['DBGLEVEL']: PgLOG.pgdbg(1000, "Delete from {} for {}".format(tablename, values))

    dcnt = pgcnt = 0
    while True:
        pgcur = pgcursor()
        if not pgcur: return PgLOG.FAILURE
        try:
            pgcur.execute(sqlstr, values)
            dcnt = pgcur.rowcount
            pgcur.close()
        except PgSQL.Error as pgerr:
            if not check_dberror(pgerr, pgcnt, sqlstr, values, logact): return PgLOG.FAILURE
        else:
            break
        pgcnt += 1

    if PgLOG.PGLOG['DBGLEVEL']: PgLOG.pgdbg(1000, "pghdel: {} record(s) deleted from {}".format(dcnt, tablename))
    if logact&PgLOG.ENDLCK:
        endtran()
    elif curtran:
        curtran += dcnt
        if curtran > PGDBI['MTRANS']: starttran()

    return dcnt
|
|
1295
|
+
|
|
1296
|
+
#
# delete multiple records in tablename according to condition
# tablename: delete for one table name each call
# cnddicts: delete condition dict of field name -> list of values;
# one DELETE is executed per row of condition values
# return number of records deleted upon success
#
def pgmdel(tablename, cnddicts, logact = PGDBI['ERRLOG']):
    """Batch-delete via psycopg2 execute_batch(), one DELETE per row of
    condition values. Returns PgLOG.FAILURE on error."""
    global curtran
    if not cnddicts or isinstance(cnddicts, int): PgLOG.pglog("Miss condition dict to delete from " + tablename, logact)
    sqlstr = prepare_delete(tablename, None, list(cnddicts))

    # transpose column-oriented condition values into per-statement tuples
    v = cnddicts.values()
    values = list(zip(*v))
    if PgLOG.PGLOG['DBGLEVEL']:
        for row in values:
            PgLOG.pgdbg(1000, "Delete from {} for {}".format(tablename, row))

    dcnt = pgcnt = 0
    while True:
        pgcur = pgcursor()
        if not pgcur: return PgLOG.FAILURE
        try:
            execute_batch(pgcur, sqlstr, values, page_size=PGDBI['PGSIZE'])
            # NOTE(review): dcnt counts submitted statements, not rows each
            # WHERE clause actually matched
            dcnt = len(values)
        except PgSQL.Error as pgerr:
            if not check_dberror(pgerr, pgcnt, sqlstr, values[0], logact): return PgLOG.FAILURE
        else:
            break
        pgcnt += 1

    pgcur.close()

    if PgLOG.PGLOG['DBGLEVEL']: PgLOG.pgdbg(1000, "pgmdel: {} record(s) deleted from {}".format(dcnt, tablename))
    if logact&PgLOG.ENDLCK:
        endtran()
    elif curtran:
        curtran += dcnt
        if curtran > PGDBI['MTRANS']: starttran()

    return dcnt
|
|
1337
|
+
|
|
1338
|
+
#
# sqlstr: a complete sql string
# return number of records affected upon success
#
def pgexec(sqlstr, logact = PGDBI['ERRLOG']):
    """Execute an arbitrary complete SQL statement and return the affected
    row count (cursor.rowcount), PgLOG.FAILURE on error."""
    global curtran
    if PgLOG.PGLOG['DBGLEVEL']: PgLOG.pgdbg(100, sqlstr)

    ret = pgcnt = 0
    # retry loop: check_dberror() decides whether an error is recoverable
    while True:
        pgcur = pgcursor()
        if not pgcur: return PgLOG.FAILURE
        try:
            pgcur.execute(sqlstr)
            ret = pgcur.rowcount
            pgcur.close()
        except PgSQL.Error as pgerr:
            if not check_dberror(pgerr, pgcnt, sqlstr, None, logact): return PgLOG.FAILURE
        else:
            break
        pgcnt += 1

    if PgLOG.PGLOG['DBGLEVEL']: PgLOG.pgdbg(1000, "pgexec: {} record(s) affected for {}".format(ret, sqlstr))
    if logact&PgLOG.ENDLCK:
        endtran()   # commit and release any record lock
    elif curtran:
        curtran += ret
        if curtran > PGDBI['MTRANS']: starttran()

    return ret
|
|
1369
|
+
|
|
1370
|
+
#
# tablename: name of the temporary table to create
# fromtable: table name the data is gathered from
# fields: comma deliminated string of field names to copy
# condition: query condition for the where clause
# return number of records created upon success
#
def pgtemp(tablename, fromtable, fields, condition = None, logact = 0):
    """Create a temporary table populated from a SELECT over fromtable.

    Fix: PostgreSQL requires the AS keyword in CREATE TABLE ... AS SELECT;
    the previous "CREATE TEMPORARY TABLE x SELECT ..." form is MySQL-only
    syntax and fails against PostgreSQL.
    """
    sqlstr = "CREATE TEMPORARY TABLE {} AS SELECT {} FROM {}".format(tablename, fields, fromtable)
    if condition: sqlstr += " WHERE " + condition

    return pgexec(sqlstr, logact)
|
|
1383
|
+
|
|
1384
|
+
#
# get condition for given table name for accessing information_schema
#
def table_condition(tablename):
    """Return an information_schema WHERE condition for tablename.

    A dotted name splits at its LAST dot (greedy match) into schema.table;
    an undotted name falls back to the configured PGDBI['SCNAME'] schema.
    """
    ms = re.match(r'(.+)\.(.+)', tablename)
    if ms:
        schema, table = ms.group(1), ms.group(2)
    else:
        schema, table = PGDBI['SCNAME'], tablename

    return "table_schema = '{}' AND table_name = '{}'".format(schema, table)
|
|
1398
|
+
|
|
1399
|
+
#
# check if a given table name exists or not
# tablename: one table name to check
#
def pgcheck(tablename, logact = 0):
    """Return PgLOG.SUCCESS if tablename exists in information_schema.tables,
    PgLOG.FAILURE otherwise."""
    found = pgget('information_schema.tables', None, table_condition(tablename), logact)
    if found:
        return PgLOG.SUCCESS
    return PgLOG.FAILURE
|
|
1409
|
+
|
|
1410
|
+
#
# group of functions to check parent records and add an empty one if missed
# return user.uid upon success, 0 otherwise
#
def check_user_uid(userno, date = None):
    """Resolve a scientist number to dssdb.user.uid, adding a stub user
    record (via ucar_user_info or a minimal 'M' record) when none exists."""
    if not userno: return 0
    if type(userno) is str: userno = int(userno)

    if date is None:
        # no date given: match the currently active record
        datecond = "until_date IS NULL"
        date = 'today'
    else:
        datecond = "(start_date IS NULL OR start_date <= '{}') AND (until_date IS NULL OR until_date >= '{}')".format(date, date)

    pgrec = pgget("dssdb.user", "uid", "userno = {} AND {}".format(userno, datecond), PGDBI['ERRLOG'])
    if pgrec: return pgrec['uid']

    # warn only once per process for each missing scientist number
    if userno not in NMISSES:
        PgLOG.pglog("{}: Scientist ID NOT on file for {}".format(userno, date), PgLOG.LGWNEM)
        NMISSES.append(userno)

    # check again if a user is on file with different date range
    pgrec = pgget("dssdb.user", "uid", "userno = {}".format(userno), PGDBI['ERRLOG'])
    if pgrec: return pgrec['uid']

    # not on file at all: gather info from UCAR directory, or add a
    # minimal placeholder flagged 'M' (missing)
    pgrec = ucar_user_info(userno)
    if not pgrec: pgrec = {'userno' : userno, 'stat_flag' : 'M'}
    uid = pgadd("dssdb.user", pgrec, (PGDBI['EXITLG']|PgLOG.AUTOID))
    if uid: PgLOG.pglog("{}: Scientist ID Added as user.uid = {}".format(userno, uid), PgLOG.LGWNEM)

    return uid
|
|
1442
|
+
|
|
1443
|
+
#
# return user.uid upon success, 0 otherwise
#
def get_user_uid(logname, date = None):
    """Resolve a UCAR login name to dssdb.user.uid, adding a stub user
    record (via ucar_user_info or a minimal 'M' record) when none exists."""
    if not logname: return 0
    if not date:
        # no date given: match the currently active record
        date = 'today'
        datecond = "until_date IS NULL"
    else:
        datecond = "(start_date IS NULL OR start_date <= '{}') AND (until_date IS NULL OR until_date >= '{}')".format(date, date)

    pgrec = pgget("dssdb.user", "uid", "logname = '{}' AND {}".format(logname, datecond), PGDBI['ERRLOG'])
    if pgrec: return pgrec['uid']

    # warn only once per process for each missing login name
    if logname not in LMISSES:
        PgLOG.pglog("{}: UCAR Login Name NOT on file for {}".format(logname, date), PgLOG.LGWNEM)
        LMISSES.append(logname)

    # check again if a user is on file with different date range
    pgrec = pgget("dssdb.user", "uid", "logname = '{}'".format(logname), PGDBI['ERRLOG'])
    if pgrec: return pgrec['uid']

    # not on file at all: gather info from UCAR directory, or add a
    # minimal placeholder flagged 'M' (missing)
    pgrec = ucar_user_info(0, logname)
    if not pgrec: pgrec = {'logname' : logname, 'stat_flag' : 'M'}
    uid = pgadd("dssdb.user", pgrec, (PGDBI['EXITLG']|PgLOG.AUTOID))
    if uid: PgLOG.pglog("{}: UCAR Login Name Added as user.uid = {}".format(logname, uid), PgLOG.LGWNEM)

    return uid
|
|
1472
|
+
|
|
1473
|
+
#
# get ucar user info for given userno (scientist number) or logname (Ucar login)
#
def ucar_user_info(userno, logname = None):
    """Query the UCAR directory (external 'pgperson'/'pgusername' commands)
    and map the output into a dssdb.user-shaped dict, or None if not found.

    The commands emit 'key<=>value' lines; MATCH maps directory keys to
    dssdb.user column names.
    """
    MATCH = {
        'upid' : "upid",
        'uid' : "userno",
        'username' : "logname",
        'lastName' : "lstname",
        'firstName' : "fstname",
        'active' : "stat_flag",
        'internalOrg' : "division",
        'externalOrg' : "org_name",
        'country' : "country",
        'forwardEmail' : "email",
        'email' : "ucaremail",
        'phone' : "phoneno"
    }

    buf = PgLOG.pgsystem("pgperson " + ("-uid {}".format(userno) if userno else "-username {}".format(logname)), PgLOG.LOGWRN, 20)
    if not buf: return None

    pgrec = {}
    for line in buf.split('\n'):
        ms = re.match(r'^(.+)<=>(.*)$', line)
        if ms:
            (key, val) = ms.groups()
            if key in MATCH:
                if key == 'upid' and 'upid' in pgrec: break # get one record only
                pgrec[MATCH[key]] = val

    if not pgrec: return None

    # NOTE(review): the lookups below assume pgperson always emitted these
    # keys; a partial record would raise KeyError — confirm against pgperson
    if userno:
        pgrec['userno'] = userno
    elif pgrec['userno']:
        pgrec['userno'] = userno = int(pgrec['userno'])
    if pgrec['upid']: pgrec['upid'] = int(pgrec['upid'])
    # directory reports active as "True"/"False"; store 'A'ctive or 'C'losed
    if pgrec['stat_flag']: pgrec['stat_flag'] = 'A' if pgrec['stat_flag'] == "True" else 'C'
    if pgrec['email'] and re.search(r'(@|\.)ucar\.edu$', pgrec['email'], re.I):
        # forward address is internal: prefer the ucar email and mark NCAR
        pgrec['email'] = pgrec['ucaremail']
        pgrec['org_name'] = 'NCAR'
    country = pgrec['country'] if 'country' in pgrec else None
    pgrec['country'] = set_country_code(pgrec['email'], country)
    if pgrec['division']:
        val = "NCAR"
    else:
        val = None
    pgrec['org_type'] = get_org_type(val, pgrec['email'])

    # second command adds employment start/end dates, when available
    buf = PgLOG.pgsystem("pgusername {}".format(pgrec['logname']), PgLOG.LOGWRN, 20)
    if not buf: return pgrec

    for line in buf.split('\n'):
        ms = re.match(r'^(.+)<=>(.*)$', line)
        if ms:
            (key, val) = ms.groups()
            if key == 'startDate':
                # keep only the date portion of 'YYYY-MM-DD hh:mm:ss' stamps
                m = re.match(r'^(\d+-\d+-\d+)\s', val)
                if m:
                    pgrec['start_date'] = m.group(1)
                else:
                    pgrec['start_date'] = val

            if key == 'endDate':
                m = re.match(r'^(\d+-\d+-\d+)\s', val)
                if m:
                    pgrec['until_date'] = m.group(1)
                else:
                    pgrec['until_date'] = val

    return pgrec
|
|
1546
|
+
|
|
1547
|
+
#
|
|
1548
|
+
# set country code for given coutry name or email address
|
|
1549
|
+
#
|
|
1550
|
+
def set_country_code(email, country = None):
    """Normalize a country name to its canonical long form.

    A two-word name becomes DOT-joined (e.g. 'NEW ZEALAND' -> 'NEW.ZEALAND');
    a handful of known aliases map to canonical names; anything else is
    returned upper-cased unchanged.  When no country is given, derive one
    from the email address via email_to_country().
    """
    aliases = {
        'CHINA' : "P.R.CHINA",
        'ENGLAND' : "UNITED.KINGDOM",
        'FR' : "FRANCE",
        'KOREA' : "SOUTH.KOREA",
        'USSR' : "RUSSIA",
        'US' : "UNITED.STATES",
        'U.S.A.' : "UNITED.STATES"
    }

    if not country: return email_to_country(email)

    country = country.upper()
    pair = re.match(r'^(\w+)\s(\w+)$', country)
    if pair:
        return pair.group(1) + '.' + pair.group(2)
    return aliases.get(country, country)
|
|
1573
|
+
|
|
1574
|
+
# return wuser.wuid upon success, 0 otherwise
|
|
1575
|
+
def check_wuser_wuid(email, date = None):
    """Return wuser.wuid for the given email, adding a wuser record if needed.

    Lookup order: wuser within the date range, wuser with any date range,
    then build a new wuser row seeded from a matching ruser row (or a
    minimal one flagged 'M' when no ruser exists).  Returns 0 on failure
    or empty email.
    """
    if not email: return 0
    emcond = "email = '{}'".format(email)
    if not date:
        date = 'today'
        datecond = "until_date IS NULL"      # current (open-ended) record only
    else:
        # record whose validity window covers the given date
        datecond = "(start_date IS NULL OR start_date <= '{}') AND (until_date IS NULL OR until_date >= '{}')".format(date, date)

    pgrec = pgget("wuser", "wuid", "{} AND {}".format(emcond, datecond), PGDBI['ERRLOG'])
    if pgrec: return pgrec['wuid']

    # check again if a user is on file with different date range
    pgrec = pgget("wuser", "wuid", emcond, PgLOG.LOGERR)
    if pgrec: return pgrec['wuid']

    # not on file: build a new wuser record
    record = {'email' : email}
    # seed it from ruser if possible, preferring the open-ended (end_date NULL) row
    pgrec = pgget("ruser", "*", emcond + " AND end_date IS NULL", PGDBI['ERRLOG'])
    if not pgrec: pgrec = pgget("ruser", "*", emcond, PGDBI['ERRLOG'])

    if pgrec:
        record['ruid'] = pgrec['id']
        record['fstname'] = pgrec['fname']
        record['lstname'] = pgrec['lname']
        record['country'] = pgrec['country']
        record['org_type'] = get_org_type(pgrec['org_type'], pgrec['email'])
        record['start_date'] = str(pgrec['rdate'])
        if pgrec['end_date']:
            # registration already ended: mark the wuser record Closed
            record['until_date'] = str(pgrec['end_date'])
            record['stat_flag'] = 'C'
        else:
            record['stat_flag'] = 'A'   # Active

        if pgrec['title']: record['utitle'] = pgrec['title']
        if pgrec['mname']: record['midinit'] = pgrec['mname'][0]   # middle initial only
        if pgrec['org']: record['org_name'] = pgrec['org']
    else:
        # no ruser record either: minimal entry flagged 'M' (presumably Missing)
        record['stat_flag'] = 'M'
        record['org_type'] = get_org_type('', email)
        record['country'] = email_to_country(email)

    wuid = pgadd("wuser", record, PgLOG.LOGERR|PgLOG.AUTOID)
    if wuid:
        if pgrec:
            PgLOG.pglog("{}({}, {}) Added as wuid({})".format(email, pgrec['lname'], pgrec['fname'], wuid), PgLOG.LGWNEM)
        else:
            PgLOG.pglog("{} Added as wuid({})".format(email, wuid), PgLOG.LGWNEM)
        return wuid

    return 0
|
|
1628
|
+
|
|
1629
|
+
# return wuser.wuid upon success, 0 otherwise
|
|
1630
|
+
def check_cdp_wuser(username):
    """Return wuser.wuid for a CDP user name, 0 otherwise."""
    pgrec = pgget("wuser", "wuid", "cdpname = '{}'".format(username), PGDBI['EXITLG'])
    if pgrec: return pgrec['wuid']

    # NOTE(review): past this point pgrec is None/empty (the truthy case
    # returned above), so pgrec['email'], pgrec['cdpid'], pgrec['cdpname']
    # and the pgadd of pgrec below can never succeed — this path raises a
    # TypeError the first time it is reached.  The first pgget was likely
    # meant to fetch the full CDP user record (e.g. "*" from a CDP table)
    # rather than just wuser.wuid; confirm intent before fixing.
    idrec = pgget("wuser", "wuid", "email = '{}'".format(pgrec['email']), PGDBI['EXITLG'])
    wuid = idrec['wuid'] if idrec else 0
    if wuid > 0:
        # wuser exists for the same email: attach the CDP identifiers to it
        idrec = {}
        idrec['cdpid'] = pgrec['cdpid']
        idrec['cdpname'] = pgrec['cdpname']
        pgupdt("wuser", idrec, "wuid = {}".format(wuid) , PGDBI['EXITLG'])
    else:
        # no wuser at all: add one from the CDP record
        pgrec['stat_flag'] = 'A'
        pgrec['org_type'] = get_org_type(pgrec['org_type'], pgrec['email'])
        pgrec['country'] = email_to_country(pgrec['email'])
        wuid = pgadd("wuser", pgrec, PGDBI['EXITLG']|PgLOG.AUTOID)
        if wuid > 0:
            PgLOG.pglog("CDP User {} added as wuid = {} in RDADB".format(username, wuid), PgLOG.LGWNEM)

    return wuid
|
|
1651
|
+
|
|
1652
|
+
#
|
|
1653
|
+
# for given email to get long country name
|
|
1654
|
+
#
|
|
1655
|
+
def email_to_country(email):
    """Map an email address to a long country name.

    A two-letter top-level domain is looked up in the countries table;
    the common US TLDs map to UNITED.STATES; anything unrecognized
    (including a two-letter domain not found in the table) yields UNKNOWN.
    """
    ms = re.search(r'\.(\w\w)$', email)
    if ms:
        pgrec = pgget("countries", "token", "domain_id = '{}'".format(ms.group(1)), PGDBI['EXITLG'])
        if pgrec: return pgrec['token']
        return "UNKNOWN"   # fixed: previously fell through and returned None
    if re.search(r'\.(gov|edu|mil|org|com|net)$', email):
        return "UNITED.STATES"
    return "UNKNOWN"
|
|
1665
|
+
|
|
1666
|
+
#
|
|
1667
|
+
# increment dataset.version by one for the given dataset in RDADB
|
|
1668
|
+
#
|
|
1669
|
+
def reset_rdadb_version(dsid):
    """Increment dataset.version by one for the given dataset id."""
    sqlstr = "UPDATE dataset SET version = version + 1 WHERE dsid = '{}'".format(dsid)
    pgexec(sqlstr, PGDBI['ERRLOG'])
|
|
1672
|
+
|
|
1673
|
+
#
|
|
1674
|
+
# check the use rdadb flag in table dataset for a given dataset and given values
|
|
1675
|
+
#
|
|
1676
|
+
def use_rdadb(dsid, logact = 0, vals = None):
    """Return the dataset's use_rdadb flag if it is one of the accepted
    values (default "IPYMW"), 'N' if the dataset exists but the flag is
    unset/unaccepted, and '' if the dataset is not in RDADB at all."""
    if not dsid: return ''

    pgrec = pgget("dataset", "use_rdadb", "dsid = '{}'".format(dsid), PGDBI['EXITLG'])
    if not pgrec:
        if logact:
            PgLOG.pglog("Dataset '{}' is not in RDADB!".format(dsid), logact)
        return ''

    flag = pgrec['use_rdadb']
    accepted = vals if vals else "IPYMW"   # Internal; Publishable; Yes RDADB
    if flag and flag in accepted:
        return flag
    return 'N'
|
|
1691
|
+
|
|
1692
|
+
#
|
|
1693
|
+
# fld: field name for querry condition
|
|
1694
|
+
# vals: reference to aaray of values
|
|
1695
|
+
# isstr: 1 for string values requires quotes and support wildcard
|
|
1696
|
+
# noand: 1 for skiping the leading ' AND ' for condition
|
|
1697
|
+
# return a condition string for a given field
|
|
1698
|
+
#
|
|
1699
|
+
def get_field_condition(fld, vals, isstr = 0, noand = 0):
    """Build a SQL condition string for field fld from a value list.

    fld   : field name for the query condition
    vals  : list of values; may start with PGSIGNS[0] to negate, and may
            contain comparison signs (PGSIGNS[1:]) that apply to the value(s)
            following them ('<>' starts a BETWEEN pair)
    isstr : 1 for string values (quoted; '%' -> LIKE, regex-ish chars -> SIMILAR TO)
    noand : 1 to skip the leading ' AND ' on the returned condition

    Plain values are collected into an IN (...) / = clause; sign-prefixed
    values become individual comparison clauses OR'ed (AND'ed when negated)
    together.  Empty strings are turned into an IS [NOT] NULL clause.
    """
    cnd = wcnd = negative = ''
    sign = "="
    logic = " OR "          # clause joiner; flips to AND under negation
    count = len(vals) if vals else 0
    if count == 0: return ''
    ncnt = scnt = wcnt = cnt = 0   # null-value, pending-sign, clause, plain-value counters
    for i in range(count):
        val = vals[i]
        # skip None and immediate duplicates
        if val is None or (i > 0 and val == vals[i-1]): continue
        # leading negation marker flips the whole condition
        if i == 0 and val == PGSIGNS[0]:
            negative = "NOT "
            logic = " AND "
            continue
        # a sign token sets the comparison for the following value(s)
        if scnt == 0 and isinstance(val, str):
            ms = re.match(r'^({})$'.format('|'.join(PGSIGNS[1:])), val)
            if ms:
                osign = sign = ms.group(1)
                scnt += 1
                if sign == "<>":
                    scnt += 1                       # BETWEEN consumes two values
                    sign = negative + "BETWEEN"
                elif negative:
                    sign = "<=" if (sign == ">") else ">="   # negate the inequality
                continue
        if isstr:
            if not isinstance(val, str): val = str(val)
            if sign == "=":
                if not val:
                    ncnt += 1                       # found null string
                elif val.find('%') > -1:
                    sign = negative + "LIKE"
                elif re.search(r'[\[\(\?\.]', val):
                    sign = negative + "SIMILAR TO"
            if val.find("'") != 0:
                val = "'{}'".format(val)            # quote unless already quoted
        elif isinstance(val, str):
            # numeric mode: coerce string input to int/float
            if val.find('.') > -1:
                val = float(val)
            else:
                val = int(val)
        if sign == "=":
            # plain equality values accumulate into a comma list for IN()
            if cnt > 0: cnd += ", "
            cnd += str(val)
            cnt += 1
        else:
            if sign == "AND":
                # second half of a BETWEEN pair
                wcnd += " {} {}".format(sign, val)
            else:
                if wcnt > 0: wcnd += logic
                wcnd += "{} {} {}".format(fld, sign, val)
                wcnt += 1
            if re.search(r'BETWEEN$', sign):
                sign = "AND"                        # expect the upper bound next
            else:
                sign = "="
                scnt = 0
    if scnt > 0:
        # a sign was given without enough following values
        s = 's' if scnt > 1 else ''
        PgLOG.pglog("Need {} value{} after sign '{}'".format(scnt, s, osign), PgLOG.LGEREX)
    if wcnt > 1: wcnd = "({})".format(wcnd)
    if cnt > 0:
        if cnt > 1:
            cnd = "{} {}IN ({})".format(fld, negative, cnd)
        else:
            cnd = "{} {} {}".format(fld, ("<>" if negative else "="), cnd)
        if ncnt > 0:
            ncnd = "{} IS {}NULL".format(fld, negative)
            cnd = "({}{}{})".format(cnd, logic, ncnd)
        if wcnt > 0: cnd = "({}{}{})".format(cnd, logic, wcnd)
    elif wcnt > 0:
        cnd = wcnd
    if cnd and not noand: cnd = " AND " + cnd

    return cnd
|
|
1776
|
+
|
|
1777
|
+
#
|
|
1778
|
+
# build up fieldname string for given or default condition
|
|
1779
|
+
#
|
|
1780
|
+
def fieldname_string(fnames, dnames = None, anames = None, wflds = None):
    """Build a field-name letter string.

    Falls back to dnames when fnames is empty, or to anames when fnames is
    the word 'all' (case-insensitive).  Each with-field letter in wflds is
    then inserted: 'Q' before 'R', 'Y' before 'X', 'G' before 'I', anything
    else (or a missing anchor) is prepended.
    """
    if not fnames:
        fnames = dnames                      # default field names
    elif re.match(r'^all$', fnames, re.I):
        fnames = anames                      # all field names

    if not wflds: return fnames

    anchors = {"Q": "R", "Y": "X", "G": "I"}   # with-field -> letter it precedes
    for wfld in wflds:
        if not wfld or wfld in fnames: continue   # empty or already included
        pos = fnames.find(anchors[wfld]) if wfld in anchors else -1
        if pos < 0:
            fnames = wfld + fnames                        # prepend
        else:
            fnames = fnames[:pos] + wfld + fnames[pos:]   # insert before anchor

    return fnames
|
|
1806
|
+
|
|
1807
|
+
#
|
|
1808
|
+
# Function get_group_field_path(gindex: group index
|
|
1809
|
+
# dsid: dataset id
|
|
1810
|
+
# field: path field name: webpath or savedpath)
|
|
1811
|
+
# go through group tree upward to find a none-empty path, return it or null
|
|
1812
|
+
#
|
|
1813
|
+
def get_group_field_path(gindex, dsid, field):
    """Walk the group tree upward from gindex to find a non-empty value of
    the given path field (webpath or savedpath); fall back to the dataset
    record at the root.  Returns the path or None."""
    if gindex:
        pgrec = pgget("dsgroup", "pindex, {}".format(field),
                      "dsid = '{}' AND gindex = {}".format(dsid, gindex), PGDBI['EXITLG'])
    else:
        pgrec = pgget("dataset", field,
                      "dsid = '{}'".format(dsid), PGDBI['EXITLG'])

    if not pgrec: return None
    if pgrec[field]: return pgrec[field]
    if gindex:
        # empty at this level: try the parent group (gindex 0 ends at dataset)
        return get_group_field_path(pgrec['pindex'], dsid, field)
    return None
|
|
1828
|
+
|
|
1829
|
+
#
|
|
1830
|
+
# get the specialist info for a given dataset
|
|
1831
|
+
#
|
|
1832
|
+
def get_specialist(dsid, logact = PGDBI['ERRLOG']):
    """Return the lead specialist info (specialist, lstname, fstname) for a
    dataset (dsowner with priority 1), cached per dsid.

    The generic accounts 'datahelp'/'dss' are labeled "Data Help"; when no
    owner is on file the generic datahelp contact is returned.
    """
    # fixed: cache was read/written with the literal key 'dsid' instead of
    # the dsid value, so every dataset shared (and never hit) one slot
    if dsid in SPECIALIST: return SPECIALIST[dsid]

    pgrec = pgget("dsowner, dssgrp", "specialist, lstname, fstname",
                  "specialist = logname AND dsid = '{}' AND priority = 1".format(dsid), logact)
    if pgrec:
        if pgrec['specialist'] == "datahelp" or pgrec['specialist'] == "dss":
            pgrec['lstname'] = "Help"
            pgrec['fstname'] = "Data"
    else:
        # fixed: previously assigned into pgrec while it was None (TypeError);
        # build the fallback record explicitly
        pgrec = {'specialist': "datahelp", 'lstname': "Help", 'fstname': "Data"}

    SPECIALIST[dsid] = pgrec   # cache specialist info for dsowner of dsid
    return pgrec
|
|
1849
|
+
|
|
1850
|
+
#
|
|
1851
|
+
# build customized email from get_email()
|
|
1852
|
+
#
|
|
1853
|
+
def build_customized_email(table, field, condition, subject, logact = 0):
    """Send the email body accumulated in PgLOG; on failure cache the full
    message into table.field for the row matching condition.  Returns a
    PgLOG status code."""
    msg = PgLOG.get_email()
    if not msg: return PgLOG.FAILURE

    sender = PgLOG.PGLOG['CURUID'] + "@ucar.edu"
    receiver = PgLOG.PGLOG['EMLADDR'] if PgLOG.PGLOG['EMLADDR'] else (PgLOG.PGLOG['CURUID'] + "@ucar.edu")
    if receiver.find(sender) < 0: PgLOG.add_carbon_copy(sender, 1)
    cc = PgLOG.PGLOG['CCDADDR']
    if not subject: subject = "Message from {}-{}".format(PgLOG.PGLOG['HOSTNAME'], PgLOG.get_command())

    estat = PgLOG.send_python_email(subject, receiver, msg, sender, cc, logact)
    if estat != PgLOG.SUCCESS:
        # direct send failed: build a raw message and retry/cache
        parts = ["From: {}\nTo: {}\n".format(sender, receiver)]
        if cc: parts.append("Cc: {}\n".format(cc))
        parts.append("Subject: {}!\n\n{}\n".format(subject, msg))
        ebuf = ''.join(parts)

        if PgLOG.PGLOG['EMLSEND']:
            estat = PgLOG.send_customized_email("{}.{}".format(table, condition), ebuf, logact)
        if estat != PgLOG.SUCCESS:
            estat = cache_customized_email(table, field, condition, ebuf, 0)
            if estat and logact:
                PgLOG.pglog("Email {} cached to '{}.{}' for {}, Subject: {}".format(receiver, table, field, condition, subject), logact)

    return estat
|
|
1878
|
+
|
|
1879
|
+
#
|
|
1880
|
+
# email: full user email address
|
|
1881
|
+
#
|
|
1882
|
+
# get user real name from table ruser for a given email address
|
|
1883
|
+
# opts == 1 : include email
|
|
1884
|
+
# opts == 2 : include org_type
|
|
1885
|
+
# opts == 4 : include country
|
|
1886
|
+
# opts == 8 : include valid_email
|
|
1887
|
+
# opts == 16 : include org
|
|
1888
|
+
#
|
|
1889
|
+
def get_ruser_names(email, opts = 0, date = None):
    """Get a user's real name (and optional fields) from ruser by email.

    opts is a bitmask of extra fields to include:
      1 email, 2 org_type, 4 country, 8 valid_email, 16 org.
    Falls back to any-date ruser rows, then to dssdb.user, and finally
    synthesizes a name from the email local part.
    """
    def optflds(base, extras):
        # append each opts-selected field to the base field list
        flds = base
        for bit, fld in extras:
            if opts & bit: flds += ", " + fld
        return flds

    fields = optflds("lname lstname, fname fstname",
                     [(1, "email"), (2, "org_type"), (4, "country"), (8, "valid_email"), (16, "org")])

    if date:
        datecond = "rdate <= '{}' AND (end_date IS NULL OR end_date >= '{}')".format(date, date)
    else:
        datecond = "end_date IS NULL"
        date = time.strftime("%Y-%m-%d", (time.gmtime() if PgLOG.PGLOG['GMTZ'] else time.localtime()))

    emcnd = "email = '{}'".format(email)
    pgrec = pgget("ruser", fields, "{} AND {}".format(emcnd, datecond), PgLOG.LGEREX)
    if not pgrec:
        PgLOG.pglog("{}: email not in ruser for {}".format(email, date), PgLOG.LOGWRN)
        # check again if a user is on file with a different date range
        pgrec = pgget("ruser", fields, emcnd, PgLOG.LGEREX)
        if not pgrec and pgget("dssdb.user", '', emcnd):
            # legacy dssdb.user table uses different column names
            ufields = optflds("lstname, fstname",
                              [(1, "email"), (2, "org_type"), (4, "country"),
                               (8, "email valid_email"), (16, "org_name org")])
            pgrec = pgget("dssdb.user", ufields, emcnd, PgLOG.LGEREX)

    if pgrec and pgrec['lstname']:
        fullname = (pgrec['fstname'].capitalize() + ' ') if pgrec['fstname'] else ''
        pgrec['name'] = fullname + pgrec['lstname'].capitalize()
    else:
        if not pgrec: pgrec = {}
        pgrec['name'] = email.split('@')[0]   # fall back to the email local part
        if opts&1: pgrec['email'] = email

    return pgrec
|
|
1928
|
+
|
|
1929
|
+
#
|
|
1930
|
+
# cache a customized email for sending it later
|
|
1931
|
+
#
|
|
1932
|
+
def cache_customized_email(table, field, condition, emlmsg, logact = 0):
    """Store an email message into table.field for the row matching
    condition; on update failure try sending it directly instead."""
    if not pgupdt(table, {field: emlmsg}, condition, logact|PgLOG.ERRLOG):
        msg = "cache email to '{}.{}' for {}".format(table, field, condition)
        PgLOG.pglog("Error {}, try to send directly now".format(msg), logact|PgLOG.ERRLOG)
        return PgLOG.send_customized_email(msg, emlmsg, logact)

    if logact: PgLOG.pglog("Email cached to '{}.{}' for {}".format(table, field, condition), logact&(~PgLOG.EXITLG))
    return PgLOG.SUCCESS
|
|
1942
|
+
|
|
1943
|
+
#
|
|
1944
|
+
# otype: user organization type
|
|
1945
|
+
# email: user email address)
|
|
1946
|
+
#
|
|
1947
|
+
# return: orgonizaion type like DSS, NCAR, UNIV...
|
|
1948
|
+
#
|
|
1949
|
+
def get_org_type(otype, email):
    """Return an organization type (DSS, NCAR, UNIV, GOV, ...) derived from
    the given type and/or the email address domain."""
    otype = otype if otype else "OTHER"
    if not email: return otype

    ucar = re.search(r'(@|\.)ucar\.edu$', email)
    if ucar:
        if otype in ('UCAR', 'OTHER'): otype = 'NCAR'
        # a direct @ucar.edu login found in dssgrp is one of ours
        if otype == 'NCAR' and ucar.group(1) == '@':
            mu = re.match(r'^(.+)@', email)
            if mu and pgget("dssgrp", "", "logname = '{}'".format(mu.group(1))): otype = 'DSS'
        return otype

    dom = re.search(r'\.(mil|org|gov|edu|com|net)(\.\w\w|$)', email)
    if dom:
        otype = dom.group(1).upper()
        if otype == 'EDU': otype = "UNIV"
    return otype
|
|
1967
|
+
|
|
1968
|
+
#
|
|
1969
|
+
# join values and handle the null values
|
|
1970
|
+
#
|
|
1971
|
+
def join_values(vstr, vals):
    """Append a "Value(s)(...)" line for vals to vstr, handling a None or
    empty prefix string."""
    if vstr is None:
        vstr = ''
    elif vstr:
        vstr += "\n"
    plural = 's' if len(vals) > 1 else ''
    return "{}Value{}({})".format(vstr, plural, ', '.join(map(str, vals)))
|
|
1979
|
+
|
|
1980
|
+
#
|
|
1981
|
+
# check table hostname to find the system down times. Cache the result for 10 minutes
|
|
1982
|
+
#
|
|
1983
|
+
def get_system_downs(hostname, logact = 0):
    """Return (and cache for 10 minutes) the down-time status of hostname.

    The returned dict has keys: chktime (last DB check), start/end (down
    window as epoch seconds, 0 if none), active (0 when service = 'N'),
    path (affected path prefix list for service 'S'), curtime (now).
    """
    curtime = int(time.time())
    newhost = 0

    if hostname not in SYSDOWN:
        SYSDOWN[hostname] = {}
        newhost = 1
    # refresh from the hostname table at most every 600 seconds
    if newhost or (curtime - SYSDOWN[hostname]['chktime']) > 600:
        SYSDOWN[hostname]['chktime'] = curtime
        SYSDOWN[hostname]['start'] = 0
        SYSDOWN[hostname]['end'] = 0
        SYSDOWN[hostname]['active'] = 1
        SYSDOWN[hostname]['path'] = None

        pgrec = pgget('hostname', 'service, domain, downstart, downend',
                      "hostname = '{}'".format(hostname), logact)
        if pgrec:
            if pgrec['service'] == 'N':
                # host is out of service: treat as down starting now, no end
                SYSDOWN[hostname]['start'] = curtime
                SYSDOWN[hostname]['active'] = 0
            else:
                # downstart/downend appear to be datetime values — TODO confirm
                start = int(datetime.timestamp(pgrec['downstart'])) if pgrec['downstart'] else 0
                end = int(datetime.timestamp(pgrec['downend'])) if pgrec['downend'] else 0
                if start > 0 and (end == 0 or end > curtime):
                    # record only a window that has not already expired
                    SYSDOWN[hostname]['start'] = start
                    SYSDOWN[hostname]['end'] = end
                if pgrec['service'] == 'S' and pgrec['domain'] and re.match(r'^/', pgrec['domain']):
                    # partial service: domain holds affected path prefix(es)
                    SYSDOWN[hostname]['path'] = pgrec['domain']

    SYSDOWN[hostname]['curtime'] = curtime

    return SYSDOWN[hostname]
|
|
2016
|
+
|
|
2017
|
+
#
|
|
2018
|
+
# return seconds for how long the system will continue to be down
|
|
2019
|
+
#
|
|
2020
|
+
def system_down_time(hostname, offset, logact = 0):
    """Return how many seconds the host remains down (0 when it is up).
    offset widens the window start; an open-ended down time under PBS
    batch returns the configured PBSTIME."""
    down = get_system_downs(hostname, logact)
    if not down['start'] or down['curtime'] < (down['start'] - offset):
        return 0   # not inside a down window

    if down['end']:
        if down['curtime'] <= down['end']:
            return down['end'] - down['curtime']
    elif PgLOG.PGLOG['PGBATCH'] == PgLOG.PGLOG['PBSNAME']:
        return PgLOG.PGLOG['PBSTIME']

    return 0
|
|
2031
|
+
|
|
2032
|
+
#
|
|
2033
|
+
# return string message if the system is down
|
|
2034
|
+
#
|
|
2035
|
+
def system_down_message(hostname, path, offset, logact = 0):
    """Return a descriptive message when hostname (optionally restricted to
    path) is down, or None when it is in service.

    offset widens the start of the down window; path is matched against the
    host's recorded down paths via match_down_path().
    """
    down = get_system_downs(hostname, logact)
    msg = None
    if down['start'] and down['curtime'] >= (down['start'] - offset):
        match = match_down_path(path, down['path'])
        if match:
            msg = "{}{}:".format(hostname, ('-' + path) if match > 0 else '')
            if not down['active']:
                msg += " Not in Service"
            else:
                msg += " Planned down, started at " + PgLOG.current_datetime(down['start'])
                if not down['end']:
                    msg += " And no end time specified"
                elif down['curtime'] <= down['end']:
                    # fixed: was 'msg = ...', which discarded the message prefix
                    msg += " And will end by " + PgLOG.current_datetime(down['end'])

    return msg
|
|
2053
|
+
|
|
2054
|
+
#
|
|
2055
|
+
# return 1 if given path match daemon paths, 0 if not; -1 if cannot compare
|
|
2056
|
+
#
|
|
2057
|
+
def match_down_path(path, dpaths):
    """Return 1 if path falls under any of the colon-separated down paths,
    0 if it does not, and -1 when either argument is missing (no basis to
    compare)."""
    if not path or not dpaths: return -1

    for prefix in dpaths.split(':'):
        if re.match(r'^{}'.format(prefix), path):
            return 1
    return 0
|
|
2067
|
+
|
|
2068
|
+
# validate is login user is in DECS group
|
|
2069
|
+
# check all node if skpdsg is false, otherwise check non-DSG nodes
|
|
2070
|
+
def validate_decs_group(cmdname, logname, skpdsg):
    """Exit (via PgLOG.LGEREX) unless logname is in the DECS group (dssgrp).

    When skpdsg is true the check is skipped on DSG hosts listed in
    PGLOG['DSGHOSTS'].  An empty logname defaults to the current login uid.
    """
    if skpdsg and PgLOG.PGLOG['DSGHOSTS'] and re.search(r'(^|:){}'.format(PgLOG.PGLOG['HOSTNAME']), PgLOG.PGLOG['DSGHOSTS']): return
    # fixed: assigned to misspelled 'lgname', so the CURUID default never applied
    if not logname: logname = PgLOG.PGLOG['CURUID']

    if not pgget("dssgrp", '', "logname = '{}'".format(logname), PgLOG.LGEREX):
        PgLOG.pglog("{}: Must be in DECS Group to run '{}' on {}".format(logname, cmdname, PgLOG.PGLOG['HOSTNAME']), PgLOG.LGEREX)
|
|
2077
|
+
|
|
2078
|
+
#
|
|
2079
|
+
# add an allusage record into yearly table; create a new yearly table if it does not exist
|
|
2080
|
+
# year -- year to identify the yearly table, evaluated if missing
|
|
2081
|
+
# records -- hash to hold one or multiple records.
|
|
2082
|
+
# Dict keys: email -- user email address,
|
|
2083
|
+
# org_type -- organization type
|
|
2084
|
+
# country -- country code
|
|
2085
|
+
# dsid -- dataset ID
|
|
2086
|
+
# date -- date data accessed
|
|
2087
|
+
# time -- time data accessed
|
|
2088
|
+
# quarter -- quarter of the year data accessed
|
|
2089
|
+
# size -- bytes of data accessed
|
|
2090
|
+
# method -- delivery methods: MSS,Web,Ftp,Tape,Cd,Disk,Paper,cArt,Micro
|
|
2091
|
+
# source -- usage source flag: W - wusage, O - ordusage
|
|
2092
|
+
# midx -- refer to mbr2loc.midx if not 0
|
|
2093
|
+
# ip -- user IP address
|
|
2094
|
+
# region -- user region name; for example, Colorado
|
|
2095
|
+
#
|
|
2096
|
+
# isarray -- if true, mutiple records provided via arrays for each hash key
|
|
2097
|
+
# docheck -- if 1, check and add only if record is not on file
|
|
2098
|
+
# docheck -- if 2, check and add if record is not on file, and update if exists
|
|
2099
|
+
# docheck -- if 4, check and add if record is not on file, and update if exists,
|
|
2100
|
+
# and also checking NULL email value too
|
|
2101
|
+
#
|
|
2102
|
+
def add_yearly_allusage(year, records, isarray = 0, docheck = 0):
    """Add record(s) to the yearly allusage_<year> table (created on demand
    via PgLOG.ADDTBL).

    year    : table year; derived from the record date when falsy
    records : one record dict, or (isarray true) a dict of parallel lists
    docheck : 0 add blindly; 1 add only if not on file; 2 also update when
              on file; 4 like 2 but also matching rows with NULL email
    Returns the number of rows added/updated.
    """
    acnt = 0
    if not year:
        # derive the year from the (first) access date
        ms = re.match(r'^(\d\d\d\d)', str(records['date'][0] if isarray else records['date']))
        if ms: year = ms.group(1)
    tname = "allusage_{}".format(year)
    if isarray:
        cnt = len(records['email'])
        if 'quarter' not in records: records['quarter'] = [0]*cnt
        for i in range(cnt):
            if not records['quarter'][i]:
                # quarter from the month in a YYYY-MM-DD date
                ms = re.search(r'-(\d+)-', str(records['date'][i]))
                if ms: records['quarter'][i] = int((int(ms.group(1))-1)/3)+1
        if docheck:
            # per-record existence check: add or (docheck > 1) update
            for i in range(cnt):
                record = {}
                for key in records:
                    record[key] = records[key][i]
                cnd = "email = '{}' AND dsid = '{}' AND method = '{}' AND date = '{}' AND time = '{}'".format(
                      record['email'], record['dsid'], record['method'], record['date'], record['time'])
                pgrec = pgget(tname, 'aidx', cnd, PgLOG.LOGERR|PgLOG.ADDTBL)
                if docheck == 4 and not pgrec:
                    # also match a row recorded without an email
                    cnd = "email IS NULL AND dsid = '{}' AND method = '{}' AND date = '{}' AND time = '{}'".format(
                          record['dsid'], record['method'], record['date'], record['time'])
                    pgrec = pgget(tname, 'aidx', cnd, PgLOG.LOGERR|PgLOG.ADDTBL)
                if pgrec:
                    if docheck > 1: acnt += pgupdt(tname, record, "aidx = {}".format(pgrec['aidx']), PgLOG.LGEREX)
                else:
                    acnt += pgadd(tname, record, PgLOG.LGEREX|PgLOG.ADDTBL)
        else:
            # bulk insert without existence checks
            acnt = pgmadd(tname, records, PgLOG.LGEREX|PgLOG.ADDTBL)
    else:
        record = records
        if not ('quarter' in record and record['quarter']):
            ms = re.search(r'-(\d+)-', str(record['date']))
            if ms: record['quarter'] = int((int(ms.group(1))-1)/3)+1
        if docheck:
            cnd = "email = '{}' AND dsid = '{}' AND method = '{}' AND date = '{}' AND time = '{}'".format(
                  record['email'], record['dsid'], record['method'], record['date'], record['time'])
            pgrec = pgget(tname, 'aidx', cnd, PgLOG.LOGERR|PgLOG.ADDTBL)
            if docheck == 4 and not pgrec:
                cnd = "email IS NULL AND dsid = '{}' AND method = '{}' AND date = '{}' AND time = '{}'".format(
                      record['dsid'], record['method'], record['date'], record['time'])
                pgrec = pgget(tname, 'aidx', cnd, PgLOG.LOGERR|PgLOG.ADDTBL)
            if pgrec:
                if docheck > 1: acnt = pgupdt(tname, record, "aidx = {}".format(pgrec['aidx']), PgLOG.LGEREX)
                return acnt
        acnt = pgadd(tname, record, PgLOG.LGEREX|PgLOG.ADDTBL)

    return acnt
|
|
2153
|
+
|
|
2154
|
+
#
|
|
2155
|
+
# add a wusage record into yearly table; create a new yearly table if it does not exist
|
|
2156
|
+
# year -- year to identify the yearly table, evaluated if missing
|
|
2157
|
+
# records -- hash to hold one or multiple records.
|
|
2158
|
+
# Dict keys: wid - reference to wfile.wid
|
|
2159
|
+
# wuid_read - reference to wuser.wuid, 0 if missing email
|
|
2160
|
+
# dsid - reference to dataset.dsid at the time of read
|
|
2161
|
+
# date_read - date file read
|
|
2162
|
+
# time_read - time file read
|
|
2163
|
+
# quarter - quarter of the year data accessed
|
|
2164
|
+
# size_read - bytes of data read
|
|
2165
|
+
# method - download methods: WEB, CURL, MGET, FTP and MGET
|
|
2166
|
+
# locflag - location flag: Glade or Object
|
|
2167
|
+
# ip - IP address
|
|
2168
|
+
#
|
|
2169
|
+
# isarray -- if true, mutiple records provided via arrays for each hash key
|
|
2170
|
+
#
|
|
2171
|
+
def add_yearly_wusage(year, records, isarray = 0):
    """Add record(s) to the yearly wusage_<year> table (created on demand
    via PgLOG.ADDTBL), filling in the quarter from date_read when absent.
    Returns the number of rows added."""
    def date_quarter(dval):
        # quarter (1-4) from the month of a YYYY-MM-DD value; None if unparsable
        mm = re.search(r'-(\d+)-', str(dval))
        return (int((int(mm.group(1))-1)/3)+1) if mm else None

    if not year:
        first = records['date_read'][0] if isarray else records['date_read']
        my = re.match(r'^(\d\d\d\d)', str(first))
        if my: year = my.group(1)
    tname = "wusage_{}".format(year)

    if isarray:
        if 'quarter' not in records:
            cnt = len(records['wid'])
            records['quarter'] = [0]*cnt
            for i in range(cnt):
                qtr = date_quarter(records['date_read'][i])
                if qtr: records['quarter'][i] = qtr
        return pgmadd(tname, records, PgLOG.LGEREX|PgLOG.ADDTBL)

    if 'quarter' not in records:
        qtr = date_quarter(records['date_read'])
        if qtr: records['quarter'] = qtr
    return pgadd(tname, records, PgLOG.LGEREX|PgLOG.ADDTBL)
|
|
2194
|
+
|
|
2195
|
+
#
|
|
2196
|
+
# double quote a array of single or sign delimited strings
|
|
2197
|
+
#
|
|
2198
|
+
def pgnames(ary, sign = None, joinstr = None):
    """Quote each (possibly sign-delimited) name in ary via pgname; return
    the list, or a single string joined by joinstr when given."""
    quoted = [pgname(item, sign) for item in ary]
    return quoted if joinstr is None else joinstr.join(quoted)
|
|
2208
|
+
|
|
2209
|
+
#
|
|
2210
|
+
# double quote a single or sign delimited string
|
|
2211
|
+
#
|
|
2212
|
+
def pgname(str, sign = None):
    """Double-quote an identifier when needed for PostgreSQL.

    With sign, the string is split on sign[0] and each piece quoted
    recursively with the remaining signs.  A piece is quoted unless it is
    already quoted or is a plain lower-case identifier not listed in PGRES.
    """
    if sign:
        nstr = ''
        for piece in str.split(sign[0]):
            # re-join with the delimiter, skipping it while nothing accumulated
            if nstr: nstr += sign[0]
            nstr += pgname(piece, sign[1:])
        return nstr

    nstr = str.strip()
    if nstr and '"' not in nstr:
        if nstr in PGRES or not re.match(r'^[a-z_][a-z0-9_]*$', nstr):
            nstr = '"{}"'.format(nstr)
    return nstr
|
|
2227
|
+
|
|
2228
|
+
#
|
|
2229
|
+
# get a postgres password for given host, port, dbname, usname
|
|
2230
|
+
#
|
|
2231
|
+
def get_pgpass_password():
    """Return the database password: the configured PWNAME if set, else the
    OpenBao secret, else the .pgpass entry."""
    if PGDBI['PWNAME']: return PGDBI['PWNAME']
    return get_baopassword() or get_pgpassword()
|
|
2238
|
+
|
|
2239
|
+
def get_pgpassword():
    """Look up the password from the parsed .pgpass entries, trying the
    socket host (DBSHOST) before the regular host (DBHOST)."""
    if not DBPASS: read_pgpass()
    dbport = str(PGDBI['DBPORT']) if PGDBI['DBPORT'] else '5432'
    pwname = None
    for host in (PGDBI['DBSHOST'], PGDBI['DBHOST']):
        pwname = DBPASS.get((host, dbport, PGDBI['DBNAME'], PGDBI['LNNAME']))
        if pwname: break
    return pwname
|
|
2246
|
+
|
|
2247
|
+
def get_baopassword():
    """Return the OpenBao-stored password for the current login name,
    loading the database's secrets on first use."""
    secrets = DBBAOS.get(PGDBI['DBNAME'])
    if secrets is None:
        read_openbao()   # always seeds DBBAOS[dbname], possibly empty
        secrets = DBBAOS[PGDBI['DBNAME']]
    return secrets.get(PGDBI['LNNAME'])
|
|
2252
|
+
|
|
2253
|
+
#
|
|
2254
|
+
# Reads the .pgpass file and returns a dictionary of credentials.
|
|
2255
|
+
#
|
|
2256
|
+
def read_pgpass():
    """Parse the .pgpass file (DSSHOME, falling back to GDEXHOME) into the
    DBPASS lookup table keyed by (host, port, dbname, login)."""
    pgpass = PgLOG.PGLOG['DSSHOME'] + '/.pgpass'
    if not op.isfile(pgpass): pgpass = PgLOG.PGLOG['GDEXHOME'] + '/.pgpass'
    try:
        with open(pgpass, "r") as pf:
            for entry in pf:
                entry = entry.strip()
                if entry and not entry.startswith("#"):
                    host, port, db, login, passwd = entry.split(":")
                    DBPASS[(host, port, db, login)] = passwd
    except Exception as e:
        PgLOG.pglog(str(e), PGDBI['ERRLOG'])
|
|
2269
|
+
|
|
2270
|
+
#
|
|
2271
|
+
# Reads OpenBao secrets and returns a dictionary of credentials.
|
|
2272
|
+
#
|
|
2273
|
+
def read_openbao():
|
|
2274
|
+
|
|
2275
|
+
dbname = PGDBI['DBNAME']
|
|
2276
|
+
DBBAOS[dbname] = {}
|
|
2277
|
+
url = 'https://bao.k8s.ucar.edu/'
|
|
2278
|
+
baopath = {
|
|
2279
|
+
'ivaddb' : 'gdex/pgdb03',
|
|
2280
|
+
'ispddb' : 'gdex/pgdb03',
|
|
2281
|
+
'default' : 'gdex/pgdb01'
|
|
2282
|
+
}
|
|
2283
|
+
dbpath = baopath[dbname] if dbname in baopath else baopath['default']
|
|
2284
|
+
client = hvac.Client(url=PGDBI.get('BAOURL'))
|
|
2285
|
+
client.token = PgLOG.PGLOG.get('BAOTOKEN')
|
|
2286
|
+
try:
|
|
2287
|
+
read_response = client.secrets.kv.v2.read_secret_version(
|
|
2288
|
+
path=dbpath,
|
|
2289
|
+
mount_point='kv',
|
|
2290
|
+
raise_on_deleted_version=False
|
|
2291
|
+
)
|
|
2292
|
+
except Exception as e:
|
|
2293
|
+
return PgLOG.pglog(str(e), PGDBI['ERRLOG'])
|
|
2294
|
+
|
|
2295
|
+
baos = read_response['data']['data']
|
|
2296
|
+
for key in baos:
|
|
2297
|
+
ms = re.match(r'^(\w*)pass(\w*)$', key)
|
|
2298
|
+
if not ms: continue
|
|
2299
|
+
baoname = None
|
|
2300
|
+
pre = ms.group(1)
|
|
2301
|
+
suf = ms.group(2)
|
|
2302
|
+
if pre:
|
|
2303
|
+
baoname = 'metadata' if pre == 'meta' else pre
|
|
2304
|
+
elif suf == 'word':
|
|
2305
|
+
baoname = 'postgres'
|
|
2306
|
+
if baoname: DBBAOS[dbname][baoname] = baos[key]
|