rda-python-dbms 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1948 @@
1
+ #
2
+ ###############################################################################
3
+ #
4
+ # Title : MyDBI.py -- My DataBase Interface
5
+ # Author : Zaihua Ji, zji@ucar.edu
6
+ # Date : 03/07/2016
7
+ # 2024-04-11 restored the deleted one
8
+ #  Purpose : Python library module to handle query and manipulate MySQL database
9
+ #
10
+ # Github : https://github.com/NCAR/rda-python-dbms.git
11
+ #
12
+ ###############################################################################
13
+
14
+ import os
15
+ import re
16
+ import time
17
+ import mysql.connector as MySQL
18
+ from os import path as op
19
+ from rda_python_common import PgLOG
20
+
21
+ mydb = None # reference to a connected database object
22
+ curtran = 0 # 0 - no transaction, > 0 - in transaction, current action count
23
+ NMISSES = [] # array of mising userno
24
+ LMISSES = [] # array of mising logname
25
+ TABLES = {} # record table field information
26
+ SPECIALIST = {} # hash array refrences to specialist info of dsids
27
+ SYSDOWN = {}
28
+ MYDBI = {}
29
+ ADDTBLS = []
30
+ MYSIGNS = ['!', '<', '>', '<>']
31
+
32
+ MYSTRS = {
33
+ 'wfile' : ['wfile']
34
+ }
35
+
36
+ # hard coded socket paths for machine_dbnames
37
+ DBSOCKS = {
38
+ 'default' : "/data/dssdb/tmp/mysql.sock",
39
+ 'obsua' : "/data/upadb/tmp/mysql.sock",
40
+ 'ivaddb' : "/data/ivaddb/tmp/mysql.sock",
41
+ 'ispddb' : "/data/ispddb/tmp/mysql.sock"
42
+ }
43
+
44
+ # hard coded db ports for machine_dbnames
45
+ DBPORTS = {
46
+ 'default' : 3306,
47
+ 'obsua' : 3307,
48
+ 'upadb' : 3307,
49
+ 'ispddb' : 3307
50
+ }
51
+
52
+ # home path for check db on alter host
53
+ VIEWHOMES = {
54
+ 'default' : PgLOG.PGLOG['DSSDBHM']
55
+ }
56
+
57
+ #
58
+ # MySQL specified query timestamp format
59
+ #
60
+ fmtyr = lambda fn: "year({})".format(fn)
61
+ fmtqt = lambda fn: "quarter({})".format(fn)
62
+ fmtmn = lambda fn: "month({})".format(fn)
63
+ fmtdt = lambda fn: "date({})".format(fn)
64
+ fmtym = lambda fn: "date_format({}, '%Y-%m')".format(fn)
65
+ fmthr = lambda fn: "hour({})".format(fn)
66
+
67
+ #
68
+ # set environments and defaults
69
+ #
70
def SETMYDBI(name, value):
   """Set MYDBI[name] to value, letting an environment variable of the same name override it."""
   MYDBI[name] = PgLOG.get_environment(name, value)
72
+
73
# connection settings; each may be overridden by an environment variable
# of the same name (see SETMYDBI above)
SETMYDBI('CDHOST', 'rda-db.ucar.edu')   # common domain for db host for master server
SETMYDBI('DEFNAME', 'dssdb')            # default database name
SETMYDBI('DEFHOST', PgLOG.PGLOG['SQLHOST'])   # default database host
SETMYDBI("DEFPORT", 0)                  # 0 means look up DBPORTS when needed
SETMYDBI("DEFSOCK", '')                 # empty means look up DBSOCKS when needed
SETMYDBI("DBNAME", MYDBI['DEFNAME'])    # current database name
SETMYDBI("LNNAME", MYDBI['DBNAME'])     # login name, defaults to the database name
SETMYDBI("PWNAME", MYDBI['LNNAME'])     # password name, defaults to the login name
SETMYDBI("DBHOST", (os.environ['DSSDBHOST'] if os.environ.get('DSSDBHOST') else MYDBI['DEFHOST']))
SETMYDBI("DBPORT", 0)
SETMYDBI("LOGACT", PgLOG.LOGERR)   # default logact
SETMYDBI("DBSOCK", '')
SETMYDBI("DATADIR", PgLOG.PGLOG['DSDHOME'])
SETMYDBI("BCKPATH", PgLOG.PGLOG['DSSDBHM'] + "/backup")   # database backup path
SETMYDBI("SQLPATH", PgLOG.PGLOG['DSSDBHM'] + "/sql")      # sql script path
SETMYDBI("VWNAME", MYDBI['DBNAME'])    # database name on the view (replica) host
SETMYDBI("VWPORT", 0)
SETMYDBI("VWSOCK", '')

# derived settings; these are not environment-overridable
MYDBI['DBSHOST'] = PgLOG.get_short_host(MYDBI['DBHOST'])
MYDBI['DEFSHOST'] = PgLOG.get_short_host(MYDBI['DEFHOST'])
MYDBI['VWHOST'] = PgLOG.PGLOG['VIEWHOST']
MYDBI['VWSHOST'] = PgLOG.get_short_host(MYDBI['VWHOST'])
MYDBI['VWHOME'] = (VIEWHOMES[PgLOG.PGLOG['HOSTNAME']] if PgLOG.PGLOG['HOSTNAME'] in VIEWHOMES else VIEWHOMES['default'])
MYDBI['VHSET'] = 0
MYDBI['MTRANS'] = 5000      # max number of changes in one transaction
MYDBI['MAXICNT'] = 3000000  # maximum number of records in each table
100
+
101
+ #
102
+ # create a myddl command string and return it
103
+ #
104
def get_myddl_command(tname):
   """Build and return a myddl shell command string for the given table.

   tname may be qualified as 'dbname.tablename'; otherwise the current
   database name in MYDBI is used.
   """
   matched = re.match(r'^(.+)\.(.+)$', tname)
   if matched:
      dbname, table = matched.group(1), matched.group(2)
   else:
      dbname, table = MYDBI['DBNAME'], tname

   return "myddl {} -aa -h {} -d {} -u {}".format(table, MYDBI['DBHOST'], dbname, MYDBI['LNNAME'])
114
+
115
+ #
116
+ # set default connection for dssdb MySQL Server
117
+ #
118
def dssdb_dbname():
   """Point the connection settings at the default dssdb MySQL server."""
   default_dbinfo(MYDBI['DEFNAME'], PgLOG.PGLOG['SQLHOST'])
120
+
121
+ #
122
+ # set default connection for obsua MySQL Server
123
+ #
124
def obsua_dbname():
   """Point the connection settings at the obsua MySQL server (replica host, port 3307)."""
   default_dbinfo('obsua', "rda-db-rep.ucar.edu", None, None, 3307)
126
+
127
+ #
128
+ # set default connection for ivadb MySQL Server
129
+ #
130
def ivaddb_dbname():
   """Point the connection settings at the ivaddb (ICOADS) MySQL server."""
   default_dbinfo('ivaddb', "rda-db-icoads.ucar.edu")
132
+
133
+ #
134
+ # set a default database info with hard coded info
135
+ #
136
def default_dbinfo(dbname = None, dbhost = None, lnname = None, pwname = None, dbport = None, socket = None):
   """Switch the connection settings to a default database configuration.

   Unset arguments fall back to the module defaults recorded in MYDBI;
   dbport/socket distinguish 'not given' (None) from explicit values.
   """
   set_dbname(dbname or MYDBI['DEFNAME'],
              lnname, pwname,
              dbhost or MYDBI['DEFHOST'],
              MYDBI['DEFPORT'] if dbport is None else dbport,
              MYDBI['DEFSOCK'] if socket is None else socket)
144
+
145
+ #
146
+ # get the datbase sock file name of a given dbname for local connection
147
+ #
148
def get_dbsock(dbname):
   """Return the unix socket path for dbname (local connections), or the default path."""
   return DBSOCKS.get(dbname, DBSOCKS['default'])
151
+
152
+ #
153
+ # get the datbase port number of a given dbname for remote connection
154
+ #
155
def get_dbport(dbname):
   """Return the tcp port for dbname (remote connections), or the default port."""
   return DBPORTS.get(dbname, DBPORTS['default'])
158
+
159
+ #
160
+ # set connection for viewing database information
161
+ #
162
def view_dbinfo(dbname = None, lnname = None, pwname = None):
   """Point the connection settings at the view (replica) host and record the view db name."""
   name = dbname or MYDBI['DEFNAME']
   set_dbname(name, lnname, pwname, PgLOG.PGLOG['VIEWHOST'], MYDBI['VWPORT'])
   if name and name != MYDBI['VWNAME']:
      MYDBI['VWNAME'] = name
168
+
169
+ #
170
+ # set connection for given dbname
171
+ #
172
def set_dbname(dbname = None, lnname = None, pwname = None, dbhost = None, dbport = None, socket = None):
   """Update the cached connection settings in MYDBI.

   Login and password names follow the database name unless given
   explicitly; a server on this host gets a unix socket while a remote
   host gets a tcp port. Any existing connection is dropped when a
   setting actually changed, so the next query reconnects.
   """
   changed = 0

   if dbname and dbname != MYDBI['DBNAME']:
      # login/password names default to the new database name
      MYDBI['PWNAME'] = MYDBI['LNNAME'] = MYDBI['DBNAME'] = dbname
      changed = 1
   if lnname and lnname != MYDBI['LNNAME']:
      MYDBI['PWNAME'] = MYDBI['LNNAME'] = lnname
      changed = 1
   if pwname and pwname != MYDBI['PWNAME']:
      MYDBI['PWNAME'] = pwname
      changed = 1
   if dbhost and dbhost != MYDBI['DBHOST']:
      MYDBI['DBHOST'] = dbhost
      MYDBI['DBSHOST'] = PgLOG.get_short_host(dbhost)
      changed = 1
   if MYDBI['DBSHOST'] == PgLOG.PGLOG['HOSTNAME']:
      # server runs on this host: connect via unix socket
      if socket is None: socket = get_dbsock(dbname)
      if socket != MYDBI['DBSOCK']:
         MYDBI['DBSOCK'] = socket
         changed = 1
   else:
      # remote server: connect via tcp port
      if not dbport: dbport = get_dbport(dbname)
      if dbport != MYDBI['DBPORT']:
         MYDBI['DBPORT'] = dbport
         changed = 1

   if changed and mydb is not None: mydisconnect(1)
201
+
202
+ #
203
+ # start a database transaction and exit if fails
204
+ #
205
def starttran():
   """Start a database transaction, committing any transaction already open.

   Exits via the underlying error handling if the connection fails.
   """
   global curtran

   if curtran: endtran()   # try to end previous transaction
   # is_connected is a method and must be called; the original tested the
   # bound method object itself, which is always truthy, so a dead
   # connection was never re-established here (endtran/aborttran call it)
   if not (mydb and mydb.is_connected()): myconnect(1)
   mydb.start_transaction()
   curtran = 1
213
+
214
+ #
215
+ # end a transaction with changes committed and exit if fails
216
+ #
217
def endtran():
   """Commit the current transaction, if one is open, and clear the transaction state."""
   global curtran

   if curtran and mydb and mydb.is_connected():
      mydb.commit()
   curtran = 0
222
+
223
+ #
224
+ # end a transaction without changes committed and exit inside if fails
225
+ #
226
def aborttran():
   """Roll back the current transaction, if one is open, and clear the transaction state."""
   global curtran

   if curtran and mydb and mydb.is_connected():
      mydb.rollback()
   curtran = 0
231
+
232
+ #
233
+ # record error message to dscheck record and clean the lock
234
+ #
235
def record_dscheck_error(errmsg):
   """Record errmsg on the current dscheck record and release its lock.

   Only updates when the dscheck record is still locked by this process;
   returns the myupdt() result, or 0 when there is nothing to update.
   """
   cnd = PgLOG.PGLOG['DSCHECK']['chkcnd']
   if PgLOG.PGLOG['NOQUIT']: PgLOG.PGLOG['NOQUIT'] = 0   # allow exit while recording the error
   dflags = PgLOG.PGLOG['DSCHECK']['dflags']

   myrec = myget("dscheck", "mcount, tcount, lockhost, pid", cnd, PgLOG.LGEREX)
   if not myrec: return 0
   if not myrec['pid'] and not myrec['lockhost']: return 0   # not locked at all
   (chost, cpid) = PgLOG.current_process_info()
   if myrec['pid'] != cpid or myrec['lockhost'] != chost: return 0   # locked by another process

   # update dscheck record only if it is still locked by the current process
   record = {}
   record['chktime'] = int(time.time())
   record['status'] = "E"
   record['pid'] = 0 # release lock
   if dflags:
      record['dflags'] = dflags
      record['mcount'] = myrec['mcount'] + 1
   else:
      record['dflags'] = ''

   if errmsg:
      # presumably caps the stored message at 512 characters -- confirm in PgLOG
      errmsg = PgLOG.break_long_string(errmsg, 512, None, 50, None, 50, 25)
      if myrec['tcount'] > 1: errmsg = "Try {}: {}".format(myrec['tcount'], errmsg)
      record['errmsg'] = errmsg

   return myupdt("dscheck", record, cnd, MYDBI['LOGACT'])
264
+
265
+ #
266
+ # local function to log query error
267
+ #
268
def qelog(myerr, sleep, sqlstr, vals, mycnt, logact = 0):
   """Log a query error, optionally sleeping before the caller retries.

   myerr  - the MySQL.Error instance
   sleep  - seconds to sleep after logging (0 for none)
   sqlstr - the failing statement, or a string starting with "Retry " to
            indicate a retry message rather than raw SQL
   vals   - bound values of the failing statement, if any
   mycnt  - how many retries have happened so far
   Returns PgLOG.FAILURE (unless PgLOG.pglog exits via logact).
   """
   retry = " Sleep {}(sec) & ".format(sleep) if sleep else " "
   if not logact: logact = MYDBI['LOGACT']

   # build a prefix describing which retry this is
   if sqlstr:
      if sqlstr.find("Retry ") == 0:
         retry += "the {} ".format(PgLOG.int2order(mycnt+1))
      elif sleep:
         retry += "the {} Retry: \n".format(PgLOG.int2order(mycnt+1))
      elif mycnt:
         retry = " Error the {} Retry: \n".format(PgLOG.int2order(mycnt))
      else:
         retry = "\n"
      sqlstr = retry + sqlstr
   else:
      sqlstr = ''

   if vals: sqlstr += " with values: " + str(vals)

   if myerr.errno: sqlstr = "{}{}".format(str(myerr), sqlstr)
   # on a fatal log action, also record the error on the dscheck record
   if logact&PgLOG.EXITLG and PgLOG.PGLOG['DSCHECK']: record_dscheck_error(sqlstr)
   PgLOG.pglog(sqlstr, logact)
   if sleep: time.sleep(sleep)

   return PgLOG.FAILURE # if not exit in PgLOG.pglog()
294
+
295
+ #
296
+ # try to add a new table according the table not exist error
297
+ #
298
def try_add_table(errstr, logact):
   """Parse a MySQL 1146 'table doesn't exist' error string and create the table.

   errstr - the stringified MySQL.Error; the missing table name is taken
            from the quoted 'dbname.tablename' in the message.
   """
   ms = re.match(r"1146.* Table '.+\.(.+)' doesn't exist", errstr)
   if ms:
      # pass logact through: add_a_table() requires it, the original
      # called add_a_table(tname) with one argument and raised TypeError
      add_a_table(ms.group(1), logact)
304
+
305
+ #
306
+ # add a new table for given table name
307
+ #
308
def add_a_table(tname, logact):
   """Create table tname via the myddl command, at most once per process."""
   if tname in ADDTBLS:
      return
   PgLOG.pgsystem(get_myddl_command(tname), logact)
   ADDTBLS.append(tname)
313
+
314
+ #
315
+ # local function to log query error
316
+ #
317
def check_dberror(myerr, mycnt, sqlstr, ary, logact = 0):
   """Decide whether a MySQL error should be retried.

   Returns PgLOG.SUCCESS when the caller should retry (after any logging
   and recovery done here), otherwise logs via qelog() and returns its
   result (PgLOG.FAILURE unless the log action exits).
   """
   ret = PgLOG.FAILURE

   if not logact: logact = MYDBI['LOGACT']
   if mycnt < PgLOG.PGLOG['DBRETRY']:
      # 1040/2003/2005: server unreachable or refusing connections
      if myerr.errno == 1040 or myerr.errno == 2003 or myerr.errno == 2005:
         if MYDBI['DBNAME'] == MYDBI['DEFNAME'] and MYDBI['DBSHOST'] != MYDBI['DEFSHOST']:
            # fall back to the master server for the default database
            default_dbinfo()
            qelog(myerr, 0, "Retry Connecting to {} on {}".format(MYDBI['DBNAME'], MYDBI['DBHOST']), ary, mycnt, PgLOG.MSGLOG)
         else:
            # back off progressively longer on each retry
            qelog(myerr, 5+5*mycnt, "Retry Connecting", ary, mycnt, PgLOG.LOGWRN)

         return PgLOG.SUCCESS
      elif myerr.errno == -1 or myerr.errno == 2006 or myerr.errno == 2013: # lost connection & reconnect server
         qelog(myerr, 0, "Retry Connecting", ary, mycnt, PgLOG.LOGWRN)
         myconnect(1, mycnt + 1)
         return (PgLOG.FAILURE if not mydb else PgLOG.SUCCESS)
      elif myerr.errno == 1205: # try to lock again
         qelog(myerr, 10, "Retry Locking", ary, mycnt, PgLOG.LOGWRN)
         return PgLOG.SUCCESS
      elif myerr.errno == 1146 and logact&PgLOG.ADDTBL: # try to add table
         qelog(myerr, 0, "Retry after adding a table", ary, mycnt, PgLOG.LOGWRN)
         try_add_table(str(myerr), logact)
         return PgLOG.SUCCESS

   if logact&PgLOG.DOLOCK and myerr.errno == 1205: logact &= ~PgLOG.EXITLG # no exit for lock error
   return qelog(myerr, 0, sqlstr, ary, mycnt, logact)
345
+
346
+ #
347
+ # return hash reference to mysql batch mode command and output file name
348
+ #
349
def mybatch(sqlfile, foreground = 0):
   """Return mysql batch-mode command info for running sqlfile.

   With no sqlfile, returns just the mysql option string. Otherwise
   returns a dict with 'cmd' (the shell command) and, for background
   runs, 'out' (an output file name derived from sqlfile).
   """
   # fall back to the master server when the view host's slave marker file is gone
   if(MYDBI['VWHOST'] and MYDBI['VWHOME'] and
      MYDBI['DBSHOST'] == MYDBI['VWSHOST'] and MYDBI['DBNAME'] == MYDBI['VWNAME']):
      slave = "/{}/{}.slave".format(MYDBI['VWHOME'], MYDBI['VWHOST'])
      if not op.exists(slave):
         set_dbname(None, None, None, MYDBI['DEFHOST'], MYDBI['DEFPORT'])

   if MYDBI['DBSHOST'] == PgLOG.PGLOG['HOSTNAME']:
      # local server: connect via unix socket
      if not MYDBI['DBSOCK']: MYDBI['DBSOCK'] = get_dbsock(MYDBI['DBNAME'])
      options = "-h localhost -S " + MYDBI['DBSOCK']
   else:
      # remote server: connect via host and port
      if not MYDBI['DBPORT']: MYDBI['DBPORT'] = get_dbport(MYDBI['DBNAME'])
      options = "-h {} -P {}".format(MYDBI['DBHOST'], MYDBI['DBPORT'])

   options += " -u {} -p{} {}".format(MYDBI['LNNAME'], MYDBI['PWNAME'], MYDBI['DBNAME'])

   # the original returned options when sqlfile WAS given, making the batch
   # code below unreachable; bare options are only useful without a sqlfile
   if not sqlfile: return options

   batch = {}   # the original referenced batch without ever initializing it
   if foreground:
      batch['cmd'] = "mysql -vvv {} < {} |".format(options, sqlfile)
   else:
      batch['out'] = sqlfile
      if re.search(r'\.sql$', batch['out']):
         batch['out'] = re.sub(r'\.sql$', '.out', batch['out'])
      else:
         batch['out'] += ".out"
      batch['cmd'] = "mysql {} < {} > {} 2>&1".format(options, sqlfile, batch['out'])

   return batch
380
+
381
+ #
382
+ # start a connection to dssdb database and return a DBI object; None if error
383
+ # force connect if connect > 0
384
+ #
385
def myconnect(reconnect = 0, mycnt = 0):
   """Connect (or reconnect when reconnect > 0) to the configured database.

   Returns the cached connection object on success; PgLOG.FAILURE when a
   connection cannot be established and the log action does not exit.
   """
   global mydb

   if mydb and reconnect:
      if mydb.is_connected(): return mydb # no need reconnect
      try:
         mydb.reconnect()
         return mydb
      except MySQL.Error as myerr:
         # reconnect failed: log, drop the dead handle and fall through
         # to a fresh connection attempt below
         check_dberror(myerr, mycnt+1, '', None, MYDBI['LOGACT'])
         mydisconnect(1)
   elif mydb:
      return mydb
   elif reconnect:
      reconnect = 0 # initial connection

   # if we were pointed at the view (replica) host but its slave marker
   # file is gone, fall back to the master server
   if(MYDBI['DBSHOST'] != PgLOG.PGLOG['HOSTNAME'] and MYDBI['VWHOST'] and
      MYDBI['VWHOME'] and MYDBI['DBSHOST'] == MYDBI['VWSHOST'] and MYDBI['DBNAME'] == MYDBI['VWNAME']):
      slave = "/{}/{}.slave".format(MYDBI['VWHOME'], MYDBI['VWHOST'])
      if not op.exists(slave): set_dbname(None, None, None, MYDBI['DEFHOST'], MYDBI['DEFPORT'])

   while True:
      # password is masked while the config is stringified for logging;
      # the real password is filled in just before connecting
      config = {'database' : MYDBI['DBNAME'],
                'user' : MYDBI['LNNAME'],
                'auth_plugin' : 'mysql_native_password',
                'password' : '******'}
      if MYDBI['DBSHOST'] == PgLOG.PGLOG['HOSTNAME']:
         # local server: unix socket connection
         if not MYDBI['DBSOCK']: MYDBI['DBSOCK'] = get_dbsock(MYDBI['DBNAME'])
         config['host'] = 'localhost'
         config['unix_socket'] = MYDBI['DBSOCK']
      else:
         # remote server: host/port connection
         if not MYDBI['DBPORT']: MYDBI['DBPORT'] = get_dbport(MYDBI['DBNAME'])
         config['host'] = MYDBI['DBHOST'] if MYDBI['DBHOST'] else MYDBI['CDHOST']
         if MYDBI['DBPORT']: config['port'] = MYDBI['DBPORT']

      sqlstr = "MySQL.connect(**{})".format(config)
      if PgLOG.PGLOG['DBGLEVEL']: PgLOG.mydbg(1000, sqlstr)

      config['password'] = MYDBI['PWNAME']
      config['autocommit'] = True
      try:
         PgLOG.PGLOG['MYDBBUF'] = mydb = MySQL.connect(**config)
         if reconnect: PgLOG.pglog("{} Reconnected at {}".format(sqlstr, PgLOG.current_datetime()), PgLOG.MSGLOG|PgLOG.FRCLOG)
         return mydb
      except MySQL.Error as myerr:
         if not check_dberror(myerr, mycnt, sqlstr, None, MYDBI['LOGACT']|PgLOG.EXITLG): return PgLOG.FAILURE
      mycnt += 1
433
+
434
+
435
+ #
436
+ # return a MySQL cursor upon success
437
+ #
438
def mycursor():
   """Return a cursor on the cached connection, connecting first if needed.

   Returns PgLOG.FAILURE when no connection can be made.
   """
   mycur = None

   if not mydb:
      myconnect()
      if not mydb: return PgLOG.FAILURE

   mycnt = 0
   while True:
      try:
         mycur = mydb.cursor()
      except MySQL.Error as myerr:
         if mycnt == 0 and not mydb.is_connected():
            # first failure on a dead connection: reconnect and retry
            myconnect(1)
         elif not check_dberror(myerr, mycnt, '', None, MYDBI['LOGACT']|PgLOG.EXITLG):
            return PgLOG.FAILURE
      else:
         break
      mycnt += 1

   return mycur
460
+
461
+ #
462
+ # disconnect to dssdb database
463
+ #
464
def mydisconnect(stopit = 1):
   """Drop the cached connection object, closing it first when stopit is true."""
   global mydb

   if not mydb:
      return
   if stopit:
      mydb.close()
   PgLOG.PGLOG['MYDBBUF'] = mydb = None
470
+
471
+ #
472
+ # decode mysql byte fields for multiple records
473
+ #
474
def decode_byte_records(flds, myrecs):
   """Decode bytes values to str, in place, for the named fields of a
   multi-record result (dict of field name -> list of values).

   Fields absent from myrecs are skipped. The original indexed
   myrecs[flds[0]] unconditionally to get a row count, raising KeyError
   whenever the first named field was not in the result.
   """
   for fld in flds:
      if fld not in myrecs: continue
      vals = myrecs[fld]
      for i, val in enumerate(vals):
         if hasattr(val, 'decode'): vals[i] = val.decode()
482
+
483
+ #
484
+ # decode mysql byte fields for single record
485
+ #
486
def decode_byte_record(flds, myrec):
   """Decode bytes values to str, in place, for the named fields of one record."""
   for fld in flds:
      val = myrec.get(fld)
      if hasattr(val, 'decode'):
         myrec[fld] = val.decode()
492
+
493
+ #
494
+ # gather table field default information as hash array with field names as keys
495
+ # and default values as values
496
+ # the whole table information is cached to a hash array with table names as keys
497
+ #
498
def mytable(tablename, logact = 0):
   """Return {field name : default value} for tablename, cached per table.

   Defaults come from information_schema.columns: integer columns get
   int defaults, nullable columns None, everything else ''. With
   PgLOG.ADDTBL set in logact a missing table is created once via myddl.
   Returns a copy so callers may modify the result freely.
   """
   # resolve the default log action at call time; the original default
   # argument (MYDBI['LOGACT']) was frozen at import, unlike every other
   # function in this module
   if not logact: logact = MYDBI['LOGACT']

   if tablename in TABLES: return TABLES[tablename].copy() # cached already

   intms = r'^(tinyint|smallint|mediumint|bigint|int)$'
   numms = r'^(\d+)$'
   fields = "column_name col, data_type typ, is_nullable nil, column_default def"
   condition = table_condition(tablename)
   mycnt = 0
   while True:
      myrecs = mymget('information_schema.columns', fields, condition, logact)
      cnt = len(myrecs['col']) if myrecs else 0
      if cnt: break
      if mycnt == 0 and logact&PgLOG.ADDTBL:
         add_a_table(tablename, logact)   # one attempt to create the missing table
      else:
         return PgLOG.pglog(tablename + ": Table not exists", logact)
      mycnt += 1

   # local renamed from 'mytable' to avoid shadowing this function's name
   table = {}
   decode_byte_records(['typ', 'def'], myrecs)
   for i in range(cnt):
      name = myrecs['col'][i]
      typ = myrecs['typ'][i]
      dflt = myrecs['def'][i]
      isint = re.match(intms, typ)
      if dflt is not None:
         # numeric string defaults of integer columns become ints
         if isint and isinstance(dflt, str) and re.match(numms, dflt):
            dflt = int(dflt)
      elif myrecs['nil'][i] == 'YES':
         dflt = None
      elif isint:
         dflt = 0
      else:
         dflt = ''
      table[name] = dflt

   TABLES[tablename] = table.copy()
   return table
537
+
538
+ #
539
+ # local fucntion: insert prepare for myadd() and mymadd()
540
+ #
541
def prepare_insert(tablename, fields):
   """Build an INSERT statement for tablename with one %s placeholder per field."""
   strfld = ",".join(fields)
   strplc = ",".join(["%s"] * len(fields))

   sqlstr = "INSERT INTO {} ({}) VALUES ({})".format(tablename, strfld, strplc)
   if PgLOG.PGLOG['DBGLEVEL']: PgLOG.mydbg(1000, sqlstr)

   return sqlstr
555
+
556
+
557
+ #
558
+ # local fucntion: prepare default value for single record
559
+ #
560
def prepare_default(tablename, record, logact = 0):
   """Replace None/empty-string values in record with the table's column defaults."""
   table = mytable(tablename, logact)

   for fld, val in record.items():
      if val is None or (isinstance(val, str) and not val):
         record[fld] = table[fld]
573
+
574
+ #
575
+ # local fucntion: prepare default value for multiple records
576
+ #
577
def prepare_defaults(tablename, records, logact = 0):
   """Replace None/empty-string values in each field's value list with column defaults."""
   table = mytable(tablename, logact)

   for fld, vals in records.items():
      for i, val in enumerate(vals):
         if val is None or (isinstance(val, str) and not val):
            vals[i] = table[fld]
592
+
593
+ #
594
+ # insert one record into tablename
595
+ # tablename: add record for one table name each call
596
+ # record: hash reference with keys as field names and hash values as field values
597
+ # return PgLOG.SUCCESS or PgLOG.FAILURE
598
+ #
599
def myadd(tablename, record, logact = 0):
   """Insert one record into tablename.

   record - dict of field names to values
   Returns the new auto-increment id with PgLOG.AUTOID in logact,
   otherwise 1 on success; PgLOG.FAILURE on error.
   """
   global curtran
   if not logact: logact = MYDBI['LOGACT']
   if not record: return PgLOG.pglog("Nothing adds to " + tablename, logact)
   # fill empty values with column defaults when requested
   if(logact&PgLOG.DODFLT): prepare_default(tablename, record, logact)

   fields = list(record)
   values = tuple(record.values())

   sqlstr = prepare_insert(tablename, fields)
   if PgLOG.PGLOG['DBGLEVEL']: PgLOG.mydbg(1000, "Insert to " + tablename + " for " + str(values))

   ret = mycnt = ccnt = 0
   while True:
      mycur = mycursor()
      if not mycur: return PgLOG.FAILURE
      try:
         mycur.execute(sqlstr, values)
         if(logact&PgLOG.AUTOID):
            ret = mycur.lastrowid
         else:
            ret = 1
         mycur.close()
         ccnt = 1
      except MySQL.Error as myerr:
         # retry on retryable errors, fail otherwise
         if not check_dberror(myerr, mycnt, sqlstr, values, logact): return PgLOG.FAILURE
      else:
         break
      mycnt += 1

   if PgLOG.PGLOG['DBGLEVEL']: PgLOG.mydbg(1000, "myadd: 1 record added to " + tablename + ", return " + str(ret))
   if(logact&PgLOG.ENDLCK):
      endtran()
   elif curtran:
      # count the change; restart the transaction once it grows too large
      curtran += ccnt
      if curtran > MYDBI['MTRANS']: starttran()

   return ret
638
+
639
+ #
640
+ # insert multiple records into tablename
641
+ # tablename: add records for one table name each call
642
+ # records: dict with field names as keys and each value is a list of field values
643
+ # return PgLOG.SUCCESS or PgLOG.FAILURE
644
+ #
645
def mymadd(tablename, records, logact = 0):
   """Insert multiple records into tablename.

   records - dict of field names to equal-length lists of values; rows
             are formed positionally across the lists
   Returns the list of auto-increment ids with PgLOG.AUTOID in logact
   (rows inserted one by one), otherwise the number of rows inserted in
   MTRANS-sized batches; PgLOG.FAILURE on error.
   """
   global curtran
   if not logact: logact = MYDBI['LOGACT']
   if not records: return PgLOG.pglog("Nothing to insert to table " + tablename, logact)
   if logact&PgLOG.DODFLT: prepare_defaults(tablename, records, logact)

   fields = list(records)
   v = records.values()
   values = tuple(zip(*v))   # transpose column lists into row tuples
   cntrow = len(values)
   ids = [] if logact&PgLOG.AUTOID else None

   sqlstr = prepare_insert(tablename, fields)
   if PgLOG.PGLOG['DBGLEVEL']:
      for row in values: PgLOG.mydbg(1000, "Insert: " + str(row))

   count = mycnt = 0
   while True:
      mycur = mycursor()
      if not mycur: return PgLOG.FAILURE
      if ids is None:
         # batch insert in chunks of at most MTRANS rows
         while count < cntrow:
            ncount = count + MYDBI['MTRANS']
            if ncount > cntrow: ncount = cntrow
            try:
               mycur.executemany(sqlstr, values[count:ncount])
               count = ncount
            except MySQL.Error as myerr:
               if not check_dberror(myerr, mycnt, sqlstr, values[count], logact): return PgLOG.FAILURE
               break
      else:
         # row-by-row insert so each lastrowid can be collected
         while count < cntrow:
            record = values[count]
            try:
               mycur.execute(sqlstr, record)
               ids.append(mycur.lastrowid)
               count += 1
            except MySQL.Error as myerr:
               if not check_dberror(myerr, mycnt, sqlstr, record, logact): return PgLOG.FAILURE
               break
      if count >= cntrow: break
      mycnt += 1   # a retryable error occurred; resume where we stopped

   mycur.close()
   if(PgLOG.PGLOG['DBGLEVEL']): PgLOG.mydbg(1000, "mymadd: {} of {} record(s) added to {}".format(count, cntrow, tablename))

   if(logact&PgLOG.ENDLCK):
      endtran()
   elif curtran:
      curtran += count
      if curtran > MYDBI['MTRANS']: starttran()

   return (ids if ids else count)
699
+
700
+ #
701
+ # local function: select prepare for myget() and mymget()
702
+ #
703
def prepare_select(tablenames, fields = None, condition = None, logact = 0):
   """Build a SELECT statement string.

   With tablenames but no fields, a count(*) query is built. With
   PgLOG.DOLOCK in logact a transaction is started and FOR UPDATE is
   appended to the condition. A condition starting with ORDER/GROUP/
   HAVING/OFFSET/LIMIT is appended without a WHERE keyword. Without
   tablenames, fields alone or the bare condition is used as the query.
   """
   sqlstr = ''
   if tablenames:
      if logact&PgLOG.DOLOCK:
         starttran()
         if condition:
            condition += " FOR UPDATE"
         else:
            condition = "FOR UPDATE"
      if fields:
         sqlstr = "SELECT " + fields
      else:
         sqlstr = "SELECT count(*) cntrec"

      sqlstr += " FROM " + tablenames
      if condition:
         if re.match(r'^\s*(ORDER|GROUP|HAVING|OFFSET|LIMIT)\s', condition, re.I):
            sqlstr += " " + condition # no where clause, append directly
         else:
            sqlstr += " WHERE " + condition

   elif fields:
      sqlstr = "SELECT " + fields
   elif condition:
      sqlstr = condition

   if PgLOG.PGLOG['DBGLEVEL']: PgLOG.mydbg(1000, sqlstr)

   return sqlstr
733
+
734
+ #
735
+ # tablenames: comma deliminated string of one or more tables and more than one table for joining,
736
+ # fields: comma deliminated string of one or more field names,
737
+ # condition: querry conditions for where clause
738
+ # return a dict reference with keys as field names upon success
739
+ #
740
def myget(tablenames, fields, condition = None, logact = 0):
   """Retrieve a single record.

   tablenames - comma delimited table name(s), joined when more than one
   fields     - comma delimited field names; when empty a count(*) query
                is run and the count is returned instead of a record
   condition  - where-clause condition (LIMIT 1 is appended if missing)
   Returns a dict of field name -> value, None when no row matches, the
   record count for a count query, or PgLOG.FAILURE on error.
   """
   if not logact: logact = MYDBI['LOGACT']
   # only one row is wanted; cap the query unless the caller already did
   if fields and condition and not re.search(r'limit 1$', condition, re.I): condition += " LIMIT 1"
   sqlstr = prepare_select(tablenames, fields, condition, logact)
   ucname = True if logact&PgLOG.UCNAME else False   # upper-case column names on request
   mycnt = 0
   while True:
      mycur = mycursor()
      if not mycur: return PgLOG.FAILURE
      try:
         mycur.execute(sqlstr)
         v = mycur.fetchone()
         if v:
            if ucname:
               c = [col.upper() for col in mycur.column_names]
            else:
               c = mycur.column_names
            record = dict(zip(c,v))
            # decode known byte fields of this table to str
            if tablenames in MYSTRS: decode_byte_record(MYSTRS[tablenames], record)
         else:
            record = None
         mycur.close()
      except MySQL.Error as myerr:
         if not check_dberror(myerr, mycnt, sqlstr, None, logact): return PgLOG.FAILURE
      else:
         break
      mycnt += 1

   if record and tablenames and not fields:
      # count(*) query: return the count value itself
      if PgLOG.PGLOG['DBGLEVEL']:
         PgLOG.mydbg(1000, "myget: {} record(s) found from {}".format(record['cntrec'], tablenames))
      return record['cntrec']
   elif PgLOG.PGLOG['DBGLEVEL']:
      cnt = 1 if record else 0
      PgLOG.mydbg(1000, "myget: {} record retrieved from {}".format(cnt, tablenames))

   return record
778
+
779
+ #
780
+ # tablenames: comma deliminated string of one or more tables and more than one table for joining,
781
+ # fields: comma deliminated string of one or more field names,
782
+ # condition: querry conditions for where clause
783
+ # return a two dimension array reference with field names and values upon success
784
+ #
785
def myaget(tablenames, fields, condition = None, logact = 0):
   """Retrieve a single record as [column_names, values].

   Same query behavior as myget() (LIMIT 1 is appended if missing), but
   the result is a two-element list of the column-name tuple/list and the
   value tuple instead of a dict. Returns None when no row matches and
   PgLOG.FAILURE on error.
   """
   if not logact: logact = MYDBI['LOGACT']
   if fields and condition and not re.search(r'limit 1$', condition, re.I): condition += " LIMIT 1"
   sqlstr = prepare_select(tablenames, fields, condition, logact)
   ucname = True if logact&PgLOG.UCNAME else False
   mycnt = 0
   while True:
      mycur = mycursor()
      if not mycur: return PgLOG.FAILURE
      try:
         mycur.execute(sqlstr)
         v = mycur.fetchone()
         if v:
            if ucname:
               c = [col.upper() for col in mycur.column_names]
            else:
               c = mycur.column_names
            record = [c, v]
         else:
            record = None
         mycur.close()
      except MySQL.Error as myerr:
         if not check_dberror(myerr, mycnt, sqlstr, None, logact): return PgLOG.FAILURE
      else:
         break
      mycnt += 1

   if PgLOG.PGLOG['DBGLEVEL']:
      cnt = 1 if record else 0
      # debug label fixed: the original reported this as "myget"
      PgLOG.mydbg(1000, "myaget: {} record retrieved from {}".format(cnt, tablenames))

   return record
818
+
819
+ #
820
+ # tablenames: comma deliminated string of one or more tables and more than one table for joining,
821
+ # fields: comma deliminated string of one or more field names,
822
+ # condition: querry conditions for where clause
823
+ # return a dict reference with keys as field names upon success, values for each field name
824
+ # are in a list. All lists are the same length with missing values set to None
825
+ #
826
def mymget(tablenames, fields, condition, logact = 0):
   """Retrieve multiple records.

   tablenames - comma delimited table name(s), joined when more than one
   fields     - comma delimited field names
   condition  - where-clause string, or a dict of per-row condition
                values (delegated to myhget())
   Returns a dict of field name -> list of values (lists share the same
   length), None when no row matches, or PgLOG.FAILURE on error.
   """
   if not logact: logact = MYDBI['LOGACT']
   # a dict condition holds per-row condition values; delegate with an
   # empty condition string (the original passed the dict as cndstr and
   # logact as cnddict, which could never match myhget's signature)
   if isinstance(condition, dict): return myhget(tablenames, fields, '', condition, logact)
   sqlstr = prepare_select(tablenames, fields, condition, logact)
   ucname = True if logact&PgLOG.UCNAME else False
   count = mycnt = 0
   while True:
      mycur = mycursor()
      if not mycur: return PgLOG.FAILURE
      try:
         mycur.execute(sqlstr)
         rows = mycur.fetchall()
         if rows:
            if ucname:
               cols = [col.upper() for col in mycur.column_names]
            else:
               cols = mycur.column_names
            ccnt = len(cols)
            # transpose row tuples into per-column value lists
            values = list(zip(*rows))
            records = {}
            for i in range(ccnt):
               records[cols[i]] = list(values[i])
            if tablenames in MYSTRS: decode_byte_records(MYSTRS[tablenames], records)
         else:
            records = None
         mycur.close()
      except MySQL.Error as myerr:
         if not check_dberror(myerr, mycnt, sqlstr, None, logact): return PgLOG.FAILURE
      else:
         break
      mycnt += 1

   if PgLOG.PGLOG['DBGLEVEL']:
      count = len(records[cols[0]]) if records else 0
      PgLOG.mydbg(1000, "mymget: {} record(s) retrieved from {}".format(count, tablenames))

   return records
865
+
866
+ #
867
+ # local function: select prepare for myhget()
868
+ #
869
def prepare_hash_select(tablenames, fields, cndstr, cndflds):
   """Build a parameterized SELECT with one 'field=%s' condition per cndflds entry,
   ANDed onto any existing cndstr."""
   clause = cndstr
   parts = ["{}=%s".format(fld) for fld in cndflds]
   if parts:
      clause = " AND ".join(([clause] if clause else []) + parts)

   sqlstr = "SELECT {} FROM {} WHERE {}".format(fields, tablenames, clause)
   if PgLOG.PGLOG['DBGLEVEL']: PgLOG.mydbg(1000, sqlstr)

   return sqlstr
882
+
883
+ #
884
+ # tablenames: comma deliminated string of one or more tables
885
+ # fields: comma deliminated string of one or more field names,
886
+ # cndstr: string query condition for where clause
887
+ # cnddict: condition values, dict with field names : value lists
888
+ # return a dict(field names : value lists) upon success
889
+ #
890
+ # retrieve multiple records from tablenames one for each row condition in condition dict
891
+ #
892
def myhget(tablenames, fields, cndstr, cnddict, logact = 0):
   """Retrieve multiple records, one per row of condition values.

   cndstr  - extra condition string ANDed with the per-field conditions
   cnddict - dict of condition field names -> equal-length value lists;
             each positional row of values forms one query condition
   Returns a dict of field name -> list of values, None when no rows
   match, or PgLOG.FAILURE on error.
   """
   if not logact: logact = MYDBI['LOGACT']
   if not tablenames: return PgLOG.pglog("Miss Table name to query", logact)
   if not fields: return PgLOG.pglog("Nothing to query " + tablenames, logact)
   if not cnddict: return PgLOG.pglog("Miss condition dict values to query " + tablenames, logact)
   ucname = True if logact&PgLOG.UCNAME else False

   cndflds = list(cnddict)
   v = cnddict.values()
   values = tuple(zip(*v))   # transpose value lists into per-row tuples
   cntval = len(values)

   sqlstr = prepare_hash_select(tablenames, fields, cndstr, cndflds)
   if PgLOG.PGLOG['DBGLEVEL']:
      for row in values:
         PgLOG.mydbg(1000, "Query from " + tablenames + " for Condition values: " + str(row))

   count = mycnt = 0
   while True:
      mycur = mycursor()
      if not mycur: return PgLOG.FAILURE
      try:
         # NOTE(review): executemany with a SELECT relies on driver-specific
         # behavior in mysql-connector -- confirm it returns the combined rows
         mycur.executemany(sqlstr, values)
         rows = mycur.fetchall()
         if rows:
            if ucname:
               cols = [col.upper() for col in mycur.column_names]
            else:
               cols = mycur.column_names
            ccnt = len(cols)
            values = list(zip(*rows))
            records = {}
            for i in range(ccnt):
               records[cols[i]] = list(values[i])
            # fixed: records holds lists per field, so the multi-record
            # decoder must be used (the original called decode_byte_record,
            # which expects a single record of scalar values)
            if tablenames in MYSTRS: decode_byte_records(MYSTRS[tablenames], records)
         else:
            records = None
         mycur.close()
      except MySQL.Error as myerr:
         if not check_dberror(myerr, mycnt, sqlstr, values[0], logact): return PgLOG.FAILURE
      else:
         break
      mycnt += 1

   if PgLOG.PGLOG['DBGLEVEL']:
      count = len(records[cols[0]]) if records else 0
      PgLOG.mydbg(1000, "myhget: {} record(s) retrieved from {}".format(count, tablenames))

   return records
942
+
943
+ #
944
+ # local fucntion: update prepare for mymupdt
945
+ #
946
def prepare_update(tablename, fields, cndstr, cndflds = None):
   """Build a parameterized UPDATE statement.

   fields  - field names to set, one %s placeholder each
   cndstr  - literal where-clause condition; when empty the condition is
             built from cndflds as ANDed 'field=%s' terms
   """
   strset = ",".join("{}=%s".format(fld) for fld in fields)

   # build condition string from cndflds when no literal condition given
   if not cndstr:
      try:
         cndstr = " AND ".join("{}=%s".format(fld) for fld in cndflds)
      except TypeError as e:
         # iterating a missing (None) cndflds raises TypeError; the
         # original caught NameError, which a parameter can never raise
         PgLOG.pglog("[prepare_update] TypeError: {}".format(e), PgLOG.LGEREX)

   sqlstr = "UPDATE {} SET {} WHERE {}".format(tablename, strset, cndstr)
   if PgLOG.PGLOG['DBGLEVEL']: PgLOG.mydbg(1000, sqlstr)

   return sqlstr
968
+
969
+ #
970
+ # update one or multiple rows in tablename
971
+ # tablename: update for one table name each call
972
+ # record: dict with field names : values
973
+ # condition: update conditions for where clause)
974
# return number of rows updated upon success
975
+ #
976
def myupdt(tablename, record, condition, logact = 0):
    """Update one or multiple rows in tablename.

    tablename - update one table name per call
    record    - dict of field name : new value
    condition - literal WHERE clause; required
    Returns the number of rows updated, or PgLOG.FAILURE.
    """
    global curtran
    if not logact: logact = MYDBI['LOGACT']
    # bug fix: return on bad input instead of continuing to build a
    # malformed UPDATE statement (matches the myhget validation pattern)
    if not record: return PgLOG.pglog("Nothing updates to " + tablename, logact)
    if not condition or isinstance(condition, int): return PgLOG.pglog("Miss condition to update " + tablename, logact)
    if logact&PgLOG.DODFLT: prepare_default(tablename, record, logact)

    fields = list(record)
    values = tuple(record.values())

    sqlstr = prepare_update(tablename, fields, condition)
    if PgLOG.PGLOG['DBGLEVEL']: PgLOG.mydbg(1000, "Update {} for {}".format(tablename, values))

    ret = mycnt = 0
    while True:    # retry until check_dberror() decides the error is fatal
        mycur = mycursor()
        if not mycur: return PgLOG.FAILURE
        try:
            mycur.execute(sqlstr, values)
            ret = mycur.rowcount
            mycur.close()
        except MySQL.Error as myerr:
            if not check_dberror(myerr, mycnt, sqlstr, values, logact): return PgLOG.FAILURE
        else:
            break
        mycnt += 1

    if PgLOG.PGLOG['DBGLEVEL']: PgLOG.mydbg(1000, "myupdt: {} record(s) updated to {}".format(ret, tablename))
    if(logact&PgLOG.ENDLCK):
        endtran()
    elif curtran:
        # count this change toward the open transaction; commit in chunks
        curtran += ret
        if curtran > MYDBI['MTRANS']: starttran()

    return ret
1012
+
1013
+ #
1014
+ # update multiple records in tablename
1015
+ # tablename: update for one table name each call
1016
+ # records: update values, dict with field names : value lists
1017
+ # cnddict: condition values, dict with field names : value lists
1018
+ # return number of records updated upon success
1019
+ #
1020
def mymupdt(tablename, records, condhash, logact = 0):
    """Update multiple rows of tablename in batches via executemany().

    tablename - update one table name per call
    records   - dict of field name : list of new values (one entry per row)
    condhash  - dict of condition field name : list of values (one per row)
    logact    - PgLOG action flags; defaults to MYDBI['LOGACT']
    Returns the number of rows processed.
    """
    global curtran
    if not logact: logact = MYDBI['LOGACT']
    if not records: PgLOG.pglog("Nothing updates to " + tablename, logact)
    if not condhash or isinstance(condhash, int): PgLOG.pglog("Miss condition to update to " + tablename, logact)
    if logact&PgLOG.DODFLT: prepare_defaults(tablename, records, logact)

    fields = list(records)
    fldvals = tuple(records.values())
    cntrow = len(fldvals[0])            # number of rows to update
    cndflds = list(condhash)
    cndvals = tuple(condhash.values())
    count = len(cndvals[0])
    if count != cntrow: return PgLOG.pglog("Field/Condition value counts Miss match {}/{} to update {}".format(cntrow, count, tablename), logact)
    # each executemany row is (set values..., condition values...)
    v = fldvals + cndvals
    values = tuple(zip(*v))

    sqlstr = prepare_update(tablename, fields, None, condhash)
    if PgLOG.PGLOG['DBGLEVEL']:
        for row in values: PgLOG.mydbg(1000, "Update {} for {}".format(tablename, row))

    count = mycnt = 0
    while True:    # retry loop; check_dberror() decides if an error is retryable
        mycur = mycursor()
        if not mycur: return PgLOG.FAILURE
        while count < cntrow:
            # process at most MYDBI['MTRANS'] rows per executemany() call
            ncount = count + MYDBI['MTRANS']
            if ncount > cntrow: ncount = cntrow
            try:
                mycur.executemany(sqlstr, values[count:ncount])
                count = ncount
            except MySQL.Error as myerr:
                if not check_dberror(myerr, mycnt, sqlstr, values[0], logact): return PgLOG.FAILURE
                break    # retryable error: get a fresh cursor and try again
        if count >= cntrow: break
        mycnt += 1

    mycur.close()

    if PgLOG.PGLOG['DBGLEVEL']: PgLOG.mydbg(1000, "mymupdt: {}/{} record(s) updated to {}".format(count, cntrow, tablename))
    if(logact&PgLOG.ENDLCK):
        endtran()
    elif curtran:
        curtran += count
        if curtran > MYDBI['MTRANS']: starttran()

    return count
1068
+
1069
+ #
1070
# delete one or multiple records in tablename according to condition
1071
+ # tablename: delete for one table name each call
1072
+ # condition: delete conditions for where clause
1073
+ # return number of records deleted upon success
1074
+ #
1075
def mydel(tablename, condition, logact = 0):
    """Delete one or multiple records from tablename.

    tablename - delete from one table name per call
    condition - literal WHERE clause; required
    Returns the number of rows deleted, or PgLOG.FAILURE.
    """
    global curtran
    if not logact: logact = MYDBI['LOGACT']
    # bug fix: return on missing condition instead of executing a
    # malformed DELETE statement
    if not condition: return PgLOG.pglog("Miss condition to delete from " + tablename, logact)

    sqlstr = "DELETE FROM {} WHERE {}".format(tablename, condition)
    if PgLOG.PGLOG['DBGLEVEL']: PgLOG.mydbg(100, sqlstr)

    ret = mycnt = 0
    while True:    # retry until check_dberror() decides the error is fatal
        mycur = mycursor()
        if not mycur: return PgLOG.FAILURE
        try:
            mycur.execute(sqlstr)
            ret = mycur.rowcount
            mycur.close()
        except MySQL.Error as myerr:
            if not check_dberror(myerr, mycnt, sqlstr, None, logact): return PgLOG.FAILURE
        else:
            break
        mycnt += 1

    if PgLOG.PGLOG['DBGLEVEL']: PgLOG.mydbg(1000, "mydel: {} record(s) deleted from {}".format(ret, tablename))
    if logact&PgLOG.ENDLCK:
        endtran()
    elif curtran:
        # count this change toward the open transaction; commit in chunks
        curtran += ret
        if curtran > MYDBI['MTRANS']: starttran()

    return ret
1106
+
1107
+ #
1108
+ # sqlstr: a complete sql string
1109
+ # return number of record affected upon success
1110
+ #
1111
def myexec(sqlstr, logact = 0):
    """Execute a complete SQL statement.

    sqlstr - a full SQL string (no placeholders)
    Returns the number of rows affected, or PgLOG.FAILURE.
    """
    global curtran
    if not logact: logact = MYDBI['LOGACT']
    if PgLOG.PGLOG['DBGLEVEL']: PgLOG.mydbg(100, sqlstr)

    affected = trycnt = 0
    while True:    # keep retrying while check_dberror() allows it
        cur = mycursor()
        if not cur: return PgLOG.FAILURE
        try:
            cur.execute(sqlstr)
            affected = cur.rowcount
            cur.close()
        except MySQL.Error as err:
            if not check_dberror(err, trycnt, sqlstr, None, logact): return PgLOG.FAILURE
        else:
            break
        trycnt += 1

    if PgLOG.PGLOG['DBGLEVEL']: PgLOG.mydbg(1000, "myexec: {} record(s) affected for {}".format(affected, sqlstr))
    if logact&PgLOG.ENDLCK:
        endtran()
    elif curtran:
        # count this change toward the open transaction; commit in chunks
        curtran += affected
        if curtran > MYDBI['MTRANS']: starttran()

    return affected
1139
+
1140
+ #
1141
+ # tablename: one table name to a temporary table
1142
# fromtable: table name the data is gathered from
# fields: fields to select from the source table
# condition: query conditions for where clause
1145
+ # return number of records created upon success
1146
+ #
1147
def mytemp(tablename, fromtable, fields, condition = None, logact = 0):
    """Create temporary table tablename from a SELECT on fromtable.

    fields    - select list gathered from fromtable
    condition - optional WHERE clause limiting the copied rows
    Returns the number of records created upon success.
    """
    parts = ["CREATE TEMPORARY TABLE {} SELECT {} FROM {}".format(tablename, fields, fromtable)]
    if condition: parts.append("WHERE " + condition)
    return myexec(" ".join(parts), logact)
1153
+
1154
+ #
1155
+ # get condition for given table name for accessing information_schema
1156
+ #
1157
def table_condition(tablename):
    """Build an information_schema condition for tablename.

    A 'db.table' name supplies its own schema; a bare table name uses
    the connected database MYDBI['DBNAME'].
    """
    ms = re.match(r'(.+)\.(.+)', tablename)
    if ms:
        dbname, tbname = ms.group(1), ms.group(2)
    else:
        dbname, tbname = MYDBI['DBNAME'], tablename

    return "TABLE_NAME = '{}' AND TABLE_SCHEMA = '{}'".format(tbname, dbname)
1168
+
1169
+ #
1170
+ # check if a given table name exists or not
1171
+ # tablename: one table name to check
1172
+ #
1173
def mycheck(tablename, logact = 0):
    """Return PgLOG.SUCCESS if tablename exists in the connected server,
    PgLOG.FAILURE otherwise."""
    found = myget('information_schema.tables', None, table_condition(tablename), logact)
    return PgLOG.SUCCESS if found else PgLOG.FAILURE
1179
+
1180
+ #
1181
+ # group of functions to check parent records and add an empty one if missed
1182
+ # return user.uid upon success, 0 otherwise
1183
+ #
1184
def check_user_uid(userno, date = None):
    """Return user.uid for a scientist number; add a placeholder user
    record when none is on file.

    userno - scientist number (int or numeric string)
    date   - access date bounding start_date/until_date; defaults to current
    Returns user.uid upon success, 0 for an empty userno.
    """
    if not userno: return 0
    if type(userno) is str: userno = int(userno)

    if date is None:
        datecond = "until_date IS NULL"
        date = 'today'
    else:
        datecond = "(start_date IS NULL OR start_date <= '{}') AND (until_date IS NULL OR until_date >= '{}')".format(date, date)

    # try the date-bounded record first
    urec = myget("user", "uid", "userno = {} AND {}".format(userno, datecond), MYDBI['LOGACT'])
    if not urec:
        if userno not in NMISSES:
            PgLOG.pglog("{}: Scientist ID NOT on file for {}".format(userno, date), PgLOG.LGWNEM)
            NMISSES.append(userno)
        # check again if a user is on file with different date range
        urec = myget("user", "uid", "userno = {}".format(userno), MYDBI['LOGACT'])
    if urec: return urec['uid']

    # still missing; gather UCAR info and add a new user record
    newrec = ucar_user_info(userno)
    if not newrec: newrec = {'userno' : userno, 'stat_flag' : 'M'}
    uid = myadd("user", newrec, (MYDBI['LOGACT']|PgLOG.EXITLG|PgLOG.AUTOID))
    if uid: PgLOG.pglog("{}: Scientist ID Added as user.uid = {}".format(userno, uid), PgLOG.LGWNEM)

    return uid
1212
+
1213
+ # return user.uid upon success, 0 otherwise
1214
def get_user_uid(logname, date = None):
    """Return user.uid for a UCAR login name; add a placeholder user
    record when none is on file.

    logname - UCAR login name
    date    - access date bounding start_date/until_date; defaults to current
    Returns user.uid upon success, 0 for an empty logname.
    """
    if not logname: return 0
    if not date:
        date = 'today'
        datecond = "until_date IS NULL"
    else:
        datecond = "(start_date IS NULL OR start_date <= '{}') AND (until_date IS NULL OR until_date >= '{}')".format(date, date)

    # try the date-bounded record first
    urec = myget("user", "uid", "logname = '{}' AND {}".format(logname, datecond), MYDBI['LOGACT'])
    if not urec:
        if logname not in LMISSES:
            PgLOG.pglog("{}: UCAR Login Name NOT on file for {}".format(logname, date), PgLOG.LGWNEM)
            LMISSES.append(logname)
        # check again if a user is on file with different date range
        urec = myget("user", "uid", "logname = '{}'".format(logname), MYDBI['LOGACT'])
    if urec: return urec['uid']

    # still missing; gather UCAR info and add a new user record
    newrec = ucar_user_info(0, logname)
    if not newrec: newrec = {'logname' : logname, 'stat_flag' : 'M'}
    uid = myadd("user", newrec, (MYDBI['LOGACT']|PgLOG.EXITLG|PgLOG.AUTOID))
    if uid: PgLOG.pglog("{}: UCAR Login Name Added as user.uid = {}".format(logname, uid), PgLOG.LGWNEM)

    return uid
1240
+
1241
+ #
1242
+ # get ucar user info for given userno (scientist number) or logname (Ucar login)
1243
+ #
1244
def ucar_user_info(userno, logname = None):
    """Get UCAR user info for a scientist number (userno) or UCAR login name.

    Calls the external 'pgperson' utility and maps its output into RDADB
    user-table field names; 'pgusername' supplies the start/end dates.
    Returns a dict of user fields, or None if nothing is found.
    """
    # pgperson output key -> RDADB user field name
    MATCH = {
        'upid' : "upid",
        'uid' : "userno",
        'username' : "logname",
        'lastName' : "lstname",
        'firstName' : "fstname",
        'active' : "stat_flag",
        'internalOrg' : "division",
        'externalOrg' : "org_name",
        'country' : "country",
        'forwardEmail' : "email",
        'email' : "ucaremail",
        'phone' : "phoneno"
    }

    buf = PgLOG.pgsystem("pgperson " + ("-uid {}".format(userno) if userno else "-username {}".format(logname)), PgLOG.LOGWRN, 20)
    if not buf: return None

    myrec = {}
    for line in buf.split('\n'):
        ms = re.match(r'^(.+)<=>(.*)$', line)
        if ms:
            (key, val) = ms.groups()
            if key in MATCH:
                if key == 'upid' and myrec: break # get one record only
                myrec[MATCH[key]] = val

    if not myrec: return None

    # use .get() throughout: pgperson may not report every MATCH key
    if userno:
        myrec['userno'] = userno
    elif myrec.get('userno'):
        myrec['userno'] = userno = int(myrec['userno'])
    if myrec.get('upid'): myrec['upid'] = int(myrec['upid'])
    if myrec.get('stat_flag'): myrec['stat_flag'] = 'A' if myrec['stat_flag'] == '1' else 'C'
    # bug fix: ', re.I' was mistyped as '. re.I' (attribute access on the
    # email string raised AttributeError at runtime)
    if myrec.get('email') and re.search(r'\.ucar\.edu$', myrec['email'], re.I):
        myrec['email'] = myrec.get('ucaremail')    # prefer the direct UCAR address
    myrec['country'] = set_country_code(myrec.get('email', ''), myrec.get('country'))
    val = "NCAR" if myrec.get('division') else None
    myrec['org_type'] = get_org_type(val, myrec.get('email'))

    # bug fix: myrec[logname] used the function argument as the dict key
    # instead of the literal 'logname' field
    buf = PgLOG.pgsystem("pgusername {}".format(myrec.get('logname')), PgLOG.LOGWRN, 20)
    if not buf: return myrec

    for line in buf.split('\n'):
        ms = re.match(r'^(.+)<=>(.*)$', line)
        if ms:
            (key, val) = ms.groups()
            if key == 'startDate':
                # keep only the date portion when a time part is attached
                m = re.match(r'^(\d+-\d+-\d+)\s', val)
                myrec['start_date'] = m.group(1) if m else val
            if key == 'endDate':
                m = re.match(r'^(\d+-\d+-\d+)\s', val)
                myrec['until_date'] = m.group(1) if m else val

    return myrec
1312
+
1313
+ #
1314
+ # set country code for given coutry name or email address
1315
+ #
1316
def set_country_code(email, country = None):
    """Normalize a country name to the dotted upper-case form used in RDADB;
    when no country is given, derive one from the email address."""
    # aliases and short forms mapped to canonical country names
    codes = {
        'CHINA' : "P.R.CHINA",
        'ENGLAND' : "UNITED.KINGDOM",
        'FR' : "FRANCE",
        'KOREA' : "SOUTH.KOREA",
        'USSR' : "RUSSIA",
        'US' : "UNITED.STATES",
        'U.S.A.' : "UNITED.STATES"
    }

    if not country: return email_to_country(email)

    country = country.upper()
    ms = re.match(r'^(\w+)\s(\w+)$', country)
    if ms:
        # two-word name: join the words with a dot
        return "{}.{}".format(ms.group(1), ms.group(2))
    return codes.get(country, country)
1339
+
1340
+ # return wuser.wuid upon success, 0 otherwise
1341
def check_wuser_wuid(email, date = None):
    """Return wuser.wuid for the given email; when missing, add a wuser
    record (seeded from a matching ruser record when available).

    date bounds start_date/until_date; defaults to the current record.
    Returns wuser.wuid upon success, 0 otherwise.
    """
    if not email: return 0
    emcond = "email = '{}'".format(email)
    if not date:
        date = 'today'
        datecond = "until_date IS NULL"
    else:
        datecond = "(start_date IS NULL OR start_date <= '{}') AND (until_date IS NULL OR until_date >= '{}')".format(date, date)

    myrec = myget("wuser", "wuid", "{} AND {}".format(emcond, datecond), MYDBI['LOGACT'])
    if myrec: return myrec['wuid']

    # check again if a user is on file with different date range
    myrec = myget("wuser", "wuid", emcond, PgLOG.LOGERR)
    if myrec: return myrec['wuid']

    # now add one in
    record = {'email' : email}
    # check again if a ruser is on file
    myrec = myget("ruser", "*", emcond + " AND end_date IS NULL", MYDBI['LOGACT'])
    if not myrec: myrec = myget("ruser", "*", emcond, MYDBI['LOGACT'])

    if myrec:
        # seed the new wuser record from the matching ruser record
        record['ruid'] = myrec['id']
        record['fstname'] = myrec['fname']
        record['lstname'] = myrec['lname']
        record['country'] = myrec['country']
        record['org_type'] = get_org_type(myrec['org_type'], myrec['email'])
        record['start_date'] = str(myrec['rdate'])
        if myrec['end_date']:
            record['until_date'] = str(myrec['end_date'])
            record['stat_flag'] = 'C'    # registration closed
        else:
            record['stat_flag'] = 'A'    # registration active

        if myrec['title']: record['utitle'] = myrec['title']
        if myrec['mname']: record['midinit'] = myrec['mname'][0]
        if myrec['org']: record['org_name'] = myrec['org']
    else:
        # no ruser record; derive what we can from the email itself
        record['stat_flag'] = 'M'
        record['org_type'] = get_org_type('', email)
        record['country'] = email_to_country(email)

    wuid = myadd("wuser", record, PgLOG.LOGERR|PgLOG.AUTOID)
    if wuid:
        if myrec:
            PgLOG.pglog("{}({}, {}) Added as wuid({})".format(email, myrec['lname'], myrec['fname'], wuid), PgLOG.LGWNEM)
        else:
            PgLOG.pglog("{} Added as wuid({})".format(email, wuid), PgLOG.LGWNEM)
        return wuid

    return 0
1394
+
1395
+ #
1396
+ # for given email to get long country name
1397
+ #
1398
def email_to_country(email):
    """Map an email address to a long country name.

    A two-letter top-level domain is looked up in table countries; the
    common US TLDs map to UNITED.STATES; anything else is UNKNOWN.
    """
    ms = re.search(r'\.(\w\w)$', email)
    if ms:
        myrec = myget("countries", "token", "domain_id = '{}'".format(ms.group(1)), MYDBI['LOGACT']|PgLOG.EXITLG)
        if myrec: return myrec['token']
    elif re.search(r'\.(gov|edu|mil|org|com|net)$', email):
        return "UNITED.STATES"

    # bug fix: previously fell through returning None when a two-letter
    # domain had no countries record; return "UNKNOWN" in all failure cases
    return "UNKNOWN"
1408
+
1409
+ #
1410
+ # check wfile recursively to find the matching record
1411
+ #
1412
def check_wfile_recursive(wfile, dscond):
    """Find a wfile record matching wfile, recursing on the base name and
    finally falling back to a trailing LIKE match.

    dscond - field expression (with dataset condition) compared to wfile
    Returns the wfile record dict, or None when nothing matches.
    """
    myrec = myget("wfile", "*", "{} = '{}'".format(dscond, wfile), MYDBI['LOGACT'])
    if not myrec:
        if wfile.find('/') > -1:
            # strip the leading path and try again
            myrec = check_wfile_recursive(op.basename(wfile), dscond)
        else:
            # bug fix: MYDBI['LOGATC'] was a typo for 'LOGACT' (raised KeyError)
            myrec = myget("wfile", "*", "{} LIKE '%{}'".format(dscond, wfile), MYDBI['LOGACT'])

    return myrec
1422
+
1423
+ #
1424
# bump the version number in table dataset for the given dataset
1425
+ #
1426
def reset_rdadb_version(dsid):
    """Increment the RDADB version number of the given dataset."""
    sqlstr = "UPDATE dataset SET version = version + 1 WHERE dsid = '{}'".format(dsid)
    myexec(sqlstr, MYDBI['LOGACT'])
1429
+
1430
+ #
1431
+ # check the use rdadb flag in table dataset for a given dataset and given values
1432
+ #
1433
def use_rdadb(dsid, logact = 0, vals = None):
    """Check the use_rdadb flag in table dataset for dsid.

    vals - acceptable flag letters (defaults to "IPYMW")
    Returns the flag letter when acceptable, 'N' when the dataset exists
    but the flag is unset/unacceptable, '' when not in RDADB.
    """
    if not dsid: return ''    # no dataset, not in RDADB

    myrec = myget("dataset", "use_rdadb", "dsid = '{}'".format(dsid), MYDBI['LOGACT']|PgLOG.EXITLG)
    if not myrec:
        if logact:
            PgLOG.pglog("Dataset '{}' is not in RDADB!".format(dsid), logact)
        return ''

    flag = myrec['use_rdadb']
    if flag:
        if not vals: vals = "IPYMW"    # default to Internal; Publishable; Yes RDADB
        if flag in vals: return flag
    return 'N'    # dataset record exists but flag not acceptable
1448
+
1449
+ #
1450
# fld: field name for query condition
# vals: reference to array of values
# isstr: 1 for string values requires quotes and support wildcard
# noand: 1 for skipping the leading ' AND ' for condition
1454
+ # return a condition string for a given field
1455
+ #
1456
def get_field_condition(fld, vals, isstr = 0, noand = 0):
    """Build a WHERE-clause fragment for field fld from a list of values.

    fld   - field name for the query condition
    vals  - values; a leading '!' (MYSIGNS[0]) negates the whole condition,
            and '<', '>', '<>' entries apply to the value(s) that follow
    isstr - 1 for string values (quoted; '%' -> LIKE, regex chars -> REGEXP)
    noand - 1 to skip the leading ' AND '
    Returns the condition string, or '' for an empty value list.
    """
    cnd = wcnd = negative = ''
    sign = "="
    logic = " OR "
    count = len(vals) if vals else 0
    if count == 0: return ''
    # counters: null strings, pending sign values, where-clause terms, equal values
    ncnt = scnt = wcnt = cnt = 0
    for i in range(count):
        val = vals[i]
        # skip undefined values and consecutive duplicates
        if val is None or (i > 0 and val == vals[i-1]): continue
        if i == 0 and val == MYSIGNS[0]:
            # leading '!' negates the condition and flips OR -> AND
            negative = "NOT "
            logic = " AND "
            continue
        if scnt == 0 and isinstance(val, str):
            # a comparison sign entry applies to the following value(s)
            ms = re.match(r'^({})$'.format('|'.join(MYSIGNS[1:])), val)
            if ms:
                osign = sign = ms.group(1)
                scnt += 1
                if sign == "<>":
                    scnt += 1    # range: two following values form a BETWEEN
                    sign = negative + "BETWEEN"
                elif negative:
                    # invert < / > under negation
                    sign = "<=" if (sign == ">") else ">="
                continue
        if isstr:
            if not isinstance(val, str): val = str(val)
            if sign == "=":
                if not val:
                    ncnt += 1 # found null string
                elif val.find('%') > -1:
                    sign = negative + "LIKE"
                elif re.search(r'[\[\(\?\.]', val):
                    sign = negative + "REGEXP"
            if val.find("'") != 0:
                val = "'{}'".format(val)    # quote unless already quoted
        elif isinstance(val, str):
            # numeric field given as string; convert for the SQL literal
            if val.find('.') > -1:
                val = float(val)
            else:
                val = int(val)
        if sign == "=":
            # plain values accumulate into an IN()/equality list
            if cnt > 0: cnd += ", "
            cnd += str(val)
            cnt += 1
        else:
            if sign == "AND":
                # second half of a BETWEEN range
                wcnd += " {} {}".format(sign, val)
            else:
                if wcnt > 0: wcnd += logic
                wcnd += "{} {} {}".format(fld, sign, val)
                wcnt += 1
            if re.search(r'BETWEEN$', sign):
                sign = "AND"    # expect the range's upper bound next
            else:
                sign = "="
                scnt = 0

    if scnt > 0:
        # a sign entry was not followed by enough values
        s = 's' if scnt > 1 else ''
        PgLOG.pglog("Need {} value{} after sign '{}'".format(scnt, s, osign), PgLOG.LGEREX)
    if wcnt > 1: wcnd = "({})".format(wcnd)
    if cnt > 0:
        if cnt > 1:
            cnd = "{} {}IN ({})".format(fld, negative, cnd)
        else:
            cnd = "{} {} {}".format(fld, ("<>" if negative else "="), cnd)
        if ncnt > 0:
            # include the NULL test for empty string values
            ncnd = "{} IS {}NULL".format(fld, negative)
            cnd = "({}{}{})".format(cnd, logic, ncnd)
        if wcnt > 0: cnd = "({}{}{})".format(cnd, logic, wcnd)
    elif wcnt > 0:
        cnd = wcnd
    if cnd and not noand: cnd = " AND " + cnd

    return cnd
1533
+
1534
+ #
1535
+ # build up fieldname string for given or default condition
1536
+ #
1537
def fieldname_string(fnames, dnames = None, anames = None, wflds = None):
    """Build a field-name letter string for a query.

    fnames - requested letters; empty means dnames (defaults), 'all' (any
             case) means anames (all field names)
    wflds  - with-field letters inserted before their anchor field
             (Q before R, Y before X, G before I) or prepended otherwise
    """
    if not fnames:
        fnames = dnames    # include default field names
    elif re.match(r'^all$', fnames, re.I):
        fnames = anames    # include all field names

    if not wflds: return fnames

    anchors = {"Q" : "R", "Y" : "X", "G" : "I"}    # with-field -> anchor field
    for wfld in wflds:
        if not wfld or wfld in fnames: continue    # empty field, or included already
        anchor = anchors.get(wfld)
        pos = fnames.find(anchor) if anchor else -1
        if pos < 0:
            fnames = wfld + fnames    # no anchor found; prepend
        else:
            fnames = fnames[:pos] + wfld + fnames[pos:]    # insert before anchor

    return fnames
1563
+
1564
+ #
1565
+ # Function get_group_field_path(gindex: group index
1566
+ # dsid: dataset id
1567
+ # field: path field name: webpath or savedpath)
1568
+ # go through group tree upward to find a none-empty path, return it or null
1569
+ #
1570
def get_group_field_path(gindex, dsid, field):
    """Walk the group tree upward from gindex to find a non-empty value of
    the given path field (webpath or savedpath).

    gindex 0 checks the dataset record itself.
    Returns the path value, or None when none is set anywhere up the tree.
    """
    logact = MYDBI['LOGACT']|PgLOG.EXITLG
    if gindex:
        grec = myget("dsgroup", "pindex, {}".format(field),
                     "dsid = '{}' AND gindex = {}".format(dsid, gindex), logact)
    else:
        grec = myget("dataset", field,
                     "dsid = '{}'".format(dsid), logact)

    if not grec: return None
    if grec[field]: return grec[field]
    if gindex:
        # empty at this level; try the parent group
        return get_group_field_path(grec['pindex'], dsid, field)
    return None
1585
+
1586
+ #
1587
+ # get the specialist info for a given dataset
1588
+ #
1589
def get_specialist(dsid, logact = 0):
    """Get specialist info (specialist, lstname, fstname) for the primary
    dsowner of dsid, defaulting to 'datahelp'; results are cached in
    SPECIALIST keyed by dsid."""
    if not logact: logact = MYDBI['LOGACT']
    # bug fix: cache was keyed on the literal string 'dsid', so every
    # dataset shared a single cache slot; key on the dsid value instead
    if dsid in SPECIALIST: return SPECIALIST[dsid]

    myrec = myget("dsowner, dssgrp", "specialist, lstname, fstname",
                  "specialist = logname AND dsid = '{}' AND priority = 1".format(dsid), logact)
    if myrec:
        if myrec['specialist'] == "datahelp" or myrec['specialist'] == "dss":
            myrec['lstname'] = "Help"
            myrec['fstname'] = "Data"
    else:
        # bug fix: myrec is None here; build a fresh dict instead of
        # assigning into the failed query result (raised TypeError)
        myrec = {'specialist' : "datahelp", 'lstname' : "Help", 'fstname' : "Data"}

    SPECIALIST[dsid] = myrec # cache specialist info for dsowner of dsid
    return myrec
1607
+
1608
+ #
1609
+ # build customized email from get_email()
1610
+ #
1611
def build_customized_email(table, field, condition, subject, logact = 0):
    """Compose an email message from PgLOG.get_email() and cache it into
    table.field for the rows matching condition.

    Returns the status from cache_customized_email(), or PgLOG.FAILURE
    when no email body is pending.
    """
    msg = PgLOG.get_email()
    if not msg: return PgLOG.FAILURE

    sender = PgLOG.PGLOG['CURUID'] + "@ucar.edu"
    receiver = PgLOG.PGLOG['EMLADDR'] if PgLOG.PGLOG['EMLADDR'] else (PgLOG.PGLOG['CURUID'] + "@ucar.edu")
    # copy the sender when not already among the receivers
    if receiver.find(sender) < 0: PgLOG.add_carbon_copy(sender, 1)
    if not subject: subject = "Message from {}-{}".format(PgLOG.PGLOG['HOSTNAME'], PgLOG.get_command())

    parts = ["From: {}\nTo: {}\n".format(sender, receiver)]
    if PgLOG.PGLOG['CCDADDR']: parts.append("Cc: {}\n".format(PgLOG.PGLOG['CCDADDR']))
    parts.append("Subject: {}!\n\n{}\n".format(subject, msg))
    ebuf = ''.join(parts)

    estat = cache_customized_email(table, field, condition, ebuf, logact)
    if estat and logact:
        PgLOG.pglog("Email {} cached to '{}.{}' for {}, Subject: {}".format(receiver, table, field, condition, subject), logact)

    return estat
1630
+
1631
+ #
1632
+ # email: full user email address
1633
+ #
1634
+ # get user real name from table ruser for a given email address
1635
+ # opts == 1 : include email
1636
+ # opts == 2 : include org_type
1637
+ # opts == 4 : include country
1638
+ # opts == 8 : include valid_email
1639
+ # opts == 16 : include org
1640
+ #
1641
def get_ruser_names(email, opts = 0, date = None):
    """Get a registered user's real name (plus optional fields) for an email.

    opts is a bit mask of extra fields to include:
      1 - email, 2 - org_type, 4 - country, 8 - valid_email, 16 - org
    date bounds the registration period; defaults to the current date.
    Returns a dict that always holds 'name'; falls back to table user,
    and finally to the local part of the email address.
    """
    fields = "lname lstname, fname fstname"

    if opts is None: opts = 0
    if opts&1: fields += ", email"
    if opts&2: fields += ", org_type"
    if opts&4: fields += ", country"
    if opts&8: fields += ", valid_email"
    if opts&16: fields += ", org"

    if date:
        datecond = "rdate <= '{}' AND (end_date IS NULL OR end_date >= '{}')".format(date, date)
    else:
        datecond = "end_date IS NULL"
        date = time.strftime("%Y-%m-%d", (time.gmtime() if PgLOG.PGLOG['GMTZ'] else time.localtime()))
    emcnd = "email = '{}'".format(email)
    myrec = myget("ruser", fields, "{} AND {}".format(emcnd, datecond), PgLOG.LGEREX)
    if not myrec: # missing user record add one in
        PgLOG.pglog("{}: email not in ruser for {}".format(email, date), PgLOG.LOGWRN)
        # check again if a user is on file with different date range
        myrec = myget("ruser", fields, emcnd, PgLOG.LGEREX)
        if not myrec and myget("user", '', emcnd):
            # fall back to table user; alias its columns to the ruser names
            fields = "lstname, fstname"
            if opts&1: fields += ", email"
            if opts&2: fields += ", org_type"
            if opts&4: fields += ", country"
            if opts&8: fields += ", email valid_email"
            if opts&16: fields += ", org_name org"
            myrec = myget("user", fields, emcnd, PgLOG.LGEREX)

    if myrec and myrec['lstname']:
        myrec['name'] = (myrec['fstname'].capitalize() + ' ') if myrec['fstname'] else ''
        myrec['name'] += myrec['lstname'].capitalize()
    else:
        # no usable name on file; use the local part of the email address
        if not myrec: myrec = {}
        myrec['name'] = email.split('@')[0]
        if opts&1: myrec['email'] = email

    return myrec
1681
+
1682
+ #
1683
+ # cache a customized email for sending it later
1684
+ #
1685
def cache_customized_email(table, field, condition, emlmsg, logact = 0):
    """Store a composed email message into table.field for later sending;
    on failure, fall back to sending the message directly."""
    if myupdt(table, {field : emlmsg}, condition, logact|PgLOG.ERRLOG):
        if logact: PgLOG.pglog("Email cached to '{}.{}' for {}".format(table, field, condition), logact&(~PgLOG.EXITLG))
        return PgLOG.SUCCESS

    # caching failed; send the email right away instead
    msg = "cache email to '{}.{}' for {}".format(table, field, condition)
    PgLOG.pglog("Error {}, try to send directly now".format(msg), logact|PgLOG.ERRLOG)
    return PgLOG.send_customized_email(msg, emlmsg, logact)
1695
+
1696
+ #
1697
+ # otype: user organization type
1698
+ # email: user email address)
1699
+ #
1700
+ # return: orgonizaion type like DSS, NCAR, UNIV...
1701
+ #
1702
def get_org_type(otype, email):
    """Derive an organization type (DECS, NCAR, UNIV, GOV, ...) from the
    given type and/or the email address domain."""
    if not otype: otype = "OTHER"
    if not email: return otype

    ucar = re.search(r'(@|\.)ucar\.edu$', email)
    if ucar:
        if otype in ('UCAR', 'OTHER'): otype = 'NCAR'
        # a direct @ucar.edu login that is in dssgrp belongs to DECS
        if otype == 'NCAR' and ucar.group(1) == '@':
            mu = re.match(r'^(.+)@', email)
            if mu and myget("dssgrp", "", "logname = '{}'".format(mu.group(1))): otype = 'DECS'
        return otype

    md = re.search(r'\.(mil|org|gov|edu|com|net)(\.\w\w|$)', email)
    if md:
        otype = md.group(1).upper()
        if otype == 'EDU': otype = "UNIV"

    return otype
1720
+
1721
+ #
1722
+ # join values and handle the null values
1723
+ #
1724
def join_values(vstr, vals):
    """Append a formatted 'Value(...)' line for vals to vstr, starting a
    fresh string when vstr is None or empty."""
    if vstr:
        prefix = vstr + "\n"
    elif vstr is None:
        prefix = ''
    else:
        prefix = vstr
    plural = 's' if len(vals) > 1 else ''
    return "{}Value{}({})".format(prefix, plural, ', '.join(map(str, vals)))
1732
+
1733
+ #
1734
+ # check table hostname to find the system down times. Cache the result for 10 minutes
1735
+ #
1736
def get_system_downs(hostname, logact = 0):
    """Check table hostname for scheduled/active downtime of hostname.

    Results are cached in SYSDOWN and refreshed at most every 10 minutes.
    Returns the SYSDOWN entry for hostname: a dict with keys chktime,
    start, end, active, path and curtime (times in epoch seconds).
    """
    curtime = int(time.time())
    newhost = 0

    if hostname not in SYSDOWN:
        SYSDOWN[hostname] = {}
        newhost = 1
    if newhost or (curtime - SYSDOWN[hostname]['chktime']) > 600:
        # (re)initialize the cache entry and query the hostname table
        SYSDOWN[hostname]['chktime'] = curtime
        SYSDOWN[hostname]['start'] = 0
        SYSDOWN[hostname]['end'] = 0
        SYSDOWN[hostname]['active'] = 1
        SYSDOWN[hostname]['path'] = None

        myrec = myget('hostname', 'service, domain, UNIX_TIMESTAMP(downstart) start, UNIX_TIMESTAMP(downend) end',
                      "hostname = '{}'".format(hostname), logact)
        if myrec:
            if myrec['service'] == 'N':
                # host is out of service; treat as down starting now
                SYSDOWN[hostname]['start'] = curtime
                SYSDOWN[hostname]['active'] = 0
            else:
                start = myrec['start']
                end = myrec['end']
                if start and (not end or end > curtime):
                    # a planned downtime window is current or pending
                    SYSDOWN[hostname]['start'] = start
                    SYSDOWN[hostname]['end'] = end if end else None
                if myrec['service'] == 'S' and myrec['domain'] and re.match(r'^/', myrec['domain']):
                    # 'S' service: downtime limited to the listed path(s)
                    SYSDOWN[hostname]['path'] = myrec['domain']

    SYSDOWN[hostname]['curtime'] = curtime

    return SYSDOWN[hostname]
1769
+
1770
+ #
1771
+ # return seconds for how long the system will continue to be down
1772
+ #
1773
def system_down_time(hostname, offset, logact = 0):
    """Return how many seconds hostname is expected to remain down,
    0 when it is not down (offset widens the window before downstart)."""
    down = get_system_downs(hostname, logact)
    if not down['start'] or down['curtime'] < (down['start'] - offset):
        return 0    # the system is not down

    if down['end']:
        if down['curtime'] <= down['end']:
            return down['end'] - down['curtime']
        return 0    # downtime window already over

    # open-ended downtime: report the batch-system time limit
    if PgLOG.PGLOG['MYBATCH'] == PgLOG.PGLOG['PBSNAME']:
        return PgLOG.PGLOG['PBSTIME']
    if PgLOG.PGLOG['MYBATCH'] == PgLOG.PGLOG['SLMNAME']:
        return PgLOG.PGLOG['SLMTIME']
    return 0
1786
+
1787
+ #
1788
+ # return string message if the system is down
1789
+ #
1790
def system_down_message(hostname, path, offset, logact = 0):
    """Return a descriptive message when hostname (optionally limited to
    path) is down, or None when it is in service."""
    down = get_system_downs(hostname, logact)
    msg = None
    if down['start'] and down['curtime'] >= (down['start'] - offset):
        match = match_down_path(path, down['path'])
        if match:
            msg = "{}{}:".format(hostname, ('-' + path) if match > 0 else '')
            if not down['active']:
                msg += " Not in Service"
            else:
                msg += " Planned down, started at " + PgLOG.current_datetime(down['start'])
                if not down['end']:
                    msg += " And no end time specified"
                elif down['curtime'] <= down['end']:
                    # bug fix: 'msg =' discarded the message built so far;
                    # append the end time like the other branches do
                    msg += " And will end by " + PgLOG.current_datetime(down['end'])

    return msg
1808
+
1809
+ #
1810
+ # return 1 if given path match daemon paths, 0 if not; -1 if cannot compare
1811
+ #
1812
def match_down_path(path, dpaths):
    """Return 1 if path falls under any colon-separated daemon path in
    dpaths, 0 if not, and -1 when either side is missing (cannot compare)."""
    if not path or not dpaths: return -1

    for prefix in dpaths.split(':'):
        if re.match(r'^{}'.format(prefix), path): return 1

    return 0
1822
+
1823
# validate that the login user is in the DECS group
1824
+ # check all node if skpdsg is false, otherwise check non-DSG nodes
1825
def validate_decs_group(cmdname, logname, skpdsg):
    """Abort (via PgLOG.LGEREX) unless logname is in the DECS group.

    cmdname - command name shown in the error message
    logname - login name to validate; defaults to the current user
    skpdsg  - if true, skip the check on DSG hosts
    """
    if skpdsg and PgLOG.PGLOG['DSGHOSTS'] and re.search(r'(^|:){}'.format(PgLOG.PGLOG['HOSTNAME']), PgLOG.PGLOG['DSGHOSTS']): return
    # bug fix: default was assigned to misspelled 'lgname', leaving
    # logname empty and the dssgrp check querying an empty name
    if not logname: logname = PgLOG.PGLOG['CURUID']

    if not myget("dssgrp", '', "logname = '{}'".format(logname), PgLOG.LGEREX):
        PgLOG.pglog("{}: Must be in DECS Group to run '{}' on {}".format(logname, cmdname, PgLOG.PGLOG['HOSTNAME']), PgLOG.LGEREX)
1832
+
1833
+ #
1834
+ # add an allusage record into yearly table; create a new yearly table if it does not exist
1835
+ # year -- year to identify the yearly table, evaluated if missing
1836
+ # records -- hash to hold one or multiple records.
1837
+ # Dict keys: email -- user email address,
1838
+ # org_type -- organization type
1839
+ # country -- country code
1840
+ # dsid -- dataset ID
1841
+ # date -- date data accessed
1842
+ # time -- time data accessed
1843
+ # quarter -- quarter of the year data accessed
1844
+ # size -- bytes of data accessed
1845
+ # method -- delivery methods: MSS,Web,Ftp,Tape,Cd,Disk,Paper,cArt,Micro
1846
+ # source -- usage source flag: W - wusage, O - ordusage
1847
+ # midx -- refer to mbr2loc.midx if not 0
1848
+ # ip -- user IP address
1849
+ # region -- user region name; for example, Colorado
1850
+ #
1851
+ # isarray -- if true, multiple records provided via arrays for each hash key
1852
+ # docheck -- if 1, check and add only if record is not on file
1853
+ # docheck -- if 2, check and add if record is not on file, and update if exists
1854
+ # docheck -- if 4, check and add if record is not on file, and update if exists,
1855
+ # and also checking NULL email value too
1856
+ #
1857
def add_yearly_allusage(year, records, isarray = 0, docheck = 0):
    """Add one or multiple records to yearly table 'allusage_<year>';
    the table is created on demand via the PgLOG.ADDTBL flag.
    Returns the count of records added (and/or updated for docheck > 1).
    See the comment block above for the record keys and docheck values."""

    acnt = 0
    if not year:
        # derive the year from the (first) access date when not provided
        ms = re.match(r'^(\d\d\d\d)', str(records['date'][0] if isarray else records['date']))
        if ms: year = ms.group(1)
    tname = "allusage_{}".format(year)
    if isarray:
        cnt = len(records['email'])
        if 'quarter' not in records: records['quarter'] = [0]*cnt
        for i in range(cnt):
            if not records['quarter'][i]:
                # evaluate quarter (1-4) from the month in the date string
                ms = re.search(r'-(\d+)-', str(records['date'][i]))
                if ms: records['quarter'][i] = int((int(ms.group(1))-1)/3)+1
        if docheck:
            for i in range(cnt):
                # rebuild a scalar record dict for the i-th entry
                record = {}
                for key in records:
                    record[key] = records[key][i]
                cnd = "email = '{}' AND dsid = '{}' AND method = '{}' AND date = '{}' AND time = '{}'".format(
                       record['email'], record['dsid'], record['method'], record['date'], record['time'])
                myrec = myget(tname, 'aidx', cnd, PgLOG.LOGERR|PgLOG.ADDTBL)
                if docheck == 4 and not myrec:
                    # docheck 4: also try matching an existing row with NULL email
                    cnd = "email IS NULL AND dsid = '{}' AND method = '{}' AND date = '{}' AND time = '{}'".format(
                           record['dsid'], record['method'], record['date'], record['time'])
                    myrec = myget(tname, 'aidx', cnd, PgLOG.LOGERR|PgLOG.ADDTBL)
                if myrec:
                    # docheck 2/4: refresh the existing row; docheck 1: leave it alone
                    if docheck > 1: acnt += myupdt(tname, record, "aidx = {}".format(myrec['aidx']), PgLOG.LGEREX)
                else:
                    acnt += myadd(tname, record, PgLOG.LGEREX|PgLOG.ADDTBL)
        else:
            # no existence check requested; bulk-insert all records at once
            acnt = mymadd(tname, records, PgLOG.LGEREX|PgLOG.ADDTBL)
    else:
        record = records
        if not ('quarter' in record and record['quarter']):
            # evaluate quarter (1-4) from the month in the date string
            ms = re.search(r'-(\d+)-', str(record['date']))
            if ms: record['quarter'] = int((int(ms.group(1))-1)/3)+1
        if docheck:
            cnd = "email = '{}' AND dsid = '{}' AND method = '{}' AND date = '{}' AND time = '{}'".format(
                   record['email'], record['dsid'], record['method'], record['date'], record['time'])
            myrec = myget(tname, 'aidx', cnd, PgLOG.LOGERR|PgLOG.ADDTBL)
            if docheck == 4 and not myrec:
                # docheck 4: also try matching an existing row with NULL email
                cnd = "email IS NULL AND dsid = '{}' AND method = '{}' AND date = '{}' AND time = '{}'".format(
                       record['dsid'], record['method'], record['date'], record['time'])
                myrec = myget(tname, 'aidx', cnd, PgLOG.LOGERR|PgLOG.ADDTBL)
            if myrec:
                # record already on file: update for docheck 2/4, otherwise skip
                if docheck > 1: acnt = myupdt(tname, record, "aidx = {}".format(myrec['aidx']), PgLOG.LGEREX)
                return acnt
        acnt = myadd(tname, record, PgLOG.LGEREX|PgLOG.ADDTBL)

    return acnt
1908
+
1909
+ #
1910
+ # add a wusage record into yearly table; create a new yearly table if it does not exist
1911
+ # year -- year to identify the yearly table, evaluated if missing
1912
+ # records -- hash to hold one or multiple records.
1913
+ # Dict keys: wid - reference to wfile.wid
1914
+ # wuid_read - reference to wuser.wuid, 0 if missing email
1915
+ # dsid - reference to dataset.dsid at the time of read
1916
+ # date_read - date file read
1917
+ # time_read - time file read
1918
+ # quarter - quarter of the year data accessed
1919
+ # size_read - bytes of data read
1920
+ # method - download methods: WEB, CURL, MGET, FTP and MGET
1921
+ # locflag - location flag: Glade or Object
1922
+ # ip - IP address
1923
+ #
1924
+ # isarray -- if true, multiple records provided via arrays for each hash key
1925
+ #
1926
def add_yearly_wusage(year, records, isarray = 0):
    """Add one or multiple records to yearly table 'wusage_<year>';
    the table is created on demand via the PgLOG.ADDTBL flag.

    year    - year identifying the yearly table; derived from 'date_read' if empty
    records - dict holding one record, or per-field lists when isarray is true
    isarray - if true, multiple records are provided via arrays for each key

    Returns the count of records added.
    See the comment block above for the record keys.
    """
    acnt = 0
    if not year:
        # derive the year from the (first) read date when not provided
        ms = re.match(r'^(\d\d\d\d)', str(records['date_read'][0] if isarray else records['date_read']))
        if ms: year = ms.group(1)
    tname = "wusage_{}".format(year)
    if isarray:
        cnt = len(records['wid'])
        if 'quarter' not in records: records['quarter'] = [0]*cnt
        # CONSISTENCY FIX: like add_yearly_allusage(), fill in any
        # missing/zero quarter from the month of date_read, even when a
        # 'quarter' list was supplied by the caller
        for i in range(cnt):
            if not records['quarter'][i]:
                ms = re.search(r'-(\d+)-', str(records['date_read'][i]))
                if ms: records['quarter'][i] = int((int(ms.group(1))-1)/3)+1
        acnt = mymadd(tname, records, PgLOG.LGEREX|PgLOG.ADDTBL)
    else:
        record = records
        # treat a falsy quarter (missing, None or 0) as not evaluated yet,
        # matching add_yearly_allusage()
        if not record.get('quarter'):
            ms = re.search(r'-(\d+)-', str(record['date_read']))
            if ms: record['quarter'] = int((int(ms.group(1))-1)/3)+1
        acnt = myadd(tname, record, PgLOG.LGEREX|PgLOG.ADDTBL)

    return acnt