rda-python-metrics 1.0.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of rda-python-metrics might be problematic. Click here for more details.
- rda_python_metrics/PgIPInfo.py +188 -0
- rda_python_metrics/PgView.py +782 -0
- rda_python_metrics/__init__.py +1 -0
- rda_python_metrics/fillawsusage.py +282 -0
- rda_python_metrics/fillawsusage.usg +17 -0
- rda_python_metrics/fillcodusage.py +247 -0
- rda_python_metrics/fillcodusage.usg +21 -0
- rda_python_metrics/fillcountry.py +79 -0
- rda_python_metrics/fillendtime.py +93 -0
- rda_python_metrics/fillglobususage.py +287 -0
- rda_python_metrics/fillglobususage.usg +17 -0
- rda_python_metrics/fillipinfo.py +185 -0
- rda_python_metrics/fillipinfo.usg +18 -0
- rda_python_metrics/filloneorder.py +155 -0
- rda_python_metrics/filloneorder.usg +41 -0
- rda_python_metrics/fillrdadb.py +151 -0
- rda_python_metrics/fillrdadb.usg +32 -0
- rda_python_metrics/filltdsusage.py +289 -0
- rda_python_metrics/filltdsusage.usg +17 -0
- rda_python_metrics/filluser.py +216 -0
- rda_python_metrics/filluser.usg +16 -0
- rda_python_metrics/logarch.py +359 -0
- rda_python_metrics/logarch.usg +27 -0
- rda_python_metrics/pgperson.py +72 -0
- rda_python_metrics/pgusername.py +50 -0
- rda_python_metrics/viewallusage.py +350 -0
- rda_python_metrics/viewallusage.usg +198 -0
- rda_python_metrics/viewcheckusage.py +289 -0
- rda_python_metrics/viewcheckusage.usg +185 -0
- rda_python_metrics/viewcodusage.py +314 -0
- rda_python_metrics/viewcodusage.usg +184 -0
- rda_python_metrics/viewordusage.py +340 -0
- rda_python_metrics/viewordusage.usg +224 -0
- rda_python_metrics/viewrqstusage.py +362 -0
- rda_python_metrics/viewrqstusage.usg +217 -0
- rda_python_metrics/viewtdsusage.py +323 -0
- rda_python_metrics/viewtdsusage.usg +191 -0
- rda_python_metrics/viewwebfile.py +294 -0
- rda_python_metrics/viewwebfile.usg +212 -0
- rda_python_metrics/viewwebusage.py +371 -0
- rda_python_metrics/viewwebusage.usg +211 -0
- rda_python_metrics-1.0.4.dist-info/METADATA +18 -0
- rda_python_metrics-1.0.4.dist-info/RECORD +47 -0
- rda_python_metrics-1.0.4.dist-info/WHEEL +5 -0
- rda_python_metrics-1.0.4.dist-info/entry_points.txt +22 -0
- rda_python_metrics-1.0.4.dist-info/licenses/LICENSE +21 -0
- rda_python_metrics-1.0.4.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,782 @@
|
|
|
1
|
+
#
|
|
2
|
+
###############################################################################
|
|
3
|
+
#
|
|
4
|
+
# Title : PgView.py
|
|
5
|
+
# Author : Zaihua Ji, zji@ucar.edu
|
|
6
|
+
# Date : 09/24/2020
|
|
7
|
+
# 2025-03-27 transferred to package rda_python_metrics from
|
|
8
|
+
# https://github.com/NCAR/rda-database.git
|
|
9
|
+
# Purpose : python library module to help rountinely updates of new data
|
|
10
|
+
# for one or multiple datasets
|
|
11
|
+
#
|
|
12
|
+
# Github : https://github.com/NCAR/rda-python-metrics.git
|
|
13
|
+
#
|
|
14
|
+
###############################################################################
|
|
15
|
+
#
|
|
16
|
+
import os
|
|
17
|
+
import re
|
|
18
|
+
from rda_python_common import PgLOG
|
|
19
|
+
from rda_python_common import PgUtil
|
|
20
|
+
from rda_python_common import PgDBI
|
|
21
|
+
|
|
22
|
+
#
|
|
23
|
+
# simple_output(params: reference to parameter hash array
|
|
24
|
+
# flds: reference to field hash array
|
|
25
|
+
# records: PgSQL query result)
|
|
26
|
+
# generate a simple view without header and page information by using the passed in PgSQL query result
|
|
27
|
+
#
|
|
28
|
+
def simple_output(params, flds, records, totals = None):
    """Print a plain-text table for a PgSQL query result: a digit ruler line,
    the column titles, one line per data row, and an optional totals row.

    params:  option dict; params['C'][0] is the string of column keys,
             params['L'][0] an optional column separator (default one space),
             params['A'][0] an optional explicit row count.
    flds:    per-column field info keyed by column letter; fld[0] is the title,
             fld[4] the column width (negative => right justify, 0 => compute
             from data), fld[5] the numeric precision (>0 => float format).
    records: dict of column key -> list of values (PgSQL query result).
    totals:  optional dict of column key -> total value, printed after a
             dashed separator line.
    """

    cols = params['C'][0]
    ccnt = len(cols) # get output dimensions
    sep = params['L'][0] if 'L' in params else ' '
    slen = len(sep)

    # get total line length, dynamically evaluating column lengthes if column 4 in %FLDS is zero
    rcnt = linelen = 0
    if 'A' in params: rcnt = int(params['A'][0])
    for i in range(ccnt):
        # 'X' is the synthetic row-number column; it has no list in records
        if not (rcnt or cols[i] == 'X'): rcnt = len(records[cols[i]])
        fld = flds[cols[i]]
        if not fld[4]: fld[4] = PgUtil.get_column_length(fld[0], records[cols[i]])
        if linelen: linelen += slen
        linelen += abs(fld[4])

    # print position numbers for reference of read
    nstr = '123456789'
    nline = ''
    for i in range(0,linelen,10):
        nline += str(int(i/10)%10)  # tens digit at every 10th position
        if (linelen-i) < 10:
            nline += nstr[0:(linelen-i-1)]  # truncated final decade
        else:
            nline += nstr
    print(nline)

    # print column titles
    tline = ''
    for i in range(ccnt):
        if i: tline += sep # delimiter to separate columns
        fld = flds[cols[i]]
        if fld[4] < 0 or fld[5] > 0: # right justify
            tline += "{:>{}}".format(fld[0], abs(fld[4]))
        else: # left justify
            tline += "{:{}}".format(fld[0], abs(fld[4]))
    print(tline)

    # print result now
    for j in range(rcnt):
        sline = ''
        for i in range(ccnt):
            fld = flds[cols[i]]
            if cols[i] == 'X':
                val = j+1  # sequential row number
            else:
                val = records[cols[i]][j]
            if val is None:
                # NULL substitution: 0 for numeric columns, blank otherwise
                if fld[4] < 0 or fld[5] > 0:
                    val = 0
                else:
                    val = ' '
            if i > 0: sline += sep # delimiter to separate columns
            if fld[5] > 0: # right justify, numeric field with precision
                sline += "{:{}.{}f}".format(abs(val), fld[4], fld[5])
            elif fld[4] < 0: # right justify, negative field size
                sline += "{:>{}}".format(str(val), -fld[4])
            elif i < (ccnt-1): # left justify, normal display with trailing spaces
                sline += "{:{}.{}}".format(str(val), fld[4], fld[4])
            else: # normal display w/o trailing spaces
                sline += str(val)
        print(sline)

    if totals:
        # dashed separator followed by a single totals line
        print(''.join(['-']*linelen))
        sline = ''
        for i in range(ccnt):
            if i > 0: sline += sep # delimiter to separate columns
            fld = flds[cols[i]]
            if cols[i] == 'X':
                sline += "{:{}}".format('TOTAL', abs(fld[4]))
                continue
            val = totals[cols[i]]
            if val is None:
                sline += "{:{}}".format(' ', abs(fld[4]))
                continue

            if fld[5] > 0: # right justify, numeric field with precision
                sline += "{:{}.{}f}".format(abs(val), fld[4], fld[5])
            elif fld[4] < 0: # right justify, negative field size
                sline += "{:>{}}".format(str(val), -fld[4])
            elif i < (ccnt-1): # left justify, normal display with trailing spaces
                sline += "{:{}}".format(str(val), abs(fld[4]))
            else: # normal display w/o trailing spaces
                sline += str(val)
        print(sline)
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
#
|
|
118
|
+
# set_data_unit(fld: reference to original array of size field
|
|
119
|
+
# unit: given unit to show data size, 'BKMGTP'
|
|
120
|
+
# fname: the field name in RDADB, in form of SUM() if needed)
|
|
121
|
+
#
|
|
122
|
+
# change unit of data size and reset field length according to given data unit
|
|
123
|
+
#
|
|
124
|
+
def set_data_unit(fld, unit, fname, origin = 0):
    """Adjust a data-size field description for the requested display unit.

    fld:    field info list, modified in place; fld[0] is the title (its
            leading 'B'/'MB' prefix is rewritten), fld[1] the SQL expression,
            fld[4] the width and fld[5] the precision.
    unit:   one of 'BKMGTP' (decimal units, powers of 1000).
    fname:  the RDADB field name, possibly wrapped in SUM().
    origin: bytes per stored unit of the raw column (e.g. 1000000 when the
            column already holds MB); 0 means the column holds bytes.
    Returns the modified fld list.
    """

    # bytes per display unit; decimal (SI) definition.
    # NOTE: 'K' was previously 100, which mis-scaled kilobyte output.
    factor = {'B' : 1, 'K' : 1000, 'M' : 1000000, 'G' : 1000000000,
              'T' : 1000000000000, 'P' : 1000000000000000}

    if unit == 'B':
        if re.match(r'^sum', fname):
            # raw byte sum: widen the column and disable float formatting
            fld[0] = re.sub(r'^MB', 'B', fld[0], 1)
            fld[1] = fname
            fld[4] = -17
            fld[5] = -1
    elif unit == 'M':
        if not re.match(r'^sum', fname):
            fld[0] = re.sub(r'^B', 'MB', fld[0], 1)
    elif unit in factor:
        # replace the leading 'B' or 'MB' of the title with the new unit
        fld[0] = re.sub(r'^M{0,1}B', unit+'B', fld[0], 1)
    else:
        PgLOG.pglog("{}: Unknown unit must be in ({})".format(unit, ','.join(factor)), PgLOG.LGEREX)

    fact = factor[unit]
    reverse = 0
    if origin:
        # scale relative to the stored unit; multiply instead of divide
        # when the stored unit is larger than the requested one
        if fact >= origin:
            fact /= origin
        else:
            fact = origin/fact
            reverse = 1

    if fact > 1:
        fld[1] = "round({}{}{}, 4)".format(fname, '*' if reverse else '/', fact)
        fld[4] = -14
        fld[5] = 3

    return fld
|
|
158
|
+
|
|
159
|
+
#
|
|
160
|
+
# get all available date(D)/month(M)/year(Y) for given conditions of
|
|
161
|
+
# of dates, daterange, months or years
|
|
162
|
+
#
|
|
163
|
+
def expand_time(exps, records, params, expand):
    """Expand temporal display columns (D=date, M=month, Y=year, Q=quarter)
    into all values implied by the temporal options in params.

    exps:    the temporal expand keys requested (subset of 'DMYQ').
    records: existing query result; its 'D'/'M'/'Y'/'Q' lists are reused when
             no explicit temporal condition was given.
    params:  command-line option dict ('d' dates, 'D' date range, 'm' months,
             'y' years, 'q' quarters).
    expand:  expand definitions; expand[key][1] lists the option letters that
             can drive the expansion for that key.
    Returns a dict with a list per requested key ('D', 'M', 'Y', 'Q').
    """

    get = 0  # bit mask: 1=date, 2=month, 4=year, 8=quarter
    opts = aold = aqtr = None
    for opt in exps:
        if opt == "D":
            get |= 1
            if 'D' in records: aold = records['D']
            opts = expand['D'][1]
        elif opt == "M":
            get |= 2
            if not aold and 'M' in records: aold = records['M']
            if not opts: opts = expand['M'][1]
        elif opt == "Y":
            get |= 4
            if not aold and 'Y' in records: aold = records['Y']
            if not opts: opts = expand['Y'][1]
        elif opt == "Q":
            get |= 8
            if 'Q' in records: aqtr = records['Q']
            if not opts: opts = expand['Q'][1]
    cqtr = 0
    qcond = cond = None
    # find the driving temporal condition; 'q' (quarters) is only a filter
    for opt in opts:
        if opt in params:
            if opt == 'q':
                qcond = params[opt]
                cqtr = len(qcond)
            elif not cond:
                cond = params[opt]
                break

    if qcond and not cond:
        PgLOG.pglog("no zero usage on temporal condition of quarter only", PgLOG.LGWNEX)

    anew = []
    if cond:
        # 'opt' is the option letter that supplied 'cond' (loop exited on it)
        if opt == 'd':
            anew = cond
        elif opt == 'D':
            (start, end) = PgUtil.daterange(cond[0], cond[1])
            if not (end and start):
                PgLOG.pglog("Must specify valid start and end dates", PgLOG.LGWNEX)
            # step by day, month or year depending on the finest unit wanted
            dy = dm = dd = 0
            if get&1:
                dd = 1
            elif get&10:  # month or quarter granularity
                dm = 1
            else:
                dy = 1
            date = start
            anew.append(date)
            while date < end:
                date = PgUtil.adddate(date, dy, dm, dd)
                anew.append(date)
        elif opt == 'm':
            if (get&1) == 0:
                anew = cond
            else:
                # expand each month into its days
                for month in cond:
                    for i in range(1, 29):
                        anew.append("{}-{:02}".format(month, i))
                    # was anew[27]: wrong for every month after the first
                    date = anew[-1]
                    end = PgUtil.enddate(date, 0, 'M')
                    while date < end:
                        date = PgUtil.adddate(date, 0, 0, 1)
                        anew.append(date)
        elif opt == 'y':
            if (get&4) == 4:
                anew = cond
            else:
                for year in cond:
                    for j in range(1, 13):
                        # was "{}={:02}": '=' broke the YYYY-MM format
                        month = "{}-{:02}".format(year, j)
                        if qcond:
                            # was int(j/3)+1: mapped month 3 to Q2, etc.
                            qtr = int((j - 1)/3) + 1
                            i = 0
                            while i < cqtr:
                                if qcond[i] == qtr: break
                                i += 1
                            # was i > cqtr: never true, so nothing was skipped
                            if i >= cqtr: continue # skip month not in included quarters
                        if get&1:
                            for i in range(1, 29):
                                anew.append("{}-{:02}".format(month, i))
                            date = anew[-1]
                            end = PgUtil.enddate(date, 0, 'M')
                            while date < end:
                                date = PgUtil.adddate(date, 0, 0, 1)
                                anew.append(date)
                        else:
                            anew.append(month)
    elif records:
        anew = aold

    cnew = len(anew)
    aret = []
    # reduce each value to the coarsest requested granularity and de-duplicate
    for j in range(cnew):
        date = anew[j]
        if get&1 == 0:
            if get&2: #get month
                ms = re.match(r'^(\d\d\d\d-\d\d)', date)
                if ms: date = ms.group(1)
            elif get&4: #get year
                ms = re.match(r'^(\d\d\d\d)-(\d+)', date)
                if ms:
                    date = ms.group(1)
                    mn = int(ms.group(2))
                    if get&8:
                        qtr = mn - ((mn - 1)%3) #first month of quarter
                        date = "{}-{}".format(date, qtr)
            else:
                ms = re.match(r'^(\d\d\d\d)', date)
                if ms:
                    date = ms.group(1)
                    if get&8 and aqtr:
                        qtr = 3*(aqtr[j]-1) + 1 #first month of quarter
                        date = "{}-{}".format(date, qtr)

        if date not in aret:
            aret.append(date)

    # split the unique values back into per-key result lists
    rets = {}
    for date in aret:
        if get&1:
            if 'D' not in rets: rets['D'] = []
            rets['D'].append(date)
        if get&2:
            ms = re.match(r'^(\d\d\d\d-\d\d)', date)
            if ms:
                if 'M' not in rets: rets['M'] = []
                rets['M'].append(ms.group(1))
        if get&4:
            ms = re.match(r'^(\d\d\d\d)', date)
            if ms:
                if 'Y' not in rets: rets['Y'] = []
                rets['Y'].append(ms.group(1))
        if get&8:
            ms = re.match(r'^\d\d\d\d-(\d+)', date)
            if ms:
                if 'Q' not in rets: rets['Q'] = []
                rets['Q'].append((int((int(ms.group(1)) - 1)/3) + 1))

    return rets
|
|
306
|
+
|
|
307
|
+
#
|
|
308
|
+
# the detail query action for expand_query()
|
|
309
|
+
#
|
|
310
|
+
def query_action(exps, records, expand, tables, cond):
    """Run the DISTINCT query for the given expand keys and return the unique
    rows as a dict of key -> list.

    exps:   expand keys to select; expand[key][2] holds the SQL expression.
    tables: comma-separated table list; cond: WHERE/join condition string.
    records: passed through for interface symmetry with expand_time (unused).
    """

    fields = ''
    for exp in exps:
        fields += ", " if fields else "DISTINCT "
        fields += "{} {}".format(expand[exp][2], exp)
    pgrecs = PgDBI.pgmget(tables, fields, cond, PgLOG.UCLWEX)
    cnew = PgUtil.hashcount(pgrecs, 1)
    cexp = len(exps)
    cret = 0
    rets = {}
    for i in range(cnew):
        j = 0
        while j < cret:
            k = 0
            while k < cexp:
                exp = exps[k]
                if PgUtil.pgcmp(pgrecs[exp][i], rets[exp][j]): break
                k += 1  # field equal; compare the next field (was missing: loop hung)
            if k >= cexp: break  # every field matched: row i is a duplicate
            j += 1  # row differs from kept row j; try the next one (was missing)
        if j >= cret:  # no duplicate found: keep row i
            for k in range(cexp):
                exp = exps[k]
                if exp not in rets: rets[exp] = []
                rets[exp].append(pgrecs[exp][i])
            cret += 1

    return rets
|
|
338
|
+
|
|
339
|
+
#
|
|
340
|
+
# build table name and join condition strings
|
|
341
|
+
#
|
|
342
|
+
def join_query_tables(tblname, tablenames = '', joins = '', tbljoin = ''):
    """Add table 'tblname' to a comma-separated table list and extend the
    join-condition string accordingly.

    tblname:    the table to add.
    tablenames: the tables joined so far ('' means tblname becomes the first).
    joins:      the join conditions built so far.
    tbljoin:    the table to join against; defaults to the first table in
                tablenames.
    Returns the (tablenames, joins) pair; unchanged if tblname is already
    present.
    """

    if not tablenames:
        return (tblname, "")  # first table needs no join condition
    elif tablenames.find(tblname) > -1:
        return (tablenames, joins)  # already joined

    if not tbljoin: tbljoin = tablenames.split(', ')[0]
    cndstr = ''
    # default join field and equality template; overridden per table below
    jfield = 'dsid'
    fmtstr = "{}.{} = {}.{}"
    if tblname == 'gofile':
        jfield = 'task_id'
    elif tblname == 'wfile':
        jfield = 'wid'
    elif tblname == 'user':
        jfield = 'uid'
    elif tblname == 'emreceive':
        jfield = 'email'
        if tbljoin == 'wusage':
            # route the join through wuser first (wusage presumably has no
            # email field to join on directly — confirm against the schema)
            (tablenames, joins) = join_query_tables('wuser', tablenames, joins, tbljoin)
            tbljoin = 'wuser'
    elif tblname == 'ruser':
        jfield = 'email'
        # restrict to registrations valid at the task completion time
        if tbljoin == 'gotask': cndstr = " AND rdate <= DATE(completion_time) AND (end_date IS NULL OR end_date >= DATE(completion_time))"
    elif tblname == 'wuser':
        jfield = 'wuid'
        # wuid is stored under different column names on the joining side
        if tbljoin == 'wusage':
            fmtstr = "{}.{}_read = {}.{}"
        else:
            fmtstr = "{}.{}_request = {}.{}"
    elif tblname == 'search.datasets':
        # old-style dsids carry a 2-character prefix; strip it for the join
        if not PgLOG.PGLOG['NEWDSID']: fmtstr = "substring({}.{}, 3) = {}.{}"
        if tbljoin == 'gotask':
            (tablenames, joins) = join_query_tables('gofile', tablenames, joins, tbljoin)
            tbljoin = 'gofile'
    elif tblname == 'dsowner':
        cndstr = " AND priority = 1"  # primary owner only
        if tbljoin == 'gotask':
            (tablenames, joins) = join_query_tables('gofile', tablenames, joins, tbljoin)
            tbljoin = 'gofile'
    elif tblname == 'wfpurge':
        jfield = 'index'

    tablenames += ', ' + tblname # add to table name string
    if joins: joins += " AND "
    joins += fmtstr.format(tbljoin, jfield, tblname, jfield) + cndstr

    return (tablenames, joins)
|
|
391
|
+
|
|
392
|
+
#
|
|
393
|
+
# expand records via query action
|
|
394
|
+
#
|
|
395
|
+
def expand_query(expid, records, params, expand, vusg = None, sns = None, flds = None):
    """Expand the display columns belonging to expand group 'expid'.

    expid:   expand group identifier; expand[key][0] names each key's group.
             'TIME' is handled locally by expand_time; anything else runs a
             DISTINCT database query via query_action.
    records: existing query result, forwarded to the expansion routine.
    params:  option dict; params['C'][0] is the column-key string.
    expand:  expand definitions (per key: [group, option letters, SQL
             expression, table name]).
    vusg/sns/flds: view config, option->shortname map and field definitions
             used to build the query condition.
    Returns the expanded records dict, or None if no key of this group is
    displayed.
    """

    cols = params['C'][0]
    exps = []
    # gather the valid expands
    for opt in expand:
        fld = expand[opt]
        if not (fld[0] == expid and cols.find(opt) > -1): continue
        exps.append(opt)

    if not exps: return None
    if expid == "TIME": return expand_time(exps, records, params, expand)

    # check and join tables
    tables = joins = ''
    for opt in exps:
        fld = expand[opt]
        (tables, joins) = join_query_tables(fld[3], tables, joins)

    # add the user-specified filter options for this expand group
    cond = ""
    opts = expand[exps[0]][1]
    for opt in opts:
        if opt not in params: continue
        sn = sns[opt]
        fld = expand[sn] if sn in expand else flds[sn]
        cond = get_view_condition(opt, sn, fld, params, vusg, cond)
        (tables, joins) = join_query_tables(fld[3], tables, joins)

    if joins and cond:
        cond = "{} AND {}".format(joins, cond)
    elif joins:
        cond = joins

    return query_action(exps, records, expand, tables, cond)
|
|
429
|
+
|
|
430
|
+
#
|
|
431
|
+
# build year list for yearly tables for given temporal conditions
|
|
432
|
+
#
|
|
433
|
+
def build_year_list(params, vusg):
    """Collect the distinct 4-digit years implied by the temporal options,
    for selecting the yearly usage tables to query.

    params: option dict; values are lists of (possibly quoted) date strings.
    vusg:   view config; vusg['TCND'] lists the temporal option letters and
            vusg['RCND'] those that denote ranges.
    Returns the list of years in first-seen order.
    """

    years = []

    tcnd = vusg.get('TCND', [])
    rcnd = vusg.get('RCND', [])
    for opt in tcnd:
        values = params.get(opt)
        if not values: continue
        # pull the leading 4-digit year out of each value (0 if absent)
        nums = []
        for sval in values:
            ms = re.match(r"^'*(\d\d\d\d)", sval)
            nums.append(int(ms.group(1)) if ms else 0)
        if opt in rcnd:
            # range option: fill every year between the two endpoints,
            # defaulting to 2004 and the current year
            if len(nums) == 1: nums.append(0)
            begin = nums[0] if nums[0] else 2004
            finish = nums[1] if nums[1] else int(PgUtil.curdate('YYYY'))
            for yr in range(begin, finish + 1):
                if yr not in years: years.append(yr)
        else:
            for yr in nums:
                if yr and yr not in years: years.append(yr)

    return years
|
|
458
|
+
|
|
459
|
+
#
|
|
460
|
+
#evaluate daterange, remove/add quotes as needed; add time ranges on if dt is True
|
|
461
|
+
#
|
|
462
|
+
def evaluate_daterange(dates, dr, dt):
    """Normalize a two-element date range for use in SQL.

    Strips any surrounding single quotes from each endpoint, optionally
    completes the range via PgUtil.daterange (dr) and appends time-of-day
    bounds via PgUtil.dtrange (dt), then re-quotes the non-empty endpoints.
    Returns the adjusted two-element sequence.
    """

    for idx in (0, 1):
        value = dates[idx]
        if value:
            ms = re.match(r"^'(\w.+\w)'$", value)
            if ms: dates[idx] = ms.group(1)
    if dr: dates = PgUtil.daterange(dates[0], dates[1])
    if dt: dates = PgUtil.dtrange(dates)
    # NOTE(review): item assignment below assumes daterange/dtrange return a
    # mutable sequence (list) — confirm in PgUtil
    for idx in (0, 1):
        if dates[idx]: dates[idx] = "'{}'".format(dates[idx])

    return dates
|
|
476
|
+
|
|
477
|
+
#
|
|
478
|
+
# get view condition
|
|
479
|
+
#
|
|
480
|
+
def get_view_condition(opt, sn, fld, params, vusg, cond = ''):
    """Append the SQL condition for one command-line option to 'cond'.

    opt:    the option letter whose values are in params[opt].
    sn:     the short (column) name the option filters on.
    fld:    field definition; fld[2] is the SQL field expression.
    params: option dict; a leading '!' value negates the condition
            (NOTE: it is popped, mutating params[opt] in place).
    vusg:   view config strings classifying options: RCND=range, ACND=array,
            ECND=expandable date, HCND=must-display, TOPT=datetime, SFLD=
            string fields allowing LIKE wildcards.
    Returns the extended condition string.
    """

    cols = params['C'][0]
    # options in HCND require their column to be displayed
    if 'HCND' in vusg and vusg['HCND'].find(opt) > -1 and cols.find(sn) < 0:
        PgLOG.pglog("{}-{} Must be in FieldList: {} for Option -{}".format(sn, fld[0], cols, opt), PgLOG.LGWNEX)

    # dt: treat values as datetimes (add time-of-day bounds)
    dt = True if 'TOPT' in vusg and opt in vusg['TOPT'] else False
    inputs = params[opt]
    if inputs[0] == '!':
        negative = 1
        inputs.pop(0)  # consume the negation marker (mutates params[opt])
    else:
        negative = 0
    vcond = ''
    if 'RCND' in vusg and vusg['RCND'].find(opt) > -1: #build condition string for range options
        if len(inputs) == 1: inputs.append('')  # open-ended range
        if opt == 'D': inputs = evaluate_daterange(inputs, True, dt)
        if inputs[0] and inputs[1]:
            if negative: vcond += 'NOT '
            vcond += "BETWEEN {} AND {}".format(inputs[0], inputs[1])
        elif inputs[0]:
            vcond = "{} {}".format('<' if negative else '>=', inputs[0])
        elif inputs[1]:
            vcond = "{} {}".format('>' if negative else '<=', inputs[1])
    elif 'ACND' in vusg and vusg['ACND'].find(opt) > -1: #condition string for array options
        for input in inputs:
            # join successive value conditions with OR (AND when negated),
            # repeating the field expression between them
            if vcond: vcond += " {} {} ".format((" AND" if negative else " OR"), fld[2])
            if 'ECND' in vusg and vusg['ECND'].find(opt) > -1:
                if opt in 'mMyY': # year/month entered
                    if negative: vcond += 'NOT '
                    dates = evaluate_daterange([input, input], True, dt)
                    vcond += "BETWEEN {} AND {}".format(dates[0], dates[1])
                elif opt in 'dD' and dt: # date entered
                    if negative: vcond += 'NOT '
                    dates = evaluate_daterange([input, input], False, dt)
                    vcond += "BETWEEN {} AND {}".format(dates[0], dates[1])
                else:
                    PgLOG.pglog("-{}: NOT evaluable condition option".format(opt), PgLOG.LGEREX)
            elif 'SFLD' in vusg and vusg['SFLD'].find(sn) > -1 and re.search(r'[%_]', input):
                # string field with SQL wildcards: use LIKE
                if negative: vcond += 'NOT '
                vcond += "LIKE " + input
            else:
                vcond += "{} {}".format('<>' if negative else '=', input)
    if vcond:
        if cond: cond += " AND "
        cond += "({} {})".format(fld[2], vcond)

    return cond
|
|
528
|
+
|
|
529
|
+
#
|
|
530
|
+
# reorder expanded result
|
|
531
|
+
#
|
|
532
|
+
def order_records(recs, oflds, cnt = 0):
    """Sort a column-dict result by the named order fields.

    recs:  dict of UPPERCASE column key -> list of values (parallel lists).
    oflds: order field names; an already-uppercase name sorts ascending
           (direction 1), a lowercase one descending (direction -1).
    cnt:   row count, computed via PgUtil.hashcount when 0.
    Returns a new dict with every column re-ordered, or recs unchanged when
    there is nothing to sort.
    """

    if not cnt: cnt = PgUtil.hashcount(recs, 1)
    if cnt < 2 or not oflds: return recs

    keys = []
    dirs = []
    for oname in oflds:
        uname = oname.upper()
        if uname in recs:
            keys.append(uname)
            dirs.append(1 if oname == uname else -1)
    if not keys: return recs  # none of the order fields is present

    nkeys = len(keys)
    # build sortable rows: the order-field values plus the original row index
    rows = []
    for i in range(cnt):
        row = [recs[k][i] for k in keys]
        row.append(i)
        rows.append(row)

    rows = PgUtil.quicksort(rows, 0, cnt-1, dirs, nkeys)

    # rebuild every column of recs in the sorted row order
    ordered = {}
    for oname in recs:
        column = recs[oname]
        ordered[oname] = [column[rows[i][nkeys]] for i in range(cnt)]

    return ordered
|
|
569
|
+
|
|
570
|
+
#
|
|
571
|
+
# for given country info to get long country name
|
|
572
|
+
#
|
|
573
|
+
def get_country_name(cid):
    """Resolve a 2-character country/domain id to its long country name.

    Anything that is not exactly two characters (including empty/None) is
    returned unchanged, as is an id with no match in table 'countries'.
    """

    if not cid or len(cid) != 2: return cid
    record = PgDBI.pgget("countries", "token", "domain_id = '{}'".format(cid), PgLOG.LGEREX)
    if record:
        return record['token']
    return cid
|
|
579
|
+
|
|
580
|
+
#
|
|
581
|
+
# get group index array from given group IDs and dataset IDs
|
|
582
|
+
#
|
|
583
|
+
def get_group_indices(grpids, dsids, indices):
    """Query table 'dsgroup' for the distinct group indices matching the given
    group IDs, optionally narrowed by dataset IDs and/or explicit indices.

    Returns the list of gindex values, or None when nothing matches.
    """

    condition = PgDBI.get_field_condition("grpid", grpids, 1, 1)
    if dsids:
        condition += PgDBI.get_field_condition("dsid", dsids, 1)
    if indices:
        condition += PgDBI.get_field_condition("gindex", indices, 1)
    result = PgDBI.pgmget("dsgroup", "DISTINCT gindex", condition, PgLOG.LGEREX)
    if result:
        return result['gindex']
    return None
|
|
591
|
+
|
|
592
|
+
#
|
|
593
|
+
# expand groups to include IDs or titles or both
|
|
594
|
+
#
|
|
595
|
+
def expand_groups(indices, dsids, igid, ititle):
    """Render group indices as display strings, optionally appending the
    group ID and/or title looked up from table 'dsgroup'.

    indices: list of group indices (0 means the whole dataset).
    dsids:   parallel list of dataset IDs used for the lookup.
    igid:    when true, append '-<grpid>' (or '-DATASET' for index 0).
    ititle:  when true, append '-<title>' (or '-The WHOLE DATASET').
    Returns the list of formatted strings, or None for an empty input.
    """

    if not indices: return None
    count = len(indices)

    sindices = []
    for i in range(count):
        sindices.append("{}".format(indices[i]))
        if indices[i]:
            pgrec = PgDBI.pgget("dsgroup", "grpid, title", "dsid = '{}' AND gindex = {}".format(dsids[i], indices[i]), PgLOG.LGEREX)
            if not pgrec: continue
            # '+' is the concatenation here; the original used Perl's '.'
            # operator, which raised AttributeError at runtime
            if igid and pgrec['grpid']: sindices[i] += "-" + pgrec['grpid']
            if ititle and pgrec['title']: sindices[i] += "-" + pgrec['title']
        else:
            if igid: sindices[i] += "-DATASET"
            if ititle: sindices[i] += "-The WHOLE DATASET"

    return sindices
|
|
613
|
+
|
|
614
|
+
#
|
|
615
|
+
# create condition for emails of users being notified for data updates
|
|
616
|
+
#
|
|
617
|
+
def notice_condition(dates, emids, dsid):
    """Build the SQL sub-condition selecting emreceive rows for users who were
    notified of updates to dataset 'dsid'.

    dates: one date (lower bound) or two dates (inclusive range) used to find
           notices when no emid list is given.
    emids: optional explicit list of notice IDs (string form, spliced into SQL).
    dsid:  the dataset ID.
    Returns a string of the form " AND emreceive.emid ..."; exits via
    PgLOG.pglog when no notice matches.
    """

    cond = "dsid = '{}' AND ".format(dsid)
    count = len(emids) if emids else 0
    if count > 0:
        if count == 1:
            cond += "emid = " + emids[0]
        else:
            cond += "emid IN ("
            for i in range(count):
                if i > 0: cond += ", "
                cond += emids[i]
            cond += ")"
    else:
        # cond already ends with "AND "; append the date clause directly.
        # (The original added a second " AND ", producing invalid SQL.)
        # NOTE(review): assumes dates has two elements in the range case.
        count = len(dates) if dates else 0
        if count == 1:
            cond += "date >= '{}'".format(dates[0])
        else:
            cond += "date BETWEEN '{}' AND '{}'".format(dates[0], dates[1])
    pgrecs = PgDBI.pgmget("emnotice", "emid", cond, PgDBI.PGDBI['LOGACT']|PgLOG.EXITLG)
    count = len(pgrecs['emid']) if pgrecs else 0
    if count > 0:
        emids = pgrecs['emid']
    else:
        PgLOG.pglog("Not Email Notice sent for " + cond, PgDBI.PGDBI['LOGACT']|PgLOG.EXITLG)

    # build the final emreceive condition from the collected emids
    # NOTE(review): string concatenation assumes emid values are strings —
    # confirm the type returned by pgmget
    cond = " AND emreceive.emid "
    if count == 1:
        cond += "= " + emids[0]
    else:
        cond += "IN ("
        for i in range(count):
            if i > 0: cond += ", "
            cond += emids[i]
        cond += ")"

    return cond
|
|
654
|
+
|
|
655
|
+
#
|
|
656
|
+
# get email list including historical ones
|
|
657
|
+
#
|
|
658
|
+
def include_historic_emails(emails, opt):
    """Extend an email list with historical addresses of the same users.

    opt bit 1 follows table 'user' via userno; bit 2 follows table 'ruser'
    via id; a falsy opt defaults to 3 (both). Returns the de-duplicated list
    in first-seen order.
    """

    found = {}  # insertion-ordered set of addresses
    if not opt: opt = 3

    for email in emails:
        found[email] = 1
        if opt&1:
            pgrec = PgDBI.pgget("user", "userno", "email = '{}'".format(email), PgLOG.LGEREX)
            if pgrec and pgrec['userno']:
                # other addresses recorded under the same user number
                pgrecs = PgDBI.pgmget("user", "email", "userno = {} AND email <> '{}'".format(pgrec['userno'], email), PgLOG.LGEREX)
                if pgrecs:
                    for em in pgrecs['email']:
                        found[em] = 1
        if opt&2:
            pgrec = PgDBI.pgget("ruser", "id", "email = '{}'".format(email), PgLOG.LGEREX)
            if pgrec and pgrec['id']:
                # other addresses recorded under the same registration id
                pgrecs = PgDBI.pgmget("ruser", "email", "id = {} AND email <> '{}'".format(pgrec['id'], email), PgLOG.LGEREX)
                if pgrecs:
                    for em in pgrecs['email']:
                        found[em] = 1

    return list(found)
|
|
683
|
+
|
|
684
|
+
#
|
|
685
|
+
# combine two query dicts
|
|
686
|
+
#
|
|
687
|
+
def combine_hash(adict, bdict, gflds, sflds):
    """Concatenate two column-dict query results and, when group fields are
    given, merge rows with identical group-field values by summing the
    summable fields.

    adict, bdict: dicts of field name -> list of values (parallel lists);
                  adict is extended/modified in place.
    gflds: group-by field names; falsy skips the merge step entirely.
    sflds: field names whose values are added together for merged rows.
    Returns the combined (and possibly merged, re-ordered) dict.
    """

    if not bdict: return adict
    if not adict: return bdict
    for fld in adict: adict[fld].extend(bdict[fld])
    if not gflds: return adict

    # sort so that rows of the same group become adjacent
    adict = order_records(adict, gflds)
    acnt = len(adict[gflds[0]])
    b = 0   # index of the current kept group row
    a = 1   # index of the row being compared against it
    while a < acnt:
        gsame = 1
        for fld in gflds:
            if adict[fld][a] != adict[fld][b]:
                gsame = 0
                break
        if gsame: # same group records: fold row a into row b and drop it
            for fld in sflds:
                adict[fld][b] += adict[fld][a]
                del adict[fld][a]
            for fld in gflds:
                del adict[fld][a]
            acnt -= 1
            # the next row has shifted into index a; keep comparing it to b.
            # (The original advanced b here, leaving runs of three or more
            # identical group rows only partially merged.)
        else:
            b = a
            a = b+1

    return adict
|
|
714
|
+
|
|
715
|
+
#
|
|
716
|
+
# compact a dict by group fields to get distinct count and total sum
|
|
717
|
+
#
|
|
718
|
+
def compact_hash_groups(adict, gflds, sflds, dflds, totals):
    """Compact a column-dict result by its group fields: sum the summable
    fields and count distinct values of the distinct fields per group.

    adict:  dict of field name -> list of values (parallel lists); must hold
            at least one row (index 0 is read unconditionally).
    gflds:  group-by field names (rows are sorted by them first).
    sflds:  field names summed within each group.
    dflds:  field names reduced to a distinct-value count per group.
    totals: optional dict filled in place with overall sums (sflds) and
            overall distinct counts (dflds); pass None to skip.
            NOTE(review): the final per-group distinct count for the last
            group appears to be written only inside the 'if totals' block —
            confirm behavior when totals is None.
    Returns the compacted dict.
    """

    bdict = {}  # the compacted output (one entry per group)
    ddict = {}  # per-field set of distinct values seen in the current group
    tdict = {}  # per-field set of distinct values seen overall (for totals)
    acnt = PgUtil.hashcount(adict, 1)
    if gflds: adict = order_records(adict, gflds, acnt)
    # seed the first output row from input row 0
    for fld in dflds:
        bdict[fld] = [0]
        ddict[fld] = {adict[fld][0] : None}
    for fld in sflds:
        bdict[fld] = [adict[fld][0]]
    for fld in gflds:
        bdict[fld] = [adict[fld][0]]

    if totals != None:
        for fld in dflds:
            totals[fld] = 0
            tdict[fld] = {}
        for fld in sflds:
            totals[fld] = 0
        for fld in gflds:
            totals[fld] = None

    # p: previous input row, b: current output row, a: current input row
    p = b = 0
    a = 1
    while a < acnt:
        gsame = True
        for fld in gflds:
            if adict[fld][a] != adict[fld][p]:
                gsame = False
                break
        if gsame: # same group records
            for fld in sflds:
                if adict[fld][a]: bdict[fld][b] += adict[fld][a]
            for fld in dflds:
                ddict[fld][adict[fld][a]] = None
        else:
            # group break: finalize distinct counts, roll totals, start new row
            for fld in dflds:
                if totals:
                    for dkey in ddict[fld]:
                        tdict[fld][dkey] = None
                bdict[fld][b] = len(ddict[fld])
                bdict[fld].append(0)
                ddict[fld] = {adict[fld][a] : None}
            for fld in sflds:
                if totals: totals[fld] += bdict[fld][b]
                bdict[fld].append(adict[fld][a])
            for fld in gflds:
                bdict[fld].append(adict[fld][a])
            b += 1
        p = a
        a += 1

    if totals:
        # fold the last (still open) group into the totals
        for fld in dflds:
            for dkey in ddict[fld]:
                tdict[fld][dkey] = None
            totals[fld] = len(tdict[fld])
        for fld in sflds:
            totals[fld] += bdict[fld][b]
        for fld in dflds:
            bdict[fld][b] = len(ddict[fld])

    return bdict
|