rda-python-metrics 1.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of rda-python-metrics might be problematic.

Files changed (47)
  1. rda_python_metrics/PgIPInfo.py +188 -0
  2. rda_python_metrics/PgView.py +782 -0
  3. rda_python_metrics/__init__.py +1 -0
  4. rda_python_metrics/fillawsusage.py +282 -0
  5. rda_python_metrics/fillawsusage.usg +17 -0
  6. rda_python_metrics/fillcodusage.py +247 -0
  7. rda_python_metrics/fillcodusage.usg +21 -0
  8. rda_python_metrics/fillcountry.py +79 -0
  9. rda_python_metrics/fillendtime.py +93 -0
  10. rda_python_metrics/fillglobususage.py +287 -0
  11. rda_python_metrics/fillglobususage.usg +17 -0
  12. rda_python_metrics/fillipinfo.py +185 -0
  13. rda_python_metrics/fillipinfo.usg +18 -0
  14. rda_python_metrics/filloneorder.py +155 -0
  15. rda_python_metrics/filloneorder.usg +41 -0
  16. rda_python_metrics/fillrdadb.py +151 -0
  17. rda_python_metrics/fillrdadb.usg +32 -0
  18. rda_python_metrics/filltdsusage.py +289 -0
  19. rda_python_metrics/filltdsusage.usg +17 -0
  20. rda_python_metrics/filluser.py +216 -0
  21. rda_python_metrics/filluser.usg +16 -0
  22. rda_python_metrics/logarch.py +359 -0
  23. rda_python_metrics/logarch.usg +27 -0
  24. rda_python_metrics/pgperson.py +72 -0
  25. rda_python_metrics/pgusername.py +50 -0
  26. rda_python_metrics/viewallusage.py +350 -0
  27. rda_python_metrics/viewallusage.usg +198 -0
  28. rda_python_metrics/viewcheckusage.py +289 -0
  29. rda_python_metrics/viewcheckusage.usg +185 -0
  30. rda_python_metrics/viewcodusage.py +314 -0
  31. rda_python_metrics/viewcodusage.usg +184 -0
  32. rda_python_metrics/viewordusage.py +340 -0
  33. rda_python_metrics/viewordusage.usg +224 -0
  34. rda_python_metrics/viewrqstusage.py +362 -0
  35. rda_python_metrics/viewrqstusage.usg +217 -0
  36. rda_python_metrics/viewtdsusage.py +323 -0
  37. rda_python_metrics/viewtdsusage.usg +191 -0
  38. rda_python_metrics/viewwebfile.py +294 -0
  39. rda_python_metrics/viewwebfile.usg +212 -0
  40. rda_python_metrics/viewwebusage.py +371 -0
  41. rda_python_metrics/viewwebusage.usg +211 -0
  42. rda_python_metrics-1.0.4.dist-info/METADATA +18 -0
  43. rda_python_metrics-1.0.4.dist-info/RECORD +47 -0
  44. rda_python_metrics-1.0.4.dist-info/WHEEL +5 -0
  45. rda_python_metrics-1.0.4.dist-info/entry_points.txt +22 -0
  46. rda_python_metrics-1.0.4.dist-info/licenses/LICENSE +21 -0
  47. rda_python_metrics-1.0.4.dist-info/top_level.txt +1 -0
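
The wheel above is a standard pure-Python distribution, so its recorded contents and console scripts can be checked locally once it is installed. A minimal sketch, assuming the package has been installed with pip (the distribution name and version are taken from the file list above; the helper script itself is hypothetical and not part of the package):

    # inspect_wheel.py -- hypothetical helper, not part of rda-python-metrics
    from importlib.metadata import distribution

    dist = distribution("rda-python-metrics")
    print(dist.version)                      # expected to print 1.0.4
    for path in dist.files or []:            # files listed in RECORD
        print(path)
    for ep in dist.entry_points:             # console scripts from entry_points.txt
        print(ep.name, "->", ep.value)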
rda_python_metrics/viewtdsusage.py
@@ -0,0 +1,323 @@
+ #!/usr/bin/env python3
+ #
+ ###############################################################################
+ #
+ #     Title : viewtdsusage
+ #    Author : Zaihua Ji, zji@ucar.edu
+ #      Date : 03/15/2022
+ #             2025-03-28 transferred to package rda_python_metrics from
+ #             https://github.com/NCAR/rda-database.git
+ #   Purpose : python program to view tds usage information
+ #
+ #    Github : https://github.com/NCAR/rda-python-metrics.git
+ #
+ ###############################################################################
+ #
+ import os
+ import re
+ import sys
+ from rda_python_common import PgLOG
+ from rda_python_common import PgUtil
+ from rda_python_common import PgDBI
+ from . import PgView
+
+ VUSG = {
+     'SNMS' : "ABCDEFHIMNOPQRSTUWY",           # all available short field names in %FLDS
+     'OPTS' : 'AabcCdDeEfhHiLmMnoOqsStTUwyz',  # all available options, used for %params
+     'NOPT' : 'abhnwz',                        # stand alone option without inputs
+     'ACND' : 'cdefiImMoqSty',                 # available array condition options
+     'RCND' : 'DEsT',                          # available range condition options
+     'CNDS' : 'acdDeEfiImMnoqsStTy',           # condition options, ACND, RCND and 'a'
+     'ECND' : 'my',                            # condition options need evaluating
+     'SFLD' : 'DEFINOTUW',                     # string fields, to be quoted in condition
+     'UFLD' : 'FNO',                           # string fields must be in upper case
+     'LFLD' : 'EMPT'                           # string fields must be in lower case
+ }
+
+ # keys %FLDS - short field names
+ # column 0 - column title showing in usage view
+ # column 1 - field name in format as shown in select clauses
+ # column 2 - field name shown in where condition query string
+ # column 3 - table name that the field belongs to
+ # column 4 - output field length, the longer one of data size and column title, determined
+ #            dynamically if it is 0. Negative values indicate right justification
+ # column 5 - precision for floating point value if positive and show total value if not zero
+ # column 6 - field flag to indicate it is a group, distinct or sum field
+ FLDS = {
+ #   SHRTNM  COLUMNNAME  FIELDNAME  CNDNAME  TBLNAM  Size  Prc  Grp/Sum
+     'D' : ['DATE', "date", 'date', 'tdsusage', 10, 0, 'G'],
+     'E' : ['EMAIL', "tdsusage.email", 'tdsusage.email', 'tdsusage', 0, 0, 'G'],
+     'F' : ['EF', "etype", 'etype', 'tdsusage', 2, 0, 'G'],
+     'I' : ['IP', "ip", 'ip', 'tdsusage', 0, 0, 'G'],
+     'M' : ['MONTH', PgDBI.fmtym("date"), 'date', 'tdsusage', 7, 0, 'G'],
+     'N' : ['COUNTRY', "country", 'country', 'tdsusage', 0, 0, 'G'],
+     'O' : ['ORGTYPE', "org_type", 'org_type', 'tdsusage', 7, 0, 'G'],
+     'P' : ['DSOWNER', "specialist", 'specialist', 'dsowner', 8, 0, 'G'],
+     'Q' : ['QUARTER', "quarter", 'quarter', 'tdsusage', 7, 0, 'G'],
+     'R' : ['DSTITLE', "search.datasets.title", 'search.datasets.title', 'search.datasets', 0, 0, 'G'],
+     'S' : ['BYTESIZE', "size", 'size', 'tdsusage', -14, -1, 'G'],
+     'T' : ['DATASET', "tdsusage.dsid", 'tdsusage.dsid', 'tdsusage', 0, 0, 'G'],
+     'W' : ['METHOD', "method", 'method', 'tdsusage', 0, 0, 'G'],
+     'Y' : ['YEAR', PgDBI.fmtyr("date"), 'date', 'tdsusage', 4, 0, 'G'],
+     'A' : ['DSCOUNT', "tdsusage.dsid", 'A', 'tdsusage', -7, -1, 'D'],
+     'B' : ['MBYTEREAD', "round(sum(size)/(1000000), 4)", 'B', 'tdsusage', -14, 3, 'S'],
+     'C' : ['#UNIQUSER', "tdsusage.email", 'C', 'tdsusage', -9, -1, 'D'],
+     'U' : ['#UNIQIP', "tdsusage.ip", 'U', 'tdsusage', -7, -1, 'D'],
+     'H' : ['#ACCESS', "sum(fcount)", 'H', 'tdsusage', -8, -1, 'S'],
+     'X' : ['INDEX', "", 'X', '', -6, 0, ' ']
+ }
+
+ # keys %EXPAND - short field names that allow zero usage
+ # column 0 - expand ID for the group of fields
+ # column 1 - string of condition option letters associated with the group
+ # column 2 - field name in format as shown in select clauses
+ # column 3 - table name that the field belongs to
+ EXPAND = {
+ #   SHRTNM  EXPID  CNDSTR  FIELDNAME  TBLNAM
+     'D' : ["TIME", "dDmy"],
+     'M' : ["TIME", "dDmy"],
+     'Q' : ["TIME", "dDmy"],
+     'Y' : ["TIME", "dDmy"],
+
+     'E' : ["USER", "eco", "email", "wuser", "user"],
+     'O' : ["USER", "eco", "org_type", "wuser", "user"],
+     'N' : ["USER", "eco", "country", "wuser", "user"],
+
+     'R' : ["DSID", "fFsStT", "search.datasets.title", "search.datasets"],
+     'T' : ["DSID", "fFsStT", "dataset.dsid", "dataset"],
+     'P' : ["DSID", "fFsStT", "specialist", "dsowner"],
+
+     'F' : ["METHOD", "fM", "etype", "tdsusage"],
+     'W' : ["METHOD", "fM", "method", "tdsusage"]
+ }
+
+ # valid options for %params, a hash array of command line parameters
+ # a -- 1 to view all usage info available
+ # A -- number of records to return
+ # c -- array of specified country codes
+ # C -- a string of short field names for viewing usages
+ # d -- array of specified dates
+ # D -- date range, array of 1 or 2 dates in format of YYYY-MM-DD
+ # e -- array of specified email addresses
+ # E -- use given date or date range for email notice of data update
+ # f -- array of specified flags for end point types
+ # h -- for given emails, include their historical emails registered before
+ # H -- a string of report title to replace the default one
+ # i -- array of specified IP addresses
+ # I -- use given email IDs for email notice of data update
+ # L -- column delimiter for output
+ # m -- array of specified months
+ # M -- array of specified download methods
+ # o -- array of specified organization types
+ # O -- a string of short field names for sorting on
+ # q -- array of the specified quarters, normally combined with years
+ # s -- file size range, array of 1 or 2 sizes in units of MByte
+ # S -- array of login names of specialists who own the datasets
+ # t -- array of specified dataset names
+ # T -- dataset range, array of 1 or 2 dataset names
+ # U -- given unit for file or data sizes
+ # w -- generate view without totals
+ # y -- array of specified years
+ # z -- generate view including entries without usage
+
+ params = {}
+
+ # relationship between parameter options and short field names; an option is not
+ # related to a field name if it is not in keys %SNS
+ SNS = {
+     'c' : 'N', 'd' : 'D', 'D' : 'D', 'e' : 'E', 'f' : 'F', 'i' : 'I', 'm' : 'M',
+     'M' : 'W', 'o' : 'O', 'q' : 'Q', 's' : 'S', 'S' : 'P', 't' : 'T', 'T' : 'T', 'y' : 'Y'
+ }
+
+ tablenames = fieldnames = condition = ''
+ sfields = []
+ gfields = []
+ dfields = []
+ pgname = 'viewtdsusage'
+
+ #
+ # main function to run this program
+ #
+ def main():
+
+     PgDBI.view_dbinfo()
+     argv = sys.argv[1:]
+     inputs = []
+     option = 'C'   # default option
+
+     for arg in argv:
+         if re.match(r'^-.*$', arg):
+             curopt = arg[1:2]
+             if curopt and VUSG['OPTS'].find(curopt) > -1:
+                 if VUSG['NOPT'].find(option) > -1:
+                     params[option] = 1
+                 elif inputs:
+                     params[option] = inputs   # record input array
+                     inputs = []               # empty input array
+                 option = curopt               # start a new option
+             else:
+                 PgLOG.pglog(arg + ": Unknown Option", PgLOG.LGWNEX)
+         else:
+             val = arg
+             if val != '!':
+                 if option == 's':
+                     val = int(val)*1000000   # convert MBytes to Bytes
+                 elif option in SNS:
+                     sfld = SNS[option]
+                     if VUSG['SFLD'].find(sfld) > -1:
+                         if VUSG['UFLD'].find(sfld) > -1:
+                             val = arg.upper()   # in case not in upper case
+                         elif VUSG['LFLD'].find(sfld) > -1:
+                             val = arg.lower()   # in case not in lower case
+                         if option == 'c':
+                             val = PgView.get_country_name(val)
+                         elif option == 't' or option == 'T':
+                             val = PgUtil.format_dataset_id(val)   # add 'ds' if only numbers
+                         val = "'{}'".format(val)
+             inputs.append(val)
+
+     # record the last option
+     if VUSG['NOPT'].find(option) > -1:
+         params[option] = 1
+     elif inputs:
+         params[option] = inputs   # record input array
+
+     if not params:
+         PgLOG.show_usage(pgname)
+     else:
+         check_enough_options()
+
+     if 'o' not in params:
+         if 'e' not in params:
+             params['o'] = ['!', "'DSS'"]   # default to exclude 'DSS' for organization
+     elif params['o'][0] == "'ALL'":
+         del params['o']
+
+     usgtable = "tdsusage"
+     build_query_strings(usgtable)   # build tablenames, fieldnames, and conditions
+     records = PgDBI.pgmget(tablenames, fieldnames, condition, PgLOG.UCLWEX)
+     if not records: PgLOG.pglog("No Usage Found For Given Conditions", PgLOG.LGWNEX)
+     totals = None if 'w' in params else {}
+     if dfields or totals != None:
+         records = PgView.compact_hash_groups(records, gfields, sfields, dfields, totals)
+     if 'z' in params: records = expand_records(records)
+     ostr = params['O'][0] if 'O' in params else params['C'][0]
+     records = PgView.order_records(records, ostr.replace('X', ''))
+     PgView.simple_output(params, FLDS, records, totals)
+
+     PgLOG.pgexit(0)
+
+ #
+ # check if enough information is entered on the command line to generate a view/report; exit if not
+ #
+ def check_enough_options():
+
+     cols = params['C'][0] if 'C' in params else 'X'
+     if cols == 'X': PgLOG.pglog("{}: missing field names '{}'".format(pgname, VUSG['SNMS']), PgLOG.LGWNEX)
+
+     if cols.find('Q') > -1 and cols.find('Y') < 0:   # add Y if Q included
+         cols = re.sub('Q', 'YQ', cols)
+         params['C'][0] = cols
+
+     for sn in cols:
+         if sn == 'X': continue   # do not process INDEX field
+         if VUSG['SNMS'].find(sn) < 0:
+             PgLOG.pglog("{}: Field {} must be in field names '{}X'".format(pgname, sn, VUSG['SNMS']), PgLOG.LGWNEX)
+         if 'z' not in params or sn in EXPAND: continue
+         fld = FLDS[sn]
+         if fld[6] != 'G': continue
+         PgLOG.pglog("{}: cannot show zero usage for unexpandable field {} - {}".format(pgname, sn, fld[0]), PgLOG.LGWNEX)
+
+     if 'E' in params or 'I' in params:
+         if 'z' in params:
+             PgLOG.pglog(pgname + ": option -z and -E/-I can not be present at the same time", PgLOG.LGWNEX)
+         elif 't' not in params or len(params['t']) > 1:
+             PgLOG.pglog(pgname + ": specify one dataset for viewing usage of notified users", PgLOG.LGWNEX)
+         elif 'E' in params and 'I' in params:
+             PgLOG.pglog(pgname + ": option -E and -I can not be present at the same time", PgLOG.LGWNEX)
+
+     for opt in params:
+         if VUSG['CNDS'].find(opt) > -1: return
+     PgLOG.pglog("{}: missing condition options '{}'".format(pgname, VUSG['CNDS']), PgLOG.LGWNEX)
+
+ #
+ # process parameter options to build tds query strings
+ # global variables are used directly; nothing is passed in or returned
+ #
+ def build_query_strings(usgtable):
+
+     # initialize query strings
+     global condition, fieldnames, tablenames
+     joins = groupnames = ''
+     tablenames = usgtable
+     cols = params['C'][0]
+
+     if 'U' in params:   # reset units for file and read sizes
+         if cols.find('B') > -1: FLDS['B'] = PgView.set_data_unit(FLDS['B'], params['U'][0], "sum(size)")
+         if cols.find('S') > -1: FLDS['S'] = PgView.set_data_unit(FLDS['S'], params['U'][0], "size")
+
+     if 'e' in params and 'h' in params: params['e'] = PgView.include_historic_emails(params['e'], 3)
+
+     for opt in params:
+         if opt == 'C':   # build field, table and group names
+             for sn in cols:
+                 if sn == 'X': continue   # do not process INDEX field
+                 fld = FLDS[sn]
+                 if fieldnames: fieldnames += ', '
+                 fieldnames += "{} {}".format(fld[1], sn)   # add to field name string
+                 (tablenames, joins) = PgView.join_query_tables(fld[3], tablenames, joins, usgtable)
+                 if fld[6] == 'S':
+                     sfields.append(sn)
+                 else:
+                     if groupnames: groupnames += ', '
+                     groupnames += sn   # add to group name string
+                     if fld[6] == 'D':
+                         dfields.append(sn)
+                     else:
+                         gfields.append(sn)
+         elif opt == 'O':
+             continue   # order records later
+         elif VUSG['CNDS'].find(opt) > -1:
+             if VUSG['NOPT'].find(opt) > -1: continue
+             sn = SNS[opt]
+             fld = FLDS[sn]
+             # build having and where condition strings
+             cnd = PgView.get_view_condition(opt, sn, fld, params, VUSG)
+             if cnd:
+                 if condition: condition += ' AND '
+                 condition += cnd
+                 (tablenames, joins) = PgView.join_query_tables(fld[3], tablenames, joins, usgtable)
+
+     # append joins, group by, order by, and having strings to condition string
+     if 'E' in params or 'I' in params:
+         (tablenames, joins) = PgView.join_query_tables("emreceive", tablenames, joins, usgtable)
+     if joins:
+         if condition:
+             condition = "{} AND {}".format(joins, condition)
+         else:
+             condition = joins
+     if 'E' in params or 'I' in params:
+         condition += PgView.notice_condition(params['E'], None, params['t'][0])
+     if groupnames and sfields: condition += " GROUP BY " + groupnames
+
+
+ def expand_records(records):
+
+     recs = PgView.expand_query("TIME", records, params, EXPAND)
+
+     trecs = PgView.expand_query("USER", records, params, EXPAND, VUSG, SNS, FLDS)
+     recs = PgUtil.crosshash(recs, trecs)
+
+     trecs = PgView.expand_query("DSID", records, params, EXPAND, VUSG, SNS, FLDS)
+     recs = PgUtil.crosshash(recs, trecs)
+
+     trecs = PgView.expand_query("METHOD", records, params, EXPAND, VUSG, SNS, FLDS)
+     recs = PgUtil.crosshash(recs, trecs)
+
+     return PgUtil.joinhash(records, recs, 0, 1)
+
+ #
+ # call main() to start program
+ #
+ if __name__ == "__main__": main()
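
The option loop in main() above collects values into the params dict keyed by single-letter option, and a bare '!' value marks an excluding condition for the option that follows it. A minimal standalone sketch of that parsing convention (hypothetical and simplified from the code above; it drops the per-option value formatting and uses plain membership tests instead of str.find):

    import re

    OPTS = 'AabcCdDeEfhHiLmMnoOqsStTUwyz'   # options accepted by viewtdsusage
    NOPT = 'abhnwz'                         # stand-alone options that take no values

    def parse(argv):
        params, inputs, option = {}, [], 'C'   # 'C' is the default option
        for arg in argv:
            if re.match(r'^-.*$', arg):
                curopt = arg[1:2]
                if not curopt or curopt not in OPTS:
                    raise SystemExit(arg + ": Unknown Option")
                if option in NOPT:
                    params[option] = 1
                elif inputs:
                    params[option] = inputs   # close out the previous option
                    inputs = []
                option = curopt
            else:
                inputs.append(arg)            # '!' stays in the list to mark exclusion
        if option in NOPT:
            params[option] = 1
        elif inputs:
            params[option] = inputs
        return params

    # parse(['XEOHB', '-y', '2005', '-o', '!', 'DSS'])
    # -> {'C': ['XEOHB'], 'y': ['2005'], 'o': ['!', 'DSS']}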
rda_python_metrics/viewtdsusage.usg
@@ -0,0 +1,191 @@
+
+ View usage information of the THREDDS Data Server (TDS) from information
+ stored in MySQL database 'RDADB'.
+
+ Usage: viewtdsusage [-C] ColumnNames [-O OrderColumnNames] [-a] \
+        [-A RowLimit] [-c CountryCodes] [-d DateList] \
+        [-D StartDate [EndDate]] [-e EMailList] [-h] \
+        [-E StartNoticeDate [EndNoticeDate]] \
+        [-f EndTypeFlags] [-i IPAddresses] \
+        [-I EmailIDList] [-m MonthList] [-M AccessMethods] \
+        [-N MinNumberRead [MaxNumberRead]] \
+        [-o OrganizationTypes] \
+        [-q QuarterList] [-s MinSize [MaxSize]] \
+        [-S SpecialistLoginNames] [-t DatasetList] \
+        [-T MinDataset [MaxDataset]] [-y YearList] \
+        [-H Title] [-L Delimiter] [-U SizeUnit] \
+        [-w] [-z] [> OutputFileName] [| lp -d PrinterName]
+
+ Specify [-C] ColumnNames, refer to the Option -C section for a detailed
+ description, and choose at least one of the condition options, -a, -c,
+ -d, -D, -e, -E, -f, -i, -I, -m, -M, -N, -o, -q, -s, -S, -t, -T, and -y, to
+ run this application.
+
+ For all condition options, except option -a, an '!' sign can be added
+ between an option flag and its option values to get an excluding
+ condition. For example, choose '-o ! OrganizationTypes' to gather
+ data usage by users from organization types other than the ones given in
+ OrganizationTypes. Refer to the example given at the end of this help
+ document for how to select an excluding condition.
+
+ String condition options, -c, -e, -f, -i, -M, -o, -S, and -t, allow wildcard
+ inputs. '%' matches any number of characters and '_' matches any one
+ character. Refer to the example given at the end of this help document
+ for how to use wildcards for string condition options.
+
+ Output of this application defaults to page format with a page
+ header on each page. A page header includes the main title, sub titles and
+ column titles according to which column names and options are selected,
+ as well as the page number and report date. If the output is used directly
+ as input to other applications, add option -w to remove the page header
+ and show only the column titles and the usage information.
+
+
+ Column Options:
+  - Option -C, the ColumnNames must be present to run this application.
+    The flag -C can be omitted if it is the first parameter option on
+    the command line. The ColumnNames is a string that includes the column
+    names listed below:
+
+    COLUMN - COLUMN    - COLUMN
+    NAME   - TITLE     - DESCRIPTION
+    GroupColumns:
+    D*- DATE      - format as YYYY-MM-DD, for example 2004-04-25
+    E*- EMAIL     - user email address
+    F*- EF        - end type flag, Files or Aggregations
+    I*- IP        - user IP address
+    M*- MONTH     - format as YYYY-MM, for example 2004-04
+    N*- COUNTRY   - country codes the users are from
+    O*- ORGTYPE   - organization types (DSS, NCAR, UNIV and OTHER)
+    P*- DSOWNER   - login names of specialists who own the datasets
+    Q*- QUARTER   - quarter of year, 1, 2, 3, or 4
+    R*- DSTITLE   - dataset titles
+    S - BYTESIZE  - size of data read each time, default to Bytes
+    T*- DATASET   - dataset ID, for example d540001
+    W*- METHOD    - access methods
+    Y*- YEAR      - format as YYYY, for example 2004
+
+    * - field names that can be processed with zero usage
+    SummaryColumns:
+    A - DSCOUNT   - number of datasets in given GroupColumns
+    B - MBYTEREAD - data sizes, default MB, read by given GroupColumns
+    C - #UNIQUSER - number of unique users in given GroupColumns
+    U - #UNIQIP   - number of unique IP addresses in given GroupColumns
+    H - #ACCESS   - number of reads by given GroupColumns
+
+    IndexColumn:
+    X - INDEX     - index of line; it should be the first column
+
+    The column names are used to build up the string of ColumnNames, while
+    their associated column titles are shown in the view/report output of
+    this application. The display order of the column titles is
+    determined by the order of the column names in the ColumnNames
+    string. At least one of the group and summary columns must be
+    selected in the ColumnNames string to generate a usage
+    view/report;
+
+    For example, choose '-C EMB' to display column titles of EMAIL,
+    MONTH and MBYTEREAD, in the first, second and third columns
+    respectively, for numbers of MBytes of data read by each user
+    in each month;
+
+  - Option -O, sort data usage information in ascending or descending
+    order based on the column names specified in the OrderColumnNames
+    string. These column names must be in the selected [-C]
+    ColumnNames string. If a column name is in upper case, its
+    associated column is sorted in ascending order, and a lower
+    case means sorting in descending order;
+
+
+ Condition Options:
+  - Option -a, for all usage in table 'tdsusage';
+
+  - Option -A, gives a row limit for querying;
+
+  - Option -c, for files read by users from given country codes;
+
+  - Option -d, for data read on given dates, in format YYYY-MM-DD;
+
+  - Option -D, for data read between two given dates, each date
+    is in format YYYY-MM-DD. Omit EndDate for no upper limit;
+
+  - Option -e, for data read by users with given email addresses;
+
+  - Option -E, for data read by users who have been notified of
+    data updates of a specified dataset between two given dates,
+    each date is in format YYYY-MM-DD. Omit EndNoticeDate for
+    no upper limit;
+
+  - Option -f, for data read from flags of end point types, Files or
+    Aggregations;
+
+  - Option -h, works with Option -e to include historical user emails
+    registered before;
+
+  - Option -i, for data read from machines with given IP addresses;
+
+  - Option -m, for data read in given months, in format YYYY-MM;
+
+  - Option -M, for data read via given access methods;
+
+  - Option -N, for numbers of reads by each group between
+    MinNumberRead and MaxNumberRead. Omit MaxNumberRead for no
+    upper limit;
+
+  - Option -o, for data read by users from given organization types.
+    It defaults to -o ! DSS to exclude usage from DSS specialists;
+    set it to ALL to include all organization types;
+
+  - Option -q, for data read in given quarters;
+
+  - Option -s, for data sizes, in units of MByte, between MinSize and MaxSize.
+    Omit MaxSize for no upper limit;
+
+  - Option -S, for login names of specialists who own the datasets;
+
+  - Option -t, for data associated with given dataset names;
+
+  - Option -T, for data associated with datasets between
+    MinDataset and MaxDataset. Omit MaxDataset for no upper limit.
+    For example, -T d540000 d550009, for dataset numbers d540000-d550009;
+
+  - Option -y, for data read in given years, in format YYYY;
+
+
+ Miscellaneous Options:
+  - Option -w, view data usage in a simple format without totals;
+
+  - Option -z, include datasets without usage;
+
+  - Option -H, use the given report title to replace the default one;
+
+  - Option -L, use the given delimiter for output, instead of the default spaces;
+
+  - Option -U, show data sizes in the given unit SizeUnit [BKMG]:
+    B - Byte, K - KiloByte, M - MegaByte, and G - GigaByte;
+
+  - Option > OutputFileName, redirect output into an output file,
+    for example, tdsusage.out, instead of viewing on screen directly;
+
+  - Option | lp -d PrinterName, redirect output to the printer PrinterName.
+    Replace PrinterName with lj100 to print through the DSS LaserJet printer.
+
+
+ For example:
+    To view annual data usage in year 2005 with columns INDEX(X),
+    EMAIL(E), ORGTYPE(O), #ACCESS(H), and MBYTEREAD(B), ordered by ORGTYPE
+    ascending and MBYTEREAD(B) descending, the command line should be:
+
+       viewtdsusage XEOHB -y 2005 -O Ob
+
+    For usage by users not in Organization 'DSS', out of the usage
+    gathered above, the command line should be:
+
+       viewtdsusage XEOHB -y 2005 -o ! DSS -O Ob
+
+    To redirect the previous output to a file named tdsusage.out:
+
+       viewtdsusage XEOHB -y 2005 -o ! DSS -O Ob > tdsusage.out
+
+    Then you can view the file or print it as a report.
+
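
The wildcard rules described above ('%' for any number of characters, '_' for a single character) apply to the string condition options such as -e and -t. A hypothetical invocation combining a wildcard with a month condition (the email pattern is an illustration only):

    viewtdsusage XEMB -m 2005-01 -e '%.ucar.edu'

This would report per-user MByte totals for January 2005, restricted to users whose email addresses end in '.ucar.edu'.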