ltbams 0.9.9__py3-none-any.whl → 1.0.2a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ams/__init__.py +4 -11
- ams/_version.py +3 -3
- ams/cases/5bus/pjm5bus_demo.xlsx +0 -0
- ams/cases/5bus/pjm5bus_jumper.xlsx +0 -0
- ams/cases/5bus/pjm5bus_uced.json +1062 -0
- ams/cases/5bus/pjm5bus_uced.xlsx +0 -0
- ams/cases/5bus/pjm5bus_uced_esd1.xlsx +0 -0
- ams/cases/5bus/pjm5bus_uced_ev.xlsx +0 -0
- ams/cases/ieee123/ieee123.xlsx +0 -0
- ams/cases/ieee123/ieee123_regcv1.xlsx +0 -0
- ams/cases/ieee14/ieee14.json +1166 -0
- ams/cases/ieee14/ieee14.raw +92 -0
- ams/cases/ieee14/ieee14_conn.xlsx +0 -0
- ams/cases/ieee14/ieee14_uced.xlsx +0 -0
- ams/cases/ieee39/ieee39.xlsx +0 -0
- ams/cases/ieee39/ieee39_uced.xlsx +0 -0
- ams/cases/ieee39/ieee39_uced_esd1.xlsx +0 -0
- ams/cases/ieee39/ieee39_uced_pvd1.xlsx +0 -0
- ams/cases/ieee39/ieee39_uced_vis.xlsx +0 -0
- ams/cases/matpower/benchmark.json +1594 -0
- ams/cases/matpower/case118.m +787 -0
- ams/cases/matpower/case14.m +129 -0
- ams/cases/matpower/case300.m +1315 -0
- ams/cases/matpower/case39.m +205 -0
- ams/cases/matpower/case5.m +62 -0
- ams/cases/matpower/case_ACTIVSg2000.m +9460 -0
- ams/cases/npcc/npcc.m +644 -0
- ams/cases/npcc/npcc_uced.xlsx +0 -0
- ams/cases/pglib/pglib_opf_case39_epri__api.m +243 -0
- ams/cases/wecc/wecc.m +714 -0
- ams/cases/wecc/wecc_uced.xlsx +0 -0
- ams/cli.py +6 -0
- ams/core/__init__.py +2 -0
- ams/core/documenter.py +652 -0
- ams/core/matprocessor.py +782 -0
- ams/core/model.py +330 -0
- ams/core/param.py +322 -0
- ams/core/service.py +918 -0
- ams/core/symprocessor.py +224 -0
- ams/core/var.py +59 -0
- ams/extension/__init__.py +5 -0
- ams/extension/eva.py +401 -0
- ams/interface.py +1085 -0
- ams/io/__init__.py +133 -0
- ams/io/json.py +82 -0
- ams/io/matpower.py +406 -0
- ams/io/psse.py +6 -0
- ams/io/pypower.py +103 -0
- ams/io/xlsx.py +80 -0
- ams/main.py +81 -4
- ams/models/__init__.py +24 -0
- ams/models/area.py +40 -0
- ams/models/bus.py +52 -0
- ams/models/cost.py +169 -0
- ams/models/distributed/__init__.py +3 -0
- ams/models/distributed/esd1.py +71 -0
- ams/models/distributed/ev.py +60 -0
- ams/models/distributed/pvd1.py +67 -0
- ams/models/group.py +231 -0
- ams/models/info.py +26 -0
- ams/models/line.py +238 -0
- ams/models/renewable/__init__.py +5 -0
- ams/models/renewable/regc.py +119 -0
- ams/models/reserve.py +94 -0
- ams/models/shunt.py +14 -0
- ams/models/static/__init__.py +2 -0
- ams/models/static/gen.py +165 -0
- ams/models/static/pq.py +61 -0
- ams/models/timeslot.py +69 -0
- ams/models/zone.py +49 -0
- ams/opt/__init__.py +12 -0
- ams/opt/constraint.py +175 -0
- ams/opt/exprcalc.py +127 -0
- ams/opt/expression.py +188 -0
- ams/opt/objective.py +174 -0
- ams/opt/omodel.py +432 -0
- ams/opt/optzbase.py +192 -0
- ams/opt/param.py +156 -0
- ams/opt/var.py +233 -0
- ams/pypower/__init__.py +8 -0
- ams/pypower/_compat.py +9 -0
- ams/pypower/core/__init__.py +8 -0
- ams/pypower/core/pips.py +894 -0
- ams/pypower/core/ppoption.py +244 -0
- ams/pypower/core/ppver.py +18 -0
- ams/pypower/core/solver.py +2451 -0
- ams/pypower/eps.py +6 -0
- ams/pypower/idx.py +174 -0
- ams/pypower/io.py +604 -0
- ams/pypower/make/__init__.py +11 -0
- ams/pypower/make/matrices.py +665 -0
- ams/pypower/make/pdv.py +506 -0
- ams/pypower/routines/__init__.py +7 -0
- ams/pypower/routines/cpf.py +513 -0
- ams/pypower/routines/cpf_callbacks.py +114 -0
- ams/pypower/routines/opf.py +1803 -0
- ams/pypower/routines/opffcns.py +1946 -0
- ams/pypower/routines/pflow.py +852 -0
- ams/pypower/toggle.py +1098 -0
- ams/pypower/utils.py +293 -0
- ams/report.py +212 -50
- ams/routines/__init__.py +23 -0
- ams/routines/acopf.py +117 -0
- ams/routines/cpf.py +65 -0
- ams/routines/dcopf.py +241 -0
- ams/routines/dcpf.py +209 -0
- ams/routines/dcpf0.py +196 -0
- ams/routines/dopf.py +150 -0
- ams/routines/ed.py +312 -0
- ams/routines/pflow.py +255 -0
- ams/routines/pflow0.py +113 -0
- ams/routines/routine.py +1033 -0
- ams/routines/rted.py +519 -0
- ams/routines/type.py +160 -0
- ams/routines/uc.py +376 -0
- ams/shared.py +63 -9
- ams/system.py +61 -22
- ams/utils/__init__.py +3 -0
- ams/utils/misc.py +77 -0
- ams/utils/paths.py +257 -0
- docs/Makefile +21 -0
- docs/make.bat +35 -0
- docs/source/_templates/autosummary/base.rst +5 -0
- docs/source/_templates/autosummary/class.rst +35 -0
- docs/source/_templates/autosummary/module.rst +65 -0
- docs/source/_templates/autosummary/module_toctree.rst +66 -0
- docs/source/api.rst +102 -0
- docs/source/conf.py +203 -0
- docs/source/examples/index.rst +34 -0
- docs/source/genmodelref.py +61 -0
- docs/source/genroutineref.py +47 -0
- docs/source/getting_started/copyright.rst +20 -0
- docs/source/getting_started/formats/index.rst +20 -0
- docs/source/getting_started/formats/matpower.rst +183 -0
- docs/source/getting_started/formats/psse.rst +46 -0
- docs/source/getting_started/formats/pypower.rst +223 -0
- docs/source/getting_started/formats/xlsx.png +0 -0
- docs/source/getting_started/formats/xlsx.rst +23 -0
- docs/source/getting_started/index.rst +76 -0
- docs/source/getting_started/install.rst +234 -0
- docs/source/getting_started/overview.rst +26 -0
- docs/source/getting_started/testcase.rst +45 -0
- docs/source/getting_started/verification.rst +13 -0
- docs/source/images/curent.ico +0 -0
- docs/source/images/dcopf_time.png +0 -0
- docs/source/images/sponsors/CURENT_Logo_NameOnTrans.png +0 -0
- docs/source/images/sponsors/CURENT_Logo_Transparent.png +0 -0
- docs/source/images/sponsors/CURENT_Logo_Transparent_Name.png +0 -0
- docs/source/images/sponsors/doe.png +0 -0
- docs/source/index.rst +108 -0
- docs/source/modeling/example.rst +159 -0
- docs/source/modeling/index.rst +17 -0
- docs/source/modeling/model.rst +210 -0
- docs/source/modeling/routine.rst +122 -0
- docs/source/modeling/system.rst +51 -0
- docs/source/release-notes.rst +398 -0
- ltbams-1.0.2a1.dist-info/METADATA +210 -0
- ltbams-1.0.2a1.dist-info/RECORD +188 -0
- {ltbams-0.9.9.dist-info → ltbams-1.0.2a1.dist-info}/WHEEL +1 -1
- ltbams-1.0.2a1.dist-info/top_level.txt +3 -0
- tests/__init__.py +0 -0
- tests/test_1st_system.py +33 -0
- tests/test_addressing.py +40 -0
- tests/test_andes_mats.py +61 -0
- tests/test_case.py +266 -0
- tests/test_cli.py +34 -0
- tests/test_export_csv.py +89 -0
- tests/test_group.py +83 -0
- tests/test_interface.py +216 -0
- tests/test_io.py +32 -0
- tests/test_jumper.py +27 -0
- tests/test_known_good.py +267 -0
- tests/test_matp.py +437 -0
- tests/test_model.py +54 -0
- tests/test_omodel.py +119 -0
- tests/test_paths.py +22 -0
- tests/test_report.py +251 -0
- tests/test_repr.py +21 -0
- tests/test_routine.py +178 -0
- tests/test_rtn_dcopf.py +101 -0
- tests/test_rtn_dcpf.py +77 -0
- tests/test_rtn_ed.py +279 -0
- tests/test_rtn_pflow.py +219 -0
- tests/test_rtn_rted.py +273 -0
- tests/test_rtn_uc.py +248 -0
- tests/test_service.py +73 -0
- ltbams-0.9.9.dist-info/LICENSE +0 -692
- ltbams-0.9.9.dist-info/METADATA +0 -859
- ltbams-0.9.9.dist-info/RECORD +0 -14
- ltbams-0.9.9.dist-info/top_level.txt +0 -1
- {ltbams-0.9.9.dist-info → ltbams-1.0.2a1.dist-info}/entry_points.txt +0 -0
ams/pypower/utils.py
ADDED
@@ -0,0 +1,293 @@
|
|
1
|
+
"""
|
2
|
+
PYPOWER utility functions.
|
3
|
+
"""
|
4
|
+
import logging # NOQA
|
5
|
+
from copy import deepcopy # NOQA
|
6
|
+
|
7
|
+
import numpy as np # NOQA
|
8
|
+
from numpy import flatnonzero as find # NOQA
|
9
|
+
import scipy.sparse as sp # NOQA
|
10
|
+
from scipy.sparse import csr_matrix as c_sparse # NOQA
|
11
|
+
|
12
|
+
from ams.pypower.idx import IDX # NOQA
|
13
|
+
|
14
|
+
logger = logging.getLogger(__name__)
|
15
|
+
|
16
|
+
|
17
|
+
def bustypes(bus, gen):
    """
    Build index arrays of each bus type (REF, PV, PQ).

    Generators whose status is "out-of-service" are treated as PQ buses
    with zero generation, regardless of the Pg/Qg values in ``gen``.
    Both ``bus`` and ``gen`` must already use internal consecutive bus
    numbering.

    Parameters
    ----------
    bus : ndarray
        Bus data.
    gen : ndarray
        Generator data.

    Returns
    -------
    ref : ndarray
        Index list of reference (REF) buses.
    pv : ndarray
        Index list of PV buses.
    pq : ndarray
        Index list of PQ buses.

    Author
    ------
    Ray Zimmerman (PSERC Cornell)
    """
    nbus = bus.shape[0]
    ngen = gen.shape[0]

    # Bus-generator connection matrix: entry (i, j) is 1 when generator j
    # is ON at bus i.
    conn = c_sparse((gen[:, IDX.gen.GEN_STATUS] > 0,
                     (gen[:, IDX.gen.GEN_BUS], range(ngen))), (nbus, ngen))
    # True for buses with at least one in-service generator attached
    bus_gen_status = (conn * np.ones(ngen, int)).astype(bool)

    # Classify buses: REF/PV require an in-service generator; everything
    # else (including buses whose generators all went offline) is PQ.
    is_ref = (bus[:, IDX.bus.BUS_TYPE] == IDX.bus.REF) & bus_gen_status
    is_pv = (bus[:, IDX.bus.BUS_TYPE] == IDX.bus.PV) & bus_gen_status
    is_pq = (bus[:, IDX.bus.BUS_TYPE] == IDX.bus.PQ) | ~bus_gen_status

    ref = find(is_ref)
    pv = find(is_pv)
    pq = find(is_pq)

    # If the reference bus was lost (e.g. its generator was shut down),
    # promote the first PV bus to serve as the new reference.
    if (len(ref) == 0) & (len(pv) > 0):
        ref = np.zeros(1, dtype=int)
        ref[0] = pv[0]
        pv = pv[1:]

    return ref, pv, pq
|
66
|
+
|
67
|
+
|
68
|
+
def sub2ind(shape, I, J, row_major=False):
    """
    Return the linear indices of subscripts.

    Parameters
    ----------
    shape : tuple
        Shape of the grid or matrix.
    I : int or ndarray
        Row subscript(s); wrapped modulo ``shape[0]``.
    J : int or ndarray
        Column subscript(s); wrapped modulo ``shape[1]``.
    row_major : bool, optional
        If True, uses row-major order (default is False, using
        column-major order).

    Returns
    -------
    ind : ndarray
        Linear index (or indices) corresponding to the subscripts (I, J).
    """
    # np.asarray lets plain-int subscripts work too; the original crashed
    # on scalars because a Python int has no .astype.
    I = np.asarray(I)
    J = np.asarray(J)
    if row_major:
        ind = (I % shape[0]) * shape[1] + (J % shape[1])
    else:
        ind = (J % shape[1]) * shape[0] + (I % shape[0])

    return ind.astype(int)
|
94
|
+
|
95
|
+
|
96
|
+
def feval(func, *args, **kw_args):
    """
    Evaluate the function named ``func`` with the given positional and
    keyword arguments.

    Parameters
    ----------
    func : str
        Name of the function to evaluate.
    *args : list
        Positional arguments for the function.
    **kw_args : dict
        Keyword arguments for the function.

    Returns
    -------
    result : any
        Result of evaluating the function.
    """
    # NOTE(review): `eval` resolves the name in this module's scope; never
    # pass untrusted strings here.
    target = eval(func)
    return target(*args, **kw_args)
|
116
|
+
|
117
|
+
|
118
|
+
def have_fcn(name):
    """
    Check whether a Python module with the given name can be imported.

    Parameters
    ----------
    name : str
        Name of the Python module.

    Returns
    -------
    bool
        True if the module exists, False otherwise.
    """
    # EAFP: attempt the import and report failure instead of probing paths.
    try:
        __import__(name)
    except ImportError:
        return False
    return True
|
137
|
+
|
138
|
+
|
139
|
+
def get_reorder(A, idx, dim=0):
    """
    Return a copy of ``A`` with one of its dimensions indexed::

        B = get_reorder(A, idx, dim)

    Returns A[:, ..., :, idx, :, ..., :], where dim determines
    in which dimension to place the idx.

    Parameters
    ----------
    A : ndarray
        1-D or 2-D array to index.
    idx : array_like
        Indices to select along dimension ``dim``.
    dim : int, optional
        Dimension along which to apply ``idx`` (0 or 1; only used for
        2-D input).

    Returns
    -------
    ndarray
        Copy of the selected slice.

    Raises
    ------
    ValueError
        If ``dim`` is not 0 or 1, or ``A`` is not 1-D or 2-D.

    @author: Ray Zimmerman (PSERC Cornell)
    """
    ndims = np.ndim(A)
    if ndims == 1:
        B = A[idx].copy()
    elif ndims == 2:
        if dim == 0:
            B = A[idx, :].copy()
        elif dim == 1:
            B = A[:, idx].copy()
        else:
            raise ValueError('dim (%d) may be 0 or 1' % dim)
    else:
        # BUG FIX: the original formatted this message with `dim`, which
        # reported a misleading value; the offending quantity is `ndims`.
        raise ValueError('number of dimensions (%d) may be 1 or 2' % ndims)

    return B
|
164
|
+
|
165
|
+
|
166
|
+
def set_reorder(A, B, idx, dim=0):
    """
    Assign ``B`` into a copy of ``A`` with one of A's dimensions indexed.

    @return: A after doing A(:, ..., :, IDX, :, ..., :) = B
    where DIM determines in which dimension to place the IDX.

    Parameters
    ----------
    A : ndarray
        1-D or 2-D destination array (not modified; a copy is returned).
    B : ndarray
        Values to assign; the copy of ``A`` is cast to ``B.dtype`` first.
    idx : array_like
        Indices along dimension ``dim`` to assign into.
    dim : int, optional
        Dimension along which to apply ``idx`` (0 or 1).

    Raises
    ------
    ValueError
        If ``dim`` is not 0 or 1, or ``A`` is not 1-D or 2-D.

    @see: L{get_reorder}

    @author: Ray Zimmerman (PSERC Cornell)
    """
    # Work on a copy cast to B's dtype so the assignment cannot truncate
    # (e.g. floats written into an int array) and the caller's A is intact.
    A = A.copy()
    ndims = np.ndim(A)
    A = A.astype(B.dtype)
    if ndims == 1:
        A[idx] = B
    elif ndims == 2:
        if dim == 0:
            A[idx, :] = B
        elif dim == 1:
            A[:, idx] = B
        else:
            raise ValueError('dim (%d) may be 0 or 1' % dim)
    else:
        # BUG FIX: the original formatted this message with `dim`; the
        # offending quantity is `ndims`.
        raise ValueError('number of dimensions (%d) may be 1 or 2' % ndims)

    return A
|
193
|
+
|
194
|
+
|
195
|
+
def isload(gen):
    """
    Check for dispatchable loads.

    Parameters
    ----------
    gen: np.ndarray
        The generator matrix.

    Returns
    -------
    array
        A column vector of 1's and 0's. The 1's correspond to rows of the
        C{gen} matrix which represent dispatchable loads. The current test is
        C{Pmin < 0 and Pmax == 0}. This may need to be revised to allow sensible
        specification of both elastic demand and pumped storage units.
    """
    # A dispatchable load is modeled as a "generator" that can only absorb
    # power: negative Pmin with Pmax pinned at zero.
    pmin_negative = gen[:, IDX.gen.PMIN] < 0
    pmax_zero = gen[:, IDX.gen.PMAX] == 0
    return pmin_negative & pmax_zero
|
213
|
+
|
214
|
+
|
215
|
+
def hasPQcap(gen, hilo='B'):
    """
    Check for P-Q capability curve constraints.

    Parameters
    ----------
    gen: np.ndarray
        The generator matrix.
    hilo : str, optional
        If 'U' this function returns C{True} only for rows corresponding to
        generators that require the upper constraint on Q.
        If 'L', only for those requiring the lower constraint.
        If not specified or has any other value it returns true for rows
        corresponding to gens that require either or both of the constraints.

    Returns
    -------
    array
        A column vector of 1's and 0's. The 1's correspond to rows of the
        C{gen} matrix which correspond to generators which have defined a
        capability curve (with sloped upper and/or lower bound on Q) and require
        that additional linear constraints be added to the OPF.

    Notes
    -----
    The C{gen} matrix in version 2 of the PYPOWER case format includes columns
    for specifying a P-Q capability curve for a generator defined as the
    intersection of two half-planes and the box constraints on P and Q.
    The two half planes are defined respectively as the area below the line
    connecting (Pc1, Qc1max) and (Pc2, Qc2max) and the area above the line
    connecting (Pc1, Qc1min) and (Pc2, Qc2min).

    It is smart enough to return C{True} only if the corresponding linear
    constraint is not redundant w.r.t the box constraints.
    """
    # sanity checks on the capability curve data
    if np.any(gen[:, IDX.gen.PC1] > gen[:, IDX.gen.PC2]):
        logger.debug('hasPQcap: Pc1 > Pc2')
    if np.any(gen[:, IDX.gen.QC2MAX] > gen[:, IDX.gen.QC1MAX]):
        logger.debug('hasPQcap: Qc2max > Qc1max')
    if np.any(gen[:, IDX.gen.QC2MIN] < gen[:, IDX.gen.QC1MIN]):
        logger.debug('hasPQcap: Qc2min < Qc1min')

    L = np.zeros(gen.shape[0], bool)
    U = np.zeros(gen.shape[0], bool)
    # BUG FIX: np.nonzero returns a tuple, so `gen[k, col]` below was invalid
    # advanced indexing. Use `find` (flatnonzero) for a flat index array,
    # matching the original PYPOWER implementation.
    k = find(gen[:, IDX.gen.PC1] != gen[:, IDX.gen.PC2])

    if hilo != 'U':  # include lower constraint
        # interpolate the sloped lower Q bound at Pmax; constrain only if it
        # is tighter than the box bound Qmin
        Qmin_at_Pmax = gen[k, IDX.gen.QC1MIN] + (gen[k, IDX.gen.PMAX] - gen[k, IDX.gen.PC1]) * (
            gen[k, IDX.gen.QC2MIN] - gen[k, IDX.gen.QC1MIN]) / (gen[k, IDX.gen.PC2] - gen[k, IDX.gen.PC1])
        L[k] = Qmin_at_Pmax > gen[k, IDX.gen.QMIN]

    if hilo != 'L':  # include upper constraint
        # interpolate the sloped upper Q bound at Pmax; constrain only if it
        # is tighter than the box bound Qmax
        Qmax_at_Pmax = gen[k, IDX.gen.QC1MAX] + (gen[k, IDX.gen.PMAX] - gen[k, IDX.gen.PC1]) * (
            gen[k, IDX.gen.QC2MAX] - gen[k, IDX.gen.QC1MAX]) / (gen[k, IDX.gen.PC2] - gen[k, IDX.gen.PC1])
        U[k] = Qmax_at_Pmax < gen[k, IDX.gen.QMAX]

    return L | U
|
273
|
+
|
274
|
+
|
275
|
+
def fairmax(x):
    """
    Same as built-in C{max}, except breaks ties randomly.

    Takes a vector as an argument and returns the same output as the
    built-in function C{max} with two output parameters, except that
    where the maximum value occurs at more than one position in the
    vector, the index is chosen randomly from these positions as opposed
    to just choosing the first occurrence.

    Parameters
    ----------
    x : array_like
        Vector of values.

    Returns
    -------
    val : scalar
        The maximum value of ``x``.
    idx : int
        Index of the maximum, chosen uniformly at random among all
        positions attaining it.

    @see: C{max}

    @author: Ray Zimmerman (PSERC Cornell)
    """
    x = np.asarray(x)
    val = x.max()  # find max value
    # BUG FIX: the original used `np.nonzero(...)` as if it returned an
    # array (it returns a tuple), called that tuple like a function, and
    # called the `np.random` module itself. Select a random tied index
    # with np.random.randint instead.
    candidates = np.nonzero(x == val)[0]  # all positions attaining the max
    idx = candidates[np.random.randint(len(candidates))]
    return val, idx
|
ams/report.py
CHANGED
@@ -4,6 +4,7 @@ Module for report generation.
|
|
4
4
|
import logging
|
5
5
|
from collections import OrderedDict
|
6
6
|
from time import strftime
|
7
|
+
from typing import List, Dict, Optional
|
7
8
|
|
8
9
|
from andes.io.txt import dump_data
|
9
10
|
from andes.shared import np
|
@@ -15,6 +16,9 @@ from ams.shared import copyright_msg
|
|
15
16
|
logger = logging.getLogger(__name__)
|
16
17
|
|
17
18
|
|
19
|
+
DECIMALS = 6
|
20
|
+
|
21
|
+
|
18
22
|
def report_info(system) -> list:
|
19
23
|
info = list()
|
20
24
|
info.append('AMS' + ' ' + version + '\n')
|
@@ -28,6 +32,12 @@ def report_info(system) -> list:
|
|
28
32
|
class Report:
|
29
33
|
"""
|
30
34
|
Report class to store routine analysis reports.
|
35
|
+
|
36
|
+
Notes
|
37
|
+
-----
|
38
|
+
Revised from the ANDES project (https://github.com/CURENT/andes).
|
39
|
+
Original author: Hantao Cui
|
40
|
+
License: GPL3
|
31
41
|
"""
|
32
42
|
|
33
43
|
def __init__(self, system):
|
@@ -51,7 +61,7 @@ class Report:
|
|
51
61
|
'Lines': system.Line.n,
|
52
62
|
'Transformers': np.count_nonzero(system.Line.trans.v == 1),
|
53
63
|
'Areas': system.Area.n,
|
54
|
-
'
|
64
|
+
'Zones': system.Zone.n,
|
55
65
|
})
|
56
66
|
|
57
67
|
def collect(self, rtn, horizon=None):
|
@@ -65,8 +75,6 @@ class Report:
|
|
65
75
|
horizon : str, optional
|
66
76
|
Timeslot to collect data from. Only single timeslot is supported.
|
67
77
|
"""
|
68
|
-
system = self.system
|
69
|
-
|
70
78
|
text = list()
|
71
79
|
header = list()
|
72
80
|
row_name = list()
|
@@ -75,47 +83,13 @@ class Report:
|
|
75
83
|
if not rtn.converged:
|
76
84
|
return text, header, row_name, data
|
77
85
|
|
78
|
-
|
79
|
-
|
80
|
-
|
81
|
-
|
82
|
-
|
83
|
-
|
84
|
-
|
85
|
-
|
86
|
-
# Use a dictionary comprehension to create vars_by_owner
|
87
|
-
owners = {
|
88
|
-
name: {'idx': [],
|
89
|
-
'name': [],
|
90
|
-
'header': [],
|
91
|
-
'data': [], }
|
92
|
-
for name in owners_all if name in owners_e and getattr(system, name).n > 0
|
93
|
-
}
|
94
|
-
|
95
|
-
# --- owner data: idx and name ---
|
96
|
-
for key, val in owners.items():
|
97
|
-
owner = getattr(system, key)
|
98
|
-
idx_v = owner.get_idx()
|
99
|
-
val['idx'] = idx_v
|
100
|
-
val['name'] = owner.get(src='name', attr='v', idx=idx_v)
|
101
|
-
val['header'].append('Name')
|
102
|
-
val['data'].append(val['name'])
|
103
|
-
|
104
|
-
# --- variables data ---
|
105
|
-
for key, var in rtn.vars.items():
|
106
|
-
owner_name = var.owner.class_name
|
107
|
-
idx_v = owners[owner_name]['idx']
|
108
|
-
header_v = key if var.unit is None else f'{key} ({var.unit})'
|
109
|
-
data_v = rtn.get(src=key, attr='v', idx=idx_v, horizon=horizon).round(6)
|
110
|
-
owners[owner_name]['header'].append(header_v)
|
111
|
-
owners[owner_name]['data'].append(data_v)
|
112
|
-
|
113
|
-
# --- dump data ---
|
114
|
-
for key, val in owners.items():
|
115
|
-
text.append([f'{key} DATA:\n'])
|
116
|
-
row_name.append(val['idx'])
|
117
|
-
header.append(val['header'])
|
118
|
-
data.append(val['data'])
|
86
|
+
owners = collect_owners(rtn)
|
87
|
+
owners = collect_vars(owners, rtn, horizon, DECIMALS)
|
88
|
+
owners = collect_exprs(owners, rtn, horizon, DECIMALS)
|
89
|
+
owners = collect_exprcs(owners, rtn, horizon, DECIMALS)
|
90
|
+
|
91
|
+
dump_collected_data(owners, text, header, row_name, data)
|
92
|
+
|
119
93
|
return text, header, row_name, data
|
120
94
|
|
121
95
|
def write(self):
|
@@ -174,15 +148,26 @@ class Report:
|
|
174
148
|
text.append([''])
|
175
149
|
row_name.append(
|
176
150
|
['Generation', 'Load'])
|
177
|
-
|
178
|
-
|
179
|
-
|
180
|
-
Qcol = [rtn.qg.v.sum().round(6), rtn.qd.v.sum().round(6)]
|
181
|
-
data.append([Pcol, Qcol])
|
151
|
+
|
152
|
+
if hasattr(rtn, 'pd'):
|
153
|
+
pd = rtn.pd.v.sum().round(DECIMALS)
|
182
154
|
else:
|
155
|
+
pd = rtn.system.PQ.p0.v.sum().round(DECIMALS)
|
156
|
+
if hasattr(rtn, 'qd'):
|
157
|
+
qd = rtn.qd.v.sum().round(DECIMALS)
|
158
|
+
else:
|
159
|
+
qd = rtn.system.PQ.q0.v.sum().round(DECIMALS)
|
160
|
+
|
161
|
+
if not hasattr(rtn, 'qg'):
|
183
162
|
header.append(['P (p.u.)'])
|
184
|
-
Pcol = [rtn.pg.v.sum().round(
|
163
|
+
Pcol = [rtn.pg.v.sum().round(DECIMALS), pd]
|
185
164
|
data.append([Pcol])
|
165
|
+
else:
|
166
|
+
header.append(['P (p.u.)', 'Q (p.u.)'])
|
167
|
+
Pcol = [rtn.pg.v.sum().round(DECIMALS), pd]
|
168
|
+
Qcol = [rtn.qg.v.sum().round(DECIMALS), qd]
|
169
|
+
data.append([Pcol, Qcol])
|
170
|
+
|
186
171
|
# --- routine data ---
|
187
172
|
text.extend(text_sum)
|
188
173
|
header.extend(header_sum)
|
@@ -192,3 +177,180 @@ class Report:
|
|
192
177
|
|
193
178
|
_, s = elapsed(t)
|
194
179
|
logger.info(f'Report saved to "{system.files.txt}" in {s}.')
|
180
|
+
|
181
|
+
|
182
|
+
def dump_collected_data(owners: dict, text: List, header: List, row_name: List, data: List) -> None:
    """
    Dump collected data into the provided lists.

    Parameters
    ----------
    owners : dict
        Dictionary of owners.
    text : list
        List to append text data to.
    header : list
        List to append header data to.
    row_name : list
        List to append row names to.
    data : list
        List to append data to.
    """
    # One section per owner model, in the owners dict's insertion order.
    for owner_name, entry in owners.items():
        text.append([f'{owner_name} DATA:\n'])
        row_name.append(entry['idx'])
        header.append(entry['header'])
        data.append(entry['data'])
|
204
|
+
|
205
|
+
|
206
|
+
def collect_exprcs(owners: Dict, rtn, horizon: Optional[str], decimals: int) -> Dict:
    """
    Collect expression calculations and populate the data dictionary.

    Parameters
    ----------
    owners : dict
        Dictionary of owners.
    rtn : Routine
        Routine object to collect data from.
    horizon : str, optional
        Timeslot to collect data from. Only single timeslot is supported.
    decimals : int
        Number of decimal places to round the data.

    Returns
    -------
    dict
        Updated dictionary of owners with collected ExpressionCalc data.
    """
    for name, exprc in rtn.exprcs.items():
        if exprc.owner is None:
            # unowned calculations have no section to land in
            continue
        entry = owners[exprc.owner.class_name]
        label = name if exprc.unit is None else f'{name} ({exprc.unit})'
        try:
            values = rtn.get(src=name, attr='v', idx=entry['idx'], horizon=horizon).round(decimals)
        except Exception:
            # value unavailable; keep column alignment with NaN placeholders
            values = [np.nan] * len(entry['idx'])
        entry['header'].append(label)
        entry['data'].append(values)

    return owners
|
240
|
+
|
241
|
+
|
242
|
+
def collect_exprs(owners: Dict, rtn, horizon: Optional[str], decimals: int) -> Dict:
    """
    Collect expressions and populate the data dictionary.

    Parameters
    ----------
    owners : dict
        Dictionary of owners.
    rtn : Routine
        Routine object to collect data from.
    horizon : str, optional
        Timeslot to collect data from. Only single timeslot is supported.
    decimals : int
        Number of decimal places to round the data.

    Returns
    -------
    dict
        Updated dictionary of owners with collected Expression data.
    """
    for name, expr in rtn.exprs.items():
        if expr.owner is None:
            # unowned expressions have no section to land in
            continue
        entry = owners[expr.owner.class_name]
        label = name if expr.unit is None else f'{name} ({expr.unit})'
        try:
            values = rtn.get(src=name, attr='v', idx=entry['idx'], horizon=horizon).round(decimals)
        except Exception:
            # value unavailable; keep column alignment with NaN placeholders
            values = [np.nan] * len(entry['idx'])
        entry['header'].append(label)
        entry['data'].append(values)

    return owners
|
276
|
+
|
277
|
+
|
278
|
+
def collect_vars(owners: Dict, rtn, horizon: Optional[str], decimals: int) -> Dict:
    """
    Collect variables and populate the data dictionary.

    Parameters
    ----------
    owners : dict
        Dictionary of owners.
    rtn : Routine
        Routine object to collect data from.
    horizon : str, optional
        Timeslot to collect data from. Only single timeslot is supported.
    decimals : int
        Number of decimal places to round the data.

    Returns
    -------
    dict
        Updated dictionary of owners with collected Var data.
    """
    for name, var in rtn.vars.items():
        if var.owner is None:
            # unowned variables have no section to land in
            continue
        entry = owners[var.owner.class_name]
        label = name if var.unit is None else f'{name} ({var.unit})'
        try:
            values = rtn.get(src=name, attr='v', idx=entry['idx'], horizon=horizon).round(decimals)
        except Exception:
            # value unavailable; keep column alignment with NaN placeholders
            values = [np.nan] * len(entry['idx'])
        entry['header'].append(label)
        entry['data'].append(values)

    return owners
|
313
|
+
|
314
|
+
|
315
|
+
def collect_owners(rtn):
    """
    Initialize an owners dictionary for data collection.

    Parameters
    ----------
    rtn : Routine
        Routine whose vars/exprs/exprcs determine which models appear.

    Returns
    -------
    dict
        A dictionary of initialized owners.
    """
    # Candidate models, in report order.
    candidate_models = ['Bus', 'Line', 'StaticGen',
                        'PV', 'Slack', 'RenGen',
                        'DG', 'ESD1', 'PVD1', 'VSG',
                        'StaticLoad']

    # Models actually referenced by any var, expression, or calculation.
    referenced = set()
    for items in (rtn.vars.values(), rtn.exprs.values(), rtn.exprcs.values()):
        for item in items:
            if item.owner is not None:
                referenced.add(item.owner.class_name)

    # Seed each referenced, non-empty model with its index and name column.
    owners = {}
    for model_name in candidate_models:
        if model_name not in referenced or getattr(rtn.system, model_name).n <= 0:
            continue
        model = getattr(rtn.system, model_name)
        idx_v = model.get_all_idxes()
        names = model.get(src='name', attr='v', idx=idx_v)
        owners[model_name] = {'idx': idx_v,
                              'name': names,
                              'header': ['Name'],
                              'data': [names]}

    return owners
|
ams/routines/__init__.py
ADDED
@@ -0,0 +1,23 @@
|
|
1
|
+
"""
|
2
|
+
Scheduling routines.
|
3
|
+
"""
|
4
|
+
|
5
|
+
from collections import OrderedDict
|
6
|
+
from andes.utils.func import list_flatten
|
7
|
+
|
8
|
+
all_routines = OrderedDict([
|
9
|
+
('dcpf', ['DCPF']),
|
10
|
+
('pflow', ['PFlow']),
|
11
|
+
('cpf', ['CPF']),
|
12
|
+
('acopf', ['ACOPF']),
|
13
|
+
('dcopf', ['DCOPF']),
|
14
|
+
('ed', ['ED', 'EDDG', 'EDES']),
|
15
|
+
('rted', ['RTED', 'RTEDDG', 'RTEDES', 'RTEDVIS']),
|
16
|
+
('uc', ['UC', 'UCDG', 'UCES']),
|
17
|
+
('dopf', ['DOPF', 'DOPFVIS']),
|
18
|
+
('pflow0', ['PFlow0']),
|
19
|
+
('dcpf0', ['DCPF0']),
|
20
|
+
])
|
21
|
+
|
22
|
+
class_names = list_flatten(list(all_routines.values()))
|
23
|
+
routine_cli = OrderedDict([(item.lower(), item) for item in class_names])
|