ltbams 1.0.11__py3-none-any.whl → 1.0.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. ams/_version.py +3 -3
  2. ams/core/matprocessor.py +183 -118
  3. ams/io/matpower.py +55 -20
  4. ams/io/psse.py +4 -0
  5. ams/opt/exprcalc.py +11 -0
  6. ams/routines/grbopt.py +2 -0
  7. ams/routines/pypower.py +21 -4
  8. ams/routines/routine.py +127 -15
  9. ams/shared.py +30 -2
  10. ams/system.py +51 -3
  11. ams/utils/paths.py +64 -0
  12. docs/source/index.rst +4 -3
  13. docs/source/release-notes.rst +25 -10
  14. {ltbams-1.0.11.dist-info → ltbams-1.0.13.dist-info}/METADATA +4 -2
  15. {ltbams-1.0.11.dist-info → ltbams-1.0.13.dist-info}/RECORD +18 -46
  16. {ltbams-1.0.11.dist-info → ltbams-1.0.13.dist-info}/WHEEL +1 -1
  17. {ltbams-1.0.11.dist-info → ltbams-1.0.13.dist-info}/top_level.txt +0 -1
  18. tests/__init__.py +0 -0
  19. tests/test_1st_system.py +0 -64
  20. tests/test_addressing.py +0 -40
  21. tests/test_case.py +0 -301
  22. tests/test_cli.py +0 -34
  23. tests/test_export_csv.py +0 -89
  24. tests/test_group.py +0 -83
  25. tests/test_interface.py +0 -238
  26. tests/test_io.py +0 -180
  27. tests/test_jumper.py +0 -27
  28. tests/test_known_good.py +0 -267
  29. tests/test_matp.py +0 -437
  30. tests/test_model.py +0 -54
  31. tests/test_omodel.py +0 -119
  32. tests/test_paths.py +0 -22
  33. tests/test_report.py +0 -251
  34. tests/test_repr.py +0 -21
  35. tests/test_routine.py +0 -178
  36. tests/test_rtn_acopf.py +0 -75
  37. tests/test_rtn_dcopf.py +0 -101
  38. tests/test_rtn_dcopf2.py +0 -103
  39. tests/test_rtn_ed.py +0 -279
  40. tests/test_rtn_opf.py +0 -142
  41. tests/test_rtn_pflow.py +0 -147
  42. tests/test_rtn_pypower.py +0 -315
  43. tests/test_rtn_rted.py +0 -273
  44. tests/test_rtn_uc.py +0 -248
  45. tests/test_service.py +0 -73
  46. {ltbams-1.0.11.dist-info → ltbams-1.0.13.dist-info}/entry_points.txt +0 -0
ams/_version.py CHANGED
@@ -8,11 +8,11 @@ import json
 
 version_json = '''
 {
- "date": "2025-05-23T17:38:11-0400",
+ "date": "2025-08-18T15:13:43-0700",
  "dirty": false,
  "error": null,
- "full-revisionid": "6d7b555f5f48c751648594a535711b9fd21d2a85",
- "version": "1.0.11"
+ "full-revisionid": "1aab83cb951d517c906fe117499d482da8b6e66b",
+ "version": "1.0.13"
 }
 ''' # END VERSION_JSON
 
ams/core/matprocessor.py CHANGED
@@ -3,18 +3,18 @@ Module for system matrix make.
 """
 
 import logging
-import os
-import sys
 from typing import Optional
 
 import numpy as np
 
 from andes.thirdparty.npfunc import safe_div
-from andes.shared import tqdm, tqdm_nb
-from andes.utils.misc import elapsed, is_notebook
+from andes.utils.misc import elapsed
 
 from ams.opt import Param
-from ams.shared import pd, sps
+
+from ams.utils.paths import get_export_path
+
+from ams.shared import pd, sps, _init_pbar, _update_pbar
 
 logger = logging.getLogger(__name__)
 
@@ -71,35 +71,135 @@ class MParam(Param):
         self.col_names = col_names
         self.row_names = row_names
 
+    def load_npz(self, path=None):
+        """
+        Load the FULL matrix from a npz file.
+
+        Parameters
+        ----------
+        path : str, optional
+            Path of the npz file to load.
+
+        Returns
+        -------
+        MParam
+            The loaded MParam instance.
+
+        .. versionadded:: 1.0.13
+        """
+
+        if path is None:
+            raise ValueError("Path to the npz file is required.")
+
+        data = sps.load_npz(path) if self.sparse else np.load(path)
+
+        if self.sparse:
+            self._v = data.tocsr()
+            logging.debug(f"Loading sparse matrix {self.name} from npz format.")
+        else:
+            self._v = data['v']
+            logging.warning(f"Loading dense matrix {self.name} from npz format.")
+
+        return self
+
+    def load_csv(self, path=None, chunksize=None, dtype=float):
+        """
+        Load the matrix from an EXPORTED CSV file.
+
+        Parameters
+        ----------
+        path : str, optional
+            Path of the csv file to load.
+        chunksize : int, optional
+            If specified, read the csv file in chunks of this size.
+
+        Returns
+        -------
+        MParam
+            The loaded MParam instance.
+
+        .. versionadded:: 1.0.13
+        """
+
+        if path is None:
+            raise ValueError("Path to the csv file is required.")
+
+        if chunksize:
+            chunks = pd.read_csv(path, index_col=0, chunksize=chunksize, dtype=dtype)
+            df = pd.concat(chunks)
+        else:
+            df = pd.read_csv(path, index_col=0, dtype=dtype)
+
+        if self.sparse:
+            self._v = sps.csr_matrix(df.values)
+            logging.debug(f"Loading sparse matrix {self.name} from csv format.")
+        else:
+            self._v = df.values
+        self.col_names = df.columns.tolist()
+        self.row_names = df.index.tolist()
+
+        logging.debug(f"Loading matrix {self.name} from csv format.")
+        return self
+
+    def export_npz(self, path=None):
+        """
+        Export the matrix to a npz file.
+
+        Parameters
+        ----------
+        path : str, optional
+            Path of the npz file to export.
+
+        Returns
+        -------
+        str
+            The exported npz file name
+
+        .. versionadded:: 1.0.13
+        """
+
+        path, file_name = get_export_path(self.owner.system,
+                                          self.name,
+                                          path=path,
+                                          fmt='npz')
+
+        if sps.issparse(self._v):
+            sps.save_npz(path, self._v.tocsr())
+            logging.debug(f"Saving sparse matrix {self.name} to npz format.")
+        elif isinstance(self._v, np.ndarray):
+            np.savez(path, v=self._v)  # Save with a key 'v' inside the NPZ archive
+            logging.warning(f"Saving dense matrix {self.name} to npz format.")
+        else:
+            raise TypeError(f"Unsupported matrix type: {type(self._v)}")
+
+        return file_name
+
     def export_csv(self, path=None):
         """
         Export the matrix to a CSV file.
 
+        In the exported CSV, columns are the bus idxes, and Line idxes are
+        used as row indexes.
+
         Parameters
         ----------
         path : str, optional
-            Path to the output CSV file.
+            Path of the csv file to export.
 
         Returns
         -------
         str
-            The path of the exported csv file
+            The exported csv file name
         """
 
-        if path is None:
-            if self.owner.system.files.fullname is None:
-                logger.info("Input file name not detacted. Using `Untitled`.")
-                file_name = f'Untitled_{self.name}'
-            else:
-                file_name = os.path.splitext(self.owner.system.files.fullname)[0]
-                file_name += f'_{self.name}'
-            path = os.path.join(os.getcwd(), file_name + '.csv')
-        else:
-            file_name = os.path.splitext(os.path.basename(path))[0]
+        path, file_name = get_export_path(self.owner.system,
+                                          self.name,
+                                          path=path,
+                                          fmt='csv')
 
         pd.DataFrame(data=self.v, columns=self.col_names, index=self.row_names).to_csv(path)
 
-        return file_name + '.csv'
+        return file_name
 
     @property
     def v(self):
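
The new MParam.load_npz, MParam.load_csv, and MParam.export_npz helpers pair with the revised export_csv above. A minimal usage sketch follows; the case name and file names are illustrative, and it assumes the exports land in the current working directory:

    import ams

    ss = ams.load(ams.get_case('matpower/case14.m'))
    ss.mats.build()  # build the connectivity and DC matrices first

    # Export the line connectivity matrix; the file name is derived from the
    # loaded case name by get_export_path (e.g. 'case14_Cft.csv' / 'case14_Cft.npz').
    csv_name = ss.mats.Cft.export_csv()
    npz_name = ss.mats.Cft.export_npz()

    # Reload the exported matrices into the same MParam (added in 1.0.13).
    ss.mats.Cft.load_npz(npz_name)
    ss.mats.Cft.load_csv(csv_name, chunksize=1000)
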
@@ -150,7 +250,6 @@ class MatProcessor:
     def __init__(self, system):
         self.system = system
         self.initialized = False
-        self.pbar = None
 
         self.Cft = MParam(name='Cft', tex_name=r'C_{ft}',
                           info='Line connectivity matrix',
@@ -183,10 +282,10 @@ class MatProcessor:
 
         self.PTDF = MParam(name='PTDF', tex_name=r'P_{TDF}',
                            info='Power transfer distribution factor',
-                           v=None, sparse=False, owner=self)
+                           v=None, sparse=True, owner=self)
         self.LODF = MParam(name='LODF', tex_name=r'O_{TDF}',
                            info='Line outage distribution factor',
-                           v=None, sparse=False, owner=self)
+                           v=None, sparse=True, owner=self)
 
     def build(self, force=False):
         """
@@ -271,6 +370,8 @@ class MatProcessor:
         row = np.array([system.Bus.idx2uid(x) for x in on_gen_bus])
         col = np.array([idx_gen.index(x) for x in on_gen_idx])
         self.Cg._v = sps.csr_matrix((np.ones(len(on_gen_idx)), (row, col)), (nb, ng))
+        self.Cg.col_names = idx_gen
+        self.Cg.row_names = system.Bus.idx.v
         return self.Cg._v
 
     def build_cl(self):
@@ -298,6 +399,8 @@ class MatProcessor:
         row = np.array([system.Bus.idx2uid(x) for x in on_load_bus])
         col = np.array([system.PQ.idx2uid(x) for x in on_load_idx])
         self.Cl._v = sps.csr_matrix((np.ones(len(on_load_idx)), (row, col)), (nb, npq))
+        self.Cl.col_names = idx_load
+        self.Cl.row_names = system.Bus.idx.v
         return self.Cl._v
 
     def build_csh(self):
@@ -325,6 +428,8 @@ class MatProcessor:
         row = np.array([system.Bus.idx2uid(x) for x in on_shunt_bus])
         col = np.array([system.Shunt.idx2uid(x) for x in on_shunt_idx])
         self.Csh._v = sps.csr_matrix((np.ones(len(on_shunt_idx)), (row, col)), (nb, nsh))
+        self.Csh.col_names = idx_shunt
+        self.Csh.row_names = system.Bus.idx.v
         return self.Csh._v
 
     def build_cft(self):
@@ -357,6 +462,10 @@ class MatProcessor:
         col_line = np.array([system.Line.idx2uid(x) for x in on_line_idx + on_line_idx])
         self.Cft._v = sps.csr_matrix((data_line, (row_line, col_line)), (nb, nl))
         self.CftT._v = self.Cft._v.T
+        self.Cft.col_names = idx_line
+        self.Cft.row_names = system.Bus.idx.v
+        self.CftT.col_names = system.Bus.idx.v
+        self.CftT.row_names = idx_line
         return self.Cft._v
 
     def build_bf(self):
@@ -384,6 +493,8 @@ class MatProcessor:
         t = system.Bus.idx2uid(system.Line.get(src='bus2', attr='v', idx=idx_line))
         ir = np.r_[range(nl), range(nl)]  # double set of row indices
         self.Bf._v = sps.csr_matrix((np.r_[b, -b], (ir, np.r_[f, t])), (nl, nb))
+        self.Bf.col_names = system.Bus.idx.v
+        self.Bf.row_names = system.Line.idx.v
         return self.Bf._v
 
     def build_bbus(self):
@@ -396,6 +507,8 @@ class MatProcessor:
        DC bus admittance matrix.
        """
        self.Bbus._v = self.Cft._v * self.Bf._v
+       self.Bbus.col_names = self.system.Bus.idx.v
+       self.Bbus.row_names = self.system.Bus.idx.v
        return self.Bbus._v
 
     def build_pfinj(self):
@@ -411,6 +524,8 @@ class MatProcessor:
         b = self._calc_b()
         phi = self.system.Line.get(src='phi', attr='v', idx=idx_line)
         self.Pfinj._v = b * (-phi)
+        # NOTE: leave the row_names empty for the vector
+        self.Pfinj.col_names = self.system.Line.idx.v
         return self.Pfinj._v
 
     def build_pbusinj(self):
@@ -423,6 +538,8 @@ class MatProcessor:
        Bus power injection vector.
        """
        self.Pbusinj._v = self.Cft._v * self.Pfinj._v
+       # NOTE: leave the row_names empty for the vector
+       self.Pbusinj.col_names = self.system.Bus.idx.v
        return self.Pbusinj._v
 
     def _calc_b(self):
@@ -453,7 +570,7 @@ class MatProcessor:
         return b
 
     def build_ptdf(self, line=None, no_store=False,
-                   incremental=False, step=1000, no_tqdm=False,
+                   incremental=False, step=1000, no_tqdm=True,
                    permc_spec=None, use_umfpack=True):
         """
         Build the Power Transfer Distribution Factor (PTDF) matrix and optionally store it in `MParam.PTDF`.
@@ -461,11 +578,8 @@ class MatProcessor:
        PTDF[m, n] represents the increased line flow on line `m` for a 1 p.u. power injection at bus `n`.
        It is similar to the Generation Shift Factor (GSF).
 
-       Note: There may be minor discrepancies between PTDF-based line flow and DCOPF-calculated line flow.
-
-       For large cases, use `incremental=True` to calculate the sparse PTDF in chunks, which will be stored
-       as a `scipy.sparse.lil_matrix`. In this mode, the PTDF is calculated in chunks, and a progress bar
-       will be shown unless `no_tqdm=True`.
+       For large cases, use `incremental=True` to calculate the sparse PTDF in chunks. In this mode, the
+       PTDF is calculated in chunks, and thus more memory friendly.
 
        Parameters
        ----------
@@ -487,7 +601,7 @@ class MatProcessor:
 
        Returns
        -------
-       PTDF : np.ndarray or scipy.sparse.lil_matrix
+       PTDF : scipy.sparse.lil_matrix
           Power transfer distribution factor.
 
        References
@@ -507,13 +621,18 @@ class MatProcessor:
 
        if line is None:
            luid = system.Line.idx2uid(system.Line.idx.v)
+           self.PTDF.row_names = system.Line.idx.v
        elif isinstance(line, (int, str)):
            try:
                luid = [system.Line.idx2uid(line)]
+               self.PTDF.row_names = [line]
            except ValueError:
                raise ValueError(f"Line {line} not found.")
        elif isinstance(line, list):
            luid = system.Line.idx2uid(line)
+           self.PTDF.row_names = line
+
+       self.PTDF.col_names = system.Bus.idx.v
 
        # build other matrices if not built
        if not self.initialized:
@@ -528,15 +647,7 @@ class MatProcessor:
 
        if incremental:
            # initialize progress bar
-           if is_notebook():
-               self.pbar = tqdm_nb(total=100, unit='%', file=sys.stdout,
-                                   disable=no_tqdm)
-           else:
-               self.pbar = tqdm(total=100, unit='%', ncols=80, ascii=True,
-                                file=sys.stdout, disable=no_tqdm)
-
-           self.pbar.update(0)
-           last_pc = 0
+           pbar = _init_pbar(total=100, unit='%', no_tqdm=no_tqdm)
 
            H = sps.lil_matrix((nline, system.Bus.n))
 
@@ -549,23 +660,13 @@ class MatProcessor:
                                    use_umfpack=use_umfpack).T
                H[start:end, noslack] = sol
 
-               # show progress in percentage
-               perc = np.round(min((end / nline) * 100, 100), 2)
+               _update_pbar(pbar, end, nline)
 
-               perc_diff = perc - last_pc
-               if perc_diff >= 1:
-                   self.pbar.update(perc_diff)
-                   last_pc = perc
-
-           # finish progress bar
-           self.pbar.update(100 - last_pc)
-           # removed `pbar` so that System object can be serialized
-           self.pbar.close()
-           self.pbar = None
        else:
-           H = np.zeros((nline, nbus))
-           H[:, noslack] = np.linalg.solve(Bbus.todense()[np.ix_(noslack, noref)].T,
-                                           Bf.todense()[np.ix_(luid, noref)].T).T
+           H = sps.lil_matrix((nline, nbus))
+           sol = np.linalg.solve(Bbus.todense()[np.ix_(noslack, noref)].T,
+                                 Bf.todense()[np.ix_(luid, noref)].T).T
+           H[:, noslack] = sol
 
        # reshape results into 1D array if only one line
        if isinstance(line, (int, str)):
@@ -577,7 +678,7 @@ class MatProcessor:
        return H
 
     def build_lodf(self, line=None, no_store=False,
-                   incremental=False, step=1000, no_tqdm=False):
+                   incremental=False, step=1000, no_tqdm=True):
        """
        Build the Line Outage Distribution Factor matrix and store it in the
        MParam `LODF`.
@@ -608,7 +709,7 @@ class MatProcessor:
 
        Returns
        -------
-       LODF : np.ndarray, scipy.sparse.lil_matrix
+       LODF : scipy.sparse.lil_matrix
           Line outage distribution factor.
 
        References
@@ -636,81 +737,45 @@ class MatProcessor:
        # build PTDF if not built
        if self.PTDF._v is None:
            ptdf = self.build_ptdf(no_store=True, incremental=incremental, step=step)
-           if incremental and isinstance(self.PTDF._v, np.ndarray):
-               ptdf = sps.lil_matrix(self.PTDF._v)
 
-       if incremental | (isinstance(ptdf, sps.spmatrix)):
-           # initialize progress bar
-           if is_notebook():
-               self.pbar = tqdm_nb(total=100, unit='%', file=sys.stdout,
-                                   disable=no_tqdm)
-           else:
-               self.pbar = tqdm(total=100, unit='%', ncols=80, ascii=True,
-                                file=sys.stdout, disable=no_tqdm)
-
-           self.pbar.update(0)
-           last_pc = 0
-
-           LODF = sps.lil_matrix((nbranch, nline))
-
-           # NOTE: for LODF, we are doing it columns by columns
-           # reshape luid to list of list by step
-           luidp = [luid[i:i + step] for i in range(0, len(luid), step)]
-           for luidi in luidp:
-               H_chunk = ptdf @ self.Cft._v[:, luidi]
-               h_chunk = H_chunk.diagonal(-luidi[0])
-               rden = safe_div(np.ones(H_chunk.shape),
-                               np.tile(np.ones_like(h_chunk) - h_chunk, (nbranch, 1)))
-               H_chunk = H_chunk.multiply(rden).tolil()
-               # NOTE: use lil_matrix to set diagonal values as -1
-               rsid = sps.diags(H_chunk.diagonal(-luidi[0])) + sps.eye(H_chunk.shape[1])
-               if H_chunk.shape[0] > rsid.shape[0]:
-                   Rsid = sps.lil_matrix(H_chunk.shape)
-                   Rsid[luidi, :] = rsid
-               else:
-                   Rsid = rsid
-               H_chunk = H_chunk - Rsid
-               LODF[:, [luid.index(i) for i in luidi]] = H_chunk
-
-               # show progress in percentage
-               perc = np.round(min((luid.index(luidi[-1]) / nline) * 100, 100), 2)
-
-               perc_diff = perc - last_pc
-               if perc_diff >= 1:
-                   self.pbar.update(perc_diff)
-                   last_pc = perc
-
-           # finish progress bar
-           self.pbar.update(100 - last_pc)
-           # removed `pbar` so that System object can be serialized
-           self.pbar.close()
-           self.pbar = None
-       else:
-           H = ptdf @ self.Cft._v[:, luid]
-           h = np.diag(H, -luid[0])
-           LODF = safe_div(H,
-                           np.tile(np.ones_like(h) - h, (nbranch, 1)))
-           # # NOTE: reset the diagonal elements to -1
-           rsid = np.diag(np.diag(LODF, -luid[0])) + np.eye(nline, nline)
-           if LODF.shape[0] > rsid.shape[0]:
-               Rsid = np.zeros_like(LODF)
-               Rsid[luid, :] = rsid
+       # initialize progress bar
+       pbar = _init_pbar(total=100, unit='%', no_tqdm=no_tqdm)
+
+       LODF = sps.lil_matrix((nbranch, nline))
+
+       # NOTE: for LODF, we are doing it columns by columns
+       # reshape luid to list of list by step
+       luidp = [luid[i:i + step] for i in range(0, len(luid), step)]
+       for luidi in luidp:
+           H_chunk = ptdf @ self.Cft._v[:, luidi]
+           h_chunk = H_chunk.diagonal(-luidi[0])
+           rden = safe_div(np.ones(H_chunk.shape),
+                           np.tile(np.ones_like(h_chunk) - h_chunk, (nbranch, 1)))
+           H_chunk = H_chunk.multiply(rden).tolil()
+           # NOTE: use lil_matrix to set diagonal values as -1
+           rsid = sps.diags(H_chunk.diagonal(-luidi[0])) + sps.eye(H_chunk.shape[1])
+           if H_chunk.shape[0] > rsid.shape[0]:
+               Rsid = sps.lil_matrix(H_chunk.shape)
+               Rsid[luidi, :] = rsid
           else:
               Rsid = rsid
-           LODF = LODF - Rsid
+           H_chunk = H_chunk - Rsid
+           LODF[:, [luid.index(i) for i in luidi]] = H_chunk
+
+           _update_pbar(pbar, luid.index(luidi[-1]), nline)
 
        # reshape results into 1D array if only one line
        if isinstance(line, (int, str)):
            LODF = LODF[:, 0]
 
-       if (not no_store) & (line is None):
+       if (not no_store) and (line is None):
           self.LODF._v = LODF
        return LODF
 
     def build_otdf(self, line=None):
        """
-       Build the Outrage Transfer Distribution Factor (OTDF) matrix for line
-       k outage: $OTDF_k = PTDF + LODF[:, k] @ PTDF[k, ]$.
+       Build the Outrage Transfer Distribution Factor (OTDF) matrix for
+       **line k** outage: $OTDF_k = PTDF + LODF[:, k] @ PTDF[k, ]$.
 
        OTDF_k[m, n] means the increased line flow on line `m` when there is
        1 p.u. power injection at bus `n` when line `k` is outage.
@@ -726,7 +791,7 @@ class MatProcessor:
 
        Returns
        -------
-       OTDF : np.ndarray, scipy.sparse.csr_matrix
+       OTDF : scipy.sparse.csr_matrix
          Line outage distribution factor.
 
        References
@@ -751,4 +816,4 @@ class MatProcessor:
            luid = self.system.Line.idx2uid(line)
 
        otdf = ptdf + lodf[:, luid] @ ptdf[luid, :]
-       return otdf
+       return otdf.tocsr()
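
With PTDF and LODF now declared sparse and built through the chunked path, a typical call sequence is sketched below; the case name and step value are illustrative, and progress bars stay off unless no_tqdm=False is passed:

    import ams

    ss = ams.load(ams.get_case('matpower/case14.m'))
    ss.mats.build()

    # Chunked, memory-friendly construction; results are scipy.sparse matrices.
    ptdf = ss.mats.build_ptdf(incremental=True, step=500, no_tqdm=False)
    lodf = ss.mats.build_lodf(incremental=True, step=500)

    # OTDF for the outage of the first line, returned as csr_matrix:
    # OTDF_k = PTDF + LODF[:, k] @ PTDF[k, :]
    kidx = ss.Line.idx.v[0]
    otdf = ss.mats.build_otdf(line=kidx)
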
ams/io/matpower.py CHANGED
@@ -455,16 +455,6 @@ def system2mpc(system) -> dict:
 
     This function is revised from ``andes.io.matpower.system2mpc``.
 
-    In the ``gen`` section, slack generators are listed before PV generators.
-
-    In the converted MPC, the indices of area (bus[:, 6]) and zone (bus[:, 10])
-    may differ from the original MPC. However, the mapping relationship is preserved.
-    For example, if the original MPC numbers areas starting from 1, the converted
-    MPC may number them starting from 0.
-
-    The coefficients ``c2`` and ``c1`` in the generator cost data are scaled by
-    ``base_mva`` to match MATPOWER's unit convention (MW).
-
     Parameters
     ----------
     system : ams.core.system.System
@@ -474,6 +464,21 @@ def system2mpc(system) -> dict:
     -------
     mpc : dict
         A dictionary in MATPOWER format representing the converted AMS system.
+
+    Notes
+    -----
+    - In the `gen` section, slack generators are listed before PV generators.
+    - For uncontrolled generators (`ctrl.v == 0`), their max and min power
+      limits are set to their initial power (`p0.v`) in the converted MPC.
+    - In the converted MPC, the indices of area (`bus[:, 6]`) and zone (`bus[:, 10]`)
+      may differ from the original MPC. However, the mapping relationship is preserved.
+      For example, if the original MPC numbers areas starting from 1, the converted
+      MPC may number them starting from 0.
+    - The coefficients `c2` and `c1` in the generator cost data are scaled by
+      `baseMVA`.
+    - Unlike the XLSX and JSON converters, this implementation uses value providers
+      (`v`) instead of vin. As a result, any changes made through `model.set` will be
+      reflected in the generated MPC.
     """
 
     mpc = dict(version='2',
@@ -514,15 +519,31 @@ def system2mpc(system) -> dict:
     # --- PQ ---
     if system.PQ.n > 0:
         pq_pos = system.Bus.idx2uid(system.PQ.bus.v)
-        u = system.PQ.u.v
-        bus[pq_pos, 2] = u * system.PQ.p0.v * base_mva
-        bus[pq_pos, 3] = u * system.PQ.q0.v * base_mva
+
+        p0e = system.PQ.u.v * system.PQ.p0.v
+        q0e = system.PQ.u.v * system.PQ.q0.v
+
+        # NOTE: ensure multiple PQ on the same bus are summed
+        # rather than overwritten. Same for Shunt.
+        p = np.zeros(system.Bus.n)
+        q = np.zeros(system.Bus.n)
+        np.add.at(p, pq_pos, p0e)
+        np.add.at(q, pq_pos, q0e)
+        bus[:, 2] = p * base_mva
+        bus[:, 3] = q * base_mva
 
     # --- Shunt ---
     if system.Shunt.n > 0:
         shunt_pos = system.Bus.idx2uid(system.Shunt.bus.v)
-        bus[shunt_pos, 4] = system.Shunt.g.v * base_mva
-        bus[shunt_pos, 5] = system.Shunt.b.v * base_mva
+
+        ge = system.Shunt.u.v * system.Shunt.g.v
+        be = system.Shunt.u.v * system.Shunt.b.v
+        g = np.zeros(system.Bus.n)
+        b = np.zeros(system.Bus.n)
+        np.add.at(g, shunt_pos, ge)
+        np.add.at(b, shunt_pos, be)
+        bus[:, 4] = g * base_mva
+        bus[:, 5] = b * base_mva
 
     # --- PV ---
     if system.PV.n > 0:
@@ -625,6 +646,8 @@ def mpc2m(mpc: dict, outfile: str) -> str:
         MATPOWER mpc dictionary.
     outfile : str
         Path to the output M-file.
+
+    .. versionadded:: 1.0.10
     """
     with open(outfile, 'w') as f:
         # Add version info
@@ -709,11 +732,6 @@ def write(system, outfile: str, overwrite: bool = None) -> bool:
     This function converts an AMS system object into a MATPOWER-compatible
     mpc dictionary and writes it to a specified output file in MATPOWER format.
 
-    In the converted MPC, the indices of area (bus[:, 6]) and zone (bus[:, 10])
-    may differ from the original MPC. However, the mapping relationship is preserved.
-    For example, if the original MPC numbers areas starting from 1, the converted
-    MPC may number them starting from 0.
-
     Parameters
     ----------
     system : ams.system.System
@@ -727,6 +745,23 @@ def write(system, outfile: str, overwrite: bool = None) -> bool:
     -------
     bool
         True if the file was successfully written, False otherwise.
+
+    Notes
+    -----
+    - In the `gen` section, slack generators are listed before PV generators.
+    - For uncontrolled generators (`ctrl.v == 0`), their max and min power
+      limits are set to their initial power (`p0.v`) in the converted MPC.
+    - In the converted MPC, the indices of area (`bus[:, 6]`) and zone (`bus[:, 10]`)
+      may differ from the original MPC. However, the mapping relationship is preserved.
+      For example, if the original MPC numbers areas starting from 1, the converted
+      MPC may number them starting from 0.
+    - The coefficients `c2` and `c1` in the generator cost data are scaled by
+      `baseMVA`.
+    - Unlike the XLSX and JSON converters, this implementation uses value providers
+      (`v`) instead of vin. As a result, any changes made through `model.set` will be
+      reflected in the generated MPC.
+
+    .. versionadded:: 1.0.10
     """
     if not confirm_overwrite(outfile, overwrite=overwrite):
         return False
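
A short sketch of the revised conversion path; it shows that co-located PQ (and Shunt) devices are now summed into the bus columns and that edits made through model.set carry over, since the converter reads value providers (v). The case and output file names are illustrative:

    import ams
    from ams.io.matpower import system2mpc, write

    ss = ams.load(ams.get_case('matpower/case14.m'))

    # An edit through set() is reflected in the conversion because the
    # converter reads v rather than vin.
    ss.PQ.set(src='p0', idx=ss.PQ.idx.v[0], attr='v', value=0.5)

    mpc = system2mpc(ss)                       # bus[:, 2:4] holds the summed PQ load
    write(ss, 'case14_out.m', overwrite=True)
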
ams/io/psse.py CHANGED
@@ -46,6 +46,8 @@ def write_raw(system, outfile: str, overwrite: bool = None):
     """
     Convert AMS system to PSS/E RAW file.
 
+    This method has not been fully benchmarked yet!
+
     Parameters
     ----------
     system : System
@@ -54,6 +56,8 @@ def write_raw(system, outfile: str, overwrite: bool = None):
         The output file path.
     overwrite : bool, optional
         If True, overwrite the file if it exists. If False, do not overwrite.
+
+    .. versionadded:: 1.0.10
     """
     if not confirm_overwrite(outfile, overwrite=overwrite):
         return False
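
A minimal sketch of the RAW export flagged above as not fully benchmarked; the output file name is illustrative:

    import ams
    from ams.io.psse import write_raw

    ss = ams.load(ams.get_case('matpower/case14.m'))
    write_raw(ss, 'case14.raw', overwrite=True)  # returns False if overwriting is declined
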
ams/opt/exprcalc.py CHANGED
@@ -102,6 +102,17 @@ class ExpressionCalc(OptzBase):
         else:
             return self.optz.value
 
+    @v.setter
+    def v(self, value):
+        """
+        Set the ExpressionCalc value.
+        """
+        if self.optz is None:
+            raise ValueError("ExpressionCalc is not evaluated yet.")
+        if not isinstance(value, (int, float, np.ndarray)):
+            raise TypeError(f"Value must be a number or numpy array, got {type(value)}.")
+        self.optz.value = value
+
     @property
     def e(self):
         """
ams/routines/grbopt.py CHANGED
@@ -34,6 +34,8 @@ class OPF(DCPF1):
     Refer to the gurobi-optimods documentation for further details:
 
     https://gurobi-optimods.readthedocs.io/en/stable/mods/opf/opf.html
+
+    .. versionadded:: 1.0.10
     """
 
     def __init__(self, system, config):
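
A sketch of invoking this routine, assuming it is registered on the system as `OPF` per the usual AMS routine interface and that gurobi-optimods plus a Gurobi license are available:

    import ams

    ss = ams.load(ams.get_case('matpower/case14.m'))
    ss.OPF.run()              # dispatches to the gurobi-optimods OPF mod
    print(ss.OPF.converged)   # assumed routine convergence flag
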