pyMOTO 1.3.0__py3-none-any.whl → 1.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pymoto/common/mma.py CHANGED
@@ -28,20 +28,50 @@ def residual(x, y, z, lam, xsi, eta, mu, zet, s, upp, low, P0, P1, Q0, Q1, epsi,
  ])
 
 
- def subsolv(epsimin, low, upp, alfa, beta, P, Q, a0, a, b, c, d):
- """ This function subsolv solves the MMA subproblem
- minimize SUM[ p0j/(uppj-xj) + q0j/(xj-lowj) ] + a0*z +
- + SUM[ ci*yi + 0.5*di*(yi)^2 ],
- subject to SUM[ pij/(uppj-xj) + qij/(xj-lowj) ] - ai*z - yi <= bi,
- alfaj <= xj <= betaj, yi >= 0, z >= 0.
- Input: m, n, low, upp, alfa, beta, p0, q0, P, Q, a0, a, b, c, d.
- Output: xmma,ymma,zmma, slack variables and Lagrange multiplers.
+ def subsolv(epsimin, low, upp, alfa, beta, P, Q, a0, a, b, c, d, x0=None):
+ r""" This function solves the MMA subproblem
+
+ minimize f_0(\vec{x}) + a_0*z + \sum_i^m[ c_i*y_i + 1/2*d_i*y_i^2 ],
+ subject to f_i(\vec{x}) - a_i*z - y_i <= b_i, for i = 1, ..., m
+ alfa_j <= x_j <= beta_j, for j = 1, ..., n
+ y_i >= 0, for i = 1, ..., m
+ z >= 0.
+
+ where:
+ MMA approximation: :math:`f_i(\vec{x}) = \sum_j\left( p_{ij}/(upp_j-x_j) + q_{ij}/(x_j-low_j) \right)`
+ m: The number of general constraints
+ n: The number of variables in :math:`\vec{x}`
+
+ Args:
+ epsimin: Solution tolerance on maximum residual
+ low: Column vector with the lower asymptotes
+ upp: Column vector with the upper asymptotes
+ alfa: Vector with the lower bounds for the variables :math:`\vec{x}`
+ beta: Vector with the upper bounds for the variables :math:`\vec{x}`
+ P: Upper asymptotic amplitudes
+ Q: Lower asymptotic amplitudes
+ a0: The constant :math:`a_0` in the term :math:`a_0\cdot z`
+ a: Vector with the constants :math:`a_i` in the terms :math:`a_i \cdot z`
+ c: Vector with the constants :math:`c_i` in the terms :math:`c_i \cdot y_i`
+ d: Vector with the constants :math:`d_i` in the terms :math:`0.5 \cdot d_i \cdot y_i^2`
+ x0 (optional): Initial guess; if not given, :math:`x_0 = (\alpha + \beta)/2` is used
+
+ Returns:
+ x: Vector with the optimal values of the variables :math:`\vec{x}` in the current MMA subproblem
+ y: Vector with the optimal values of the variables :math:`y_i` in the current MMA subproblem
+ z: Scalar with the optimal value of the variable :math:`z` in the current MMA subproblem
+ lam: Lagrange multipliers for the :math:`m` general MMA constraints
+ xsi: Lagrange multipliers for the :math:`n` constraints :math:`alfa_j - x_j <= 0`
+ eta: Lagrange multipliers for the :math:`n` constraints :math:`x_j - beta_j <= 0`
+ mu: Lagrange multipliers for the :math:`m` constraints :math:`-y_i <= 0`
+ zet: Lagrange multiplier for the single constraint :math:`-z <= 0`
+ s: Slack variables for the m general MMA constraints
  """
 
  n, m = len(alfa), len(a)
  epsi = 1.0
  maxittt = 400
- x = 0.5 * (alfa + beta)
+ x = 0.5 * (alfa + beta) if x0 is None else np.clip(x0, alfa+1e-10, beta-1e-10) # Design variables
  y = np.ones(m)
  z = 1.0
  lam = np.ones(m)
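
For context on the docstring above: the MMA approximation f_i(x) = sum_j( p_ij/(upp_j - x_j) + q_ij/(x_j - low_j) ) can be evaluated directly with NumPy. A minimal sketch, not part of pyMOTO (the helper name mma_approx is hypothetical), assuming P and Q are (m+1, n) arrays and low < x < upp elementwise, as documented for subsolv:

import numpy as np

def mma_approx(x, P, Q, low, upp):
    # Evaluate f_i(x) = sum_j( P[i, j]/(upp[j] - x[j]) + Q[i, j]/(x[j] - low[j]) )
    # for all rows i at once. Shapes: x, low, upp -> (n,); P, Q -> (m+1, n).
    return P @ (1.0 / (upp - x)) + Q @ (1.0 / (x - low))
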
@@ -168,8 +198,7 @@ def subsolv(epsimin, low, upp, alfa, beta, P, Q, a0, a, b, c, d):
  sold = s.copy()
 
  # Do linesearch
- itto = 0
- while itto < maxittt:
+ for itto in range(maxittt):
  # Find new set of variables with stepsize
  x[:] = xold + steg * dx
  y[:] = yold + steg * dy
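
The loop restructuring above (together with the residual check in the next hunk) is a plain backtracking line search: the step is halved until the Newton residual norm decreases. A standalone sketch of that pattern, with residual_fn standing in for the solver's residual() call and all names hypothetical:

import numpy as np

def backtracking_step(x, dx, residual_fn, max_halvings=50):
    # Halve the step size until the residual norm decreases, mirroring
    # 'for itto in range(maxittt): ... steg /= 2' in the surrounding hunks.
    r0 = np.linalg.norm(residual_fn(x))
    steg = 1.0
    for _ in range(max_halvings):
        x_new = x + steg * dx
        if np.linalg.norm(residual_fn(x_new)) < r0:
            return x_new
        steg /= 2
    return x  # no decrease found; keep the previous iterate
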
@@ -184,14 +213,13 @@ def subsolv(epsimin, low, upp, alfa, beta, P, Q, a0, a, b, c, d):
  residu = residual(x, y, z, lam, xsi, eta, mu, zet, s, upp, low, P0, P1, Q0, Q1, epsi, a0, a, b, c, d, alfa, beta)
  if np.linalg.norm(residu) < residunorm:
  break
- itto += 1
  steg /= 2 # Reduce stepsize
 
  residunorm = np.linalg.norm(residu)
  residumax = np.max(np.abs(residu))
 
  if ittt > maxittt - 2:
- print(f"MMA Subsolver: itt = {ittt}, at epsi = {epsi}")
+ print(f"MMA Subsolver: itt = {ittt}, at epsi = {'%.3e'%epsi}")
  # decrease epsilon with factor 10
  epsi /= 10
 
@@ -200,8 +228,7 @@ def subsolv(epsimin, low, upp, alfa, beta, P, Q, a0, a, b, c, d):
 
 
  class MMA:
- """
- Block for the MMA algorithm
+ r""" Class for the MMA optimization algorithm
  The design variables are set by keyword <variables> accepting a list of variables.
  The responses are set by keyword <responses> accepting a list of signals.
  If none are given, the internal sig_in and sig_out are used.
@@ -219,11 +246,16 @@ class MMA:
  xmin: Minimum design variable (can be a vector)
  xmax: Maximum design variable (can be a vector)
  fn_callback: A function that is called just before calling the response() in each iteration
- verbosity: 0 - No prints, 1 - Only convergence message, 2 - Convergence and iteration info, 3 - Extended info
+ verbosity: Level of information to print
+ 0 - No prints
+ 1 - Only convergence message
+ 2 - Convergence and iteration info (default)
+ 3 - Additional info on variables
+ 4 - Additional info on sensitivity information
 
  """
 
- def __init__(self, function, variables, responses, tolx=1e-4, tolf=0.0, move=0.1, maxit=100, xmin=0.0, xmax=1.0, fn_callback=None, verbosity=0, **kwargs):
+ def __init__(self, function, variables, responses, tolx=1e-4, tolf=0.0, move=0.1, maxit=100, xmin=0.0, xmax=1.0, fn_callback=None, verbosity=2, **kwargs):
  self.funbl = function
  self.verbosity = verbosity
 
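With the new default verbosity=2, iteration messages are printed unless verbosity is lowered explicitly. A hedged usage sketch of the constructor documented above; the objects fn (the response/sensitivity block), sx (design-variable signal), sf (objective signal) and sg (constraint signal) are assumed to be set up elsewhere and are not part of this diff:

# Hypothetical setup: fn, sx, sf, sg are assumed to exist already.
mma = MMA(fn, variables=[sx], responses=[sf, sg],
          move=0.1, xmin=0.0, xmax=1.0, maxit=100,
          verbosity=2)  # 0 silent ... 4 also prints sensitivity ranges
mma.response()  # runs the optimization loop defined in response() below
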
@@ -246,19 +278,15 @@ class MMA:
 
  self.a0 = kwargs.get("a0", 1.0)
 
- self.epsimin = kwargs.get("epsimin", 1e-7) # Or 1e-7 ?? witout sqrt(m+n) or 1e-9
- self.raa0 = kwargs.get("raa0", 1e-5)
-
+ self.epsimin = kwargs.get("epsimin", 1e-10) # Or 1e-7 ?? without sqrt(m+n) or 1e-9
  self.cCoef = kwargs.get("cCoef", 1e3) # Svanberg uses 1e3 in example? Old code had 1e7
 
- # Not used
- self.dxmin = kwargs.get("dxmin", 1e-5)
-
  self.albefa = kwargs.get("albefa", 0.1)
  self.asyinit = kwargs.get("asyinit", 0.5)
  self.asyincr = kwargs.get("asyincr", 1.2)
  self.asydecr = kwargs.get("asydecr", 0.7)
  self.asybound = kwargs.get("asybound", 10.0)
+ self.mmaversion = kwargs.get("mmaversion", "Svanberg2007") # Options are Svanberg1987, Svanberg2007
 
  self.ittomax = kwargs.get("ittomax", 400)
 
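The advanced settings above are read from **kwargs, so they can be overridden per instance without subclassing. A sketch reusing the hypothetical objects from the previous example, switching to the original 1987 amplitudes and a custom asymptote initialization:

mma = MMA(fn, variables=[sx], responses=[sf, sg],
          mmaversion="Svanberg1987",  # default is "Svanberg2007"
          asyinit=0.2, asyincr=1.1,   # initial asymptote offset and growth factor
          epsimin=1e-9)               # subsolver tolerance, scaled by sqrt(m+n) in mmasub()
mma.response()
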
@@ -282,7 +310,6 @@ class MMA:
  self.d = np.ones(self.m)
  self.gold1 = np.zeros(self.m + 1)
  self.gold2 = self.gold1.copy()
- self.rho = self.raa0 * np.ones(self.m + 1)
 
  def response(self):
  change = 1
@@ -301,8 +328,7 @@ class MMA:
  self.xmin[self.cumlens[i]:self.cumlens[i+1]] = xminvals[i]
 
  if len(self.xmin) != self.n:
- raise RuntimeError(
- "Length of the xmin vector not correct ({} != {})".format(len(self.xmin), self.n))
+ raise RuntimeError(f"Length of the xmin vector ({len(self.xmin)}) should be equal to # design variables ({self.n})")
 
  if not hasattr(self.xmax, '__len__'):
  self.xmax = self.xmax * np.ones_like(xval)
@@ -313,17 +339,19 @@ class MMA:
  self.xmax[self.cumlens[i]:self.cumlens[i + 1]] = xmaxvals[i]
 
  if len(self.xmax) != self.n:
- raise RuntimeError("Length of the xmax vector not correct ({} != {})".format(len(self.xmax), self.n))
+ raise RuntimeError(f"Length of the xmax vector ({len(self.xmax)}) should be equal to # design variables ({self.n})")
 
- # Set movelimit in case of multiple
  if hasattr(self.move, '__len__'):
- if len(self.move) == len(self.variables):
- movevals = self.move
+ # Set movelimit in case multiple are given
+ move_input = np.asarray(self.move).copy()
+ if move_input.size == len(self.variables):
  self.move = np.zeros_like(xval)
- for i in range(len(movevals)):
- self.move[self.cumlens[i]:self.cumlens[i + 1]] = movevals[i]
+ for i in range(move_input.size):
+ self.move[self.cumlens[i]:self.cumlens[i + 1]] = move_input[i]
  elif len(self.move) != self.n:
- raise RuntimeError("Length of the move vector not correct ({} != {})".format(len(self.move), self.n))
+ raise RuntimeError(f"Length of the move vector ({len(self.move)}) should be equal to number of "
+ f"design variable signals ({len(self.variables)}) or "
+ f"total number of design variables ({self.n}).")
 
  fcur = 0.0
  while self.iter < self.maxIt:
@@ -333,12 +361,9 @@ class MMA:
  # Set the new states
  for i, s in enumerate(self.variables):
  if self.cumlens[i+1]-self.cumlens[i] == 1:
- try:
- s.state[:] = xval[self.cumlens[i]]
- except TypeError:
- s.state = xval[self.cumlens[i]]
+ s.state = xval[self.cumlens[i]]
  else:
- s.state[:] = xval[self.cumlens[i]:self.cumlens[i+1]]
+ s.state = xval[self.cumlens[i]:self.cumlens[i+1]]
 
  if self.fn_callback is not None:
  self.fn_callback()
@@ -346,19 +371,11 @@ class MMA:
  # Calculate response
  self.funbl.response()
 
- # Update the states
- for i, s in enumerate(self.variables):
- if self.cumlens[i+1]-self.cumlens[i] == 1:
- try:
- xval[self.cumlens[i]] = s.state[:]
- except (TypeError, IndexError):
- xval[self.cumlens[i]] = s.state
- else:
- xval[self.cumlens[i]:self.cumlens[i+1]] = s.state[:]
-
  # Save response
  f = ()
  for s in self.responses:
+ if np.size(s.state) != 1:
+ raise TypeError("State of responses must be scalar.")
  f += (s.state, )
 
  # Check function change convergence criterion
@@ -388,27 +405,48 @@ class MMA:
  # Reset sensitivities for the next response
  self.funbl.reset()
 
- # Display info on variables
  if self.verbosity >= 3:
+ # Display info on variables
+ show_sensitivities = self.verbosity >= 4
+ msg = ""
  for i, s in enumerate(self.variables):
- isscal = self.cumlens[i + 1] - self.cumlens[i] == 1
- msg = "{0:>10s} = ".format(s.tag)
- if isscal:
- try:
- msg += " {0: .3e} ".format(s.state)
- except TypeError:
- msg += " {0: .3e} ".format(s.state[0])
+ if show_sensitivities:
+ msg += "{0:>10s} = ".format(s.tag[:10])
  else:
- msg += "[{0: .3e} ... {1: .3e}] ".format(min(s.state), max(s.state))
- for j, s_out in enumerate(self.responses):
- msg += "| {0:>10s}/{1:10s} = ".format("d"+s_out.tag, "d"+s.tag)
- if isscal:
- msg += " {0: .3e} ".format(df[j][self.cumlens[i]])
+ msg += f"{s.tag} = "
+
+ # Display value range
+ fmt = '% .2e'
+ minval, maxval = np.min(s.state), np.max(s.state)
+ mintag, maxtag = fmt % minval, fmt % maxval
+ if mintag == maxtag:
+ if show_sensitivities:
+ msg += f" {mintag} "
  else:
- msg += "[{0: .3e} ... {1: .3e}] ".format(min(df[j][self.cumlens[i]:self.cumlens[i+1]]), max(df[j][self.cumlens[i]:self.cumlens[i+1]]))
- print(msg)
+ msg += f" {mintag}"
+ else:
+ sep = '…' if len(s.state) > 2 else ','
+ msg += f"[{mintag}{sep}{maxtag}]"
+ if show_sensitivities:
+ msg += " "
+
+ if show_sensitivities:
+ # Display info on sensitivity values
+ for j, s_out in enumerate(self.responses):
+ msg += "| {0:s}/{1:11s} = ".format("d" + s_out.tag, "d" + s.tag[:10])
+ minval = np.min(df[j][self.cumlens[i]:self.cumlens[i+1]])
+ maxval = np.max(df[j][self.cumlens[i]:self.cumlens[i+1]])
+ mintag, maxtag = fmt % minval, fmt % maxval
+ if mintag == maxtag:
+ msg += f" {mintag} "
+ else:
+ sep = '…' if self.cumlens[i + 1] - self.cumlens[i] > 2 else ','
+ msg += f"[{mintag}{sep}{maxtag}] "
+ msg += '\n'
+ elif i != len(self.variables)-1:
+ msg += ', '
+ print(msg)
 
- self.iter += 1
  xnew, change = self.mmasub(xval.copy(), np.hstack(f), np.vstack(df))
 
  # Stopping criteria on step size
@@ -419,6 +457,7 @@ class MMA:
  break
 
  xval = xnew
+ self.iter += 1
 
  def mmasub(self, xval, g, dg):
  if self.dx is None:
@@ -426,57 +465,6 @@ class MMA:
  if self.offset is None:
  self.offset = self.asyinit * np.ones(self.n)
 
- # Minimize f_0(x) + a_0*z + sum( c_i*y_i + 0.5*d_i*(y_i)^2 )
- # subject to f_i(x) - a_i*z - y_i <= 0, i = 1,...,m
- # xmin_j <= x_j <= xmax_j, j = 1,...,n
- # z >= 0, y_i >= 0, i = 1,...,m
- # *** INPUT:
- #
- # m = The number of general constraints.
- # n = The number of variables x_j.
- # iter = Current iteration number ( =1 the first time mmasub is called).
- # xval = Column vector with the current values of the variables x_j.
- # xmin = Column vector with the lower bounds for the variables x_j.
- # xmax = Column vector with the upper bounds for the variables x_j.
- # xold1 = xval, one iteration ago (provided that iter>1).
- # xold2 = xval, two iterations ago (provided that iter>2).
- # f0val = The value of the objective function f_0 at xval.
- # df0dx = Column vector with the derivatives of the objective function
- # f_0 with respect to the variables x_j, calculated at xval.
- # fval = Column vector with the values of the constraint functions f_i,
- # calculated at xval.
- # dfdx = (m x n)-matrix with the derivatives of the constraint functions
- # f_i with respect to the variables x_j, calculated at xval.
- # dfdx(i,j) = the derivative of f_i with respect to x_j.
- # low = Column vector with the lower asymptotes from the previous
- # iteration (provided that iter>1).
- # upp = Column vector with the upper asymptotes from the previous
- # iteration (provided that iter>1).
- # a0 = The constants a_0 in the term a_0*z.
- # a = Column vector with the constants a_i in the terms a_i*z.
- # c = Column vector with the constants c_i in the terms c_i*y_i.
- # d = Column vector with the constants d_i in the terms 0.5*d_i*(y_i)^2.
- #
-
- # *** OUTPUT:
- #
- # xmma = Column vector with the optimal values of the variables x_j
- # in the current MMA subproblem.
- # ymma = Column vector with the optimal values of the variables y_i
- # in the current MMA subproblem.
- # zmma = Scalar with the optimal value of the variable z
- # in the current MMA subproblem.
- # lam = Lagrange multipliers for the m general MMA constraints.
- # xsi = Lagrange multipliers for the n constraints alfa_j - x_j <= 0.
- # eta = Lagrange multipliers for the n constraints x_j - beta_j <= 0.
- # mu = Lagrange multipliers for the m constraints -y_i <= 0.
- # zet = Lagrange multiplier for the single constraint -z <= 0.
- # s = Slack variables for the m general MMA constraints.
- # low = Column vector with the lower asymptotes, calculated and used
- # in the current MMA subproblem.
- # upp = Column vector with the upper asymptotes, calculated and used
- # in the current MMA subproblem.
-
  # # ASYMPTOTES
  # Calculation of the asymptotes low and upp :
  # For iter = 1,2 the asymptotes are fixed depending on asyinit
@@ -519,36 +507,61 @@ class MMA:
  # # APPROXIMATE CONVEX SEPARABLE FUNCTIONS
  # Calculations of p0, q0, P, Q and b.
  # calculate the constant factor in calculations of pij and qij
+ # From: Svanberg (2007) - MMA and GCMMA, two methods for nonlinear optimization
+ dg_plus = np.maximum(+dg, 0)
+ dg_min = np.maximum(-dg, 0)
  dx2 = shift**2
- P = dx2 * np.maximum(+dg, 0)
- Q = dx2 * np.maximum(-dg, 0)
+ if '1987' in self.mmaversion:
+ # Original version
+ P = dx2 * dg_plus
+ Q = dx2 * dg_min
+ elif '2007' in self.mmaversion:
+ # Improved version -> Allows using a higher epsimin to get design variables closer to the bound.
+ P = dx2 * (1.001*dg_plus + 0.001*dg_min + 1e-5/self.dx)
+ Q = dx2 * (0.001*dg_plus + 1.001*dg_min + 1e-5/self.dx)
+ else:
+ raise ValueError("Only \"Svanberg1987\" or \"Svanberg2007\" are valid options")
 
  rhs = np.dot(P, 1 / shift) + np.dot(Q, 1 / shift) - g
  b = rhs[1:]
 
  # Solving the subproblem by a primal-dual Newton method
  epsimin_scaled = self.epsimin*np.sqrt(self.m + self.n)
- xmma, ymma, zmma, lam, xsi, eta, mu, zet, s = subsolv(epsimin_scaled, self.low, self.upp, alfa, beta, P, Q, self.a0, self.a, b, self.c, self.d)
+ xmma, ymma, zmma, lam, xsi, eta, mu, zet, s = subsolv(epsimin_scaled, self.low, self.upp, alfa, beta, P, Q, self.a0, self.a, b, self.c, self.d, x0=xval)
 
  self.gold2, self.gold1 = self.gold1, g.copy()
  self.xold2, self.xold1 = self.xold1, xval.copy()
  change = np.average(abs(xval - xmma))
 
  if self.verbosity >= 2:
+ # Display iteration status message
  msgs = ["g{0:d}({1:s}): {2:+.4e}".format(i, s.tag, g[i]) for i, s in enumerate(self.responses)]
- print("It. {0: 4d}, {1}".format(self.iter, ", ".join(msgs)))
+ max_infeasibility = max(g[1:])
+ is_feasible = max_infeasibility <= 0
+
+ feasibility_tag = 'f' if is_feasible else ' '
+ print("It. {0: 4d}, [{1:1s}] {2}".format(self.iter, feasibility_tag, ", ".join(msgs)))
 
- if self.verbosity >=3:
- # Print changes
- printstr = "Changes: "
+ if self.verbosity >= 3:
+ # Report design feasibility
+ iconst_max = np.argmax(g[1:])
+ print(f" | {np.sum(g[1:]>0)} / {len(g)-1} violated constraints, "
+ f"max. violation ({self.responses[iconst_max+1].tag}) = {'%.2g'%g[iconst_max+1]}")
+
+ # Print design changes
+ change_msgs = []
  for i, s in enumerate(self.variables):
- isscal = self.cumlens[i + 1] - self.cumlens[i] == 1
- if isscal:
- chg = abs(xval[self.cumlens[i]] - xmma[self.cumlens[i]])
+ minchg = np.min(abs(xval[self.cumlens[i]:self.cumlens[i + 1]] - xmma[self.cumlens[i]:self.cumlens[i + 1]]))
+ maxchg = np.max(abs(xval[self.cumlens[i]:self.cumlens[i + 1]] - xmma[self.cumlens[i]:self.cumlens[i + 1]]))
+ fmt = '%.2g'
+ mintag, maxtag = fmt % minchg, fmt % maxchg
+
+ if mintag == maxtag:
+ change_msgs.append(f"Δ({s.tag}) = {mintag}")
  else:
- chg = np.average(abs(xval[self.cumlens[i]:self.cumlens[i + 1]] - xmma[self.cumlens[i]:self.cumlens[i + 1]]))
+ change_msgs.append(f"Δ({s.tag}) = {mintag}…{maxtag}")
 
- printstr += "{0:s} = {1:.3e} ".format("Δ_"+s.tag, chg)
- print(printstr)
+ print(f" | Changes: {', '.join(change_msgs)}")
 
- return xmma, change
+ return xmma, change
+
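
For reference, the two amplitude definitions introduced in the last hunk can be reproduced in isolation. The sketch mirrors the P/Q expressions above literally; the function pq_amplitudes is hypothetical, and the arrays dg, shift and dx are assumed to follow the shapes used in mmasub (dg is (m+1, n); shift and dx are (n,)):

import numpy as np

def pq_amplitudes(dg, shift, dx, version="Svanberg2007"):
    # Same split into positive/negative sensitivity parts as in the hunk above.
    dg_plus = np.maximum(+dg, 0)
    dg_min = np.maximum(-dg, 0)
    dx2 = shift**2
    if "1987" in version:  # original amplitudes
        return dx2 * dg_plus, dx2 * dg_min
    if "2007" in version:  # regularized amplitudes from Svanberg (2007)
        return (dx2 * (1.001*dg_plus + 0.001*dg_min + 1e-5/dx),
                dx2 * (0.001*dg_plus + 1.001*dg_min + 1e-5/dx))
    raise ValueError('Only "Svanberg1987" or "Svanberg2007" are valid options')

The 2007 form keeps both P and Q strictly positive, which is what the in-code comment refers to: it tolerates a tighter subproblem tolerance and lets design variables approach their bounds more closely.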