qoro_divi-0.2.0b1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- divi/__init__.py +8 -0
- divi/_pbar.py +73 -0
- divi/circuits.py +139 -0
- divi/exp/cirq/__init__.py +7 -0
- divi/exp/cirq/_lexer.py +126 -0
- divi/exp/cirq/_parser.py +889 -0
- divi/exp/cirq/_qasm_export.py +37 -0
- divi/exp/cirq/_qasm_import.py +35 -0
- divi/exp/cirq/exception.py +21 -0
- divi/exp/scipy/_cobyla.py +342 -0
- divi/exp/scipy/pyprima/LICENCE.txt +28 -0
- divi/exp/scipy/pyprima/__init__.py +263 -0
- divi/exp/scipy/pyprima/cobyla/__init__.py +0 -0
- divi/exp/scipy/pyprima/cobyla/cobyla.py +599 -0
- divi/exp/scipy/pyprima/cobyla/cobylb.py +849 -0
- divi/exp/scipy/pyprima/cobyla/geometry.py +240 -0
- divi/exp/scipy/pyprima/cobyla/initialize.py +269 -0
- divi/exp/scipy/pyprima/cobyla/trustregion.py +540 -0
- divi/exp/scipy/pyprima/cobyla/update.py +331 -0
- divi/exp/scipy/pyprima/common/__init__.py +0 -0
- divi/exp/scipy/pyprima/common/_bounds.py +41 -0
- divi/exp/scipy/pyprima/common/_linear_constraints.py +46 -0
- divi/exp/scipy/pyprima/common/_nonlinear_constraints.py +64 -0
- divi/exp/scipy/pyprima/common/_project.py +224 -0
- divi/exp/scipy/pyprima/common/checkbreak.py +107 -0
- divi/exp/scipy/pyprima/common/consts.py +48 -0
- divi/exp/scipy/pyprima/common/evaluate.py +101 -0
- divi/exp/scipy/pyprima/common/history.py +39 -0
- divi/exp/scipy/pyprima/common/infos.py +30 -0
- divi/exp/scipy/pyprima/common/linalg.py +452 -0
- divi/exp/scipy/pyprima/common/message.py +336 -0
- divi/exp/scipy/pyprima/common/powalg.py +131 -0
- divi/exp/scipy/pyprima/common/preproc.py +393 -0
- divi/exp/scipy/pyprima/common/present.py +5 -0
- divi/exp/scipy/pyprima/common/ratio.py +56 -0
- divi/exp/scipy/pyprima/common/redrho.py +49 -0
- divi/exp/scipy/pyprima/common/selectx.py +346 -0
- divi/interfaces.py +25 -0
- divi/parallel_simulator.py +258 -0
- divi/qasm.py +220 -0
- divi/qem.py +191 -0
- divi/qlogger.py +119 -0
- divi/qoro_service.py +343 -0
- divi/qprog/__init__.py +13 -0
- divi/qprog/_graph_partitioning.py +619 -0
- divi/qprog/_mlae.py +182 -0
- divi/qprog/_qaoa.py +440 -0
- divi/qprog/_vqe.py +275 -0
- divi/qprog/_vqe_sweep.py +144 -0
- divi/qprog/batch.py +235 -0
- divi/qprog/optimizers.py +75 -0
- divi/qprog/quantum_program.py +493 -0
- divi/utils.py +116 -0
- qoro_divi-0.2.0b1.dist-info/LICENSE +190 -0
- qoro_divi-0.2.0b1.dist-info/LICENSES/Apache-2.0.txt +73 -0
- qoro_divi-0.2.0b1.dist-info/METADATA +57 -0
- qoro_divi-0.2.0b1.dist-info/RECORD +58 -0
- qoro_divi-0.2.0b1.dist-info/WHEEL +4 -0
divi/exp/scipy/pyprima/cobyla/update.py
@@ -0,0 +1,331 @@
"""
This module contains subroutines concerning the update of the interpolation set.

Translated from Zaikun Zhang's modern-Fortran reference implementation in PRIMA.

Dedicated to the late Professor M. J. D. Powell FRS (1936--2015).

Python translation by Nickolai Belakovski.
"""

import numpy as np

from ..common.consts import DEBUGGING
from ..common.infos import DAMAGING_ROUNDING, INFO_DEFAULT
from ..common.linalg import inprod, inv, isinv, matprod, outprod, primasum


def updatexfc(jdrop, constr, cpen, cstrv, d, f, conmat, cval, fval, sim, simi):
    """
    This function revises the simplex by updating the elements of SIM, SIMI, FVAL, CONMAT, and CVAL.
    """

    # Local variables
    itol = 1

    # Sizes
    num_constraints = np.size(constr)
    num_vars = np.size(sim, 0)

    # Preconditions
    if DEBUGGING:
        assert num_constraints >= 0
        assert num_vars >= 1
        assert jdrop is None or (0 <= jdrop <= num_vars + 1)
        assert not any(np.isnan(constr) | np.isneginf(constr))
        assert not (np.isnan(cstrv) | np.isposinf(cstrv))
        assert np.size(d) == num_vars and all(np.isfinite(d))
        assert not (np.isnan(f) | np.isposinf(f))
        assert (
            np.size(conmat, 0) == num_constraints and np.size(conmat, 1) == num_vars + 1
        )
        assert not (np.isnan(conmat) | np.isneginf(conmat)).any()
        assert np.size(cval) == num_vars + 1 and not any(
            (cval < 0) | np.isnan(cval) | np.isposinf(cval)
        )
        assert np.size(fval) == num_vars + 1 and not any(
            np.isnan(fval) | np.isposinf(fval)
        )
        assert np.size(sim, 0) == num_vars and np.size(sim, 1) == num_vars + 1
        assert np.isfinite(sim).all()
        assert all(primasum(abs(sim[:, :num_vars]), axis=0) > 0)
        assert np.size(simi, 0) == num_vars and np.size(simi, 1) == num_vars
        assert np.isfinite(simi).all()
        assert isinv(sim[:, :num_vars], simi, itol)

    # ====================#
    # Calculation starts #
    # ====================#

    # Do nothing when JDROP is None. This can only happen after a trust-region step.
    if jdrop is None:  # JDROP being None is impossible if the input is correct.
        return sim, simi, fval, conmat, cval, INFO_DEFAULT

    # Save copies of SIM and SIMI so that they can be restored if the update turns out
    # to be damaged by rounding errors.
    sim_old = sim.copy()
    simi_old = simi.copy()
    if jdrop < num_vars:
        sim[:, jdrop] = d
        simi_jdrop = simi[jdrop, :] / inprod(simi[jdrop, :], d)
        simi -= outprod(matprod(simi, d), simi_jdrop)
        simi[jdrop, :] = simi_jdrop
    else:  # jdrop == num_vars
        sim[:, num_vars] += d
        sim[:, :num_vars] -= np.tile(d, (num_vars, 1)).T
        simid = matprod(simi, d)
        sum_simi = primasum(simi, axis=0)
        simi += outprod(simid, sum_simi / (1 - sum(simid)))

    # Check whether SIMI is a poor approximation to the inverse of SIM[:, :NUM_VARS].
    # Calculate SIMI from scratch if the current one is damaged by rounding errors.
    itol = 1
    erri = np.max(
        abs(matprod(simi, sim[:, :num_vars]) - np.eye(num_vars))
    )  # np.max returns NaN if any input is NaN
    if erri > 0.1 * itol or np.isnan(erri):
        simi_test = inv(sim[:, :num_vars])
        erri_test = np.max(
            abs(matprod(simi_test, sim[:, :num_vars]) - np.eye(num_vars))
        )
        if erri_test < erri or (np.isnan(erri) and not np.isnan(erri_test)):
            simi = simi_test
            erri = erri_test

    # If SIMI is satisfactory, then update FVAL, CONMAT, CVAL, and the pole position. Otherwise
    # restore SIM and SIMI, and return with INFO = DAMAGING_ROUNDING.
    if erri <= itol:
        fval[jdrop] = f
        conmat[:, jdrop] = constr
        cval[jdrop] = cstrv
        # Switch the best vertex to the pole position SIM[:, NUM_VARS] if it is not there already.
        conmat, cval, fval, sim, simi, info = updatepole(
            cpen, conmat, cval, fval, sim, simi
        )
    else:
        info = DAMAGING_ROUNDING
        sim = sim_old
        simi = simi_old

    # ==================#
    # Calculation ends #
    # ==================#

    # Postconditions
    if DEBUGGING:
        assert (
            np.size(conmat, 0) == num_constraints and np.size(conmat, 1) == num_vars + 1
        )
        assert not (np.isnan(conmat) | np.isneginf(conmat)).any()
        assert np.size(cval) == num_vars + 1 and not any(
            (cval < 0) | np.isnan(cval) | np.isposinf(cval)
        )
        assert np.size(fval) == num_vars + 1 and not any(
            np.isnan(fval) | np.isposinf(fval)
        )
        assert np.size(sim, 0) == num_vars and np.size(sim, 1) == num_vars + 1
        assert np.isfinite(sim).all()
        assert all(primasum(abs(sim[:, :num_vars]), axis=0) > 0)
        assert np.size(simi, 0) == num_vars and np.size(simi, 1) == num_vars
        assert np.isfinite(simi).all()
        assert isinv(sim[:, :num_vars], simi, itol) or info == DAMAGING_ROUNDING

    return sim, simi, fval, conmat, cval, info
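Note (annotation, not part of the packaged file): when JDROP indexes a non-pole vertex, the SIMI update in updatexfc is the Sherman-Morrison rank-one correction for replacing one column of SIM. A minimal sketch that checks the identity, using plain numpy (np.linalg.inv, np.outer, and @) in place of the package's inv, outprod, matprod, and inprod helpers, with made-up data:

import numpy as np

rng = np.random.default_rng(0)
n, jdrop = 4, 2
sim = rng.standard_normal((n, n))  # stands in for SIM[:, :num_vars]
simi = np.linalg.inv(sim)          # stands in for SIMI
d = rng.standard_normal(n)

# Replace column jdrop of SIM by d, then apply the same rank-one update as updatexfc.
sim[:, jdrop] = d
simi_jdrop = simi[jdrop, :] / (simi[jdrop, :] @ d)
simi -= np.outer(simi @ d, simi_jdrop)
simi[jdrop, :] = simi_jdrop

# The updated SIMI is again the inverse of the updated SIM, up to rounding.
assert np.max(abs(simi @ sim - np.eye(n))) < 1e-8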

def findpole(cpen, cval, fval):
    """
    This subroutine identifies the best vertex of the current simplex with respect to the merit
    function PHI = F + CPEN * CSTRV.
    """

    # Size
    num_vars = np.size(fval) - 1

    # Preconditions
    if DEBUGGING:
        assert cpen > 0
        assert np.size(cval) == num_vars + 1 and not any(
            (cval < 0) | np.isnan(cval) | np.isposinf(cval)
        )
        assert np.size(fval) == num_vars + 1 and not any(
            np.isnan(fval) | np.isposinf(fval)
        )

    # ====================#
    # Calculation starts #
    # ====================#

    # Identify the optimal vertex of the current simplex.
    jopt = np.size(fval) - 1
    phi = fval + cpen * cval
    phimin = min(phi)
    # Essentially jopt = np.argmin(phi). However, we keep jopt = num_vars unless there
    # is a strictly better choice. When there are multiple choices, we choose the jopt
    # with the smallest value of cval.
    if phimin < phi[jopt] or any((cval < cval[jopt]) & (phi <= phi[jopt])):
        # While we could use argmin(phi), there may be two places where phi achieves
        # phimin, and in that case we should choose the one with the smallest cval.
        jopt = np.ma.array(cval, mask=(phi > phimin)).argmin()

    # ==================#
    # Calculation ends #
    # ==================#

    # Postconditions
    if DEBUGGING:
        assert jopt >= 0 and jopt < num_vars + 1
        assert (
            jopt == num_vars
            or phi[jopt] < phi[num_vars]
            or (phi[jopt] <= phi[num_vars] and cval[jopt] < cval[num_vars])
        )
    return jopt
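Note (annotation, not part of the packaged file): a small worked example of findpole's tie-breaking, with made-up numbers for a two-variable simplex (so the pole index is 2):

import numpy as np

fval = np.array([1.0, 2.0, 1.9])  # objective value at each vertex
cval = np.array([0.5, 0.0, 0.1])  # constraint violation at each vertex
cpen = 2.0
phi = fval + cpen * cval          # merit values: [2.0, 2.0, 2.1]
# phimin = 2.0 < phi[2] = 2.1, so the pole is not kept. Vertices 0 and 1 both
# attain phimin; the tie is broken by the smaller cval, so findpole returns 1.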

def updatepole(cpen, conmat, cval, fval, sim, simi):
    # --------------------------------------------------------------------------------------------------
    # This subroutine identifies the best vertex of the current simplex with respect to the merit
    # function PHI = F + CPEN * CSTRV, and then switches this vertex to SIM[:, NUM_VARS], which Powell
    # called the "pole position" in his comments. CONMAT, CVAL, FVAL, and SIMI are updated accordingly.
    #
    # N.B. 1: In precise arithmetic, the following two procedures produce the same results:
    # 1) apply UPDATEPOLE to SIM twice, first with CPEN = CPEN1 and then with CPEN = CPEN2;
    # 2) apply UPDATEPOLE to SIM with CPEN = CPEN2.
    # In finite-precision arithmetic, however, they may produce different results unless CPEN1 = CPEN2.
    #
    # N.B. 2: When JOPT == N+1, the best vertex is already at the pole position, so there is nothing to
    # switch. However, as in Powell's code, the code below will check whether SIMI is good enough to
    # work as the inverse of SIM(:, 1:N) or not. If not, Powell's code would invoke an error return of
    # COBYLB; our implementation, however, will try calculating SIMI from scratch; if the recalculated
    # SIMI is still of poor quality, then UPDATEPOLE will return with INFO = DAMAGING_ROUNDING,
    # informing COBYLB that SIMI is poor due to damaging rounding errors.
    #
    # N.B. 3: UPDATEPOLE should be called when and only when FINDPOLE can potentially return a value
    # other than N+1. The value of FINDPOLE is determined by CPEN, CVAL, and FVAL, the latter two being
    # decided by SIM. Thus UPDATEPOLE should be called after CPEN or SIM changes. COBYLA updates CPEN at
    # only two places: the beginning of each trust-region iteration, and when REDRHO is called;
    # SIM is updated only by UPDATEXFC, which itself calls UPDATEPOLE internally. Therefore, we only
    # need to call UPDATEPOLE after updating CPEN at the beginning of each trust-region iteration and
    # after each invocation of REDRHO.

    # Local variables
    itol = 1

    # Sizes
    num_constraints = conmat.shape[0]
    num_vars = sim.shape[0]

    # Preconditions
    if DEBUGGING:
        assert num_constraints >= 0
        assert num_vars >= 1
        assert cpen > 0
        assert (
            np.size(conmat, 0) == num_constraints and np.size(conmat, 1) == num_vars + 1
        )
        assert not (np.isnan(conmat) | np.isneginf(conmat)).any()
        assert np.size(cval) == num_vars + 1 and not any(
            (cval < 0) | np.isnan(cval) | np.isposinf(cval)
        )
        assert np.size(fval) == num_vars + 1 and not any(
            np.isnan(fval) | np.isposinf(fval)
        )
        assert np.size(sim, 0) == num_vars and np.size(sim, 1) == num_vars + 1
        assert np.isfinite(sim).all()
        assert all(primasum(abs(sim[:, :num_vars]), axis=0) > 0)
        assert np.size(simi, 0) == num_vars and np.size(simi, 1) == num_vars
        assert np.isfinite(simi).all()
        assert isinv(sim[:, :num_vars], simi, itol)

    # ====================#
    # Calculation starts #
    # ====================#

    # INFO must be set, as it is an output.
    info = INFO_DEFAULT

    # Identify the optimal vertex of the current simplex.
    jopt = findpole(cpen, cval, fval)

    # Switch the best vertex to the pole position SIM[:, NUM_VARS] if it is not there already and
    # update SIMI. Before the update, save a copy of SIM and SIMI. If the update is unsuccessful due to
    # damaging rounding errors, we restore them and return with INFO = DAMAGING_ROUNDING.
    sim_old = sim.copy()
    simi_old = simi.copy()
    if 0 <= jopt < num_vars:
        # Unless there is a bug in FINDPOLE, it is guaranteed that JOPT >= 0.
        # When JOPT == NUM_VARS, there is nothing to switch; in addition, SIMI[JOPT, :] would be illegal.
        sim[:, num_vars] += sim[:, jopt]
        sim_jopt = sim[:, jopt].copy()
        sim[:, jopt] = 0  # i.e. a zero vector of length num_vars
        sim[:, :num_vars] -= np.tile(sim_jopt, (num_vars, 1)).T
        # The above update is equivalent to multiplying SIM[:, :NUM_VARS] from the right side by a
        # matrix whose JOPT-th row is [-1, -1, ..., -1], while all the other rows are the same as
        # those of the identity matrix. It is easy to check that the inverse of this matrix is itself.
        # Therefore, SIMI should be updated by a multiplication with this matrix (i.e. its inverse)
        # from the left side, as is done in the following line. The JOPT-th row of the updated SIMI is
        # minus the sum of all rows of the original SIMI, whereas all the other rows remain unchanged.
        # NDB 20250114: While testing the CUTEst problem 'SYNTHES2' against the Fortran bindings, I
        # saw a difference between the following for loop and the np.sum command. The differences were
        # small, on the order of 1e-16, i.e. epsilon. According to the numpy documentation, np.sum
        # sometimes uses partial pairwise summation, depending on the memory layout of the array and
        # the axis specified.
        # for i in range(simi.shape[1]):
        #     simi[jopt, i] = -sum(simi[:, i])
        simi[jopt, :] = -primasum(simi, axis=0)

    # Check whether SIMI is a poor approximation to the inverse of SIM[:, :NUM_VARS].
    # Calculate SIMI from scratch if the current one is damaged by rounding errors.
    erri = np.max(
        abs(matprod(simi, sim[:, :num_vars]) - np.eye(num_vars))
    )  # np.max returns NaN if any input is NaN
    itol = 1
    if erri > 0.1 * itol or np.isnan(erri):
        simi_test = inv(sim[:, :num_vars])
        erri_test = np.max(
            abs(matprod(simi_test, sim[:, :num_vars]) - np.eye(num_vars))
        )
        if erri_test < erri or (np.isnan(erri) and not np.isnan(erri_test)):
            simi = simi_test
            erri = erri_test

    # If SIMI is satisfactory, then update FVAL, CONMAT, and CVAL. Otherwise restore SIM and SIMI, and
    # return with INFO = DAMAGING_ROUNDING.
    if erri <= itol:
        if 0 <= jopt < num_vars:
            fval[[jopt, num_vars]] = fval[[num_vars, jopt]]
            conmat[:, [jopt, num_vars]] = conmat[:, [num_vars, jopt]]
            cval[[jopt, num_vars]] = cval[[num_vars, jopt]]
    else:  # erri > itol or erri is NaN
        info = DAMAGING_ROUNDING
        sim = sim_old
        simi = simi_old

    # ==================#
    # Calculation ends #
    # ==================#

    # Postconditions
    if DEBUGGING:
        assert findpole(cpen, cval, fval) == num_vars or info == DAMAGING_ROUNDING
        assert (
            np.size(conmat, 0) == num_constraints and np.size(conmat, 1) == num_vars + 1
        )
        assert not (np.isnan(conmat) | np.isneginf(conmat)).any()
        assert np.size(cval) == num_vars + 1 and not any(
            (cval < 0) | np.isnan(cval) | np.isposinf(cval)
        )
        assert np.size(fval) == num_vars + 1 and not any(
            np.isnan(fval) | np.isposinf(fval)
        )
        assert np.size(sim, 0) == num_vars and np.size(sim, 1) == num_vars + 1
        assert np.isfinite(sim).all()
        assert all(primasum(abs(sim[:, :num_vars]), axis=0) > 0)
        assert np.size(simi, 0) == num_vars and np.size(simi, 1) == num_vars
        assert np.isfinite(simi).all()
        # SIMI may fail to be SIM[:, :num_vars]^{-1} due to damaging rounding; accept that case.
        assert isinv(sim[:, :num_vars], simi, itol) or info == DAMAGING_ROUNDING

    return conmat, cval, fval, sim, simi, info
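Note (annotation, not part of the packaged file): the comment in updatepole claims that the update matrix, whose JOPT-th row is all -1 and whose other rows come from the identity, is its own inverse. A throwaway check with a hypothetical size and index:

import numpy as np

n, jopt = 4, 1
M = np.eye(n)
M[jopt, :] = -1.0  # JOPT-th row all -1, other rows from the identity
assert np.array_equal(M @ M, np.eye(n))  # M is an involution: M @ M == I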
divi/exp/scipy/pyprima/common/__init__.py
File without changes
divi/exp/scipy/pyprima/common/_bounds.py
@@ -0,0 +1,41 @@
import numpy as np
from scipy.optimize import Bounds


def process_bounds(bounds, lenx0):
    """
    `bounds` can either be an object with the properties lb and ub, or a list of tuples
    indicating a lower bound and an upper bound for each variable. If the list contains
    fewer entries than the length of x0, the remaining entries will be generated as -/+ infinity.
    Some examples of valid lists of tuples, assuming len(x0) == 3:
    [(0, 1), (2, 3), (4, 5)]  -> returns [0, 2, 4], [1, 3, 5]
    [(0, 1), (None, 3)]       -> returns [0, -inf, -inf], [1, 3, inf]
    [(0, 1), (-np.inf, 3)]    -> returns [0, -inf, -inf], [1, 3, inf]
    """

    if bounds is None:
        lb = np.array([-np.inf] * lenx0, dtype=np.float64)
        ub = np.array([np.inf] * lenx0, dtype=np.float64)
        return lb, ub

    if isinstance(bounds, Bounds):
        lb = np.array(bounds.lb, dtype=np.float64)
        ub = np.array(bounds.ub, dtype=np.float64)
        lb = np.concatenate((lb, -np.inf * np.ones(lenx0 - len(lb))))
        ub = np.concatenate((ub, np.inf * np.ones(lenx0 - len(ub))))
        return lb, ub

    # If neither of the above conditions is true, we assume that bounds is a list of tuples.
    lb = np.array(
        [bound[0] if bound[0] is not None else -np.inf for bound in bounds],
        dtype=np.float64,
    )
    ub = np.array(
        [bound[1] if bound[1] is not None else np.inf for bound in bounds],
        dtype=np.float64,
    )
    # If there were fewer bounds than variables, pad the rest with -/+ infinity.
    lb = np.concatenate((lb, -np.inf * np.ones(lenx0 - len(lb))))
    ub = np.concatenate((ub, np.inf * np.ones(lenx0 - len(ub))))

    return lb, ub
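Note (annotation, not part of the packaged file): a quick call mirroring the docstring's second example:

lb, ub = process_bounds([(0, 1), (None, 3)], lenx0=3)
# lb -> array([  0., -inf, -inf])
# ub -> array([  1.,   3.,  inf])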
divi/exp/scipy/pyprima/common/_linear_constraints.py
@@ -0,0 +1,46 @@
import numpy as np
from scipy.optimize import LinearConstraint


def combine_multiple_linear_constraints(constraints):
    full_A = constraints[0].A
    full_lb = constraints[0].lb
    full_ub = constraints[0].ub
    for constraint in constraints[1:]:
        full_A = np.concatenate((full_A, constraint.A), axis=0)
        full_lb = np.concatenate((full_lb, constraint.lb), axis=0)
        full_ub = np.concatenate((full_ub, constraint.ub), axis=0)
    return LinearConstraint(full_A, full_lb, full_ub)


def separate_LC_into_eq_and_ineq(linear_constraint):
    # The Python interface receives linear constraints lb <= A*x <= ub, but the
    # Fortran backend of PRIMA expects that the linear constraints are specified
    # as A_eq*x = b_eq, A_ineq*x <= b_ineq.
    # As such, we must:
    # 1. for constraints with lb == ub, rewrite them as A_eq*x = b_eq;
    # 2. for constraints with lb < ub, rewrite them as A_ineq*x <= b_ineq.

    # We suppose lb == ub if ub <= lb + 2*epsilon, assuming that the preprocessing
    # ensures lb <= ub.
    epsilon = np.finfo(np.float64).eps

    eq_indices = linear_constraint.ub <= (linear_constraint.lb + 2 * epsilon)
    A_eq = linear_constraint.A[eq_indices]
    b_eq = (linear_constraint.lb[eq_indices] + linear_constraint.ub[eq_indices]) / 2.0

    ineq_lb_indices = linear_constraint.lb > -np.inf
    A_ineq_lb = -linear_constraint.A[~eq_indices & ineq_lb_indices]
    b_ineq_lb = -linear_constraint.lb[~eq_indices & ineq_lb_indices]
    ineq_ub_indices = linear_constraint.ub < np.inf
    A_ineq_ub = linear_constraint.A[~eq_indices & ineq_ub_indices]
    b_ineq_ub = linear_constraint.ub[~eq_indices & ineq_ub_indices]
    A_ineq = np.concatenate((A_ineq_lb, A_ineq_ub))
    b_ineq = np.concatenate((b_ineq_lb, b_ineq_ub))

    # Ensure dtype is float64, or set to None if empty.
    A_eq = np.array(A_eq, dtype=np.float64) if len(A_eq) > 0 else None
    b_eq = np.array(b_eq, dtype=np.float64) if len(b_eq) > 0 else None
    A_ineq = np.array(A_ineq, dtype=np.float64) if len(A_ineq) > 0 else None
    b_ineq = np.array(b_ineq, dtype=np.float64) if len(b_ineq) > 0 else None
    return A_eq, b_eq, A_ineq, b_ineq
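Note (annotation, not part of the packaged file): an example split with made-up data, one equality row (lb == ub) and one one-sided row:

import numpy as np
from scipy.optimize import LinearConstraint

# x0 + x1 == 1 and x0 - x1 <= 2
lc = LinearConstraint(
    np.array([[1.0, 1.0], [1.0, -1.0]]),
    np.array([1.0, -np.inf]),
    np.array([1.0, 2.0]),
)
A_eq, b_eq, A_ineq, b_ineq = separate_LC_into_eq_and_ineq(lc)
# A_eq -> [[1., 1.]], b_eq -> [1.]
# A_ineq -> [[1., -1.]], b_ineq -> [2.]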
divi/exp/scipy/pyprima/common/_nonlinear_constraints.py
@@ -0,0 +1,64 @@
import numpy as np


def transform_constraint_function(nlc):
    """
    The Python interface receives the constraints as lb <= constraint(x) <= ub,
    but the Fortran backend expects the nonlinear constraints to be constraint(x) <= 0.
    Thus a conversion is needed.

    In addition to the conversion, we add a check to ensure that the provided lower/upper bounds
    have a shape consistent with the output of the constraint function.
    """

    def newconstraint(x):
        values = np.atleast_1d(np.array(nlc.fun(x), dtype=np.float64))

        # Upgrade the lower/upper bounds to vectors if necessary
        lb = nlc.lb
        try:
            _ = len(lb)
        except TypeError:
            lb = np.array([nlc.lb] * len(values), dtype=np.float64)

        ub = nlc.ub
        try:
            _ = len(ub)
        except TypeError:
            ub = np.array([nlc.ub] * len(values), dtype=np.float64)

        # Check the shapes and raise an exception if they do not match
        if len(values) != len(lb):
            raise ValueError(
                "The number of elements in the constraint function's output does not match the number of elements in the lower bound."
            )
        if len(values) != len(ub):
            raise ValueError(
                "The number of elements in the constraint function's output does not match the number of elements in the upper bound."
            )

        # Combine the upper and lower bounds to transform the function into the form
        # expected by the Fortran backend.
        return np.concatenate(
            (
                [lb_ii - vi for lb_ii, vi in zip(lb, values) if lb_ii > -np.inf],
                [vi - ub_ii for ub_ii, vi in zip(ub, values) if ub_ii < np.inf],
            )
        )

    return newconstraint


def process_nl_constraints(nlcs):
    functions = []
    for nlc in nlcs:
        fun_i = transform_constraint_function(nlc)
        functions.append(fun_i)

    def constraint_function(x):
        values = np.empty(0, dtype=np.float64)
        for fun in functions:
            values = np.concatenate((values, fun(x)))
        return values

    return constraint_function