junshan-kit 2.5.1__py2.py3-none-any.whl → 2.7.3__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,602 @@
+ import time
+ import torch
+ import cvxpy as cp
+ import numpy as np
+ 
+ np.set_printoptions(precision=8, suppress=True)
+ 
+ 
+ def add_cutting(x_his, f_his, g_his, x_k, g_k, loss, cutting_number=10):
+     """Append the newest iterate, subgradient, and loss to the bundle,
+     keeping at most `cutting_number` cuts (oldest evicted first)."""
+     x_his.append(x_k)
+     g_his.append(g_k)
+     f_his.append(loss)
+ 
+     if len(f_his) > cutting_number:
+         x_his.pop(0)
+         g_his.pop(0)
+         f_his.pop(0)
+ 
+     return x_his, f_his, g_his
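
A minimal sketch of the eviction behaviour (illustrative only, not part of the released module): with `cutting_number=2`, the third call drops the cut from the first iteration.

# Illustrative usage sketch; assumes the definitions above are imported.
x_his, f_his, g_his = [], [], []
for t in range(3):
    x_t = torch.tensor([float(t)])
    f_t = torch.abs(x_t).sum()   # f(x) = |x|
    g_t = torch.sign(x_t)        # a subgradient of |x| at x_t
    x_his, f_his, g_his = add_cutting(
        x_his, f_his, g_his, x_t, g_t, f_t, cutting_number=2)
assert len(f_his) == 2           # the t = 0 cut has been evicted
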
+ 
+ 
+ def cut_selection(x_his, f_his, g_his, M):
+     """Keep a cut j only if the newest iterate violates it by at least
+     M * ||g_j - g_k||^2; the newest cut itself is always retained."""
+     selected_x, selected_f, selected_g = [], [], []
+     for j in range(len(f_his) - 1):
+         lhs = f_his[-1]
+         rhs = (f_his[j] + torch.dot(g_his[j], x_his[-1] - x_his[j])
+                + M * torch.norm(g_his[j] - g_his[-1], p=2) ** 2)
+         if lhs >= rhs:
+             selected_x.append(x_his[j])
+             selected_g.append(g_his[j])
+             selected_f.append(f_his[j])
+ 
+     selected_x.append(x_his[-1])
+     selected_g.append(g_his[-1])
+     selected_f.append(f_his[-1])
+ 
+     return selected_x, selected_f, selected_g
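
A quick check of the selection rule on two cuts of f(x) = |x| (illustrative numbers only): the older cut survives because the newest iterate violates its linearization by more than M * ||g_0 - g_1||^2.

# Illustrative usage sketch; assumes the definitions above are imported.
x_his = [torch.tensor([-1.0]), torch.tensor([2.0])]
g_his = [torch.tensor([-1.0]), torch.tensor([1.0])]
f_his = [torch.tensor(1.0), torch.tensor(2.0)]
sx, sf, sg = cut_selection(x_his, f_his, g_his, M=0.1)
assert len(sf) == 2   # rhs = 1 - 3 + 0.1 * 4 = -1.6 <= lhs = 2, so both cuts stay
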
+ 
+ 
+ def get_var(selected_x, selected_f, selected_g, delta):
+     """Assemble the subproblem data: Gk stacks the selected subgradients
+     column-wise, rk is delta times the newest subgradient's norm, and ek
+     holds the linearization intercepts f_j - <g_j, x_j>."""
+     Gk = torch.stack(selected_g, dim=0).T                # shape (n, m)
+     rk = delta * torch.norm(Gk[:, -1], p=2)              # newest subgradient = last column
+     ek_list = []
+     for j in range(len(selected_f)):
+         ek_list.append(selected_f[j] - selected_g[j] @ selected_x[j])
+ 
+     ek = torch.stack(ek_list, dim=0)
+ 
+     return Gk, rk, ek
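
Shape conventions, with hypothetical sizes (illustrative): for m = 3 cuts in R^5, Gk holds the subgradients as columns and ek the m intercepts.

# Illustrative usage sketch; assumes the definitions above are imported.
sel_x = [torch.randn(5) for _ in range(3)]
sel_g = [torch.randn(5) for _ in range(3)]
sel_f = [torch.randn(()) for _ in range(3)]
Gk, rk, ek = get_var(sel_x, sel_f, sel_g, delta=0.5)
assert Gk.shape == (5, 3) and ek.shape == (3,) and rk.ndim == 0
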
+ 
+ 
+ # <sub_pf>
+ def subproblem_pf(Gk, ek, xk, delta, Paras):
+     """Proximal bundle step: solve the dual QP over the simplex, then move
+     xk along the aggregated subgradient Gk @ lambda*."""
+     # tensor -> numpy (~0.05 s)
+     Gk_np = Gk.cpu().numpy()
+     ek_np = ek.cpu().numpy()
+     xk_np = xk.cpu().numpy()
+ 
+     n, m = Gk_np.shape
+ 
+     # define the dual variables
+     lambda_var = cp.Variable(m, nonneg=True)
+     v = cp.Variable(nonneg=True)
+ 
+     # objective function
+     objective = cp.Minimize(
+         (delta / 2) * cp.quad_form(lambda_var, Gk_np.T @ Gk_np)
+         - (Gk_np.T @ xk_np + ek_np) @ lambda_var)
+ 
+     # constraints
+     constraints: list[cp.Constraint] = [cp.sum(lambda_var) + v == 1]
+ 
+     # solve
+     problem = cp.Problem(objective, constraints)
+     problem.solve()
+ 
+     if lambda_var.value is None:
+         raise ValueError("lambda_var has not been solved yet")
+ 
+     lambda_GPU = torch.from_numpy(lambda_var.value).float().to(Paras['device'])  # ~1e-3 s
+ 
+     xk = xk - delta * Gk @ lambda_GPU
+ 
+     return xk
+ 
+ # <sub_pf>
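
Putting the pieces together, one possible solver loop on f(x) = ||x||_1 (illustrative only; `Paras` here is a minimal stand-in containing just the device key these routines read):

# Illustrative usage sketch; assumes the definitions above are imported.
Paras = {'device': 'cpu'}
xk = torch.tensor([1.0, -2.0])
x_his, f_his, g_his = [], [], []
for _ in range(5):
    fk = torch.norm(xk, p=1)
    gk = torch.sign(xk)              # a subgradient of the l1 norm
    x_his, f_his, g_his = add_cutting(x_his, f_his, g_his, xk, gk, fk)
    sx, sf, sg = cut_selection(x_his, f_his, g_his, M=0.1)
    Gk, rk, ek = get_var(sx, sf, sg, delta=0.1)   # rk only matters for the TR variants
    xk = subproblem_pf(Gk, ek, xk, 0.1, Paras)
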
+ 
+ 
+ def subproblem_tr(Gk, ek, xk, rk, Paras):
+     """Trust-region bundle step: solve the dual over the simplex, then move
+     xk a distance rk along the aggregated subgradient direction."""
+     # tensor -> numpy (~0.05 s)
+     Gk_np = Gk.cpu().numpy()
+     ek_np = ek.cpu().numpy()
+     xk_np = xk.cpu().numpy()
+     rk_np = rk.cpu().numpy()
+ 
+     n, m = Gk_np.shape
+     A = Gk.T @ Gk
+ 
+     # optional regularization if A is nearly singular:
+     # mu = 1e-4
+     # A = Gk.T @ Gk + mu * torch.eye(Gk.shape[1], device=Gk.device)
+ 
+     Lk = torch.linalg.cholesky(A).T  # ||Lk @ lam|| = ||Gk @ lam||, but Lk is m x m: faster
+     Lk_np = Lk.cpu().numpy()
+ 
+     # define the dual variables
+     lambda_var = cp.Variable(m, nonneg=True)
+     v = cp.Variable(nonneg=True)
+ 
+     # objective function
+     objective = cp.Minimize(
+         rk_np * cp.norm(Lk_np @ lambda_var, 2)
+         - (Gk_np.T @ xk_np + ek_np) @ lambda_var)
+ 
+     # constraints
+     constraints: list[cp.Constraint] = [cp.sum(lambda_var) + v == 1]
+ 
+     # solve
+     problem = cp.Problem(objective, constraints)
+     problem.solve()
+     # problem.solve(solver=cp.SCS, eps=1e-5)
+     # problem.solve(solver=cp.ECOS, abstol=1e-8, reltol=1e-8, feastol=1e-8)
+ 
+     if lambda_var.value is None:
+         raise ValueError("lambda_var has not been solved yet")
+ 
+     lambda_GPU = torch.from_numpy(lambda_var.value).float().to(Paras['device'])  # ~1e-3 s
+ 
+     xk = xk.reshape(-1, 1) - (rk / torch.norm(Gk @ lambda_GPU.reshape(-1, 1), p=2)) * Gk @ lambda_GPU.reshape(-1, 1)
+ 
+     return xk.reshape(-1)
+ 
+ 
+ # <SPBM-TR_Sub>
+ def subproblem_tr_2(Gk, ek, xk, rk, Paras):
+     """Trust-region bundle step with a degeneracy guard: when the
+     aggregated subgradient Gk @ lambda* is numerically zero, recover the
+     step from an auxiliary QP built around the dual optimal value v*."""
+     # tensor -> numpy (~0.05 s)
+     Gk_np = Gk.cpu().numpy()
+     ek_np = ek.cpu().numpy()
+     xk_np = xk.cpu().numpy()
+     rk_np = rk.cpu().numpy()
+ 
+     n, m = Gk_np.shape
+     A = Gk.T @ Gk
+ 
+     Lk = torch.linalg.cholesky(A).T  # ||Lk @ lam|| = ||Gk @ lam||, but Lk is m x m: faster
+     Lk_np = Lk.cpu().numpy()
+ 
+     # define the dual variables
+     lambda_var = cp.Variable(m, nonneg=True)
+     nu = cp.Variable(nonneg=True)
+ 
+     # objective function
+     objective = cp.Minimize(
+         rk_np * cp.norm(Lk_np @ lambda_var, 2)
+         - (Gk_np.T @ xk_np + ek_np) @ lambda_var)
+ 
+     # constraints
+     constraints: list[cp.Constraint] = [cp.sum(lambda_var) + nu == 1]
+ 
+     # solve
+     problem = cp.Problem(objective, constraints)
+     problem.solve()
+     # problem.solve(solver=cp.SCS, eps=1e-5)
+     # problem.solve(solver=cp.ECOS, abstol=1e-8, reltol=1e-8, feastol=1e-8)
+ 
+     if lambda_var.value is None:
+         raise ValueError("lambda_var has not been solved yet")
+ 
+     lambda_GPU = torch.from_numpy(lambda_var.value).float().to(Paras['device'])  # ~1e-3 s
+ 
+     eps = 1e-6
+     g_lambda = Gk @ lambda_GPU.reshape(-1, 1)
+     norm_g_lambda = torch.norm(g_lambda, p=2)
+ 
+     if norm_g_lambda < eps:  # degenerate case: the normalized TR step is undefined (~0.01 s)
+         v_star = np.dot(lambda_var.value, ek_np)
+ 
+         mu = cp.Variable(m, nonneg=True)
+ 
+         # auxiliary QP recovering the step
+         term1 = 0.25 * cp.sum_squares(Lk_np @ mu)
+         term2 = mu @ (ek_np - v_star * np.ones(m) + Gk_np.T @ xk_np)
+ 
+         objective = cp.Minimize(term1 - term2)
+         problem = cp.Problem(objective)
+         problem.solve()
+ 
+         mu_GPU = torch.from_numpy(mu.value).float().to(Paras['device'])
+         # optionally clamp mu_GPU (e.g., min=1e-8) to avoid numerical instability:
+         # mu_GPU = torch.clamp(mu_GPU, min=1e-8)
+ 
+         xk = xk.reshape(-1, 1) - 0.5 * Gk @ mu_GPU.reshape(-1, 1)
+         return xk.reshape(-1)
+ 
+     # otherwise, update normally
+     xk = xk.reshape(-1, 1) - (rk / norm_g_lambda) * g_lambda
+     return xk.reshape(-1)
+ 
+ # <SPBM-TR_Sub>
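
The guard matters exactly at a minimizer, where opposing subgradients cancel. A contrived instance (illustrative; Gk is chosen so Gk^T Gk stays positive definite for the Cholesky factorization):

# Illustrative usage sketch; assumes the definitions above are imported.
Paras = {'device': 'cpu'}
Gk = torch.tensor([[1.0, -1.0], [0.1, 0.1]])   # two nearly opposing cuts in R^2
ek = torch.zeros(2)
xk = torch.zeros(2)
rk = torch.tensor(0.5)
x_new = subproblem_tr_2(Gk, ek, xk, rk, Paras)  # lambda* ~ 0 here, so the QP branch runs
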
+ 
+ 
+ # <SPBM_TR_NoneSpecial>
+ def subproblem_tr_NoneSpecial(Gk, ek, xk, rk, Paras):
+     """Trust-region bundle step that always recovers the step from the
+     auxiliary QP, using the dual optimal value v* computed explicitly."""
+     # tensor -> numpy (~0.05 s)
+     Gk_np = Gk.cpu().numpy()
+     ek_np = ek.cpu().numpy()
+     xk_np = xk.cpu().numpy()
+     rk_np = rk.cpu().numpy()
+ 
+     n, m = Gk_np.shape
+     A = Gk.T @ Gk
+ 
+     Lk = torch.linalg.cholesky(A).T  # ||Lk @ lam|| = ||Gk @ lam||, but Lk is m x m: faster
+     Lk_np = Lk.cpu().numpy()
+ 
+     # define the dual variables
+     lambda_var = cp.Variable(m, nonneg=True)
+     nu = cp.Variable(nonneg=True)
+ 
+     # define the objective function
+     objective = cp.Minimize(
+         rk_np * cp.norm(Lk_np @ lambda_var, 2)
+         - (Gk_np.T @ xk_np + ek_np) @ lambda_var)
+ 
+     # constraints
+     constraints: list[cp.Constraint] = [cp.sum(lambda_var) + nu == 1]
+ 
+     # solve
+     problem = cp.Problem(objective, constraints)
+     problem.solve()
+ 
+     if lambda_var.value is None:
+         raise ValueError("lambda_var has not been solved yet")
+ 
+     # optimal value of the primal problem:
+     # v* = lambda^T (e + G^T x) - rk * ||G @ lambda||
+     Gk_xk = Gk_np.T @ xk_np.reshape(-1)          # keep this 1-D so v_star is a scalar
+     v_star_item1 = np.dot(lambda_var.value, ek_np + Gk_xk)
+     v_star_item2 = rk_np * np.linalg.norm(Gk_np @ lambda_var.value)
+ 
+     v_star = v_star_item1 - v_star_item2
+ 
+     mu = cp.Variable(m, nonneg=True)
+ 
+     # auxiliary QP recovering the step
+     term1 = 0.25 * cp.sum_squares(Lk_np @ mu)
+     term2 = mu @ (ek_np - v_star * np.ones(m) + Gk_np.T @ xk_np)
+ 
+     objective = cp.Minimize(term1 - term2)
+     problem = cp.Problem(objective)
+     problem.solve()
+ 
+     mu_GPU = torch.from_numpy(mu.value).float().to(Paras['device'])
+ 
+     xk = xk.reshape(-1, 1) - 0.5 * Gk @ mu_GPU.reshape(-1, 1)
+ 
+     return xk.reshape(-1)
+ 
+ # <SPBM_TR_NoneSpecial>
+ 
+ 
+ def subproblem_tr_3(Gk, ek, xk, rk, Paras):
+     """Variant of subproblem_tr_2 that skips the norm guard and always
+     recovers the step from the auxiliary QP, with v* = lambda^T e."""
+     # tensor -> numpy (~0.05 s)
+     Gk_np = Gk.cpu().numpy()
+     ek_np = ek.cpu().numpy()
+     xk_np = xk.cpu().numpy()
+     rk_np = rk.cpu().numpy()
+ 
+     n, m = Gk_np.shape
+     A = Gk.T @ Gk
+ 
+     Lk = torch.linalg.cholesky(A).T  # ||Lk @ lam|| = ||Gk @ lam||, but Lk is m x m: faster
+     Lk_np = Lk.cpu().numpy()
+ 
+     # define the dual variables
+     lambda_var = cp.Variable(m, nonneg=True)
+     nu = cp.Variable(nonneg=True)
+ 
+     objective = cp.Minimize(
+         rk_np * cp.norm(Lk_np @ lambda_var, 2)
+         - (Gk_np.T @ xk_np + ek_np) @ lambda_var)
+ 
+     # constraints
+     constraints: list[cp.Constraint] = [cp.sum(lambda_var) + nu == 1]
+ 
+     # solve
+     problem = cp.Problem(objective, constraints)
+     problem.solve()
+ 
+     if lambda_var.value is None:
+         raise ValueError("lambda_var has not been solved yet")
+ 
+     v_star = np.dot(lambda_var.value, ek_np)
+ 
+     mu = cp.Variable(m, nonneg=True)
+ 
+     # construct the auxiliary objective function
+     term1 = 0.25 * cp.sum_squares(Lk_np @ mu)
+     term2 = mu @ (ek_np - v_star * np.ones(m) + Gk_np.T @ xk_np)
+ 
+     objective = cp.Minimize(term1 - term2)
+     problem = cp.Problem(objective)
+     problem.solve()
+ 
+     mu_GPU = torch.from_numpy(mu.value).float().to(Paras['device'])
+ 
+     xk = xk.reshape(-1, 1) - 0.5 * Gk @ mu_GPU.reshape(-1, 1)
+ 
+     return xk.reshape(-1)
+ 
+ 
+ def subproblem_tr_NoneLower(Gk, ek, xk, rk, Paras):
+     """Trust-region bundle step without the lower cut: the simplex slack
+     is dropped, so the weights must satisfy sum(lambda) == 1 exactly."""
+     # tensor -> numpy (~0.05 s)
+     Gk_np = Gk.cpu().numpy()
+     ek_np = ek.cpu().numpy()
+     xk_np = xk.cpu().numpy()
+     rk_np = rk.cpu().numpy()
+ 
+     n, m = Gk_np.shape
+     A = Gk.T @ Gk
+ 
+     Lk = torch.linalg.cholesky(A).T  # ||Lk @ lam|| = ||Gk @ lam||, but Lk is m x m: faster
+     Lk_np = Lk.cpu().numpy()
+ 
+     lambda_var = cp.Variable(m, nonneg=True)
+ 
+     objective = cp.Minimize(
+         rk_np * cp.norm(Lk_np @ lambda_var, 2)
+         - (Gk_np.T @ xk_np + ek_np) @ lambda_var)
+ 
+     # constraints (no slack variable)
+     constraints = [cp.sum(lambda_var) == 1]
+ 
+     problem = cp.Problem(objective, constraints)  # type: ignore
+     problem.solve()
+     # problem.solve(solver=cp.SCS, eps=1e-5)
+     # problem.solve(solver=cp.ECOS, abstol=1e-8, reltol=1e-8, feastol=1e-8)
+ 
+     if lambda_var.value is None:
+         raise ValueError("lambda_var has not been solved yet")
+ 
+     lambda_GPU = torch.from_numpy(lambda_var.value).float().to(Paras['device'])  # ~1e-3 s
+ 
+     eps = 1e-6
+     g_lambda = Gk @ lambda_GPU.reshape(-1, 1)
+     norm_g_lambda = torch.norm(g_lambda, p=2)
+ 
+     if norm_g_lambda < eps:  # degenerate case: fall back to the auxiliary QP (~0.01 s)
+         v_star = np.dot(lambda_var.value, ek_np)
+ 
+         mu = cp.Variable(m, nonneg=True)
+ 
+         # construct the auxiliary objective function
+         term1 = 0.25 * cp.sum_squares(Lk_np @ mu)
+         term2 = mu @ (ek_np - v_star * np.ones(m) + Gk_np.T @ xk_np)
+ 
+         objective = cp.Minimize(term1 - term2)
+         problem = cp.Problem(objective)
+         problem.solve()
+ 
+         mu_GPU = torch.from_numpy(mu.value).float().to(Paras['device'])
+         # optionally clamp mu_GPU (e.g., min=1e-8) to avoid numerical instability:
+         # mu_GPU = torch.clamp(mu_GPU, min=1e-8)
+ 
+         xk = xk.reshape(-1, 1) - 0.5 * Gk @ mu_GPU.reshape(-1, 1)
+         return xk.reshape(-1)
+ 
+     # otherwise, update normally
+     xk = xk.reshape(-1, 1) - (rk / norm_g_lambda) * g_lambda
+     return xk.reshape(-1)
+ 
+ 
+ def subproblem_pf_NoneLower(Gk, ek, xk, delta, Paras):
+     """Proximal bundle step without the lower cut: the simplex slack is
+     dropped, so the weights must satisfy sum(lambda) == 1 exactly."""
+     # tensor -> numpy (~0.05 s)
+     Gk_np = Gk.cpu().numpy()
+     ek_np = ek.cpu().numpy()
+     xk_np = xk.cpu().numpy()
+ 
+     n, m = Gk_np.shape
+ 
+     # define the dual variable (no slack)
+     lambda_var = cp.Variable(m, nonneg=True)
+ 
+     # objective function
+     objective = cp.Minimize(
+         (delta / 2) * cp.quad_form(lambda_var, Gk_np.T @ Gk_np)
+         - (Gk_np.T @ xk_np + ek_np) @ lambda_var)
+ 
+     # constraints
+     constraints = [cp.sum(lambda_var) == 1]
+ 
+     # solve
+     problem = cp.Problem(objective, constraints)  # type: ignore
+     problem.solve()
+ 
+     if lambda_var.value is None:
+         raise ValueError("lambda_var has not been solved yet")
+ 
+     lambda_GPU = torch.from_numpy(lambda_var.value).float().to(Paras['device'])  # ~1e-3 s
+ 
+     xk = xk - delta * Gk @ lambda_GPU
+ 
+     return xk
+ 
+ 
+ def bundle(Gk, ek, xk, delta, Paras):
+     """Plain bundle step; the body is identical to subproblem_pf_NoneLower,
+     so delegate to it."""
+     return subproblem_pf_NoneLower(Gk, ek, xk, delta, Paras)
+ 
+ 
+ def subproblem_tr_primal(Gk, ek, xk, rk, Paras):
+     """Solve the trust-region subproblem directly in primal form:
+     minimize v subject to Gk^T x + ek <= v, ||x - xk|| <= rk, v >= 0."""
+     # tensor -> numpy (~0.05 s)
+     Gk_np = Gk.cpu().numpy()
+     ek_np = ek.cpu().numpy()
+     xk_np = xk.cpu().numpy()
+     rk_np = rk.cpu().numpy()
+ 
+     n, m = Gk_np.shape
+ 
+     m_ones = np.ones(m)
+     x = cp.Variable(n)
+     v = cp.Variable()
+ 
+     objective = cp.Minimize(v)
+ 
+     constraints = [
+         Gk_np.T @ x + ek_np <= v * m_ones,
+         cp.norm(x - xk_np) <= rk_np,
+         v >= 0
+     ]
+ 
+     problem = cp.Problem(objective, constraints)  # type: ignore
+     problem.solve()
+ 
+     if x.value is None:
+         raise ValueError("the subproblem has not been solved yet")
+ 
+     return torch.from_numpy(x.value).float().to(Paras['device'])
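
Since subproblem_tr_primal solves the same subproblem in primal form, it gives a way to cross-check the dual-based routines on small random instances (illustrative):

# Illustrative usage sketch; assumes the definitions above are imported.
torch.manual_seed(0)
Paras = {'device': 'cpu'}
Gk = torch.randn(4, 3)
ek = torch.rand(3)
xk = torch.randn(4)
rk = torch.tensor(0.5)
x_primal = subproblem_tr_primal(Gk, ek, xk, rk, Paras)
x_dual = subproblem_tr_2(Gk, ek, xk, rk, Paras)
# the two updates coincide (up to solver tolerance) when the trust-region
# constraint is active at the primal optimum
print(torch.norm(x_primal - x_dual))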