junshan-kit 2.4.0__py2.py3-none-any.whl → 2.4.1__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,601 @@
+ import time
+ import torch
+ 
+ import cvxpy as cp
+ import numpy as np
+ 
+ np.set_printoptions(precision=8, suppress=True)
+ 
+ 
+ def add_cutting(x_his, f_his, g_his, x_k, g_k, loss, cutting_number=10):
+     """Append the newest iterate, subgradient, and loss to the bundle,
+     keeping at most `cutting_number` cuts (oldest dropped first)."""
+     x_his.append(x_k)
+     g_his.append(g_k)
+     f_his.append(loss)
+ 
+     if len(f_his) > cutting_number:
+         x_his.pop(0)
+         g_his.pop(0)
+         f_his.pop(0)
+ 
+     return x_his, f_his, g_his
+ 
+ 
+ def cut_selection(x_his, f_his, g_his, M):
+     """Keep the cuts that are still active at the latest iterate.
+ 
+     Cut j is kept when f(x_k) >= f_j + <g_j, x_k - x_j> + M * ||g_j - g_k||^2;
+     the latest cut is always kept."""
+     selected_x, selected_f, selected_g = [], [], []
+     for j in range(len(f_his) - 1):
+         lhs = f_his[-1]
+         rhs = f_his[j] + torch.dot(g_his[j], (x_his[-1] - x_his[j])) + M * torch.norm(g_his[j] - g_his[-1], p=2) ** 2
+         if lhs >= rhs:
+             selected_x.append(x_his[j])
+             selected_g.append(g_his[j])
+             selected_f.append(f_his[j])
+ 
+     selected_x.append(x_his[-1])
+     selected_g.append(g_his[-1])
+     selected_f.append(f_his[-1])
+ 
+     return selected_x, selected_f, selected_g
+ 
+ def get_var(selected_x, selected_f, selected_g, delta):
+     """Assemble the subproblem data: Gk (one column per selected cut),
+     the trust-region radius rk, and the linearization offsets ek."""
+     Gk = torch.stack(selected_g, dim=0).T  # 0.00059s
+     rk = delta * torch.norm(Gk[-1, :], p=2)
+     # e_j = f_j - <g_j, x_j> for every selected cut.
+     ek_list = [f - g @ x for f, g, x in zip(selected_f, selected_g, selected_x)]
+     ek = torch.stack(ek_list, dim=0)
+ 
+     return Gk, rk, ek
+ 
+ # <sub_pf>
+ def subproblem_pf(Gk, ek, xk, delta, Paras):
+     """Proximal subproblem in dual form: solve a QP for the multipliers
+     lambda, then take the step x_{k+1} = x_k - delta * Gk @ lambda."""
+     # tensor ---> numpy (0.05s)
+     Gk_np = Gk.cpu().numpy()
+     ek_np = ek.cpu().numpy()
+     xk_np = xk.cpu().numpy()
+ 
+     n, m = Gk_np.shape
+ 
+     # Define variables.
+     lambda_var = cp.Variable(m, nonneg=True)
+     v = cp.Variable(nonneg=True)
+ 
+     # Objective function.
+     objective = cp.Minimize(
+         (delta / 2) * cp.quad_form(lambda_var, Gk_np.T @ Gk_np) - (Gk_np.T @ xk_np + ek_np) @ lambda_var)
+ 
+     # Constraints: multipliers and the slack v sum to one.
+     constraints: list[cp.Constraint] = [cp.sum(lambda_var) + v == 1]
+ 
+     # Solve.
+     problem = cp.Problem(objective, constraints)
+     problem.solve()
+ 
+     lambda_GPU = torch.from_numpy(lambda_var.value).float().to(Paras['device'])  # 1e-3
+ 
+     xk = xk - delta * Gk @ lambda_GPU
+ 
+     return xk
+ # <sub_pf>
+ 
+ 
+ def subproblem_tr(Gk, ek, xk, rk, Paras):
+     """Trust-region subproblem in dual form: solve for the multipliers
+     lambda, then step from xk along -Gk @ lambda with step length
+     rk / ||Gk @ lambda||."""
+     # tensor ---> numpy (0.05s)
+     Gk_np = Gk.cpu().numpy()
+     ek_np = ek.cpu().numpy()
+     xk_np = xk.cpu().numpy()
+     rk_np = rk.cpu().numpy()
+ 
+     n, m = Gk_np.shape
+     A = Gk.T @ Gk
+ 
+     # mu = 1e-4
+     # A = Gk.T @ Gk + mu * torch.eye(Gk.shape[1], device=Gk.device)
+ 
+     # Cholesky factor of the Gram matrix: ||Lk @ lambda|| equals
+     # ||Gk @ lambda|| but is cheaper to evaluate inside the solver.
+     Lk = torch.linalg.cholesky(A).T
+     Lk_np = Lk.cpu().numpy()
+ 
+     # Define variables.
+     lambda_var = cp.Variable(m, nonneg=True)
+     v = cp.Variable(nonneg=True)
+ 
+     # Objective function.
+     objective = cp.Minimize(
+         rk_np * cp.norm(Lk_np @ lambda_var, 2) - (Gk_np.T @ xk_np + ek_np) @ lambda_var)
+ 
+     # Constraints.
+     constraints: list[cp.Constraint] = [cp.sum(lambda_var) + v == 1]
+ 
+     # Solve.
+     problem = cp.Problem(objective, constraints)
+     problem.solve()
+     # problem.solve(solver=cp.SCS, eps=1e-5)
+     # problem.solve(solver=cp.ECOS, abstol=1e-8, reltol=1e-8, feastol=1e-8)
+ 
+     if lambda_var.value is None:
+         raise ValueError("lambda_var has not been solved yet")
+ 
+     lambda_GPU = torch.from_numpy(lambda_var.value).float().to(Paras['device'])  # 1e-3
+ 
+     xk = xk.reshape(-1, 1) - (rk / torch.norm(Gk @ lambda_GPU.reshape(-1, 1), p=2)) * Gk @ lambda_GPU.reshape(-1, 1)
+ 
+     return xk.reshape(-1)
+ 
+ 
+ # <SPBM-TR_Sub>
+ def subproblem_tr_2(Gk, ek, xk, rk, Paras):
+     """Trust-region subproblem with a degenerate-case branch: when
+     Gk @ lambda is (numerically) zero, recover the step from a second
+     QP in mu instead of normalizing a zero direction."""
+     # tensor ---> numpy (0.05s)
+     Gk_np = Gk.cpu().numpy()
+     ek_np = ek.cpu().numpy()
+     xk_np = xk.cpu().numpy()
+     rk_np = rk.cpu().numpy()
+ 
+     n, m = Gk_np.shape
+     A = Gk.T @ Gk
+ 
+     # Cholesky factor: ||Lk @ lambda|| == ||Gk @ lambda||, but cheaper.
+     Lk = torch.linalg.cholesky(A).T
+     Lk_np = Lk.cpu().numpy()
+ 
+     # Define variables.
+     lambda_var = cp.Variable(m, nonneg=True)
+     nu = cp.Variable(nonneg=True)
+ 
+     # Objective function.
+     objective = cp.Minimize(
+         rk_np * cp.norm(Lk_np @ lambda_var, 2) - (Gk_np.T @ xk_np + ek_np) @ lambda_var)
+ 
+     # Constraints.
+     constraints: list[cp.Constraint] = [cp.sum(lambda_var) + nu == 1]
+ 
+     # Solve.
+     problem = cp.Problem(objective, constraints)
+     problem.solve()
+     # problem.solve(solver=cp.SCS, eps=1e-5)
+     # problem.solve(solver=cp.ECOS, abstol=1e-8, reltol=1e-8, feastol=1e-8)
+ 
+     if lambda_var.value is None:
+         raise ValueError("lambda_var has not been solved yet")
+ 
+     lambda_GPU = torch.from_numpy(lambda_var.value).float().to(Paras['device'])  # 1e-3
+ 
+     eps = 1e-6
+     g_lambda = Gk @ lambda_GPU.reshape(-1, 1)
+     norm_g_lambda = torch.norm(g_lambda, p=2)
+ 
+     if norm_g_lambda < eps:  ## 0.01s
+         # Degenerate case: the aggregated subgradient vanishes, so the
+         # normalized trust-region step is undefined. Use the dual value
+         # v* and a second QP in mu to recover the step.
+         v_star = np.dot(lambda_var.value, ek_np)
+ 
+         mu = cp.Variable(m, nonneg=True)
+ 
+         term1 = 0.25 * cp.sum_squares(Lk_np @ mu)
+         term2 = mu @ (ek_np - v_star * np.ones(m) + Gk_np.T @ xk_np)
+ 
+         objective = cp.Minimize(term1 - term2)
+         problem = cp.Problem(objective)
+         problem.solve()
+ 
+         mu_GPU = torch.from_numpy(mu.value).float().to(Paras['device'])
+         # mu_GPU = torch.clamp(mu_GPU, min=1e-8)  # optional numerical safeguard
+ 
+         xk = xk.reshape(-1, 1) - 0.5 * Gk @ mu_GPU.reshape(-1, 1)
+ 
+         return xk.reshape(-1)
+ 
+     # Otherwise, update normally.
+     xk = xk.reshape(-1, 1) - (rk / norm_g_lambda) * g_lambda
+ 
+     return xk.reshape(-1)
+ # <SPBM-TR_Sub>
+ 
+ # <SPBM_TR_NoneSpecial>
+ def subproblem_tr_NoneSpecial(Gk, ek, xk, rk, Paras):
+     """Variant of the trust-region subproblem that always recovers the
+     step from the second QP in mu (no special-casing on ||Gk @ lambda||)."""
+     # tensor ---> numpy (0.05s)
+     Gk_np = Gk.cpu().numpy()
+     ek_np = ek.cpu().numpy()
+     xk_np = xk.cpu().numpy()
+     rk_np = rk.cpu().numpy()
+ 
+     n, m = Gk_np.shape
+     A = Gk.T @ Gk
+ 
+     # Cholesky factor: ||Lk @ lambda|| == ||Gk @ lambda||, but cheaper.
+     Lk = torch.linalg.cholesky(A).T
+     Lk_np = Lk.cpu().numpy()
+ 
+     # Define variables (dual).
+     lambda_var = cp.Variable(m, nonneg=True)
+     nu = cp.Variable(nonneg=True)
+ 
+     # Define the objective function.
+     objective = cp.Minimize(
+         rk_np * cp.norm(Lk_np @ lambda_var, 2) - (Gk_np.T @ xk_np + ek_np) @ lambda_var)
+ 
+     # Constraints.
+     constraints: list[cp.Constraint] = [cp.sum(lambda_var) + nu == 1]
+ 
+     # Solve.
+     problem = cp.Problem(objective, constraints)
+     problem.solve()
+ 
+     if lambda_var.value is None:
+         raise ValueError("lambda_var has not been solved yet")
+ 
+     # Optimal value of the primal problem:
+     # v* = lambda^T (ek + Gk^T xk) - rk * ||Gk @ lambda||.
+     Gk_xk = Gk_np.T @ xk_np.reshape(-1)
+     v_star_item1 = np.dot(lambda_var.value, ek_np + Gk_xk)
+     v_star_item2 = rk_np * np.linalg.norm(Gk_np @ lambda_var.value)
+ 
+     v_star = v_star_item1 - v_star_item2
+ 
+     mu = cp.Variable(m, nonneg=True)
+ 
+     # Recovery QP in mu.
+     term1 = 0.25 * cp.sum_squares(Lk_np @ mu)
+     term2 = mu @ (ek_np - v_star * np.ones(m) + Gk_np.T @ xk_np)
+ 
+     objective = cp.Minimize(term1 - term2)
+     problem = cp.Problem(objective)
+     problem.solve()
+ 
+     mu_GPU = torch.from_numpy(mu.value).float().to(Paras['device'])
+ 
+     xk = xk.reshape(-1, 1) - 0.5 * Gk @ mu_GPU.reshape(-1, 1)
+ 
+     return xk.reshape(-1)
+ # <SPBM_TR_NoneSpecial>
+ 
+ 
+ 
+ def subproblem_tr_3(Gk, ek, xk, rk, Paras):
+     """Variant that always recovers the step from the second QP in mu,
+     using v* = lambda^T ek as the approximate optimal value."""
+     # tensor ---> numpy (0.05s)
+     Gk_np = Gk.cpu().numpy()
+     ek_np = ek.cpu().numpy()
+     xk_np = xk.cpu().numpy()
+     rk_np = rk.cpu().numpy()
+ 
+     n, m = Gk_np.shape
+     A = Gk.T @ Gk
+ 
+     # Cholesky factor: ||Lk @ lambda|| == ||Gk @ lambda||, but cheaper.
+     Lk = torch.linalg.cholesky(A).T
+     Lk_np = Lk.cpu().numpy()
+ 
+     # Define variables.
+     lambda_var = cp.Variable(m, nonneg=True)
+     nu = cp.Variable(nonneg=True)
+ 
+     objective = cp.Minimize(
+         rk_np * cp.norm(Lk_np @ lambda_var, 2) - (Gk_np.T @ xk_np + ek_np) @ lambda_var)
+ 
+     # Constraints.
+     constraints: list[cp.Constraint] = [cp.sum(lambda_var) + nu == 1]
+ 
+     # Solve.
+     problem = cp.Problem(objective, constraints)
+     problem.solve()
+ 
+     if lambda_var.value is None:
+         raise ValueError("lambda_var has not been solved yet")
+ 
+     v_star = np.dot(lambda_var.value, ek_np)
+ 
+     mu = cp.Variable(m, nonneg=True)
+ 
+     # Construct the objective function of the recovery QP.
+     term1 = 0.25 * cp.sum_squares(Lk_np @ mu)
+     term2 = mu @ (ek_np - v_star * np.ones(m) + Gk_np.T @ xk_np)
+ 
+     objective = cp.Minimize(term1 - term2)
+     problem = cp.Problem(objective)
+     problem.solve()
+     mu_GPU = torch.from_numpy(mu.value).float().to(Paras['device'])
+ 
+     xk = xk.reshape(-1, 1) - 0.5 * Gk @ mu_GPU.reshape(-1, 1)
+ 
+     return xk.reshape(-1)
+ 
+ 
+ def subproblem_tr_NoneLower(Gk, ek, xk, rk, Paras):
+     """Trust-region subproblem without the lower-bound slack: the
+     multipliers satisfy sum(lambda) == 1 exactly (no nu variable)."""
+     # tensor ---> numpy (0.05s)
+     Gk_np = Gk.cpu().numpy()
+     ek_np = ek.cpu().numpy()
+     xk_np = xk.cpu().numpy()
+     rk_np = rk.cpu().numpy()
+ 
+     n, m = Gk_np.shape
+     A = Gk.T @ Gk
+ 
+     # Cholesky factor: ||Lk @ lambda|| == ||Gk @ lambda||, but cheaper.
+     Lk = torch.linalg.cholesky(A).T
+     Lk_np = Lk.cpu().numpy()
+ 
+     lambda_var = cp.Variable(m, nonneg=True)
+ 
+     objective = cp.Minimize(
+         rk_np * cp.norm(Lk_np @ lambda_var, 2) - (Gk_np.T @ xk_np + ek_np) @ lambda_var)
+ 
+     # Constraints: no slack term here, the multipliers sum to one.
+     constraints = [cp.sum(lambda_var) == 1]
+ 
+     problem = cp.Problem(objective, constraints)  # type: ignore
+     problem.solve()
+     # problem.solve(solver=cp.SCS, eps=1e-5)
+     # problem.solve(solver=cp.ECOS, abstol=1e-8, reltol=1e-8, feastol=1e-8)
+ 
+     if lambda_var.value is None:
+         raise ValueError("lambda_var has not been solved yet")
+ 
+     lambda_GPU = torch.from_numpy(lambda_var.value).float().to(Paras['device'])  # 1e-3
+ 
+     eps = 1e-6
+     g_lambda = Gk @ lambda_GPU.reshape(-1, 1)
+     norm_g_lambda = torch.norm(g_lambda, p=2)
+ 
+     if norm_g_lambda < eps:  ## 0.01s
+         # Degenerate case: recover the step from the second QP in mu.
+         v_star = np.dot(lambda_var.value, ek_np)
+ 
+         mu = cp.Variable(m, nonneg=True)
+ 
+         # Construct the objective function of the recovery QP.
+         term1 = 0.25 * cp.sum_squares(Lk_np @ mu)
+         term2 = mu @ (ek_np - v_star * np.ones(m) + Gk_np.T @ xk_np)
+ 
+         objective = cp.Minimize(term1 - term2)
+         problem = cp.Problem(objective)
+         problem.solve()
+ 
+         mu_GPU = torch.from_numpy(mu.value).float().to(Paras['device'])
+         # mu_GPU = torch.clamp(mu_GPU, min=1e-8)  # optional numerical safeguard
+ 
+         xk = xk.reshape(-1, 1) - 0.5 * Gk @ mu_GPU.reshape(-1, 1)
+ 
+         return xk.reshape(-1)
+ 
+     # Otherwise, update normally.
+     xk = xk.reshape(-1, 1) - (rk / norm_g_lambda) * g_lambda
+     return xk.reshape(-1)
+ 
+ 
+ def subproblem_pf_NoneLower(Gk, ek, xk, delta, Paras):
+     """Proximal subproblem without the lower-bound slack:
+     sum(lambda) == 1 replaces sum(lambda) + v == 1."""
+     # tensor ---> numpy (0.05s)
+     Gk_np = Gk.cpu().numpy()
+     ek_np = ek.cpu().numpy()
+     xk_np = xk.cpu().numpy()
+ 
+     n, m = Gk_np.shape
+ 
+     # Define variables.
+     lambda_var = cp.Variable(m, nonneg=True)
+ 
+     # Objective function.
+     objective = cp.Minimize(
+         (delta / 2) * cp.quad_form(lambda_var, Gk_np.T @ Gk_np) - (Gk_np.T @ xk_np + ek_np) @ lambda_var)
+ 
+     # Constraints.
+     constraints = [cp.sum(lambda_var) == 1]
+ 
+     # Solve.
+     problem = cp.Problem(objective, constraints)  # type: ignore
+     problem.solve()
+ 
+     lambda_GPU = torch.from_numpy(lambda_var.value).float().to(Paras['device'])  # 1e-3
+ 
+     xk = xk - delta * Gk @ lambda_GPU
+ 
+     return xk
+ 
+ def bundle(Gk, ek, xk, delta, Paras):
+     """Plain bundle step (same computation as subproblem_pf_NoneLower)."""
+     # tensor ---> numpy (0.05s)
+     Gk_np = Gk.cpu().numpy()
+     ek_np = ek.cpu().numpy()
+     xk_np = xk.cpu().numpy()
+ 
+     n, m = Gk_np.shape
+ 
+     # Define variables.
+     lambda_var = cp.Variable(m, nonneg=True)
+ 
+     # Objective function.
+     objective = cp.Minimize(
+         (delta / 2) * cp.quad_form(lambda_var, Gk_np.T @ Gk_np) - (Gk_np.T @ xk_np + ek_np) @ lambda_var)
+ 
+     # Constraints.
+     constraints = [cp.sum(lambda_var) == 1]
+ 
+     # Solve.
+     problem = cp.Problem(objective, constraints)  # type: ignore
+     problem.solve()
+ 
+     lambda_GPU = torch.from_numpy(lambda_var.value).float().to(Paras['device'])  # 1e-3
+ 
+     xk = xk - delta * Gk @ lambda_GPU
+ 
+     return xk
+ 
+ 
+ def subproblem_tr_primal(Gk, ek, xk, rk, Paras):
+     """Solve the trust-region subproblem directly in primal form:
+     minimize v subject to the cutting-plane inequalities and
+     ||x - xk|| <= rk."""
+     # tensor ---> numpy (0.05s)
+     Gk_np = Gk.cpu().numpy()
+     ek_np = ek.cpu().numpy()
+     xk_np = xk.cpu().numpy()
+     rk_np = rk.cpu().numpy()
+ 
+     n, m = Gk_np.shape
+ 
+     m_ones = np.ones(m)
+     x = cp.Variable(n)
+     v = cp.Variable()
+ 
+     objective = cp.Minimize(v)
+ 
+     constraints = [
+         Gk_np.T @ x + ek_np <= v * m_ones,
+         cp.norm(x - xk_np) <= rk_np,
+         v >= 0
+     ]
+ 
+     problem = cp.Problem(objective, constraints)  # type: ignore
+     problem.solve()
+ 
+     if x.value is None:
+         raise ValueError("subproblem_tr_primal has not been solved yet")
+ 
+     return torch.from_numpy(x.value).float().to(Paras['device'])
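
Note: the functions above implement the dual subproblems of a proximal/trust-region bundle method. For orientation, here is a minimal sketch of how they could compose into a bundle loop on a toy nonsmooth objective. It is illustrative only: the import path assumes the file shown above is junshan_kit/SPBM_func.py (as the RECORD below suggests), and the objective, the constants M and delta, and the Paras dict are made up for the example; the package's own driver (SPBM.py) may wire these up differently.

    import torch
    from junshan_kit.SPBM_func import add_cutting, cut_selection, get_var, subproblem_pf

    Paras = {'device': 'cpu'}           # hypothetical parameter dict
    f = lambda x: torch.norm(x, p=1)    # toy nonsmooth objective
    subgrad = lambda x: torch.sign(x)   # one subgradient of f at x

    x_k = torch.randn(5)
    x_his, f_his, g_his = [], [], []

    for k in range(20):
        # Grow the bundle, prune inactive cuts, assemble (Gk, rk, ek), then step.
        x_his, f_his, g_his = add_cutting(x_his, f_his, g_his, x_k, subgrad(x_k), f(x_k))
        sel_x, sel_f, sel_g = cut_selection(x_his, f_his, g_his, M=1.0)
        Gk, rk, ek = get_var(sel_x, sel_f, sel_g, delta=0.1)
        x_k = subproblem_pf(Gk, ek, x_k, delta=0.1, Paras=Paras)

    print(f(x_k))  # objective after 20 bundle steps

The proximal step subproblem_pf is used here because it needs no Cholesky factorization; the trust-region variants additionally require Gk.T @ Gk to be positive definite.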
junshan_kit/check_args.py CHANGED
@@ -5,7 +5,7 @@ def get_args():
  parser = argparse.ArgumentParser(description="Combined config argument example")
 
  allowed_models = ["LS", "LRL2","ResNet18"]
- allowed_optimizers = ["Adam", "SGD",]
+ allowed_optimizers = ["ADAM", "SGD", "Bundle"]
  allowed_datasets = ["MNIST", "CIFAR100"]
 
  model_mapping = {
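
The optimizer whitelist now accepts "Bundle", and "ADAM" replaces "Adam". For context, a common way such a whitelist gates parsing is via argparse choices; the --optimizer flag below is a hypothetical illustration, not necessarily check_args.py's actual interface:

    import argparse

    allowed_optimizers = ["ADAM", "SGD", "Bundle"]

    parser = argparse.ArgumentParser(description="Combined config argument example")
    # Hypothetical flag: reject any optimizer outside the whitelist at parse time.
    parser.add_argument("--optimizer", choices=allowed_optimizers, default="SGD")

    args = parser.parse_args(["--optimizer", "Bundle"])
    print(args.optimizer)  # -> Bundle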
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: junshan_kit
- Version: 2.4.0
+ Version: 2.4.1
  Summary: This is an optimization tool.
  Author-email: Junshan Yin <junshanyin@163.com>
  Requires-Dist: kaggle==1.7.4.5
@@ -0,0 +1,16 @@
+ junshan_kit/ComOptimizers.py,sha256=MUgFnm1DbbvNKv5-7nHJCLOfq4VjoNk1KLRR-ji5rOA,4637
+ junshan_kit/DataProcessor.py,sha256=rp1zOTOoF98idwGM_QRzr8yC9M5fj1uyfVhdmV02kyQ,8962
+ junshan_kit/DataSets.py,sha256=rRaCPtlR5WvH0E1CAaaWbVkfS5QU12ak31VbREq_prE,8354
+ junshan_kit/ExperimentHub.py,sha256=OEiYRponPxvqL6SPEvtRLuKF-nA9tbDcOq-cboBevTE,11165
+ junshan_kit/Models.py,sha256=GRTunJON1vLQz2IxgsoOKvjP-3zSJJLuB3CkJTAiImo,6884
+ junshan_kit/Print_Info.py,sha256=vogYcXvoGcRGZV-7svi_mtiCZH6c8d-RhbZLFrLbKr8,3012
+ junshan_kit/SPBM.py,sha256=UNzuyiEvMnhB5S7tzheh2jwpy-yIOngjagzqJOk3v8g,13723
+ junshan_kit/SPBM_func.py,sha256=ExWdKbhaNcYifxX4NFTwCGKm43cUAQPbHO1iHzdZbVM,17300
+ junshan_kit/TrainingParas.py,sha256=hBiBxHkmollRA4jT93npIcdWdsLLD0laV5kCZqnyg5k,17324
+ junshan_kit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ junshan_kit/check_args.py,sha256=7m4xSPAwqqQ0SPeKc-MCewDIDB7kFgsNYS2AuTTzGtk,3599
+ junshan_kit/datahub.py,sha256=4c3P2TORMZ4va6NrSiojDCpnY_CGDlJV-5PG3u1_Isk,9081
+ junshan_kit/kit.py,sha256=hpA4Zpn1VAuhdJSBBXswVum0CSk6QnB05GGLYoaRatQ,9792
+ junshan_kit-2.4.1.dist-info/METADATA,sha256=4oSPIm8c_FRSDoZOjCS-XBrU1JRa0OKyYNtQe73senU,266
+ junshan_kit-2.4.1.dist-info/WHEEL,sha256=tkmg4JIqwd9H8mL30xA7crRmoStyCtGp0VWshokd1Jc,105
+ junshan_kit-2.4.1.dist-info/RECORD,,
@@ -1,14 +0,0 @@
- junshan_kit/DataProcessor.py,sha256=MOKMkq4OE32VyLkgUD-D2J5dORmUDLfylAir0UiI04E,8665
- junshan_kit/DataSets.py,sha256=8_-2vgwYgXPZnNIt-4WYBu7tJpL8E9W3bcMT4jAc-KI,13831
- junshan_kit/ExperimentHub.py,sha256=h4mCCDi5HzGjNWqq7K2XUV1odUEoWw8IORsoIwT-8bA,6526
- junshan_kit/Models.py,sha256=GRTunJON1vLQz2IxgsoOKvjP-3zSJJLuB3CkJTAiImo,6884
- junshan_kit/Optimizers.py,sha256=w-2PP5u8w4WYLe8tHDtFIHcTRVOKC5tHbJ5Tm1ydsYM,3069
- junshan_kit/Print_Info.py,sha256=sF_-deHfaZ2N2nKVII2hkvNAdJynSdNuBYu0qn7pxG8,3122
- junshan_kit/TrainingParas.py,sha256=hBiBxHkmollRA4jT93npIcdWdsLLD0laV5kCZqnyg5k,17324
- junshan_kit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- junshan_kit/check_args.py,sha256=MmgDAPWy9Btjs1EBHtqBu0W0G7nlg7TF2rTcdJ81YJs,3590
- junshan_kit/datahub.py,sha256=4c3P2TORMZ4va6NrSiojDCpnY_CGDlJV-5PG3u1_Isk,9081
- junshan_kit/kit.py,sha256=hpA4Zpn1VAuhdJSBBXswVum0CSk6QnB05GGLYoaRatQ,9792
- junshan_kit-2.4.0.dist-info/METADATA,sha256=IUY3bnm8kFO294XlZmZ4JzA2A7iOzm9dOms2Sg7u8b4,266
- junshan_kit-2.4.0.dist-info/WHEEL,sha256=tkmg4JIqwd9H8mL30xA7crRmoStyCtGp0VWshokd1Jc,105
- junshan_kit-2.4.0.dist-info/RECORD,,