physbo 2.0.0__cp310-cp310-macosx_12_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- physbo/__init__.py +17 -0
- physbo/blm/__init__.py +17 -0
- physbo/blm/basis/__init__.py +8 -0
- physbo/blm/basis/fourier.py +148 -0
- physbo/blm/core/__init__.py +8 -0
- physbo/blm/core/model.py +257 -0
- physbo/blm/inf/__init__.py +8 -0
- physbo/blm/inf/exact.py +192 -0
- physbo/blm/lik/__init__.py +10 -0
- physbo/blm/lik/_src/__init__.py +8 -0
- physbo/blm/lik/_src/cov.py +113 -0
- physbo/blm/lik/gauss.py +136 -0
- physbo/blm/lik/linear.py +117 -0
- physbo/blm/predictor.py +238 -0
- physbo/blm/prior/__init__.py +8 -0
- physbo/blm/prior/gauss.py +215 -0
- physbo/gp/__init__.py +15 -0
- physbo/gp/core/__init__.py +11 -0
- physbo/gp/core/learning.py +364 -0
- physbo/gp/core/model.py +420 -0
- physbo/gp/core/prior.py +207 -0
- physbo/gp/cov/__init__.py +8 -0
- physbo/gp/cov/_src/__init__.py +1 -0
- physbo/gp/cov/_src/enhance_gauss.cpython-310-darwin.so +0 -0
- physbo/gp/cov/gauss.py +393 -0
- physbo/gp/inf/__init__.py +8 -0
- physbo/gp/inf/exact.py +231 -0
- physbo/gp/lik/__init__.py +8 -0
- physbo/gp/lik/gauss.py +179 -0
- physbo/gp/mean/__init__.py +9 -0
- physbo/gp/mean/const.py +150 -0
- physbo/gp/mean/zero.py +66 -0
- physbo/gp/predictor.py +170 -0
- physbo/misc/__init__.py +15 -0
- physbo/misc/_src/__init__.py +1 -0
- physbo/misc/_src/cholupdate.cpython-310-darwin.so +0 -0
- physbo/misc/_src/diagAB.cpython-310-darwin.so +0 -0
- physbo/misc/_src/logsumexp.cpython-310-darwin.so +0 -0
- physbo/misc/_src/traceAB.cpython-310-darwin.so +0 -0
- physbo/misc/centering.py +28 -0
- physbo/misc/gauss_elim.py +35 -0
- physbo/misc/set_config.py +299 -0
- physbo/opt/__init__.py +8 -0
- physbo/opt/adam.py +107 -0
- physbo/predictor.py +261 -0
- physbo/search/__init__.py +11 -0
- physbo/search/discrete/__init__.py +11 -0
- physbo/search/discrete/policy.py +804 -0
- physbo/search/discrete/results.py +192 -0
- physbo/search/discrete_multi/__init__.py +11 -0
- physbo/search/discrete_multi/policy.py +552 -0
- physbo/search/discrete_multi/results.py +128 -0
- physbo/search/pareto.py +206 -0
- physbo/search/score.py +155 -0
- physbo/search/score_multi.py +197 -0
- physbo/search/utility.py +101 -0
- physbo/variable.py +222 -0
- physbo-2.0.0.dist-info/METADATA +110 -0
- physbo-2.0.0.dist-info/RECORD +61 -0
- physbo-2.0.0.dist-info/WHEEL +5 -0
- physbo-2.0.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,192 @@
|
|
|
1
|
+
# SPDX-License-Identifier: MPL-2.0
|
|
2
|
+
# Copyright (C) 2020- The University of Tokyo
|
|
3
|
+
#
|
|
4
|
+
# This Source Code Form is subject to the terms of the Mozilla Public
|
|
5
|
+
# License, v. 2.0. If a copy of the MPL was not distributed with this
|
|
6
|
+
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
|
7
|
+
|
|
8
|
+
import numpy as np
|
|
9
|
+
import copy
|
|
10
|
+
import pickle
|
|
11
|
+
|
|
12
|
+
from .. import utility
|
|
13
|
+
|
|
14
|
+
# Capacity of the preallocated history buffers (maximum number of search steps).
MAX_SEARCH = 30000
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class history:
    """
    Search history for single-objective discrete-space optimization.

    Records, for every evaluated candidate, the objective value ``fx``, the
    index of the chosen action, and per-step timing information.  All buffers
    are preallocated with ``MAX_SEARCH`` entries; ``total_num_search`` counts
    how many entries are in use and ``num_runs`` counts how many calls to
    :meth:`write` have completed (one run may evaluate several candidates).
    """

    def __init__(self):
        # number of completed write() calls (runs)
        self.num_runs = 0
        # total number of evaluated candidates so far
        self.total_num_search = 0
        self.fx = np.zeros(MAX_SEARCH, dtype=float)
        self.chosen_actions = np.zeros(MAX_SEARCH, dtype=int)
        # terminal_num_run[i]: value of total_num_search after the i-th run
        self.terminal_num_run = np.zeros(MAX_SEARCH, dtype=int)

        # elapsed times, stored per search step (see write())
        self.time_total_ = np.zeros(MAX_SEARCH, dtype=float)
        self.time_update_predictor_ = np.zeros(MAX_SEARCH, dtype=float)
        self.time_get_action_ = np.zeros(MAX_SEARCH, dtype=float)
        self.time_run_simulator_ = np.zeros(MAX_SEARCH, dtype=float)

    # NOTE: write() fills the time_* buffers per search step (indices
    # 0..total_num_search), so the valid prefix has length total_num_search,
    # not num_runs.  The two lengths differ only when a single run evaluates
    # several candidates; slicing by num_runs truncated the data in that case.

    @property
    def time_total(self):
        """Total elapsed time of each finished search step (copy)."""
        return copy.copy(self.time_total_[0 : self.total_num_search])

    @property
    def time_update_predictor(self):
        """Predictor-update time of each finished search step (copy)."""
        return copy.copy(self.time_update_predictor_[0 : self.total_num_search])

    @property
    def time_get_action(self):
        """Action-selection time of each finished search step (copy)."""
        return copy.copy(self.time_get_action_[0 : self.total_num_search])

    @property
    def time_run_simulator(self):
        """Simulator-execution time of each finished search step (copy)."""
        return copy.copy(self.time_run_simulator_[0 : self.total_num_search])

    def write(
        self,
        t,
        action,
        time_total=None,
        time_update_predictor=None,
        time_get_action=None,
        time_run_simulator=None,
    ):
        """
        Append the results of one run to the history.

        Parameters
        ----------
        t: numpy.ndarray
            N dimensional array. The negative energy of each search candidate
            (value of the objective function to be optimized).
        action: numpy.ndarray
            N dimensional array. The indexes of actions of each search candidate.
        time_total: numpy.ndarray
            N dimensional array. The total elapsed time in each step.
            If None (default), filled by 0.0.
        time_update_predictor: numpy.ndarray
            N dimensional array. The elapsed time for updating predictor
            (e.g., learning hyperparameters) in each step.
            If None (default), filled by 0.0.
        time_get_action: numpy.ndarray
            N dimensional array. The elapsed time for getting next action in each step.
            If None (default), filled by 0.0.
        time_run_simulator: numpy.ndarray
            N dimensional array. The elapsed time for running the simulator in each step.
            If None (default), filled by 0.0.

        Raises
        ------
        ValueError
            If appending N results would exceed the MAX_SEARCH capacity of
            the preallocated buffers.

        Returns
        -------

        """
        N = utility.length_vector(t)
        st = self.total_num_search
        en = st + N
        # Fail early with a clear message instead of an opaque numpy
        # broadcast error when the preallocated buffers would overflow.
        if en > MAX_SEARCH:
            raise ValueError(
                "history capacity exceeded: "
                "total number of searches would be {} > MAX_SEARCH ({})".format(
                    en, MAX_SEARCH
                )
            )

        self.terminal_num_run[self.num_runs] = en
        self.fx[st:en] = t
        self.chosen_actions[st:en] = action
        self.num_runs += 1
        self.total_num_search = en

        # Missing timing information defaults to zeros of matching length.
        for buf, values in (
            (self.time_total_, time_total),
            (self.time_update_predictor_, time_update_predictor),
            (self.time_get_action_, time_get_action),
            (self.time_run_simulator_, time_run_simulator),
        ):
            buf[st:en] = np.zeros(N, dtype=float) if values is None else values

    def export_sequence_best_fx(self):
        """
        Export the best fx and corresponding action after each run.
        (The total number of data is num_runs.)

        Returns
        -------
        best_fx: numpy.ndarray
        best_actions: numpy.ndarray
        """
        best_fx = np.zeros(self.num_runs, dtype=float)
        best_actions = np.zeros(self.num_runs, dtype=int)
        for n in range(self.num_runs):
            # best over all candidates evaluated up to the end of run n
            index = np.argmax(self.fx[0 : self.terminal_num_run[n]])
            best_actions[n] = self.chosen_actions[index]
            best_fx[n] = self.fx[index]

        return best_fx, best_actions

    def export_all_sequence_best_fx(self):
        """
        Export the running best fx and corresponding action at each step.
        (The total number of data is total_num_search.)

        Returns
        -------
        best_fx: numpy.ndarray
        best_actions: numpy.ndarray
        """
        best_fx = np.zeros(self.total_num_search, dtype=float)
        best_actions = np.zeros(self.total_num_search, dtype=int)
        # Empty history: return empty arrays instead of raising IndexError.
        if self.total_num_search == 0:
            return best_fx, best_actions
        best_fx[0] = self.fx[0]
        best_actions[0] = self.chosen_actions[0]

        for n in range(1, self.total_num_search):
            if best_fx[n - 1] < self.fx[n]:
                best_fx[n] = self.fx[n]
                best_actions[n] = self.chosen_actions[n]
            else:
                best_fx[n] = best_fx[n - 1]
                best_actions[n] = best_actions[n - 1]

        return best_fx, best_actions

    def save(self, filename):
        """
        Save the information of the history.

        Parameters
        ----------
        filename: str
            The name of the file which stores the information of the history
        Returns
        -------

        """
        N = self.total_num_search
        M = self.num_runs
        np.savez_compressed(
            filename,
            num_runs=M,
            total_num_search=N,
            fx=self.fx[0:N],
            chosen_actions=self.chosen_actions[0:N],
            terminal_num_run=self.terminal_num_run[0:M],
            # Timing data was previously dropped on save; include it so that
            # load() can fully restore the history.
            time_total=self.time_total_[0:N],
            time_update_predictor=self.time_update_predictor_[0:N],
            time_get_action=self.time_get_action_[0:N],
            time_run_simulator=self.time_run_simulator_[0:N],
        )

    def load(self, filename):
        """
        Load the information of the history.

        Parameters
        ----------
        filename: str
            The name of the file which stores the information of the history
        Returns
        -------

        """
        data = np.load(filename)
        # Convert numpy scalars to plain ints so counters behave like the
        # values produced by write().
        M = int(data["num_runs"])
        N = int(data["total_num_search"])
        self.num_runs = M
        self.total_num_search = N
        self.fx[0:N] = data["fx"]
        self.chosen_actions[0:N] = data["chosen_actions"]
        self.terminal_num_run[0:M] = data["terminal_num_run"]

        # Timing arrays are absent in files written by older versions;
        # restore them only when present.
        for key, buf in (
            ("time_total", self.time_total_),
            ("time_update_predictor", self.time_update_predictor_),
            ("time_get_action", self.time_get_action_),
            ("time_run_simulator", self.time_run_simulator_),
        ):
            if key in data.files:
                buf[0:N] = data[key]
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
# SPDX-License-Identifier: MPL-2.0
|
|
2
|
+
# Copyright (C) 2020- The University of Tokyo
|
|
3
|
+
#
|
|
4
|
+
# This Source Code Form is subject to the terms of the Mozilla Public
|
|
5
|
+
# License, v. 2.0. If a copy of the MPL was not distributed with this
|
|
6
|
+
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
|
7
|
+
|
|
8
|
+
from .policy import policy
|
|
9
|
+
from .results import history
|
|
10
|
+
|
|
11
|
+
# __all__ = ["policy", "history"]
|