eqc-models 0.9.8 (eqc_models-0.9.8-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eqc_models-0.9.8.data/platlib/compile_extensions.py +23 -0
- eqc_models-0.9.8.data/platlib/eqc_models/__init__.py +15 -0
- eqc_models-0.9.8.data/platlib/eqc_models/algorithms/__init__.py +4 -0
- eqc_models-0.9.8.data/platlib/eqc_models/algorithms/base.py +10 -0
- eqc_models-0.9.8.data/platlib/eqc_models/algorithms/penaltymultiplier.py +169 -0
- eqc_models-0.9.8.data/platlib/eqc_models/allocation/__init__.py +6 -0
- eqc_models-0.9.8.data/platlib/eqc_models/allocation/allocation.py +367 -0
- eqc_models-0.9.8.data/platlib/eqc_models/allocation/portbase.py +128 -0
- eqc_models-0.9.8.data/platlib/eqc_models/allocation/portmomentum.py +137 -0
- eqc_models-0.9.8.data/platlib/eqc_models/assignment/__init__.py +5 -0
- eqc_models-0.9.8.data/platlib/eqc_models/assignment/qap.py +82 -0
- eqc_models-0.9.8.data/platlib/eqc_models/assignment/setpartition.py +170 -0
- eqc_models-0.9.8.data/platlib/eqc_models/base/__init__.py +72 -0
- eqc_models-0.9.8.data/platlib/eqc_models/base/base.py +150 -0
- eqc_models-0.9.8.data/platlib/eqc_models/base/constraints.py +276 -0
- eqc_models-0.9.8.data/platlib/eqc_models/base/operators.py +201 -0
- eqc_models-0.9.8.data/platlib/eqc_models/base/polyeval.c +11363 -0
- eqc_models-0.9.8.data/platlib/eqc_models/base/polyeval.cpython-310-darwin.so +0 -0
- eqc_models-0.9.8.data/platlib/eqc_models/base/polyeval.pyx +72 -0
- eqc_models-0.9.8.data/platlib/eqc_models/base/polynomial.py +274 -0
- eqc_models-0.9.8.data/platlib/eqc_models/base/quadratic.py +250 -0
- eqc_models-0.9.8.data/platlib/eqc_models/decoding.py +20 -0
- eqc_models-0.9.8.data/platlib/eqc_models/graph/__init__.py +5 -0
- eqc_models-0.9.8.data/platlib/eqc_models/graph/base.py +63 -0
- eqc_models-0.9.8.data/platlib/eqc_models/graph/hypergraph.py +307 -0
- eqc_models-0.9.8.data/platlib/eqc_models/graph/maxcut.py +155 -0
- eqc_models-0.9.8.data/platlib/eqc_models/graph/maxkcut.py +184 -0
- eqc_models-0.9.8.data/platlib/eqc_models/ml/__init__.py +15 -0
- eqc_models-0.9.8.data/platlib/eqc_models/ml/classifierbase.py +99 -0
- eqc_models-0.9.8.data/platlib/eqc_models/ml/classifierqboost.py +423 -0
- eqc_models-0.9.8.data/platlib/eqc_models/ml/classifierqsvm.py +237 -0
- eqc_models-0.9.8.data/platlib/eqc_models/ml/clustering.py +323 -0
- eqc_models-0.9.8.data/platlib/eqc_models/ml/clusteringbase.py +112 -0
- eqc_models-0.9.8.data/platlib/eqc_models/ml/decomposition.py +363 -0
- eqc_models-0.9.8.data/platlib/eqc_models/ml/forecast.py +255 -0
- eqc_models-0.9.8.data/platlib/eqc_models/ml/forecastbase.py +139 -0
- eqc_models-0.9.8.data/platlib/eqc_models/ml/regressor.py +220 -0
- eqc_models-0.9.8.data/platlib/eqc_models/ml/regressorbase.py +97 -0
- eqc_models-0.9.8.data/platlib/eqc_models/ml/reservoir.py +106 -0
- eqc_models-0.9.8.data/platlib/eqc_models/sequence/__init__.py +5 -0
- eqc_models-0.9.8.data/platlib/eqc_models/sequence/tsp.py +217 -0
- eqc_models-0.9.8.data/platlib/eqc_models/solvers/__init__.py +12 -0
- eqc_models-0.9.8.data/platlib/eqc_models/solvers/qciclient.py +707 -0
- eqc_models-0.9.8.data/platlib/eqc_models/utilities/__init__.py +6 -0
- eqc_models-0.9.8.data/platlib/eqc_models/utilities/fileio.py +38 -0
- eqc_models-0.9.8.data/platlib/eqc_models/utilities/polynomial.py +137 -0
- eqc_models-0.9.8.data/platlib/eqc_models/utilities/qplib.py +375 -0
- eqc_models-0.9.8.dist-info/LICENSE.txt +202 -0
- eqc_models-0.9.8.dist-info/METADATA +139 -0
- eqc_models-0.9.8.dist-info/RECORD +52 -0
- eqc_models-0.9.8.dist-info/WHEEL +5 -0
- eqc_models-0.9.8.dist-info/top_level.txt +2 -0
eqc_models-0.9.8.data/platlib/eqc_models/ml/decomposition.py

@@ -0,0 +1,363 @@
# (C) Quantum Computing Inc., 2024.
# Import libs
import os
import sys
import time
import datetime
import json
import warnings
from functools import wraps
import numpy as np

from qci_client import QciClient

from eqc_models import QuadraticModel
from eqc_models.solvers.qciclient import (
    Dirac3CloudSolver,
    Dirac3ContinuousCloudSolver,
    Dirac1CloudSolver,
)

class DecompBase(QuadraticModel):
    """A base class for decomposition algorithms.

    Parameters
    ----------

    relaxation_schedule: Relaxation schedule used by Dirac-3; default: 2.

    num_samples: Number of samples used by Dirac-3; default: 1.

    """

    def __init__(
        self,
        relaxation_schedule=2,
        num_samples=1,
    ):
        super().__init__(None, None, None)

        self.relaxation_schedule = relaxation_schedule
        self.num_samples = num_samples

    def _get_hamiltonian(
        self,
        X: np.array,
    ):
        pass

    def _set_model(self, J, C, sum_constraint):
        # Set hamiltonians
        self._C = C
        self._J = J
        self._H = C, J
        self._sum_constraint = sum_constraint

        # Set upper_bound
        num_variables = C.shape[0]
        self.upper_bound = sum_constraint * np.ones((num_variables,))

        return

    def _solve(self):
        solver = Dirac3ContinuousCloudSolver()
        response = solver.solve(
            self,
            relaxation_schedule=self.relaxation_schedule,
            solution_precision=1,
            sum_constraint=self._sum_constraint,
            num_samples=self.num_samples,
        )

        sol = response["results"]["solutions"][0]

        return sol, response

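    # _solve_d1_test below exercises the Dirac-1 QUBO path directly through
    # qci_client: upload the (symmetrized) QUBO as a file, build a
    # sample-qubo job body, process the job, and return the first sample.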
    def _solve_d1_test(self):
        qubo = self._J

        # Make sure matrix is symmetric to machine precision
        qubo = 0.5 * (qubo + qubo.transpose())

        # Instantiate
        qci = QciClient()

        # Create json objects
        qubo_json = {
            "file_name": "qubo_tutorial.json",
            "file_config": {
                "qubo": {"data": qubo, "num_variables": qubo.shape[0]},
            },
        }

        response_json = qci.upload_file(file=qubo_json)
        qubo_file_id = response_json["file_id"]

        # Setup job json
        job_params = {
            "device_type": "dirac-1",
            "alpha": 1.0,
            "num_samples": 20,
        }
        job_json = qci.build_job_body(
            job_type="sample-qubo",
            job_params=job_params,
            qubo_file_id=qubo_file_id,
            job_name="tutorial_eqc1",
            job_tags=["tutorial_eqc1"],
        )
        print(job_json)

        # Run the job
        job_response_json = qci.process_job(
            job_body=job_json,
        )

        print(job_response_json)

        results = job_response_json["results"]
        energies = results["energies"]
        samples = results["solutions"]

        print("Energies:", energies)

        sol = np.array(samples[0])

        print(sol)

        return sol

    def fit(self, X):
        pass

    def transform(self, X: np.array):
        pass

    def fit_transform(self, X):
        pass

    def get_dynamic_range(self):
        C = self._C
        J = self._J

        if C is None:
            return

        if J is None:
            return

        absc = np.abs(C)
        absj = np.abs(J)
        minc = np.min(absc[absc > 0])
        maxc = np.max(absc)
        minj = np.min(absj[absj > 0])
        maxj = np.max(absj)

        minval = min(minc, minj)
        maxval = max(maxc, maxj)

        return 10 * np.log10(maxval / minval)


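# Note: get_dynamic_range above reports the Hamiltonian coefficient dynamic
# range in decibels, 10 * log10(largest magnitude / smallest nonzero
# magnitude); e.g., coefficients spanning 0.01 to 100 give 40 dB.

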
class PCA(DecompBase):
    """
    An implementation of Principal Component Analysis (PCA) that
    uses QCi's Dirac-3.

    Linear dimensionality reduction using Singular Value
    Decomposition of the data to project it to a lower dimensional
    space.

    Parameters
    ----------

    n_components: Number of components to keep; if n_components is not
    set, all components are kept; default: None.

    relaxation_schedule: Relaxation schedule used by Dirac-3; default: 2.

    num_samples: Number of samples used by Dirac-3; default: 1.

    Examples
    --------

    >>> from sklearn import datasets
    >>> iris = datasets.load_iris()
    >>> X = iris.data
    >>> from sklearn.preprocessing import StandardScaler
    >>> scaler = StandardScaler()
    >>> X = scaler.fit_transform(X)
    >>> from eqc_models.ml.decomposition import PCA
    >>> from contextlib import redirect_stdout
    >>> import io
    >>> f = io.StringIO()
    >>> with redirect_stdout(f):
    ...     obj = PCA(
    ...         n_components=4,
    ...         relaxation_schedule=2,
    ...         num_samples=1,
    ...     )
    ...     X_pca = obj.fit_transform(X)

    """

    def __init__(
        self,
        n_components=None,
        relaxation_schedule=2,
        num_samples=1,
    ):
        self.n_components = n_components
        self.relaxation_schedule = relaxation_schedule
        self.num_samples = num_samples
        self.X = None
        self.X_pca = None

    def _get_hamiltonian(
        self,
        X: np.array,
    ):
        num_records = X.shape[0]
        num_features = X.shape[1]

        J = -np.matmul(X.transpose(), X)

        assert J.shape[0] == num_features
        assert J.shape[1] == num_features

        C = np.zeros((num_features, 1))

        return J, C

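    # With J = -X^T X, minimizing v^T J v over unit-norm v maximizes the
    # captured variance v^T X^T X v, so the solver's best solution
    # approximates the leading right singular vector of X. In the method
    # below, lambda0 is the corresponding Rayleigh quotient (an eigenvalue
    # estimate for X^T X), and u0 = X v0 / sqrt(lambda0) is the matching
    # left singular vector, i.e., the component scores.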
    def _get_first_component(self, X):
        J, C = self._get_hamiltonian(X)

        assert J.shape[0] == J.shape[1], "Inconsistent hamiltonian size!"
        assert J.shape[0] == C.shape[0], "Inconsistent hamiltonian size!"

        self._set_model(J, C, 1.0)

        sol, response = self._solve()

        assert len(sol) == C.shape[0], "Inconsistent solution size!"

        fct = np.linalg.norm(sol)
        if fct > 0:
            fct = 1.0 / fct

        v0 = fct * np.array(sol)
        v0 = v0.reshape((v0.shape[0], 1))

        lambda0 = np.matmul(np.matmul(v0.transpose(), -J), v0)[0][0]

        assert lambda0 >= 0, "Unexpected negative eigenvalue!"

        fct = np.sqrt(lambda0)
        if fct > 0:
            fct = 1.0 / fct

        u0 = fct * np.matmul(X, v0)
        u0 = u0.reshape(-1)

        fct = np.linalg.norm(u0)
        if fct > 0:
            fct = 1.0 / fct

        u0 = fct * u0

        return u0, response

    def fit(self, X):
        """
        Build a PCA object from the training set X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples.

        Returns
        -------
        responses : dict
            A dict containing Dirac responses, keyed by component.
        """

        num_features = X.shape[1]
        if self.n_components is None:
            n_components = num_features
        else:
            n_components = self.n_components

        n_components = min(n_components, num_features)

        self.X = X.copy()
        self.X_pca = []
        resp_hash = {}
        for i in range(n_components):
            u, resp = self._get_first_component(X)
            self.X_pca.append(u)
            u = u.reshape((u.shape[0], 1))

            X = X - np.matmul(
                u,
                np.matmul(u.transpose(), X),
            )

            assert X.shape == self.X.shape, "Inconsistent size!"

            resp_hash["component_%d_response" % (i + 1)] = resp

        self.X_pca = np.array(self.X_pca).transpose()

        assert self.X_pca.shape[0] == self.X.shape[0]
        assert self.X_pca.shape[1] == n_components

        return resp_hash

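    # Each pass of the fit loop above removes the component just found via
    # the deflation X <- X - u (u^T X): X is projected onto the orthogonal
    # complement of u, so the next Dirac solve targets the next largest
    # singular direction.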
    def transform(self, X: np.array):
        """
        Apply dimensionality reduction to X.

        X is projected on the first principal components previously extracted
        from a training set.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            New data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        Returns
        -------
        X_new : array-like of shape (n_samples, n_components)
            Projection of X in the first principal components, where `n_samples`
            is the number of samples and `n_components` is the number of
            components.
        """
        if self.X is None:
            return

        # Note: this returns the scores computed during fit (the in-sample
        # projection); the argument X is not re-projected.
        return self.X_pca

    def fit_transform(self, X):
        """
        Fit the model with X and apply the dimensionality reduction on X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        Returns
        -------
        X_new : ndarray of shape (n_samples, n_components)
            Transformed values.
        """

        self.fit(X)

        return self.transform(X)

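For reference, the deflation scheme in PCA.fit can be reproduced classically. The following minimal numpy sketch replaces the Dirac-3 ground-state solve with an exact eigensolver and otherwise mirrors the normalization and deflation steps; it is illustrative only and not part of the package.

import numpy as np

def deflation_pca(X, n_components):
    # Classical stand-in for PCA.fit: the exact top eigenvector of X^T X
    # replaces the Dirac-3 solve; everything else mirrors the steps above.
    X = X.astype(float).copy()
    scores = []
    for _ in range(n_components):
        w, v = np.linalg.eigh(X.T @ X)    # eigenvalues in ascending order
        v0 = v[:, -1:]                    # leading right singular vector
        lambda0 = (v0.T @ X.T @ X @ v0).item()
        u0 = (X @ v0) / np.sqrt(lambda0)  # matching left singular vector
        u0 /= np.linalg.norm(u0)
        scores.append(u0.ravel())
        X = X - u0 @ (u0.T @ X)           # deflation, as in fit()
    return np.array(scores).T

X = np.random.RandomState(0).randn(50, 4)
print(deflation_pca(X, 2).shape)          # (50, 2)
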
eqc_models-0.9.8.data/platlib/eqc_models/ml/forecast.py

@@ -0,0 +1,255 @@
import sys
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression, Ridge

from .reservoir import QciReservoir
from .forecastbase import BaseForecastModel

class ReservoirForecastModel(BaseForecastModel, QciReservoir):
    """
    A reservoir based forecast model.

    Parameters
    ----------

    ip_addr: The IP address of the device.

    num_nodes: Number of reservoir network nodes.

    feature_scaling: The factor used to scale the reservoir output.

    num_pads: Size of the pad used in the reservoir input;
    default: 0.

    reg_coef: L2 regularization coefficient for linear regression;
    default: 0.

    device: The QCi reservoir device. Currently only 'EmuCore' is
    supported; default: EmuCore.


    Examples
    --------

    >>> MAX_TRAIN_DAY = 800
    >>> IP_ADDR = "172.22.19.49"
    >>> FEATURE_SCALING = 0.1
    >>> NUM_NODES = 1000
    >>> NUM_PADS = 100
    >>> LAGS = 2
    >>> from contextlib import redirect_stdout
    >>> import io
    >>> f = io.StringIO()
    >>> from eqc_models.ml import ReservoirForecastModel
    >>> with redirect_stdout(f):
    ...     model = ReservoirForecastModel(
    ...         ip_addr=IP_ADDR,
    ...         num_nodes=NUM_NODES,
    ...         feature_scaling=FEATURE_SCALING,
    ...         num_pads=NUM_PADS,
    ...         device="EmuCore",
    ...     )
    ...     model.fit(
    ...         data=train_df,
    ...         feature_fields=["norm_cell_prod"],
    ...         target_fields=["norm_cell_prod"],
    ...         lags=LAGS,
    ...         horizon_size=1,
    ...     )
    ...     y_train_pred = model.predict(train_df, mode="in_sample")
    ...     y_test_pred = model.predict(test_df, mode="in_sample")
    >>> model.close()

    """

    def __init__(
        self,
        ip_addr,
        num_nodes,
        feature_scaling,
        num_pads: int = 0,
        reg_coef: float = 0.0,
        device: str = "EmuCore",
    ):
        BaseForecastModel.__init__(self)
        QciReservoir.__init__(self, ip_addr, num_nodes)

        assert device == "EmuCore", "Unknown device!"

        self.ip_addr = ip_addr
        self.num_nodes = num_nodes
        self.feature_scaling = feature_scaling
        self.num_pads = num_pads
        self.reg_coef = reg_coef
        self.device = device

        self.lock_id = None
        self.lin_model = None
        self.feature_fields = None
        self.target_fields = None
        self.lags = None
        self.horizon_size = None
        self.zero_pad_data = None
        self.train_pad_data = None

        self.init_reservoir()

    def close(self):
        self.release_lock()

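    # fit() below follows a standard reservoir-computing recipe: optionally
    # prepend padding rows to the series, build lagged feature/target
    # windows, push the features through the reservoir, and train a ridge
    # readout (linear regression with L2 regularization) on the reservoir
    # output.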
    def fit(
        self,
        data: pd.DataFrame,
        feature_fields: list,
        target_fields: list,
        lags: int = 0,
        horizon_size: int = 1,
    ):
        """A function to train a forecast model.

        Parameters
        ----------

        data: A pandas data frame that contains the time series.

        feature_fields: A list of fields in the data frame that are used
        as inputs to the reservoir.

        target_fields: A list of fields in the data frame that are to be
        forecasted.

        lags: Number of lags used; default: 0.

        horizon_size: Size of the horizon, i.e., the number of forecast
        steps.

        """

        # Pad input
        num_pads = self.num_pads
        if num_pads is not None and num_pads > 0:
            self.zero_pad_data = pd.DataFrame()
            for item in data.columns:
                self.zero_pad_data[item] = np.zeros(shape=(num_pads,))

            data = pd.concat([self.zero_pad_data, data])

        # Prep data
        fea_data = np.array(data[feature_fields])
        targ_data = np.array(data[target_fields])

        X_train, y_train, steps = self.prep_fea_targs(
            fea_data=fea_data,
            targ_data=targ_data,
            window_size=lags + 1,
            horizon_size=horizon_size,
        )

        # Save some parameters
        self.feature_fields = feature_fields
        self.target_fields = target_fields
        self.lags = lags
        self.horizon_size = horizon_size

        # Push to reservoir
        X_train_resp = self.push_reservoir(X_train)

        if num_pads is not None and num_pads > 0:
            X_train_resp = X_train_resp[num_pads:]
            y_train = y_train[num_pads:]

        # Build linear model
        # self.lin_model = LinearRegression(fit_intercept=True)
        self.lin_model = Ridge(alpha=self.reg_coef, fit_intercept=True)
        self.lin_model.fit(X_train_resp, y_train)

        # Get predictions
        y_train_pred = self.lin_model.predict(X_train_resp)

        # Echo some stats
        train_stats = self.get_stats(y_train, y_train_pred)

        print("Training stats:", train_stats)

        if num_pads is not None and num_pads > 0:
            self.train_pad_data = data.tail(num_pads)

        return

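    # predict() below re-applies the same padding and windowing as fit(),
    # then maps reservoir states through the trained ridge readout. In
    # 'in_sample' mode the targets are also windowed so stats can be
    # printed; in 'out_of_sample' mode only features are prepared.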
    def predict(
        self,
        data: pd.DataFrame,
        pad_mode: str = "zero",
        mode: str = "in_sample",
    ):
        """A function to get predictions from a forecast model.

        Parameters
        ----------

        data: A pandas data frame that contains the time series.

        pad_mode: Mode of the reservoir input padding, either
        'last_train' or 'zero'; default: 'zero'.

        mode: A value of 'out_of_sample' predicts the horizon
        following the time series. A value of 'in_sample' predicts in
        sample (used for testing); default: 'in_sample'.

        Returns
        -------

        The predictions: numpy.array((horizon_size, num_dims)).

        """

        assert self.lin_model is not None, "Model not trained yet!"
        assert mode in ["in_sample", "out_of_sample"], (
            "Unknown mode <%s>!" % mode
        )

        num_pads = self.num_pads
        if num_pads is not None and num_pads > 0:
            if pad_mode == "last_train":
                pad_data = self.train_pad_data
            else:
                pad_data = self.zero_pad_data

            data = pd.concat([pad_data, data])

        num_records = data.shape[0]
        fea_data = np.array(data[self.feature_fields])
        targ_data = np.array(data[self.target_fields])

        if mode == "in_sample":
            X, y, _ = self.prep_fea_targs(
                fea_data=fea_data,
                targ_data=targ_data,
                window_size=self.lags + 1,
                horizon_size=self.horizon_size,
            )
        elif mode == "out_of_sample":
            X = self.prep_out_of_sample(
                fea_data=fea_data,
                window_size=self.lags + 1,
                horizon_size=self.horizon_size,
            )
        else:
            assert False, "Unknown mode <%s>!" % mode

        X_resp = self.push_reservoir(X)

        if self.num_pads is not None and self.num_pads > 0:
            X_resp = X_resp[self.num_pads:]
            # y is only defined in 'in_sample' mode
            if mode == "in_sample":
                y = y[self.num_pads:]

        y_pred = self.lin_model.predict(X_resp)

        # Echo some stats
        if mode == "in_sample":
            stats = self.get_stats(y, y_pred)
            print("In-sample prediction stats:", stats)

        return y_pred
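The windowing helpers prep_fea_targs and prep_out_of_sample live in forecastbase.py (+139 in this release) and are not shown in this diff. As a rough sketch of the shapes fit() expects, here is a minimal lag-window construction under assumed conventions (each feature row stacks the window_size trailing observations, each target row holds the next horizon_size values); this is a hypothetical stand-in and the real helpers may differ.

import numpy as np

def make_windows(series, window_size, horizon_size):
    # series: (num_records, num_dims). Row i of X stacks the window_size
    # observations ending at i; row i of y holds the next horizon_size
    # values. Hypothetical stand-in for prep_fea_targs.
    X, y = [], []
    for i in range(window_size - 1, len(series) - horizon_size):
        X.append(series[i - window_size + 1 : i + 1].ravel())
        y.append(series[i + 1 : i + 1 + horizon_size].ravel())
    return np.array(X), np.array(y)

series = np.arange(10, dtype=float).reshape(-1, 1)
X, y = make_windows(series, window_size=3, horizon_size=1)
print(X.shape, y.shape)  # (7, 3) (7, 1)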