pydmoo 0.0.8__py3-none-any.whl → 0.0.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pydmoo/response/__init__.py +0 -0
- pydmoo/response/ar_model.py +96 -0
- pydmoo/response/tca_model.py +152 -0
- {pydmoo-0.0.8.dist-info → pydmoo-0.0.9.dist-info}/METADATA +1 -1
- pydmoo-0.0.9.dist-info/RECORD +8 -0
- pydmoo-0.0.8.dist-info/RECORD +0 -5
- {pydmoo-0.0.8.dist-info → pydmoo-0.0.9.dist-info}/WHEEL +0 -0
- {pydmoo-0.0.8.dist-info → pydmoo-0.0.9.dist-info}/licenses/LICENSE +0 -0
|
File without changes
|
|
@@ -0,0 +1,96 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class ARModel:
    """
    Autoregressive (AR) model implemented from scratch via least squares.

    Parameters
    ----------
    p : int
        Order of the AR model (number of lagged observations).

    Attributes
    ----------
    coef_ : ndarray or None
        Fitted coefficients. When fitted with ``trend='c'``, ``coef_[0]``
        is the intercept followed by the lag-1..lag-p coefficients;
        with ``trend='n'`` only the lag coefficients are present.
    resid_ : ndarray or None
        In-sample residuals after fitting. None before fitting.
    """

    def __init__(self, p):
        self.p = p            # AR order
        self.coef_ = None     # Model coefficients (intercept first if present)
        self.resid_ = None    # Residuals after fitting

    def fit(self, y, trend="c"):
        """
        Fit the AR(p) model to a univariate time series.

        Parameters
        ----------
        y : array_like, shape (M,)
            Time series data of length M (must satisfy M > p).
        trend : {'c', 'n'}, optional
            'c' to include a constant/intercept (default), 'n' for none.

        Returns
        -------
        self : ARModel
            The fitted instance.

        Raises
        ------
        ValueError
            If ``trend`` is not 'c' or 'n', or if ``len(y) <= p``.
        """
        # Previously an unrecognized trend value silently dropped the
        # intercept; fail loudly instead.
        if trend not in ("c", "n"):
            raise ValueError(f"trend must be 'c' or 'n', got {trend!r}")

        # Coerce to float ndarray so list/int inputs behave consistently.
        y = np.asarray(y, dtype=float)
        M = len(y)
        if M <= self.p:
            raise ValueError(f"Time series length M={M} must be > order p={self.p}")

        # Design matrix: column i holds the lag-(i+1) values aligned with Y.
        X = np.zeros((M - self.p, self.p))
        for i in range(self.p):
            X[:, i] = y[(self.p - i - 1) : (M - i - 1)]

        Y = y[self.p :]

        # Prepend a column of ones for the intercept if requested.
        if trend == "c":
            X = np.column_stack([np.ones(X.shape[0]), X])

        # Ordinary least squares fit.
        self.coef_ = np.linalg.lstsq(X, Y, rcond=None)[0]

        # In-sample residuals.
        self.resid_ = Y - X @ self.coef_

        return self

    def predict(self, y, steps=1):
        """
        Forecast future values recursively from the fitted AR model.

        Parameters
        ----------
        y : array_like, shape (K,)
            Input sequence (K >= p); the last p values seed the forecast.
        steps : int, optional
            Number of steps ahead to predict (default 1).

        Returns
        -------
        predictions : ndarray, shape (steps,)
            Predicted values.

        Raises
        ------
        ValueError
            If the model is unfitted or ``len(y) < p``.
        """
        y = np.asarray(y, dtype=float)
        if len(y) < self.p:
            raise ValueError(f"Input length {len(y)} must be >= model order {self.p}")
        if self.coef_ is None:
            raise ValueError("Model must be fitted before prediction")

        # Loop-invariant: whether coef_[0] is an intercept term.
        has_intercept = len(self.coef_) == self.p + 1

        predictions = np.zeros(steps)
        history = y[-self.p :].copy()  # Last p observations, oldest first

        for i in range(steps):
            # Latest p observations, most recent first, matching coef order.
            x = history[-self.p :][::-1]

            if has_intercept:
                x = np.insert(x, 0, 1.0)  # coef_[0] is the intercept

            pred = float(np.dot(x, self.coef_))
            predictions[i] = pred

            # Feed the forecast back in for multi-step prediction.
            history = np.append(history, pred)

        return predictions
|
|
@@ -0,0 +1,152 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
from scipy.linalg import eigh
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class TCAModel:
    """Transfer Component Analysis (TCA) for domain adaptation.

    TCA finds a latent space where source and target domain distributions
    are similar, by minimizing the Maximum Mean Discrepancy (MMD) between
    domains while preserving data variance.

    Parameters
    ----------
    kernel_type : {'linear', 'rbf', 'poly'}, default='rbf'
        Type of kernel function to use:
        - 'linear': Linear kernel (dot product)
        - 'rbf': Radial Basis Function (Gaussian) kernel
        - 'poly': Polynomial kernel
    kernel_param : float, default=1.0
        Parameter for the kernel function:
        - For 'rbf': gamma parameter (inverse of kernel width)
        - For 'poly': degree of polynomial
    dim : int, default=20
        Dimensionality of the latent space (number of components to keep).
        Capped at the total number of training samples.
    mu : float, default=0.5
        Regularization parameter for numerical stability.

    Attributes
    ----------
    W : ndarray of shape (n_samples, dim) or None
        Learned transformation matrix. None before fitting.
    X_train : ndarray of shape (n_samples, n_features) or None
        Training data stored for transformation. None before fitting.
    """

    def __init__(self, kernel_type='rbf', kernel_param=1.0, dim=20, mu=0.5):
        # Defaults follow the common TCA setup: Gaussian kernel, latent
        # dimensionality 20, and mu = 0.5.
        self.kernel_type = kernel_type
        self.kernel_param = kernel_param
        self.dim = dim
        self.mu = mu
        self.W = None
        self.X_train = None

    def fit(self, Xs, Xt):
        """Fit TCA model to source and target data.

        Parameters
        ----------
        Xs : ndarray of shape (ns_samples, n_features)
            Source domain feature matrix.
        Xt : ndarray of shape (nt_samples, n_features)
            Target domain feature matrix.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # Stack source and target data vertically
        X = np.vstack((Xs, Xt))
        n = X.shape[0]                     # Total number of samples
        ns, nt = Xs.shape[0], Xt.shape[0]  # Number of source/target samples

        # Compute kernel matrix K using the selected kernel function
        K = self._kernel(X, X)
        self.X_train = X  # Stored for the transform phase

        # MMD (Maximum Mean Discrepancy) matrix: encodes the distribution
        # difference between domains.
        M = np.zeros((n, n))
        M[:ns, :ns] = 1.0 / (ns * ns)               # Source-source block
        M[ns:, ns:] = 1.0 / (nt * nt)               # Target-target block
        M[:ns, ns:] = M[ns:, :ns] = -1.0 / (ns * nt)  # Cross blocks

        # Centering matrix H = I - (1/n) 11^T: projects onto the space
        # orthogonal to the all-ones vector.
        H = np.eye(n) - np.ones((n, n)) / n

        # BUG FIX: the previous implementation formed
        #     A = pinv(K M K + mu*I) @ K H K
        # and passed A to eigh. A is not symmetric, and eigh silently reads
        # only one triangle of its input, so the returned eigenvectors were
        # wrong. Solve the mathematically equivalent generalized
        # symmetric-definite eigenproblem instead:
        #     (K H K) w = lambda (K M K + mu*I) w
        # Both operands are symmetric (K, H, M are symmetric) and the right
        # side is positive definite thanks to the mu*I regularizer, which is
        # exactly the form eigh(a, b) supports.
        St = K @ H @ K                          # variance/scatter term
        Sd = K @ M @ K + self.mu * np.eye(n)    # MMD term + regularizer
        eigvals, eigvecs = eigh(St, Sd)

        # eigh returns eigenvalues in ascending order; keep the top `dim`
        # components (at most n), ordered by descending eigenvalue.
        k = min(self.dim, n)
        self.W = eigvecs[:, np.argsort(eigvals)[-k:][::-1]]

        return self

    def transform(self, X):
        """Transform data using learned TCA components.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Data to transform.

        Returns
        -------
        X_transformed : ndarray of shape (n_samples, dim)
            Data projected to the latent space.

        Raises
        ------
        ValueError
            If fit() hasn't been called before transform().
        """
        if self.W is None:
            raise ValueError("Model not fitted yet. Call fit() first.")

        # Kernel between new data and training data
        K = self._kernel(X, self.X_train)

        # Project to latent space: X' = K W
        return K @ self.W

    def _kernel(self, X1, X2):
        """Compute kernel matrix between X1 and X2.

        Parameters
        ----------
        X1 : ndarray of shape (n_samples1, n_features)
            First set of samples.
        X2 : ndarray of shape (n_samples2, n_features)
            Second set of samples.

        Returns
        -------
        K : ndarray of shape (n_samples1, n_samples2)
            Kernel matrix.

        Raises
        ------
        ValueError
            If kernel_type is not supported.
        """
        if self.kernel_type == 'linear':
            return X1 @ X2.T  # Linear kernel: K(x,y) = x^T y
        elif self.kernel_type == 'rbf':
            # RBF kernel: K(x,y) = exp(-gamma * ||x-y||^2)
            # Efficient expansion: ||x-y||^2 = ||x||^2 + ||y||^2 - 2 x^T y
            dist_sq = (np.sum(X1**2, axis=1)[:, np.newaxis]
                       + np.sum(X2**2, axis=1)
                       - 2 * X1 @ X2.T)
            # Clip tiny negative values caused by floating-point cancellation.
            np.maximum(dist_sq, 0.0, out=dist_sq)
            return np.exp(-self.kernel_param * dist_sq)
        elif self.kernel_type == 'poly':
            # Polynomial kernel: K(x,y) = (x^T y + 1)^d
            return (X1 @ X2.T + 1) ** self.kernel_param
        else:
            raise ValueError(f"Unsupported kernel type: {self.kernel_type}. Choose from 'linear', 'rbf', 'poly'")
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
pydmoo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
2
|
+
pydmoo/response/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
3
|
+
pydmoo/response/ar_model.py,sha256=EmT46fEAcHJe7GrmKN9OVBv-x_nF-fZpJcPTX09FSGw,2676
|
|
4
|
+
pydmoo/response/tca_model.py,sha256=hfGdJUs01lYzkCvleJ0ScAsVVIoc-EmftBu_ej_ksBg,5405
|
|
5
|
+
pydmoo-0.0.9.dist-info/METADATA,sha256=QHOdy6xo98Eco92-HC_bYfTzdXeuO78HbMD4svKvDFs,1685
|
|
6
|
+
pydmoo-0.0.9.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
|
7
|
+
pydmoo-0.0.9.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
|
|
8
|
+
pydmoo-0.0.9.dist-info/RECORD,,
|
pydmoo-0.0.8.dist-info/RECORD
DELETED
|
@@ -1,5 +0,0 @@
|
|
|
1
|
-
pydmoo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
2
|
-
pydmoo-0.0.8.dist-info/METADATA,sha256=Rp-hmkTh1VEFBCI7xYhPT-ZHRcgYZ3qwRiHj9O1On1E,1685
|
|
3
|
-
pydmoo-0.0.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
|
4
|
-
pydmoo-0.0.8.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
|
|
5
|
-
pydmoo-0.0.8.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|