grasp-tool 0.1.0__py3-none-any.whl

@@ -0,0 +1 @@
+ """GNN-related modules (graph loader, training utilities, visualization)."""
@@ -0,0 +1,165 @@
+ import numpy as np
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import math
+
+
+ class NonlinearTransformEmbedding(nn.Module):
+     """Embeds a scalar weight via a learnable linear layer followed by ReLU."""
+
+     def __init__(self, input_dim=1, output_dim=16):
+         super(NonlinearTransformEmbedding, self).__init__()
+         self.linear = nn.Linear(input_dim, output_dim)
+
+     def forward(self, weight):
+         # Expects a Python scalar; wrapped into a (1, input_dim) tensor.
+         weight = torch.tensor([[weight]], dtype=torch.float32)
+         embedding = F.relu(self.linear(weight))
+         return embedding.flatten()
+
+
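+ # A minimal usage sketch for NonlinearTransformEmbedding (illustrative only;
+ # assumes a scalar weight and the default 16-dim output):
+ #
+ # embed = NonlinearTransformEmbedding(input_dim=1, output_dim=16)
+ # vec = embed(0.5)  # torch.Tensor of shape (16,)
+ # assert vec.shape == (16,)
+
+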
+ def gaussian_embedding(weight, dim=16, sigma=0.1):
+     # Non-deterministic: each call draws a fresh Gaussian sample centered
+     # on the weight, so repeated calls give different embeddings.
+     embeddings = np.random.normal(loc=weight, scale=sigma, size=dim)
+     return embeddings
+
+
+ def nonlinear_transform_embedding(weight, dim=16):
+     # Non-deterministic unless a seed is set: the random projection matrix
+     # is redrawn on every call.
+     # np.random.seed(42)
+     transform_matrix = np.random.rand(dim, 1)
+     linear_output = np.dot(transform_matrix, np.array([[weight]]))  # (dim, 1)
+     embedding = np.maximum(linear_output, 0)  # ReLU
+     embeddings = embedding.flatten()
+     return embeddings
+
+
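+ # Usage sketch for the NumPy embedding helpers (illustrative values):
+ #
+ # g = gaussian_embedding(0.7)             # (16,) noisy sample around 0.7
+ # n = nonlinear_transform_embedding(0.7)  # (16,) random ReLU projection
+
+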
+ def transformer_positional_encoding(seq_len, d_model):
+     # Standard sinusoidal positional encoding; assumes d_model is even
+     # (for odd d_model the cosine assignment below would shape-mismatch).
+     position = np.arange(seq_len).reshape(-1, 1)
+     div_term = np.exp(np.arange(0, d_model, 2) * (-np.log(10000.0) / d_model))
+     pe = np.zeros((seq_len, d_model))
+     pe[:, 0::2] = np.sin(position * div_term)
+     pe[:, 1::2] = np.cos(position * div_term)
+     return pe
+
+
+ def fixed_positional_encoding(flags, num_positions=3, num_feature=16):
+     # Note: a new nn.Embedding is initialized on every call, so despite the
+     # name the encoding is random per call and is not trained anywhere.
+     # `flags` must be an integer tensor with values in [0, num_positions).
+     pos_embedding = nn.Embedding(num_positions, num_feature)
+     positional_encoding = pos_embedding(flags)
+     return positional_encoding
+
+
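+ # Usage sketch for fixed_positional_encoding (illustrative):
+ #
+ # flags = torch.tensor([0, 1, 2])          # integer position ids
+ # enc = fixed_positional_encoding(flags)   # shape (3, 16), random per call
+
+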
+ def generate_positional_encoding(seq_len=3, d_model=16):
+     # Same computation as transformer_positional_encoding, with default
+     # arguments; delegate rather than duplicating the implementation.
+     return transformer_positional_encoding(seq_len, d_model)
+
+
+ # Example usage
+ # positional_encoding = fixed_positional_encoding(torch.tensor([0, 1]))
+ # print(positional_encoding)
+ # positional_encoding = generate_positional_encoding(seq_len=2, d_model=16)
+ # print(positional_encoding)
+
+ # # Visualization (requires `import matplotlib.pyplot as plt` and a d_model
+ # # variable in scope)
+ # plt.figure(figsize=(8, 4))
+ # for i in range(d_model):
+ #     plt.plot(positional_encoding[:, i], label=f"Dim {i}")
+ # plt.xlabel("Position")
+ # plt.ylabel("Encoding Value")
+ # plt.title("Positional Encoding Visualization")
+ # plt.legend()
+ # plt.show()
+
+ # --- New embeddings for count_ratio (a continuous value in [0, 1]) ---
+
+
+ def _continuous_sinusoidal_base_embedding(value_0_to_1, even_dim, base_value=100.0):
+     """Helper for sinusoidal embedding; expects an even `even_dim`.
+
+     Implements Transformer-style positional encoding adapted for a continuous
+     value in [0, 1]. Low dimensions get higher frequencies (shorter periods);
+     high dimensions get lower frequencies (longer periods). The input value
+     is scaled by 2*pi before entering the sin/cos arguments, and the base
+     used for the period calculation is parameterizable.
+     """
+     if even_dim == 0:
+         return np.array([], dtype=np.float32)
+
+     position_scalar = float(value_0_to_1) * 2.0 * math.pi
+
+     dim_indices = np.arange(0, even_dim, 2, dtype=np.float64)
+
+     # Geometric progression of periods: base_value ** (i / even_dim).
+     period_term = np.power(float(base_value), dim_indices / float(even_dim))
+
+     # Defensive guard; for base_value >= 1 the periods are always >= 1.
+     period_term[period_term == 0] = 1e-6
+
+     embedding = np.zeros(even_dim, dtype=np.float32)
+     embedding[0::2] = np.sin(position_scalar / period_term)
+     embedding[1::2] = np.cos(position_scalar / period_term)
+     return embedding.flatten()
+
+
+ def get_sinusoidal_embedding_for_continuous_value(
+     value_0_to_1, dim=16, base_value=100.0
+ ):
+     """Generate a sinusoidal embedding for a continuous value in [0, 1].
+
+     If dim is odd, the last dimension carries the (clipped) value itself.
+
+     Args:
+         value_0_to_1 (float): Input value, expected to be in [0, 1];
+             values outside the range are clipped.
+         dim (int): Desired output embedding dimension.
+         base_value (float): Base used in the period calculation
+             (default 100.0 for inputs scaled to [0, 1]).
+
+     Returns:
+         np.ndarray: Embedding vector of shape (dim,).
+     """
+     value_0_to_1 = np.clip(float(value_0_to_1), 0.0, 1.0)
+
+     if not isinstance(dim, int) or dim <= 0:
+         return np.array([], dtype=np.float32)
+
+     if dim == 1:
+         return np.array([value_0_to_1], dtype=np.float32)
+
+     if dim % 2 == 0:
+         return _continuous_sinusoidal_base_embedding(value_0_to_1, dim, base_value)
+     else:
+         # Odd dim: use dim - 1 sin/cos features and append the raw value.
+         sin_cos_embedding = _continuous_sinusoidal_base_embedding(
+             value_0_to_1, dim - 1, base_value
+         )
+         return np.append(sin_cos_embedding, value_0_to_1).astype(np.float32)
+
+
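+ # Usage sketch for the sinusoidal embedding (illustrative values):
+ #
+ # even = get_sinusoidal_embedding_for_continuous_value(0.25, dim=16)
+ # odd = get_sinusoidal_embedding_for_continuous_value(0.25, dim=17)
+ # assert even.shape == (16,) and odd.shape == (17,)
+ # assert odd[-1] == np.float32(0.25)  # odd dim appends the clipped value
+
+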
+ def get_rbf_embedding_for_continuous_value(value_0_to_1, dim=16, sigma=None):
+     """Generate an RBF-based embedding for a continuous value in [0, 1].
+
+     Args:
+         value_0_to_1 (float): Input value, expected to be in [0, 1];
+             values outside the range are clipped.
+         dim (int): Desired output embedding dimension (number of RBF centers).
+         sigma (float, optional): Width of the Gaussian RBF kernels.
+             If None, defaults to a heuristic based on dim.
+
+     Returns:
+         np.ndarray: Embedding vector of shape (dim,).
+     """
+     value_0_to_1 = np.clip(float(value_0_to_1), 0.0, 1.0)
+
+     if not isinstance(dim, int) or dim <= 0:
+         return np.array([], dtype=np.float32)
+
+     # Evenly spaced RBF centers across [0, 1].
+     centers = np.linspace(0, 1, dim, dtype=np.float32)
+
+     if sigma is None:
+         if dim == 1:
+             sigma = 0.5  # For a single RBF, a wider sigma is reasonable.
+         else:
+             # Heuristic: a std dev of about 1/dim of the range gives
+             # moderately overlapping RBFs.
+             sigma = 1.0 / float(dim)
+     else:
+         sigma = float(sigma)
+
+     diffs = value_0_to_1 - centers
+     embedding = np.exp(-(diffs**2) / (2 * sigma**2))
+     return embedding.astype(np.float32)
+
+
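+ # Usage sketch for the RBF embedding (illustrative; dim=17 puts a center
+ # exactly at 0.5):
+ #
+ # emb = get_rbf_embedding_for_continuous_value(0.5, dim=17)
+ # assert emb.shape == (17,)
+ # assert emb.argmax() == 8 and emb[8] == 1.0  # center at 0.5 peaks
+
+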
+ # --- End of new embeddings ---