unaiverse-0.1.6-cp311-cp311-musllinux_1_2_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of unaiverse might be problematic.

Files changed (50)
  1. unaiverse/__init__.py +19 -0
  2. unaiverse/agent.py +2008 -0
  3. unaiverse/agent_basics.py +1846 -0
  4. unaiverse/clock.py +191 -0
  5. unaiverse/dataprops.py +1209 -0
  6. unaiverse/hsm.py +1880 -0
  7. unaiverse/modules/__init__.py +18 -0
  8. unaiverse/modules/cnu/__init__.py +17 -0
  9. unaiverse/modules/cnu/cnus.py +536 -0
  10. unaiverse/modules/cnu/layers.py +261 -0
  11. unaiverse/modules/cnu/psi.py +60 -0
  12. unaiverse/modules/hl/__init__.py +15 -0
  13. unaiverse/modules/hl/hl_utils.py +411 -0
  14. unaiverse/modules/networks.py +1509 -0
  15. unaiverse/modules/utils.py +680 -0
  16. unaiverse/networking/__init__.py +16 -0
  17. unaiverse/networking/node/__init__.py +18 -0
  18. unaiverse/networking/node/connpool.py +1261 -0
  19. unaiverse/networking/node/node.py +2223 -0
  20. unaiverse/networking/node/profile.py +446 -0
  21. unaiverse/networking/node/tokens.py +79 -0
  22. unaiverse/networking/p2p/__init__.py +198 -0
  23. unaiverse/networking/p2p/go.mod +127 -0
  24. unaiverse/networking/p2p/go.sum +548 -0
  25. unaiverse/networking/p2p/golibp2p.py +18 -0
  26. unaiverse/networking/p2p/golibp2p.pyi +135 -0
  27. unaiverse/networking/p2p/lib.go +2714 -0
  28. unaiverse/networking/p2p/lib.go.sha256 +1 -0
  29. unaiverse/networking/p2p/lib_types.py +312 -0
  30. unaiverse/networking/p2p/message_pb2.py +63 -0
  31. unaiverse/networking/p2p/messages.py +265 -0
  32. unaiverse/networking/p2p/mylogger.py +77 -0
  33. unaiverse/networking/p2p/p2p.py +929 -0
  34. unaiverse/networking/p2p/proto-go/message.pb.go +616 -0
  35. unaiverse/networking/p2p/unailib.cpython-311-x86_64-linux-musl.so +0 -0
  36. unaiverse/streamlib/__init__.py +15 -0
  37. unaiverse/streamlib/streamlib.py +210 -0
  38. unaiverse/streams.py +770 -0
  39. unaiverse/utils/__init__.py +16 -0
  40. unaiverse/utils/ask_lone_wolf.json +27 -0
  41. unaiverse/utils/lone_wolf.json +19 -0
  42. unaiverse/utils/misc.py +305 -0
  43. unaiverse/utils/sandbox.py +293 -0
  44. unaiverse/utils/server.py +435 -0
  45. unaiverse/world.py +175 -0
  46. unaiverse-0.1.6.dist-info/METADATA +365 -0
  47. unaiverse-0.1.6.dist-info/RECORD +50 -0
  48. unaiverse-0.1.6.dist-info/WHEEL +5 -0
  49. unaiverse-0.1.6.dist-info/licenses/LICENSE +43 -0
  50. unaiverse-0.1.6.dist-info/top_level.txt +1 -0
unaiverse/modules/cnu/layers.py
@@ -0,0 +1,261 @@
+ """
+ █████ █████ ██████ █████ █████ █████ █████ ██████████ ███████████ █████████ ██████████
+ ░░███ ░░███ ░░██████ ░░███ ░░███ ░░███ ░░███ ░░███░░░░░█░░███░░░░░███ ███░░░░░███░░███░░░░░█
+ ░███ ░███ ░███░███ ░███ ██████ ░███ ░███ ░███ ░███ █ ░ ░███ ░███ ░███ ░░░ ░███ █ ░
+ ░███ ░███ ░███░░███░███ ░░░░░███ ░███ ░███ ░███ ░██████ ░██████████ ░░█████████ ░██████
+ ░███ ░███ ░███ ░░██████ ███████ ░███ ░░███ ███ ░███░░█ ░███░░░░░███ ░░░░░░░░███ ░███░░█
+ ░███ ░███ ░███ ░░█████ ███░░███ ░███ ░░░█████░ ░███ ░ █ ░███ ░███ ███ ░███ ░███ ░ █
+ ░░████████ █████ ░░█████░░████████ █████ ░░███ ██████████ █████ █████░░█████████ ██████████
+ ░░░░░░░░ ░░░░░ ░░░░░ ░░░░░░░░ ░░░░░ ░░░ ░░░░░░░░░░ ░░░░░ ░░░░░ ░░░░░░░░░ ░░░░░░░░░░
+ A Collectionless AI Project (https://collectionless.ai)
+ Registration/Login: https://unaiverse.io
+ Code Repositories: https://github.com/collectionlessai/
+ Main Developers: Stefano Melacci (Project Leader), Christian Di Maio, Tommaso Guidi
+ """
+ import math
+ import torch
+ import torch.nn.functional as F
+ from collections.abc import Iterable
+ from .cnus import CNUs
+
+
+ class LinearCNU(CNUs):
+     def __init__(self, in_features, out_features, bias=True, device=None,
+                  shared_keys=True, key_mem_units=2, psi_fn='identity', key_size=None, **kwargs):
+         self.in_features = in_features
+         self.out_features = out_features
+         self.bias = bias
+         self.shared_keys = shared_keys
+
+         # The structural arguments of the CNUs base class are derived below, so they must not be passed directly
+         assert 'q' not in kwargs, "The number of CNUs is automatically determined, do not set argument 'q'"
+         assert 'd' not in kwargs, "The size of each key can be specified with argument 'key_size', " \
+                                   "do not set argument 'd'"
+         assert 'm' not in kwargs, "The number of keys and memory units can be specified with argument " \
+                                   "'key_mem_units', do not set argument 'm'"
+         assert 'u' not in kwargs, "The size of each memory unit is automatically determined, do not set argument 'u'"
+
+         # Number of keys/memory units
+         kwargs['m'] = key_mem_units
+
+         # Size of each key (defaults to the input size)
+         kwargs['d'] = in_features if key_size is None else key_size
+
+         # Function used to compare input against keys
+         kwargs['psi_fn'] = psi_fn
+
+         if not shared_keys:
+
+             # Each neuron is an independent CNU, with its own keys and its own memory units
+             kwargs['q'] = self.out_features
+             kwargs['u'] = self.in_features + (1 if self.bias else 0)
+         else:
+
+             # All the CNUs of the layer share the same keys, thus their memory units are concatenated
+             kwargs['q'] = 1
+             kwargs['u'] = self.out_features * (self.in_features + (1 if self.bias else 0))
+
+         # Creating neurons
+         super(LinearCNU, self).__init__(**kwargs)
+
+         # Switching device
+         if device is not None:
+             self.to(device)
+
+         # Clearing: a disabled bias is stored as None, mirroring torch.nn.Linear
+         if not self.bias:
+             self.bias = None
+
+     def forward(self, x):
+
+         # Getting the per-sample weights
+         W = self.compute_weights(x)
+
+         # Ensuring the shape is right (needed when neurons share the same keys)
+         W = W.reshape((x.shape[0], self.out_features, -1))  # [b,q,1] => [b,out_features,(in_features + 1-if-bias)]
+
+         # Splitting into weights and biases
+         if self.bias:
+             weights = W[:, :, :-1]  # [b,out_features,in_features]
+             bias = W[:, :, -1]  # [b,out_features]
+         else:
+             weights = W  # [b,out_features,in_features]
+             bias = None
+
+         # Batched linear projection: matmul([b,out_features,in_features], [b,in_features,1]) = [b,out_features,1],
+         # that we squeeze to [b,out_features]
+         o = torch.matmul(weights, x.unsqueeze(2)).squeeze(2)  # [b,out_features]
+         if bias is not None:
+             o += bias
+         return o
+
+     def reset_parameters(self):
+         self.reset_memories = False
+         super().reset_parameters()
+
+         # We ensure that memories M are initialized as PyTorch does for the classic linear layer
+         q = self.M.shape[0]
+         m = self.M.shape[1]
+         self.M.data.zero_()  # Ensures we don't keep old values
+
+         for j in range(q):
+             for i in range(m):
+
+                 # Initialize weight and bias separately for each memory unit
+                 weight = torch.empty(self.out_features if self.shared_keys else 1, self.in_features)
+                 torch.nn.init.kaiming_uniform_(weight, a=math.sqrt(5))  # Computes the fan-in internally
+
+                 if self.bias:
+                     bias = torch.empty(self.out_features if self.shared_keys else 1)
+                     bound = 1 / math.sqrt(self.in_features)
+                     torch.nn.init.uniform_(bias, -bound, bound)
+                     weight_bias = torch.cat([weight, bias.unsqueeze(1)], dim=1)
+                 else:
+                     weight_bias = weight
+
+                 # Store the flattened weight_bias into self.M[j, i]
+                 self.M.data[j, i, :] = weight_bias.flatten()
+
+     def __str__(self):
+         s = "- in_features = " + str(self.in_features) + "\n"
+         s += "- out_features = " + str(self.out_features) + "\n"
+         s += "- bias = " + str(self.bias) + "\n"
+         return "[cnu-based Linear Layer]\n" + s + super().__str__()
+
+
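Editorial note: the class above behaves like a data-conditioned torch.nn.Linear, so a minimal usage sketch looks as follows. This assumes the CNUs base class is a torch.nn.Module subclass that consumes the keyword arguments set in __init__ and whose compute_weights(x) returns one blended memory per sample, as annotated in forward; it is a sketch, not part of the package:

    import torch
    from unaiverse.modules.cnu.layers import LinearCNU

    layer = LinearCNU(in_features=8, out_features=3, key_mem_units=2, shared_keys=True)
    x = torch.randn(4, 8)  # batch of 4 input vectors
    y = layer(x)           # per-sample weights are generated, then applied: shape [4, 3]

The batched projection in forward is just matmul([b,out_features,in_features], [b,in_features,1]) followed by a squeeze, so each sample is multiplied by its own generated weight matrix instead of a single shared one.
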
+ class Conv2d(CNUs):
+
+     def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, padding_mode='zeros',
+                  dilation=1, groups=1, bias=True, device=None,
+                  shared_keys=True, key_mem_units=2, psi_fn='reduce2d', key_size=None, **kwargs):
+         self.in_channels = in_channels
+         self.out_channels = out_channels
+         self.kernel_size = kernel_size if isinstance(kernel_size, Iterable) else (kernel_size, kernel_size)
+         self.stride = stride if isinstance(stride, Iterable) else (stride, stride)
+         self.padding = padding
+         self.padding_mode = padding_mode
+         self.dilation = dilation if isinstance(dilation, Iterable) else (dilation, dilation)
+         self.groups = groups
+         self.bias = bias
+         self.in_features = math.prod(self.kernel_size) * self.in_channels
+
+         valid_padding_modes = {'zeros', 'reflect', 'replicate', 'circular'}
+         if padding_mode not in valid_padding_modes:
+             raise ValueError("padding_mode must be one of {}, but got padding_mode='{}'".format(
+                 valid_padding_modes, padding_mode))
+         if isinstance(padding, str):
+             self.__reversed_padding_repeated_twice = [0, 0] * len(self.kernel_size)
+             if padding == 'same':
+                 for d, k, i in zip(self.dilation, self.kernel_size,
+                                    range(len(self.kernel_size) - 1, -1, -1)):
+                     total_padding = d * (k - 1)
+                     left_pad = total_padding // 2
+                     self.__reversed_padding_repeated_twice[2 * i] = left_pad
+                     self.__reversed_padding_repeated_twice[2 * i + 1] = (total_padding - left_pad)
+         else:
+             self.padding = padding if isinstance(padding, Iterable) else (padding, padding)
+             self.__reversed_padding_repeated_twice = tuple(x for x in reversed(self.padding) for _ in range(2))
+
+         # The structural arguments of the CNUs base class are derived below, so they must not be passed directly
+         assert 'q' not in kwargs, "The number of CNUs is automatically determined, do not set argument 'q'"
+         assert 'd' not in kwargs, "The size of each key can be specified with argument 'key_size', " \
+                                   "do not set argument 'd'"
+         assert 'm' not in kwargs, "The number of keys and memory units can be specified with argument " \
+                                   "'key_mem_units', do not set argument 'm'"
+         assert 'u' not in kwargs, "The size of each memory unit is automatically determined, do not set argument 'u'"
+
+         # Number of keys/memory units
+         kwargs['m'] = key_mem_units
+
+         # Size of each key (defaults to a 5x5 receptive field over all input channels)
+         if key_size is not None:
+             if isinstance(key_size, (tuple, list)):
+                 key_size = math.prod(key_size)
+             kwargs['d'] = key_size
+         else:
+             kwargs['d'] = (5 * 5 * self.in_channels)
+
+         # Function used to compare input against keys
+         kwargs['psi_fn'] = psi_fn
+
+         if not shared_keys:
+
+             # Each neuron is an independent CNU, with its own keys and its own memory units
+             kwargs['q'] = self.out_channels
+             kwargs['u'] = self.in_features + (1 if self.bias else 0)
+         else:
+
+             # All the CNUs of the layer share the same keys, thus their memory units are concatenated
+             kwargs['q'] = 1
+             kwargs['u'] = self.out_channels * (self.in_features + (1 if self.bias else 0))
+
+         # Creating neurons
+         super(Conv2d, self).__init__(**kwargs)
+
+         # Switching device
+         if device is not None:
+             self.to(device)
+
+     def forward(self, x):
+
+         # Shortcuts
+         b, c, h, w = x.shape
+
+         # Getting the per-sample weights
+         W = self.compute_weights(x)
+
+         # Ensuring the shape is right (needed when neurons share the same keys)
+         W = W.reshape((b, self.out_channels, -1))  # [b,q,1] => [b,out_channels,(in_features + 1-if-bias)]
+
+         # Splitting into weights and biases
+         if self.bias:
+             weights = W[:, :, :-1]  # [b,out_channels,in_features]
+             bias = W[:, :, -1]  # [b,out_channels]
+         else:
+             weights = W  # [b,out_channels,in_features]
+             bias = None
+
+         # Creating the tensor with the convolutional filters
+         kernels = self.__mat2filters(weights)
+
+         # Stacking all the images of the batch along the channel dimension
+         x = x.view(1, b * c, h, w)
+
+         # Grouped convolution: each of the b groups convolves one image with its own per-sample filters
+         if self.padding_mode != 'zeros':
+             x = F.conv2d(F.pad(x, self.__reversed_padding_repeated_twice, mode=self.padding_mode),
+                          kernels, bias.flatten() if bias is not None else None, self.stride,
+                          (0, 0), self.dilation, groups=(b * self.groups))
+         else:
+             x = F.conv2d(x, kernels, bias.flatten() if bias is not None else None, self.stride,
+                          self.padding, self.dilation, groups=(b * self.groups))
+
+         return x.view(b, self.out_channels, x.shape[2], x.shape[3])
+
+     def __mat2filters(self, weights):
+         """
+         :param weights: tensor with blended memories (weights) with shape [b,out_channels,in_features]
+         """
+         # __init__ guarantees self.kernel_size is a pair, so we unpack it directly (works for tuples and lists)
+         kernel_size_h, kernel_size_w = self.kernel_size
+         b = weights.shape[0]
+         out_channels = b * weights.shape[1]
+         receptive_field_volume = weights.shape[2]
+         in_channels_div_b_times_groups = receptive_field_volume // (kernel_size_h * kernel_size_w)
+         return weights.reshape(out_channels, in_channels_div_b_times_groups, kernel_size_h, kernel_size_w)
+
+     def __str__(self):
+         s = "- in_channels = " + str(self.in_channels) + "\n"
+         s += "- out_channels = " + str(self.out_channels) + "\n"
+         s += "- kernel_size = " + str(self.kernel_size) + "\n"
+         s += "- stride = " + str(self.stride) + "\n"
+         s += "- padding = " + str(self.padding) + "\n"
+         s += "- padding_mode = " + str(self.padding_mode) + "\n"
+         s += "- dilation = " + str(self.dilation) + "\n"
+         s += "- groups = " + str(self.groups) + "\n"
+         s += "- bias = " + str(self.bias) + "\n"
+         s += "- in_features = " + str(self.in_features) + "\n"
+         return "[cnu-based Conv2d Layer]\n" + s + super().__str__()
unaiverse/modules/cnu/psi.py
@@ -0,0 +1,60 @@
+ """
+ █████ █████ ██████ █████ █████ █████ █████ ██████████ ███████████ █████████ ██████████
+ ░░███ ░░███ ░░██████ ░░███ ░░███ ░░███ ░░███ ░░███░░░░░█░░███░░░░░███ ███░░░░░███░░███░░░░░█
+ ░███ ░███ ░███░███ ░███ ██████ ░███ ░███ ░███ ░███ █ ░ ░███ ░███ ░███ ░░░ ░███ █ ░
+ ░███ ░███ ░███░░███░███ ░░░░░███ ░███ ░███ ░███ ░██████ ░██████████ ░░█████████ ░██████
+ ░███ ░███ ░███ ░░██████ ███████ ░███ ░░███ ███ ░███░░█ ░███░░░░░███ ░░░░░░░░███ ░███░░█
+ ░███ ░███ ░███ ░░█████ ███░░███ ░███ ░░░█████░ ░███ ░ █ ░███ ░███ ███ ░███ ░███ ░ █
+ ░░████████ █████ ░░█████░░████████ █████ ░░███ ██████████ █████ █████░░█████████ ██████████
+ ░░░░░░░░ ░░░░░ ░░░░░ ░░░░░░░░ ░░░░░ ░░░ ░░░░░░░░░░ ░░░░░ ░░░░░ ░░░░░░░░░ ░░░░░░░░░░
+ A Collectionless AI Project (https://collectionless.ai)
+ Registration/Login: https://unaiverse.io
+ Code Repositories: https://github.com/collectionlessai/
+ Main Developers: Stefano Melacci (Project Leader), Christian Di Maio, Tommaso Guidi
+ """
+ import math
+ import torch
+ import torch.nn.functional as F
+
+
+ def psi(x, mode, key_size, normalize=True):
+     if mode == "identity":
+         o = x.flatten(start_dim=1)
+     elif mode == "sign":
+         o = torch.sign(x.flatten(start_dim=1))
+     elif mode == "resize1d":
+         o = resize1d(x, key_size)
+     elif mode == "resize2d":
+         o = resize2d(x, key_size)
+     elif mode == "resize2d_sign":
+         o = torch.sign(resize2d(x, key_size))
+     else:
+         raise NotImplementedError
+     assert o.shape[1] == key_size, \
+         "The selected psi function (" + str(mode) + ") cannot map data to the target " \
+         "key_size (data_size: " + str(o.shape[1]) + ", key_size: " + str(key_size) + ")"
+     if normalize:
+         o = F.normalize(o, p=2.0, dim=1, eps=1e-12)
+     return o
+
+
+ def resize1d(I, key_size):
+     if I.shape[1] != key_size:
+         I = F.interpolate(I.unsqueeze(1), size=key_size, mode="linear").squeeze(1)
+     return I
+
+
+ def resize2d(I, key_size):
+     b, c, h, w = I.shape
+
+     # Choose a (h, w) grid whose area approximates key_size // c, preserving the aspect ratio
+     spatial_key_size = key_size // c
+     ratio = float(spatial_key_size) / float(w * h)
+     w = int(round(math.sqrt(ratio) * w))
+     h = spatial_key_size // w
+
+     # Resize, flatten, and zero-pad up to key_size when the grid does not cover it exactly
+     remainder = key_size - (c * h * w)
+     o = F.interpolate(I, size=(h, w), mode="bilinear").flatten(start_dim=1)
+     if h * w < spatial_key_size:
+         o = torch.cat([o, torch.zeros((b, remainder), device=o.device, dtype=o.dtype)], dim=1)
+     return o
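Editorial note: a short sketch of how psi maps a batch of images to fixed-size, L2-normalized key queries (hypothetical sizes, chosen so that resize2d needs no zero-padding):

    import torch
    from unaiverse.modules.cnu.psi import psi

    b, c, h, w = 2, 3, 32, 32
    x = torch.randn(b, c, h, w)

    # 3x32x32 inputs are resized to a 3x4x4 grid and flattened into keys of size 48
    o = psi(x, mode="resize2d", key_size=48)
    assert o.shape == (2, 48)
    assert torch.allclose(o.norm(dim=1), torch.ones(2), atol=1e-5)
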
unaiverse/modules/hl/__init__.py
@@ -0,0 +1,15 @@
+ """
+ █████ █████ ██████ █████ █████ █████ █████ ██████████ ███████████ █████████ ██████████
+ ░░███ ░░███ ░░██████ ░░███ ░░███ ░░███ ░░███ ░░███░░░░░█░░███░░░░░███ ███░░░░░███░░███░░░░░█
+ ░███ ░███ ░███░███ ░███ ██████ ░███ ░███ ░███ ░███ █ ░ ░███ ░███ ░███ ░░░ ░███ █ ░
+ ░███ ░███ ░███░░███░███ ░░░░░███ ░███ ░███ ░███ ░██████ ░██████████ ░░█████████ ░██████
+ ░███ ░███ ░███ ░░██████ ███████ ░███ ░░███ ███ ░███░░█ ░███░░░░░███ ░░░░░░░░███ ░███░░█
+ ░███ ░███ ░███ ░░█████ ███░░███ ░███ ░░░█████░ ░███ ░ █ ░███ ░███ ███ ░███ ░███ ░ █
+ ░░████████ █████ ░░█████░░████████ █████ ░░███ ██████████ █████ █████░░█████████ ██████████
+ ░░░░░░░░ ░░░░░ ░░░░░ ░░░░░░░░ ░░░░░ ░░░ ░░░░░░░░░░ ░░░░░ ░░░░░ ░░░░░░░░░ ░░░░░░░░░░
+ A Collectionless AI Project (https://collectionless.ai)
+ Registration/Login: https://unaiverse.io
+ Code Repositories: https://github.com/collectionlessai/
+ Main Developers: Stefano Melacci (Project Leader), Christian Di Maio, Tommaso Guidi
+ """
+ from . import hl_utils