unaiverse 0.1.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- unaiverse/__init__.py +19 -0
- unaiverse/agent.py +2226 -0
- unaiverse/agent_basics.py +2389 -0
- unaiverse/clock.py +234 -0
- unaiverse/dataprops.py +1282 -0
- unaiverse/hsm.py +2471 -0
- unaiverse/modules/__init__.py +18 -0
- unaiverse/modules/cnu/__init__.py +17 -0
- unaiverse/modules/cnu/cnus.py +536 -0
- unaiverse/modules/cnu/layers.py +261 -0
- unaiverse/modules/cnu/psi.py +60 -0
- unaiverse/modules/hl/__init__.py +15 -0
- unaiverse/modules/hl/hl_utils.py +411 -0
- unaiverse/modules/networks.py +1509 -0
- unaiverse/modules/utils.py +748 -0
- unaiverse/networking/__init__.py +16 -0
- unaiverse/networking/node/__init__.py +18 -0
- unaiverse/networking/node/connpool.py +1332 -0
- unaiverse/networking/node/node.py +2752 -0
- unaiverse/networking/node/profile.py +446 -0
- unaiverse/networking/node/tokens.py +79 -0
- unaiverse/networking/p2p/__init__.py +188 -0
- unaiverse/networking/p2p/go.mod +127 -0
- unaiverse/networking/p2p/go.sum +548 -0
- unaiverse/networking/p2p/golibp2p.py +18 -0
- unaiverse/networking/p2p/golibp2p.pyi +136 -0
- unaiverse/networking/p2p/lib.go +2765 -0
- unaiverse/networking/p2p/lib_types.py +311 -0
- unaiverse/networking/p2p/message_pb2.py +50 -0
- unaiverse/networking/p2p/messages.py +360 -0
- unaiverse/networking/p2p/mylogger.py +78 -0
- unaiverse/networking/p2p/p2p.py +900 -0
- unaiverse/networking/p2p/proto-go/message.pb.go +846 -0
- unaiverse/stats.py +1506 -0
- unaiverse/streamlib/__init__.py +15 -0
- unaiverse/streamlib/streamlib.py +210 -0
- unaiverse/streams.py +804 -0
- unaiverse/utils/__init__.py +16 -0
- unaiverse/utils/lone_wolf.json +28 -0
- unaiverse/utils/misc.py +441 -0
- unaiverse/utils/sandbox.py +292 -0
- unaiverse/world.py +384 -0
- unaiverse-0.1.12.dist-info/METADATA +366 -0
- unaiverse-0.1.12.dist-info/RECORD +47 -0
- unaiverse-0.1.12.dist-info/WHEEL +5 -0
- unaiverse-0.1.12.dist-info/licenses/LICENSE +177 -0
- unaiverse-0.1.12.dist-info/top_level.txt +1 -0
@@ -0,0 +1,2765 @@
// lib.go
// This Go program compiles into a C shared library (.so file on Linux/macOS, .dll on Windows)
// exposing libp2p functionalities (host creation, peer connection, pubsub, direct messaging)
// for use by other languages, primarily Python via CFFI or ctypes.
package main

/*
#include <stdlib.h>
*/
import "C" // Enables CGo features, allowing Go to call C code and vice-versa.

import (
    // Standard Go libraries
    "bytes"           // For byte buffer manipulations (e.g., encoding/decoding, separators)
    "container/list"  // For an efficient ordered list (doubly-linked list for queues)
    "context"         // For managing cancellation signals and deadlines across API boundaries and goroutines
    "crypto/rand"     // For generating identity keys
    "crypto/tls"      // For TLS configuration and certificates
    "encoding/base64" // For encoding binary message data into JSON-safe strings
    "encoding/binary" // For encoding/decoding length prefixes in stream communication
    "encoding/json"   // For marshalling/unmarshalling data structures to/from JSON (used for C API communication)
    "errors"          // For handling some types of errors
    "fmt"             // For formatted string creation and printing
    "io"              // For input/output operations (e.g., reading from streams)
    "log"             // For logging information, warnings, and errors
    "net"             // For network-related errors and interfaces
    "os"              // For interacting with the operating system (e.g., Stdout)
    "path/filepath"   // For file path manipulations (e.g., saving/loading identity keys)
    "strings"         // For string manipulations (e.g., trimming, splitting)
    "sync"            // For synchronization primitives like Mutexes and RWMutexes to protect shared data
    "time"            // For time-related functions (e.g., timeouts, timestamps)
    "unsafe"          // For using Go pointers with C code (specifically C.free)

    // Core libp2p libraries
    libp2p "github.com/libp2p/go-libp2p"                          // Main libp2p package for creating a host
    dht "github.com/libp2p/go-libp2p-kad-dht"                     // Kademlia DHT implementation for peer discovery and routing
    "github.com/libp2p/go-libp2p/core/crypto"                     // Defines cryptographic primitives (keys, signatures)
    "github.com/libp2p/go-libp2p/core/event"                      // Event bus for subscribing to libp2p events (connections, reachability changes)
    "github.com/libp2p/go-libp2p/core/host"                       // Defines the main Host interface, representing a libp2p node
    "github.com/libp2p/go-libp2p/core/network"                    // Defines network interfaces like Stream and Connection
    "github.com/libp2p/go-libp2p/core/peer"                       // Defines Peer ID and AddrInfo types
    "github.com/libp2p/go-libp2p/core/peerstore"                  // Defines the Peerstore interface for storing peer metadata (addresses, keys)
    "github.com/libp2p/go-libp2p/core/routing"                    // Defines the Routing interface for peer routing (e.g., DHT)
    autorelay "github.com/libp2p/go-libp2p/p2p/host/autorelay"    // AutoRelay for automatic relay selection and usage
    rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" // Resource manager for controlling resource usage (connections, streams)
    rc "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay" // Import for relay service options

    // Transport protocols for libp2p
    quic "github.com/libp2p/go-libp2p/p2p/transport/quic"                 // QUIC transport for peer-to-peer connections (e.g., for mobile devices)
    "github.com/libp2p/go-libp2p/p2p/transport/tcp"                       // TCP transport for peer-to-peer connections (most common)
    webrtc "github.com/libp2p/go-libp2p/p2p/transport/webrtc"             // WebRTC transport for peer-to-peer connections (e.g., for browsers or mobile devices)
    ws "github.com/libp2p/go-libp2p/p2p/transport/websocket"              // WebSocket transport for peer-to-peer connections (e.g., for browsers)
    webtransport "github.com/libp2p/go-libp2p/p2p/transport/webtransport" // WebTransport transport for peer-to-peer connections (e.g., for browsers)

    // --- AutoTLS imports ---
    "github.com/caddyserver/certmagic"                // Automatic TLS certificate management (used by p2p-forge)
    golog "github.com/ipfs/go-log/v2"                 // IPFS logging library for structured logging
    p2pforge "github.com/ipshipyard/p2p-forge/client" // p2p-forge library for automatic TLS and domain management

    // Protobuf
    pg "unailib/proto-go" // Generated Protobuf code for our message formats

    "google.golang.org/protobuf/proto" // Core Protobuf library for marshalling/unmarshalling messages

    // PubSub library
    pubsub "github.com/libp2p/go-libp2p-pubsub" // GossipSub implementation for publish/subscribe messaging

    // Multiaddr libraries (libp2p's addressing format)
    ma "github.com/multiformats/go-multiaddr" // Core multiaddr parsing and manipulation
)

// UnaiverseChatProtocol defines the protocol ID string used for direct peer-to-peer messaging streams.
// This ensures that both peers understand how to interpret the data on the stream.
// const UnaiverseChatProtocol = "/unaiverse-chat-protocol/1.0.0"
const UnaiverseChatProtocol = "/unaiverse/chat/1.0.0"
const UnaiverseUserAgent = "go-libp2p/example/autotls"
const DisconnectionGracePeriod = 10 * time.Second

// ExtendedPeerInfo holds information about a connected peer.
type ExtendedPeerInfo struct {
    ID          peer.ID        `json:"id"`           // The Peer ID of the connected peer.
    Addrs       []ma.Multiaddr `json:"addrs"`        // The multiaddr(s) associated with the peer.
    ConnectedAt time.Time      `json:"connected_at"` // Timestamp when the connection was established.
    Direction   string         `json:"direction"`    // Direction of the connection: "inbound" or "outbound".
    Misc        int            `json:"misc"`         // Miscellaneous information (integer), custom usage.
    Relayed     bool           `json:"relayed"`      // Currently unused (but used in JS).
}

// RendezvousState holds the discovered peers from a rendezvous topic,
// along with metadata about the freshness of the data.
type RendezvousState struct {
    Peers       map[peer.ID]ExtendedPeerInfo `json:"peers"`
    UpdateCount int64                        `json:"update_count"`
}

// QueuedMessage represents a message received either directly or via PubSub.
//
// This lightweight version stores the binary payload in the `Data` field,
// while the `From` field contains the Peer ID of the sender for security reasons.
// It has to match the 'sender' field in the Protobuf payload of the message.
type QueuedMessage struct {
    From peer.ID `json:"from"` // The VERIFIED peer ID of the sender from the network layer.
    Data []byte  `json:"-"`    // The raw data payload (Protobuf encoded).
}

// MessageStore holds the QueuedMessages for each channel in separate FIFO queues.
// It has a maximum number of channels and a maximum queue length per channel.
type MessageStore struct {
    mu                sync.Mutex            // Protects the message store from concurrent access.
    messagesByChannel map[string]*list.List // Stores a FIFO queue of messages for each channel.
}

// NodeConfig contains the parameters to initialize a node.
type NodeConfig struct {
    IdentityDir    string   `json:"identity_dir"`
    PredefinedPort int      `json:"predefined_port"`
    ListenIPs      []string `json:"listen_ips"`

    // Group relay logic
    Relay struct {
        EnableClient    bool `json:"enable_client"`
        EnableService   bool `json:"enable_service"`
        WithBroadLimits bool `json:"with_broad_limits"`
    } `json:"relay"`

    // Group TLS logic (the mutually exclusive options become clear here)
    TLS struct {
        AutoTLS  bool   `json:"auto_tls"`
        Domain   string `json:"domain"`
        CertPath string `json:"cert_path"`
        KeyPath  string `json:"key_path"`
    } `json:"tls"`

    // Explicit configuration for the network environment
    Network struct {
        Isolated    bool `json:"isolated"`     // Only allow connections with friendly peers.
        ForcePublic bool `json:"force_public"` // Replaces knowsIsPublic.
    } `json:"network"`

    // Group DHT logic
    DHT struct {
        Enabled bool `json:"enabled"`
        Keep    bool `json:"keep"` // Keep it running after init.
    } `json:"dht"`
}
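
// exampleNodeConfigJSON is an illustrative sketch (not part of the released
// file): it decodes a NodeConfig of the shape the struct tags above describe,
// as the Python caller might serialize it across the C boundary. All field
// values here are invented for illustration.
func exampleNodeConfigJSON() (*NodeConfig, error) {
    raw := []byte(`{
        "identity_dir": "/tmp/unaiverse-node",
        "predefined_port": 4001,
        "listen_ips": ["0.0.0.0"],
        "relay": {"enable_client": true, "enable_service": false, "with_broad_limits": false},
        "tls": {"auto_tls": true, "domain": "", "cert_path": "", "key_path": ""},
        "network": {"isolated": false, "force_public": false},
        "dht": {"enabled": true, "keep": true}
    }`)
    var cfg NodeConfig
    if err := json.Unmarshal(raw, &cfg); err != nil {
        return nil, err
    }
    return &cfg, nil
}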

// CreateNodeResponse defines the structure of our success message.
type CreateNodeResponse struct {
    Addresses []string `json:"addresses"`
    IsPublic  bool     `json:"isPublic"`
}

// NodeInstance holds ALL state for a single libp2p node.
type NodeInstance struct {
    // Core components
    host         host.Host
    pubsub       *pubsub.PubSub
    dht          *dht.IpfsDHT
    ctx          context.Context
    cancel       context.CancelFunc
    certManager  *p2pforge.P2PForgeCertMgr
    messageStore *MessageStore

    // Address cache
    addrMutex  sync.RWMutex
    localAddrs []ma.Multiaddr

    // Static relay
    privateRelay *autorelay.AutoRelay
    // privateRelayAddrs []ma.Multiaddr

    // PubSub state
    pubsubMutex   sync.RWMutex
    topics        map[string]*pubsub.Topic
    subscriptions map[string]*pubsub.Subscription

    // Peer state
    peersMutex    sync.RWMutex
    friendlyPeers map[peer.ID]ExtendedPeerInfo

    // Stream state
    streamsMutex          sync.Mutex
    persistentChatStreams map[peer.ID]network.Stream

    // Disconnection grace period state
    disconnectionMutex  sync.Mutex
    disconnectionTimers map[peer.ID]context.CancelFunc

    // Rendezvous state
    rendezvousMutex sync.RWMutex
    rendezvousState *RendezvousState

    // A copy of its own index, for logging
    instanceIndex int
}

// --- Create a package-level logger ---
var logger = golog.Logger("unailib")

// --- Multi-instance state management ---
var (
    // The libp2p configuration parameters.
    maxInstances       int
    maxChannelQueueLen int
    maxUniqueChannels  int
    MaxMessageSize     uint32

    // A single slice to hold all our instances.
    allInstances []*NodeInstance
    // A SINGLE mutex to protect the allInstances slice itself (during create/close).
    globalInstanceMutex sync.RWMutex
)

// --- Helper functions ---

// jsonErrorResponse creates a JSON string representing an error state.
// It takes a base message and an optional error, formats them, escapes the message
// for JSON embedding, and returns a C string pointer (`*C.char`).
// The caller (usually C/Python) is responsible for freeing this C string using FreeString.
func jsonErrorResponse(
    message string,
    err error,
) *C.char {

    errMsg := message
    if err != nil {
        errMsg = fmt.Sprintf("%s: %s", message, err.Error())
    }
    logger.Errorf("[GO] ❌ Error: %s", errMsg)
    // Ensure error messages are escaped properly for JSON embedding.
    escapedErrMsg := escapeStringForJSON(errMsg)
    // Format into a standard {"state": "Error", "message": "..."} JSON structure.
    jsonError := fmt.Sprintf(`{"state":"Error","message":"%s"}`, escapedErrMsg)
    // Convert the Go string to a C string (allocates memory in the C heap).
    return C.CString(jsonError)
}

// jsonSuccessResponse creates a JSON string representing a success state.
// It takes an arbitrary Go object (`message`), marshals it into JSON, wraps it
// in a standard {"state": "Success", "message": {...}} structure, and returns
// a C string pointer (`*C.char`).
// The caller (usually C/Python) is responsible for freeing this C string using FreeString.
func jsonSuccessResponse(
    message interface{},
) *C.char {

    // Marshal the provided Go data structure into JSON bytes.
    jsonData, err := json.Marshal(message)
    if err != nil {
        // If marshalling fails, return a JSON error response instead.
        return jsonErrorResponse("Failed to marshal success response", err)
    }
    // Format into the standard success structure.
    jsonSuccess := fmt.Sprintf(`{"state":"Success","message":%s}`, string(jsonData))
    // Convert the Go string to a C string (allocates memory in the C heap).
    return C.CString(jsonSuccess)
}
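
// For reference, an illustrative sketch of the resulting envelope (values
// invented): jsonSuccessResponse(CreateNodeResponse{...}) yields
//
//	{"state":"Success","message":{"addresses":["/ip4/127.0.0.1/tcp/4001/p2p/<peer-id>"],"isPublic":false}}
//
// which the caller parses before releasing the C string via FreeString.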

// escapeStringForJSON performs basic escaping of characters (like double quotes and backslashes)
// within a string to ensure it's safe to embed within a JSON string value.
// It uses Go's standard JSON encoder for robust escaping.
func escapeStringForJSON(
    s string,
) string {

    var buf bytes.Buffer
    // Encode the string using Go's JSON encoder, which handles escaping.
    json.NewEncoder(&buf).Encode(s)
    // The encoder adds surrounding quotes and a trailing newline, which we remove.
    res := buf.String()
    // Check bounds before slicing to avoid a panic.
    if len(res) > 2 && res[0] == '"' && res[len(res)-2] == '"' {
        return res[1 : len(res)-2] // Trim the surrounding quotes and the newline.
    }
    // Fallback if encoding behaves unexpectedly (e.g., empty string).
    return s
}

// getInstance is a helper to safely retrieve a node instance.
// It handles bounds checking, nil checks, and locking.
func getInstance(instanceIndex int) (*NodeInstance, error) {
    if instanceIndex < 0 || instanceIndex >= maxInstances {
        return nil, fmt.Errorf("invalid instance index: %d. Must be between 0 and %d", instanceIndex, maxInstances-1)
    }

    // Use a read lock, which is fast and allows concurrent reads.
    globalInstanceMutex.RLock()
    instance := allInstances[instanceIndex]
    globalInstanceMutex.RUnlock()

    if instance == nil {
        return nil, fmt.Errorf("instance %d is not initialized or has been closed", instanceIndex)
    }

    // Check the host as a proxy for *full* initialization,
    // as it is set late in CreateNode.
    if instance.host == nil {
        return nil, fmt.Errorf("instance %d is not fully initialized (host is nil)", instanceIndex)
    }

    return instance, nil
}

// newMessageStore initializes a new MessageStore.
func newMessageStore() *MessageStore {
    return &MessageStore{
        messagesByChannel: make(map[string]*list.List),
    }
}

func loadOrCreateIdentity(keyPath string) (crypto.PrivKey, error) {
    // Check whether the key file already exists.
    if _, err := os.Stat(keyPath); err == nil {
        // The key file exists: read and unmarshal it.
        bytes, err := os.ReadFile(keyPath)
        if err != nil {
            return nil, fmt.Errorf("failed to read existing key file: %w", err)
        }
        // Load the key.
        privKey, err := crypto.UnmarshalPrivateKey(bytes)
        if err != nil {
            return nil, fmt.Errorf("failed to unmarshal corrupt private key: %w", err)
        }
        return privKey, nil

    } else if os.IsNotExist(err) {
        // The key file does not exist: generate a new one.
        logger.Infof("[GO] 🔑 Generating new persistent peer identity in %s\n", keyPath)
        privKey, _, err := crypto.GenerateEd25519Key(rand.Reader)
        if err != nil {
            return nil, fmt.Errorf("failed to generate new key: %w", err)
        }

        // Marshal the new key to bytes.
        bytes, err := crypto.MarshalPrivateKey(privKey)
        if err != nil {
            return nil, fmt.Errorf("failed to marshal new private key: %w", err)
        }

        // Write the new key to a file.
        if err := os.WriteFile(keyPath, bytes, 0400); err != nil {
            return nil, fmt.Errorf("failed to write new key file: %w", err)
        }
        return privKey, nil

    } else {
        // Another error occurred (e.g., permissions).
        return nil, fmt.Errorf("failed to stat key file: %w", err)
    }
}
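
// exampleIdentityRoundTrip is an illustrative sketch (not part of the released
// file): loading the same key file twice must yield the same Peer ID, which is
// what makes the node identity persistent across restarts. The file name below
// is hypothetical.
func exampleIdentityRoundTrip(dir string) error {
    keyPath := filepath.Join(dir, "identity.key") // hypothetical file name
    k1, err := loadOrCreateIdentity(keyPath)      // generates the key on the first call
    if err != nil {
        return err
    }
    k2, err := loadOrCreateIdentity(keyPath) // reloads the same key on the second call
    if err != nil {
        return err
    }
    id1, err := peer.IDFromPrivateKey(k1)
    if err != nil {
        return err
    }
    id2, err := peer.IDFromPrivateKey(k2)
    if err != nil {
        return err
    }
    if id1 != id2 {
        return fmt.Errorf("identity not stable: %s != %s", id1, id2)
    }
    return nil
}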

func getListenAddrs(ips []string, tcpPort int, tlsMode string) ([]ma.Multiaddr, error) {
    if len(ips) == 0 {
        ips = []string{"0.0.0.0"}
    }

    var listenAddrs []ma.Multiaddr
    quicPort := 0
    webtransPort := 0
    webrtcPort := 0
    if tcpPort != 0 {
        quicPort = tcpPort + 1
        webtransPort = tcpPort + 2
        webrtcPort = tcpPort + 3
    }

    // --- Create multiaddrs for all transports from the single IP list ---
    for _, ip := range ips {
        // TCP
        tcpMaddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ip, tcpPort))
        // QUIC
        quicMaddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/udp/%d/quic-v1", ip, quicPort))
        // WebTransport
        webtransMaddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/udp/%d/quic-v1/webtransport", ip, webtransPort))
        // WebRTC Direct
        webrtcMaddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/udp/%d/webrtc-direct", ip, webrtcPort))

        listenAddrs = append(listenAddrs, tcpMaddr, quicMaddr, webtransMaddr, webrtcMaddr)

        switch tlsMode {
        case "autotls":
            // This is the special multiaddr that triggers AutoTLS.
            wssMaddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d/tls/sni/*.%s/ws", ip, tcpPort, p2pforge.DefaultForgeDomain))
            listenAddrs = append(listenAddrs, wssMaddr)
        case "domain":
            // This is the standard secure WebSocket address with the provided domain.
            wssMaddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d/tls/ws", ip, tcpPort))
            listenAddrs = append(listenAddrs, wssMaddr)
        default:
            // Fall back to a standard, non-secure WebSocket address.
            wsMaddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d/ws", ip, tcpPort))
            listenAddrs = append(listenAddrs, wsMaddr)
        }
    }

    logger.Debugf("[GO] 🔧 Prepared listen addresses: %v\n", listenAddrs)

    return listenAddrs, nil
}
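
// For example (derived directly from the logic above), ListenIPs ["0.0.0.0"],
// tcpPort 4001 and tlsMode "autotls" assign one port per transport
// (tcp, tcp+1 for QUIC, tcp+2 for WebTransport, tcp+3 for WebRTC) and yield:
//
//	/ip4/0.0.0.0/tcp/4001
//	/ip4/0.0.0.0/udp/4002/quic-v1
//	/ip4/0.0.0.0/udp/4003/quic-v1/webtransport
//	/ip4/0.0.0.0/udp/4004/webrtc-direct
//	/ip4/0.0.0.0/tcp/4001/tls/sni/*.<forge-domain>/ws
//
// where <forge-domain> stands for p2pforge.DefaultForgeDomain.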

func setupPubSub(ni *NodeInstance) error {
    psOptions := []pubsub.Option{
        // pubsub.WithFloodPublish(true),
        pubsub.WithMaxMessageSize(int(MaxMessageSize)),
    }
    ps, err := pubsub.NewGossipSub(ni.ctx, ni.host, psOptions...)
    if err != nil {
        return err
    }
    ni.pubsub = ps // Set the pubsub field on the instance.
    return nil
}

func setupNotifiers(ni *NodeInstance) {
    ni.host.Network().Notify(&network.NotifyBundle{
        ConnectedF: func(_ network.Network, conn network.Conn) {
            remotePeerID := conn.RemotePeer()
            logger.Debugf("[GO] 🔗 Instance %d: Event - Connected to %s (Direction: %s)\n", ni.instanceIndex, remotePeerID, conn.Stat().Direction)
            // --- Abort the graceful disconnect if one is active ---
            ni.disconnectionMutex.Lock()
            if cancelTimer, exists := ni.disconnectionTimers[remotePeerID]; exists {
                cancelTimer() // Stop the cleanup timer.
                delete(ni.disconnectionTimers, remotePeerID)
                logger.Debugf("[GO] ♻️ Instance %d: Peer %s reconnected within grace period. Cleanup aborted.\n", ni.instanceIndex, remotePeerID)
            }
            ni.disconnectionMutex.Unlock()
        },
        DisconnectedF: func(_ network.Network, conn network.Conn) {
            remotePeerID := conn.RemotePeer()
            logger.Debugf("[GO] 🔌 Instance %d: Event - Disconnected from %s\n", ni.instanceIndex, remotePeerID)

            // Get the host for this instance to query its network state.
            if ni.host == nil {
                // This shouldn't happen if the notifier is active, but it is a safe check.
                logger.Warnf("[GO] ⚠️ Instance %d: DisconnectedF: Host is nil, cannot perform connection check.\n", ni.instanceIndex)
                return
            }

            // Check whether this is the LAST connection to this peer.
            if len(ni.host.Network().ConnsToPeer(remotePeerID)) == 0 {
                // If it is a friendly peer, wait for the grace period; otherwise clean up immediately.
                ni.peersMutex.RLock()
                _, isFriendly := ni.friendlyPeers[remotePeerID]
                ni.peersMutex.RUnlock()

                if isFriendly {
                    logger.Debugf("[GO] ⏳ Instance %d: Last connection to %s closed. Starting %v grace period timer...\n", ni.instanceIndex, remotePeerID, DisconnectionGracePeriod)

                    // Create a context that we can cancel if they reconnect.
                    ctx, cancelTimer := context.WithCancel(context.Background())

                    ni.disconnectionMutex.Lock()
                    // If a timer already exists (rare race condition), cancel the old one first.
                    if oldCancel, exists := ni.disconnectionTimers[remotePeerID]; exists {
                        oldCancel()
                    }
                    ni.disconnectionTimers[remotePeerID] = cancelTimer
                    ni.disconnectionMutex.Unlock()

                    // Run the cleanup in a goroutine.
                    go func() {
                        select {
                        case <-time.After(DisconnectionGracePeriod):
                            // Timer expired! Proceed to cleanup.
                        case <-ctx.Done():
                            // Context cancelled (peer reconnected). Stop here.
                            return
                        case <-ni.ctx.Done():
                            // Node is shutting down. Stop here.
                            return
                        }

                        // --- Timer expired: execute the cleanup ---
                        // Remove from the timer map.
                        ni.disconnectionMutex.Lock()
                        // Double-check: were we cancelled while waiting for the lock?
                        if ctx.Err() != nil {
                            ni.disconnectionMutex.Unlock()
                            return
                        }
                        delete(ni.disconnectionTimers, remotePeerID)
                        ni.disconnectionMutex.Unlock()

                        // Final safety check: are they actually connected now?
                        // (Handles the race where they reconnect exactly when the timer fires.)
                        if len(ni.host.Network().ConnsToPeer(remotePeerID)) > 0 {
                            logger.Debugf("[GO] ⚠️ Instance %d: Grace period expired for %s, but peer is connected again. Skipping cleanup.\n", ni.instanceIndex, remotePeerID)
                            return
                        }

                        logger.Debugf("[GO] 🗑️ Instance %d: Grace period ended for %s. Removing peer data.\n", ni.instanceIndex, remotePeerID)

                        // Clean up friendlyPeers.
                        ni.peersMutex.Lock()
                        if _, exists := ni.friendlyPeers[remotePeerID]; exists {
                            delete(ni.friendlyPeers, remotePeerID)
                            logger.Debugf("[GO] Instance %d: Removed %s from friendlyPeers.\n", ni.instanceIndex, remotePeerID)
                        }
                        ni.peersMutex.Unlock()

                        // Clean up persistent streams.
                        ni.streamsMutex.Lock()
                        if stream, ok := ni.persistentChatStreams[remotePeerID]; ok {
                            logger.Debugf("[GO] Instance %d: Cleaning up persistent stream for %s.\n", ni.instanceIndex, remotePeerID)
                            _ = stream.Close()
                            delete(ni.persistentChatStreams, remotePeerID)
                        }
                        ni.streamsMutex.Unlock()
                    }()
                } else {
                    logger.Debugf("[GO] Instance %d: Last connection to %s closed. Removing from tracked peers.\n", ni.instanceIndex, remotePeerID)

                    // Also clean up the persistent stream if one existed for this peer.
                    ni.streamsMutex.Lock()
                    if stream, ok := ni.persistentChatStreams[remotePeerID]; ok {
                        logger.Debugf("[GO] Instance %d: Cleaning up persistent stream for disconnected peer %s via DisconnectedF notifier.\n", ni.instanceIndex, remotePeerID)
                        _ = stream.Close() // Attempt a graceful close.
                        delete(ni.persistentChatStreams, remotePeerID)
                    }
                    ni.streamsMutex.Unlock()
                }
            } else {
                logger.Debugf("[GO] Instance %d: DisconnectedF: Still have %d active connections to %s, not removing.\n", ni.instanceIndex, len(ni.host.Network().ConnsToPeer(remotePeerID)), remotePeerID)
            }
        },
    })
}

// enforceProtocolCompliance ensures that any connected peer supports the required chat protocol.
// If a peer finishes identification but lacks the protocol, it is immediately disconnected.
func enforceProtocolCompliance(ni *NodeInstance) {
    // Subscribe to the identification-completed event.
    sub, err := ni.host.EventBus().Subscribe(new(event.EvtPeerIdentificationCompleted))
    if err != nil {
        logger.Errorf("[GO] ❌ Instance %d: Failed to subscribe to identification events: %v", ni.instanceIndex, err)
        return
    }

    logger.Infof("[GO] 🛡️ Instance %d: Strict isolation ENABLED. Monitoring for non-compliant peers.", ni.instanceIndex)

    go func() {
        defer sub.Close()
        for {
            select {
            case <-ni.ctx.Done():
                return
            case evt, ok := <-sub.Out():
                if !ok {
                    return
                }
                idEvt := evt.(event.EvtPeerIdentificationCompleted)

                // Skip the check for self.
                if idEvt.Peer == ni.host.ID() {
                    continue
                }

                isCompliant := false
                for _, proto := range idEvt.Protocols {
                    if string(proto) == UnaiverseChatProtocol {
                        isCompliant = true
                        break
                    }
                }

                // Action: disconnect if not compliant.
                if !isCompliant {
                    logger.Warnf("[GO] 🚫 Instance %d: Kicking peer %s. (Reason: protocol mismatch).", ni.instanceIndex, idEvt.Peer)
                    // Disconnect.
                    ni.host.Network().ClosePeer(idEvt.Peer)
                    // Optional: clean from the peerstore to free memory immediately.
                    ni.host.Peerstore().RemovePeer(idEvt.Peer)
                } else {
                    logger.Debugf("[GO] ✅ Instance %d: Peer %s verified compliant.", ni.instanceIndex, idEvt.Peer)
                }
            }
        }
    }()
}

// handleAddressUpdateEvents listens for libp2p address changes and updates the local cache.
func handleAddressUpdateEvents(ni *NodeInstance, sub event.Subscription) {
    defer sub.Close()

    // Initialize the cache immediately with the current state to avoid race conditions at startup.
    ni.addrMutex.Lock()
    ni.localAddrs = ni.host.Addrs()
    ni.addrMutex.Unlock()

    for {
        select {
        case <-ni.ctx.Done():
            return
        case _, ok := <-sub.Out():
            if !ok {
                return
            }
            // We only use the event as a trigger, but we take the addresses from the host.
            allAddresses := ni.host.Addrs()
            ni.addrMutex.Lock()
            ni.localAddrs = allAddresses
            ni.addrMutex.Unlock()

            // Log the addresses to verify.
            addrsStr := make([]string, len(allAddresses))
            for i, a := range allAddresses {
                addrsStr[i] = a.String()
            }
            logger.Infof("[GO] 🔄 Instance %d: Updated local addresses (updating cache). Addrs: %v", ni.instanceIndex, addrsStr)
        }
    }
}

// isSuitableForPeerSource is a helper to filter peers for PeerSource.
func (ni *NodeInstance) isSuitableForPeerSource(pid peer.ID) bool {
    ps := ni.host.Peerstore()

    // 1. Check for the relay hop protocol (NEEDED).
    protocols, err := ps.GetProtocols(pid)
    if err != nil {
        return false
    }
    isRelay := false
    for _, proto := range protocols {
        if proto == "/libp2p/circuit/relay/0.2.0/hop" {
            isRelay = true
            break
        }
    }
    if !isRelay {
        return false
    }

    // 2. Only accept wss-enabled nodes as relays.
    addrs := ps.Addrs(pid)
    isSuitable := false
    for _, addr := range addrs {
        _, err = addr.ValueForProtocol(ma.P_WS)
        if err == nil {
            _, err := addr.ValueForProtocol(ma.P_TLS)
            if err == nil {
                isSuitable = true
            }
        }
    }

    return isSuitable
}

// PeerSource acts as the peer discovery backend for AutoRelay.
// It combines a local cache lookup (fast/free) with a DHT random walk (slow/expensive).
func (ni *NodeInstance) PeerSource(ctx context.Context, numPeers int) <-chan peer.AddrInfo {
    out := make(chan peer.AddrInfo)

    go func() {
        defer close(out)

        // Safety checks: ensure the host and DHT are fully initialized.
        if ni.host == nil || ni.dht == nil {
            return
        }

        // Keep track of peers we've already sent in this batch.
        sentPeers := make(map[peer.ID]struct{})
        peersFound := 0

        // --- PHASE 1: Scavenge the local peerstore ---
        localPeers := ni.host.Peerstore().Peers()
        for _, pid := range localPeers {
            if peersFound >= numPeers {
                return
            }
            if pid == ni.host.ID() {
                continue
            }

            // Add it if it meets our criteria.
            if ni.isSuitableForPeerSource(pid) {
                info := ni.host.Peerstore().PeerInfo(pid)
                if len(info.Addrs) == 0 {
                    continue
                }

                select {
                case out <- info:
                    sentPeers[pid] = struct{}{}
                    peersFound++
                case <-ctx.Done():
                    return
                }
            }
        }

        // --- PHASE 2: DHT random walk ---
        if peersFound < numPeers {
            logger.Debugf("[GO] ⚠️ Instance %d: Local peerstore insufficient (%d/%d). Starting DHT walk...",
                ni.instanceIndex, peersFound, numPeers)

            for peersFound < numPeers {
                randomKey := make([]byte, 32)
                rand.Read(randomKey)
                randomKeyStr := string(randomKey)

                candidatePIDs, err := ni.dht.GetClosestPeers(ctx, randomKeyStr)
                if err != nil {
                    select {
                    case <-ctx.Done():
                        return
                    case <-time.After(2 * time.Second):
                        continue
                    }
                }

                for _, pid := range candidatePIDs {
                    if peersFound >= numPeers {
                        return
                    }
                    if pid == ni.host.ID() {
                        continue
                    }
                    if _, alreadySent := sentPeers[pid]; alreadySent {
                        continue
                    }

                    info := ni.host.Peerstore().PeerInfo(pid)
                    if len(info.Addrs) > 0 {
                        select {
                        case out <- info:
                            sentPeers[pid] = struct{}{}
                            peersFound++
                        case <-ctx.Done():
                            return
                        }
                    }
                }
            }
        }
    }()

    return out
}

// --- Core logic functions ---

// storeReceivedMessage processes a raw message received either from a direct stream
// or a PubSub topic. The sender peer ID and the channel to store under are determined
// in handleStream and readFromSubscription.
func storeReceivedMessage(
    ni *NodeInstance,
    from peer.ID,
    channel string,
    data []byte,
) {
    // Get the message store for this instance.
    store := ni.messageStore
    if store == nil {
        logger.Errorf("[GO] ❌ storeReceivedMessage: Message store not initialized for instance %d\n", ni.instanceIndex)
        return // Cannot process the message if the store is nil.
    }

    // Create the minimal message envelope.
    newMessage := &QueuedMessage{
        From: from,
        Data: data,
    }

    // Lock the store mutex before accessing the shared maps.
    store.mu.Lock()
    defer store.mu.Unlock()

    // Check whether this channel already has a message list.
    messageList, channelExists := store.messagesByChannel[channel]
    if !channelExists {
        // The channel does not exist: check whether we can create a new message queue.
        if len(store.messagesByChannel) >= maxUniqueChannels {
            logger.Warnf("[GO] 🗑️ Instance %d: Message store full. Discarding message for new channel '%s'.\n", ni.instanceIndex, channel)
            return
        }
        messageList = list.New()
        store.messagesByChannel[channel] = messageList
        logger.Debugf("[GO] ✨ Instance %d: Created new channel queue '%s'. Total channels: %d\n", ni.instanceIndex, channel, len(store.messagesByChannel))
    }

    // If the channel already has a message list, check its length.
    if messageList.Len() >= maxChannelQueueLen {
        logger.Warnf("[GO] 🗑️ Instance %d: Queue for channel '%s' full. Discarding message.\n", ni.instanceIndex, channel)
        return
    }

    messageList.PushBack(newMessage)
    logger.Debugf("[GO] 📥 Instance %d: Queued message on channel '%s' from %s. New queue length: %d\n", ni.instanceIndex, channel, from, messageList.Len())
}
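
// popQueuedMessage is an illustrative sketch (not part of the released file):
// the consuming counterpart of storeReceivedMessage, removing the oldest
// message from a channel's FIFO queue under the same store mutex. It returns
// nil when the channel is unknown or its queue is empty.
func popQueuedMessage(ni *NodeInstance, channel string) *QueuedMessage {
    store := ni.messageStore
    if store == nil {
        return nil
    }
    store.mu.Lock()
    defer store.mu.Unlock()
    messageList, ok := store.messagesByChannel[channel]
    if !ok || messageList.Len() == 0 {
        return nil
    }
    front := messageList.Front()        // Oldest element: PushBack enqueues at the back.
    messageList.Remove(front)           // Dequeue it.
    return front.Value.(*QueuedMessage) // Stored as *QueuedMessage by storeReceivedMessage.
}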

// readFromSubscription runs as a dedicated goroutine for each active PubSub subscription of a specific instance.
// It continuously waits for new messages on the subscription's channel (`sub.Next(ctx)`),
// routes them to `storeReceivedMessage`, and handles errors and context cancellation gracefully.
// You need to provide the full channel to uniquely identify the subscription.
func readFromSubscription(
    ni *NodeInstance,
    sub *pubsub.Subscription,
) {
    // Get the topic string directly from the subscription object.
    topic := sub.Topic()

    if ni.ctx == nil || ni.host == nil {
        logger.Errorf("[GO] ❌ readFromSubscription: Context or host not initialized for instance %d. Exiting goroutine.\n", ni.instanceIndex)
        return
    }

    logger.Infof("[GO] 🎧 Instance %d: Started listener goroutine for topic: %s\n", ni.instanceIndex, topic)
    defer logger.Infof("[GO] 🎧 Instance %d: Exiting listener goroutine for topic: %s\n", ni.instanceIndex, topic) // Log when the goroutine exits.

    for {
        // Check whether the main context has been cancelled (e.g., during node shutdown).
        if ni.ctx.Err() != nil {
            logger.Debugf("[GO] 🛑 Instance %d: Context cancelled, stopping listener goroutine for topic: %s\n", ni.instanceIndex, topic)
            return // Exit the goroutine.
        }

        // Wait for the next message from the subscription. This blocks until a message
        // arrives, the context is cancelled, or an error occurs.
        msg, err := sub.Next(ni.ctx)
        if err != nil {
            // Check for expected errors during shutdown or cancellation.
            if err == context.Canceled || err == context.DeadlineExceeded || err == pubsub.ErrSubscriptionCancelled || ni.ctx.Err() != nil {
                logger.Debugf("[GO] 🛑 Instance %d: Subscription listener for topic '%s' stopping gracefully: %v\n", ni.instanceIndex, topic, err)
                return // Exit the goroutine cleanly.
            }
            // Handle EOF, which can sometimes occur. Treat it as a reason to stop.
            if err == io.EOF {
                logger.Debugf("[GO] 🛑 Instance %d: Subscription listener for topic '%s' encountered EOF, stopping: %v\n", ni.instanceIndex, topic, err)
                return // Exit the goroutine.
            }
            // Log other errors but attempt to continue (they might be transient).
            logger.Errorf("[GO] ❌ Instance %d: Error reading from subscription '%s': %v. Continuing...\n", ni.instanceIndex, topic, err)
            // Pause briefly to avoid busy-looping on persistent errors.
            time.Sleep(1 * time.Second)
            continue // Continue the loop to try reading again.
        }

        logger.Infof("[GO] 💬 Instance %d (id: %s): Received new PubSub message on topic '%s' from %s\n", ni.instanceIndex, ni.host.ID().String(), topic, msg.GetFrom())

        // Ignore messages published by the local node itself.
        if msg.GetFrom() == ni.host.ID() {
            continue // Skip processing self-sent messages.
        }

        // Handle rendezvous or standard messages.
        if strings.HasSuffix(topic, ":rv") {
            // This is a rendezvous update.
            // 1. First, unmarshal the outer Protobuf message.
            var protoMsg pg.Message
            if err := proto.Unmarshal(msg.Data, &protoMsg); err != nil {
                logger.Warnf("[GO] ⚠️ Instance %d: Could not decode Protobuf message on topic '%s': %v\n", ni.instanceIndex, topic, err)
                continue
            }

            // 2. The actual payload is a JSON string within the 'json_content' field.
            jsonPayload := protoMsg.GetJsonContent()
            if jsonPayload == "" {
                logger.Warnf("[GO] ⚠️ Instance %d: Rendezvous message on topic '%s' has empty JSON content.\n", ni.instanceIndex, topic)
                continue
            }

            // 3. Now, unmarshal the inner JSON payload.
            var updatePayload struct {
                Peers       []ExtendedPeerInfo `json:"peers"`
                UpdateCount int64              `json:"update_count"`
            }
            if err := json.Unmarshal([]byte(jsonPayload), &updatePayload); err != nil {
                logger.Warnf("[GO] ⚠️ Instance %d: Could not decode rendezvous update payload on topic '%s': %v\n", ni.instanceIndex, topic, err)
                continue // Skip this malformed message.
            }

            // 4. Create a new map from the decoded peer list.
            newPeerMap := make(map[peer.ID]ExtendedPeerInfo)
            for _, peerInfo := range updatePayload.Peers {
                newPeerMap[peerInfo.ID] = peerInfo
            }

            // 5. Safely replace the old map with the new one.
            ni.rendezvousMutex.Lock()
            // If this is the first update for this instance, initialize the state struct.
            if ni.rendezvousState == nil {
                ni.rendezvousState = &RendezvousState{}
            }
            rendezvousState := ni.rendezvousState
            rendezvousState.Peers = newPeerMap
            rendezvousState.UpdateCount = updatePayload.UpdateCount
            ni.rendezvousMutex.Unlock()

            logger.Debugf("[GO] ✅ Instance %d: Updated rendezvous peers from topic '%s'. Found %d peers. Update count: %d.\n", ni.instanceIndex, topic, len(newPeerMap), updatePayload.UpdateCount)
        } else {
            // This is a standard message. Queue it as before.
            logger.Debugf("[GO] 📥 Instance %d: Storing new pubsub message from topic '%s'.\n", ni.instanceIndex, topic)
            storeReceivedMessage(ni, msg.GetFrom(), topic, msg.Data)
        }
    }
}

// handleStream reads from a direct message stream using the framing protocol.
// It expects the stream to start with a 4-byte length prefix, followed by a 1-byte channel name length,
// the channel name itself, and finally the Protobuf-encoded payload.
func handleStream(ni *NodeInstance, s network.Stream) {
    senderPeerID := s.Conn().RemotePeer()
    streamID := s.ID()
    ni.peersMutex.Lock()
    existingPeer, peerExists := ni.friendlyPeers[senderPeerID]

    // 1. Gather fresh info (addresses and direction).
    direction := "incoming"
    if s.Stat().Direction == network.DirOutbound {
        direction = "outgoing"
    }
    knownAddrs := ni.host.Peerstore().Addrs(senderPeerID)
    if len(knownAddrs) == 0 {
        knownAddrs = []ma.Multiaddr{s.Conn().RemoteMultiaddr()}
    }

    if !peerExists {
        // CASE A: New application peer.
        ni.friendlyPeers[senderPeerID] = ExtendedPeerInfo{
            ID:          senderPeerID,
            Addrs:       knownAddrs,
            ConnectedAt: time.Now(),
            Direction:   direction,
            Relayed:     false,
        }
        logger.Infof("[GO] ➕ Instance %d: Peer %s promoted to app peer via stream %s (incoming).", ni.instanceIndex, senderPeerID, streamID)
    } else {
        // CASE B: Existing peer - update its addresses.
        // We keep ConnectedAt and Direction from the original session start.
        existingPeer.Addrs = knownAddrs
        ni.friendlyPeers[senderPeerID] = existingPeer
        logger.Debugf("[GO] 🔄 Instance %d: Refreshed addresses for peer %s via stream %s.", ni.instanceIndex, senderPeerID, streamID)
    }
    ni.peersMutex.Unlock()
    logger.Debugf("[GO] 📥 Instance %d: Accepted INCOMING stream %s from %s. Storing for duplex use.\n", ni.instanceIndex, streamID, senderPeerID)

    // Store the newly accepted stream so we can use it to send messages back to this peer.
    ni.streamsMutex.Lock()
    ni.persistentChatStreams[senderPeerID] = s
    ni.streamsMutex.Unlock()

    // This defer block ensures cleanup happens when the stream is closed by either side.
    defer func() {
        logger.Debugf("[GO] 🧹 Instance %d: Stream %s with %s closed. Removing from map.\n", ni.instanceIndex, streamID, senderPeerID)
        ni.streamsMutex.Lock()
        if current, ok := ni.persistentChatStreams[senderPeerID]; ok && current == s {
            delete(ni.persistentChatStreams, senderPeerID)
        }
        ni.streamsMutex.Unlock()
        s.Close() // Ensure the stream is fully closed.
    }()

    for {
        // Read the 4-byte total length.
        var totalLen uint32
        if err := binary.Read(s, binary.BigEndian, &totalLen); err != nil {
            if err == io.EOF {
                logger.Debugf("[GO] 🛑 Instance %d: Stream %s with %s closed (EOF).\n", ni.instanceIndex, streamID, senderPeerID)
            } else if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
                logger.Warnf("[GO] ⏳ Instance %d: Timeout reading length from stream %s (%s): %v\n", ni.instanceIndex, streamID, senderPeerID, err)
            } else if errors.Is(err, network.ErrReset) {
                logger.Warnf("[GO] ✂️ Instance %d: Stream %s with %s reset.\n", ni.instanceIndex, streamID, senderPeerID)
            } else {
                logger.Errorf("[GO] ❌ Instance %d: Error reading length from stream %s (%s): %v\n", ni.instanceIndex, streamID, senderPeerID, err)
            }
            return
        }

        if totalLen > MaxMessageSize {
            logger.Errorf("[GO] ❌ Instance %d: Message len %d exceeds limit on stream %s. Resetting.\n", ni.instanceIndex, totalLen, streamID)
            s.Reset()
            return
        }

        // Read the channel length.
        var channelLen uint8
        if err := binary.Read(s, binary.BigEndian, &channelLen); err != nil {
            logger.Errorf("[GO] ❌ Instance %d: Error reading channel len from stream %s: %v\n", ni.instanceIndex, streamID, err)
            return
        }

        // Read the channel name.
        channelBytes := make([]byte, channelLen)
        if _, err := io.ReadFull(s, channelBytes); err != nil {
            logger.Errorf("[GO] ❌ Instance %d: Error reading channel from stream %s: %v\n", ni.instanceIndex, streamID, err)
            return
        }
        channel := string(channelBytes)

        // Read the payload.
        payloadLen := totalLen - uint32(channelLen) - 1
        payload := make([]byte, payloadLen)
        if _, err := io.ReadFull(s, payload); err != nil {
            logger.Errorf("[GO] ❌ Instance %d: Error reading payload from stream %s: %v\n", ni.instanceIndex, streamID, err)
            return
        }

        logger.Infof("[GO] 📨 Instance %d: Received msg on channel '%s' via stream %s from %s.\n", ni.instanceIndex, channel, streamID, senderPeerID)
        storeReceivedMessage(ni, senderPeerID, channel, payload)
    }
}

// setupDirectMessageHandler configures the libp2p host for a specific instance
// to listen for incoming streams using the custom chat protocol.
// When a peer opens a stream with this protocol ID, the provided handler function
// is invoked to manage communication on that stream.
func setupDirectMessageHandler(
    ni *NodeInstance,
) {
    if ni.host == nil {
        logger.Errorf("[GO] ❌ Instance %d: Cannot set up direct message handler: host not initialized\n", ni.instanceIndex)
        return
    }

    // Set a handler function for the UnaiverseChatProtocol. This function will be called
    // automatically by libp2p whenever a new incoming stream for this protocol is accepted.
    // Use a closure to capture the NodeInstance pointer.
    ni.host.SetStreamHandler(UnaiverseChatProtocol, func(s network.Stream) {
        handleStream(ni, s)
    })
}

// writeDirectMessageFrame constructs and writes a message using the framing protocol for direct messages.
// It takes a writer (e.g., a network stream), the channel name, and the payload data.
// The message format is:
//   - 4-byte total length (covering all the following parts)
//   - 1-byte channel name length
//   - channel name (as a UTF-8 string)
//   - payload (Protobuf-encoded data)
func writeDirectMessageFrame(w io.Writer, channel string, payload []byte) error {
    channelBytes := []byte(channel)
    channelLen := uint8(len(channelBytes))

    // Check whether the channel name is too long for our 1-byte length prefix.
    if len(channelBytes) > 255 {
        return fmt.Errorf("channel name exceeds 255 bytes limit: %s", channel)
    }

    // Total length = 1 (for the channel length byte) + len(channel) + len(payload).
    totalLength := uint32(1 + len(channelBytes) + len(payload))

    // --- Add a size check before writing ---
    if totalLength > MaxMessageSize {
        return fmt.Errorf("outgoing message size (%d) exceeds limit (%d)", totalLength, MaxMessageSize)
    }

    buf := new(bytes.Buffer)

    // Write the total length (4 bytes).
    if err := binary.Write(buf, binary.BigEndian, totalLength); err != nil {
        return fmt.Errorf("failed to write total length: %w", err)
    }
    // Write the channel length (1 byte).
    if err := binary.Write(buf, binary.BigEndian, channelLen); err != nil {
        return fmt.Errorf("failed to write channel length: %w", err)
    }
    // Write the channel name.
    if _, err := buf.Write(channelBytes); err != nil {
        return fmt.Errorf("failed to write channel name: %w", err)
    }
    // Write the payload.
    if _, err := buf.Write(payload); err != nil {
        return fmt.Errorf("failed to write payload: %w", err)
    }

    // Write the entire frame to the stream.
    if _, err := w.Write(buf.Bytes()); err != nil {
        return fmt.Errorf("failed to write framed message to stream: %w", err)
    }
    return nil
}
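
// readDirectMessageFrame is an illustrative sketch (not part of the released
// file): the mirror image of writeDirectMessageFrame, decoding one frame the
// way handleStream does (4-byte big-endian total length, 1-byte channel
// length, channel name, then the Protobuf payload). It returns the channel
// name and the raw payload.
func readDirectMessageFrame(r io.Reader) (string, []byte, error) {
    var totalLen uint32
    if err := binary.Read(r, binary.BigEndian, &totalLen); err != nil {
        return "", nil, fmt.Errorf("failed to read total length: %w", err)
    }
    if totalLen > MaxMessageSize {
        return "", nil, fmt.Errorf("incoming message size (%d) exceeds limit (%d)", totalLen, MaxMessageSize)
    }
    var channelLen uint8
    if err := binary.Read(r, binary.BigEndian, &channelLen); err != nil {
        return "", nil, fmt.Errorf("failed to read channel length: %w", err)
    }
    if uint32(channelLen)+1 > totalLen {
        return "", nil, fmt.Errorf("invalid frame: channel length %d exceeds total %d", channelLen, totalLen)
    }
    channel := make([]byte, channelLen)
    if _, err := io.ReadFull(r, channel); err != nil {
        return "", nil, fmt.Errorf("failed to read channel name: %w", err)
    }
    payload := make([]byte, totalLen-uint32(channelLen)-1)
    if _, err := io.ReadFull(r, payload); err != nil {
        return "", nil, fmt.Errorf("failed to read payload: %w", err)
    }
    return string(channel), payload, nil
}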
|
|
1074
|
+
|
|
1075
|
+
// goGetNodeAddresses is the internal Go function that performs the core logic
// of fetching and formatting node addresses.
// It takes a pointer to a NodeInstance and a targetPID. If targetPID is empty (peer.ID("")),
// it fetches addresses for the local node of the given instance.
// It returns a slice of fully formatted multiaddress strings and an error if one occurs.
func goGetNodeAddresses(
    ni *NodeInstance,
    targetPID peer.ID,
) ([]string, error) {
    if ni.host == nil {
        errMsg := fmt.Sprintf("Instance %d: Host not initialized", ni.instanceIndex)
        logger.Errorf("[GO] ❌ goGetNodeAddresses: %s\n", errMsg)
        return nil, fmt.Errorf("%s", errMsg)
    }

    // Determine the actual Peer ID to resolve addresses for.
    resolvedPID := targetPID
    isThisNode := false
    if targetPID == "" || targetPID == ni.host.ID() {
        resolvedPID = ni.host.ID()
        isThisNode = true
    }

    // --- 1. Gather all candidate addresses from the host and peerstore ---
    var candidateAddrs []ma.Multiaddr
    if isThisNode {
        ni.addrMutex.RLock()
        candidateAddrs = append(candidateAddrs, ni.localAddrs...)
        // candidateAddrs = append(candidateAddrs, ni.privateRelayAddrs...)
        ni.addrMutex.RUnlock()
    } else {
        // --- Remote Peer Addresses ---
        ni.peersMutex.RLock()
        if epi, exists := ni.friendlyPeers[resolvedPID]; exists {
            candidateAddrs = append(candidateAddrs, epi.Addrs...)
        }
        ni.peersMutex.RUnlock()
        candidateAddrs = append(candidateAddrs, ni.host.Peerstore().Addrs(resolvedPID)...)
    }

    // --- 2. Process and filter candidate addresses ---
    addrSet := make(map[string]struct{})
    for _, addr := range candidateAddrs {
        // if addr == nil || manet.IsIPLoopback(addr) || manet.IsIPUnspecified(addr) {
        // 	continue
        // }

        // Use the idiomatic `peer.SplitAddr` to check if the address already includes a Peer ID.
        var finalAddr ma.Multiaddr
        transportAddr, idInAddr := peer.SplitAddr(addr)
        if transportAddr == nil {
            continue
        }

        // Skip bare relay-circuit addresses and wildcard addresses.
        if strings.HasPrefix(transportAddr.String(), "/p2p-circuit/") {
            continue
        }
        if strings.Contains(transportAddr.String(), "*") {
            continue
        }

        // Handle cases based on the presence and correctness of the Peer ID in the address.
        switch {
        case idInAddr == resolvedPID:
            // Case A: The address is already perfect and has the correct Peer ID. Use it as is.
            finalAddr = addr

        case idInAddr == "":
            // Case B: The address is missing a Peer ID. This is common for addresses from the
            // peerstore and for relayed addresses like `/p2p/RELAY_ID/p2p-circuit`. We must append ours.
            p2pComponent, _ := ma.NewMultiaddr(fmt.Sprintf("/p2p/%s", resolvedPID.String()))
            finalAddr = addr.Encapsulate(p2pComponent)

        default:
            // Case C: The address has the WRONG Peer ID. This is stale or incorrect data. Discard it.
            logger.Warnf("[GO] ⚠️ Instance %d: Discarding stale address for peer %s: %s\n", ni.instanceIndex, resolvedPID, addr)
            continue
        }
        addrSet[finalAddr.String()] = struct{}{}
    }

    // --- 3. Convert the final set of unique addresses to a slice for returning. ---
    result := make([]string, 0, len(addrSet))
    for addr := range addrSet {
        result = append(result, addr)
    }

    if len(result) == 0 {
        logger.Warnf("[GO] ⚠️ goGetNodeAddresses: No suitable addresses found for peer %s.", resolvedPID)
    }

    return result, nil
}

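// Illustrative sketch (NOT part of the library): the Case B normalization above in
// isolation. Given a transport address without a trailing /p2p component, Encapsulate
// appends one so the result is directly dialable. The helper name is hypothetical;
// only ma.NewMultiaddr, peer.SplitAddr and Encapsulate are real API calls used above.
func appendPeerIDSketch(addr ma.Multiaddr, pid peer.ID) (ma.Multiaddr, error) {
    if _, existing := peer.SplitAddr(addr); existing != "" {
        // Already carries a peer ID (the real logic above also validates it matches).
        return addr, nil
    }
    p2pComponent, err := ma.NewMultiaddr(fmt.Sprintf("/p2p/%s", pid.String()))
    if err != nil {
        return nil, err
    }
    return addr.Encapsulate(p2pComponent), nil
}
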
// Close gracefully shuts down all components of this node instance.
// This REPLACES the old `closeSingleInstance` function.
func (ni *NodeInstance) Close() error {
    logger.Infof("[GO] Instance %d: Closing node...", ni.instanceIndex)

    // --- Stop Cert Manager FIRST ---
    if ni.certManager != nil {
        logger.Debugf("[GO] - Instance %d: Stopping AutoTLS cert manager...\n", ni.instanceIndex)
        ni.certManager.Stop()
    }

    // --- Cancel Main Context ---
    if ni.cancel != nil {
        logger.Debugf("[GO] - Instance %d: Cancelling main context...\n", ni.instanceIndex)
        ni.cancel()
    }

    // Give goroutines time to react to context cancellation
    time.Sleep(200 * time.Millisecond)

    // --- Close DHT Client ---
    if ni.dht != nil {
        logger.Debugf("[GO] - Instance %d: Closing DHT...\n", ni.instanceIndex)
        if err := ni.dht.Close(); err != nil {
            logger.Warnf("[GO] ⚠️ Instance %d: Error closing DHT: %v\n", ni.instanceIndex, err)
        }
        ni.dht = nil
    }

    // --- Close AutoRelay ---
    if ni.privateRelay != nil {
        logger.Debugf("[GO] - Instance %d: Closing AutoRelay service...\n", ni.instanceIndex)
        if err := ni.privateRelay.Close(); err != nil {
            logger.Warnf("[GO] ⚠️ Instance %d: Error closing AutoRelay: %v\n", ni.instanceIndex, err)
        }
        ni.privateRelay = nil
    }

    // --- Close Persistent Outgoing Streams ---
    ni.streamsMutex.Lock()
    if len(ni.persistentChatStreams) > 0 {
        logger.Debugf("[GO] - Instance %d: Closing %d persistent outgoing streams...\n", ni.instanceIndex, len(ni.persistentChatStreams))
        for pid, stream := range ni.persistentChatStreams {
            logger.Debugf("[GO] - Instance %d: Closing stream to %s\n", ni.instanceIndex, pid)
            _ = stream.Close() // Attempt graceful close
        }
    }
    ni.persistentChatStreams = make(map[peer.ID]network.Stream) // Clear the map
    ni.streamsMutex.Unlock()

    // --- Clean Up PubSub State ---
    ni.pubsubMutex.Lock()
    if len(ni.subscriptions) > 0 {
        logger.Debugf("[GO] - Instance %d: Ensuring PubSub subscriptions (%d) are cancelled...\n", ni.instanceIndex, len(ni.subscriptions))
        for channel, sub := range ni.subscriptions {
            logger.Debugf("[GO] - Instance %d: Cancelling subscription to topic: %s\n", ni.instanceIndex, channel)
            sub.Cancel()
        }
    }
    ni.subscriptions = make(map[string]*pubsub.Subscription) // Clear the map
    ni.topics = make(map[string]*pubsub.Topic)               // Clear the map
    ni.pubsubMutex.Unlock()

    // --- Close Host Instance ---
    var hostErr error
    if ni.host != nil {
        logger.Debugf("[GO] - Instance %d: Closing host instance...\n", ni.instanceIndex)
        hostErr = ni.host.Close()
        if hostErr != nil {
            logger.Warnf("[GO] ⚠️ %s (proceeding with cleanup)\n", hostErr)
        } else {
            logger.Debugf("[GO] - Instance %d: Host closed successfully.\n", ni.instanceIndex)
        }
    }

    // --- Clear Remaining State for this instance ---
    ni.peersMutex.Lock()
    ni.friendlyPeers = make(map[peer.ID]ExtendedPeerInfo) // Clear the map
    ni.peersMutex.Unlock()

    // Clear also the addresses
    ni.addrMutex.Lock()
    ni.localAddrs = nil
    // ni.privateRelayAddrs = nil
    ni.addrMutex.Unlock()

    // Clear the MessageStore for this instance
    if ni.messageStore != nil {
        ni.messageStore.mu.Lock()
        ni.messageStore.messagesByChannel = make(map[string]*list.List) // Clear the message store
        ni.messageStore.mu.Unlock()
    }
    logger.Debugf("[GO] - Instance %d: Cleared connected peers map and message buffer.\n", ni.instanceIndex)

    // Clear the rendezvous state for this instance
    ni.rendezvousMutex.Lock()
    ni.rendezvousState = nil // Clear the state
    ni.rendezvousMutex.Unlock()

    // Explicitly cancel all running grace period timers so goroutines exit immediately.
    ni.disconnectionMutex.Lock()
    if len(ni.disconnectionTimers) > 0 {
        logger.Debugf("[GO] - Instance %d: Cancelling %d active disconnection timers...\n", ni.instanceIndex, len(ni.disconnectionTimers))
        for _, cancelTimer := range ni.disconnectionTimers {
            cancelTimer()
        }
    }
    ni.disconnectionTimers = nil // Clear the map
    ni.disconnectionMutex.Unlock()

    // Nil out components to signify the instance is fully closed
    ni.host = nil
    ni.pubsub = nil
    ni.ctx = nil
    ni.cancel = nil
    ni.certManager = nil
    ni.messageStore = nil

    if hostErr != nil {
        return hostErr
    }

    logger.Infof("[GO] ✅ Instance %d: Node closed successfully.\n", ni.instanceIndex)
    return nil
}

// --- Exported C Functions ---
// These functions are callable from C (and thus Python). They act as the API boundary.

// InitializeLibrary configures logging and the global limits of the library.
// This function MUST be called once from Python before any other library function.
//
//export InitializeLibrary
func InitializeLibrary(
    maxInstancesC C.int,
    maxUniqueChannelsC C.int,
    maxChannelQueueLenC C.int,
    maxMessageSizeC C.int,
    logConfigJSONC *C.char,
) {
    // --- Configure Logging FIRST ---
    log.SetFlags(log.LstdFlags | log.Lmicroseconds)
    configStr := C.GoString(logConfigJSONC)
    golog.SetAllLoggers(golog.LevelFatal)
    if configStr != "" {
        var logLevels map[string]string
        if err := json.Unmarshal([]byte(configStr), &logLevels); err != nil {
            log.Printf("[GO] ⚠️ Invalid log config JSON: %v. Using defaults.\n", err)
        } else {
            for loggerName, levelStr := range logLevels {
                if err := golog.SetLogLevel(loggerName, levelStr); err != nil {
                    log.Printf("[GO] ⚠️ Failed to set log level for '%s': %v\n", loggerName, err)
                }
            }
        }
    }

    maxInstances = int(maxInstancesC)
    maxUniqueChannels = int(maxUniqueChannelsC)
    maxChannelQueueLen = int(maxChannelQueueLenC)
    MaxMessageSize = uint32(maxMessageSizeC)

    // Initialize the *single* global slice.
    allInstances = make([]*NodeInstance, maxInstances)
    logger.Infof("[GO] ✅ Go library initialized with MaxInstances=%d, MaxUniqueChannels=%d, MaxChannelQueueLen=%d and MaxMessageSize=%d\n", maxInstances, maxUniqueChannels, maxChannelQueueLen, MaxMessageSize)
}

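// Illustrative sketch (NOT part of the shipped library): exercising the exported entry
// point from Go itself, as a cgo test harness might. The limit values and the log-level
// map below are arbitrary example choices, not recommended settings.
func exampleInitializeLibrary() {
    logCfg := C.CString(`{"autorelay":"error"}`)
    defer C.free(unsafe.Pointer(logCfg))
    // 4 instances, 64 unique channels, 256 queued messages per channel, 1 MiB max message size.
    InitializeLibrary(C.int(4), C.int(64), C.int(256), C.int(1<<20), logCfg)
}
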
|
|
1336
|
+
// CreateNode initializes and starts a new libp2p host (node) for a specific instance.
|
|
1337
|
+
// It configures the node based on the provided parameters (port, relay capabilities, UPnP).
|
|
1338
|
+
// Parameters:
|
|
1339
|
+
// - instanceIndexC (C.int): The index for this node instance (0 to maxInstances-1).
|
|
1340
|
+
// - predefinedPortC (C.int): The TCP port to listen on (0 for random).
|
|
1341
|
+
// - enableRelayClientC (C.int): 1 if this node should enable relay communications (client mode)
|
|
1342
|
+
// - enableRelayServiceC (C.int): 1 to set this node as a relay service (server mode),
|
|
1343
|
+
// - knowsIsPublicC (C.int): 1 to assume public reachability, 0 otherwise (-> tries to assess it in any possible way).
|
|
1344
|
+
// - maxConnectionsC (C.int): The maximum number of connections this node can maintain.
|
|
1345
|
+
//
|
|
1346
|
+
// Returns:
|
|
1347
|
+
// - *C.char: A JSON string indicating success (with node addresses) or failure (with an error message).
|
|
1348
|
+
// The structure is `{"state":"Success", "message": ["/ip4/.../p2p/...", ...]}` or `{"state":"Error", "message":"..."}`.
|
|
1349
|
+
// - IMPORTANT: The caller (C/Python) MUST free the returned C string using the `FreeString` function
|
|
1350
|
+
// exported by this library to avoid memory leaks. Returns NULL only on catastrophic failure before JSON creation.
|
|
1351
|
+
//
|
|
1352
|
+
//export CreateNode
|
|
1353
|
+
func CreateNode(
|
|
1354
|
+
instanceIndexC C.int,
|
|
1355
|
+
configJSONC *C.char,
|
|
1356
|
+
) (ret *C.char) {
|
|
1357
|
+
|
|
1358
|
+
instanceIndex := int(instanceIndexC)
|
|
1359
|
+
|
|
1360
|
+
if instanceIndex < 0 || instanceIndex >= maxInstances {
|
|
1361
|
+
errMsg := fmt.Errorf("invalid instance index: %d. Must be between 0 and %d", instanceIndex, maxInstances-1)
|
|
1362
|
+
return jsonErrorResponse("Invalid instance index", errMsg)
|
|
1363
|
+
}
|
|
1364
|
+
|
|
1365
|
+
// --- Instance Creation and State Check ---
|
|
1366
|
+
globalInstanceMutex.Lock()
|
|
1367
|
+
if allInstances[instanceIndex] != nil {
|
|
1368
|
+
globalInstanceMutex.Unlock()
|
|
1369
|
+
msg := fmt.Sprintf("Instance %d is already initialized. Please call CloseNode first.", instanceIndex)
|
|
1370
|
+
return jsonErrorResponse(msg, nil)
|
|
1371
|
+
}
|
|
1372
|
+
|
|
1373
|
+
// --- Create the new instance object ---
|
|
1374
|
+
ni := &NodeInstance{
|
|
1375
|
+
instanceIndex: instanceIndex,
|
|
1376
|
+
topics: make(map[string]*pubsub.Topic),
|
|
1377
|
+
subscriptions: make(map[string]*pubsub.Subscription),
|
|
1378
|
+
friendlyPeers: make(map[peer.ID]ExtendedPeerInfo),
|
|
1379
|
+
persistentChatStreams: make(map[peer.ID]network.Stream),
|
|
1380
|
+
disconnectionTimers: make(map[peer.ID]context.CancelFunc),
|
|
1381
|
+
messageStore: newMessageStore(),
|
|
1382
|
+
}
|
|
1383
|
+
ni.ctx, ni.cancel = context.WithCancel(context.Background())
|
|
1384
|
+
isPublic := false
|
|
1385
|
+
|
|
1386
|
+
// Store it in the global slice
|
|
1387
|
+
allInstances[instanceIndex] = ni
|
|
1388
|
+
globalInstanceMutex.Unlock()
|
|
1389
|
+
|
|
1390
|
+
logger.Infof("[GO] đ Instance %d: Starting CreateNode...", instanceIndex)
|
|
1391
|
+
// --- Centralized Cleanup on Failure ---
|
|
1392
|
+
var success bool = false
|
|
1393
|
+
defer func() {
|
|
1394
|
+
if !success {
|
|
1395
|
+
// If `success` is still false when CreateNode exits, an error
|
|
1396
|
+
// must have occurred. We call Close() and remove the instance.
|
|
1397
|
+
logger.Warnf("[GO] â ī¸ Instance %d: CreateNode failed, cleaning up...", instanceIndex)
|
|
1398
|
+
ni.Close() // Call the new method!
|
|
1399
|
+
globalInstanceMutex.Lock()
|
|
1400
|
+
allInstances[instanceIndex] = nil // Remove it from the global list
|
|
1401
|
+
globalInstanceMutex.Unlock()
|
|
1402
|
+
}
|
|
1403
|
+
}()
|
|
1404
|
+
|
|
1405
|
+
// 1. Parse Configuration
|
|
1406
|
+
configJSON := C.GoString(configJSONC)
|
|
1407
|
+
var cfg NodeConfig
|
|
1408
|
+
if err := json.Unmarshal([]byte(configJSON), &cfg); err != nil {
|
|
1409
|
+
return jsonErrorResponse("Invalid Configuration JSON", err)
|
|
1410
|
+
}
|
|
1411
|
+
|
|
1412
|
+
// --- Sanity checks on the config ---
|
|
1413
|
+
// If one of the three parameters for custom certificates is specified, all three are required.
|
|
1414
|
+
if cfg.TLS.Domain != "" || cfg.TLS.CertPath != "" || cfg.TLS.KeyPath != "" {
|
|
1415
|
+
if cfg.TLS.Domain == "" || cfg.TLS.CertPath == "" || cfg.TLS.KeyPath == "" {
|
|
1416
|
+
return jsonErrorResponse(fmt.Sprintf("Instance %d: Missing at least one of 'Domain', 'CertPath' or 'KeyPath'.", instanceIndex), nil)
|
|
1417
|
+
}
|
|
1418
|
+
} // in the following, cfg.TLS.Domain != "" will be used as flag for useCustomTLS
|
|
1419
|
+
|
|
1420
|
+
// Having both customTLS and autoTLS is not allowed
|
|
1421
|
+
if cfg.TLS.Domain != "" && cfg.TLS.AutoTLS {
|
|
1422
|
+
return jsonErrorResponse(fmt.Sprintf("Instance %d: Cannot specify both a 'Domain' and 'AutoTLS'.", instanceIndex), nil)
|
|
1423
|
+
}
|
|
1424
|
+
|
|
1425
|
+
// If we use AutoTLS we need the DHT on
|
|
1426
|
+
if cfg.TLS.AutoTLS && !cfg.DHT.Enabled {
|
|
1427
|
+
return jsonErrorResponse(fmt.Sprintf("Instance %d: Using TLS requires DHT 'Enabled'.", instanceIndex), nil)
|
|
1428
|
+
}
|
|
1429
|
+
|
|
1430
|
+
// If we want RelayService we must be public (either forced or via AutoNat)
|
|
1431
|
+
if cfg.Relay.EnableService {
|
|
1432
|
+
if !cfg.Relay.EnableClient {
|
|
1433
|
+
return jsonErrorResponse(fmt.Sprintf("Instance %d: Cannot set libp2p.DisableRelay() if we want to offer relay services.", instanceIndex), nil)
|
|
1434
|
+
}
|
|
1435
|
+
if !(cfg.DHT.Enabled || cfg.Network.ForcePublic) {
|
|
1436
|
+
return jsonErrorResponse(fmt.Sprintf("Instance %d: A relay needs to be publicly reachable (forced or discovered).", instanceIndex), nil)
|
|
1437
|
+
}
|
|
1438
|
+
}
|
|
1439
|
+
|
|
1440
|
+
// If we want to keep dht it needs to be enabled
|
|
1441
|
+
if cfg.DHT.Keep && !cfg.DHT.Enabled {
|
|
1442
|
+
return jsonErrorResponse(fmt.Sprintf("Instance %d: Cannot set 'DHT.Keep' if DHT is not 'Enabled'.", instanceIndex), nil)
|
|
1443
|
+
}
|
|
1444
|
+
|
|
1445
|
+
// --- Load or Create Persistent Identity ---
|
|
1446
|
+
keyPath := filepath.Join(cfg.IdentityDir, "identity.key")
|
|
1447
|
+
privKey, err := loadOrCreateIdentity(keyPath)
|
|
1448
|
+
if err != nil {
|
|
1449
|
+
return jsonErrorResponse(fmt.Sprintf("Instance %d: Failed to prepare identity", instanceIndex), err)
|
|
1450
|
+
}
|
|
1451
|
+
|
|
1452
|
+
// --- AutoTLS Cert Manager Setup (if enabled) ---
|
|
1453
|
+
var certManager *p2pforge.P2PForgeCertMgr
|
|
1454
|
+
if cfg.TLS.AutoTLS {
|
|
1455
|
+
logger.Debugf("[GO] - Instance %d: AutoTLS is ENABLED. Setting up certificate manager...\n", instanceIndex)
|
|
1456
|
+
certManager, err = p2pforge.NewP2PForgeCertMgr(
|
|
1457
|
+
p2pforge.WithCAEndpoint(p2pforge.DefaultCAEndpoint),
|
|
1458
|
+
p2pforge.WithCertificateStorage(&certmagic.FileStorage{Path: filepath.Join(cfg.IdentityDir, "p2p-forge-certs")}),
|
|
1459
|
+
p2pforge.WithUserAgent(UnaiverseUserAgent),
|
|
1460
|
+
p2pforge.WithRegistrationDelay(10*time.Second),
|
|
1461
|
+
)
|
|
1462
|
+
if err != nil {
|
|
1463
|
+
return jsonErrorResponse(fmt.Sprintf("Instance %d: Failed to create AutoTLS cert manager", instanceIndex), err)
|
|
1464
|
+
}
|
|
1465
|
+
certManager.Start()
|
|
1466
|
+
ni.certManager = certManager
|
|
1467
|
+
}
|
|
1468
|
+
|
|
1469
|
+
// --- 4. Libp2p Options Assembly ---
|
|
1470
|
+
tlsMode := "none"
|
|
1471
|
+
if cfg.TLS.AutoTLS {
|
|
1472
|
+
tlsMode = "autotls"
|
|
1473
|
+
} else if cfg.TLS.Domain != "" {
|
|
1474
|
+
tlsMode = "domain"
|
|
1475
|
+
}
|
|
1476
|
+
listenAddrs, err := getListenAddrs(cfg.ListenIPs, cfg.PredefinedPort, tlsMode)
|
|
1477
|
+
if err != nil {
|
|
1478
|
+
return jsonErrorResponse(fmt.Sprintf("Instance %d: Failed to create multiaddrs", instanceIndex), err)
|
|
1479
|
+
}
|
|
1480
|
+
|
|
1481
|
+
// --- Configure Custom Resource Manager ---
|
|
1482
|
+
scalingLimits := rcmgr.DefaultLimits
|
|
1483
|
+
libp2p.SetDefaultServiceLimits(&scalingLimits)
|
|
1484
|
+
|
|
1485
|
+
// These apply per unique Peer ID.
|
|
1486
|
+
scalingLimits.PeerBaseLimit.Conns = 64
|
|
1487
|
+
scalingLimits.PeerBaseLimit.ConnsInbound = 64
|
|
1488
|
+
scalingLimits.PeerBaseLimit.ConnsOutbound = 64
|
|
1489
|
+
|
|
1490
|
+
// Tweak System Limits
|
|
1491
|
+
scalingLimits.SystemBaseLimit.Conns = 256
|
|
1492
|
+
scalingLimits.SystemBaseLimit.ConnsInbound = 128
|
|
1493
|
+
scalingLimits.SystemBaseLimit.ConnsOutbound = 128
|
|
1494
|
+
|
|
1495
|
+
// Compute the concrete limits
|
|
1496
|
+
scaledLimits := scalingLimits.AutoScale()
|
|
1497
|
+
|
|
1498
|
+
// Raise the per-IP limits
|
|
1499
|
+
customIP4Limits := []rcmgr.ConnLimitPerSubnet{
|
|
1500
|
+
{
|
|
1501
|
+
PrefixLength: 32, // /32 means "one specific IP address"
|
|
1502
|
+
ConnCount: 1024, // Allow 1024 conns from the same IP
|
|
1503
|
+
},
|
|
1504
|
+
}
|
|
1505
|
+
customIP6Limits := []rcmgr.ConnLimitPerSubnet{
|
|
1506
|
+
{
|
|
1507
|
+
PrefixLength: 56,
|
|
1508
|
+
ConnCount: 1024,
|
|
1509
|
+
},
|
|
1510
|
+
}
|
|
1511
|
+
|
|
1512
|
+
// Create the limiter and manager
|
|
1513
|
+
limiter := rcmgr.NewFixedLimiter(scaledLimits)
|
|
1514
|
+
rm, err := rcmgr.NewResourceManager(
|
|
1515
|
+
limiter,
|
|
1516
|
+
rcmgr.WithLimitPerSubnet(customIP4Limits, customIP6Limits),
|
|
1517
|
+
)
|
|
1518
|
+
if err != nil {
|
|
1519
|
+
return jsonErrorResponse(fmt.Sprintf("Instance %d: Failed to create resource manager", instanceIndex), err)
|
|
1520
|
+
}
|
|
1521
|
+
|
|
1522
|
+
options := []libp2p.Option{
|
|
1523
|
+
libp2p.Identity(privKey),
|
|
1524
|
+
libp2p.ListenAddrs(listenAddrs...),
|
|
1525
|
+
libp2p.DefaultSecurity,
|
|
1526
|
+
libp2p.DefaultMuxers,
|
|
1527
|
+
libp2p.Transport(tcp.NewTCPTransport),
|
|
1528
|
+
libp2p.ShareTCPListener(),
|
|
1529
|
+
libp2p.Transport(quic.NewTransport),
|
|
1530
|
+
libp2p.Transport(webtransport.New),
|
|
1531
|
+
libp2p.Transport(webrtc.New),
|
|
1532
|
+
libp2p.ResourceManager(rm),
|
|
1533
|
+
libp2p.UserAgent(UnaiverseUserAgent),
|
|
1534
|
+
libp2p.NATPortMap(),
|
|
1535
|
+
libp2p.EnableHolePunching(),
|
|
1536
|
+
}
|
|
1537
|
+
|
|
1538
|
+
// Add WebSocket transport, with or without TLS based on cert availability
|
|
1539
|
+
if cfg.TLS.Domain != "" {
|
|
1540
|
+
// We already have certificates, use them
|
|
1541
|
+
logger.Debugf("[GO] - Instance %d: Certificates provided, setting up secure WebSocket (WSS).\n", instanceIndex)
|
|
1542
|
+
cert, err := tls.LoadX509KeyPair(cfg.TLS.CertPath, cfg.TLS.KeyPath)
|
|
1543
|
+
if err != nil {
|
|
1544
|
+
return jsonErrorResponse(fmt.Sprintf("Instance %d: Failed to load Custom TLS certificate and key", instanceIndex), err)
|
|
1545
|
+
}
|
|
1546
|
+
tlsConfig := &tls.Config{Certificates: []tls.Certificate{cert}}
|
|
1547
|
+
// let's also create a custom address factory to ensure we always advertise the correct domain name
|
|
1548
|
+
domainAddressFactory := func(addrs []ma.Multiaddr) []ma.Multiaddr {
|
|
1549
|
+
// Replace the IP part of the WSS address with our domain.
|
|
1550
|
+
result := make([]ma.Multiaddr, 0, len(addrs))
|
|
1551
|
+
for _, addr := range addrs {
|
|
1552
|
+
if strings.Contains(addr.String(), "/tls/ws") || strings.Contains(addr.String(), "/wss") {
|
|
1553
|
+
// This is our WSS listener. Create the public /dns4 version.
|
|
1554
|
+
portStr, err := addr.ValueForProtocol(ma.P_TCP)
|
|
1555
|
+
if err != nil {
|
|
1556
|
+
// Should not happen for a TCP/WS address, but safe fallback
|
|
1557
|
+
result = append(result, addr)
|
|
1558
|
+
continue
|
|
1559
|
+
}
|
|
1560
|
+
dnsAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/dns4/%s/tcp/%s/tls/ws", cfg.TLS.Domain, portStr))
|
|
1561
|
+
result = append(result, dnsAddr)
|
|
1562
|
+
} else {
|
|
1563
|
+
// Keep other addresses (like QUIC) as they are.
|
|
1564
|
+
result = append(result, addr)
|
|
1565
|
+
}
|
|
1566
|
+
}
|
|
1567
|
+
return result
|
|
1568
|
+
}
|
|
1569
|
+
options = append(options,
|
|
1570
|
+
libp2p.Transport(ws.New, ws.WithTLSConfig(tlsConfig)),
|
|
1571
|
+
libp2p.AddrsFactory(domainAddressFactory),
|
|
1572
|
+
)
|
|
1573
|
+
logger.Debugf("[GO] - Instance %d: Loaded custom TLS certificate and key for WSS.\n", instanceIndex)
|
|
1574
|
+
} else if cfg.TLS.AutoTLS {
|
|
1575
|
+
// No certificates, create them automatically
|
|
1576
|
+
options = append(options,
|
|
1577
|
+
libp2p.Transport(ws.New, ws.WithTLSConfig(certManager.TLSConfig())),
|
|
1578
|
+
libp2p.AddrsFactory(certManager.AddressFactory()),
|
|
1579
|
+
)
|
|
1580
|
+
} else {
|
|
1581
|
+
// No certificates, use plain WS
|
|
1582
|
+
logger.Debugf("[GO] - Instance %d: No certificates found, setting up non-secure WebSocket.\n", instanceIndex)
|
|
1583
|
+
options = append(options, libp2p.Transport(ws.New))
|
|
1584
|
+
}
|
|
1585
|
+
|
|
1586
|
+
// Prepare discovering the bootstrap peers
|
|
1587
|
+
if cfg.DHT.Enabled {
|
|
1588
|
+
// Add any possible option to be publicly reachable
|
|
1589
|
+
discoveryOpts := []libp2p.Option{
|
|
1590
|
+
libp2p.EnableAutoNATv2(),
|
|
1591
|
+
libp2p.Routing(func(h host.Host) (routing.PeerRouting, error) {
|
|
1592
|
+
bootstrapAddrInfos := dht.GetDefaultBootstrapPeerAddrInfos()
|
|
1593
|
+
dhtOptions := []dht.Option{
|
|
1594
|
+
dht.Mode(dht.ModeClient),
|
|
1595
|
+
dht.BootstrapPeers(bootstrapAddrInfos...),
|
|
1596
|
+
}
|
|
1597
|
+
var err error
|
|
1598
|
+
ni.dht, err = dht.New(ni.ctx, h, dhtOptions...)
|
|
1599
|
+
return ni.dht, err
|
|
1600
|
+
}),
|
|
1601
|
+
}
|
|
1602
|
+
options = append(options, discoveryOpts...)
|
|
1603
|
+
logger.Debugf("[GO] - Instance %d: Trying to be publicly reachable.\n", instanceIndex)
|
|
1604
|
+
}
|
|
1605
|
+
|
|
1606
|
+
// EnableRelay (the ability to *use* relays) is default, we can explicitly disable it if needed.
|
|
1607
|
+
if !cfg.Relay.EnableClient {
|
|
1608
|
+
// In this case we don't want to use the circuit-relay protocol.
|
|
1609
|
+
options = append(options, libp2p.DisableRelay()) // Explicitly disable using relays.
|
|
1610
|
+
logger.Debugf("[GO] - Instance %d: Relay client is DISABLED.\n", instanceIndex)
|
|
1611
|
+
} else {
|
|
1612
|
+
// Configure Relay Service (ability to *be* a relay)
|
|
1613
|
+
if cfg.Relay.EnableService {
|
|
1614
|
+
resources := rc.DefaultResources() // open this to see the default resource limits
|
|
1615
|
+
resources.MaxReservations = 1024 // default is 128
|
|
1616
|
+
resources.MaxCircuits = 32 // default is 16
|
|
1617
|
+
resources.BufferSize = 4096 // default is 2048
|
|
1618
|
+
resources.MaxReservationsPerIP = 1024 // default is 8
|
|
1619
|
+
resources.MaxReservationsPerASN = 1024 // default is 32
|
|
1620
|
+
if cfg.Relay.WithBroadLimits {
|
|
1621
|
+
// Enrich default limits
|
|
1622
|
+
resources.Limit = nil // same as setting rc.WithInfiniteLimits()
|
|
1623
|
+
logger.Debugf("[GO] - Instance %d: Relay service is ENABLED with custom resource configuration (WithBroadLimits).\n", instanceIndex)
|
|
1624
|
+
} else {
|
|
1625
|
+
logger.Debugf("[GO] - Instance %d: Relay service is ENABLED with default resource configuration.\n", instanceIndex)
|
|
1626
|
+
}
|
|
1627
|
+
// This single option enables the node to act as a relay for others.
|
|
1628
|
+
options = append(options, libp2p.EnableRelayService(rc.WithResources(resources)), libp2p.EnableNATService())
|
|
1629
|
+
} else {
|
|
1630
|
+
// In this case we want to use relays but not offer the service to others.
|
|
1631
|
+
// If we are exploiting the DHT we can start an AutoRelay with PeerSource
|
|
1632
|
+
if cfg.DHT.Keep {
|
|
1633
|
+
// Enable AutoRelay. This uses the services above (DHT, AutoNAT)
|
|
1634
|
+
// to find relays and bind to one if we are private.
|
|
1635
|
+
options = append(options, libp2p.EnableAutoRelayWithPeerSource(ni.PeerSource, autorelay.WithBootDelay(time.Second*10)))
|
|
1636
|
+
logger.Debugf("[GO] - Instance %d: AutoRelay client ENABLED.\n", instanceIndex)
|
|
1637
|
+
}
|
|
1638
|
+
}
|
|
1639
|
+
}
|
|
1640
|
+
|
|
1641
|
+
if cfg.Network.ForcePublic {
|
|
1642
|
+
// Force public reachability to test local relays
|
|
1643
|
+
options = append(options, libp2p.ForceReachabilityPublic())
|
|
1644
|
+
}
|
|
1645
|
+
|
|
1646
|
+
// Create the libp2p Host instance with the configured options for this instance.
|
|
1647
|
+
host, err := libp2p.New(options...)
|
|
1648
|
+
if err != nil {
|
|
1649
|
+
return jsonErrorResponse(fmt.Sprintf("Instance %d: Failed to create host", instanceIndex), err)
|
|
1650
|
+
}
|
|
1651
|
+
ni.host = host
|
|
1652
|
+
logger.Infof("[GO] â
Instance %d: Host created with ID: %s\n", instanceIndex, ni.host.ID())
|
|
1653
|
+
|
|
1654
|
+
if cfg.Network.Isolated {
|
|
1655
|
+
// Turn on the "Protocol Police"
|
|
1656
|
+
enforceProtocolCompliance(ni)
|
|
1657
|
+
}
|
|
1658
|
+
|
|
1659
|
+
// --- Link Host to Cert Manager ---
|
|
1660
|
+
if cfg.TLS.AutoTLS {
|
|
1661
|
+
certManager.ProvideHost(ni.host)
|
|
1662
|
+
logger.Debugf("[GO] - Instance %d: Provided host to AutoTLS cert manager.\n", instanceIndex)
|
|
1663
|
+
}
|
|
1664
|
+
|
|
1665
|
+
// --- Start Address Reporting & Caching ---
|
|
1666
|
+
cacheSub, err := ni.host.EventBus().Subscribe(new(event.EvtLocalAddressesUpdated))
|
|
1667
|
+
if err != nil {
|
|
1668
|
+
return jsonErrorResponse(fmt.Sprintf("Instance %d: Failed to create address cache subscription", instanceIndex), err)
|
|
1669
|
+
}
|
|
1670
|
+
go handleAddressUpdateEvents(ni, cacheSub)
|
|
1671
|
+
logger.Debugf("[GO] đ§ Instance %d: Address cache background listener started.", instanceIndex)
|
|
1672
|
+
|
|
1673
|
+
if cfg.Network.ForcePublic {
|
|
1674
|
+
isPublic = true
|
|
1675
|
+
logger.Debugf("[GO] âŗ Instance %d: ForcePublic is ON. Waiting for addresses to settle...", instanceIndex)
|
|
1676
|
+
waitCtx, waitCancel := context.WithTimeout(ni.ctx, 5*time.Second)
|
|
1677
|
+
defer waitCancel()
|
|
1678
|
+
|
|
1679
|
+
ticker := time.NewTicker(100 * time.Millisecond)
|
|
1680
|
+
defer ticker.Stop()
|
|
1681
|
+
|
|
1682
|
+
AddressWaitLoop:
|
|
1683
|
+
for {
|
|
1684
|
+
select {
|
|
1685
|
+
case <-waitCtx.Done():
|
|
1686
|
+
logger.Warnf("[GO] â ī¸ Instance %d: Timed out waiting for addresses (proceeding anyway).", instanceIndex)
|
|
1687
|
+
break AddressWaitLoop
|
|
1688
|
+
case <-ticker.C:
|
|
1689
|
+
// Check if the host has reported addresses yet
|
|
1690
|
+
if len(ni.host.Addrs()) > 0 {
|
|
1691
|
+
logger.Debugf("[GO] â
Instance %d: Addresses populated.", instanceIndex)
|
|
1692
|
+
break AddressWaitLoop
|
|
1693
|
+
}
|
|
1694
|
+
}
|
|
1695
|
+
}
|
|
1696
|
+
} else {
|
|
1697
|
+
// --- Wait for Reachability Update ---
|
|
1698
|
+
// Subscribe to reachability events
|
|
1699
|
+
reachSub, err := ni.host.EventBus().Subscribe(new(event.EvtLocalReachabilityChanged))
|
|
1700
|
+
if err != nil {
|
|
1701
|
+
return jsonErrorResponse("Failed to subscribe to reachability events", err)
|
|
1702
|
+
}
|
|
1703
|
+
defer reachSub.Close()
|
|
1704
|
+
|
|
1705
|
+
timeoutCtx, timeoutCancel := context.WithTimeout(ni.ctx, 30*time.Second)
|
|
1706
|
+
defer timeoutCancel()
|
|
1707
|
+
logger.Debugf("[GO] âŗ Instance %d: Waiting for reachability update.", instanceIndex)
|
|
1708
|
+
|
|
1709
|
+
WAIT_LOOP:
|
|
1710
|
+
for {
|
|
1711
|
+
select {
|
|
1712
|
+
case evt := <-reachSub.Out():
|
|
1713
|
+
rEvt := evt.(event.EvtLocalReachabilityChanged)
|
|
1714
|
+
if rEvt.Reachability == network.ReachabilityPublic {
|
|
1715
|
+
logger.Debugf("[GO] đļ Instance %d: Reachability -> PUBLIC", instanceIndex)
|
|
1716
|
+
isPublic = true
|
|
1717
|
+
} else {
|
|
1718
|
+
isPublic = false
|
|
1719
|
+
}
|
|
1720
|
+
break WAIT_LOOP
|
|
1721
|
+
|
|
1722
|
+
case <-timeoutCtx.Done():
|
|
1723
|
+
logger.Warnf("[GO] â ī¸ Instance %d: Timeout. Proceeding with best effort. (Public: %t)", instanceIndex, isPublic)
|
|
1724
|
+
break WAIT_LOOP
|
|
1725
|
+
|
|
1726
|
+
// 4. Node Shutdown
|
|
1727
|
+
case <-ni.ctx.Done():
|
|
1728
|
+
return jsonErrorResponse("Context cancelled during init", nil)
|
|
1729
|
+
}
|
|
1730
|
+
}
|
|
1731
|
+
}
|
|
1732
|
+
|
|
1733
|
+
// --- PubSub Initialization ---
|
|
1734
|
+
if err := setupPubSub(ni); err != nil {
|
|
1735
|
+
return jsonErrorResponse(fmt.Sprintf("Instance %d: Failed to create PubSub", instanceIndex), err)
|
|
1736
|
+
}
|
|
1737
|
+
logger.Debugf("[GO] â
Instance %d: PubSub (GossipSub) initialized.\n", instanceIndex)
|
|
1738
|
+
|
|
1739
|
+
// --- Setup Notifiers and Handlers ---
|
|
1740
|
+
setupNotifiers(ni)
|
|
1741
|
+
logger.Debugf("[GO] đ Instance %d: Registered network event notifier.\n", instanceIndex)
|
|
1742
|
+
|
|
1743
|
+
setupDirectMessageHandler(ni)
|
|
1744
|
+
logger.Debugf("[GO] â
Instance %d: Direct message handler set up.\n", instanceIndex)
|
|
1745
|
+
|
|
1746
|
+
// --- Close DHT if needed ---
|
|
1747
|
+
if !cfg.DHT.Keep {
|
|
1748
|
+
if ni.dht != nil {
|
|
1749
|
+
logger.Debugf("[GO] - Instance %d: Closing DHT client...\n", instanceIndex)
|
|
1750
|
+
if err := ni.dht.Close(); err != nil {
|
|
1751
|
+
logger.Warnf("[GO] â ī¸ Instance %d: Error closing DHT: %v\n", instanceIndex, err)
|
|
1752
|
+
}
|
|
1753
|
+
ni.dht = nil
|
|
1754
|
+
}
|
|
1755
|
+
}
|
|
1756
|
+
|
|
1757
|
+
// --- Get Final Addresses ---
|
|
1758
|
+
nodeAddresses, err := goGetNodeAddresses(ni, "")
|
|
1759
|
+
if err != nil {
|
|
1760
|
+
return jsonErrorResponse(
|
|
1761
|
+
fmt.Sprintf("Instance %d: Failed to obtain node addresses after waiting for reachability", instanceIndex),
|
|
1762
|
+
err,
|
|
1763
|
+
)
|
|
1764
|
+
}
|
|
1765
|
+
|
|
1766
|
+
// --- Build and return the new structured response ---
|
|
1767
|
+
response := CreateNodeResponse{
|
|
1768
|
+
Addresses: nodeAddresses,
|
|
1769
|
+
IsPublic: isPublic,
|
|
1770
|
+
}
|
|
1771
|
+
|
|
1772
|
+
logger.Infof("[GO] đ Instance %d: Node addresses: %v\n", instanceIndex, nodeAddresses)
|
|
1773
|
+
reachabilityStatus := map[bool]string{true: "Public", false: "Private"}[isPublic]
|
|
1774
|
+
logger.Infof("[GO] đ Instance %d: Node creation complete. Reachability status: %s\n", instanceIndex, reachabilityStatus)
|
|
1775
|
+
success = true // Mark success to avoid cleanup in defer.
|
|
1776
|
+
return jsonSuccessResponse(response)
|
|
1777
|
+
}
|
|
1778
|
+
|
|
1779
|
+
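// Illustrative sketch (NOT part of the library): how a Go caller of this C API could
// decode the JSON envelope that the exported functions return. The "state" and "message"
// keys are taken from the doc comments above; the type and function names here are
// hypothetical.
type resultEnvelopeSketch struct {
    State   string          `json:"state"`
    Message json.RawMessage `json:"message"` // payload shape depends on the call
}

func decodeEnvelopeSketch(raw string) (json.RawMessage, error) {
    var env resultEnvelopeSketch
    if err := json.Unmarshal([]byte(raw), &env); err != nil {
        return nil, fmt.Errorf("malformed envelope: %w", err)
    }
    if env.State != "Success" {
        return nil, fmt.Errorf("library call failed: %s", string(env.Message))
    }
    return env.Message, nil
}
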
// ConnectTo attempts to establish a connection with a remote peer given its multiaddress(es) for a specific instance.
// Parameters:
// - instanceIndexC (C.int): The index of the node instance.
// - addrsJSONC (*C.char): Pointer to a JSON string containing the list of addresses that can be dialed.
//
// Returns:
// - *C.char: A JSON string indicating success (with the peer AddrInfo of the winning connection) or failure (with an error message).
//   Structure: `{"state":"Success", "message": {"ID": "...", "Addrs": ["...", ...]}}` or `{"state":"Error", "message":"..."}`.
// - IMPORTANT: The caller MUST free the returned C string using `FreeString`.
//
//export ConnectTo
func ConnectTo(
    instanceIndexC C.int,
    addrsJSONC *C.char,
) *C.char {

    ni, err := getInstance(int(instanceIndexC))
    if err != nil {
        return jsonErrorResponse("Invalid instance", err)
    }

    goAddrsJSON := C.GoString(addrsJSONC)
    logger.Debugf("[GO] Instance %d: Attempting to connect to peer with addresses: %s\n", ni.instanceIndex, goAddrsJSON)

    // --- Unmarshal Address List from JSON ---
    var addrStrings []string
    if err := json.Unmarshal([]byte(goAddrsJSON), &addrStrings); err != nil {
        return jsonErrorResponse("Failed to parse addresses JSON", err)
    }
    if len(addrStrings) == 0 {
        return jsonErrorResponse("Address list is empty", nil)
    }

    // --- Create AddrInfo from the list ---
    addrInfo, err := peer.AddrInfoFromString(addrStrings[0])
    if err != nil {
        return jsonErrorResponse("Invalid first multiaddress in list", err)
    }

    // Add the rest of the addresses to the AddrInfo struct
    for i := 1; i < len(addrStrings); i++ {
        maddr, err := ma.NewMultiaddr(addrStrings[i])
        if err != nil {
            logger.Warnf("[GO] ⚠️ Instance %d: Skipping invalid multiaddress '%s' in list: %v\n", ni.instanceIndex, addrStrings[i], err)
            continue
        }
        // You might want to add a check here to ensure subsequent addresses are for the same peer ID.
        addrInfo.Addrs = append(addrInfo.Addrs, maddr)
    }

    // Check if attempting to connect to the local node itself.
    if addrInfo.ID == ni.host.ID() {
        logger.Debugf("[GO] ℹ️ Instance %d: Attempting to connect to self (%s), skipping explicit connection.\n", ni.instanceIndex, addrInfo.ID)
        // Connecting to self is usually not necessary or meaningful in libp2p.
        // Return success, indicating the "connection" is implicitly present.
        return jsonSuccessResponse(addrInfo) // Caller frees.
    }

    // --- 1. ESTABLISH CONNECTION ---
    // Use a context with a timeout for the connection attempt to prevent blocking indefinitely.
    connCtx, cancel := context.WithTimeout(ni.ctx, 30*time.Second) // 30-second timeout.
    defer cancel()                                                 // Ensure the context is cancelled eventually.

    // Add the peer's address(es) to the local peerstore for this instance. This helps libp2p find the peer.
    // ConnectedAddrTTL suggests the address is likely valid for a short time after connection.
    // Use PermanentAddrTTL if the address is known to be stable.
    ni.host.Peerstore().AddAddrs(addrInfo.ID, addrInfo.Addrs, peerstore.ConnectedAddrTTL)

    // Initiate the connection attempt. libp2p will handle dialing and negotiation.
    logger.Debugf("[GO] - Instance %d: Attempting host.Connect to %s...\n", ni.instanceIndex, addrInfo.ID)
    if err := ni.host.Connect(connCtx, *addrInfo); err != nil {
        // Check if the error was due to the connection timeout.
        if connCtx.Err() == context.DeadlineExceeded {
            errMsg := fmt.Sprintf("Instance %d: Connection attempt to %s timed out after 30s", ni.instanceIndex, addrInfo.ID)
            logger.Errorf("[GO] ❌ %s\n", errMsg)
            return jsonErrorResponse(errMsg, nil) // Return specific timeout error (caller frees).
        }
        // Handle other connection errors.
        errMsg := fmt.Sprintf("Instance %d: Failed to connect to peer %s", ni.instanceIndex, addrInfo.ID)
        // Example: check for specific common errors if needed,
        // e.g. strings.Contains(err.Error(), "no route to host").
        return jsonErrorResponse(errMsg, err) // Return generic connection error (caller frees).
    }

    // --- 2. FIND THE WINNING ADDRESS ---
    // After a successful connection, query the host's network for active connections to the peer:
    // this is where the 'winning' address shows up.
    conns := ni.host.Network().ConnsToPeer(addrInfo.ID)
    if len(conns) == 0 {
        // Connect succeeded but no connection is visible yet (it may still be pending).
        // Fall back to the AddrInfo we dialed with instead of failing on an empty address.
        logger.Warnf("[GO] ⚠️ Instance %d: Connect succeeded for %s, but no active connection found immediately. It may be pending.\n", ni.instanceIndex, addrInfo.ID)
        return jsonSuccessResponse(addrInfo) // Caller frees.
    }
    winningAddr := fmt.Sprintf("%s/p2p/%s", conns[0].RemoteMultiaddr().String(), addrInfo.ID.String())
    logger.Debugf("[GO] ✅ Instance %d: Successfully connected to peer %s via: %s\n", ni.instanceIndex, addrInfo.ID, winningAddr)

    // Success: log the winning connection and return it as AddrInfo.
    logger.Infof("[GO] ✅ Instance %d: Successfully initiated connection to multiaddress: %s\n", ni.instanceIndex, winningAddr)
    winningAddrInfo, err := peer.AddrInfoFromString(winningAddr)
    if err != nil {
        return jsonErrorResponse("Invalid winner multiaddress.", err)
    }
    return jsonSuccessResponse(winningAddrInfo) // Caller frees.
}

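// Illustrative sketch (NOT part of the library): building the JSON payload ConnectTo
// expects (a JSON array of multiaddress strings, each ending in /p2p/<peer-id>) and
// invoking the export from Go, as a cgo harness might. The helper name is hypothetical,
// and the returned C string must still be released with FreeString by the caller.
func connectSketch(instance int, addrs []string) *C.char {
    encoded, err := json.Marshal(addrs) // e.g. ["/ip4/198.51.100.5/tcp/4001/p2p/<peer-id>"]
    if err != nil {
        return nil
    }
    cAddrs := C.CString(string(encoded))
    defer C.free(unsafe.Pointer(cAddrs)) // ConnectTo copies the string, so freeing here is safe.
    return ConnectTo(C.int(instance), cAddrs)
}
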
// StartStaticRelay configures and starts the AutoRelay service using a specific
// static relay (e.g., the subnetwork owner). This replaces manual reservation logic.
//
// Parameters:
// - instanceIndexC: The node instance.
// - relayAddrInfoJSONC: JSON string of the relay's AddrInfo (id + addrs).
//
//export StartStaticRelay
func StartStaticRelay(
    instanceIndexC C.int,
    relayAddrInfoJSONC *C.char,
) *C.char {

    ni, err := getInstance(int(instanceIndexC))
    if err != nil {
        return jsonErrorResponse("Invalid instance", err)
    }

    // --- 1. Handle Switching Subnetworks ---
    // If an AutoRelay service is already running, close it first.
    if ni.privateRelay != nil {
        logger.Debugf("[GO] Instance %d: Switching relay. Closing existing AutoRelay service...", ni.instanceIndex)
        if err := ni.privateRelay.Close(); err != nil {
            logger.Warnf("[GO] ⚠️ Instance %d: Error closing old AutoRelay: %v", ni.instanceIndex, err)
        }
        ni.privateRelay = nil
        // // Also clean up any existing relayed addresses
        // ni.addrMutex.Lock()
        // ni.privateRelayAddrs = nil
        // ni.addrMutex.Unlock()
        logger.Infof("[GO] Instance %d: Previous AutoRelay service closed.", ni.instanceIndex)
    }

    // --- 2. Parse the New Relay's AddrInfo ---
    relayInfoJSON := C.GoString(relayAddrInfoJSONC)
    var relayInfo peer.AddrInfo
    if err := json.Unmarshal([]byte(relayInfoJSON), &relayInfo); err != nil {
        return jsonErrorResponse("Failed to parse relay AddrInfo JSON", err)
    }

    logger.Debugf("[GO] Instance %d: Configuring Static AutoRelay with peer %s", ni.instanceIndex, relayInfo.ID)

    // --- 3. Configure AutoRelay Options ---
    opts := []autorelay.Option{
        autorelay.WithStaticRelays([]peer.AddrInfo{relayInfo}),
        autorelay.WithNumRelays(1),
        autorelay.WithBootDelay(0),
    }

    // --- 4. Create the AutoRelay Service ---
    // This initializes the service but might not start the background workers yet.
    ar, err := autorelay.NewAutoRelay(ni.host, opts...)
    if err != nil {
        return jsonErrorResponse("Failed to create AutoRelay service", err)
    }

    // --- 5. Start the Service ---
    // This kicks off the background goroutines to connect and reserve slots.
    // It returns immediately.
    ar.Start()

    // Store the reference so we can close it later.
    ni.privateRelay = ar

    logger.Infof("[GO] ✅ Instance %d: Static AutoRelay service started. Target: %s", ni.instanceIndex, relayInfo.ID)

    return jsonSuccessResponse("Static AutoRelay enabled")
}

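// Illustrative sketch (NOT part of the library): producing the relay AddrInfo JSON that
// StartStaticRelay unmarshals above. peer.AddrInfo marshals to an object with "ID" and
// "Addrs" keys, matching the structure quoted in the ConnectTo doc comment; the helper
// name is hypothetical.
func relayInfoJSONSketch(info peer.AddrInfo) (string, error) {
    encoded, err := json.Marshal(info)
    if err != nil {
        return "", err
    }
    // e.g. {"ID":"<peer-id>","Addrs":["/ip4/203.0.113.7/tcp/4001"]}
    return string(encoded), nil
}
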
// DisconnectFrom attempts to close any active connections to a specified peer
// and removes the peer from the internally tracked list for a specific instance.
// Parameters:
// - instanceIndexC (C.int): The index of the node instance.
// - peerIDC (*C.char): The Peer ID string of the peer to disconnect from.
//
// Returns:
// - *C.char: A JSON string indicating success or failure.
//   Structure: `{"state":"Success", "message":"Disconnected from peer ..."}` or `{"state":"Error", "message":"..."}`.
// - IMPORTANT: The caller MUST free the returned C string using `FreeString`.
//
//export DisconnectFrom
func DisconnectFrom(
    instanceIndexC C.int,
    peerIDC *C.char,
) *C.char {

    ni, err := getInstance(int(instanceIndexC))
    if err != nil {
        return jsonErrorResponse("Invalid instance", err)
    }

    goPeerID := C.GoString(peerIDC)
    logger.Debugf("[GO] Instance %d: Attempting to disconnect from peer: %s\n", ni.instanceIndex, goPeerID)

    pid, err := peer.Decode(goPeerID)
    if err != nil {
        return jsonErrorResponse(
            fmt.Sprintf("Instance %d: Failed to decode peer ID", ni.instanceIndex), err,
        )
    }

    if pid == ni.host.ID() {
        logger.Debugf("[GO] ℹ️ Instance %d: Attempting to disconnect from self (%s), skipping.\n", ni.instanceIndex, pid)
        return jsonSuccessResponse("Cannot disconnect from self")
    }

    // --- Close Persistent Outgoing Stream (if it exists) for this instance ---
    ni.streamsMutex.Lock()
    stream, exists := ni.persistentChatStreams[pid]
    if exists {
        logger.Debugf("[GO] ⏳ Instance %d: Closing persistent outgoing stream to %s\n", ni.instanceIndex, pid)
        _ = stream.Close() // Attempt graceful close
        delete(ni.persistentChatStreams, pid)
    }
    ni.streamsMutex.Unlock() // Unlock before the potentially blocking network call

    // --- Close Network Connections ---
    conns := ni.host.Network().ConnsToPeer(pid)
    closedNetworkConn := false
    if len(conns) > 0 {
        logger.Debugf("[GO] - Instance %d: Closing %d active network connection(s) to peer %s...\n", ni.instanceIndex, len(conns), pid)
        err = ni.host.Network().ClosePeer(pid) // This closes the underlying connection(s)
        if err != nil {
            logger.Warnf("[GO] ⚠️ Instance %d: Error closing network connection(s) to peer %s: %v (proceeding with cleanup)\n", ni.instanceIndex, pid, err)
        } else {
            logger.Debugf("[GO] - Instance %d: Closed network connection(s) to peer: %s\n", ni.instanceIndex, pid)
            closedNetworkConn = true
        }
    } else {
        logger.Debugf("[GO] ℹ️ Instance %d: No active network connections found to peer %s.\n", ni.instanceIndex, pid)
    }

    // --- Remove from the Tracking Map for this instance ---
    ni.peersMutex.Lock()
    delete(ni.friendlyPeers, pid)
    ni.peersMutex.Unlock()

    logMsg := fmt.Sprintf("Instance %d: Disconnected from peer %s", ni.instanceIndex, goPeerID)
    if !exists && !closedNetworkConn && len(conns) == 0 {
        logMsg = fmt.Sprintf("Instance %d: Disconnected from peer %s (not connected or tracked)", ni.instanceIndex, goPeerID)
    }
    logger.Infof("[GO] %s\n", logMsg)

    return jsonSuccessResponse(logMsg)
}

// GetConnectedPeers returns a list of peers currently tracked as connected for a specific instance.
// Note: This relies on the instance's internal `friendlyPeers` map, which is updated during
// connect/disconnect operations and incoming streams; no additional liveness check is
// performed here.
// Parameters:
// - instanceIndexC (C.int): The index of the node instance.
//
// Returns:
// - *C.char: A JSON string containing a list of connected peers' information.
//   Structure: `{"state":"Success", "message": [ExtendedPeerInfo, ...]}` or `{"state":"Error", "message":"..."}`.
//   Each `ExtendedPeerInfo` object has `addr_info` (ID, Addrs), `connected_at`, `direction`, and `misc`.
// - IMPORTANT: The caller MUST free the returned C string using `FreeString`.
//
//export GetConnectedPeers
func GetConnectedPeers(
    instanceIndexC C.int,
) *C.char {

    ni, err := getInstance(int(instanceIndexC))
    if err != nil {
        // If getInstance errors, the instance/host isn't ready. Return success with an
        // empty list, as this is a query, not an operation. Note: `ni` may be nil here,
        // so log the raw index instead of dereferencing it.
        logger.Warnf("[GO] ⚠️ Instance %d: GetConnectedPeers called but instance is not ready: %v\n", int(instanceIndexC), err)
        return jsonSuccessResponse([]ExtendedPeerInfo{})
    }

    // Hold a read lock while copying the map, so concurrent writers are excluded.
    ni.peersMutex.RLock()
    defer ni.peersMutex.RUnlock() // Ensure the lock is released.

    // Create a slice to hold the results directly from the map.
    peersList := make([]ExtendedPeerInfo, 0, len(ni.friendlyPeers))

    for _, peerInfo := range ni.friendlyPeers {
        peersList = append(peersList, peerInfo)
    }

    logger.Debugf("[GO] ℹ️ Instance %d: Reporting %d currently tracked peers.\n", ni.instanceIndex, len(peersList))

    // Return the list of tracked peers as a JSON success response.
    return jsonSuccessResponse(peersList) // Caller frees.
}

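// Illustrative sketch (NOT part of the library): the shape a caller can expect for each
// entry in the GetConnectedPeers response, using the JSON keys named in the doc comment
// above. The Go types chosen here (string timestamp, free-form misc) are assumptions;
// the authoritative definition is the ExtendedPeerInfo struct declared earlier in this file.
type peerEntrySketch struct {
    AddrInfo struct {
        ID    string   `json:"ID"`
        Addrs []string `json:"Addrs"`
    } `json:"addr_info"`
    ConnectedAt string                 `json:"connected_at"`
    Direction   string                 `json:"direction"`
    Misc        map[string]interface{} `json:"misc"`
}
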
// GetRendezvousPeers returns a list of peers currently tracked as part of the world for a specific instance.
// Note: This relies on the instance's internal rendezvous state, which is updated via pubsub.
// Parameters:
// - instanceIndexC (C.int): The index of the node instance.
//
// Returns:
// - *C.char: A JSON string containing the rendezvous peers and an update counter.
//   Structure: `{"state":"Success", "message": {"peers": [ExtendedPeerInfo, ...], "update_count": N}}`,
//   or `{"state":"Empty"}` if the instance is not ready or no rendezvous update has been received yet.
//   Each `ExtendedPeerInfo` object has `addr_info` (ID, Addrs), `connected_at`, `direction`, and `misc`.
// - IMPORTANT: The caller MUST free the returned C string using `FreeString`.
//
//export GetRendezvousPeers
func GetRendezvousPeers(
    instanceIndexC C.int,
) *C.char {

    ni, err := getInstance(int(instanceIndexC))
    if err != nil {
        // If the instance isn't ready, we definitely don't have rendezvous peers.
        return C.CString(`{"state":"Empty"}`)
    }

    ni.rendezvousMutex.RLock()
    rendezvousState := ni.rendezvousState
    ni.rendezvousMutex.RUnlock()

    // If the state pointer is nil, it means we haven't received the first update yet.
    if rendezvousState == nil {
        return C.CString(`{"state":"Empty"}`)
    }

    // Extract the list of ExtendedPeerInfo to return it
    peersList := make([]ExtendedPeerInfo, 0, len(rendezvousState.Peers))
    for _, peerInfo := range rendezvousState.Peers {
        peersList = append(peersList, peerInfo)
    }

    // This struct will be marshaled to JSON with exactly the fields we want to expose.
    responsePayload := struct {
        Peers       []ExtendedPeerInfo `json:"peers"`
        UpdateCount int64              `json:"update_count"`
    }{
        Peers:       peersList,
        UpdateCount: rendezvousState.UpdateCount,
    }

    // The state exists, so return the whole struct.
    logger.Debugf("[GO] ℹ️ Instance %d: Reporting %d rendezvous peers (UpdateCount: %d).\n", ni.instanceIndex, len(rendezvousState.Peers), rendezvousState.UpdateCount)
    return jsonSuccessResponse(responsePayload) // Caller frees.
}

// GetNodeAddresses is the C-exported wrapper for goGetNodeAddresses.
// It handles C-Go type conversions and JSON marshaling.
//
//export GetNodeAddresses
func GetNodeAddresses(
    instanceIndexC C.int,
    peerIDC *C.char,
) *C.char {

    ni, err := getInstance(int(instanceIndexC))
    if err != nil {
        return jsonErrorResponse("Invalid instance", err)
    }
    peerIDStr := C.GoString(peerIDC) // Raw string from C

    var pidForInternalCall peer.ID // This will be peer.ID("") for the local node

    if peerIDStr == "" || peerIDStr == ni.host.ID().String() {
        // Convention: an empty peer.ID ("") passed to goGetNodeAddresses means "local node".
        pidForInternalCall = "" // This is peer.ID("")
    } else {
        pidForInternalCall, err = peer.Decode(peerIDStr)
        if err != nil {
            errMsg := fmt.Sprintf("Instance %d: Failed to decode peer ID '%s'", ni.instanceIndex, peerIDStr)
            return jsonErrorResponse(errMsg, err)
        }
    }

    // Call the internal Go function with the resolved peer.ID (or the empty peer.ID for local).
    addresses, err := goGetNodeAddresses(ni, pidForInternalCall)
    if err != nil {
        return jsonErrorResponse(err.Error(), nil)
    }

    return jsonSuccessResponse(addresses)
}

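// Illustrative sketch (NOT part of the library): fetching the local node's addresses
// through the exported wrapper. By the convention documented above, an empty peer ID
// string selects the local node. The helper name is hypothetical, and the returned
// C string must still be released with FreeString by the caller.
func localAddressesSketch(instance int) *C.char {
    emptyPID := C.CString("")
    defer C.free(unsafe.Pointer(emptyPID))
    return GetNodeAddresses(C.int(instance), emptyPID)
}
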
// SendMessageToPeer sends a message either directly to a specific peer or broadcasts it via PubSub for a specific instance.
|
|
2162
|
+
// Parameters:
|
|
2163
|
+
// - instanceIndexC (C.int): The index of the node instance.
|
|
2164
|
+
// - channelC (*C.char): Use the unique channel as defined above in the Message struct.
|
|
2165
|
+
// - dataC (*C.char): A pointer to the raw byte data of the message payload.
|
|
2166
|
+
// - lengthC (C.int): The length of the data buffer pointed to by `data`.
|
|
2167
|
+
//
|
|
2168
|
+
// Returns:
|
|
2169
|
+
// - *C.char: A JSON string with {"state": "Success/Error", "message": "..."}.
|
|
2170
|
+
// - IMPORTANT: The caller MUST free this string using FreeString.
|
|
2171
|
+
//
|
|
2172
|
+
//export SendMessageToPeer
|
|
2173
|
+
func SendMessageToPeer(
|
|
2174
|
+
instanceIndexC C.int,
|
|
2175
|
+
channelC *C.char,
|
|
2176
|
+
dataC *C.char,
|
|
2177
|
+
lengthC C.int,
|
|
2178
|
+
) *C.char {
|
|
2179
|
+
|
|
2180
|
+
ni, err := getInstance(int(instanceIndexC))
|
|
2181
|
+
if err != nil {
|
|
2182
|
+
return jsonErrorResponse("Invalid instance", err)
|
|
2183
|
+
}
|
|
2184
|
+
|
|
2185
|
+
// Convert C inputs
|
|
2186
|
+
goChannel := C.GoString(channelC)
|
|
2187
|
+
goData := C.GoBytes(unsafe.Pointer(dataC), C.int(lengthC))
|
|
2188
|
+
|
|
2189
|
+
// --- Branch: Broadcast or Direct Send ---
|
|
2190
|
+
if strings.Contains(goChannel, "::ps:") {
|
|
2191
|
+
// --- Broadcast via specific PubSub Topic ---
|
|
2192
|
+
instancePubsub := ni.pubsub
|
|
2193
|
+
if instancePubsub == nil {
|
|
2194
|
+
// PubSub not initialized, cannot broadcast
|
|
2195
|
+
return jsonErrorResponse("PubSub not initialized, cannot broadcast", nil)
|
|
2196
|
+
}
|
|
2197
|
+
|
|
2198
|
+
ni.pubsubMutex.Lock()
|
|
2199
|
+
topic, exists := ni.topics[goChannel]
|
|
2200
|
+
if !exists {
|
|
2201
|
+
var err error
|
|
2202
|
+
logger.Debugf("[GO] - Instance %d: Joining PubSub topic '%s' for sending.\n", ni.instanceIndex, goChannel)
|
|
2203
|
+
topic, err = instancePubsub.Join(goChannel) // ps is instancePubsub
|
|
2204
|
+
if err != nil {
|
|
2205
|
+
ni.pubsubMutex.Unlock()
|
                // Failed to join PubSub topic
                return jsonErrorResponse(fmt.Sprintf("Failed to join PubSub topic '%s'", goChannel), err)
            }
            ni.topics[goChannel] = topic
            logger.Debugf("[GO] ✅ Instance %d: Joined PubSub topic: %s for publishing.\n", ni.instanceIndex, goChannel)
        }
        ni.pubsubMutex.Unlock()

        // Directly publish the raw Protobuf payload.
        if err := topic.Publish(ni.ctx, goData); err != nil {
            // Failed to publish to topic
            return jsonErrorResponse(fmt.Sprintf("Failed to publish to topic '%s'", goChannel), err)
        }
        logger.Infof("[GO] 📡 Instance %d: Broadcast to topic '%s' (%d bytes)\n", ni.instanceIndex, goChannel, len(goData))
        return jsonSuccessResponse(fmt.Sprintf("Message broadcast to topic %s", goChannel))

    } else if strings.Contains(goChannel, "::dm:") {
        // --- Direct Peer-to-Peer Message Sending (Persistent Stream Logic) ---
        receiverChannelIDStr := strings.Split(goChannel, "::dm:")[1] // Extract the receiver's channel ID from the "dm:<peerID>-<channelSpecifier>" part
        peerIDStr := strings.Split(receiverChannelIDStr, "-")[0]
        pid, err := peer.Decode(peerIDStr)
        if err != nil {
            // Invalid peer ID format
            return jsonErrorResponse("Invalid peer ID format in channel string", err)
        }

        if pid == ni.host.ID() {
            // Attempt to send direct message to self
            return jsonErrorResponse("Attempt to send direct message to self is invalid", nil)
        }

        ni.streamsMutex.Lock()
        stream, streamExists := ni.persistentChatStreams[pid]
        ni.streamsMutex.Unlock()

        // If a stream exists, try writing to it.
        if streamExists {
            logger.Debugf("[GO] ⏳ Instance %d: Reusing stream %s to %s\n", ni.instanceIndex, stream.ID(), pid)
            err = writeDirectMessageFrame(stream, goChannel, goData)
            if err == nil {
                logger.Infof("[GO] 📤 Instance %d: Sent to %s via Stream %s (Reused)\n", ni.instanceIndex, pid, stream.ID())
                return jsonSuccessResponse(fmt.Sprintf("Direct message sent to %s (reused stream).", pid))
            }

            // Write failed: now we lock to remove the broken stream.
            logger.Warnf("[GO] ⚠️ Instance %d: Write failed on Stream %s to %s: %v. Removing.\n", ni.instanceIndex, stream.ID(), pid, err)
            ni.streamsMutex.Lock()
            // Check that the stream in the map is still the broken one before deleting.
            if s, ok := ni.persistentChatStreams[pid]; ok && s == stream {
                delete(ni.persistentChatStreams, pid)
            }
            ni.streamsMutex.Unlock()
            _ = stream.Close() // Close the broken stream
            return jsonErrorResponse(fmt.Sprintf("Failed to write to stream %s (closed).", pid), err)
        } else {
            // The stream does not exist; we need to create a new one.
            logger.Debugf("[GO] ⏳ Instance %d: Creating NEW stream to %s...\n", ni.instanceIndex, pid)
            streamCtx, cancel := context.WithTimeout(ni.ctx, 20*time.Second)
            defer cancel()

            newStream, err := ni.host.NewStream(
                network.WithAllowLimitedConn(streamCtx, UnaiverseChatProtocol),
                pid,
                UnaiverseChatProtocol,
            )

            if err != nil {
                return jsonErrorResponse(fmt.Sprintf("Failed to open new stream to %s.", pid), err)
            }

            // --- RACE CONDITION HANDLING ---
            // Double-check whether another goroutine created a stream while we were unlocked.
            ni.streamsMutex.Lock()
            existingStream, existsNow := ni.persistentChatStreams[pid]
            if existsNow {
                logger.Warnf("[GO] ⚠️ Instance %d: Race detected. Using existing stream %s, closing our new %s.\n", ni.instanceIndex, existingStream.ID(), newStream.ID())
                _ = newStream.Close() // Close the redundant stream we just created.
                stream = existingStream
            } else {
                logger.Debugf("[GO] ✅ Instance %d: Opened and stored new persistent stream %s to %s\n", ni.instanceIndex, newStream.ID(), pid)
                ni.persistentChatStreams[pid] = newStream
                stream = newStream
                go handleStream(ni, newStream)
            }
            ni.streamsMutex.Unlock()

            // --- Write the message to the determined stream ---
            err = writeDirectMessageFrame(stream, goChannel, goData)
            if err != nil {
                logger.Errorf("[GO] ❌ Instance %d: Write failed on NEW stream %s to %s: %v.\n", ni.instanceIndex, stream.ID(), pid, err)
                _ = stream.Close()
                ni.streamsMutex.Lock()
                if s, ok := ni.persistentChatStreams[pid]; ok && s == stream {
                    delete(ni.persistentChatStreams, pid)
                }
                ni.streamsMutex.Unlock()
                return jsonErrorResponse(fmt.Sprintf("Failed to write to new stream to '%s' (needs reconnect).", pid), err)
            }

            logger.Infof("[GO] 📤 Instance %d: Sent to %s via Stream %s (New)\n", ni.instanceIndex, pid, stream.ID())
            return jsonSuccessResponse(fmt.Sprintf("Direct message sent to %s (new stream).", pid))
        }
    } else {
        // Invalid channel format
        return jsonErrorResponse(fmt.Sprintf("Invalid channel format '%s'", goChannel), nil)
    }
}
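
// Illustrative sketch, not part of the library: writeDirectMessageFrame is
// assumed to length-prefix each frame so the receiver can split the byte
// stream back into discrete messages (consistent with this file's
// "encoding/binary" import). The real helper also carries the channel string;
// a minimal payload-only version, assuming a 4-byte big-endian length prefix,
// could look like:
//
//      func writeFrame(w io.Writer, payload []byte) error {
//          var lenBuf [4]byte
//          binary.BigEndian.PutUint32(lenBuf[:], uint32(len(payload)))
//          if _, err := w.Write(lenBuf[:]); err != nil {
//              return err
//          }
//          _, err := w.Write(payload)
//          return err
//      }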

// SubscribeToTopic joins a PubSub topic and starts listening for messages for a specific instance.
// Parameters:
// - instanceIndexC (C.int): The index of the node instance.
// - channelC (*C.char): The channel associated with the topic to subscribe to.
//
// Returns:
// - *C.char: A JSON string indicating success or failure.
// Structure: `{"state":"Success", "message":"Subscribed to topic ..."}` or `{"state":"Error", "message":"..."}`.
// - IMPORTANT: The caller MUST free the returned C string using `FreeString`.
//
//export SubscribeToTopic
func SubscribeToTopic(
    instanceIndexC C.int,
    channelC *C.char,
) *C.char {

    ni, err := getInstance(int(instanceIndexC))
    if err != nil {
        return jsonErrorResponse("Invalid instance", err)
    }

    // Convert C string input to Go string.
    channel := C.GoString(channelC)
    logger.Debugf("[GO] <sub> Instance %d: Attempting to subscribe to topic: %s\n", ni.instanceIndex, channel)

    // Get instance-specific state and mutex.
    instancePubsub := ni.pubsub
    if ni.host == nil || instancePubsub == nil {
        return jsonErrorResponse(
            fmt.Sprintf("Instance %d: Host or PubSub not initialized", ni.instanceIndex), nil,
        )
    }

    // Lock the mutex for safe access to the shared topics and subscriptions maps for this instance.
    ni.pubsubMutex.Lock()
    defer ni.pubsubMutex.Unlock() // Ensure the mutex is unlocked when the function returns.

    // Check whether this instance is already subscribed to the topic.
    if _, exists := ni.subscriptions[channel]; exists {
        logger.Debugf("[GO] <sub> Instance %d: Already subscribed to topic: %s\n", ni.instanceIndex, channel)
        // Return success, indicating the desired state is already met.
        return jsonSuccessResponse(
            fmt.Sprintf("Instance %d: Already subscribed to topic %s", ni.instanceIndex, channel),
        ) // Caller frees.
    }

    // If the channel ends with ":rv", it indicates a rendezvous topic, so we remove the other rendezvous
    // topics and subscriptions from the instance state and reset the rendezvous-discovered-peers state.
    if strings.HasSuffix(channel, ":rv") {
        logger.Debugf(" - Instance %d: Joining rendezvous topic '%s'. Cleaning up previous rendezvous state.\n", ni.instanceIndex, channel)
        // Remove all existing rendezvous topics and subscriptions for this instance.
        for existingChannel := range ni.topics {
            if strings.HasSuffix(existingChannel, ":rv") {
                logger.Debugf(" - Instance %d: Removing existing rendezvous topic '%s' from instance state.\n", ni.instanceIndex, existingChannel)

                // Close the topic handle if it exists.
                if topic, exists := ni.topics[existingChannel]; exists {
                    if err := topic.Close(); err != nil {
                        logger.Warnf("⚠️ Instance %d: Error closing topic handle for '%s': %v (proceeding with map cleanup)\n", ni.instanceIndex, existingChannel, err)
                    }
                    delete(ni.topics, existingChannel)
                }

                // Remove the subscription if it exists.
                if sub, exists := ni.subscriptions[existingChannel]; exists {
                    sub.Cancel()                              // Cancel the subscription
                    delete(ni.subscriptions, existingChannel) // Remove from map
                }

                // Also clean up the rendezvous-discovered peers for this instance.
                logger.Debugf(" - Instance %d: Resetting rendezvous state for new topic '%s'.\n", ni.instanceIndex, channel)
                ni.rendezvousMutex.Lock()
                ni.rendezvousState = nil
                ni.rendezvousMutex.Unlock()
            }
        }
        logger.Debugf(" - Instance %d: Cleaned up previous rendezvous state.\n", ni.instanceIndex)
    }

    // --- Join the Topic ---
    // Get a handle for the topic. `Join` creates the topic if it doesn't exist locally
    // and returns a handle. It's safe to call Join multiple times; it's idempotent.
    // We store the handle primarily for potential future publishing from this node.
    topic, err := instancePubsub.Join(channel)
    if err != nil {
        errMsg := fmt.Sprintf("Instance %d: Failed to join topic '%s'", ni.instanceIndex, channel)
        return jsonErrorResponse(errMsg, err) // Caller frees.
    }
    // Store the topic handle in the map for this instance.
    ni.topics[channel] = topic
    logger.Debugf("[GO] - Instance %d: Obtained topic handle for: %s\n", ni.instanceIndex, channel)

    // --- Subscribe to the Topic ---
    // Create an actual subscription to receive messages from the topic.
    sub, err := topic.Subscribe()
    if err != nil {
        // Close the newly created topic handle.
        if closeErr := topic.Close(); closeErr != nil {
            // Log the error but proceed with cleanup.
            logger.Warnf("[GO] ⚠️ Instance %d: Error closing topic handle for '%s': %v (proceeding with map cleanup)\n", ni.instanceIndex, channel, closeErr)
        }
        // Remove the topic handle from our local map for this instance.
        delete(ni.topics, channel)
        errMsg := fmt.Sprintf("Instance %d: Failed to subscribe to topic '%s' after joining", ni.instanceIndex, channel)
        return jsonErrorResponse(errMsg, err) // Caller frees.
    }
    // Store the subscription object in the map for this instance.
    ni.subscriptions[channel] = sub
    logger.Debugf("[GO] - Instance %d: Created subscription object for: %s\n", ni.instanceIndex, channel)

    // --- Start Listener Goroutine ---
    // Launch a background goroutine that continuously reads messages from this new
    // subscription and adds them to the message buffer for this instance.
    go readFromSubscription(ni, sub)

    logger.Debugf("[GO] ✅ Instance %d: Subscribed successfully to topic: %s and started listener.\n", ni.instanceIndex, channel)
    return jsonSuccessResponse(
        fmt.Sprintf("Instance %d: Subscribed to topic %s", ni.instanceIndex, channel),
    ) // Caller frees.
}
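
// Sketch for illustration only: the listener loop launched above is expected
// to block on sub.Next and hand each message to this instance's message
// store. Assuming a shape along these lines (all names hypothetical except
// the go-libp2p-pubsub Subscription API):
//
//      func readLoop(ctx context.Context, sub *pubsub.Subscription) {
//          for {
//              msg, err := sub.Next(ctx) // returns an error once Cancel() is called or ctx is done
//              if err != nil {
//                  return // subscription cancelled or context finished
//              }
//              enqueue(msg.GetFrom().String(), msg.Data) // hypothetical per-channel buffer helper
//          }
//      }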

// UnsubscribeFromTopic cancels an active PubSub subscription and cleans up related resources for a specific instance.
// Parameters:
// - instanceIndexC (C.int): The index of the node instance.
// - channelC (*C.char): The channel associated with the topic to unsubscribe from.
//
// Returns:
// - *C.char: A JSON string indicating success or failure.
// Structure: `{"state":"Success", "message":"Unsubscribed from topic ..."}` or `{"state":"Error", "message":"..."}`.
// - IMPORTANT: The caller MUST free the returned C string using `FreeString`.
//
//export UnsubscribeFromTopic
func UnsubscribeFromTopic(
    instanceIndexC C.int,
    channelC *C.char,
) *C.char {

    ni, err := getInstance(int(instanceIndexC))
    if err != nil {
        // If the instance is already gone, we can consider it "unsubscribed".
        logger.Warnf("[GO] ⚠️ Instance %d: Unsubscribe called but instance is not ready: %v\n", int(instanceIndexC), err)
        return jsonSuccessResponse(fmt.Sprintf("Instance %d: Not subscribed (instance not running)", int(instanceIndexC)))
    }

    // Convert C string input to Go string.
    channel := C.GoString(channelC)
    logger.Debugf("[GO] </sub> Instance %d: Attempting to unsubscribe from topic: %s\n", ni.instanceIndex, channel)

    // Lock the mutex for write access to the shared maps for this instance.
    ni.pubsubMutex.Lock()
    defer ni.pubsubMutex.Unlock()

    // --- Cancel the Subscription ---
    // Find the subscription object in the map for this instance.
    sub, subExists := ni.subscriptions[channel]
    if !subExists {
        logger.Warnf("[GO] </sub> Instance %d: Not currently subscribed to topic: %s (or already unsubscribed)\n", ni.instanceIndex, channel)
        // Also remove a potentially stale topic handle if the subscription is gone.
        delete(ni.topics, channel)
        return jsonSuccessResponse(
            fmt.Sprintf("Instance %d: Not currently subscribed to topic %s", ni.instanceIndex, channel),
        ) // Caller frees.
    }

    // Cancel the subscription. This signals the associated `readFromSubscription` goroutine
    // (waiting on `sub.Next()`) to stop by causing `sub.Next()` to return an error (usually `ErrSubscriptionCancelled`).
    // It also cleans up internal PubSub resources related to this subscription.
    sub.Cancel()
    // Remove the subscription entry from our local map for this instance.
    delete(ni.subscriptions, channel)
    logger.Debugf("[GO] - Instance %d: Cancelled subscription object for topic: %s\n", ni.instanceIndex, channel)

    // --- Close the Topic Handle ---
    // Find the corresponding topic handle for this instance. It's good practice to close this as well,
    // although PubSub might manage its lifecycle internally based on subscriptions.
    // Explicit closing ensures resources related to the *handle* (like internal routing state) are released.
    topic, topicExists := ni.topics[channel]
    if topicExists {
        logger.Debugf("[GO] - Instance %d: Closing topic handle for: %s\n", ni.instanceIndex, channel)
        // Close the topic handle.
        if err := topic.Close(); err != nil {
            // Log the error but proceed with cleanup.
            logger.Warnf("[GO] ⚠️ Instance %d: Error closing topic handle for '%s': %v (proceeding with map cleanup)\n", ni.instanceIndex, channel, err)
        }
        // Remove the topic handle from our local map for this instance.
        delete(ni.topics, channel)
        logger.Debugf("[GO] - Instance %d: Removed topic handle from local map for topic: %s\n", ni.instanceIndex, channel)
    } else {
        logger.Debugf("[GO] - Instance %d: No topic handle found in local map for '%s' to close (already removed or possibly never stored?).\n", ni.instanceIndex, channel)
        // Ensure removal from the map even if the handle wasn't found (e.g., inconsistent state).
        delete(ni.topics, channel)
    }

    // If the channel ends with ":rv", it indicates a rendezvous topic: the topic and subscription are
    // already closed above, but we also need to clear the rendezvous-discovered-peers state.
    if strings.HasSuffix(channel, ":rv") {
        logger.Debugf(" - Instance %d: Unsubscribing from rendezvous topic. Clearing state.\n", ni.instanceIndex)
        ni.rendezvousMutex.Lock()
        ni.rendezvousState = nil
        ni.rendezvousMutex.Unlock()
        logger.Debugf("[GO] - Instance %d: Cleaned up previous rendezvous state.\n", ni.instanceIndex)
    }

    logger.Infof("[GO] ✅ Instance %d: Unsubscribed successfully from topic: %s\n", ni.instanceIndex, channel)
    return jsonSuccessResponse(
        fmt.Sprintf("Instance %d: Unsubscribed from topic %s", ni.instanceIndex, channel),
    ) // Caller frees.
}

// MessageQueueLength returns the total number of messages waiting across all channel queues for a specific instance.
// Parameters:
// - instanceIndexC (C.int): The index of the node instance.
//
// Returns:
// - C.int: The total number of messages. Returns -1 if the instance index is invalid.
//
//export MessageQueueLength
func MessageQueueLength(
    instanceIndexC C.int,
) C.int {

    ni, err := getInstance(int(instanceIndexC))
    if err != nil {
        logger.Errorf("[GO] ❌ MessageQueueLength: %v\n", err)
        return -1 // Return -1 if the instance isn't valid
    }

    // Get the message store for this instance.
    store := ni.messageStore
    if store == nil {
        logger.Errorf("[GO] ❌ Instance %d: Message store not initialized.\n", ni.instanceIndex)
        return 0 // Return 0 if the store is nil (effectively empty)
    }

    store.mu.Lock()
    defer store.mu.Unlock()

    totalLength := 0
    // TODO: this makes sense, but not for the check we are doing from Python; think about it.
    for _, messageList := range store.messagesByChannel {
        totalLength += messageList.Len()
    }

    return C.int(totalLength)
}
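
// Polling sketch (illustrative; real callers typically go through ctypes/CFFI
// from Python, but the contract is the same): check the queue length first,
// pop only when messages are waiting, and free the returned string afterwards.
// The instance index 0 below is just an example value.
//
//      if n := MessageQueueLength(0); n > 0 {
//          res := PopMessages(0)
//          // ... decode the JSON array of popped messages ...
//          FreeString(res)
//      }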

// PopMessages retrieves the oldest message from each channel's queue for a specific instance.
// This function always pops at most one message per channel that has messages.
// Parameters:
// - instanceIndexC (C.int): The index of the node instance.
//
// Returns:
// - *C.char: A JSON string representing a list of the popped messages.
// Returns `{"state":"Empty"}` if no messages were available in any queue.
// Returns `{"state":"Error", "message":"..."}` on failure.
// - IMPORTANT: The caller MUST free the returned C string using `FreeString`.
//
//export PopMessages
func PopMessages(
    instanceIndexC C.int,
) *C.char {

    ni, err := getInstance(int(instanceIndexC))
    if err != nil {
        return jsonErrorResponse("Invalid instance", err)
    }

    // Get the message store for this instance.
    store := ni.messageStore
    if store == nil {
        logger.Errorf("[GO] ❌ Instance %d: PopMessages: Message store not initialized.\n", ni.instanceIndex)
        return jsonErrorResponse(fmt.Sprintf("Instance %d: Message store not initialized", ni.instanceIndex), nil)
    }

    store.mu.Lock() // Lock for the entire operation
    defer store.mu.Unlock()

    if len(store.messagesByChannel) == 0 {
        return C.CString(`{"state":"Empty"}`)
    }

    // Collect the popped messages, at most one per non-empty channel.
    var poppedMessages []*QueuedMessage
    for channel, messageList := range store.messagesByChannel {
        if messageList.Len() > 0 {
            element := messageList.Front()
            msg := element.Value.(*QueuedMessage)
            poppedMessages = append(poppedMessages, msg)
            messageList.Remove(element)
        }
        // If the queue is now empty, delete it from the map to save space.
        if messageList.Len() == 0 {
            delete(store.messagesByChannel, channel)
        }
    }

    // After iterating, check whether we actually popped anything.
    if len(poppedMessages) == 0 {
        return C.CString(`{"state":"Empty"}`)
    }

    // Marshal the slice of popped messages into a JSON array.
    // We build a temporary structure for JSON marshalling so the binary data can be base64-encoded.
    payloads := make([]map[string]interface{}, len(poppedMessages))
    for i, msg := range poppedMessages {
        payloads[i] = map[string]interface{}{
            "from": msg.From,
            "data": base64.StdEncoding.EncodeToString(msg.Data),
        }
    }

    jsonBytes, err := json.Marshal(payloads)
    if err != nil {
        logger.Errorf("[GO] ❌ Instance %d: PopMessages: Failed to marshal messages to JSON: %v\n", ni.instanceIndex, err)
        // The messages have already been popped from the queue at this point;
        // returning an error is the best we can do.
        return jsonErrorResponse(
            fmt.Sprintf("Instance %d: Failed to marshal popped messages", ni.instanceIndex), err,
        )
    }

    return C.CString(string(jsonBytes))
}
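
// Consumer-side sketch (illustrative): the JSON array returned by PopMessages
// can be decoded and each payload base64-decoded to recover the raw bytes.
//
//      var popped []struct {
//          From string `json:"from"`
//          Data string `json:"data"`
//      }
//      _ = json.Unmarshal(jsonBytes, &popped)
//      for _, m := range popped {
//          raw, _ := base64.StdEncoding.DecodeString(m.Data)
//          _ = raw // the raw Protobuf payload as originally received
//      }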

// CloseNode gracefully shuts down the libp2p host, cancels subscriptions, closes connections,
// and cleans up all associated resources.
// Parameters:
// - instanceIndexC (C.int): The index of the node instance. If -1, closes all initialized instances.
//
// Returns:
// - *C.char: A JSON string indicating the result of the closure attempt.
// Structure: `{"state":"Success", "message":"Node closed successfully"}` or `{"state":"Error", "message":"Error closing host: ..."}`.
// If closing all instances, the message summarizes the results.
// - IMPORTANT: The caller MUST free the returned C string using `FreeString`.
//
//export CloseNode
func CloseNode(
    instanceIndexC C.int,
) *C.char {

    instanceIndex := int(instanceIndexC)

    if instanceIndex == -1 {
        logger.Debugf("[GO] 🛑 Closing all initialized instances of this node...")
        successCount := 0
        errorCount := 0
        var errorMessages []string

        // Acquire the global lock.
        globalInstanceMutex.Lock()
        defer globalInstanceMutex.Unlock()

        for i, ni := range allInstances {
            if ni != nil {
                logger.Debugf("[GO] 🛑 Attempting to close instance %d...\n", i)

                err := ni.Close()     // Shut this instance down
                allInstances[i] = nil // Remove it from the slice

                if err != nil {
                    errorCount++
                    errorMessages = append(errorMessages, fmt.Sprintf("Instance %d: %v", i, err))
                    logger.Errorf("[GO] ❌ Instance %d: Close failed: %v\n", i, err)
                } else {
                    successCount++
                    logger.Debugf("[GO] ✅ Instance %d: Closed successfully.\n", i)
                }
            }
        }

        summaryMsg := fmt.Sprintf("Closed %d nodes successfully, %d failed.", successCount, errorCount)
        if errorCount > 0 {
            logger.Errorf("[GO] ❌ Errors encountered during batch close:\n")
            for _, msg := range errorMessages {
                logger.Errorf("%s\n", msg)
            }
            return jsonErrorResponse(summaryMsg, fmt.Errorf("details: %v", errorMessages))
        }

        logger.Infof("[GO] 🛑 All initialized nodes closed.")
        return jsonSuccessResponse(summaryMsg)

    } else {
        if instanceIndex < 0 || instanceIndex >= maxInstances {
            err := fmt.Errorf("invalid instance index: %d. Must be between 0 and %d", instanceIndex, maxInstances-1)
            return jsonErrorResponse("Invalid instance index for single close", err) // Caller frees.
        }

        globalInstanceMutex.Lock()
        defer globalInstanceMutex.Unlock()

        instance := allInstances[instanceIndex]
        if instance == nil {
            logger.Debugf("[GO] ℹ️ Instance %d: Node was already closed.\n", instanceIndex)
            return jsonSuccessResponse(fmt.Sprintf("Instance %d: Node was already closed", instanceIndex))
        }

        err := instance.Close()
        allInstances[instanceIndex] = nil

        if err != nil {
            return jsonErrorResponse(fmt.Sprintf("Instance %d: Error closing host", instanceIndex), err)
        }

        logger.Infof("[GO] 🛑 Instance %d: Node closed successfully.\n", instanceIndex)
        return jsonSuccessResponse(fmt.Sprintf("Instance %d: Node closed successfully", instanceIndex))
    }
}
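
// Shutdown sketch (illustrative): callers can close a single instance by
// index, or pass -1 to close every initialized instance in one call. Either
// way the returned JSON string must be released with FreeString:
//
//      res := CloseNode(-1) // batch close; the message summarizes successes and failures
//      // ... inspect the JSON result ...
//      FreeString(res)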

// FreeString is called from the C/Python side to release the memory allocated by Go
// when returning a `*C.char` (via `C.CString`).
// Parameters:
// - s (*C.char): The pointer to the C string previously returned by an exported Go function.
//
//export FreeString
func FreeString(
    s *C.char,
) {

    // Check for a NULL pointer before attempting to free.
    if s != nil {
        C.free(unsafe.Pointer(s)) // Use C.free via unsafe.Pointer to release the memory.
    }
}

// FreeInt is provided for completeness but is generally **NOT** needed if Go functions
// only return `C.int` (by value). It would only be necessary if a Go function manually
// allocated memory for a C integer (`*C.int`) and returned the pointer, which is uncommon.
// Parameters:
// - i (*C.int): The pointer to the C integer previously allocated and returned by Go.
//
//export FreeInt
func FreeInt(
    i *C.int,
) {

    // Check for a NULL pointer.
    if i != nil {
        logger.Warnf("[GO] ⚠️ FreeInt called - ensure a *C.int pointer was actually allocated and returned from Go (this is unusual).")
        C.free(unsafe.Pointer(i)) // Free the memory if it was indeed allocated.
    }
}

// main is the entry point for a Go executable.
func main() {
    // This message will typically only be seen if you run `go run lib.go`
    // or build and run it as a standard executable, NOT when it is used as a shared library.
    logger.Debugf("[GO] libp2p Go library main function (not executed in c-shared library mode)")
}