unaiverse 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of unaiverse might be problematic. Click here for more details.

Files changed (45) hide show
  1. unaiverse/__init__.py +19 -0
  2. unaiverse/agent.py +2008 -0
  3. unaiverse/agent_basics.py +1844 -0
  4. unaiverse/clock.py +186 -0
  5. unaiverse/dataprops.py +1209 -0
  6. unaiverse/hsm.py +1880 -0
  7. unaiverse/modules/__init__.py +18 -0
  8. unaiverse/modules/cnu/__init__.py +17 -0
  9. unaiverse/modules/cnu/cnus.py +536 -0
  10. unaiverse/modules/cnu/layers.py +261 -0
  11. unaiverse/modules/cnu/psi.py +60 -0
  12. unaiverse/modules/hl/__init__.py +15 -0
  13. unaiverse/modules/hl/hl_utils.py +411 -0
  14. unaiverse/modules/networks.py +1509 -0
  15. unaiverse/modules/utils.py +680 -0
  16. unaiverse/networking/__init__.py +16 -0
  17. unaiverse/networking/node/__init__.py +18 -0
  18. unaiverse/networking/node/connpool.py +1265 -0
  19. unaiverse/networking/node/node.py +2203 -0
  20. unaiverse/networking/node/profile.py +446 -0
  21. unaiverse/networking/node/tokens.py +79 -0
  22. unaiverse/networking/p2p/__init__.py +259 -0
  23. unaiverse/networking/p2p/golibp2p.py +18 -0
  24. unaiverse/networking/p2p/golibp2p.pyi +135 -0
  25. unaiverse/networking/p2p/lib.go +2495 -0
  26. unaiverse/networking/p2p/lib_types.py +312 -0
  27. unaiverse/networking/p2p/message_pb2.py +63 -0
  28. unaiverse/networking/p2p/messages.py +265 -0
  29. unaiverse/networking/p2p/mylogger.py +77 -0
  30. unaiverse/networking/p2p/p2p.py +963 -0
  31. unaiverse/streamlib/__init__.py +15 -0
  32. unaiverse/streamlib/streamlib.py +210 -0
  33. unaiverse/streams.py +763 -0
  34. unaiverse/utils/__init__.py +16 -0
  35. unaiverse/utils/ask_lone_wolf.json +27 -0
  36. unaiverse/utils/lone_wolf.json +19 -0
  37. unaiverse/utils/misc.py +305 -0
  38. unaiverse/utils/sandbox.py +293 -0
  39. unaiverse/utils/server.py +435 -0
  40. unaiverse/world.py +175 -0
  41. unaiverse-0.1.0.dist-info/METADATA +363 -0
  42. unaiverse-0.1.0.dist-info/RECORD +45 -0
  43. unaiverse-0.1.0.dist-info/WHEEL +5 -0
  44. unaiverse-0.1.0.dist-info/licenses/LICENSE +43 -0
  45. unaiverse-0.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,2495 @@
1
+ // lib.go
2
+ // This Go program compiles into a C shared library (.so file on Linux/macOS, .dll on Windows)
3
+ // exposing libp2p functionalities (host creation, peer connection, pubsub, direct messaging)
4
+ // for use by other languages, primarily Python via CFFI or ctypes.
5
+ package main
6
+
7
+ /*
8
+ #include <stdlib.h>
9
+ */
10
+ import "C" // Enables CGo features, allowing Go to call C code and vice-versa.
11
+
12
+ import (
13
+ // Standard Go libraries
14
+ "bytes" // For byte buffer manipulations (e.g., encoding/decoding, separators)
15
+ "container/list" // For an efficient ordered list (doubly-linked list for queues)
16
+ "context" // For managing cancellation signals and deadlines across API boundaries and goroutines
17
+ "encoding/base64" // For encoding binary message data into JSON-safe strings
18
+ "encoding/binary" // For encoding/decoding length prefixes in stream communication
19
+ "encoding/json" // For marshalling/unmarshalling data structures to/from JSON (used for C API communication)
20
+ "fmt" // For formatted string creation and printing
21
+ "io" // For input/output operations (e.g., reading from streams)
22
+ "log" // For logging information, warnings, and errors
23
+ "net" // For network-related errors and interfaces
24
+ "os" // For interacting with the operating system (e.g., Stdout)
25
+ "strings" // For string manipulations (e.g., trimming, splitting)
26
+ "sync" // For synchronization primitives like Mutexes and RWMutexes to protect shared data
27
+ "time" // For time-related functions (e.g., timeouts, timestamps)
28
+ "unsafe" // For using Go pointers with C code (specifically C.free)
29
+
30
+ // Core libp2p libraries
31
+ libp2p "github.com/libp2p/go-libp2p" // Main libp2p package for creating a host
32
+ dht "github.com/libp2p/go-libp2p-kad-dht"
33
+ "github.com/libp2p/go-libp2p/core/host" // Defines the main Host interface, representing a libp2p node
34
+ "github.com/libp2p/go-libp2p/core/network" // Defines network interfaces like Stream and Connection
35
+ "github.com/libp2p/go-libp2p/core/peer" // Defines Peer ID and AddrInfo types
36
+ "github.com/libp2p/go-libp2p/core/peerstore" // Defines the Peerstore interface for storing peer metadata (addresses, keys)
37
+ "github.com/libp2p/go-libp2p/core/routing"
38
+ rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" // For managing resources (bandwidth, memory) for libp2p hosts
39
+ "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client" // For establishing outbound relayed connections (acting as a client)
40
+ rc "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay" // Import for relay service options
41
+ "github.com/libp2p/go-libp2p/core/event"
42
+
43
+ // transport protocols for libp2p
44
+ quic "github.com/libp2p/go-libp2p/p2p/transport/quic" // QUIC transport for peer-to-peer connections (e.g., for mobile devices)
45
+ "github.com/libp2p/go-libp2p/p2p/transport/tcp"
46
+ webrtc "github.com/libp2p/go-libp2p/p2p/transport/webrtc" // WebRTC transport for peer-to-peer connections (e.g., for browsers or mobile devices)
47
+
48
+ // protobuf
49
+ "google.golang.org/protobuf/proto"
50
+ pg "unaiverse/networking/p2p/lib/proto-go"
51
+
52
+ // PubSub library
53
+ pubsub "github.com/libp2p/go-libp2p-pubsub" // GossipSub implementation for publish/subscribe messaging
54
+
55
+ // Multiaddr libraries (libp2p's addressing format)
56
+ ma "github.com/multiformats/go-multiaddr" // Core multiaddr parsing and manipulation
57
+ manet "github.com/multiformats/go-multiaddr/net" // Utilities for working with multiaddrs and net interfaces (checking loopback, etc.)
58
+ )
59
+
60
// ChatProtocol defines the protocol ID string used for direct peer-to-peer
// messaging streams. This ensures that both peers understand how to interpret
// the data on the stream.
const ChatProtocol = "/chat/1.0.0"

// ExtendedPeerInfo holds information about a connected peer.
type ExtendedPeerInfo struct {
	ID          peer.ID        `json:"id"`           // The Peer ID of the connected peer.
	Addrs       []ma.Multiaddr `json:"addrs"`        // The Multiaddr(s) associated with the peer.
	ConnectedAt time.Time      `json:"connected_at"` // Timestamp when the connection was established.
	Direction   string         `json:"direction"`    // Connection direction; the ConnectedF notifier stores "incoming", "outgoing" or "unknown".
	Misc        int            `json:"misc"`         // Misc information (integer), custom usage.
}

// RendezvousState holds the discovered peers from a rendezvous topic,
// along with metadata about the freshness of the data.
type RendezvousState struct {
	Peers       map[peer.ID]ExtendedPeerInfo `json:"peers"`
	UpdateCount int64                        `json:"update_count"` // Counter copied from the publisher's rendezvous payload.
}

// QueuedMessage represents a message received either directly or via PubSub.
//
// This lightweight version stores the binary payload in the `Data` field,
// while the `From` field contains the Peer ID of the sender for security reasons.
// It has to match with the 'sender' field in the ProtoBuf payload of the message.
type QueuedMessage struct {
	From peer.ID `json:"from"` // The VERIFIED peer ID of the sender from the network layer.
	Data []byte  `json:"-"`    // The raw data payload (Protobuf encoded); excluded from JSON output.
}

// MessageStore holds the QueuedMessages for each channel in separate FIFO queues.
// It has a maximum number of channels and a maximum queue length per channel
// (both caps are enforced in storeReceivedMessage).
type MessageStore struct {
	mu                sync.Mutex            // protects the message store from concurrent access.
	messagesByChannel map[string]*list.List // stores a FIFO queue of *QueuedMessage for each channel
}

// CreateNodeResponse defines the structure of our success message:
// the node's listen addresses and whether it is publicly reachable.
type CreateNodeResponse struct {
	Addresses []string `json:"addresses"`
	IsPublic  bool     `json:"isPublic"`
}
102
+
103
// --- Multi-Instance State Management ---

var (
	// libp2p configuration parameters, shared by all instances.
	maxInstances       int    // Number of usable instance slots (bounds-checked by checkInstanceIndex).
	maxChannelQueueLen int    // Maximum queued messages per channel (see MessageStore).
	maxUniqueChannels  int    // Maximum distinct channels per MessageStore.
	MaxMessageSize     uint32 // Upper bound for a single PubSub message, passed to GossipSub.

	// Slices holding per-instance state, indexed by instance index.
	hostInstances                      []host.Host
	pubsubInstances                    []*pubsub.PubSub
	contexts                           []context.Context
	cancelContexts                     []context.CancelFunc
	topicsInstances                    []map[string]*pubsub.Topic        // map[instanceIndex]map[channel]*pubsub.Topic
	subscriptionsInstances             []map[string]*pubsub.Subscription // map[instanceIndex]map[channel]*pubsub.Subscription
	connectedPeersInstances            []map[peer.ID]ExtendedPeerInfo    // map[instanceIndex]map[peerID]ExtendedPeerInfo
	rendezvousDiscoveredPeersInstances []*RendezvousState                // Slice of pointers to the state struct
	persistentChatStreamsInstances     []map[peer.ID]network.Stream      // map[instanceIndex]map[peerID]network.Stream
	messageStoreInstances              []*MessageStore                   // map[instanceIndex]*MessageStore

	// Mutexes for protecting concurrent access to instance-specific data.
	connectedPeersMutexes            []sync.RWMutex
	persistentChatStreamsMutexes     []sync.Mutex
	pubsubMutexes                    []sync.RWMutex // Protects topicsInstances and subscriptionsInstances
	rendezvousDiscoveredPeersMutexes []sync.RWMutex // Protects rendezvousDiscoveredPeersInstances.

	// Global mutex to protect access to the instance state slices themselves
	// (e.g., during initialization or checking if an instance exists).
	// Use sparingly to avoid contention.
	instanceStateMutex sync.RWMutex

	// Flag to track if a specific instance index has been initialized.
	isInitialized []bool
)
138
+
139
+ // --- Helper Functions ---
140
+ // jsonErrorResponse creates a JSON string representing an error state.
141
+ // It takes a base message and an optional error, formats them, escapes the message
142
+ // for JSON embedding, and returns a C string pointer (`*C.char`).
143
+ // The caller (usually C/Python) is responsible for freeing this C string using FreeString.
144
+ func jsonErrorResponse(
145
+ message string,
146
+ err error,
147
+ ) *C.char {
148
+
149
+ errMsg := message
150
+ if err != nil {
151
+ errMsg = fmt.Sprintf("%s: %s", message, err.Error())
152
+ }
153
+ log.Printf("[GO] ❌ Error: %s", errMsg)
154
+ // Ensure error messages are escaped properly for JSON embedding
155
+ escapedErrMsg := escapeStringForJSON(errMsg)
156
+ // Format into a standard {"state": "Error", "message": "..."} JSON structure.
157
+ jsonError := fmt.Sprintf(`{"state":"Error","message":"%s"}`, escapedErrMsg)
158
+ // Convert the Go string to a C string (allocates memory in C heap).
159
+ return C.CString(jsonError)
160
+ }
161
+
162
+ // jsonSuccessResponse creates a JSON string representing a success state.
163
+ // It takes an arbitrary Go object (`message`), marshals it into JSON, wraps it
164
+ // in a standard {"state": "Success", "message": {...}} structure, and returns
165
+ // a C string pointer (`*C.char`).
166
+ // The caller (usually C/Python) is responsible for freeing this C string using FreeString.
167
+ func jsonSuccessResponse(
168
+ message interface{},
169
+ ) *C.char {
170
+
171
+ // Marshal the provided Go data structure into JSON bytes.
172
+ jsonData, err := json.Marshal(message)
173
+ if err != nil {
174
+ // If marshalling fails, return a JSON error response instead.
175
+ return jsonErrorResponse("Failed to marshal success response", err)
176
+ }
177
+ // Format into the standard success structure.
178
+ jsonSuccess := fmt.Sprintf(`{"state":"Success","message":%s}`, string(jsonData))
179
+ // Convert the Go string to a C string (allocates memory in C heap).
180
+ return C.CString(jsonSuccess)
181
+ }
182
+
183
// escapeStringForJSON escapes characters (double quotes, backslashes,
// control characters, etc.) within s so it is safe to embed inside a JSON
// string value. It delegates to Go's standard JSON marshaller for robust
// escaping and strips the surrounding quotes that Marshal adds.
//
// Compared with the json.Encoder-based variant, json.Marshal does not append
// a trailing newline and its error is actually checked, removing the fragile
// buffer-slicing arithmetic.
func escapeStringForJSON(
	s string,
) string {

	// Marshalling a plain string is not expected to fail; the check is
	// defensive so we never panic or return malformed output.
	encoded, err := json.Marshal(s)
	if err != nil || len(encoded) < 2 {
		return s
	}
	// Trim the leading and trailing double quote added by Marshal.
	return string(encoded[1 : len(encoded)-1])
}
202
+
203
+ // newMessageStore initializes a new MessageStore.
204
+ func newMessageStore() *MessageStore {
205
+ return &MessageStore{
206
+ messagesByChannel: make(map[string]*list.List),
207
+ }
208
+ }
209
+
210
+ // checkInstanceIndex performs bounds checking on the provided instance index.
211
+ func checkInstanceIndex(
212
+ instanceIndex int,
213
+ ) error {
214
+
215
+ if instanceIndex < 0 || instanceIndex >= maxInstances {
216
+ return fmt.Errorf("invalid instance index: %d. Must be between 0 and %d", instanceIndex, maxInstances-1)
217
+ }
218
+ return nil
219
+ }
220
+
221
// cleanupFailedCreate releases all per-instance state after CreateNode fails
// partway through, returning the slot at instanceIndex to its uninitialized
// condition so a later creation attempt can reuse it. It closes the host (if
// any), cancels the instance context, nils out every per-instance map and
// pointer, resets the instance mutexes to fresh zero values, and finally
// clears the isInitialized flag under the global instance-state lock.
// NOTE(review): the mutexes are replaced rather than unlocked — safe only
// while no other goroutine still holds or references the old ones;
// presumably this runs before the instance becomes visible elsewhere.
func cleanupFailedCreate(instanceIndex int) {
	log.Printf("[GO] 🧹 Instance %d: Cleaning up after failed creation...", instanceIndex)
	if hostInstances[instanceIndex] != nil { // Attempt cleanup before returning.
		hostInstances[instanceIndex].Close()
		hostInstances[instanceIndex] = nil
	}
	if cancelContexts[instanceIndex] != nil {
		// Cancel the instance context so dependent goroutines can stop.
		cancelContexts[instanceIndex]()
	}
	// Set all instance state to nil
	pubsubInstances[instanceIndex] = nil
	contexts[instanceIndex] = nil
	cancelContexts[instanceIndex] = nil
	topicsInstances[instanceIndex] = nil
	subscriptionsInstances[instanceIndex] = nil
	connectedPeersInstances[instanceIndex] = nil
	rendezvousDiscoveredPeersInstances[instanceIndex] = nil
	persistentChatStreamsInstances[instanceIndex] = nil
	messageStoreInstances[instanceIndex] = nil
	// Clear the mutexes for this instance
	connectedPeersMutexes[instanceIndex] = sync.RWMutex{}            // Reset to a new mutex
	persistentChatStreamsMutexes[instanceIndex] = sync.Mutex{}       // Reset to a new mutex
	pubsubMutexes[instanceIndex] = sync.RWMutex{}                    // Reset to a new mutex
	rendezvousDiscoveredPeersMutexes[instanceIndex] = sync.RWMutex{} // Reset to a new mutex
	// Reset the isInitialized flag for this instance
	instanceStateMutex.Lock()
	isInitialized[instanceIndex] = false // Mark as uninitialized again
	instanceStateMutex.Unlock()
}
250
+
251
+ func getListenAddrs(ipsJSON string, tcpPort int) ([]ma.Multiaddr, error) {
252
+ var ips []string
253
+
254
+ // --- Parse IPs from JSON ---
255
+ if ipsJSON == "" || ipsJSON == "[]" {
256
+ ips = []string{"0.0.0.0"} // Default if empty or not provided
257
+ } else {
258
+ if err := json.Unmarshal([]byte(ipsJSON), &ips); err != nil {
259
+ return nil, fmt.Errorf("failed to parse IPs JSON: %w", err)
260
+ }
261
+ if len(ips) == 0 { // Handle case of valid but empty JSON array "[]"
262
+ ips = []string{"0.0.0.0"}
263
+ }
264
+ }
265
+
266
+ var listenAddrs []ma.Multiaddr
267
+ quicPort := 0
268
+ webrtcPort := 0
269
+ if tcpPort != 0 {
270
+ quicPort = tcpPort + 1
271
+ webrtcPort = tcpPort + 2
272
+ }
273
+
274
+ // --- Create Multiaddrs for both protocols from the single IP list ---
275
+ for _, ip := range ips {
276
+ // Create TCP Multiaddr
277
+ tcpAddrStr := fmt.Sprintf("/ip4/%s/tcp/%d", ip, tcpPort)
278
+ tcpMaddr, err := ma.NewMultiaddr(tcpAddrStr)
279
+ if err != nil {
280
+ return nil, fmt.Errorf("failed to create TCP multiaddr for IP %s: %w", ip, err)
281
+ }
282
+ listenAddrs = append(listenAddrs, tcpMaddr)
283
+
284
+ // Create QUIC Multiaddr
285
+ quicAddrStr := fmt.Sprintf("/ip4/%s/udp/%d/quic-v1", ip, quicPort)
286
+ quicMaddr, err := ma.NewMultiaddr(quicAddrStr)
287
+ if err != nil {
288
+ return nil, fmt.Errorf("failed to create QUIC multiaddr for IP %s: %w", ip, err)
289
+ }
290
+ listenAddrs = append(listenAddrs, quicMaddr)
291
+
292
+ // Create WebRTC (UDP) Multiaddr
293
+ webrctAddrStr := fmt.Sprintf("/ip4/%s/udp/%d/webrtc-direct", ip, webrtcPort)
294
+ webrctMaddr, err := ma.NewMultiaddr(webrctAddrStr)
295
+ if err != nil {
296
+ return nil, fmt.Errorf("failed to create WebRTC multiaddr for IP %s: %w", ip, err)
297
+ }
298
+ listenAddrs = append(listenAddrs, webrctMaddr)
299
+ }
300
+
301
+ return listenAddrs, nil
302
+ }
303
+
304
+ func createResourceManager(maxConnections int) (network.ResourceManager, error) {
305
+ limits := rcmgr.DefaultLimits
306
+ libp2p.SetDefaultServiceLimits(&limits)
307
+ myLimits := rcmgr.PartialLimitConfig{
308
+ System: rcmgr.ResourceLimits{Conns: rcmgr.LimitVal(maxConnections)},
309
+ }
310
+ concreteLimits := myLimits.Build(limits.AutoScale())
311
+ return rcmgr.NewResourceManager(rcmgr.NewFixedLimiter(concreteLimits))
312
+ }
313
+
314
+ func setupPubSub(instanceIndex int) error {
315
+ instanceCtx := contexts[instanceIndex]
316
+ instanceHost := hostInstances[instanceIndex]
317
+ psOptions := []pubsub.Option{
318
+ // pubsub.WithFloodPublish(true),
319
+ pubsub.WithMaxMessageSize(int(MaxMessageSize)),
320
+ }
321
+ ps, err := pubsub.NewGossipSub(instanceCtx, instanceHost, psOptions...)
322
+ if err != nil {
323
+ return err
324
+ }
325
+ pubsubInstances[instanceIndex] = ps
326
+ return nil
327
+ }
328
+
329
// setupNotifiers registers network notifiers on the instance's host so that
// connection and disconnection events keep connectedPeersInstances and
// persistentChatStreamsInstances in sync with the actual network state.
// ConnectedF records/updates an ExtendedPeerInfo with a deduplicated,
// fully-qualified address list; DisconnectedF removes the peer (and closes
// any persistent chat stream) only once the LAST connection to it is gone.
func setupNotifiers(instanceIndex int) {
	hostInstances[instanceIndex].Network().Notify(&network.NotifyBundle{
		ConnectedF: func(_ network.Network, conn network.Conn) {
			log.Printf("[GO] 🔔 Instance %d: Event - Connected to %s (Direction: %s)\n", instanceIndex, conn.RemotePeer(), conn.Stat().Direction)

			remotePeerID := conn.RemotePeer()
			instanceHost := hostInstances[instanceIndex] // Available in CreateNode's scope

			// --- 1. Gather all candidate addresses into a single slice ---
			// The live connection's remote address plus everything the
			// peerstore already knows about this peer.
			candidateAddrs := make([]ma.Multiaddr, 0)
			candidateAddrs = append(candidateAddrs, conn.RemoteMultiaddr())
			candidateAddrs = append(candidateAddrs, instanceHost.Peerstore().Addrs(remotePeerID)...)

			// --- 2. Filter, format, and deduplicate in a single pass ---
			finalPeerAddrs := make([]ma.Multiaddr, 0)
			uniqueAddrStrings := make(map[string]struct{}) // Using an empty struct is more memory-efficient for a "set"

			for _, addr := range candidateAddrs {
				// Skip nil, loopback and unspecified (0.0.0.0-style) addresses.
				if addr == nil || manet.IsIPLoopback(addr) || manet.IsIPUnspecified(addr) {
					continue
				}

				// Ensure the address is fully qualified with the peer's ID
				var fullAddrStr string
				if _, idInAddr := peer.SplitAddr(addr); idInAddr == "" {
					fullAddrStr = fmt.Sprintf("%s/p2p/%s", addr.String(), remotePeerID.String())
				} else {
					fullAddrStr = addr.String()
				}

				// If we haven't seen this exact address string before, add it.
				if _, exists := uniqueAddrStrings[fullAddrStr]; !exists {
					maddr, err := ma.NewMultiaddr(fullAddrStr)
					if err == nil {
						finalPeerAddrs = append(finalPeerAddrs, maddr)
						uniqueAddrStrings[fullAddrStr] = struct{}{}
					}
				}
			}

			if len(finalPeerAddrs) == 0 {
				log.Printf("[GO] Instance %d: ConnectedF: Could not find any non-local addresses for %s immediately.\n", instanceIndex, remotePeerID)
			}

			// --- 3. Determine the direction ---
			var directionString string
			switch conn.Stat().Direction {
			case network.DirInbound:
				directionString = "incoming"
			case network.DirOutbound:
				directionString = "outgoing"
			default:
				directionString = "unknown"
			}

			// --- 4. Update the connected peers list ---
			instanceConnectedPeersMutex := &connectedPeersMutexes[instanceIndex]
			instanceConnectedPeers := connectedPeersInstances[instanceIndex]

			instanceConnectedPeersMutex.Lock()
			// It's possible this peer was already in the map if ConnectTo ran first,
			// or if there were multiple connection events. Update generously.
			if epi, exists := instanceConnectedPeers[remotePeerID]; exists {
				epi.Addrs = finalPeerAddrs // Update with the new comprehensive list
				epi.Direction = directionString
				instanceConnectedPeers[remotePeerID] = epi
			} else {
				instanceConnectedPeers[remotePeerID] = ExtendedPeerInfo{
					ID:          remotePeerID,
					Addrs:       finalPeerAddrs,
					ConnectedAt: time.Now(),
					Direction:   directionString,
					Misc:        0,
				}
			}
			instanceConnectedPeersMutex.Unlock()

			log.Printf("[GO] Instance %d: Updated ConnectedPeers for %s via ConnectedF. Total addresses: %d. List: %v\n", instanceIndex, remotePeerID, len(finalPeerAddrs), finalPeerAddrs)
		},
		DisconnectedF: func(_ network.Network, conn network.Conn) {
			log.Printf("[GO] 🔔 Instance %d: Event - Disconnected from %s\n", instanceIndex, conn.RemotePeer())
			remotePeerID := conn.RemotePeer()

			// Get the host for this instance to query its network state.
			instanceHost := hostInstances[instanceIndex]
			if instanceHost == nil {
				// This shouldn't happen if the notifier is active, but a safe check.
				log.Printf("[GO] ⚠️ Instance %d: DisconnectedF: Host is nil, cannot perform connection check.\n", instanceIndex)
				return
			}

			// --- Check if this is the LAST connection to this peer ---
			// libp2p can have multiple connections to a single peer (e.g., TCP, QUIC).
			// We only want to consider the peer fully disconnected when ALL connections are gone.
			if len(instanceHost.Network().ConnsToPeer(remotePeerID)) == 0 {
				log.Printf("[GO] Instance %d: Last connection to %s closed. Removing from tracked peers.\n", instanceIndex, remotePeerID)

				// Handle disconnection for ConnectedPeers
				instanceConnectedPeersMutex := &connectedPeersMutexes[instanceIndex]
				instanceConnectedPeersMutex.Lock()
				if _, exists := connectedPeersInstances[instanceIndex][remotePeerID]; exists {
					delete(connectedPeersInstances[instanceIndex], remotePeerID)
					log.Printf("[GO] Instance %d: Removed %s from ConnectedPeers via DisconnectedF notifier.\n", instanceIndex, remotePeerID)
					//peerRemoved = true
				}
				instanceConnectedPeersMutex.Unlock()

				// Also clean up persistent stream if one existed for this peer
				persistentChatStreamsMutexes[instanceIndex].Lock()
				if stream, ok := persistentChatStreamsInstances[instanceIndex][remotePeerID]; ok {
					log.Printf("[GO] Instance %d: Cleaning up persistent stream for disconnected peer %s via DisconnectedF notifier.\n", instanceIndex, remotePeerID)
					_ = stream.Close() // Attempt graceful close
					delete(persistentChatStreamsInstances[instanceIndex], remotePeerID)
				}
				persistentChatStreamsMutexes[instanceIndex].Unlock()
			} else {
				log.Printf("[GO] Instance %d: DisconnectedF: Still have %d active connections to %s, not removing from tracked peers.\n", instanceIndex, len(instanceHost.Network().ConnsToPeer(remotePeerID)), remotePeerID)
			}
		},
	})
}
451
+
452
+ // waitForPublicReachability subscribes to the host's event bus and waits for the
453
+ // node to confirm its public reachability. It includes a timeout to prevent
454
+ // the startup from hanging indefinitely.
455
+ func waitForPublicReachability(h host.Host, timeout time.Duration) bool {
456
+ // 1. Subscribe to the reachability event.
457
+ sub, err := h.EventBus().Subscribe(new(event.EvtLocalReachabilityChanged))
458
+ if err != nil {
459
+ log.Printf("[GO] ❌ Failed to subscribe to reachability events: %v", err)
460
+ return false
461
+ }
462
+ defer sub.Close() // Clean up the subscription when we're done.
463
+
464
+ log.Printf("[GO] ⏳ Waiting for public reachability confirmation (timeout: %s)...", timeout)
465
+
466
+ // 2. Wait for the event in a select loop with a timeout.
467
+ timeoutCh := time.After(timeout)
468
+ for {
469
+ select {
470
+ case evt := <-sub.Out():
471
+ // We received an event. Cast it to the correct type.
472
+ reachabilityEvent, ok := evt.(event.EvtLocalReachabilityChanged)
473
+ if !ok {
474
+ continue // Should not happen, but good practice to check.
475
+ }
476
+
477
+ log.Printf("[GO] 💡 Reachability status changed to: %s", reachabilityEvent.Reachability)
478
+
479
+ // Check if the new status is what we're waiting for.
480
+ if reachabilityEvent.Reachability == network.ReachabilityPublic {
481
+ log.Printf("[GO] ✅ Confirmed Public reachability via event.")
482
+ return true // Success! Return true.
483
+ } else if reachabilityEvent.Reachability == network.ReachabilityPrivate {
484
+ log.Printf("[GO] ⚠️ Node is behind a NAT or firewall (Private reachability).")
485
+ return false // Node is not publicly reachable.
486
+ }
487
+ case <-timeoutCh:
488
+ log.Printf("[GO] ⚠️ Timed out waiting for public reachability.")
489
+ return false // Timeout. Return false.
490
+ }
491
+ }
492
+ }
493
+
494
+ // --- Core Logic Functions ---
495
+
496
+ // storeReceivedMessage processes a raw message received either from a direct stream
497
+ // or a PubSub topic. The sender peerID and the channel to store are retrieved in handleStream and readFromSubscription
498
+ func storeReceivedMessage(
499
+ instanceIndex int,
500
+ from peer.ID,
501
+ channel string,
502
+ data []byte,
503
+ ) {
504
+ // Check instance index validity
505
+ if err := checkInstanceIndex(instanceIndex); err != nil {
506
+ log.Printf("[GO] ❌ storeReceivedMessage: %v\n", err)
507
+ return // Cannot process message for invalid instance
508
+ }
509
+
510
+ // Get the message store for this instance
511
+ store := messageStoreInstances[instanceIndex]
512
+ if store == nil {
513
+ log.Printf("[GO] ❌ storeReceivedMessage: Message store not initialized for instance %d\n", instanceIndex)
514
+ return // Cannot process message if store is nil
515
+ }
516
+
517
+ // Create the minimal message envelope.
518
+ newMessage := &QueuedMessage{
519
+ From: from,
520
+ Data: data,
521
+ }
522
+
523
+ // Lock the store mutex before accessing the shared maps.
524
+ store.mu.Lock()
525
+ defer store.mu.Unlock()
526
+
527
+ // Check if this channel already has a message list.
528
+ messageList, channelExists := store.messagesByChannel[channel]
529
+ if !channelExists {
530
+ // If the channel does not exist, check if we can create a new message queue.
531
+ if len(store.messagesByChannel) >= maxUniqueChannels {
532
+ log.Printf("[GO] 🗑️ Instance %d: Message store full. Discarding message for new channel '%s'.\n", instanceIndex, channel)
533
+ return
534
+ }
535
+ messageList = list.New()
536
+ store.messagesByChannel[channel] = messageList
537
+ log.Printf("[GO] ✨ Instance %d: Created new channel queue '%s'. Total channels: %d\n", instanceIndex, channel, len(store.messagesByChannel))
538
+ }
539
+
540
+ // If the channel already has a message list, check its length.
541
+ if messageList.Len() >= maxChannelQueueLen {
542
+ log.Printf("[GO] 🗑️ Instance %d: Queue for channel '%s' full. Discarding message.\n", instanceIndex, channel)
543
+ return
544
+ }
545
+
546
+ messageList.PushBack(newMessage)
547
+ log.Printf("[GO] 📥 Instance %d: Queued message on channel '%s' from %s. New queue length: %d\n", instanceIndex, channel, from, messageList.Len())
548
+ }
549
+
550
// readFromSubscription runs as a dedicated goroutine for each active PubSub
// subscription of a specific instance. It continuously waits for new messages
// on the subscription (`sub.Next(ctx)`), ignores self-published messages,
// applies rendezvous updates (topics ending in ":rv") to the instance's
// discovered-peer state, and queues every other message via
// storeReceivedMessage. It exits when the instance context is cancelled, the
// subscription is cancelled, or EOF is encountered; other read errors are
// logged and retried after a short pause.
func readFromSubscription(
	instanceIndex int,
	sub *pubsub.Subscription,
) {

	// Check instance index validity (should be done before launching goroutine, but defensive check)
	if err := checkInstanceIndex(instanceIndex); err != nil {
		log.Printf("[GO] ❌ readFromSubscription: %v. Exiting goroutine.\n", err)
		return
	}

	// Get the topic string directly from the subscription object.
	topic := sub.Topic()
	instanceCtx := contexts[instanceIndex]
	instanceHost := hostInstances[instanceIndex]

	if instanceCtx == nil || instanceHost == nil {
		log.Printf("[GO] ❌ readFromSubscription: Context or Host not initialized for instance %d. Exiting goroutine.\n", instanceIndex)
		return
	}

	log.Printf("[GO] 👂 Instance %d: Started listener goroutine for topic: %s\n", instanceIndex, topic)
	defer log.Printf("[GO] 👂 Instance %d: Exiting listener goroutine for topic: %s\n", instanceIndex, topic) // Log when goroutine exits

	for {
		// Check if the main context has been cancelled (e.g., during node shutdown).
		if instanceCtx.Err() != nil {
			log.Printf("[GO] 👂 Instance %d: Context cancelled, stopping listener goroutine for topic: %s\n", instanceIndex, topic)
			return // Exit the goroutine.
		}

		// Wait for the next message from the subscription. This blocks until a message
		// arrives, the context is cancelled, or an error occurs.
		msg, err := sub.Next(instanceCtx)
		if err != nil {
			// Check for expected errors during shutdown or cancellation.
			if err == context.Canceled || err == context.DeadlineExceeded || err == pubsub.ErrSubscriptionCancelled || instanceCtx.Err() != nil {
				log.Printf("[GO] 👂 Instance %d: Subscription listener for topic '%s' stopping gracefully: %v\n", instanceIndex, topic, err)
				return // Exit goroutine cleanly.
			}
			// Handle EOF, which can sometimes occur. Treat it as a reason to stop.
			if err == io.EOF {
				log.Printf("[GO] 👂 Instance %d: Subscription listener for topic '%s' encountered EOF, stopping: %v\n", instanceIndex, topic, err)
				return // Exit goroutine.
			}
			// Log other errors but attempt to continue (they might be transient).
			log.Printf("[GO] ❌ Instance %d: Error reading from subscription '%s': %v. Continuing...\n", instanceIndex, topic, err)
			// Pause briefly to avoid busy-looping on persistent errors.
			time.Sleep(1 * time.Second)
			continue // Continue the loop to try reading again.
		}

		log.Printf("[GO] 📬 Instance %d (id: %s): Received new PubSub message on topic '%s' from %s\n", instanceIndex, instanceHost.ID().String(), topic, msg.GetFrom())

		// Ignore messages published by the local node itself.
		if msg.GetFrom() == instanceHost.ID() {
			continue // Skip processing self-sent messages.
		}

		// Handle Rendezvous or Standard Messages
		if strings.HasSuffix(topic, ":rv") {
			// This is a rendezvous update.
			// 1. First, unmarshal the outer Protobuf message.
			var protoMsg pg.Message
			if err := proto.Unmarshal(msg.Data, &protoMsg); err != nil {
				log.Printf("⚠️ Instance %d: Could not decode Protobuf message on topic '%s': %v\n", instanceIndex, topic, err)
				continue
			}

			// 2. The actual payload is a JSON string within the 'json_content' field.
			jsonPayload := protoMsg.GetJsonContent()
			if jsonPayload == "" {
				log.Printf("⚠️ Instance %d: Rendezvous message on topic '%s' has empty JSON content.\n", instanceIndex, topic)
				continue
			}

			// 3. Now, unmarshal the inner JSON payload.
			var updatePayload struct {
				Peers       []ExtendedPeerInfo `json:"peers"`
				UpdateCount int64              `json:"update_count"`
			}
			if err := json.Unmarshal([]byte(jsonPayload), &updatePayload); err != nil {
				log.Printf("[GO] ⚠️ Instance %d: Could not decode rendezvous update payload on topic '%s': %v\n", instanceIndex, topic, err)
				continue // Skip this malformed message.
			}

			// 4. Create a new map from the decoded peer list.
			newPeerMap := make(map[peer.ID]ExtendedPeerInfo)
			for _, peerInfo := range updatePayload.Peers {
				newPeerMap[peerInfo.ID] = peerInfo
			}

			// 5. Safely replace the old map with the new one (full replacement,
			// not a merge, so stale peers disappear).
			rendezvousDiscoveredPeersMutexes[instanceIndex].Lock()
			// If this is the first update for this instance, initialize the state struct.
			if rendezvousDiscoveredPeersInstances[instanceIndex] == nil {
				rendezvousDiscoveredPeersInstances[instanceIndex] = &RendezvousState{}
			}
			rendezvousState := rendezvousDiscoveredPeersInstances[instanceIndex]
			rendezvousState.Peers = newPeerMap
			rendezvousState.UpdateCount = updatePayload.UpdateCount
			rendezvousDiscoveredPeersMutexes[instanceIndex].Unlock()

			log.Printf("[GO] ✅ Instance %d: Updated rendezvous peers from topic '%s'. Found %d peers. Update count: %d.\n", instanceIndex, topic, len(newPeerMap), updatePayload.UpdateCount)
		} else {
			// This is a standard message. Queue it as before.
			log.Printf("[GO] 📝 Instance %d: Storing new pubsub message from topic '%s'.\n", instanceIndex, topic)
			storeReceivedMessage(instanceIndex, msg.GetFrom(), topic, msg.Data)
		}
	}
}
665
+
666
+ // handleStream reads from a direct message stream using the new framing protocol.
667
+ // It expects the stream to start with a 4-byte length prefix, followed by a 1-byte channel name length,
668
+ // the channel name itself, and finally the Protobuf-encoded payload.
669
+ func handleStream(instanceIndex int, s network.Stream) {
670
+ senderPeerID := s.Conn().RemotePeer()
671
+ log.Printf("[GO] 📥 Instance %d: Accepted new INCOMING stream from %s, storing for duplex communication.\n", instanceIndex, senderPeerID)
672
+
673
+ // This defer block ensures cleanup happens when the stream is closed by either side.
674
+ defer func() {
675
+ log.Printf("[GO] 🧹 Instance %d: Inbound stream from %s closed. Removing from persistent map.\n", instanceIndex, senderPeerID)
676
+ persistentChatStreamsMutexes[instanceIndex].Lock()
677
+ delete(persistentChatStreamsInstances[instanceIndex], senderPeerID)
678
+ persistentChatStreamsMutexes[instanceIndex].Unlock()
679
+ s.Close() // Ensure the stream is fully closed.
680
+ }()
681
+
682
+ // Store the newly accepted stream so we can use it to send messages back to this peer.
683
+ persistentChatStreamsMutexes[instanceIndex].Lock()
684
+ persistentChatStreamsInstances[instanceIndex][senderPeerID] = s
685
+ persistentChatStreamsMutexes[instanceIndex].Unlock()
686
+
687
+ for {
688
+ // --- REFACTORED: New Framing Protocol ---
689
+ // 1. Read the 4-byte total length prefix.
690
+ var totalLen uint32
691
+ if err := binary.Read(s, binary.BigEndian, &totalLen); err != nil {
692
+ if err == io.EOF {
693
+ log.Printf("[GO] 🔌 Instance %d: Direct stream with peer %s closed (EOF).\n", instanceIndex, senderPeerID)
694
+ } else if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
695
+ log.Printf("[GO] ⏳ Instance %d: Timeout reading length from direct stream with %s: %v\n", instanceIndex, senderPeerID, err)
696
+ } else {
697
+ log.Printf("[GO] ❌ Instance %d: Unexpected error reading length from direct stream with %s: %v\n", instanceIndex, senderPeerID, err)
698
+ }
699
+ return // Exit handler for any read error on length.
700
+ }
701
+
702
+ // --- Check the message size ---
703
+ if totalLen > MaxMessageSize {
704
+ log.Printf("[GO] ❌ Instance %d: Received message length %d exceeds limit (%d) from %s. Resetting stream.\n", instanceIndex, totalLen, MaxMessageSize, senderPeerID)
705
+ s.Reset() // Forcefully close the stream due to protocol violation.
706
+ return
707
+ }
708
+ if totalLen == 0 {
709
+ log.Printf("[GO] ⚠️ Instance %d: Received zero length message frame from %s, continuing loop.\n", instanceIndex, senderPeerID)
710
+ continue
711
+ }
712
+
713
+ // 2. Read the 1-byte channel name length.
714
+ var channelLen uint8
715
+ if err := binary.Read(s, binary.BigEndian, &channelLen); err != nil {
716
+ if err == io.EOF {
717
+ log.Printf("[GO] 🔌 Instance %d: Direct stream with peer %s closed (EOF).\n", instanceIndex, senderPeerID)
718
+ } else if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
719
+ log.Printf("[GO] ⏳ Instance %d: Timeout reading channel-length from direct stream with %s: %v\n", instanceIndex, senderPeerID, err)
720
+ } else {
721
+ log.Printf("[GO] ❌ Instance %d: Unexpected error reading channel-length from direct stream with %s: %v\n", instanceIndex, senderPeerID, err)
722
+ }
723
+ return // Exit handler for any read error on length.
724
+ }
725
+
726
+ // 3. Read the channel name string.
727
+ channelBytes := make([]byte, channelLen)
728
+ if _, err := io.ReadFull(s, channelBytes); err != nil {
729
+ if err == io.EOF {
730
+ log.Printf("[GO] 🔌 Instance %d: Direct stream with peer %s closed (EOF).\n", instanceIndex, senderPeerID)
731
+ } else if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
732
+ log.Printf("[GO] ⏳ Instance %d: Timeout reading channel from direct stream with %s: %v\n", instanceIndex, senderPeerID, err)
733
+ } else {
734
+ log.Printf("[GO] ❌ Instance %d: Unexpected error reading channel from direct stream with %s: %v\n", instanceIndex, senderPeerID, err)
735
+ }
736
+ return // Exit handler for any read error on length.
737
+ }
738
+ channel := string(channelBytes)
739
+
740
+ // 4. Read the Protobuf payload.
741
+ payloadLen := totalLen - uint32(channelLen) - 1 // Subtract channel len byte and channel string
742
+ payload := make([]byte, payloadLen)
743
+ if _, err := io.ReadFull(s, payload); err != nil {
744
+ if err == io.EOF {
745
+ log.Printf("[GO] 🔌 Instance %d: Direct stream with peer %s closed (EOF).\n", instanceIndex, senderPeerID)
746
+ } else if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
747
+ log.Printf("[GO] ⏳ Instance %d: Timeout reading payload from direct stream with %s: %v\n", instanceIndex, senderPeerID, err)
748
+ } else {
749
+ log.Printf("[GO] ❌ Instance %d: Unexpected error reading payload from direct stream with %s: %v\n", instanceIndex, senderPeerID, err)
750
+ }
751
+ return // Exit handler for any read error on length.
752
+ }
753
+
754
+ // 5. Store the message.
755
+ storeReceivedMessage(instanceIndex, senderPeerID, channel, payload)
756
+ }
757
+ }
758
+
759
+ // setupDirectMessageHandler configures the libp2p host for a specific instance
760
+ // to listen for incoming streams using the custom ChatProtocol.
761
+ // When a peer opens a stream with this protocol ID, the provided handler function
762
+ // is invoked to manage communication on that stream.
763
+ func setupDirectMessageHandler(
764
+ instanceIndex int,
765
+ ) {
766
+
767
+ // Check instance index validity
768
+ if err := checkInstanceIndex(instanceIndex); err != nil {
769
+ log.Printf("[GO] ❌ setupDirectMessageHandler: %v\n", err)
770
+ return // Cannot setup handler for invalid instance
771
+ }
772
+
773
+ instanceHost := hostInstances[instanceIndex]
774
+
775
+ if instanceHost == nil {
776
+ log.Printf("[GO] ❌ Instance %d: Cannot setup direct message handler: Host not initialized\n", instanceIndex)
777
+ return
778
+ }
779
+
780
+ // Set a handler function for the ChatProtocol. This function will be called
781
+ // automatically by libp2p whenever a new incoming stream for this protocol is accepted.
782
+ // Use a closure to capture the instanceIndex.
783
+ instanceHost.SetStreamHandler(ChatProtocol, func(s network.Stream) {
784
+ handleStream(instanceIndex, s)
785
+ })
786
+ }
787
+
788
+ // This function constructs and writes a message using our new framing protocol for direct messages.
789
+ // It takes a writer (e.g., a network stream), the channel name, and the payload data.
790
+ // The message format is:
791
+ // - 4-byte total length (including all the following parts)
792
+ // - 1-byte channel name length
793
+ // - channel name (as a UTF-8 string)
794
+ // - payload (Protobuf-encoded data).
795
+ func writeDirectMessageFrame(w io.Writer, channel string, payload []byte) error {
796
+ channelBytes := []byte(channel)
797
+ channelLen := uint8(len(channelBytes))
798
+
799
+ // Check if channel name is too long for our 1-byte length prefix.
800
+ if len(channelBytes) > 255 {
801
+ return fmt.Errorf("channel name exceeds 255 bytes limit: %s", channel)
802
+ }
803
+
804
+ // Total length = 1 (for channel len) + len(channel) + len(payload)
805
+ totalLength := uint32(1 + len(channelBytes) + len(payload))
806
+
807
+ // --- Add size check before writing ---
808
+ if totalLength > MaxMessageSize {
809
+ return fmt.Errorf("outgoing message size (%d) exceeds limit (%d)", totalLength, MaxMessageSize)
810
+ }
811
+
812
+ buf := new(bytes.Buffer)
813
+
814
+ // Write total length (4 bytes)
815
+ if err := binary.Write(buf, binary.BigEndian, totalLength); err != nil {
816
+ return fmt.Errorf("failed to write total length: %w", err)
817
+ }
818
+ // Write channel length (1 byte)
819
+ if err := binary.Write(buf, binary.BigEndian, channelLen); err != nil {
820
+ return fmt.Errorf("failed to write channel length: %w", err)
821
+ }
822
+ // Write channel name
823
+ if _, err := buf.Write(channelBytes); err != nil {
824
+ return fmt.Errorf("failed to write channel name: %w", err)
825
+ }
826
+ // Write payload
827
+ if _, err := buf.Write(payload); err != nil {
828
+ return fmt.Errorf("failed to write payload: %w", err)
829
+ }
830
+
831
+ // Write the entire frame to the stream.
832
+ if _, err := w.Write(buf.Bytes()); err != nil {
833
+ return fmt.Errorf("failed to write framed message to stream: %w", err)
834
+ }
835
+ return nil
836
+ }
837
+
838
+ // goGetNodeAddresses is the internal Go function that performs the core logic
839
+ // of fetching and formatting node addresses.
840
+ // It takes an instanceIndex and a targetPID. If targetPID is empty (peer.ID("")),
841
+ // it fetches addresses for the local node of the given instance.
842
+ // It returns a slice of fully formatted multiaddress strings and an error if one occurs.
843
+ func goGetNodeAddresses(
844
+ instanceIndex int,
845
+ targetPID peer.ID, // Changed from targetPeerIDStr string
846
+ ) ([]string, error) {
847
+ var resolvedPID peer.ID // This will be the ID we actually work with
848
+
849
+ instanceHost := hostInstances[instanceIndex]
850
+ if instanceHost == nil {
851
+ errMsg := fmt.Sprintf("Instance %d: Host not initialized", instanceIndex)
852
+ log.Printf("[GO] ❌ goGetNodeAddresses: %s\n", errMsg)
853
+ return nil, fmt.Errorf("%s", errMsg)
854
+ }
855
+
856
+ // Determine the actual Peer ID to use
857
+ if targetPID == "" { // peer.ID("") indicates request for local node
858
+ resolvedPID = instanceHost.ID()
859
+ } else {
860
+ resolvedPID = targetPID
861
+ log.Printf("[GO] ℹ️ Instance %d: goGetNodeAddresses called for specific peer: %s\n", instanceIndex, resolvedPID.String())
862
+ }
863
+
864
+ addrMap := make(map[string]ma.Multiaddr)
865
+ var candidateAddrs []ma.Multiaddr
866
+
867
+ // --- Address Gathering ---
868
+ // The logic below uses 'resolvedPID'
869
+ if resolvedPID == instanceHost.ID() { // Check if, after resolution, it's the local host
870
+ interfaceAddrs, err := instanceHost.Network().InterfaceListenAddresses()
871
+ if err != nil {
872
+ } else {
873
+ candidateAddrs = append(candidateAddrs, interfaceAddrs...)
874
+ }
875
+ candidateAddrs = append(candidateAddrs, instanceHost.Network().ListenAddresses()...)
876
+ candidateAddrs = append(candidateAddrs, instanceHost.Addrs()...)
877
+ candidateAddrs = append(candidateAddrs, instanceHost.Peerstore().Addrs(resolvedPID)...) // Use resolvedPID
878
+ } else {
879
+ // --- Remote Peer Addresses ---
880
+ remotePeerAddrsInStore := instanceHost.Peerstore().Addrs(resolvedPID) // Use resolvedPID
881
+ candidateAddrs = append(candidateAddrs, remotePeerAddrsInStore...)
882
+
883
+ connectedPeersMutexes[instanceIndex].RLock()
884
+ instanceConnectedPeers := connectedPeersInstances[instanceIndex]
885
+ if epi, exists := instanceConnectedPeers[resolvedPID]; exists { // Use resolvedPID
886
+ candidateAddrs = append(candidateAddrs, epi.Addrs...)
887
+ } else {
888
+ }
889
+ connectedPeersMutexes[instanceIndex].RUnlock()
890
+ }
891
+
892
+ // Common processing for all gathered candidate addresses
893
+ for _, addr := range candidateAddrs {
894
+ if addr == nil {
895
+ continue
896
+ }
897
+ if manet.IsIPLoopback(addr) || manet.IsIPUnspecified(addr) {
898
+ continue
899
+ }
900
+ addrMap[addr.String()] = addr
901
+ }
902
+
903
+ if len(addrMap) == 0 {
904
+ errMsg := fmt.Sprintf("Instance %d: No suitable base addresses found for peer %s after gathering and filtering.", instanceIndex, resolvedPID)
905
+ log.Printf("[GO] ⚠️ goGetNodeAddresses: %s\n", errMsg)
906
+ return nil, fmt.Errorf("%s", errMsg)
907
+ }
908
+
909
+ // --- Formatting Results ---
910
+ result := make([]string, 0, len(addrMap))
911
+ for _, currentAddr := range addrMap {
912
+ var fullAddrStr string
913
+ _, idInAddr := peer.SplitAddr(currentAddr)
914
+
915
+ if idInAddr == resolvedPID { // Use resolvedPID
916
+ fullAddrStr = currentAddr.String()
917
+ } else if idInAddr != "" && strings.Contains(currentAddr.String(), ma.ProtocolWithCode(ma.P_CIRCUIT).Name) {
918
+ fullAddrStr = fmt.Sprintf("%s/p2p/%s", currentAddr.String(), resolvedPID.String()) // Use resolvedPID
919
+ } else if idInAddr != "" && idInAddr != resolvedPID { // Use resolvedPID
920
+ continue
921
+ } else {
922
+ fullAddrStr = fmt.Sprintf("%s/p2p/%s", currentAddr.String(), resolvedPID.String()) // Use resolvedPID
923
+ }
924
+
925
+ if fullAddrStr != "" {
926
+ isDup := false
927
+ for _, rAddr := range result {
928
+ if rAddr == fullAddrStr {
929
+ isDup = true
930
+ break
931
+ }
932
+ }
933
+ if !isDup {
934
+ result = append(result, fullAddrStr)
935
+ }
936
+ }
937
+ }
938
+
939
+ if len(result) == 0 {
940
+ errMsg := fmt.Sprintf("Instance %d: No addresses to return for peer %s after formatting.", instanceIndex, resolvedPID)
941
+ log.Printf("[GO] ⚠️ goGetNodeAddresses: %s\n", errMsg)
942
+ return []string{}, nil // Return empty list, no error, if formatting yields nothing
943
+ }
944
+
945
+ return result, nil
946
+ }
947
+
948
// closeSingleInstance performs the cleanup for a specific node instance.
// It is called by CloseNode for either a single index or as part of the "close all" loop.
// It returns a *C.char JSON string indicating success or failure for that specific instance.
// This function assumes instanceIndex is already validated by the caller (CloseNode).
//
// Cleanup order matters: cancel the context first (so goroutines exit), then
// close streams and subscriptions, then the host, then clear remaining state.
// NOTE(review): the caller must free the returned C string (e.g. via the
// library's FreeString) — presumably jsonSuccessResponse/jsonErrorResponse
// allocate with C.CString; confirm against their definitions.
func closeSingleInstance(
	instanceIndex int,
) *C.char {

	// Acquire global lock briefly to check if instance exists before proceeding
	instanceStateMutex.RLock()
	hostExists := hostInstances[instanceIndex] != nil
	cancelExists := cancelContexts[instanceIndex] != nil
	isInstInitialized := isInitialized[instanceIndex] // Check initialization flag
	instanceStateMutex.RUnlock()

	if !isInstInitialized {
		// Should not happen if called from CloseNode after checking, but defensive
		log.Printf("[GO] ℹ️ Instance %d: Node was not initialized (internal close call).\n", instanceIndex)
		return jsonSuccessResponse(fmt.Sprintf("Instance %d: Node was not initialized", instanceIndex))
	}

	if !hostExists && !cancelExists {
		// Idempotent: double-close reports success rather than an error.
		log.Printf("[GO] ℹ️ Instance %d: Node was already closed (internal close call).\n", instanceIndex)
		return jsonSuccessResponse(fmt.Sprintf("Instance %d: Node was already closed", instanceIndex))
	}

	// --- Cancel Main Context ---
	// Acquire global lock to safely access/modify cancelContexts
	instanceStateMutex.Lock()
	if cancelContexts[instanceIndex] != nil {
		log.Printf("[GO] - Instance %d: Cancelling main context...\n", instanceIndex)
		cancelContexts[instanceIndex]()
		// Do NOT set to nil here yet, wait until host is closed
	} else {
		log.Printf("[GO] - Instance %d: Context was already nil.\n", instanceIndex)
	}
	instanceStateMutex.Unlock() // Release global lock

	// Give goroutines time to react to context cancellation (e.g., stream handlers, pubsub listeners)
	time.Sleep(200 * time.Millisecond)

	// --- Close Persistent Outgoing Streams ---
	// Acquire instance-specific mutex
	persistentChatStreamsMutexes[instanceIndex].Lock()
	instancePersistentChatStreams := persistentChatStreamsInstances[instanceIndex]
	if len(instancePersistentChatStreams) > 0 {
		log.Printf("[GO] - Instance %d: Closing %d persistent outgoing streams...\n", instanceIndex, len(instancePersistentChatStreams))
		for pid, stream := range instancePersistentChatStreams {
			log.Printf("[GO] - Instance %d: Closing stream to %s\n", instanceIndex, pid)
			_ = stream.Close() // Attempt graceful close; error deliberately ignored during teardown
		}
		persistentChatStreamsInstances[instanceIndex] = make(map[peer.ID]network.Stream) // Clear the map
	} else {
		log.Printf("[GO] - Instance %d: No persistent outgoing streams to close.\n", instanceIndex)
	}
	persistentChatStreamsMutexes[instanceIndex].Unlock() // Release instance-specific mutex

	// --- Clean Up PubSub State ---
	// Acquire instance-specific mutex
	pubsubMutexes[instanceIndex].Lock()
	instanceSubscriptions := subscriptionsInstances[instanceIndex]

	if len(instanceSubscriptions) > 0 {
		log.Printf("[GO] - Instance %d: Ensuring PubSub subscriptions (%d) are cancelled...\n", instanceIndex, len(instanceSubscriptions))
		for channel, sub := range instanceSubscriptions {
			log.Printf("[GO] - Instance %d: Cancelling subscription to topic: %s\n", instanceIndex, channel)
			sub.Cancel()
		}
	}
	subscriptionsInstances[instanceIndex] = make(map[string]*pubsub.Subscription) // Clear the map
	topicsInstances[instanceIndex] = make(map[string]*pubsub.Topic)               // Clear the map
	pubsubMutexes[instanceIndex].Unlock()                                         // Release instance-specific mutex
	pubsubInstances[instanceIndex] = nil                                          // Set instance PubSub to nil

	// --- Close Host Instance ---
	hostErrStr := ""
	// Acquire global lock to safely access/modify hostInstances and cancelContexts
	instanceStateMutex.Lock()
	if hostInstances[instanceIndex] != nil {
		log.Printf("[GO] - Instance %d: Closing host instance...\n", instanceIndex)
		err := hostInstances[instanceIndex].Close()
		hostInstances[instanceIndex] = nil // Set instance host to nil
		// Now that host is closed, it's safe to set cancel context to nil
		cancelContexts[instanceIndex] = nil
		if err != nil {
			hostErrStr = fmt.Sprintf("Instance %d: Error closing host: %v", instanceIndex, err)
			log.Printf("[GO] ⚠️ %s (proceeding with cleanup)\n", hostErrStr)
			// Continue cleanup even if host close fails
		} else {
			log.Printf("[GO] - Instance %d: Host closed successfully.\n", instanceIndex)
		}
	} else {
		log.Printf("[GO] - Instance %d: Host instance was already nil.\n", instanceIndex)
		// If host was nil, ensure cancel context is also nil
		cancelContexts[instanceIndex] = nil
	}
	instanceStateMutex.Unlock() // Release global lock

	// --- Clear Remaining State for this instance ---
	// Acquire instance-specific mutex
	connectedPeersMutexes[instanceIndex].Lock()
	connectedPeersInstances[instanceIndex] = make(map[peer.ID]ExtendedPeerInfo) // Clear the map
	connectedPeersMutexes[instanceIndex].Unlock()                               // Release instance-specific mutex

	// Clear the MessageStore for this instance
	if messageStoreInstances[instanceIndex] != nil {
		messageStoreInstances[instanceIndex].mu.Lock()
		messageStoreInstances[instanceIndex].messagesByChannel = make(map[string]*list.List) // Clear the message store
		messageStoreInstances[instanceIndex].mu.Unlock()
		messageStoreInstances[instanceIndex] = nil // Set instance store to nil
	}
	log.Printf("[GO] - Instance %d: Cleared connected peers map and message buffer.\n", instanceIndex)

	// Clear the rendezvous state for this instance
	rendezvousDiscoveredPeersMutexes[instanceIndex].Lock()
	rendezvousDiscoveredPeersInstances[instanceIndex] = nil // Clear the map
	rendezvousDiscoveredPeersMutexes[instanceIndex].Unlock() // Release instance-specific mutex

	// Mark instance as uninitialized so CreateNode can reuse this slot.
	instanceStateMutex.Lock()
	isInitialized[instanceIndex] = false
	instanceStateMutex.Unlock()

	// --- Return Result ---
	if hostErrStr != "" {
		// Return error if host closing failed, but mention cleanup attempt
		return jsonErrorResponse(
			fmt.Sprintf("Instance %d: Cleanup attempted, but encountered error during host closure", instanceIndex),
			fmt.Errorf("%s", hostErrStr),
		)
	}

	log.Printf("[GO] ✅ Instance %d: Node closed successfully.\n", instanceIndex)
	return jsonSuccessResponse(fmt.Sprintf("Instance %d: Node closed successfully", instanceIndex))
}
1083
+
1084
+ // --- Exported C Functions ---
1085
+ // These functions are callable from C (and thus Python). They act as the API boundary.
1086
+
1087
+ // This function MUST be called once from Python before any other library function.
1088
+ //
1089
+ //export InitializeLibrary
1090
+ func InitializeLibrary(
1091
+ maxInstancesC C.int,
1092
+ maxUniqueChannelsC C.int,
1093
+ maxChannelQueueLenC C.int,
1094
+ maxMessageSizeC C.int,
1095
+ enableLoggingC C.int,
1096
+ ) {
1097
+ // --- Configure Logging FIRST ---
1098
+ log.SetFlags(log.LstdFlags | log.Lmicroseconds)
1099
+ if int(enableLoggingC) == 1 {
1100
+ log.SetOutput(os.Stderr)
1101
+ } else {
1102
+ log.SetOutput(io.Discard)
1103
+ }
1104
+
1105
+ maxInstances = int(maxInstancesC)
1106
+ maxUniqueChannels = int(maxUniqueChannelsC)
1107
+ maxChannelQueueLen = int(maxChannelQueueLenC)
1108
+ MaxMessageSize = uint32(maxMessageSizeC)
1109
+
1110
+ // Now, initialize all the state slices with the correct size
1111
+ hostInstances = make([]host.Host, maxInstances)
1112
+ pubsubInstances = make([]*pubsub.PubSub, maxInstances)
1113
+ contexts = make([]context.Context, maxInstances)
1114
+ cancelContexts = make([]context.CancelFunc, maxInstances)
1115
+ topicsInstances = make([]map[string]*pubsub.Topic, maxInstances)
1116
+ subscriptionsInstances = make([]map[string]*pubsub.Subscription, maxInstances)
1117
+ connectedPeersInstances = make([]map[peer.ID]ExtendedPeerInfo, maxInstances)
1118
+ rendezvousDiscoveredPeersInstances = make([]*RendezvousState, maxInstances)
1119
+ persistentChatStreamsInstances = make([]map[peer.ID]network.Stream, maxInstances)
1120
+ messageStoreInstances = make([]*MessageStore, maxInstances)
1121
+
1122
+ // Mutexes for protecting concurrent access to instance-specific data.
1123
+ connectedPeersMutexes = make([]sync.RWMutex, maxInstances)
1124
+ persistentChatStreamsMutexes = make([]sync.Mutex, maxInstances)
1125
+ pubsubMutexes = make([]sync.RWMutex, maxInstances)
1126
+ rendezvousDiscoveredPeersMutexes = make([]sync.RWMutex, maxInstances)
1127
+
1128
+ // Flag to track if a specific instance index has been initialized
1129
+ isInitialized = make([]bool, maxInstances)
1130
+ log.Printf("[GO] ✅ Go library initialized with MaxInstances=%d, MaxUniqueChannels=%d and MaxChannelQueueLen=%d\n", maxInstances, maxUniqueChannels, maxChannelQueueLen)
1131
+ }
1132
+
1133
+ // CreateNode initializes and starts a new libp2p host (node) for a specific instance.
1134
+ // It configures the node based on the provided parameters (port, relay capabilities, UPnP).
1135
+ // Parameters:
1136
+ // - instanceIndexC (C.int): The index for this node instance (0 to maxInstances-1).
1137
+ // - predefinedPortC (C.int): The TCP port to listen on (0 for random).
1138
+ // - enableRelayClientC (C.int): 1 if this node should enable relay communications (client mode)
1139
+ // - enableRelayServiceC (C.int): 1 to set this node as a relay service (server mode),
1140
+ // - waitPublicC (C.int): 1 to try any possible attempt to be publicly reachable, 0 otherwise.
1141
+ // - maxConnectionsC (C.int): The maximum number of connections this node can maintain.
1142
+ //
1143
+ // Returns:
1144
+ // - *C.char: A JSON string indicating success (with node addresses) or failure (with an error message).
1145
+ // The structure is `{"state":"Success", "message": ["/ip4/.../p2p/...", ...]}` or `{"state":"Error", "message":"..."}`.
1146
+ // - IMPORTANT: The caller (C/Python) MUST free the returned C string using the `FreeString` function
1147
+ // exported by this library to avoid memory leaks. Returns NULL only on catastrophic failure before JSON creation.
1148
+ //
1149
+ //export CreateNode
1150
+ func CreateNode(
1151
+ instanceIndexC C.int,
1152
+ predefinedPortC C.int,
1153
+ ipsJSONC *C.char,
1154
+ enableRelayClientC C.int,
1155
+ enableRelayServiceC C.int,
1156
+ waitPublicC C.int,
1157
+ maxConnectionsC C.int,
1158
+ ) *C.char {
1159
+
1160
+ instanceIndex := int(instanceIndexC)
1161
+
1162
+ // --- Basic Instance Index Check ---
1163
+ if err := checkInstanceIndex(instanceIndex); err != nil {
1164
+ return jsonErrorResponse("Invalid instance index", err)
1165
+ }
1166
+
1167
+ // --- Instance-Specific State Initialization (if not already initialized) ---
1168
+ instanceStateMutex.Lock()
1169
+ if isInitialized[instanceIndex] {
1170
+ instanceStateMutex.Unlock()
1171
+ msg := fmt.Sprintf("Instance %d is already initialized. Please call CloseNode first.", instanceIndex)
1172
+ return jsonErrorResponse(msg, nil)
1173
+ }
1174
+ isInitialized[instanceIndex] = true
1175
+ instanceStateMutex.Unlock()
1176
+ log.Printf("[GO] 🚀 Instance %d: Starting CreateNode...", instanceIndex)
1177
+
1178
+ // Initialize state maps and context for this instance
1179
+ contexts[instanceIndex], cancelContexts[instanceIndex] = context.WithCancel(context.Background())
1180
+ connectedPeersInstances[instanceIndex] = make(map[peer.ID]ExtendedPeerInfo)
1181
+ persistentChatStreamsInstances[instanceIndex] = make(map[peer.ID]network.Stream)
1182
+ topicsInstances[instanceIndex] = make(map[string]*pubsub.Topic)
1183
+ subscriptionsInstances[instanceIndex] = make(map[string]*pubsub.Subscription)
1184
+ messageStoreInstances[instanceIndex] = newMessageStore()
1185
+ rendezvousDiscoveredPeersInstances[instanceIndex] = nil
1186
+
1187
+ // --- Configuration ---
1188
+ // Convert C integer parameters to Go types.
1189
+ predefinedPort := int(predefinedPortC)
1190
+ ipsJSON := C.GoString(ipsJSONC)
1191
+ enableRelayClient := int(enableRelayClientC) == 1
1192
+ enableRelayService := int(enableRelayServiceC) == 1
1193
+ waitPublic := int(waitPublicC) == 1
1194
+ maxConnections := int(maxConnectionsC)
1195
+
1196
+ log.Printf("[GO] 🔧 Instance %d: Config: Port=%d, IPsJSON=%s, EnableRelayClient=%t, EnableRelayService=%t, WaitToBePublic=%t, MaxConnections=%d",
1197
+ instanceIndex, predefinedPort, ipsJSON, enableRelayClient, enableRelayService, waitPublic, maxConnections)
1198
+
1199
+ // --- 4. Libp2p Options Assembly ---
1200
+ listenAddrs, err := getListenAddrs(ipsJSON, predefinedPort)
1201
+ if err != nil {
1202
+ cleanupFailedCreate(instanceIndex)
1203
+ return jsonErrorResponse(fmt.Sprintf("Instance %d: Failed to create multiaddrs", instanceIndex), err)
1204
+ }
1205
+
1206
+ // Setup Resource Manager
1207
+ limiter, err := createResourceManager(maxConnections)
1208
+ if err != nil {
1209
+ cleanupFailedCreate(instanceIndex)
1210
+ return jsonErrorResponse(fmt.Sprintf("Instance %d: Failed to create resource manager", instanceIndex), err)
1211
+ }
1212
+
1213
+ options := []libp2p.Option{
1214
+ libp2p.ListenAddrs(listenAddrs...),
1215
+ libp2p.DefaultSecurity,
1216
+ libp2p.DefaultMuxers,
1217
+ libp2p.Transport(tcp.NewTCPTransport),
1218
+ libp2p.Transport(quic.NewTransport),
1219
+ libp2p.Transport(webrtc.New),
1220
+ libp2p.ResourceManager(limiter),
1221
+ }
1222
+
1223
+ // Configure Relay Service (ability to *be* a relay)
1224
+ if enableRelayService {
1225
+ // limit := rc.DefaultLimit() // open this to see the default limits
1226
+ resources := rc.DefaultResources() // open this to see the default resource limits
1227
+ // Set the duration for relayed connections. 0 means infinite.
1228
+ ttl := 2 * time.Hour // reduced to 2 hours, it will be the node's duty to refresh the reservation if needed.
1229
+ // limit.Duration = ttl
1230
+ // resources.Limit = limit
1231
+ resources.Limit = nil // same as setting rc.WithInfiniteLimits()
1232
+ resources.ReservationTTL = ttl
1233
+
1234
+ // This single option enables the node to act as a relay for others, including hopping,
1235
+ // with our custom resource limits.
1236
+ options = append(options, libp2p.EnableRelayService(rc.WithResources(resources)), libp2p.EnableNATService())
1237
+ log.Printf("[GO] - Instance %d: Relay service is ENABLED with custom resource configuration.\n", instanceIndex)
1238
+ }
1239
+
1240
+ // EnableRelay (the ability to *use* relays) is default, we can explicitly disable it if needed.
1241
+ if !enableRelayClient {
1242
+ options = append(options, libp2p.DisableRelay()) // Explicitly disable using relays.
1243
+ log.Printf("[GO] - Instance %d: Relay client is DISABLED.\n", instanceIndex)
1244
+ }
1245
+
1246
+ // Prepare discovering the bootstrap peers
1247
+ var idht *dht.IpfsDHT
1248
+ isPublic := false
1249
+ if waitPublic {
1250
+ // Add any possible option to be publicly reachable
1251
+ options = append(
1252
+ options,
1253
+ libp2p.NATPortMap(),
1254
+ libp2p.EnableHolePunching(),
1255
+ libp2p.EnableAutoNATv2(),
1256
+ libp2p.Routing(func(h host.Host) (routing.PeerRouting, error) {
1257
+ bootstrapAddrInfos := dht.GetDefaultBootstrapPeerAddrInfos()
1258
+ // Define the DHT options for a "lazy" client
1259
+ dhtOptions := []dht.Option{
1260
+ dht.Mode(dht.ModeClient),
1261
+ dht.BootstrapPeers(bootstrapAddrInfos...),
1262
+ }
1263
+ var err error
1264
+ idht, err = dht.New(contexts[instanceIndex], h, dhtOptions...)
1265
+ return idht, err
1266
+ }),)
1267
+ log.Printf("[GO] - Instance %d: Trying to be publicly reachable.\n", instanceIndex)
1268
+ } else {
1269
+ if enableRelayService {
1270
+ // If not trying to be public, we can set the reachability to public to consent local deployment and relay exploitation.
1271
+ options = append(options, libp2p.ForceReachabilityPublic())
1272
+ isPublic = true // We assume it's public if we are a relay service and not trying to be public ourselves.
1273
+ }
1274
+ }
1275
+
1276
+ // Create the libp2p Host instance with the configured options for this instance.
1277
+ instanceHost, err := libp2p.New(options...)
1278
+ if err != nil {
1279
+ cleanupFailedCreate(instanceIndex)
1280
+ return jsonErrorResponse(fmt.Sprintf("Instance %d: Failed to create host", instanceIndex), err)
1281
+ }
1282
+ hostInstances[instanceIndex] = instanceHost
1283
+ log.Printf("[GO] ✅ Instance %d: Host created with ID: %s\n", instanceIndex, instanceHost.ID())
1284
+
1285
+ // --- PubSub Initialization ---
1286
+ if err := setupPubSub(instanceIndex); err != nil {
1287
+ cleanupFailedCreate(instanceIndex)
1288
+ return jsonErrorResponse(fmt.Sprintf("Instance %d: Failed to create PubSub", instanceIndex), err)
1289
+ }
1290
+ log.Printf("[GO] ✅ Instance %d: PubSub (GossipSub) initialized.\n", instanceIndex)
1291
+
1292
+ // --- Setup Notifiers and Handlers ---
1293
+ setupNotifiers(instanceIndex)
1294
+ log.Printf("[GO] 🔔 Instance %d: Registered network event notifier.\n", instanceIndex)
1295
+
1296
+ setupDirectMessageHandler(instanceIndex)
1297
+ log.Printf("[GO] ✅ Instance %d: Direct message handler set up.\n", instanceIndex)
1298
+
1299
+ // --- Address Reporting ---
1300
+ // Give discovery mechanisms a moment to find the public address.
1301
+ log.Printf("[GO] ⏳ Instance %d: Waiting for address discovery and NAT to settle...\n", instanceIndex)
1302
+
1303
+ if waitPublic {
1304
+ // --- 🎯 : Wait for Public Reachability ---
1305
+ // This replaces your old address polling loop. We wait a maximum of 30 seconds.
1306
+ isPublic = waitForPublicReachability(instanceHost, 30*time.Second)
1307
+ if !isPublic {
1308
+ log.Printf("[GO] ⚠️ Instance %d: The node may not be directly dialable.", instanceIndex)
1309
+ }
1310
+ idht.Close() // Close DHT as we don't need it anymore for this simple node
1311
+
1312
+ // --- Cleanup Bootstrap Peers ---
1313
+ // The connectedPeers map was flooded with bootstrap peers during DHT setup.
1314
+ // We remove them now to keep the map clean for actual user connections.
1315
+ log.Printf("[GO] 🧹 Instance %d: Cleaning up bootstrap peer connections from the tracked list...\n", instanceIndex)
1316
+ connectedPeersMutexes[instanceIndex].Lock()
1317
+ connectedPeersInstances[instanceIndex] = make(map[peer.ID]ExtendedPeerInfo) // Clear the map
1318
+ connectedPeersMutexes[instanceIndex].Unlock()
1319
+ }
1320
+
1321
+ // --- Get Final Addresses ---
1322
+ nodeAddresses, err := goGetNodeAddresses(instanceIndex, "")
1323
+ if err != nil {
1324
+ // This is a more critical failure if we can't even get local addresses.
1325
+ cleanupFailedCreate(instanceIndex)
1326
+ return jsonErrorResponse(
1327
+ fmt.Sprintf("Instance %d: Failed to obtain node addresses after waiting for reachability", instanceIndex),
1328
+ err,
1329
+ )
1330
+ }
1331
+
1332
+ // --- Build and return the new structured response ---
1333
+ response := CreateNodeResponse{
1334
+ Addresses: nodeAddresses,
1335
+ IsPublic: isPublic,
1336
+ }
1337
+
1338
+ log.Printf("[GO] 🎉 Instance %d: Node creation complete.\n", instanceIndex)
1339
+ return jsonSuccessResponse(response)
1340
+ }
1341
+
1342
+ // ConnectTo attempts to establish a connection with a remote peer given its multiaddress for a specific instance.
1343
+ // Parameters:
1344
+ // - instanceIndexC (C.int): The index of the node instance.
1345
+ // - addrsJSONC (*C.char): Pointer to a JSON string containing the list of addresses that can be dialed.
1346
+ //
1347
+ // Returns:
1348
+ // - *C.char: A JSON string indicating success (with peer AddrInfo of the winning connection) or failure (with an error message).
1349
+ // Structure: `{"state":"Success", "message": {"ID": "...", "Addrs": ["...", ...]}}` or `{"state":"Error", "message":"..."}`.
1350
+ // - IMPORTANT: The caller MUST free the returned C string using `FreeString`.
1351
+ //
1352
+ //export ConnectTo
1353
+ func ConnectTo(
1354
+ instanceIndexC C.int,
1355
+ addrsJSONC *C.char,
1356
+ ) *C.char {
1357
+
1358
+ instanceIndex := int(instanceIndexC)
1359
+ goAddrsJSON := C.GoString(addrsJSONC)
1360
+ log.Printf("[GO] 📞 Instance %d: Attempting to connect to peer with addresses: %s\n", instanceIndex, goAddrsJSON)
1361
+
1362
+ // Check instance index validity
1363
+ if err := checkInstanceIndex(instanceIndex); err != nil {
1364
+ return jsonErrorResponse("Invalid instance index", err) // Caller frees.
1365
+ }
1366
+
1367
+ // Get instance-specific state
1368
+ instanceHost := hostInstances[instanceIndex]
1369
+ instanceCtx := contexts[instanceIndex]
1370
+
1371
+ // Check if the host is initialized for this instance.
1372
+ if instanceHost == nil {
1373
+ return jsonErrorResponse(
1374
+ fmt.Sprintf("Instance %d: Host not initialized, cannot connect", instanceIndex),
1375
+ nil,
1376
+ ) // Caller frees.
1377
+ }
1378
+ if instanceCtx == nil {
1379
+ return jsonErrorResponse(
1380
+ fmt.Sprintf("Instance %d: Context not initialized, cannot connect", instanceIndex),
1381
+ nil,
1382
+ ) // Caller frees.
1383
+ }
1384
+
1385
+ // --- Unmarshal Address List from JSON ---
1386
+ var addrStrings []string
1387
+ if err := json.Unmarshal([]byte(goAddrsJSON), &addrStrings); err != nil {
1388
+ return jsonErrorResponse("Failed to parse addresses JSON", err)
1389
+ }
1390
+ if len(addrStrings) == 0 {
1391
+ return jsonErrorResponse("Address list is empty", nil)
1392
+ }
1393
+
1394
+ // --- Create AddrInfo from the list ---
1395
+ addrInfo, err := peer.AddrInfoFromString(addrStrings[0])
1396
+ if err != nil {
1397
+ return jsonErrorResponse("Invalid first multiaddress in list", err)
1398
+ }
1399
+
1400
+ // Add the rest of the addresses to the AddrInfo struct
1401
+ for i := 1; i < len(addrStrings); i++ {
1402
+ maddr, err := ma.NewMultiaddr(addrStrings[i])
1403
+ if err != nil {
1404
+ log.Printf("[GO] ⚠️ Instance %d: Skipping invalid multiaddress '%s' in list: %v\n", instanceIndex, addrStrings[i], err)
1405
+ continue
1406
+ }
1407
+ // You might want to add a check here to ensure subsequent addresses are for the same peer ID
1408
+ addrInfo.Addrs = append(addrInfo.Addrs, maddr)
1409
+ }
1410
+
1411
+ // Check if attempting to connect to the local node itself.
1412
+ if addrInfo.ID == instanceHost.ID() {
1413
+ log.Printf("[GO] ℹ️ Instance %d: Attempting to connect to self (%s), skipping explicit connection.\n", instanceIndex, addrInfo.ID)
1414
+ // Connecting to self is usually not necessary or meaningful in libp2p.
1415
+ // Return success, indicating the "connection" is implicitly present.
1416
+ return jsonSuccessResponse(addrInfo) // Caller frees.
1417
+ }
1418
+
1419
+ // --- 1. ESTABLISH CONNECTION ---
1420
+ // Use a context with a timeout for the connection attempt to prevent blocking indefinitely.
1421
+ connCtx, cancel := context.WithTimeout(instanceCtx, 30*time.Second) // 30-second timeout.
1422
+ defer cancel() // Ensure context is cancelled eventually.
1423
+
1424
+ // Add the peer's address(es) to the local peerstore for this instance. This helps libp2p find the peer.
1425
+ // ConnectedAddrTTL suggests the address is likely valid for a short time after connection.
1426
+ // Use PermanentAddrTTL if the address is known to be stable.
1427
+ instanceHost.Peerstore().AddAddrs(addrInfo.ID, addrInfo.Addrs, peerstore.ConnectedAddrTTL)
1428
+
1429
+ // Initiate the connection attempt. libp2p will handle dialing and negotiation.
1430
+ log.Printf("[GO] - Instance %d: Attempting host.Connect to %s...\n", instanceIndex, addrInfo.ID)
1431
+ if err := instanceHost.Connect(connCtx, *addrInfo); err != nil {
1432
+ // Check if the error was due to the connection timeout.
1433
+ if connCtx.Err() == context.DeadlineExceeded {
1434
+ errMsg := fmt.Sprintf("Instance %d: Connection attempt to %s timed out after 30s", instanceIndex, addrInfo.ID)
1435
+ log.Printf("[GO] ❌ %s\n", errMsg)
1436
+ return jsonErrorResponse(errMsg, nil) // Return specific timeout error (caller frees).
1437
+ }
1438
+ // Handle other connection errors.
1439
+ errMsg := fmt.Sprintf("Instance %d: Failed to connect to peer %s", instanceIndex, addrInfo.ID)
1440
+ // Example: Check for specific common errors if needed
1441
+ // if strings.Contains(err.Error(), "no route to host") { ... }
1442
+ return jsonErrorResponse(errMsg, err) // Return generic connection error (caller frees).
1443
+ }
1444
+
1445
+ // --- 2. FIND THE WINNING ADDRESS ---
1446
+ // After a successful connection, query the host's network for active connections to the peer.
1447
+ // This is where you find the 'winning' address.
1448
+ conns := instanceHost.Network().ConnsToPeer(addrInfo.ID)
1449
+ var winningAddr string
1450
+ if len(conns) > 0 {
1451
+ winningAddr = fmt.Sprintf("%s/p2p/%s", conns[0].RemoteMultiaddr().String(), addrInfo.ID.String())
1452
+ log.Printf("[GO] ✅ Instance %d: Successfully connected to peer %s via: %s\n", instanceIndex, addrInfo.ID, winningAddr)
1453
+ } else {
1454
+ log.Printf("[GO] ⚠️ Instance %d: Connect succeeded for %s, but no active connection found immediately. It may be pending.\n", instanceIndex, addrInfo.ID)
1455
+ }
1456
+
1457
+ // Success: log the successful connection and return the response.
1458
+ log.Printf("[GO] ✅ Instance %d: Successfully initiated connection to multiaddress: %s\n", instanceIndex, winningAddr)
1459
+ winningAddrInfo, err := peer.AddrInfoFromString(winningAddr)
1460
+ if err != nil {
1461
+ return jsonErrorResponse("Invalid winner multiaddress.", err)
1462
+ }
1463
+ return jsonSuccessResponse(winningAddrInfo) // Caller frees.
1464
+ }
1465
+
1466
// ReserveOnRelay attempts to reserve a slot on a specified relay node for a specific instance.
// This allows the local node to be reachable via that relay, even if behind NAT/firewall.
// The first connection with the relay node should be done in advance using ConnectTo:
// this function only reuses addresses already present in the peerstore and fails if no
// live connection to the relay exists.
// Parameters:
//   - instanceIndexC (C.int): The index of the node instance.
//   - relayPeerIDC (*C.char): The peerID of the relay node.
//
// Returns:
//   - *C.char: A JSON string indicating success or failure.
//     On success, the `message` contains the expiration date of the reservation (ISO 8601).
//     Structure (Success): `{"state":"Success", "message": "2024-12-31T23:59:59Z"}`
//     Structure (Error): `{"state":"Error", "message":"..."}`
//   - IMPORTANT: The caller MUST free the returned C string using `FreeString`.
//
//export ReserveOnRelay
func ReserveOnRelay(
	instanceIndexC C.int,
	relayPeerIDC *C.char,
) *C.char {

	instanceIndex := int(instanceIndexC)
	// Convert C string input to Go string.
	goRelayPeerID := C.GoString(relayPeerIDC)
	log.Printf("[GO] 🅿️ Instance %d: Attempting to reserve slot on relay with Peer ID: %s\n", instanceIndex, goRelayPeerID)

	// Check instance index validity
	if err := checkInstanceIndex(instanceIndex); err != nil {
		return jsonErrorResponse("Invalid instance index", err) // Caller frees.
	}

	// Get instance-specific state
	instanceHost := hostInstances[instanceIndex]
	instanceCtx := contexts[instanceIndex]

	// Check if the host is initialized for this instance.
	if instanceHost == nil {
		return jsonErrorResponse(
			fmt.Sprintf("Instance %d: Host not initialized, cannot reserve", instanceIndex), nil,
		) // Caller frees.
	}
	if instanceCtx == nil {
		return jsonErrorResponse(
			fmt.Sprintf("Instance %d: Context not initialized, cannot reserve", instanceIndex), nil,
		) // Caller frees.
	}

	// --- Decode Peer ID and build AddrInfo from Peerstore ---
	relayPID, err := peer.Decode(goRelayPeerID)
	if err != nil {
		return jsonErrorResponse("Failed to decode relay Peer ID string", err)
	}

	// Construct the AddrInfo using the ID and the addresses we know from the peerstore.
	// These addresses were learned during the prior ConnectTo call.
	relayInfo := peer.AddrInfo{
		ID:    relayPID,
		Addrs: instanceHost.Peerstore().Addrs(relayPID),
	}

	// Ensure the node is not trying to reserve a slot on itself.
	if relayInfo.ID == instanceHost.ID() {
		return jsonErrorResponse(
			fmt.Sprintf("Instance %d: Cannot reserve slot on self", instanceIndex), nil,
		) // Caller frees.
	}

	// --- VERIFY CONNECTION TO RELAY ---
	// client.Reserve performs an RPC over an existing connection, so one must already be open.
	if len(instanceHost.Network().ConnsToPeer(relayInfo.ID)) == 0 {
		errMsg := fmt.Sprintf("Instance %d: Not connected to relay %s. Must connect before reserving.", instanceIndex, relayInfo.ID)
		return jsonErrorResponse(errMsg, nil)
	}
	log.Printf("[GO] - Instance %d: Verified connection to relay: %s\n", instanceIndex, relayInfo.ID)

	// --- Attempt Reservation ---
	// Use a separate context with a longer timeout than ConnectTo's, since the
	// reservation RPC may involve extra round trips through the relay.
	resCtx, resCancel := context.WithTimeout(instanceCtx, 60*time.Second) // 60-second timeout for reservation.
	defer resCancel()
	// Call the circuitv2 client function to request a reservation.
	// This performs the RPC communication with the relay.
	reservation, err := client.Reserve(resCtx, instanceHost, relayInfo)
	if err != nil {
		errMsg := fmt.Sprintf("Instance %d: Failed to reserve slot on relay %s", instanceIndex, relayInfo.ID)
		// Handle reservation timeout specifically.
		if resCtx.Err() == context.DeadlineExceeded {
			errMsg = fmt.Sprintf("Instance %d: Reservation attempt on relay %s timed out", instanceIndex, relayInfo.ID)
			return jsonErrorResponse(errMsg, nil) // Caller frees.
		}
		return jsonErrorResponse(errMsg, err) // Caller frees.
	}

	// Although Reserve usually errors out if it fails, double-check if the reservation object is nil.
	if reservation == nil {
		errMsg := fmt.Sprintf("Instance %d: Reservation on relay %s returned nil voucher, but no error", instanceIndex, relayInfo.ID)
		return jsonErrorResponse(errMsg, nil) // Caller frees.
	}

	// --- Construct Relayed Addresses and Update Local Peerstore ---
	// We construct a relayed address of the form
	//   <relay-addr>/p2p/<relayID>/p2p-circuit/p2p/<ourID>
	// for each public address of the relay to maximize reachability.
	var constructedAddrs []ma.Multiaddr
	for _, relayAddr := range relayInfo.Addrs {
		// We only want to use public, usable addresses for the circuit;
		// loopback/unspecified addresses would be useless to remote dialers.
		if manet.IsIPLoopback(relayAddr) || manet.IsIPUnspecified(relayAddr) {
			continue
		}

		// Ensure the relay's address includes its own Peer ID; peerstore addresses
		// may or may not carry a trailing /p2p/<id> component.
		baseRelayAddrStr := relayAddr.String()
		if _, idInAddr := peer.SplitAddr(relayAddr); idInAddr == "" {
			baseRelayAddrStr = fmt.Sprintf("%s/p2p/%s", relayAddr.String(), relayInfo.ID.String())
		}

		constructedAddrStr := fmt.Sprintf("%s/p2p-circuit/p2p/%s", baseRelayAddrStr, instanceHost.ID().String())
		constructedAddr, err := ma.NewMultiaddr(constructedAddrStr)
		// Silently skip addresses that fail to parse; at least one must succeed (checked below).
		if err == nil {
			constructedAddrs = append(constructedAddrs, constructedAddr)
		}
	}

	if len(constructedAddrs) == 0 {
		return jsonErrorResponse("Reservation succeeded but failed to construct any valid relayed multiaddr", nil)
	}

	log.Printf("[GO] - Instance %d: Adding %d constructed relayed address(es) to local peerstore (ID: %s) expiring at: %s\n", instanceIndex, len(constructedAddrs), instanceHost.ID(), reservation.Expiration.Format(time.RFC3339))
	// Register the relayed addresses under our OWN peer ID so they get advertised to others.
	// NOTE(review): stored with PermanentAddrTTL even though the reservation itself expires —
	// presumably refreshed elsewhere; confirm against the renewal logic.
	instanceHost.Peerstore().AddAddrs(instanceHost.ID(), constructedAddrs, peerstore.PermanentAddrTTL)

	log.Printf("[GO] ✅ Instance %d: Reservation successful on relay: %s.\n", instanceIndex, relayInfo.ID)

	// Return the expiration time of the reservation as confirmation.
	return jsonSuccessResponse(reservation.Expiration)
}
1595
+
1596
+ // DisconnectFrom attempts to close any active connections to a specified peer
1597
+ // and removes the peer from the internally tracked list for a specific instance.
1598
+ // Parameters:
1599
+ // - instanceIndexC (C.int): The index of the node instance.
1600
+ // - peerIDC (*C.char): The Peer ID string of the peer to disconnect from.
1601
+ //
1602
+ // Returns:
1603
+ // - *C.char: A JSON string indicating success or failure.
1604
+ // Structure: `{"state":"Success", "message":"Disconnected from peer ..."}` or `{"state":"Error", "message":"..."}`.
1605
+ // - IMPORTANT: The caller MUST free the returned C string using `FreeString`.
1606
+ //
1607
+ //export DisconnectFrom
1608
+ func DisconnectFrom(
1609
+ instanceIndexC C.int,
1610
+ peerIDC *C.char,
1611
+ ) *C.char {
1612
+
1613
+ instanceIndex := int(instanceIndexC)
1614
+ goPeerID := C.GoString(peerIDC)
1615
+ log.Printf("[GO] 🔌 Instance %d: Attempting to disconnect from peer: %s\n", instanceIndex, goPeerID)
1616
+
1617
+ // Check instance index validity
1618
+ if err := checkInstanceIndex(instanceIndex); err != nil {
1619
+ return jsonErrorResponse("Invalid instance index", err)
1620
+ }
1621
+
1622
+ // Get instance-specific state
1623
+ instanceHost := hostInstances[instanceIndex]
1624
+ instanceConnectedPeers := connectedPeersInstances[instanceIndex]
1625
+ instanceConnectedPeersMutex := &connectedPeersMutexes[instanceIndex]
1626
+ instancePersistentChatStreams := persistentChatStreamsInstances[instanceIndex]
1627
+ instancePersistentChatStreamsMutex := &persistentChatStreamsMutexes[instanceIndex]
1628
+
1629
+ if instanceHost == nil {
1630
+ return jsonErrorResponse(
1631
+ fmt.Sprintf("Instance %d: Host not initialized, cannot disconnect", instanceIndex), nil,
1632
+ )
1633
+ }
1634
+
1635
+ pid, err := peer.Decode(goPeerID)
1636
+ if err != nil {
1637
+ return jsonErrorResponse(
1638
+ fmt.Sprintf("Instance %d: Failed to decode peer ID", instanceIndex), err,
1639
+ )
1640
+ }
1641
+
1642
+ if pid == instanceHost.ID() {
1643
+ log.Printf("[GO] ℹ️ Instance %d: Attempting to disconnect from self (%s), skipping.\n", instanceIndex, pid)
1644
+ return jsonSuccessResponse("Cannot disconnect from self")
1645
+ }
1646
+
1647
+ // --- Close Persistent Outgoing Stream (if exists) for this instance ---
1648
+ instancePersistentChatStreamsMutex.Lock()
1649
+ stream, exists := instancePersistentChatStreams[pid]
1650
+ if exists {
1651
+ log.Printf("[GO] ↳ Instance %d: Closing persistent outgoing stream to %s\n", instanceIndex, pid)
1652
+ _ = stream.Close() // Attempt graceful close
1653
+ delete(instancePersistentChatStreams, pid)
1654
+ }
1655
+ instancePersistentChatStreamsMutex.Unlock() // Unlock before potentially blocking network call
1656
+
1657
+ // --- Close Network Connections ---
1658
+ conns := instanceHost.Network().ConnsToPeer(pid)
1659
+ closedNetworkConn := false
1660
+ if len(conns) > 0 {
1661
+ log.Printf("[GO] - Instance %d: Closing %d active network connection(s) to peer %s...\n", instanceIndex, len(conns), pid)
1662
+ err = instanceHost.Network().ClosePeer(pid) // This closes the underlying connection(s)
1663
+ if err != nil {
1664
+ log.Printf("[GO] ⚠️ Instance %d: Error closing network connection(s) to peer %s: %v (proceeding with cleanup)\n", instanceIndex, pid, err)
1665
+ } else {
1666
+ log.Printf("[GO] - Instance %d: Closed network connection(s) to peer: %s\n", instanceIndex, pid)
1667
+ closedNetworkConn = true
1668
+ }
1669
+ } else {
1670
+ log.Printf("[GO] ℹ️ Instance %d: No active network connections found to peer %s.\n", instanceIndex, pid)
1671
+ }
1672
+
1673
+ // --- Remove from Tracking Map for this instance ---
1674
+ instanceConnectedPeersMutex.Lock()
1675
+ delete(instanceConnectedPeers, pid)
1676
+ instanceConnectedPeersMutex.Unlock()
1677
+
1678
+ logMsg := fmt.Sprintf("Instance %d: Disconnected from peer %s", instanceIndex, goPeerID)
1679
+ if !exists && !closedNetworkConn && len(conns) == 0 {
1680
+ logMsg = fmt.Sprintf("Instance %d: Peer %s was not connected or tracked", instanceIndex, goPeerID)
1681
+ }
1682
+ log.Printf("[GO] ✅ %s\n", logMsg)
1683
+
1684
+ return jsonSuccessResponse(logMsg)
1685
+ }
1686
+
1687
+ // GetConnectedPeers returns a list of peers currently tracked as connected for a specific instance.
1688
+ // Note: This relies on the internal `connectedPeersInstances` map which is updated during
1689
+ // connect/disconnect operations and incoming streams. It may optionally perform
1690
+ // a liveness check.
1691
+ // Parameters:
1692
+ // - instanceIndexC (C.int): The index of the node instance.
1693
+ //
1694
+ // Returns:
1695
+ // - *C.char: A JSON string containing a list of connected peers' information.
1696
+ // Structure: `{"state":"Success", "message": [ExtendedPeerInfo, ...]}` or `{"state":"Error", "message":"..."}`.
1697
+ // Each `ExtendedPeerInfo` object has `addr_info` (ID, Addrs), `connected_at`, `direction`, and `misc`.
1698
+ // - IMPORTANT: The caller MUST free the returned C string using `FreeString`.
1699
+ //
1700
+ //export GetConnectedPeers
1701
+ func GetConnectedPeers(
1702
+ instanceIndexC C.int,
1703
+ ) *C.char {
1704
+
1705
+ instanceIndex := int(instanceIndexC)
1706
+
1707
+ // Check instance index validity
1708
+ if err := checkInstanceIndex(instanceIndex); err != nil {
1709
+ return jsonErrorResponse("Invalid instance index", err) // Caller frees.
1710
+ }
1711
+
1712
+ // Get instance-specific state and mutex
1713
+ instanceConnectedPeers := connectedPeersInstances[instanceIndex]
1714
+ instanceConnectedPeersMutex := &connectedPeersMutexes[instanceIndex]
1715
+ instanceHost := hostInstances[instanceIndex]
1716
+
1717
+ // Use a Write Lock for the entire critical section to avoid mixing RLock and Lock.
1718
+ instanceConnectedPeersMutex.RLock()
1719
+ defer instanceConnectedPeersMutex.RUnlock() // Ensure lock is released.
1720
+
1721
+ // Create a slice to hold the results directly from the map.
1722
+ peersList := make([]ExtendedPeerInfo, 0, len(instanceConnectedPeers))
1723
+ // Prior check if the host is initialized for this instance.
1724
+ if instanceHost != nil && instanceHost.Network() != nil {
1725
+ // Check if the connectedPeers map itself is initialized for this instance.
1726
+ // This map should be initialized in CreateNode.
1727
+ if instanceConnectedPeers == nil {
1728
+ log.Printf("[GO] ⚠️ Instance %d: GetConnectedPeers: connectedPeersInstances map is nil. Returning empty list.\n", instanceIndex)
1729
+ // Return success with an empty list.
1730
+ return jsonSuccessResponse([]ExtendedPeerInfo{})
1731
+ }
1732
+
1733
+ for _, peerInfo := range instanceConnectedPeers {
1734
+ peersList = append(peersList, peerInfo)
1735
+ }
1736
+ } else {
1737
+ // If host is not ready, return the current state of the map (which should be empty if CreateNode was called correctly).
1738
+ log.Printf("[GO] ⚠️ Instance %d: GetConnectedPeers called but host is not fully initialized. Returning potentially empty list based on map.\n", instanceIndex)
1739
+ for _, peerInfo := range instanceConnectedPeers {
1740
+ peersList = append(peersList, peerInfo)
1741
+ }
1742
+ }
1743
+
1744
+ log.Printf("[GO] ℹ️ Instance %d: Reporting %d currently tracked and active peers.\n", instanceIndex, len(peersList))
1745
+
1746
+ // Return the list of active peers as a JSON success response.
1747
+ return jsonSuccessResponse(peersList) // Caller frees.
1748
+ }
1749
+
1750
+ // GetRendezvousPeers returns a list of peers currently tracked as part of the world for a specific instance.
1751
+ // Note: This relies on the internal `rendezvousDiscoveredPeersInstances` map which is updated by pubsub
1752
+ // Parameters:
1753
+ // - instanceIndexC (C.int): The index of the node instance.
1754
+ //
1755
+ // Returns:
1756
+ // - *C.char: A JSON string containing a list of connected peers' information.
1757
+ // Structure: `{"state":"Success", "message": [ExtendedPeerInfo, ...]}` or `{"state":"Error", "message":"..."}`.
1758
+ // Each `ExtendedPeerInfo` object has `addr_info` (ID, Addrs), `connected_at`, `direction`, and `misc`.
1759
+ // - IMPORTANT: The caller MUST free the returned C string using `FreeString`.
1760
+ //
1761
+ //export GetRendezvousPeers
1762
+ func GetRendezvousPeers(
1763
+ instanceIndexC C.int,
1764
+ ) *C.char {
1765
+
1766
+ instanceIndex := int(instanceIndexC)
1767
+
1768
+ // Check instance index validity
1769
+ if err := checkInstanceIndex(instanceIndex); err != nil {
1770
+ return jsonErrorResponse("Invalid instance index", err) // Caller frees.
1771
+ }
1772
+
1773
+ rendezvousDiscoveredPeersMutexes[instanceIndex].RLock()
1774
+ // Get the pointer to the state struct.
1775
+ rendezvousState := rendezvousDiscoveredPeersInstances[instanceIndex]
1776
+ rendezvousDiscoveredPeersMutexes[instanceIndex].RUnlock()
1777
+
1778
+ // If the state pointer is nil, it means we haven't received the first update yet.
1779
+ if rendezvousState == nil {
1780
+ return C.CString(`{"state":"Empty"}`)
1781
+ }
1782
+
1783
+ // Extract the list of extendedPeerInfo to return it
1784
+ peersList := make([]ExtendedPeerInfo, 0, len(rendezvousState.Peers))
1785
+ for _, peerInfo := range rendezvousState.Peers {
1786
+ peersList = append(peersList, peerInfo)
1787
+ }
1788
+
1789
+ // This struct will be marshaled to JSON with exactly the fields you want.
1790
+ responsePayload := struct {
1791
+ Peers []ExtendedPeerInfo `json:"peers"`
1792
+ UpdateCount int64 `json:"update_count"`
1793
+ }{
1794
+ Peers: peersList,
1795
+ UpdateCount: rendezvousState.UpdateCount,
1796
+ }
1797
+
1798
+ // The state exists, so return the whole struct.
1799
+ log.Printf("[GO] ℹ️ Instance %d: Reporting %d rendezvous peers (UpdateCount: %d).\n", instanceIndex, len(rendezvousState.Peers), rendezvousState.UpdateCount)
1800
+ return jsonSuccessResponse(responsePayload) // Caller frees.
1801
+ }
1802
+
1803
+ // GetNodeAddresses is the C-exported wrapper for goGetNodeAddresses.
1804
+ // It handles C-Go type conversions and JSON marshaling.
1805
+ //
1806
+ //export GetNodeAddresses
1807
+ func GetNodeAddresses(
1808
+ instanceIndexC C.int,
1809
+ peerIDC *C.char,
1810
+ ) *C.char {
1811
+ instanceIndex := int(instanceIndexC)
1812
+ peerIDStr := C.GoString(peerIDC) // Raw string from C
1813
+
1814
+ if err := checkInstanceIndex(instanceIndex); err != nil {
1815
+ return jsonErrorResponse("Invalid instance index", err)
1816
+ }
1817
+
1818
+ // instanceHost is needed here to compare against for "local" case,
1819
+ // or if goGetNodeAddresses itself didn't handle nil host for some reason.
1820
+ instanceHost := hostInstances[instanceIndex]
1821
+ if instanceHost == nil {
1822
+ // This check should ideally also be inside goGetNodeAddresses if it can be called
1823
+ // before host is fully up, but for the wrapper it's good.
1824
+ return jsonErrorResponse(fmt.Sprintf("Instance %d: Host not initialized", instanceIndex), nil)
1825
+ }
1826
+
1827
+ var pidForInternalCall peer.ID // This will be peer.ID("") for local
1828
+ var err error
1829
+
1830
+ if peerIDStr == "" || peerIDStr == instanceHost.ID().String() {
1831
+ // Convention: Empty peer.ID ("") passed to goGetNodeAddresses means "local node".
1832
+ pidForInternalCall = "" // This is peer.ID("")
1833
+ } else {
1834
+ pidForInternalCall, err = peer.Decode(peerIDStr)
1835
+ if err != nil {
1836
+ errMsg := fmt.Sprintf("Instance %d: Failed to decode peer ID '%s'", instanceIndex, peerIDStr)
1837
+ return jsonErrorResponse(errMsg, err)
1838
+ }
1839
+ }
1840
+
1841
+ // Call the internal Go function with the resolved peer.ID or empty peer.ID for local
1842
+ addresses, err := goGetNodeAddresses(instanceIndex, pidForInternalCall)
1843
+ if err != nil {
1844
+ return jsonErrorResponse(err.Error(), nil)
1845
+ }
1846
+
1847
+ return jsonSuccessResponse(addresses)
1848
+ }
1849
+
1850
+ // SendMessageToPeer sends a message either directly to a specific peer or broadcasts it via PubSub for a specific instance.
1851
+ // Parameters:
1852
+ // - instanceIndexC (C.int): The index of the node instance.
1853
+ // - channelC (*C.char): Use the unique channel as defined above in the Message struct.
1854
+ // - dataC (*C.char): A pointer to the raw byte data of the message payload.
1855
+ // - lengthC (C.int): The length of the data buffer pointed to by `data`.
1856
+ //
1857
+ // Returns:
1858
+ // - *C.char: A JSON string with {"state": "Success/Error", "message": "..."}.
1859
+ // - IMPORTANT: The caller MUST free this string using FreeString.
1860
+ //
1861
+ //export SendMessageToPeer
1862
+ func SendMessageToPeer(
1863
+ instanceIndexC C.int,
1864
+ channelC *C.char,
1865
+ dataC *C.char,
1866
+ lengthC C.int,
1867
+ ) *C.char {
1868
+
1869
+ instanceIndex := int(instanceIndexC)
1870
+ // Convert C inputs
1871
+ goChannel := C.GoString(channelC)
1872
+ goData := C.GoBytes(unsafe.Pointer(dataC), C.int(lengthC))
1873
+
1874
+ if err := checkInstanceIndex(instanceIndex); err != nil {
1875
+ // Invalid instance index, return error code.
1876
+ return jsonErrorResponse("Invalid instance index", err)
1877
+ }
1878
+
1879
+ // Get instance-specific state and mutexes
1880
+ instanceHost := hostInstances[instanceIndex]
1881
+ instanceCtx := contexts[instanceIndex]
1882
+
1883
+ if instanceHost == nil || instanceCtx == nil {
1884
+ // Host or context not initialized for this instance.
1885
+ return jsonErrorResponse("Host or Context not initialized for this instance", nil)
1886
+ }
1887
+
1888
+ // --- Branch: Broadcast or Direct Send ---
1889
+ if strings.Contains(goChannel, "::ps:") {
1890
+ // --- Broadcast via specific PubSub Topic ---
1891
+ instancePubsub := pubsubInstances[instanceIndex] // Get from instance state
1892
+ instanceCtx := contexts[instanceIndex] // Get from instance state
1893
+
1894
+ if instancePubsub == nil {
1895
+ // PubSub not initialized, cannot broadcast
1896
+ return jsonErrorResponse("PubSub not initialized, cannot broadcast", nil)
1897
+ }
1898
+
1899
+ pubsubMutexes[instanceIndex].Lock() // Changed to full Lock since we might Join
1900
+ topic, exists := topicsInstances[instanceIndex][goChannel]
1901
+ if !exists {
1902
+ var err error
1903
+ log.Printf("[GO] - Instance %d: Joining PubSub topic '%s' for sending.\n", instanceIndex, goChannel)
1904
+ topic, err = instancePubsub.Join(goChannel) // ps is instancePubsub
1905
+ if err != nil {
1906
+ pubsubMutexes[instanceIndex].Unlock()
1907
+ // Failed to join PubSub topic
1908
+ return jsonErrorResponse(fmt.Sprintf("Failed to join PubSub topic '%s'", goChannel), err)
1909
+ }
1910
+ topicsInstances[instanceIndex][goChannel] = topic // Store the new topic
1911
+ log.Printf("[GO] ✅ Instance %d: Joined PubSub topic: %s for publishing.\n", instanceIndex, goChannel)
1912
+ }
1913
+ pubsubMutexes[instanceIndex].Unlock() // Unlock after potentially joining
1914
+
1915
+ // Directly publish the raw Protobuf payload.
1916
+ if err := topic.Publish(instanceCtx, goData); err != nil {
1917
+ // Failed to publish to topic
1918
+ return jsonErrorResponse(fmt.Sprintf("Failed to publish to topic '%s'", goChannel), err)
1919
+ }
1920
+ log.Printf("[GO] 🌍 Instance %d: Broadcast to topic '%s' (%d bytes)\n", instanceIndex, goChannel, len(goData))
1921
+ return jsonSuccessResponse(fmt.Sprintf("Message broadcast to topic %s", goChannel))
1922
+
1923
+ } else if strings.Contains(goChannel, "::dm:") {
1924
+ // --- Direct Peer-to-Peer Message Sending (Persistent Stream Logic) ---
1925
+ receiverChannelIDStr := strings.Split(goChannel, "::dm:")[1] // Extract the receiver's channel ID from the format "dm:<peerID>-<channelSpecifier>"
1926
+ peerIDStr := strings.Split(receiverChannelIDStr, "-")[0]
1927
+ pid, err := peer.Decode(peerIDStr)
1928
+ if err != nil {
1929
+ // Invalid peer ID format
1930
+ return jsonErrorResponse("Invalid peer ID format in channel string", err)
1931
+ }
1932
+
1933
+ if pid == instanceHost.ID() {
1934
+ // Attempt to send direct message to self
1935
+ return jsonErrorResponse("Attempt to send direct message to self is invalid", nil)
1936
+ }
1937
+
1938
+ instancePersistentChatStreams := persistentChatStreamsInstances[instanceIndex]
1939
+ instancePersistentChatStreamsMutex := &persistentChatStreamsMutexes[instanceIndex]
1940
+
1941
+ // Acquire lock to access the persistent stream map for this instance
1942
+ instancePersistentChatStreamsMutex.Lock()
1943
+ stream, exists := instancePersistentChatStreams[pid]
1944
+
1945
+ // If stream exists, try writing to it
1946
+ if exists {
1947
+ log.Printf("[GO] ↳ Instance %d: Reusing existing stream to %s\n", instanceIndex, pid)
1948
+ err = writeDirectMessageFrame(stream, goChannel, goData)
1949
+ if err == nil {
1950
+ // Success writing to existing stream
1951
+ instancePersistentChatStreamsMutex.Unlock() // Unlock before returning
1952
+ log.Printf("[GO] 📤 Instance %d: Sent direct message to %s (on existing stream)\n", instanceIndex, pid)
1953
+ return jsonSuccessResponse(fmt.Sprintf("Direct message sent to %s (reused stream).", pid))
1954
+ }
1955
+ // Write failed on existing stream - assume it's broken
1956
+ log.Printf("[GO] ⚠️ Instance %d: Failed to write to existing stream for %s: %v. Closing and removing stream.", instanceIndex, pid, err)
1957
+ // Close the stream (Reset is more abrupt, Close attempts graceful)
1958
+ _ = stream.Close() // Ignore error during close, as we're removing it anyway
1959
+ // Remove from map
1960
+ delete(instancePersistentChatStreams, pid)
1961
+ // Unlock and return specific error
1962
+ instancePersistentChatStreamsMutex.Unlock()
1963
+ return jsonErrorResponse(fmt.Sprintf("Failed to write to existing stream for %s. Closing and removing stream.", pid), err)
1964
+ } else {
1965
+ // Stream does not exist, need to create a new one
1966
+ instancePersistentChatStreamsMutex.Unlock()
1967
+
1968
+ log.Printf("[GO] ↳ Instance %d: No existing stream to %s, creating new one...\n", instanceIndex, pid)
1969
+ streamCtx, cancel := context.WithTimeout(instanceCtx, 20*time.Second)
1970
+ defer cancel()
1971
+
1972
+ newStream, err := instanceHost.NewStream(
1973
+ network.WithAllowLimitedConn(streamCtx, "chat/1.0.0"),
1974
+ pid,
1975
+ ChatProtocol,
1976
+ )
1977
+
1978
+ // Re-acquire lock *after* NewStream finishes or errors
1979
+ instancePersistentChatStreamsMutex.Lock()
1980
+ defer instancePersistentChatStreamsMutex.Unlock()
1981
+
1982
+ if err != nil {
1983
+ // Failed to open a *new* stream
1984
+ if streamCtx.Err() == context.DeadlineExceeded || err == context.DeadlineExceeded {
1985
+ return jsonErrorResponse(fmt.Sprintf("Failed to open new stream to %s: Timeout", pid), err)
1986
+ }
1987
+ return jsonErrorResponse(fmt.Sprintf("Failed to open new stream to %s.", pid), err)
1988
+ }
1989
+
1990
+ // --- RACE CONDITION HANDLING ---
1991
+ // Double-check if another goroutine created a stream while we were unlocked
1992
+ existingStream, existsNow := instancePersistentChatStreams[pid]
1993
+ if existsNow {
1994
+ log.Printf("[GO] ⚠️ Instance %d: Race condition: Another stream to %s was created. Using existing one and closing the new one.", instanceIndex, pid)
1995
+ _ = newStream.Close() // Close the redundant stream we just created.
1996
+ stream = existingStream
1997
+ } else {
1998
+ log.Printf("[GO] ✅ Instance %d: Opened and stored new persistent stream to %s\n", instanceIndex, pid)
1999
+ instancePersistentChatStreams[pid] = newStream
2000
+ stream = newStream
2001
+ go handleStream(instanceIndex, newStream)
2002
+ }
2003
+
2004
+ // --- Write message to the determined stream ---
2005
+ err = writeDirectMessageFrame(stream, goChannel, goData)
2006
+ if err != nil {
2007
+ log.Printf("[GO] ❌ Instance %d: Failed to write initial message to stream for %s: %v. Closing and removing.", instanceIndex, pid, err)
2008
+ _ = stream.Close()
2009
+ if currentStream, ok := instancePersistentChatStreams[pid]; ok && currentStream == stream {
2010
+ delete(instancePersistentChatStreams, pid)
2011
+ }
2012
+ return jsonErrorResponse(fmt.Sprintf("Failed to write to new stream to '%s' (needs reconnect).", pid), err)
2013
+ }
2014
+
2015
+ log.Printf("[GO] 📤 Instance %d: Sent direct message to %s (on NEW stream)\n", instanceIndex, pid)
2016
+ return jsonSuccessResponse(fmt.Sprintf("Direct message sent to %s (new stream).", pid))
2017
+ }
2018
+ } else {
2019
+ // Invalid channel format
2020
+ return jsonErrorResponse(fmt.Sprintf("Invalid channel format '%s'", goChannel), nil)
2021
+ }
2022
+ }
2023
+
2024
+ // SubscribeToTopic joins a PubSub topic and starts listening for messages for a specific instance.
2025
+ // Parameters:
2026
+ // - instanceIndexC (C.int): The index of the node instance.
2027
+ // - channelC (*C.char): The Channel associated to the topic to subscribe to.
2028
+ //
2029
+ // Returns:
2030
+ // - *C.char: A JSON string indicating success or failure.
2031
+ // Structure: `{"state":"Success", "message":"Subscribed to topic ..."}` or `{"state":"Error", "message":"..."}`.
2032
+ // - IMPORTANT: The caller MUST free the returned C string using `FreeString`.
2033
+ //
2034
+ //export SubscribeToTopic
2035
+ func SubscribeToTopic(
2036
+ instanceIndexC C.int,
2037
+ channelC *C.char,
2038
+ ) *C.char {
2039
+
2040
+ instanceIndex := int(instanceIndexC)
2041
+ // Convert C string input to Go string.
2042
+ channel := C.GoString(channelC)
2043
+ log.Printf("[GO] <sub> Instance %d: Attempting to subscribe to topic: %s\n", instanceIndex, channel)
2044
+
2045
+ // Check instance index validity
2046
+ if err := checkInstanceIndex(instanceIndex); err != nil {
2047
+ return jsonErrorResponse("Invalid instance index", err) // Caller frees.
2048
+ }
2049
+
2050
+ // Get instance-specific state and mutex
2051
+ instanceHost := hostInstances[instanceIndex]
2052
+ instancePubsub := pubsubInstances[instanceIndex]
2053
+ instancePubsubMutex := &pubsubMutexes[instanceIndex]
2054
+ instanceTopics := topicsInstances[instanceIndex]
2055
+ instanceSubscriptions := subscriptionsInstances[instanceIndex]
2056
+
2057
+ // Check if host and PubSub instances are ready for this instance.
2058
+ if instanceHost == nil || instancePubsub == nil {
2059
+ return jsonErrorResponse(
2060
+ fmt.Sprintf("Instance %d: Host or PubSub not initialized", instanceIndex), nil,
2061
+ ) // Caller frees.
2062
+ }
2063
+
2064
+ // Lock the mutex for safe access to the shared topics and subscriptions maps for this instance.
2065
+ instancePubsubMutex.Lock()
2066
+ defer instancePubsubMutex.Unlock() // Ensure mutex is unlocked when function returns.
2067
+
2068
+ // Check if already subscribed to this topic for this instance.
2069
+ if _, exists := instanceSubscriptions[channel]; exists {
2070
+ log.Printf("[GO] <sub> Instance %d: Already subscribed to topic: %s\n", instanceIndex, channel)
2071
+ // Return success, indicating the desired state is already met.
2072
+ return jsonSuccessResponse(
2073
+ fmt.Sprintf("Instance %d: Already subscribed to topic %s", instanceIndex, channel),
2074
+ ) // Caller frees.
2075
+ }
2076
+
2077
+ // If the channel ends with ":rv", it indicates a rendezvous topic, so we remove other ones
2078
+ // from the instanceTopics and instanceSubscriptions list, and we clean the rendezvousDiscoveredPeersInstances.
2079
+ if strings.HasSuffix(channel, ":rv") {
2080
+ log.Printf(" - Instance %d: Joining rendezvous topic '%s'. Cleaning up previous rendezvous state.\n", instanceIndex, channel)
2081
+ // Remove all existing rendezvous topics and subscriptions for this instance.
2082
+ for existingChannel := range instanceTopics {
2083
+ if strings.HasSuffix(existingChannel, ":rv") {
2084
+ log.Printf(" - Instance %d: Removing existing rendezvous topic '%s' from instance state.\n", instanceIndex, existingChannel)
2085
+
2086
+ // Close the topic handle if it exists.
2087
+ if topic, exists := instanceTopics[existingChannel]; exists {
2088
+ if err := topic.Close(); err != nil {
2089
+ log.Printf("⚠️ Instance %d: Error closing topic handle for '%s': %v (proceeding with map cleanup)\n", instanceIndex, existingChannel, err)
2090
+ }
2091
+ delete(instanceTopics, existingChannel)
2092
+ }
2093
+
2094
+ // Remove the subscription if it exists.
2095
+ if sub, exists := instanceSubscriptions[existingChannel]; exists {
2096
+ sub.Cancel() // Cancel the subscription
2097
+ delete(instanceSubscriptions, existingChannel) // Remove from map
2098
+ }
2099
+
2100
+ // Also clean up rendezvous discovered peers for this instance.
2101
+ log.Printf(" - Instance %d: Resetting rendezvous state for new topic '%s'.\n", instanceIndex, channel)
2102
+ rendezvousDiscoveredPeersMutexes[instanceIndex].Lock()
2103
+ rendezvousDiscoveredPeersInstances[instanceIndex] = nil
2104
+ rendezvousDiscoveredPeersMutexes[instanceIndex].Unlock()
2105
+ }
2106
+ }
2107
+ log.Printf(" - Instance %d: Cleaned up previous rendezvous state.\n", instanceIndex)
2108
+ }
2109
+
2110
+ // --- Join the Topic ---
2111
+ // Get a handle for the topic. `Join` creates the topic if it doesn't exist locally
2112
+ // and returns a handle. It's safe to call Join multiple times; it's idempotent.
2113
+ // We store the handle primarily for potential future publishing from this node.
2114
+ topic, err := instancePubsub.Join(channel)
2115
+ if err != nil {
2116
+ errMsg := fmt.Sprintf("Instance %d: Failed to join topic '%s'", instanceIndex, channel)
2117
+ return jsonErrorResponse(errMsg, err) // Caller frees.
2118
+ }
2119
+ // Store the topic handle in the map for this instance.
2120
+ instanceTopics[channel] = topic
2121
+ log.Printf("[GO] - Instance %d: Obtained topic handle for: %s\n", instanceIndex, channel)
2122
+
2123
+ // --- Subscribe to the Topic ---
2124
+ // Create an actual subscription to receive messages from the topic.
2125
+ sub, err := topic.Subscribe()
2126
+ if err != nil {
2127
+ // Close the newly created topic handle.
2128
+ err := topic.Close()
2129
+ if err != nil {
2130
+ // Log error but proceed with cleanup.
2131
+ log.Printf("[GO] ⚠️ Instance %d: Error closing topic handle for '%s': %v (proceeding with map cleanup)\n", instanceIndex, channel, err)
2132
+ }
2133
+ // Remove the topic handle from our local map for this instance.
2134
+ delete(instanceTopics, channel)
2135
+ errMsg := fmt.Sprintf("Instance %d: Failed to subscribe to topic '%s' after joining", instanceIndex, channel)
2136
+ return jsonErrorResponse(errMsg, err) // Caller frees.
2137
+ }
2138
+ // Store the subscription object in the map for this instance.
2139
+ instanceSubscriptions[channel] = sub
2140
+ log.Printf("[GO] - Instance %d: Created subscription object for: %s\n", instanceIndex, channel)
2141
+
2142
+ // --- Start Listener Goroutine ---
2143
+ // Launch a background goroutine that will continuously read messages
2144
+ // from this new subscription and add them to the message buffer for this instance.
2145
+ // Pass the instance index, subscription object, and topic name (for logging).
2146
+ go readFromSubscription(instanceIndex, sub)
2147
+
2148
+ log.Printf("[GO] ✅ Instance %d: Subscribed successfully to topic: %s and started listener.\n", instanceIndex, channel)
2149
+ return jsonSuccessResponse(
2150
+ fmt.Sprintf("Instance %d: Subscribed to topic %s", instanceIndex, channel),
2151
+ ) // Caller frees.
2152
+ }
2153
+
2154
+ // UnsubscribeFromTopic cancels an active PubSub subscription and cleans up related resources for a specific instance.
2155
+ // Parameters:
2156
+ // - instanceIndexC (C.int): The index of the node instance.
2157
+ // - channelC (*C.char): The Channel associated to the topic to unsubscribe from.
2158
+ //
2159
+ // Returns:
2160
+ // - *C.char: A JSON string indicating success or failure.
2161
+ // Structure: `{"state":"Success", "message":"Unsubscribed from topic ..."}` or `{"state":"Error", "message":"..."}`.
2162
+ // - IMPORTANT: The caller MUST free the returned C string using `FreeString`.
2163
+ //
2164
+ //export UnsubscribeFromTopic
2165
+ func UnsubscribeFromTopic(
2166
+ instanceIndexC C.int,
2167
+ channelC *C.char,
2168
+ ) *C.char {
2169
+
2170
+ instanceIndex := int(instanceIndexC)
2171
+ // Convert C string input to Go string.
2172
+ channel := C.GoString(channelC)
2173
+ log.Printf("[GO] </sub> Instance %d: Attempting to unsubscribe from topic: %s\n", instanceIndex, channel)
2174
+
2175
+ // Check instance index validity
2176
+ if err := checkInstanceIndex(instanceIndex); err != nil {
2177
+ return jsonErrorResponse("Invalid instance index", err) // Caller frees.
2178
+ }
2179
+
2180
+ // Get instance-specific state and mutex
2181
+ instancePubsubMutex := &pubsubMutexes[instanceIndex]
2182
+ instanceTopics := topicsInstances[instanceIndex]
2183
+ instanceSubscriptions := subscriptionsInstances[instanceIndex]
2184
+
2185
+ // Check if host and PubSub are initialized. This is mostly for cleaning local state maps
2186
+ // if called after CloseNode, but Cancel/Close calls below require the instances.
2187
+ if hostInstances[instanceIndex] == nil || pubsubInstances[instanceIndex] == nil {
2188
+ log.Printf("[GO] Instance %d: Host/PubSub not initialized during Unsubscribe. Cleaning up local subscription state only.\n", instanceIndex)
2189
+ // Allow local map cleanup even if instances are gone.
2190
+ }
2191
+
2192
+ // Lock the mutex for write access to shared maps for this instance.
2193
+ instancePubsubMutex.Lock()
2194
+ defer instancePubsubMutex.Unlock() // Ensure unlock on exit.
2195
+
2196
+ // --- Cancel the Subscription ---
2197
+ // Find the subscription object in the map for this instance.
2198
+ sub, subExists := instanceSubscriptions[channel]
2199
+ if !subExists {
2200
+ log.Printf("[GO] </sub> Instance %d: Not currently subscribed to topic: %s (or already unsubscribed)\n", instanceIndex, channel)
2201
+ // Also remove potential stale topic handle if subscription is gone.
2202
+ delete(instanceTopics, channel)
2203
+ return jsonSuccessResponse(
2204
+ fmt.Sprintf("Instance %d: Not currently subscribed to topic %s", instanceIndex, channel),
2205
+ ) // Caller frees.
2206
+ }
2207
+
2208
+ // Cancel the subscription. This signals the associated `readFromSubscription` goroutine
2209
+ // (waiting on `sub.Next()`) to stop by causing `sub.Next()` to return an error (usually `ErrSubscriptionCancelled`).
2210
+ // It also cleans up internal PubSub resources related to this subscription.
2211
+ sub.Cancel()
2212
+ // Remove the subscription entry from our local map for this instance.
2213
+ delete(instanceSubscriptions, channel)
2214
+ log.Printf("[GO] - Instance %d: Cancelled subscription object for topic: %s\n", instanceIndex, channel)
2215
+
2216
+ // --- Close the Topic Handle ---
2217
+ // Find the corresponding topic handle for this instance. It's good practice to close this as well,
2218
+ // although PubSub might manage its lifecycle internally based on subscriptions.
2219
+ // Explicit closing ensures resources related to the *handle* (like internal routing state) are released.
2220
+ topic, topicExists := instanceTopics[channel]
2221
+ if topicExists {
2222
+ log.Printf("[GO] - Instance %d: Closing topic handle for: %s\n", instanceIndex, channel)
2223
+ // Close the topic handle.
2224
+ err := topic.Close()
2225
+ if err != nil {
2226
+ // Log error but proceed with cleanup.
2227
+ log.Printf("[GO] ⚠️ Instance %d: Error closing topic handle for '%s': %v (proceeding with map cleanup)\n", instanceIndex, channel, err)
2228
+ }
2229
+ // Remove the topic handle from our local map for this instance.
2230
+ delete(instanceTopics, channel)
2231
+ log.Printf("[GO] - Instance %d: Removed topic handle from local map for topic: %s\n", instanceIndex, channel)
2232
+ } else {
2233
+ log.Printf("[GO] - Instance %d: No topic handle found in local map for '%s' to close (already removed or possibly never stored?).\n", instanceIndex, channel)
2234
+ // Ensure removal from map even if handle wasn't found (e.g., inconsistent state).
2235
+ delete(instanceTopics, channel)
2236
+ }
2237
+
2238
+ // If the channel ends with ":rv", it indicates a rendezvous topic, so we have closed the topic and the sub
2239
+ // but we also need to clean the rendezvousDiscoveredPeersInstances.
2240
+ if strings.HasSuffix(channel, ":rv") {
2241
+ log.Printf(" - Instance %d: Unsubscribing from rendezvous topic. Clearing state.\n", instanceIndex)
2242
+ rendezvousDiscoveredPeersMutexes[instanceIndex].Lock()
2243
+ rendezvousDiscoveredPeersInstances[instanceIndex] = nil
2244
+ rendezvousDiscoveredPeersMutexes[instanceIndex].Unlock()
2245
+ }
2246
+ log.Printf("[GO] - Instance %d: Cleaned up previous rendezvous state.\n", instanceIndex)
2247
+
2248
+ log.Printf("[GO] ✅ Instance %d: Unsubscribed successfully from topic: %s\n", instanceIndex, channel)
2249
+ return jsonSuccessResponse(
2250
+ fmt.Sprintf("Instance %d: Unsubscribed from topic %s", instanceIndex, channel),
2251
+ ) // Caller frees.
2252
+ }
2253
+
2254
+ // MessageQueueLength returns the total number of messages waiting across all channel queues for a specific instance.
2255
+ // Parameters:
2256
+ // - instanceIndexC (C.int): The index of the node instance.
2257
+ //
2258
+ // Returns:
2259
+ // - C.int: The total number of messages. Returns -1 if instance index is invalid.
2260
+ //
2261
+ //export MessageQueueLength
2262
+ func MessageQueueLength(
2263
+ instanceIndexC C.int,
2264
+ ) C.int {
2265
+
2266
+ instanceIndex := int(instanceIndexC)
2267
+
2268
+ // Check instance index validity
2269
+ if err := checkInstanceIndex(instanceIndex); err != nil {
2270
+ log.Printf("[GO] ❌ MessageQueueLength: %v\n", err)
2271
+ return -1 // Indicate invalid instance index
2272
+ }
2273
+
2274
+ // Get the message store for this instance
2275
+ store := messageStoreInstances[instanceIndex]
2276
+ if store == nil {
2277
+ log.Printf("[GO] ❌ Instance %d: Message store not initialized.\n", instanceIndex)
2278
+ return 0 // Return 0 if store is nil (effectively empty)
2279
+ }
2280
+
2281
+ store.mu.Lock()
2282
+ defer store.mu.Unlock()
2283
+
2284
+ totalLength := 0
2285
+ /// TODO: this makes sense but not for the check we are doing from python, think about it
2286
+ for _, messageList := range store.messagesByChannel {
2287
+ totalLength += messageList.Len()
2288
+ }
2289
+
2290
+ return C.int(totalLength)
2291
+ }
2292
+
2293
+ // PopMessages retrieves the oldest message from each channel's queue for a specific instance.
2294
+ // This function always pops one message per channel that has messages.
2295
+ // Parameters:
2296
+ // - instanceIndexC (C.int): The index of the node instance.
2297
+ //
2298
+ // Returns:
2299
+ // - *C.char: A JSON string representing a list of the popped messages.
2300
+ // Returns `{"state":"Empty"}` if no messages were available in any queue.
2301
+ // Returns `{"state":"Error", "message":"..."}` on failure.
2302
+ // - IMPORTANT: The caller MUST free the returned C string using `FreeString`.
2303
+ //
2304
+ //export PopMessages
2305
+ func PopMessages(
2306
+ instanceIndexC C.int,
2307
+ ) *C.char {
2308
+ instanceIndex := int(instanceIndexC)
2309
+
2310
+ // Check instance index validity
2311
+ if err := checkInstanceIndex(instanceIndex); err != nil {
2312
+ return jsonErrorResponse("Invalid instance index", err)
2313
+ }
2314
+
2315
+ // Get the message store for this instance
2316
+ store := messageStoreInstances[instanceIndex]
2317
+ if store == nil {
2318
+ log.Printf("[GO] ❌ Instance %d: PopMessages: Message store not initialized.\n", instanceIndex)
2319
+ return jsonErrorResponse(fmt.Sprintf("Instance %d: Message store not initialized", instanceIndex), nil)
2320
+ }
2321
+
2322
+ store.mu.Lock() // Lock for the entire operation
2323
+ defer store.mu.Unlock()
2324
+
2325
+ if len(store.messagesByChannel) == 0 {
2326
+ return C.CString(`{"state":"Empty"}`)
2327
+ }
2328
+
2329
+ // Create a slice to hold the popped messages. Capacity is the number of channels.
2330
+ var poppedMessages []*QueuedMessage
2331
+ for _, messageList := range store.messagesByChannel {
2332
+ if messageList.Len() > 0 {
2333
+ element := messageList.Front()
2334
+ msg := element.Value.(*QueuedMessage)
2335
+ poppedMessages = append(poppedMessages, msg)
2336
+ messageList.Remove(element)
2337
+ }
2338
+ }
2339
+
2340
+ // After iterating, check if we actually popped anything
2341
+ if len(poppedMessages) == 0 {
2342
+ return C.CString(`{"state":"Empty"}`)
2343
+ }
2344
+
2345
+ // Marshal the slice of popped messages into a JSON array.
2346
+ // We create a temporary structure for JSON marshalling to include the base64-encoded data.
2347
+ payloads := make([]map[string]interface{}, len(poppedMessages))
2348
+ for i, msg := range poppedMessages {
2349
+ payloads[i] = map[string]interface{}{
2350
+ "from": msg.From,
2351
+ "data": base64.StdEncoding.EncodeToString(msg.Data),
2352
+ }
2353
+ }
2354
+
2355
+ jsonBytes, err := json.Marshal(payloads)
2356
+ if err != nil {
2357
+ log.Printf("[GO] ❌ Instance %d: PopMessages: Failed to marshal messages to JSON: %v\n", instanceIndex, err)
2358
+ // Messages have already been popped from the queue at this point.
2359
+ // Returning an error is the best we can do.
2360
+ return jsonErrorResponse(
2361
+ fmt.Sprintf("Instance %d: Failed to marshal popped messages", instanceIndex), err,
2362
+ )
2363
+ }
2364
+
2365
+ return C.CString(string(jsonBytes))
2366
+ }
2367
+
2368
+ // CloseNode gracefully shuts down the libp2p host, cancels subscriptions, closes connections,
2369
+ // and cleans up all associated resources.
2370
+ // Parameters:
2371
+ // - instanceIndexC (C.int): The index of the node instance. If -1, closes all initialized instances.
2372
+ //
2373
+ // Returns:
2374
+ // - *C.char: A JSON string indicating the result of the closure attempt.
2375
+ // Structure: `{"state":"Success", "message":"Node closed successfully"}` or `{"state":"Error", "message":"Error closing host: ..."}`.
2376
+ // If closing all, the message will summarize the results.
2377
+ // - IMPORTANT: The caller MUST free the returned C string using `FreeString`.
2378
+ //
2379
+ //export CloseNode
2380
+ func CloseNode(
2381
+ instanceIndexC C.int,
2382
+ ) *C.char {
2383
+
2384
+ instanceIndex := int(instanceIndexC)
2385
+
2386
+ if instanceIndex == -1 {
2387
+ log.Println("[GO] 🛑 Closing all initialized instances of this node...")
2388
+ successCount := 0
2389
+ errorCount := 0
2390
+ var errorMessages []string
2391
+
2392
+ // Iterate through all possible instance indices
2393
+ for i := 0; i < maxInstances; i++ {
2394
+ // Acquire global lock briefly to check if instance is initialized
2395
+ instanceStateMutex.RLock()
2396
+ isInstInitialized := isInitialized[i]
2397
+ instanceStateMutex.RUnlock()
2398
+
2399
+ if isInstInitialized {
2400
+ log.Printf("[GO] 🛑 Attempting to close instance %d...\n", i)
2401
+ // Call the single instance close logic internally
2402
+ // This internal call will handle its own instance-specific locks
2403
+ resultPtr := closeSingleInstance(i)
2404
+ resultJSON := C.GoString(resultPtr)
2405
+ C.free(unsafe.Pointer(resultPtr)) // Free the C string from the internal call
2406
+
2407
+ var result struct {
2408
+ State string `json:"state"`
2409
+ Message string `json:"message"`
2410
+ }
2411
+ if err := json.Unmarshal([]byte(resultJSON), &result); err != nil {
2412
+ errorCount++
2413
+ errorMessages = append(errorMessages, fmt.Sprintf("Instance %d: Failed to parse close result: %v", i, err))
2414
+ log.Printf("[GO] ❌ Instance %d: Failed to parse close result: %v\n", i, err)
2415
+ } else if result.State == "Error" {
2416
+ errorCount++
2417
+ errorMessages = append(errorMessages, fmt.Sprintf("Instance %d: %s", i, result.Message))
2418
+ log.Printf("[GO] ❌ Instance %d: Close failed: %s\n", i, result.Message)
2419
+ } else {
2420
+ successCount++
2421
+ log.Printf("[GO] ✅ Instance %d: Closed successfully.\n", i)
2422
+ }
2423
+ }
2424
+ }
2425
+
2426
+ summaryMsg := fmt.Sprintf("Closed %d nodes successfully, %d failed.", successCount, errorCount)
2427
+ if errorCount > 0 {
2428
+ log.Printf("[GO] ❌ Errors encountered during batch close:\n")
2429
+ for _, msg := range errorMessages {
2430
+ log.Println(msg)
2431
+ }
2432
+ return jsonErrorResponse(summaryMsg, fmt.Errorf("details: %v", errorMessages))
2433
+ }
2434
+
2435
+ log.Println("[GO] ✅ All initialized nodes closed.")
2436
+ return jsonSuccessResponse(summaryMsg)
2437
+
2438
+ } else {
2439
+ // --- Close a single specific instance ---
2440
+ log.Printf("[GO] 🛑 Closing single node instance %d...\n", instanceIndex)
2441
+ // Check instance index validity for a single close
2442
+ if err := checkInstanceIndex(instanceIndex); err != nil {
2443
+ return jsonErrorResponse("Invalid instance index for single close", err) // Caller frees.
2444
+ }
2445
+
2446
+ // Call the internal single instance close logic
2447
+ return closeSingleInstance(instanceIndex) // Caller frees the returned C string
2448
+ }
2449
+ }
2450
+
2451
+ // FreeString is called from the C/Python side to release the memory allocated by Go
2452
+ // when returning a `*C.char` (via `C.CString`).
2453
+ // Parameters:
2454
+ // - s (*C.char): The pointer to the C string previously returned by an exported Go function.
2455
+ //
2456
+ //export FreeString
2457
+ func FreeString(
2458
+ s *C.char,
2459
+ ) {
2460
+
2461
+ // Check for NULL pointer before attempting to free.
2462
+ if s != nil {
2463
+ C.free(unsafe.Pointer(s)) // Use C.free via unsafe.Pointer to release the memory.
2464
+ }
2465
+ }
2466
+
2467
+ // FreeInt is provided for completeness but is generally **NOT** needed if Go functions
2468
+ // only return `C.int` (by value). It would only be necessary if a Go function manually
2469
+ // allocated memory for a C integer (`*C.int`) and returned the pointer, which is uncommon.
2470
+ // Parameters:
2471
+ // - i (*C.int): The pointer to the C integer previously allocated and returned by Go.
2472
+ //
2473
+ //export FreeInt
2474
+ func FreeInt(
2475
+ i *C.int,
2476
+ ) {
2477
+
2478
+ // Check for NULL pointer.
2479
+ if i != nil {
2480
+ log.Println("[GO] ⚠️ FreeInt called - Ensure a *C.int pointer was actually allocated and returned from Go (this is unusual).")
2481
+ C.free(unsafe.Pointer(i)) // Free the memory if it was indeed allocated.
2482
+ }
2483
+ }
2484
+
2485
// main exists only to satisfy the Go compiler's requirement for package `main`.
// When this file is built with `-buildmode=c-shared`, the `//export`-annotated
// functions are the real entry points called from C/Python; this body never runs
// in library mode. It remains handy for standalone testing via `go run`.
func main() {
	// Only visible when built/run as a regular executable, never as a shared library.
	log.Println("[GO] libp2p Go library main function (not executed in c-shared library mode)")
}