koi-net 1.0.0b1__py3-none-any.whl
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of koi-net has been flagged as potentially problematic.
- koi_net/__init__.py +1 -0
- koi_net/core.py +86 -0
- koi_net/identity.py +62 -0
- koi_net/network/__init__.py +1 -0
- koi_net/network/graph.py +112 -0
- koi_net/network/interface.py +249 -0
- koi_net/network/request_handler.py +105 -0
- koi_net/network/response_handler.py +57 -0
- koi_net/processor/__init__.py +1 -0
- koi_net/processor/default_handlers.py +151 -0
- koi_net/processor/handler.py +22 -0
- koi_net/processor/interface.py +222 -0
- koi_net/processor/knowledge_object.py +104 -0
- koi_net/protocol/__init__.py +0 -0
- koi_net/protocol/api_models.py +39 -0
- koi_net/protocol/consts.py +5 -0
- koi_net/protocol/edge.py +20 -0
- koi_net/protocol/event.py +48 -0
- koi_net/protocol/helpers.py +25 -0
- koi_net/protocol/node.py +17 -0
- koi_net-1.0.0b1.dist-info/METADATA +43 -0
- koi_net-1.0.0b1.dist-info/RECORD +24 -0
- koi_net-1.0.0b1.dist-info/WHEEL +4 -0
- koi_net-1.0.0b1.dist-info/licenses/LICENSE +21 -0
koi_net/__init__.py
ADDED
@@ -0,0 +1 @@
+from .core import NodeInterface
koi_net/core.py
ADDED
@@ -0,0 +1,86 @@
+import logging
+import httpx
+from rid_lib.ext import Cache, Bundle
+from .network import NetworkInterface
+from .processor import ProcessorInterface
+from .processor import default_handlers as _default_handlers
+from .processor.handler import KnowledgeHandler
+from .identity import NodeIdentity
+from .protocol.node import NodeProfile
+from .protocol.event import Event, EventType
+
+logger = logging.getLogger(__name__)
+
+class NodeInterface:
+    def __init__(
+        self,
+        name: str,
+        profile: NodeProfile,
+        identity_file_path: str = "identity.json",
+        first_contact: str | None = None,
+        default_handlers: list[KnowledgeHandler] | None = None,
+        cache: Cache | None = None,
+        network: NetworkInterface | None = None,
+        processor: ProcessorInterface | None = None
+    ):
+        self.cache = cache or Cache(directory_path=f"{name}_cache")
+        self.identity = NodeIdentity(
+            name=name,
+            profile=profile,
+            cache=self.cache,
+            file_path=identity_file_path
+        )
+        self.first_contact = first_contact
+        self.network = network or NetworkInterface(
+            file_path=f"{self.identity.rid.name}_event_queues.json",
+            first_contact=self.first_contact,
+            cache=self.cache,
+            identity=self.identity
+        )
+
+        # pull all handlers defined in default_handlers module
+        if not default_handlers:
+            default_handlers = [
+                obj for obj in vars(_default_handlers).values()
+                if isinstance(obj, KnowledgeHandler)
+            ]
+
+        self.processor = processor or ProcessorInterface(
+            cache=self.cache,
+            network=self.network,
+            identity=self.identity,
+            default_handlers=default_handlers
+        )
+
+    def initialize(self):
+        self.network.graph.generate()
+
+        self.processor.handle(
+            bundle=Bundle.generate(
+                rid=self.identity.rid,
+                contents=self.identity.profile.model_dump()
+            ),
+            flush=True
+        )
+
+        if not self.network.graph.get_neighbors() and self.first_contact:
+            logger.info(f"I don't have any neighbors, reaching out to first contact {self.first_contact}")
+
+            events = [
+                Event.from_rid(EventType.FORGET, self.identity.rid),
+                Event.from_bundle(EventType.NEW, self.identity.bundle)
+            ]
+
+            try:
+                self.network.request_handler.broadcast_events(
+                    url=self.first_contact,
+                    events=events
+                )
+
+            except httpx.ConnectError:
+                logger.info("Failed to reach first contact")
+                return
+
+
+    def finalize(self):
+        self.network.save_event_queues()
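For orientation, here is a minimal usage sketch of the NodeInterface lifecycle defined above. It is not part of the package; the NodeProfile construction is an assumption, since node.py is not shown in this diff and the node_type / base_url fields are only inferred from how the profile is used elsewhere in the package.

# Hypothetical usage sketch (not shipped in the package).
from koi_net import NodeInterface
from koi_net.protocol.node import NodeProfile, NodeType  # field names inferred, not shown in this diff

node = NodeInterface(
    name="my-node",
    profile=NodeProfile(node_type=NodeType.FULL, base_url="http://127.0.0.1:8000/koi-net"),
    first_contact="http://127.0.0.1:8001/koi-net",  # hypothetical peer URL
)

node.initialize()    # regenerate the graph, handle own identity bundle, greet first contact
try:
    ...              # serve requests or poll, depending on node type
finally:
    node.finalize()  # persist any undelivered event queues to disk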
koi_net/identity.py
ADDED
@@ -0,0 +1,62 @@
+import logging
+from pydantic import BaseModel
+from rid_lib.ext.bundle import Bundle
+from rid_lib.ext.cache import Cache
+from rid_lib.types.koi_net_node import KoiNetNode
+from .protocol.node import NodeProfile
+
+logger = logging.getLogger(__name__)
+
+
+class NodeIdentityModel(BaseModel):
+    rid: KoiNetNode
+    profile: NodeProfile
+
+class NodeIdentity:
+    _identity: NodeIdentityModel
+    file_path: str
+    cache: Cache
+
+    def __init__(
+        self,
+        name: str,
+        profile: NodeProfile,
+        cache: Cache,
+        file_path: str = "identity.json"
+    ):
+        self.cache = cache
+        self.file_path = file_path
+
+        self._identity = None
+        try:
+            with open(file_path, "r") as f:
+                self._identity = NodeIdentityModel.model_validate_json(f.read())
+
+        except FileNotFoundError:
+            pass
+
+        if self._identity:
+            if self._identity.rid.name != name:
+                logger.warning("Node name changed which will change this node's RID, if you really want to do this manually delete the identity JSON file")
+            if self._identity.profile != profile:
+                self._identity.profile = profile
+        else:
+            self._identity = NodeIdentityModel(
+                rid=KoiNetNode.generate(name),
+                profile=profile,
+            )
+
+        with open(file_path, "w") as f:
+            f.write(self._identity.model_dump_json(indent=2))
+
+    @property
+    def rid(self) -> KoiNetNode:
+        return self._identity.rid
+
+    @property
+    def profile(self) -> NodeProfile:
+        return self._identity.profile
+
+    @property
+    def bundle(self) -> Bundle:
+        return self.cache.read(self.rid)
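NodeIdentity is normally constructed by NodeInterface, but the sketch below (an assumption, not shipped code) shows the load-or-generate behavior in isolation: an existing identity file is reused, otherwise a new KoiNetNode RID is generated and the file is written.

# Hypothetical sketch of NodeIdentity's load-or-generate behavior.
from rid_lib.ext import Cache
from koi_net.identity import NodeIdentity
from koi_net.protocol.node import NodeProfile, NodeType  # profile fields inferred, not shown in this diff

identity = NodeIdentity(
    name="my-node",
    profile=NodeProfile(node_type=NodeType.FULL, base_url="http://127.0.0.1:8000/koi-net"),
    cache=Cache(directory_path="my-node_cache"),
    file_path="my-node_identity.json",
)

identity.rid      # stable KoiNetNode RID; regenerated only if the identity file is deleted
identity.profile  # always reflects the profile passed in; a changed profile overwrites the stored one
identity.bundle   # the cached Bundle for this node's RID (may be None until it has been cached)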
koi_net/network/__init__.py
ADDED
@@ -0,0 +1 @@
+from .interface import NetworkInterface
koi_net/network/graph.py
ADDED
@@ -0,0 +1,112 @@
+import logging
+from typing import Literal
+import networkx as nx
+from rid_lib import RIDType
+from rid_lib.ext import Cache
+from rid_lib.types import KoiNetEdge, KoiNetNode
+from ..identity import NodeIdentity
+from ..protocol.edge import EdgeProfile, EdgeStatus
+from ..protocol.node import NodeProfile
+
+logger = logging.getLogger(__name__)
+
+
+class NetworkGraph:
+    def __init__(self, cache: Cache, identity: NodeIdentity):
+        self.cache = cache
+        self.dg = nx.DiGraph()
+        self.identity = identity
+
+    def generate(self):
+        logger.info("Generating network graph")
+        self.dg.clear()
+        for rid in self.cache.list_rids():
+            if type(rid) == KoiNetNode:
+                self.dg.add_node(rid)
+                logger.info(f"Added node {rid}")
+
+            elif type(rid) == KoiNetEdge:
+                edge_profile = self.get_edge_profile(rid)
+                if not edge_profile:
+                    logger.warning(f"Failed to load {rid!r}")
+                    continue
+                self.dg.add_edge(edge_profile.source, edge_profile.target, rid=rid)
+                logger.info(f"Added edge {rid} ({edge_profile.source} -> {edge_profile.target})")
+        logger.info("Done")
+
+    def get_node_profile(self, rid: KoiNetNode) -> NodeProfile | None:
+        bundle = self.cache.read(rid)
+        if bundle:
+            return bundle.validate_contents(NodeProfile)
+
+    def get_edge_profile(
+        self,
+        rid: KoiNetEdge | None = None,
+        source: KoiNetNode | None = None,
+        target: KoiNetNode | None = None,
+    ) -> EdgeProfile | None:
+        if source and target:
+            if (source, target) not in self.dg.edges: return
+            edge_data = self.dg.get_edge_data(source, target)
+            if not edge_data: return
+            rid = edge_data.get("rid")
+            if not rid: return
+        elif not rid:
+            raise ValueError("Either 'rid' or 'source' and 'target' must be provided")
+
+        bundle = self.cache.read(rid)
+        if bundle:
+            return bundle.validate_contents(EdgeProfile)
+
+    def get_edges(
+        self,
+        direction: Literal["in", "out"] | None = None,
+    ) -> list[KoiNetEdge]:
+
+        edges = []
+        if direction != "in":
+            out_edges = self.dg.out_edges(self.identity.rid)
+            edges.extend([e for e in out_edges])
+
+        if direction != "out":
+            in_edges = self.dg.in_edges(self.identity.rid)
+            edges.extend([e for e in in_edges])
+
+        edge_rids = []
+        for edge in edges:
+            edge_data = self.dg.get_edge_data(*edge)
+            if not edge_data: continue
+            edge_rid = edge_data.get("rid")
+            if not edge_rid: continue
+            edge_rids.append(edge_rid)
+
+        return edge_rids
+
+    def get_neighbors(
+        self,
+        direction: Literal["in", "out"] | None = None,
+        status: EdgeStatus | None = None,
+        allowed_type: RIDType | None = None
+    ) -> list[KoiNetNode]:
+
+        neighbors = []
+        for edge_rid in self.get_edges(direction):
+            edge_profile = self.get_edge_profile(edge_rid)
+
+            if not edge_profile:
+                logger.warning(f"Failed to find edge {edge_rid!r} in cache")
+                continue
+
+            if status and edge_profile.status != status:
+                continue
+
+            if allowed_type and allowed_type not in edge_profile.rid_types:
+                continue
+
+            if edge_profile.target == self.identity.rid:
+                neighbors.append(edge_profile.source)
+            elif edge_profile.source == self.identity.rid:
+                neighbors.append(edge_profile.target)
+
+        return list(neighbors)
+
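A short sketch of how the graph above is typically queried, assuming `graph` is the NetworkGraph held by a NodeInterface (as in the earlier sketch); the variable names are illustrative only.

# Hypothetical sketch: querying the in-memory view of the KOI network.
graph = node.network.graph                        # from the NodeInterface sketch above

graph.generate()                                  # rebuild the DiGraph from cached node/edge bundles
outgoing = graph.get_neighbors(direction="out")   # nodes this node has edges pointing to
incoming = graph.get_neighbors(direction="in")    # nodes with edges pointing at this node
edge_rids = graph.get_edges()                     # KoiNetEdge RIDs touching this node, both directions
profile = graph.get_node_profile(node.identity.rid)  # NodeProfile decoded from the cache, or None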
koi_net/network/interface.py
ADDED
@@ -0,0 +1,249 @@
+import logging
+from queue import Queue
+import httpx
+from pydantic import BaseModel
+from rid_lib import RID
+from rid_lib.core import RIDType
+from rid_lib.ext import Cache
+from rid_lib.types import KoiNetNode
+from .graph import NetworkGraph
+from .request_handler import RequestHandler
+from .response_handler import ResponseHandler
+from ..protocol.node import NodeType
+from ..protocol.edge import EdgeType
+from ..protocol.event import Event
+from ..identity import NodeIdentity
+
+logger = logging.getLogger(__name__)
+
+
+class EventQueueModel(BaseModel):
+    webhook: dict[KoiNetNode, list[Event]]
+    poll: dict[KoiNetNode, list[Event]]
+
+type EventQueue = dict[RID, Queue[Event]]
+
+class NetworkInterface:
+    identity: NodeIdentity
+    cache: Cache
+    first_contact: str | None
+    graph: NetworkGraph
+    request_handler: RequestHandler
+    response_handler: ResponseHandler
+    poll_event_queue: EventQueue
+    webhook_event_queue: EventQueue
+    event_queues_file_path: str
+
+    def __init__(
+        self,
+        file_path: str,
+        first_contact: str | None,
+        cache: Cache,
+        identity: NodeIdentity
+    ):
+        self.identity = identity
+        self.cache = cache
+        self.first_contact = first_contact
+        self.graph = NetworkGraph(cache, identity)
+        self.request_handler = RequestHandler(cache)
+        self.response_handler = ResponseHandler(cache)
+        self.event_queues_file_path = file_path
+
+        self.poll_event_queue = dict()
+        self.webhook_event_queue = dict()
+        self.load_event_queues()
+
+    def load_event_queues(self):
+        try:
+            with open(self.event_queues_file_path, "r") as f:
+                queues = EventQueueModel.model_validate_json(f.read())
+
+            for node in queues.poll.keys():
+                for event in queues.poll[node]:
+                    queue = self.poll_event_queue.setdefault(node, Queue())
+                    queue.put(event)
+
+            for node in queues.webhook.keys():
+                for event in queues.webhook[node]:
+                    queue = self.webhook_event_queue.setdefault(node, Queue())
+                    queue.put(event)
+
+        except FileNotFoundError:
+            return
+
+    def save_event_queues(self):
+        events_model = EventQueueModel(
+            poll={
+                node: list(queue.queue)
+                for node, queue in self.poll_event_queue.items()
+                if not queue.empty()
+            },
+            webhook={
+                node: list(queue.queue)
+                for node, queue in self.webhook_event_queue.items()
+                if not queue.empty()
+            }
+        )
+
+        if len(events_model.poll) == 0 and len(events_model.webhook) == 0:
+            return
+
+        with open(self.event_queues_file_path, "w") as f:
+            f.write(events_model.model_dump_json(indent=2))
+
+    def push_event_to(self, event: Event, node: KoiNetNode, flush=False):
+        logger.info(f"Pushing event {event.event_type} {event.rid} to {node}")
+
+        node_profile = self.graph.get_node_profile(node)
+        if not node_profile:
+            logger.warning(f"Node {node!r} unknown to me")
+
+        # if there's an edge from me to the target node, override broadcast type
+        edge_profile = self.graph.get_edge_profile(
+            source=self.identity.rid,
+            target=node
+        )
+
+        if edge_profile:
+            if edge_profile.edge_type == EdgeType.WEBHOOK:
+                event_queue = self.webhook_event_queue
+            elif edge_profile.edge_type == EdgeType.POLL:
+                event_queue = self.poll_event_queue
+        else:
+            if node_profile.node_type == NodeType.FULL:
+                event_queue = self.webhook_event_queue
+            elif node_profile.node_type == NodeType.PARTIAL:
+                event_queue = self.poll_event_queue
+
+        queue = event_queue.setdefault(node, Queue())
+        queue.put(event)
+
+        if flush and event_queue is self.webhook_event_queue:
+            self.flush_webhook_queue(node)
+
+    def flush_queue(self, event_queue: EventQueue, node: KoiNetNode) -> list[Event]:
+        queue = event_queue.get(node)
+        events = list()
+        if queue:
+            while not queue.empty():
+                event = queue.get()
+                logger.info(f"Dequeued {event.event_type} '{event.rid}'")
+                events.append(event)
+
+        return events
+
+    def flush_poll_queue(self, node: KoiNetNode) -> list[Event]:
+        logger.info(f"Flushing poll queue for {node}")
+        return self.flush_queue(self.poll_event_queue, node)
+
+    def flush_webhook_queue(self, node: RID):
+        logger.info(f"Flushing webhook queue for {node}")
+
+        node_profile = self.graph.get_node_profile(node)
+
+        if not node_profile:
+            logger.warning(f"{node!r} not found")
+
+        if node_profile.node_type != NodeType.FULL:
+            logger.warning(f"{node!r} is a partial node!")
+            return
+
+        events = self.flush_queue(self.webhook_event_queue, node)
+        logger.info(f"Broadcasting {len(events)} events")
+
+        try:
+            self.request_handler.broadcast_events(node, events=events)
+        except httpx.ConnectError:
+            logger.warning("Broadcast failed, requeuing events")
+            for event in events:
+                self.push_event_to(event, node)
+
+    def flush_all_webhook_queues(self):
+        for node in self.webhook_event_queue.keys():
+            self.flush_webhook_queue(node)
+
+    def get_state_providers(self, rid_type: RIDType):
+        logger.info(f"Looking for state providers of '{rid_type}'")
+        provider_nodes = []
+        for node_rid in self.cache.list_rids(rid_types=[KoiNetNode]):
+            node = self.graph.get_node_profile(node_rid)
+
+            if node.node_type == NodeType.FULL and rid_type in node.provides.state:
+                logger.info(f"Found provider '{node_rid}'")
+                provider_nodes.append(node_rid)
+
+        if not provider_nodes:
+            logger.info("Failed to find providers")
+        return provider_nodes
+
+    def fetch_remote_bundle(self, rid: RID):
+        logger.info(f"Fetching remote bundle '{rid}'")
+        remote_bundle = None
+        for node_rid in self.get_state_providers(type(rid)):
+            payload = self.request_handler.fetch_bundles(
+                node=node_rid, rids=[rid])
+
+            if payload.manifests:
+                remote_bundle = payload.manifests[0]
+                logger.info(f"Got bundle from '{node_rid}'")
+                break
+
+        if not remote_bundle:
+            logger.warning("Failed to fetch remote bundle")
+
+        return remote_bundle
+
+    def fetch_remote_manifest(self, rid: RID):
+        logger.info(f"Fetching remote manifest '{rid}'")
+        remote_manifest = None
+        for node_rid in self.get_state_providers(type(rid)):
+            payload = self.request_handler.fetch_manifests(
+                node=node_rid, rids=[rid])
+
+            if payload.manifests:
+                remote_manifest = payload.manifests[0]
+                logger.info(f"Got bundle from '{node_rid}'")
+                break
+
+        if not remote_manifest:
+            logger.warning("Failed to fetch remote bundle")
+
+        return remote_manifest
+
+    def poll_neighbors(self) -> list[Event]:
+        neighbors = self.graph.get_neighbors()
+
+        if not neighbors:
+            logger.info("No neighbors found, polling first contact")
+            try:
+                payload = self.request_handler.poll_events(
+                    url=self.first_contact,
+                    rid=self.identity.rid
+                )
+                if payload.events:
+                    logger.info(f"Received {len(payload.events)} events from '{self.first_contact}'")
+                return payload.events
+            except httpx.ConnectError:
+                logger.info(f"Failed to reach first contact '{self.first_contact}'")
+
+        events = []
+        for node_rid in neighbors:
+            node = self.graph.get_node_profile(node_rid)
+            if not node: continue
+            if node.node_type != NodeType.FULL: continue
+
+            try:
+                payload = self.request_handler.poll_events(
+                    node=node_rid,
+                    rid=self.identity.rid
+                )
+                if payload.events:
+                    logger.info(f"Received {len(payload.events)} events from {node_rid!r}")
+                events.extend(payload.events)
+            except httpx.ConnectError:
+                logger.info(f"Failed to reach node '{node_rid}'")
+                continue
+
+        return events
+
+
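The sketch below (illustrative, not shipped code) ties the queueing methods above together: events are routed to a webhook or poll queue per target node, webhook queues are flushed by broadcasting over HTTP, and poll queues are drained when a partial node asks for them. The `peer_rid` variable is an assumed, already-cached KoiNetNode RID.

# Hypothetical sketch of the NetworkInterface event flow.
from koi_net.protocol.event import Event, EventType

network = node.network                      # from the NodeInterface sketch above
bundle = network.cache.read(node.identity.rid)

event = Event.from_bundle(EventType.NEW, bundle)
network.push_event_to(event, peer_rid)      # peer_rid: a known KoiNetNode RID (assumed)
network.flush_all_webhook_queues()          # POST queued events to full (webhook) peers
incoming = network.poll_neighbors()         # pull events from neighbors, or the first contact
network.save_event_queues()                 # write anything still queued to disk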
koi_net/network/request_handler.py
ADDED
@@ -0,0 +1,105 @@
+import logging
+import httpx
+from pydantic import BaseModel
+from rid_lib import RID
+from rid_lib.ext import Cache
+from rid_lib.types.koi_net_node import KoiNetNode
+from ..protocol.api_models import (
+    RidsPayload,
+    ManifestsPayload,
+    BundlesPayload,
+    EventsPayload,
+    FetchRids,
+    FetchManifests,
+    FetchBundles,
+    PollEvents
+)
+from ..protocol.consts import (
+    BROADCAST_EVENTS_PATH,
+    POLL_EVENTS_PATH,
+    FETCH_RIDS_PATH,
+    FETCH_MANIFESTS_PATH,
+    FETCH_BUNDLES_PATH
+)
+from ..protocol.node import NodeProfile, NodeType
+
+
+logger = logging.getLogger(__name__)
+
+
+class RequestHandler:
+    cache: Cache
+
+    def __init__(self, cache: Cache):
+        self.cache = cache
+
+    def make_request(self, url, request: BaseModel) -> httpx.Response:
+        logger.info(f"Making request to {url}")
+        resp = httpx.post(
+            url=url,
+            data=request.model_dump_json()
+        )
+        return resp
+
+    def get_url(self, node_rid: KoiNetNode, url: str) -> str:
+        if not node_rid and not url:
+            raise ValueError("One of 'node_rid' and 'url' must be provided")
+
+        if node_rid:
+            # can't access get_node rn
+            bundle = self.cache.read(node_rid)
+            node = NodeProfile.model_validate(bundle.contents)
+            if node.node_type != NodeType.FULL:
+                raise Exception("Can't query partial node")
+            logger.info(f"Resolved {node_rid!r} to {node.base_url}")
+            return node.base_url
+        else:
+            return url
+
+    def broadcast_events(
+        self, node: RID = None, url: str = None, **kwargs
+    ) -> None:
+        self.make_request(
+            self.get_url(node, url) + BROADCAST_EVENTS_PATH,
+            EventsPayload.model_validate(kwargs)
+        )
+
+    def poll_events(
+        self, node: RID = None, url: str = None, **kwargs
+    ) -> EventsPayload:
+        resp = self.make_request(
+            self.get_url(node, url) + POLL_EVENTS_PATH,
+            PollEvents.model_validate(kwargs)
+        )
+
+        return EventsPayload.model_validate_json(resp.text)
+
+    def fetch_rids(
+        self, node: RID = None, url: str = None, **kwargs
+    ) -> RidsPayload:
+        resp = self.make_request(
+            self.get_url(node, url) + FETCH_RIDS_PATH,
+            FetchRids.model_validate(kwargs)
+        )
+
+        return RidsPayload.model_validate_json(resp.text)
+
+    def fetch_manifests(
+        self, node: RID = None, url: str = None, **kwargs
+    ) -> ManifestsPayload:
+        resp = self.make_request(
+            self.get_url(node, url) + FETCH_MANIFESTS_PATH,
+            FetchManifests.model_validate(kwargs)
+        )
+
+        return ManifestsPayload.model_validate_json(resp.text)
+
+    def fetch_bundles(
+        self, node: RID = None, url: str = None, **kwargs
+    ) -> BundlesPayload:
+        resp = self.make_request(
+            self.get_url(node, url) + FETCH_BUNDLES_PATH,
+            FetchBundles.model_validate(kwargs)
+        )
+
+        return BundlesPayload.model_validate_json(resp.text)
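Each RequestHandler method takes either a `node` (a cached KoiNetNode RID resolved to its base_url) or a raw `url`, plus keyword arguments matching the corresponding request model. A hedged sketch of direct calls follows; the peer URL is hypothetical, and calling fetch_rids with no filter assumes an empty FetchRids payload is valid.

# Hypothetical sketch of direct RequestHandler calls.
handler = node.network.request_handler        # from the NodeInterface sketch above
peer_url = "http://127.0.0.1:8001/koi-net"    # hypothetical full-node URL

rids = handler.fetch_rids(url=peer_url)                             # -> RidsPayload
manifests = handler.fetch_manifests(url=peer_url, rids=rids.rids)   # -> ManifestsPayload
bundles = handler.fetch_bundles(url=peer_url, rids=rids.rids)       # -> BundlesPayload
events = handler.poll_events(url=peer_url, rid=node.identity.rid)   # -> EventsPayload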
koi_net/network/response_handler.py
ADDED
@@ -0,0 +1,57 @@
+import logging
+from rid_lib import RID
+from rid_lib.ext import Manifest, Cache
+from rid_lib.ext.bundle import Bundle
+from ..protocol.api_models import (
+    RidsPayload,
+    ManifestsPayload,
+    BundlesPayload,
+    FetchRids,
+    FetchManifests,
+    FetchBundles,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class ResponseHandler:
+    cache: Cache
+
+    def __init__(self, cache: Cache):
+        self.cache = cache
+
+    def fetch_rids(self, req: FetchRids) -> RidsPayload:
+        logger.info(f"Request to fetch rids, allowed types {req.rid_types}")
+        rids = self.cache.list_rids(req.rid_types)
+
+        return RidsPayload(rids=rids)
+
+    def fetch_manifests(self, req: FetchManifests) -> ManifestsPayload:
+        logger.info(f"Request to fetch manifests, allowed types {req.rid_types}, rids {req.rids}")
+
+        manifests: list[Manifest] = []
+        not_found: list[RID] = []
+
+        for rid in (req.rids or self.cache.list_rids(req.rid_types)):
+            bundle = self.cache.read(rid)
+            if bundle:
+                manifests.append(bundle.manifest)
+            else:
+                not_found.append(rid)
+
+        return ManifestsPayload(manifests=manifests, not_found=not_found)
+
+    def fetch_bundles(self, req: FetchBundles) -> BundlesPayload:
+        logger.info(f"Request to fetch bundles, requested rids {req.rids}")
+
+        bundles: list[Bundle] = []
+        not_found: list[RID] = []
+
+        for rid in req.rids:
+            bundle = self.cache.read(rid)
+            if bundle:
+                bundles.append(bundle)
+            else:
+                not_found.append(rid)
+
+        return BundlesPayload(manifests=bundles, not_found=not_found)
koi_net/processor/__init__.py
ADDED
@@ -0,0 +1 @@
+from .interface import ProcessorInterface