koi-net 1.2.0b1__py3-none-any.whl → 1.2.0b2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


Files changed (40)
  1. koi_net/__init__.py +2 -1
  2. koi_net/assembler.py +82 -0
  3. koi_net/context.py +5 -13
  4. koi_net/core.py +51 -206
  5. koi_net/effector.py +26 -14
  6. koi_net/handshaker.py +3 -3
  7. koi_net/identity.py +2 -3
  8. koi_net/interfaces/entrypoint.py +5 -0
  9. koi_net/{worker.py → interfaces/worker.py} +7 -0
  10. koi_net/lifecycle.py +40 -32
  11. koi_net/logger.py +176 -0
  12. koi_net/network/error_handler.py +6 -6
  13. koi_net/network/event_queue.py +8 -6
  14. koi_net/network/graph.py +8 -8
  15. koi_net/network/poll_event_buffer.py +26 -0
  16. koi_net/network/request_handler.py +23 -28
  17. koi_net/network/resolver.py +13 -13
  18. koi_net/network/response_handler.py +74 -9
  19. koi_net/poller.py +7 -5
  20. koi_net/processor/event_worker.py +14 -18
  21. koi_net/processor/{default_handlers.py → handlers.py} +26 -25
  22. koi_net/processor/kobj_queue.py +3 -3
  23. koi_net/{kobj_worker.py → processor/kobj_worker.py} +12 -13
  24. koi_net/processor/{knowledge_pipeline.py → pipeline.py} +24 -27
  25. koi_net/protocol/api_models.py +5 -2
  26. koi_net/protocol/envelope.py +5 -6
  27. koi_net/protocol/model_map.py +61 -0
  28. koi_net/protocol/secure.py +8 -8
  29. koi_net/secure.py +5 -5
  30. koi_net/sentry.py +13 -0
  31. koi_net/server.py +36 -86
  32. {koi_net-1.2.0b1.dist-info → koi_net-1.2.0b2.dist-info}/METADATA +2 -1
  33. koi_net-1.2.0b2.dist-info/RECORD +52 -0
  34. koi_net/behaviors.py +0 -51
  35. koi_net/models.py +0 -14
  36. koi_net/poll_event_buffer.py +0 -17
  37. koi_net-1.2.0b1.dist-info/RECORD +0 -49
  38. {koi_net-1.2.0b1.dist-info → koi_net-1.2.0b2.dist-info}/WHEEL +0 -0
  39. {koi_net-1.2.0b1.dist-info → koi_net-1.2.0b2.dist-info}/entry_points.txt +0 -0
  40. {koi_net-1.2.0b1.dist-info → koi_net-1.2.0b2.dist-info}/licenses/LICENSE +0 -0
koi_net/lifecycle.py CHANGED
@@ -1,22 +1,22 @@
- import logging
+ import structlog
  from contextlib import contextmanager, asynccontextmanager

  from rid_lib.ext import Bundle, Cache
  from rid_lib.types import KoiNetNode

- from koi_net.behaviors import Behaviors
- from koi_net.handshaker import Handshaker
- from koi_net.kobj_worker import KnowledgeProcessingWorker
- from koi_net.models import END
- from koi_net.network.event_queue import EventQueue
- from koi_net.processor.event_worker import EventProcessingWorker
-
+ from .handshaker import Handshaker
+ from .network.request_handler import RequestHandler
+ from .processor.kobj_worker import KnowledgeProcessingWorker
+ from .network.event_queue import EventQueue
+ from .processor.event_worker import EventProcessingWorker
+ from .protocol.api_models import ErrorResponse
+ from .interfaces.worker import STOP_WORKER
  from .config import NodeConfig
  from .processor.kobj_queue import KobjQueue
  from .network.graph import NetworkGraph
  from .identity import NodeIdentity

- logger = logging.getLogger(__name__)
+ log = structlog.stdlib.get_logger()


  class NodeLifecycle:
@@ -31,7 +31,7 @@ class NodeLifecycle:
      event_worker: EventProcessingWorker
      cache: Cache
      handshaker: Handshaker
-     behaviors: Behaviors
+     request_handler: RequestHandler

      def __init__(
          self,
@@ -44,7 +44,7 @@ class NodeLifecycle:
          event_worker: EventProcessingWorker,
          cache: Cache,
          handshaker: Handshaker,
-         behaviors: Behaviors
+         request_handler: RequestHandler
      ):
          self.config = config
          self.identity = identity
@@ -55,32 +55,32 @@ class NodeLifecycle:
          self.event_worker = event_worker
          self.cache = cache
          self.handshaker = handshaker
-         self.behaviors = behaviors
+         self.request_handler = request_handler

      @contextmanager
      def run(self):
          """Synchronous context manager for node startup and shutdown."""
          try:
-             logger.info("Starting node lifecycle...")
+             log.info("Starting node lifecycle...")
              self.start()
              yield
          except KeyboardInterrupt:
-             logger.info("Keyboard interrupt!")
+             log.info("Keyboard interrupt!")
          finally:
-             logger.info("Stopping node lifecycle...")
+             log.info("Stopping node lifecycle...")
              self.stop()

      @asynccontextmanager
      async def async_run(self):
          """Asynchronous context manager for node startup and shutdown."""
          try:
-             logger.info("Starting async node lifecycle...")
+             log.info("Starting async node lifecycle...")
              self.start()
              yield
          except KeyboardInterrupt:
-             logger.info("Keyboard interrupt!")
+             log.info("Keyboard interrupt!")
          finally:
-             logger.info("Stopping async node lifecycle...")
+             log.info("Stopping async node lifecycle...")
              self.stop()

      def start(self):
@@ -91,7 +91,7 @@ class NodeLifecycle:
          of node bundle. Initiates handshake with first contact if node
          doesn't have any neighbors. Catches up with coordinator state.
          """
-         logger.info("Starting processor worker thread")
+         log.info("Starting processor worker thread")

          self.kobj_worker.thread.start()
          self.event_worker.thread.start()
@@ -104,30 +104,38 @@ class NodeLifecycle:
              contents=self.identity.profile.model_dump()
          ))

-         logger.debug("Waiting for kobj queue to empty")
+         log.debug("Waiting for kobj queue to empty")
          self.kobj_queue.q.join()

-         # TODO: FACTOR OUT BEHAVIOR
-
          coordinators = self.graph.get_neighbors(direction="in", allowed_type=KoiNetNode)

-         if len(coordinators) == 0 and self.config.koi_net.first_contact.rid:
-             logger.debug(f"I don't have any edges with coordinators, reaching out to first contact {self.config.koi_net.first_contact.rid!r}")
+         if len(coordinators) > 0:
+             for coordinator in coordinators:
+                 payload = self.request_handler.fetch_manifests(
+                     node=coordinator,
+                     rid_types=[KoiNetNode]
+                 )
+                 if type(payload) is ErrorResponse:
+                     continue
+
+                 for manifest in payload.manifests:
+                     self.kobj_queue.put_kobj(
+                         manifest=manifest,
+                         source=coordinator
+                     )
+
+         elif self.config.koi_net.first_contact.rid:
+             log.debug(f"I don't have any edges with coordinators, reaching out to first contact {self.config.koi_net.first_contact.rid!r}")

              self.handshaker.handshake_with(self.config.koi_net.first_contact.rid)

-
-
-         for coordinator in self.behaviors.identify_coordinators():
-             self.behaviors.catch_up_with(coordinator, rid_types=[KoiNetNode])
-

      def stop(self):
          """Stops a node.

          Finishes processing knowledge object queue.
          """
-         logger.info(f"Waiting for kobj queue to empty ({self.kobj_queue.q.unfinished_tasks} tasks remaining)")
+         log.info(f"Waiting for kobj queue to empty ({self.kobj_queue.q.unfinished_tasks} tasks remaining)")

-         self.kobj_queue.q.put(END)
-         self.event_queue.q.put(END)
+         self.kobj_queue.q.put(STOP_WORKER)
+         self.event_queue.q.put(STOP_WORKER)
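Note: shutdown now pushes the STOP_WORKER sentinel from .interfaces.worker onto each queue instead of the old END marker from koi_net.models. interfaces/worker.py is not part of this diff, so the loop below is only a minimal sketch of the sentinel pattern the worker threads presumably follow; the sentinel object, queue, and processing step are illustrative.

import queue
import threading

# Illustrative sentinel; the real STOP_WORKER lives in koi_net.interfaces.worker.
STOP_WORKER = object()

def worker_loop(q: queue.Queue):
    # Drain items until the stop sentinel arrives, calling task_done() for each
    # so that q.join() (used in NodeLifecycle.start) can unblock.
    while True:
        item = q.get()
        try:
            if item is STOP_WORKER:
                break
            # ... process a knowledge object or outgoing event here ...
        finally:
            q.task_done()

q = queue.Queue()
threading.Thread(target=worker_loop, args=(q,), daemon=True).start()
q.put("some work")
q.put(STOP_WORKER)  # mirrors what stop() now puts on kobj_queue.q and event_queue.q
q.join()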
koi_net/logger.py ADDED
@@ -0,0 +1,176 @@
+ from datetime import datetime
+ import logging
+ from logging.handlers import RotatingFileHandler
+ import colorama
+ import structlog
+ import sys
+ # from sentry_sdk import logger as sentry_logger
+
+
+ def my_processor(_, __, event: dict):
+     # print(_, __, event)
+     event["path"] = event["module"] + "." + event["func_name"]
+     return event
+
+ # def sentry_processor(_, method, event: dict):
+ #     print(event)
+ #     if method == "critical":
+ #         sentry_logger.fatal(
+ #             event["event"],
+ #             attributes=event
+ #         )
+ #     elif method == "info":
+ #         sentry_logger.info(
+ #             event["event"],
+ #             attributes=event
+ #         )
+ #     elif method == "debug":
+ #         sentry_logger.debug(
+ #             event["event"],
+ #             attributes=event
+ #         )
+ #     return event
+
+ console_renderer = structlog.dev.ConsoleRenderer(
+     columns=[
+         # Render the timestamp without the key name in yellow.
+         structlog.dev.Column(
+             "timestamp",
+             structlog.dev.KeyValueColumnFormatter(
+                 key_style=None,
+                 value_style=colorama.Style.DIM,
+                 reset_style=colorama.Style.RESET_ALL,
+                 value_repr=lambda t: datetime.fromisoformat(t).strftime("%Y-%m-%d %H:%M:%S"),
+             ),
+         ),
+         structlog.dev.Column(
+             "level",
+             structlog.dev.LogLevelColumnFormatter(
+                 level_styles={
+                     level: colorama.Style.BRIGHT + color
+                     for level, color in {
+                         "critical": colorama.Fore.RED,
+                         "exception": colorama.Fore.RED,
+                         "error": colorama.Fore.RED,
+                         "warn": colorama.Fore.YELLOW,
+                         "warning": colorama.Fore.YELLOW,
+                         "info": colorama.Fore.GREEN,
+                         "debug": colorama.Fore.GREEN,
+                         "notset": colorama.Back.RED,
+                     }.items()
+                 },
+                 reset_style=colorama.Style.RESET_ALL,
+                 width=9
+             )
+         ),
+         # Render the event without the key name in bright magenta.
+
+         # Default formatter for all keys not explicitly mentioned. The key is
+         # cyan, the value is green.
+         structlog.dev.Column(
+             "path",
+             structlog.dev.KeyValueColumnFormatter(
+                 key_style=None,
+                 value_style=colorama.Fore.MAGENTA,
+                 reset_style=colorama.Style.RESET_ALL,
+                 value_repr=str,
+                 width=30
+             ),
+         ),
+         # structlog.dev.Column(
+         #     "func_name",
+         #     structlog.dev.KeyValueColumnFormatter(
+         #         key_style=None,
+         #         value_style=colorama.Fore.MAGENTA,
+         #         reset_style=colorama.Style.RESET_ALL,
+         #         value_repr=str,
+         #         prefix="(",
+         #         postfix=")",
+         #         width=15
+         #     ),
+         # ),
+         structlog.dev.Column(
+             "event",
+             structlog.dev.KeyValueColumnFormatter(
+                 key_style=None,
+                 value_style=colorama.Fore.WHITE,
+                 reset_style=colorama.Style.RESET_ALL,
+                 value_repr=str,
+                 width=30
+             ),
+         ),
+         structlog.dev.Column(
+             "",
+             structlog.dev.KeyValueColumnFormatter(
+                 key_style=colorama.Fore.BLUE,
+                 value_style=colorama.Fore.GREEN,
+                 reset_style=colorama.Style.RESET_ALL,
+                 value_repr=str,
+             ),
+         )
+     ]
+ )
+
+ structlog.configure(
+     processors=[
+         # If log level is too low, abort pipeline and throw away log entry.
+         structlog.stdlib.filter_by_level,
+         # Add the name of the logger to event dict.
+         structlog.stdlib.add_logger_name,
+         # Add log level to event dict.
+         structlog.stdlib.add_log_level,
+         # Perform %-style formatting.
+         structlog.stdlib.PositionalArgumentsFormatter(),
+         # Add a timestamp in ISO 8601 format.
+         structlog.processors.TimeStamper(fmt="iso"),
+         # If the "stack_info" key in the event dict is true, remove it and
+         # render the current stack trace in the "stack" key.
+         structlog.processors.StackInfoRenderer(),
+         # If the "exc_info" key in the event dict is either true or a
+         # sys.exc_info() tuple, remove "exc_info" and render the exception
+         # with traceback into the "exception" key.
+         # structlog.processors.format_exc_info,
+         # If some value is in bytes, decode it to a Unicode str.
+         structlog.processors.UnicodeDecoder(),
+         # Add callsite parameters.
+         structlog.processors.CallsiteParameterAdder(
+             {
+                 structlog.processors.CallsiteParameter.MODULE,
+                 structlog.processors.CallsiteParameter.FUNC_NAME,
+                 # structlog.processors.CallsiteParameter.LINENO,
+             }
+         ),
+         my_processor,
+         # Render the final event dict as JSON.
+         # sentry_processor,
+         console_renderer
+         # structlog.processors.JSONRenderer()
+
+     ],
+     # `wrapper_class` is the bound logger that you get back from
+     # get_logger(). This one imitates the API of `logging.Logger`.
+     wrapper_class=structlog.stdlib.BoundLogger,
+     # `logger_factory` is used to create wrapped loggers that are used for
+     # OUTPUT. This one returns a `logging.Logger`. The final value (a JSON
+     # string) from the final processor (`JSONRenderer`) will be passed to
+     # the method of the same name as that you've called on the bound logger.
+     logger_factory=structlog.stdlib.LoggerFactory(),
+     # Effectively freeze configuration after creating the first bound
+     # logger.
+     cache_logger_on_first_use=True,
+ )
+
+ file_handler = RotatingFileHandler(
+     filename="app.log",
+     maxBytes=10 * 1024 * 1024,
+     backupCount=5,
+     encoding="utf-8"
+ )
+
+ logging.basicConfig(
+     format="%(message)s",
+     stream=sys.stdout,
+     level=logging.INFO,
+ )
+
+ # log = structlog.stdlib.get_logger()
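Importing this module configures structlog and the stdlib root logger as a side effect, so callers only need to grab a bound logger. A small usage sketch; the event text and key-value pairs are illustrative:

import koi_net.logger  # importing applies the structlog/stdlib logging configuration
import structlog

log = structlog.stdlib.get_logger()

# Keyword arguments are rendered as extra key=value columns after the
# timestamp, level, module.function path, and event columns.
log.info("node started", port=8000)
log.warning("peer timed out", attempts=3)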
koi_net/network/error_handler.py CHANGED
@@ -1,11 +1,11 @@
- from logging import getLogger
+ import structlog
  from koi_net.handshaker import Handshaker
  from koi_net.protocol.errors import ErrorType
  from koi_net.protocol.event import EventType
  from rid_lib.types import KoiNetNode
  from ..processor.kobj_queue import KobjQueue

- logger = getLogger(__name__)
+ log = structlog.stdlib.get_logger()


  class ErrorHandler:
@@ -27,10 +27,10 @@ class ErrorHandler:
          self.timeout_counter.setdefault(node, 0)
          self.timeout_counter[node] += 1

-         logger.debug(f"{node} has timed out {self.timeout_counter[node]} time(s)")
+         log.debug(f"{node} has timed out {self.timeout_counter[node]} time(s)")

          if self.timeout_counter[node] > 3:
-             logger.debug(f"Exceeded time out limit, forgetting node")
+             log.debug(f"Exceeded time out limit, forgetting node")
              self.kobj_queue.put_kobj(rid=node, event_type=EventType.FORGET)
              # do something

@@ -41,10 +41,10 @@ class ErrorHandler:
          node: KoiNetNode
      ):
          """Attempts handshake when this node is unknown to target."""
-         logger.info(f"Handling protocol error {error_type} for node {node!r}")
+         log.info(f"Handling protocol error {error_type} for node {node!r}")
          match error_type:
              case ErrorType.UnknownNode:
-                 logger.info("Peer doesn't know me, attempting handshake...")
+                 log.info("Peer doesn't know me, attempting handshake...")
                  self.handshaker.handshake_with(node)

              case ErrorType.InvalidKey: ...
koi_net/network/event_queue.py CHANGED
@@ -1,14 +1,18 @@
- import logging
+ import structlog
  from queue import Queue

  from rid_lib.types import KoiNetNode
+ from pydantic import BaseModel

- from ..models import QueuedEvent
  from ..protocol.event import Event

- logger = logging.getLogger(__name__)
+ log = structlog.stdlib.get_logger()


+ class QueuedEvent(BaseModel):
+     event: Event
+     target: KoiNetNode
+
  class EventQueue:
      """Handles out going network event queues."""
      q: Queue[QueuedEvent]
@@ -20,9 +24,7 @@ class EventQueue:
          """Pushes event to queue of specified node.

          Event will be sent to webhook or poll queue depending on the
-         node type and edge type of the specified node. If `flush` is set
-         to `True`, the webhook queued will be flushed after pushing the
-         event.
+         node type and edge type of the specified node.
          """

          self.q.put(QueuedEvent(target=target, event=event))
koi_net/network/graph.py CHANGED
@@ -1,4 +1,4 @@
- import logging
+ import structlog
  from typing import Literal
  import networkx as nx
  from rid_lib import RIDType
@@ -7,7 +7,7 @@ from rid_lib.types import KoiNetEdge, KoiNetNode
  from ..identity import NodeIdentity
  from ..protocol.edge import EdgeProfile, EdgeStatus

- logger = logging.getLogger(__name__)
+ log = structlog.stdlib.get_logger()


  class NetworkGraph:
@@ -24,22 +24,22 @@ class NetworkGraph:

      def generate(self):
          """Generates directed graph from cached KOI nodes and edges."""
-         logger.debug("Generating network graph")
+         log.debug("Generating network graph")
          self.dg.clear()
          for rid in self.cache.list_rids():
              if type(rid) == KoiNetNode:
                  self.dg.add_node(rid)
-                 logger.debug(f"Added node {rid!r}")
+                 log.debug(f"Added node {rid!r}")

              elif type(rid) == KoiNetEdge:
                  edge_bundle = self.cache.read(rid)
                  if not edge_bundle:
-                     logger.warning(f"Failed to load {rid!r}")
+                     log.warning(f"Failed to load {rid!r}")
                      continue
                  edge_profile = edge_bundle.validate_contents(EdgeProfile)
                  self.dg.add_edge(edge_profile.source, edge_profile.target, rid=rid)
-                 logger.debug(f"Added edge {rid!r} ({edge_profile.source} -> {edge_profile.target})")
-         logger.debug("Done")
+                 log.debug(f"Added edge {rid!r} ({edge_profile.source} -> {edge_profile.target})")
+         log.debug("Done")

      def get_edge(self, source: KoiNetNode, target: KoiNetNode,) -> KoiNetEdge | None:
          """Returns edge RID given the RIDs of a source and target node."""
@@ -97,7 +97,7 @@ class NetworkGraph:
              edge_bundle = self.cache.read(edge_rid)

              if not edge_bundle:
-                 logger.warning(f"Failed to find edge {edge_rid!r} in cache")
+                 log.warning(f"Failed to find edge {edge_rid!r} in cache")
                  continue

              edge_profile = edge_bundle.validate_contents(EdgeProfile)
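For orientation, generate() builds an ordinary networkx DiGraph: one node per cached KoiNetNode RID and one directed edge per cached EdgeProfile (source to target), tagged with the edge RID. A rough sketch with stand-in strings in place of real RID objects; the predecessors() call shows what an "in" neighbor query like get_neighbors(direction="in", ...) presumably reduces to:

import networkx as nx

# Stand-in strings; the real graph stores rid_lib KoiNetNode and KoiNetEdge objects.
dg = nx.DiGraph()
dg.add_node("node:me")
dg.add_node("node:coordinator")
# generate() adds one edge per cached EdgeProfile, directed source -> target, tagged with the edge RID.
dg.add_edge("node:coordinator", "node:me", rid="edge:coordinator->me")

print(list(dg.predecessors("node:me")))                 # ['node:coordinator']
print(dg.edges["node:coordinator", "node:me"]["rid"])   # edge:coordinator->me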
koi_net/network/poll_event_buffer.py ADDED
@@ -0,0 +1,26 @@
+ from rid_lib.types import KoiNetNode
+
+ from koi_net.protocol.event import Event
+
+
+ class PollEventBuffer:
+     buffers: dict[KoiNetNode, list[Event]]
+
+     def __init__(self):
+         self.buffers = dict()
+
+     def put(self, node: KoiNetNode, event: Event):
+         event_buf = self.buffers.setdefault(node, [])
+         event_buf.append(event)
+
+     def flush(self, node: KoiNetNode, limit: int = 0):
+         event_buf = self.buffers.get(node, [])
+
+         if limit and len(event_buf) > limit:
+             to_return = event_buf[:limit]
+             self.buffers[node] = event_buf[limit:]
+         else:
+             to_return = event_buf.copy()
+             self.buffers[node] = []
+
+         return to_return
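PollEventBuffer keeps a per-node list of pending events; flush() returns everything, or at most limit events while leaving the remainder buffered. A behavior sketch using plain strings in place of KoiNetNode and Event instances (the type hints are not enforced at runtime):

from koi_net.network.poll_event_buffer import PollEventBuffer

buf = PollEventBuffer()

# Buffer five events for one node, then drain them in two flushes.
for i in range(5):
    buf.put("node-a", f"event-{i}")

print(buf.flush("node-a", limit=2))  # ['event-0', 'event-1'], three events remain buffered
print(buf.flush("node-a"))           # ['event-2', 'event-3', 'event-4']
print(buf.flush("node-b"))           # [] for a node with nothing buffered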
koi_net/network/request_handler.py CHANGED
@@ -1,9 +1,11 @@
- import logging
+ import structlog
  import httpx
  from rid_lib import RID
  from rid_lib.ext import Cache
  from rid_lib.types.koi_net_node import KoiNetNode

+ from koi_net.protocol.model_map import API_MODEL_MAP
+
  from ..identity import NodeIdentity
  from ..protocol.api_models import (
      RidsPayload,
@@ -30,8 +32,7 @@ from ..protocol.node import NodeProfile, NodeType
  from ..secure import Secure
  from .error_handler import ErrorHandler

-
- logger = logging.getLogger(__name__)
+ log = structlog.stdlib.get_logger()


  # Custom error types for request handling
@@ -74,7 +75,7 @@ class RequestHandler:
      def get_url(self, node_rid: KoiNetNode) -> str:
          """Retrieves URL of a node from its RID."""

-         logger.debug(f"Getting URL for {node_rid!r}")
+         log.debug(f"Getting URL for {node_rid!r}")
          node_url = None

          if node_rid == self.identity.rid:
@@ -84,20 +85,20 @@ class RequestHandler:

          if node_bundle:
              node_profile = node_bundle.validate_contents(NodeProfile)
-             logger.debug(f"Found node profile: {node_profile}")
+             log.debug(f"Found node profile: {node_profile}")
              if node_profile.node_type != NodeType.FULL:
                  raise PartialNodeQueryError("Can't query partial node")
              node_url = node_profile.base_url

          else:
              if node_rid == self.identity.config.koi_net.first_contact.rid:
-                 logger.debug("Found URL of first contact")
+                 log.debug("Found URL of first contact")
                  node_url = self.identity.config.koi_net.first_contact.url

          if not node_url:
              raise NodeNotFoundError("Node not found")

-         logger.debug(f"Resolved {node_rid!r} to {node_url}")
+         log.debug(f"Resolved {node_rid!r} to {node_url}")
          return node_url

      def make_request(
@@ -108,7 +109,7 @@ class RequestHandler:
      ) -> ResponseModels | None:
          """Makes a request to a node."""
          url = self.get_url(node) + path
-         logger.info(f"Making request to {url}")
+         log.info(f"Making request to {url}")

          signed_envelope = self.secure.create_envelope(
              payload=request,
@@ -116,9 +117,12 @@ class RequestHandler:
          )

          try:
-             result = httpx.post(url, data=signed_envelope.model_dump_json(exclude_none=True))
+             result = httpx.post(
+                 url,
+                 data=signed_envelope.model_dump_json(exclude_none=True)
+             )
          except httpx.ConnectError as err:
-             logger.debug("Failed to connect")
+             log.debug("Failed to connect")
              self.error_handler.handle_connection_error(node)
              raise err

@@ -127,20 +131,11 @@ class RequestHandler:
              self.error_handler.handle_protocol_error(resp.error, node)
              return resp

-         if path == BROADCAST_EVENTS_PATH:
-             return None
-         elif path == POLL_EVENTS_PATH:
-             EnvelopeModel = SignedEnvelope[EventsPayload]
-         elif path == FETCH_RIDS_PATH:
-             EnvelopeModel = SignedEnvelope[RidsPayload]
-         elif path == FETCH_MANIFESTS_PATH:
-             EnvelopeModel = SignedEnvelope[ManifestsPayload]
-         elif path == FETCH_BUNDLES_PATH:
-             EnvelopeModel = SignedEnvelope[BundlesPayload]
-         else:
-             raise UnknownPathError(f"Unknown path '{path}'")
+         resp_env_model = API_MODEL_MAP[path].response_envelope
+         if resp_env_model is None:
+             return

-         resp_envelope = EnvelopeModel.model_validate_json(result.text)
+         resp_envelope = resp_env_model.model_validate_json(result.text)
          self.secure.validate_envelope(resp_envelope)

          return resp_envelope.payload
@@ -157,7 +152,7 @@ class RequestHandler:
          """
          request = req or EventsPayload.model_validate(kwargs)
          self.make_request(node, BROADCAST_EVENTS_PATH, request)
-         logger.info(f"Broadcasted {len(request.events)} event(s) to {node!r}")
+         log.info(f"Broadcasted {len(request.events)} event(s) to {node!r}")

      def poll_events(
          self,
@@ -172,7 +167,7 @@ class RequestHandler:
          request = req or PollEvents.model_validate(kwargs)
          resp = self.make_request(node, POLL_EVENTS_PATH, request)
          if type(resp) != ErrorResponse:
-             logger.info(f"Polled {len(resp.events)} events from {node!r}")
+             log.info(f"Polled {len(resp.events)} events from {node!r}")
          return resp

      def fetch_rids(
@@ -188,7 +183,7 @@ class RequestHandler:
          request = req or FetchRids.model_validate(kwargs)
          resp = self.make_request(node, FETCH_RIDS_PATH, request)
          if type(resp) != ErrorResponse:
-             logger.info(f"Fetched {len(resp.rids)} RID(s) from {node!r}")
+             log.info(f"Fetched {len(resp.rids)} RID(s) from {node!r}")
          return resp

      def fetch_manifests(
@@ -204,7 +199,7 @@ class RequestHandler:
          request = req or FetchManifests.model_validate(kwargs)
          resp = self.make_request(node, FETCH_MANIFESTS_PATH, request)
          if type(resp) != ErrorResponse:
-             logger.info(f"Fetched {len(resp.manifests)} manifest(s) from {node!r}")
+             log.info(f"Fetched {len(resp.manifests)} manifest(s) from {node!r}")
          return resp

      def fetch_bundles(
@@ -220,5 +215,5 @@ class RequestHandler:
          request = req or FetchBundles.model_validate(kwargs)
          resp = self.make_request(node, FETCH_BUNDLES_PATH, request)
          if type(resp) != ErrorResponse:
-             logger.info(f"Fetched {len(resp.bundles)} bundle(s) from {node!r}")
+             log.info(f"Fetched {len(resp.bundles)} bundle(s) from {node!r}")
          return resp
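make_request now dispatches on API_MODEL_MAP instead of the removed if/elif chain over paths. Since protocol/model_map.py is not shown in this diff, the snippet below is only a hypothetical reconstruction of the pattern: each path maps to an entry whose response_envelope is either None (no response body expected) or a model class used to validate the response. The real entries presumably use the SignedEnvelope[...] generics from koi_net.protocol.

from dataclasses import dataclass
from pydantic import BaseModel

# Illustrative stand-in; the real response models come from koi_net.protocol.api_models.
class EventsEnvelope(BaseModel):
    events: list[str] = []

@dataclass(frozen=True)
class ApiModelEntry:
    # None means the endpoint returns no body (e.g. broadcasting events).
    response_envelope: type[BaseModel] | None

# Hypothetical map shape; the actual paths and models live in koi_net.protocol.model_map.
API_MODEL_MAP = {
    "/events/broadcast": ApiModelEntry(response_envelope=None),
    "/events/poll": ApiModelEntry(response_envelope=EventsEnvelope),
}

entry = API_MODEL_MAP["/events/poll"]
if entry.response_envelope is not None:
    payload = entry.response_envelope.model_validate_json('{"events": ["e1"]}')
    print(payload.events)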