aprsd 4.0.2__py3-none-any.whl → 4.1.0__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
@@ -0,0 +1,53 @@
+ import logging
+ from typing import Union
+
+ from oslo_config import cfg
+
+ from aprsd import packets
+ from aprsd.packets import core
+ from aprsd.utils import singleton
+
+ CONF = cfg.CONF
+ LOG = logging.getLogger('APRSD')
+
+
+ @singleton
+ class PacketTypeFilter:
+     """This filter is used to filter out packets that don't match a specific type.
+
+     To use this, register it with the PacketFilter class,
+     then instantiate it and call set_allow_list() with a list of packet types
+     you want to allow to pass the filtering. All other packets will be
+     filtered out.
+     """
+
+     filters = {
+         packets.Packet.__name__: packets.Packet,
+         packets.AckPacket.__name__: packets.AckPacket,
+         packets.BeaconPacket.__name__: packets.BeaconPacket,
+         packets.GPSPacket.__name__: packets.GPSPacket,
+         packets.MessagePacket.__name__: packets.MessagePacket,
+         packets.MicEPacket.__name__: packets.MicEPacket,
+         packets.ObjectPacket.__name__: packets.ObjectPacket,
+         packets.StatusPacket.__name__: packets.StatusPacket,
+         packets.ThirdPartyPacket.__name__: packets.ThirdPartyPacket,
+         packets.WeatherPacket.__name__: packets.WeatherPacket,
+         packets.UnknownPacket.__name__: packets.UnknownPacket,
+     }
+
+     allow_list = ()
+
+     def set_allow_list(self, filter_list):
+         tmp_list = []
+         for filter in filter_list:
+             LOG.warning(
+                 f'Setting filter {filter} : {self.filters[filter]} to tmp {tmp_list}'
+             )
+             tmp_list.append(self.filters[filter])
+         self.allow_list = tuple(tmp_list)
+
+     def filter(self, packet: type[core.Packet]) -> Union[type[core.Packet], None]:
+         """Only allow packets of certain types to filter through."""
+         if self.allow_list:
+             if isinstance(packet, self.allow_list):
+                 return packet
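A minimal usage sketch for the new type filter, based only on the docstring and code above. It assumes the class lives in aprsd.packets.filter (the module imported as `filter` in rx.py below); the registration call on PacketFilter is an assumption, since this diff does not show PacketFilter's API.

from aprsd.packets import filter as packet_filter

# PacketTypeFilter is a singleton; configure which packet classes may pass.
# The allow-list entries are class names, matching the keys of
# PacketTypeFilter.filters above.
type_filter = packet_filter.PacketTypeFilter()
type_filter.set_allow_list(['MessagePacket', 'AckPacket'])

# Assumed registration hook: the docstring only says to "register it with
# the PacketFilter class", so the exact method name may differ.
# packet_filter.PacketFilter().register(packet_filter.PacketTypeFilter)

# Once registered, filter() returns the packet only for allowed types and
# None for everything else, so non-matching packets are dropped.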
@@ -7,7 +7,7 @@ from aprsd.packets import core
  from aprsd.utils import objectstore

  CONF = cfg.CONF
- LOG = logging.getLogger("APRSD")
+ LOG = logging.getLogger('APRSD')


  class PacketList(objectstore.ObjectStoreMixin):
@@ -27,8 +27,8 @@ class PacketList(objectstore.ObjectStoreMixin):

      def _init_data(self):
          self.data = {
-             "types": {},
-             "packets": OrderedDict(),
+             'types': {},
+             'packets': OrderedDict(),
          }

      def rx(self, packet: type[core.Packet]):
@@ -37,11 +37,11 @@ class PacketList(objectstore.ObjectStoreMixin):
          self._total_rx += 1
          self._add(packet)
          ptype = packet.__class__.__name__
-         type_stats = self.data["types"].setdefault(
+         type_stats = self.data['types'].setdefault(
              ptype,
-             {"tx": 0, "rx": 0},
+             {'tx': 0, 'rx': 0},
          )
-         type_stats["rx"] += 1
+         type_stats['rx'] += 1

      def tx(self, packet: type[core.Packet]):
          """Add a packet that was received."""
@@ -49,32 +49,32 @@ class PacketList(objectstore.ObjectStoreMixin):
          self._total_tx += 1
          self._add(packet)
          ptype = packet.__class__.__name__
-         type_stats = self.data["types"].setdefault(
+         type_stats = self.data['types'].setdefault(
              ptype,
-             {"tx": 0, "rx": 0},
+             {'tx': 0, 'rx': 0},
          )
-         type_stats["tx"] += 1
+         type_stats['tx'] += 1

      def add(self, packet):
          with self.lock:
              self._add(packet)

      def _add(self, packet):
-         if not self.data.get("packets"):
+         if not self.data.get('packets'):
              self._init_data()
-         if packet.key in self.data["packets"]:
-             self.data["packets"].move_to_end(packet.key)
-         elif len(self.data["packets"]) == self.maxlen:
-             self.data["packets"].popitem(last=False)
-         self.data["packets"][packet.key] = packet
+         if packet.key in self.data['packets']:
+             self.data['packets'].move_to_end(packet.key)
+         elif len(self.data['packets']) == self.maxlen:
+             self.data['packets'].popitem(last=False)
+         self.data['packets'][packet.key] = packet

      def find(self, packet):
          with self.lock:
-             return self.data["packets"][packet.key]
+             return self.data['packets'][packet.key]

      def __len__(self):
          with self.lock:
-             return len(self.data["packets"])
+             return len(self.data['packets'])

      def total_rx(self):
          with self.lock:
@@ -87,17 +87,23 @@ class PacketList(objectstore.ObjectStoreMixin):
      def stats(self, serializable=False) -> dict:
          with self.lock:
              # Get last N packets directly using list slicing
-             packets_list = list(self.data.get("packets", {}).values())
-             pkts = packets_list[-CONF.packet_list_stats_maxlen :][::-1]
-
+             if CONF.packet_list_stats_maxlen >= 0:
+                 packets_list = list(self.data.get('packets', {}).values())
+                 pkts = packets_list[-CONF.packet_list_stats_maxlen :][::-1]
+             else:
+                 # We have to copy here, because this get() results in a pointer
+                 # to the packets internally here, which can change after this
+                 # function returns, which would cause a problem trying to save
+                 # the stats to disk.
+                 pkts = self.data.get('packets', {}).copy()
              stats = {
-                 "total_tracked": self._total_rx
+                 'total_tracked': self._total_rx
                  + self._total_tx,  # Fixed typo: was rx + rx
-                 "rx": self._total_rx,
-                 "tx": self._total_tx,
-                 "types": self.data.get("types", {}),  # Changed default from [] to {}
-                 "packet_count": len(self.data.get("packets", [])),
-                 "maxlen": self.maxlen,
-                 "packets": pkts,
+                 'rx': self._total_rx,
+                 'tx': self._total_tx,
+                 'types': self.data.get('types', {}),  # Changed default from [] to {}
+                 'packet_count': len(self.data.get('packets', [])),
+                 'maxlen': self.maxlen,
+                 'packets': pkts,
              }
              return stats
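For context on the stats() change above: when CONF.packet_list_stats_maxlen is non-negative, only the newest N packets are included (newest first); a negative value now returns a copy of the whole packet store so later mutation cannot change the snapshot. A standalone sketch of the same slicing, with a made-up config value:

from collections import OrderedDict

# Illustrative only: mimic the slicing logic from PacketList.stats() above.
packets = OrderedDict((i, f'pkt-{i}') for i in range(10))

packet_list_stats_maxlen = 3  # assumed config value for the example
if packet_list_stats_maxlen >= 0:
    # Keep only the newest N packets, newest first.
    pkts = list(packets.values())[-packet_list_stats_maxlen:][::-1]
else:
    # Negative means "keep everything"; copy so mutation of the internal
    # OrderedDict after this point cannot change the stats snapshot.
    pkts = packets.copy()

print(pkts)  # ['pkt-9', 'pkt-8', 'pkt-7']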
aprsd/threads/__init__.py CHANGED
@@ -4,9 +4,8 @@ import queue
  # aprsd.threads
  from .aprsd import APRSDThread, APRSDThreadList  # noqa: F401
  from .rx import (  # noqa: F401
-     APRSDDupeRXThread,
      APRSDProcessPacketThread,
      APRSDRXThread,
  )

- packet_queue = queue.Queue(maxsize=20)
+ packet_queue = queue.Queue(maxsize=500)
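The shared packet_queue bound rises from 20 to 500, so the RX thread can buffer far more decoded packets before queue.Queue.put() would block. A small standard-library illustration; the timeout handling here is illustrative only, since aprsd itself calls put() without a timeout:

import queue

# Same construction as aprsd.threads.packet_queue after this change.
packet_queue = queue.Queue(maxsize=500)

try:
    # A bounded put with a timeout is one way a producer could avoid
    # stalling forever if the consumer thread falls behind.
    packet_queue.put('decoded-packet', timeout=1)  # placeholder payload
except queue.Full:
    pass  # illustrative handling only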
aprsd/threads/rx.py CHANGED
@@ -8,20 +8,32 @@ from oslo_config import cfg

  from aprsd import packets, plugin
  from aprsd.client import client_factory
- from aprsd.packets import collector
+ from aprsd.packets import collector, filter
  from aprsd.packets import log as packet_log
  from aprsd.threads import APRSDThread, tx
- from aprsd.utils import trace

  CONF = cfg.CONF
- LOG = logging.getLogger("APRSD")
+ LOG = logging.getLogger('APRSD')


  class APRSDRXThread(APRSDThread):
+     """Main Class to connect to an APRS Client and receive packets.
+
+     A packet is received in the main loop and then sent to the
+     process_packet method, which sends the packet through the collector
+     to track the packet for stats, and then put into the packet queue
+     for processing in a separate thread.
+     """
+
      _client = None

+     # This is the queue that packets are sent to for processing.
+     # We process packets in a separate thread to help prevent
+     # getting blocked by the APRS server trying to send us packets.
+     packet_queue = None
+
      def __init__(self, packet_queue):
-         super().__init__("RX_PKT")
+         super().__init__('RX_PKT')
          self.packet_queue = packet_queue

      def stop(self):
@@ -52,7 +64,7 @@
              # kwargs. :(
              # https://github.com/rossengeorgiev/aprs-python/pull/56
              self._client.consumer(
-                 self._process_packet,
+                 self.process_packet,
                  raw=False,
                  blocking=False,
              )
@@ -60,7 +72,7 @@
              aprslib.exceptions.ConnectionDrop,
              aprslib.exceptions.ConnectionError,
          ):
-             LOG.error("Connection dropped, reconnecting")
+             LOG.error('Connection dropped, reconnecting')
              # Force the deletion of the client object connected to aprs
              # This will cause a reconnect, next time client.get_client()
              # is called
@@ -68,45 +80,18 @@
              time.sleep(5)
          except Exception:
              # LOG.exception(ex)
-             LOG.error("Resetting connection and trying again.")
+             LOG.error('Resetting connection and trying again.')
              self._client.reset()
              time.sleep(5)
-         # Continue to loop
-         time.sleep(1)
          return True

-     def _process_packet(self, *args, **kwargs):
-         """Intermediate callback so we can update the keepalive time."""
-         # Now call the 'real' packet processing for a RX'x packet
-         self.process_packet(*args, **kwargs)
-
-     @abc.abstractmethod
      def process_packet(self, *args, **kwargs):
-         pass
-
-
- class APRSDDupeRXThread(APRSDRXThread):
-     """Process received packets.
-
-     This is the main APRSD Server command thread that
-     receives packets and makes sure the packet
-     hasn't been seen previously before sending it on
-     to be processed.
-     """
-
-     @trace.trace
-     def process_packet(self, *args, **kwargs):
-         """This handles the processing of an inbound packet.
-
-         When a packet is received by the connected client object,
-         it sends the raw packet into this function. This function then
-         decodes the packet via the client, and then processes the packet.
-         Ack Packets are sent to the PluginProcessPacketThread for processing.
-         All other packets have to be checked as a dupe, and then only after
-         we haven't seen this packet before, do we send it to the
-         PluginProcessPacketThread for processing.
-         """
          packet = self._client.decode_packet(*args, **kwargs)
+         if not packet:
+             LOG.error(
+                 'No packet received from decode_packet. Most likely a failure to parse'
+             )
+             return
          packet_log.log(packet)
          pkt_list = packets.PacketList()

@@ -140,26 +125,55 @@ class APRSDDupeRXThread(APRSDRXThread):
                  # If the packet came in within N seconds of the
                  # Last time seeing the packet, then we drop it as a dupe.
                  LOG.warning(
-                     f"Packet {packet.from_call}:{packet.msgNo} already tracked, dropping."
+                     f'Packet {packet.from_call}:{packet.msgNo} already tracked, dropping.'
                  )
              else:
                  LOG.warning(
-                     f"Packet {packet.from_call}:{packet.msgNo} already tracked "
-                     f"but older than {CONF.packet_dupe_timeout} seconds. processing.",
+                     f'Packet {packet.from_call}:{packet.msgNo} already tracked '
+                     f'but older than {CONF.packet_dupe_timeout} seconds. processing.',
                  )
                  collector.PacketCollector().rx(packet)
                  self.packet_queue.put(packet)


- class APRSDPluginRXThread(APRSDDupeRXThread):
-     """ "Process received packets.
+ class APRSDFilterThread(APRSDThread):
+     def __init__(self, thread_name, packet_queue):
+         super().__init__(thread_name)
+         self.packet_queue = packet_queue

-     For backwards compatibility, we keep the APRSDPluginRXThread.
-     """
+     def filter_packet(self, packet):
+         # Do any packet filtering prior to processing
+         if not filter.PacketFilter().filter(packet):
+             return None
+         return packet

+     def print_packet(self, packet):
+         """Allow a child of this class to override this.

- class APRSDProcessPacketThread(APRSDThread):
-     """Base class for processing received packets.
+         This is helpful if for whatever reason the child class
+         doesn't want to log packets.
+
+         """
+         packet_log.log(packet)
+
+     def loop(self):
+         try:
+             packet = self.packet_queue.get(timeout=1)
+             self.print_packet(packet)
+             if packet:
+                 if self.filter_packet(packet):
+                     self.process_packet(packet)
+         except queue.Empty:
+             pass
+         return True
+
+
+ class APRSDProcessPacketThread(APRSDFilterThread):
+     """Base class for processing received packets after they have been filtered.
+
+     Packets are received from the client, then filtered for dupes,
+     then sent to the packet queue. This thread pulls packets from
+     the packet queue for processing.

      This is the base class for processing packets coming from
      the consumer. This base class handles sending ack packets and
@@ -167,44 +181,38 @@ class APRSDProcessPacketThread(APRSDThread):
      for processing."""

      def __init__(self, packet_queue):
-         self.packet_queue = packet_queue
-         super().__init__("ProcessPKT")
+         super().__init__('ProcessPKT', packet_queue=packet_queue)
          if not CONF.enable_sending_ack_packets:
              LOG.warning(
-                 "Sending ack packets is disabled, messages "
-                 "will not be acknowledged.",
+                 'Sending ack packets is disabled, messages will not be acknowledged.',
              )

      def process_ack_packet(self, packet):
          """We got an ack for a message, no need to resend it."""
          ack_num = packet.msgNo
-         LOG.debug(f"Got ack for message {ack_num}")
+         LOG.debug(f'Got ack for message {ack_num}')
          collector.PacketCollector().rx(packet)

      def process_piggyback_ack(self, packet):
          """We got an ack embedded in a packet."""
          ack_num = packet.ackMsgNo
-         LOG.debug(f"Got PiggyBackAck for message {ack_num}")
+         LOG.debug(f'Got PiggyBackAck for message {ack_num}')
          collector.PacketCollector().rx(packet)

      def process_reject_packet(self, packet):
          """We got a reject message for a packet. Stop sending the message."""
          ack_num = packet.msgNo
-         LOG.debug(f"Got REJECT for message {ack_num}")
+         LOG.debug(f'Got REJECT for message {ack_num}')
          collector.PacketCollector().rx(packet)

-     def loop(self):
-         try:
-             packet = self.packet_queue.get(timeout=1)
-             if packet:
-                 self.process_packet(packet)
-         except queue.Empty:
-             pass
-         return True
-
      def process_packet(self, packet):
          """Process a packet received from aprs-is server."""
-         LOG.debug(f"ProcessPKT-LOOP {self.loop_count}")
+         LOG.debug(f'ProcessPKT-LOOP {self.loop_count}')
+
+         # set this now as we are going to process it.
+         # This is used during dupe checking, so set it early
+         packet.processed = True
+
          our_call = CONF.callsign.lower()

          from_call = packet.from_call
@@ -227,7 +235,7 @@ class APRSDProcessPacketThread(APRSDThread):
          ):
              self.process_reject_packet(packet)
          else:
-             if hasattr(packet, "ackMsgNo") and packet.ackMsgNo:
+             if hasattr(packet, 'ackMsgNo') and packet.ackMsgNo:
                  # we got an ack embedded in this packet
                  # we need to handle the ack
                  self.process_piggyback_ack(packet)
@@ -267,7 +275,7 @@ class APRSDProcessPacketThread(APRSDThread):
              if not for_us:
                  LOG.info("Got a packet meant for someone else '{packet.to_call}'")
              else:
-                 LOG.info("Got a non AckPacket/MessagePacket")
+                 LOG.info('Got a non AckPacket/MessagePacket')


  class APRSDPluginProcessPacketThread(APRSDProcessPacketThread):
@@ -287,7 +295,7 @@ class APRSDPluginProcessPacketThread(APRSDProcessPacketThread):
                          tx.send(subreply)
                      else:
                          wl = CONF.watch_list
-                         to_call = wl["alert_callsign"]
+                         to_call = wl['alert_callsign']
                          tx.send(
                              packets.MessagePacket(
                                  from_call=CONF.callsign,
@@ -299,7 +307,7 @@ class APRSDPluginProcessPacketThread(APRSDProcessPacketThread):
                  # We have a message based object.
                  tx.send(reply)
          except Exception as ex:
-             LOG.error("Plugin failed!!!")
+             LOG.error('Plugin failed!!!')
              LOG.exception(ex)

      def process_our_message_packet(self, packet):
@@ -355,11 +363,11 @@ class APRSDPluginProcessPacketThread(APRSDProcessPacketThread):
              if to_call == CONF.callsign and not replied:
                  # Tailor the messages accordingly
                  if CONF.load_help_plugin:
-                     LOG.warning("Sending help!")
+                     LOG.warning('Sending help!')
                      message_text = "Unknown command! Send 'help' message for help"
                  else:
-                     LOG.warning("Unknown command!")
-                     message_text = "Unknown command!"
+                     LOG.warning('Unknown command!')
+                     message_text = 'Unknown command!'

                  tx.send(
                      packets.MessagePacket(
@@ -369,11 +377,11 @@ class APRSDPluginProcessPacketThread(APRSDProcessPacketThread):
                      ),
                  )
          except Exception as ex:
-             LOG.error("Plugin failed!!!")
+             LOG.error('Plugin failed!!!')
              LOG.exception(ex)
              # Do we need to send a reply?
              if to_call == CONF.callsign:
-                 reply = "A Plugin failed! try again?"
+                 reply = 'A Plugin failed! try again?'
                  tx.send(
                      packets.MessagePacket(
                          from_call=CONF.callsign,
@@ -382,4 +390,4 @@ class APRSDPluginProcessPacketThread(APRSDProcessPacketThread):
                      ),
                  )

-         LOG.debug("Completed process_our_message_packet")
+         LOG.debug('Completed process_our_message_packet')
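Taken together, the rx.py changes split receiving and processing into two threads joined by the shared queue: APRSDRXThread.process_packet() decodes, dupe-checks, and enqueues, while the APRSDFilterThread.loop() inherited by the processing thread pulls from the queue, applies PacketFilter, and then calls process_packet(). A hedged wiring sketch, assuming APRSDThread is a threading.Thread subclass with the usual start() method and omitting client/config bootstrap:

from aprsd import threads as aprsd_threads
from aprsd.threads import rx

# Producer: consumes from the APRS client and feeds the shared queue.
rx_thread = rx.APRSDRXThread(packet_queue=aprsd_threads.packet_queue)

# Consumer: pulls from the queue, filters, then runs the plugin pipeline.
process_thread = rx.APRSDPluginProcessPacketThread(
    packet_queue=aprsd_threads.packet_queue,
)

rx_thread.start()
process_thread.start()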
aprsd/threads/stats.py CHANGED
@@ -1,8 +1,6 @@
  import logging
- import threading
  import time

- import wrapt
  from oslo_config import cfg

  from aprsd.stats import collector
@@ -10,18 +8,15 @@ from aprsd.threads import APRSDThread
  from aprsd.utils import objectstore

  CONF = cfg.CONF
- LOG = logging.getLogger("APRSD")
+ LOG = logging.getLogger('APRSD')


  class StatsStore(objectstore.ObjectStoreMixin):
      """Container to save the stats from the collector."""

-     lock = threading.Lock()
-     data = {}
-
-     @wrapt.synchronized(lock)
      def add(self, stats: dict):
-         self.data = stats
+         with self.lock:
+             self.data = stats


  class APRSDStatsStoreThread(APRSDThread):
@@ -31,7 +26,7 @@ class APRSDStatsStoreThread(APRSDThread):
      save_interval = 10

      def __init__(self):
-         super().__init__("StatsStore")
+         super().__init__('StatsStore')

      def loop(self):
          if self.loop_count % self.save_interval == 0:
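The StatsStore change above drops the class-level wrapt.synchronized lock in favor of taking the lock provided by ObjectStoreMixin (the same self.lock used in the ObjectStoreMixin hunk below). A standalone sketch of the resulting pattern; the class name is illustrative and the lock attribute is assumed to come from the mixin:

import threading

class StoreSketch:
    """Illustrative stand-in for the mixin-provided lock/data attributes."""

    def __init__(self):
        self.lock = threading.Lock()  # assumed to be supplied by ObjectStoreMixin in aprsd
        self.data = {}

    def add(self, stats: dict):
        # Same shape as StatsStore.add() after the change: take the lock,
        # then swap in the new stats snapshot.
        with self.lock:
            self.data = stats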
@@ -6,9 +6,8 @@ import threading

  from oslo_config import cfg

-
  CONF = cfg.CONF
- LOG = logging.getLogger("APRSD")
+ LOG = logging.getLogger('APRSD')


  class ObjectStoreMixin:
@@ -63,7 +62,7 @@ class ObjectStoreMixin:
      def _save_filename(self):
          save_location = CONF.save_location

-         return "{}/{}.p".format(
+         return '{}/{}.p'.format(
              save_location,
              self.__class__.__name__.lower(),
          )
@@ -75,13 +74,13 @@ class ObjectStoreMixin:
          self._init_store()
          save_filename = self._save_filename()
          if len(self) > 0:
-             LOG.info(
-                 f"{self.__class__.__name__}::Saving"
-                 f" {len(self)} entries to disk at "
-                 f"{save_filename}",
+             LOG.debug(
+                 f'{self.__class__.__name__}::Saving'
+                 f' {len(self)} entries to disk at '
+                 f'{save_filename}',
              )
              with self.lock:
-                 with open(save_filename, "wb+") as fp:
+                 with open(save_filename, 'wb+') as fp:
                      pickle.dump(self.data, fp)
          else:
              LOG.debug(
@@ -97,21 +96,21 @@ class ObjectStoreMixin:
              return
          if os.path.exists(self._save_filename()):
              try:
-                 with open(self._save_filename(), "rb") as fp:
+                 with open(self._save_filename(), 'rb') as fp:
                      raw = pickle.load(fp)
                      if raw:
                          self.data = raw
                          LOG.debug(
-                             f"{self.__class__.__name__}::Loaded {len(self)} entries from disk.",
+                             f'{self.__class__.__name__}::Loaded {len(self)} entries from disk.',
                          )
                      else:
-                         LOG.debug(f"{self.__class__.__name__}::No data to load.")
+                         LOG.debug(f'{self.__class__.__name__}::No data to load.')
              except (pickle.UnpicklingError, Exception) as ex:
-                 LOG.error(f"Failed to UnPickle {self._save_filename()}")
+                 LOG.error(f'Failed to UnPickle {self._save_filename()}')
                  LOG.error(ex)
                  self.data = {}
          else:
-             LOG.debug(f"{self.__class__.__name__}::No save file found.")
+             LOG.debug(f'{self.__class__.__name__}::No save file found.')

      def flush(self):
          """Nuke the old pickle file that stored the old results from last aprsd run."""
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: aprsd
- Version: 4.0.2
+ Version: 4.1.0
  Summary: APRSd is a APRS-IS server that can be used to connect to APRS-IS and send and receive APRS packets.
  Author-email: Craig Lamparter <craig@craiger.org>, "Walter A. Boring IV" <waboring@hemna.com>, Emre Saglam <emresaglam@gmail.com>, Jason Martin <jhmartin@toger.us>, John <johng42@users.noreply.github.com>, Martiros Shakhzadyan <vrzh@vrzh.net>, Zoe Moore <zoenb@mailbox.org>, ranguli <hello@joshmurphy.ca>
  Maintainer-email: Craig Lamparter <craig@craiger.org>, "Walter A. Boring IV" <waboring@hemna.com>
  Maintainer-email: Craig Lamparter <craig@craiger.org>, "Walter A. Boring IV" <waboring@hemna.com>