aprsd 3.3.4__py2.py3-none-any.whl → 3.4.0__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aprsd/client.py +133 -20
- aprsd/clients/aprsis.py +6 -3
- aprsd/clients/fake.py +1 -1
- aprsd/clients/kiss.py +1 -1
- aprsd/cmds/completion.py +13 -27
- aprsd/cmds/fetch_stats.py +53 -57
- aprsd/cmds/healthcheck.py +32 -30
- aprsd/cmds/list_plugins.py +2 -2
- aprsd/cmds/listen.py +33 -17
- aprsd/cmds/send_message.py +2 -2
- aprsd/cmds/server.py +26 -9
- aprsd/cmds/webchat.py +34 -29
- aprsd/conf/common.py +46 -31
- aprsd/log/log.py +28 -6
- aprsd/main.py +4 -17
- aprsd/packets/__init__.py +3 -2
- aprsd/packets/collector.py +56 -0
- aprsd/packets/core.py +456 -321
- aprsd/packets/log.py +143 -0
- aprsd/packets/packet_list.py +83 -66
- aprsd/packets/seen_list.py +30 -19
- aprsd/packets/tracker.py +60 -62
- aprsd/packets/watch_list.py +64 -38
- aprsd/plugin.py +41 -16
- aprsd/plugins/email.py +35 -7
- aprsd/plugins/time.py +3 -2
- aprsd/plugins/version.py +4 -5
- aprsd/plugins/weather.py +0 -1
- aprsd/stats/__init__.py +20 -0
- aprsd/stats/app.py +46 -0
- aprsd/stats/collector.py +38 -0
- aprsd/threads/__init__.py +3 -2
- aprsd/threads/aprsd.py +67 -36
- aprsd/threads/keep_alive.py +55 -49
- aprsd/threads/log_monitor.py +46 -0
- aprsd/threads/rx.py +43 -24
- aprsd/threads/stats.py +44 -0
- aprsd/threads/tx.py +36 -17
- aprsd/utils/__init__.py +12 -0
- aprsd/utils/counter.py +6 -3
- aprsd/utils/json.py +20 -0
- aprsd/utils/objectstore.py +22 -17
- aprsd/web/admin/static/css/prism.css +4 -189
- aprsd/web/admin/static/js/charts.js +9 -7
- aprsd/web/admin/static/js/echarts.js +71 -9
- aprsd/web/admin/static/js/main.js +47 -6
- aprsd/web/admin/static/js/prism.js +11 -2246
- aprsd/web/admin/templates/index.html +18 -7
- aprsd/web/chat/static/js/gps.js +3 -1
- aprsd/web/chat/static/js/main.js +4 -3
- aprsd/web/chat/static/js/send-message.js +5 -2
- aprsd/web/chat/templates/index.html +1 -0
- aprsd/wsgi.py +62 -127
- {aprsd-3.3.4.dist-info → aprsd-3.4.0.dist-info}/METADATA +14 -16
- {aprsd-3.3.4.dist-info → aprsd-3.4.0.dist-info}/RECORD +60 -63
- {aprsd-3.3.4.dist-info → aprsd-3.4.0.dist-info}/WHEEL +1 -1
- aprsd-3.4.0.dist-info/pbr.json +1 -0
- aprsd/plugins/query.py +0 -81
- aprsd/rpc/__init__.py +0 -14
- aprsd/rpc/client.py +0 -165
- aprsd/rpc/server.py +0 -99
- aprsd/stats.py +0 -266
- aprsd/web/admin/static/json-viewer/jquery.json-viewer.css +0 -57
- aprsd/web/admin/static/json-viewer/jquery.json-viewer.js +0 -158
- aprsd/web/chat/static/json-viewer/jquery.json-viewer.css +0 -57
- aprsd/web/chat/static/json-viewer/jquery.json-viewer.js +0 -158
- aprsd-3.3.4.dist-info/pbr.json +0 -1
- {aprsd-3.3.4.dist-info → aprsd-3.4.0.dist-info}/LICENSE +0 -0
- {aprsd-3.3.4.dist-info → aprsd-3.4.0.dist-info}/entry_points.txt +0 -0
- {aprsd-3.3.4.dist-info → aprsd-3.4.0.dist-info}/top_level.txt +0 -0
aprsd/threads/keep_alive.py
CHANGED
@@ -5,7 +5,8 @@ import tracemalloc
|
|
5
5
|
|
6
6
|
from oslo_config import cfg
|
7
7
|
|
8
|
-
from aprsd import client, packets,
|
8
|
+
from aprsd import client, packets, utils
|
9
|
+
from aprsd.stats import collector
|
9
10
|
from aprsd.threads import APRSDThread, APRSDThreadList
|
10
11
|
|
11
12
|
|
@@ -24,61 +25,66 @@ class KeepAliveThread(APRSDThread):
|
|
24
25
|
self.max_delta = datetime.timedelta(**max_timeout)
|
25
26
|
|
26
27
|
def loop(self):
|
27
|
-
if self.
|
28
|
-
|
29
|
-
stats_obj = stats.APRSDStats()
|
28
|
+
if self.loop_count % 60 == 0:
|
29
|
+
stats_json = collector.Collector().collect()
|
30
30
|
pl = packets.PacketList()
|
31
31
|
thread_list = APRSDThreadList()
|
32
32
|
now = datetime.datetime.now()
|
33
|
-
|
34
|
-
if
|
35
|
-
|
33
|
+
|
34
|
+
if "EmailStats" in stats_json:
|
35
|
+
email_stats = stats_json["EmailStats"]
|
36
|
+
if email_stats.get("last_check_time"):
|
37
|
+
email_thread_time = utils.strfdelta(now - email_stats["last_check_time"])
|
38
|
+
else:
|
39
|
+
email_thread_time = "N/A"
|
36
40
|
else:
|
37
41
|
email_thread_time = "N/A"
|
38
42
|
|
39
|
-
|
40
|
-
|
41
|
-
|
42
|
-
|
43
|
-
|
44
|
-
|
45
|
-
|
43
|
+
if "APRSClientStats" in stats_json and stats_json["APRSClientStats"].get("transport") == "aprsis":
|
44
|
+
if stats_json["APRSClientStats"].get("server_keepalive"):
|
45
|
+
last_msg_time = utils.strfdelta(now - stats_json["APRSClientStats"]["server_keepalive"])
|
46
|
+
else:
|
47
|
+
last_msg_time = "N/A"
|
48
|
+
else:
|
49
|
+
last_msg_time = "N/A"
|
46
50
|
|
47
|
-
tracked_packets =
|
51
|
+
tracked_packets = stats_json["PacketTrack"]["total_tracked"]
|
52
|
+
tx_msg = 0
|
53
|
+
rx_msg = 0
|
54
|
+
if "PacketList" in stats_json:
|
55
|
+
msg_packets = stats_json["PacketList"].get("MessagePacket")
|
56
|
+
if msg_packets:
|
57
|
+
tx_msg = msg_packets.get("tx", 0)
|
58
|
+
rx_msg = msg_packets.get("rx", 0)
|
48
59
|
|
49
60
|
keepalive = (
|
50
61
|
"{} - Uptime {} RX:{} TX:{} Tracker:{} Msgs TX:{} RX:{} "
|
51
62
|
"Last:{} Email: {} - RAM Current:{} Peak:{} Threads:{}"
|
52
63
|
).format(
|
53
|
-
|
54
|
-
|
64
|
+
stats_json["APRSDStats"]["callsign"],
|
65
|
+
stats_json["APRSDStats"]["uptime"],
|
55
66
|
pl.total_rx(),
|
56
67
|
pl.total_tx(),
|
57
68
|
tracked_packets,
|
58
|
-
|
59
|
-
|
69
|
+
tx_msg,
|
70
|
+
rx_msg,
|
60
71
|
last_msg_time,
|
61
72
|
email_thread_time,
|
62
|
-
|
63
|
-
|
73
|
+
stats_json["APRSDStats"]["memory_current_str"],
|
74
|
+
stats_json["APRSDStats"]["memory_peak_str"],
|
64
75
|
len(thread_list),
|
65
76
|
)
|
66
77
|
LOG.info(keepalive)
|
67
|
-
|
68
|
-
|
69
|
-
|
70
|
-
|
71
|
-
|
72
|
-
|
73
|
-
|
74
|
-
|
75
|
-
|
76
|
-
|
77
|
-
thread_info[key]["age"] = age
|
78
|
-
if not alive:
|
79
|
-
LOG.error(f"Thread {thread}")
|
80
|
-
LOG.info(",".join(thread_out))
|
81
|
-
stats_obj.set_thread_info(thread_info)
|
78
|
+
if "APRSDThreadList" in stats_json:
|
79
|
+
thread_list = stats_json["APRSDThreadList"]
|
80
|
+
for thread_name in thread_list:
|
81
|
+
thread = thread_list[thread_name]
|
82
|
+
alive = thread["alive"]
|
83
|
+
age = thread["age"]
|
84
|
+
key = thread["name"]
|
85
|
+
if not alive:
|
86
|
+
LOG.error(f"Thread {thread}")
|
87
|
+
LOG.info(f"{key: <15} Alive? {str(alive): <5} {str(age): <20}")
|
82
88
|
|
83
89
|
# check the APRS connection
|
84
90
|
cl = client.factory.create()
|
@@ -90,18 +96,18 @@ class KeepAliveThread(APRSDThread):
|
|
90
96
|
if not cl.is_alive() and self.cntr > 0:
|
91
97
|
LOG.error(f"{cl.__class__.__name__} is not alive!!! Resetting")
|
92
98
|
client.factory.create().reset()
|
93
|
-
else:
|
94
|
-
|
95
|
-
|
96
|
-
|
97
|
-
|
98
|
-
|
99
|
-
|
100
|
-
|
101
|
-
|
102
|
-
|
103
|
-
|
104
|
-
|
99
|
+
# else:
|
100
|
+
# # See if we should reset the aprs-is client
|
101
|
+
# # Due to losing a keepalive from them
|
102
|
+
# delta_dict = utils.parse_delta_str(last_msg_time)
|
103
|
+
# delta = datetime.timedelta(**delta_dict)
|
104
|
+
#
|
105
|
+
# if delta > self.max_delta:
|
106
|
+
# # We haven't gotten a keepalive from aprs-is in a while
|
107
|
+
# # reset the connection.a
|
108
|
+
# if not client.KISSClient.is_enabled():
|
109
|
+
# LOG.warning(f"Resetting connection to APRS-IS {delta}")
|
110
|
+
# client.factory.create().reset()
|
105
111
|
|
106
112
|
# Check version every day
|
107
113
|
delta = now - self.checker_time
|
@@ -110,6 +116,6 @@ class KeepAliveThread(APRSDThread):
|
|
110
116
|
level, msg = utils._check_version()
|
111
117
|
if level:
|
112
118
|
LOG.warning(msg)
|
113
|
-
|
119
|
+
self.cntr += 1
|
114
120
|
time.sleep(1)
|
115
121
|
return True
|
aprsd/threads/log_monitor.py
CHANGED
@@ -1,25 +1,56 @@
|
|
1
|
+
import datetime
|
1
2
|
import logging
|
2
3
|
import threading
|
3
4
|
|
5
|
+
from oslo_config import cfg
|
6
|
+
import requests
|
4
7
|
import wrapt
|
5
8
|
|
6
9
|
from aprsd import threads
|
7
10
|
from aprsd.log import log
|
8
11
|
|
9
12
|
|
13
|
+
CONF = cfg.CONF
|
10
14
|
LOG = logging.getLogger("APRSD")
|
11
15
|
|
12
16
|
|
17
|
+
def send_log_entries(force=False):
    """POST buffered log entries to the admin web interface.

    When the admin web UI is enabled and the buffer is ready to flush
    (or *force* is True), the buffered entries are sent to the admin
    interface's ``/log_entries`` endpoint and the local buffer is purged.
    Failures are logged and swallowed deliberately (best-effort send).

    :param force: send immediately, ignoring the purge-ready timer.
    """
    if CONF.admin.web_enabled:
        if force or LogEntries().is_purge_ready():
            entries = LogEntries().get_all_and_purge()
            # Fix: this was a leftover debug print(); route it through the
            # logger so it honors the configured log level and handlers.
            LOG.debug(f"Sending log entries {len(entries)}")
            if entries:
                try:
                    requests.post(
                        f"http://{CONF.admin.web_ip}:{CONF.admin.web_port}/log_entries",
                        json=entries,
                        auth=(CONF.admin.user, CONF.admin.password),
                    )
                except Exception as ex:
                    # Best-effort: don't let an unreachable admin UI kill
                    # the log monitor thread.
                    LOG.warning(f"Failed to send log entries {len(entries)}")
                    LOG.warning(ex)
|
33
|
+
|
34
|
+
|
13
35
|
class LogEntries:
|
14
36
|
entries = []
|
15
37
|
lock = threading.Lock()
|
16
38
|
_instance = None
|
39
|
+
last_purge = datetime.datetime.now()
|
40
|
+
max_delta = datetime.timedelta(
|
41
|
+
hours=0.0, minutes=0, seconds=2,
|
42
|
+
)
|
17
43
|
|
18
44
|
def __new__(cls, *args, **kwargs):
|
19
45
|
if cls._instance is None:
|
20
46
|
cls._instance = super().__new__(cls)
|
21
47
|
return cls._instance
|
22
48
|
|
49
|
+
def stats(self) -> dict:
|
50
|
+
return {
|
51
|
+
"log_entries": self.entries,
|
52
|
+
}
|
53
|
+
|
23
54
|
@wrapt.synchronized(lock)
|
24
55
|
def add(self, entry):
|
25
56
|
self.entries.append(entry)
|
@@ -28,8 +59,18 @@ class LogEntries:
|
|
28
59
|
def get_all_and_purge(self):
|
29
60
|
entries = self.entries.copy()
|
30
61
|
self.entries = []
|
62
|
+
self.last_purge = datetime.datetime.now()
|
31
63
|
return entries
|
32
64
|
|
65
|
+
def is_purge_ready(self):
    """Return True when the buffered entries are due to be flushed.

    "Due" means the purge timer (``max_delta``) has elapsed since the
    last purge AND more than one entry is waiting in the buffer.
    """
    elapsed = datetime.datetime.now() - self.last_purge
    return elapsed > self.max_delta and len(self.entries) > 1
|
73
|
+
|
33
74
|
@wrapt.synchronized(lock)
|
34
75
|
def __len__(self):
|
35
76
|
return len(self.entries)
|
@@ -40,6 +81,10 @@ class LogMonitorThread(threads.APRSDThread):
|
|
40
81
|
def __init__(self):
|
41
82
|
super().__init__("LogMonitorThread")
|
42
83
|
|
84
|
+
def stop(self):
|
85
|
+
send_log_entries(force=True)
|
86
|
+
super().stop()
|
87
|
+
|
43
88
|
def loop(self):
|
44
89
|
try:
|
45
90
|
record = log.logging_queue.get(block=True, timeout=2)
|
@@ -54,6 +99,7 @@ class LogMonitorThread(threads.APRSDThread):
|
|
54
99
|
# Just ignore this
|
55
100
|
pass
|
56
101
|
|
102
|
+
send_log_entries()
|
57
103
|
return True
|
58
104
|
|
59
105
|
def json_record(self, record):
|
aprsd/threads/rx.py
CHANGED
@@ -7,6 +7,8 @@ import aprslib
|
|
7
7
|
from oslo_config import cfg
|
8
8
|
|
9
9
|
from aprsd import client, packets, plugin
|
10
|
+
from aprsd.packets import collector
|
11
|
+
from aprsd.packets import log as packet_log
|
10
12
|
from aprsd.threads import APRSDThread, tx
|
11
13
|
|
12
14
|
|
@@ -16,15 +18,20 @@ LOG = logging.getLogger("APRSD")
|
|
16
18
|
|
17
19
|
class APRSDRXThread(APRSDThread):
|
18
20
|
def __init__(self, packet_queue):
|
19
|
-
super().__init__("
|
21
|
+
super().__init__("RX_PKT")
|
20
22
|
self.packet_queue = packet_queue
|
21
23
|
self._client = client.factory.create()
|
22
24
|
|
23
25
|
def stop(self):
|
24
26
|
self.thread_stop = True
|
25
|
-
|
27
|
+
if self._client:
|
28
|
+
self._client.stop()
|
26
29
|
|
27
30
|
def loop(self):
|
31
|
+
if not self._client:
|
32
|
+
self._client = client.factory.create()
|
33
|
+
time.sleep(1)
|
34
|
+
return True
|
28
35
|
# setup the consumer of messages and block until a messages
|
29
36
|
try:
|
30
37
|
# This will register a packet consumer with aprslib
|
@@ -36,23 +43,32 @@ class APRSDRXThread(APRSDThread):
|
|
36
43
|
# and the aprslib developer didn't want to allow a PR to add
|
37
44
|
# kwargs. :(
|
38
45
|
# https://github.com/rossengeorgiev/aprs-python/pull/56
|
39
|
-
self._client.
|
40
|
-
self.
|
46
|
+
self._client.consumer(
|
47
|
+
self._process_packet, raw=False, blocking=False,
|
41
48
|
)
|
42
|
-
|
43
49
|
except (
|
44
50
|
aprslib.exceptions.ConnectionDrop,
|
45
51
|
aprslib.exceptions.ConnectionError,
|
46
52
|
):
|
47
53
|
LOG.error("Connection dropped, reconnecting")
|
48
|
-
time.sleep(5)
|
49
54
|
# Force the deletion of the client object connected to aprs
|
50
55
|
# This will cause a reconnect, next time client.get_client()
|
51
56
|
# is called
|
52
57
|
self._client.reset()
|
58
|
+
time.sleep(5)
|
59
|
+
except Exception:
|
60
|
+
# LOG.exception(ex)
|
61
|
+
LOG.error("Resetting connection and trying again.")
|
62
|
+
self._client.reset()
|
63
|
+
time.sleep(5)
|
53
64
|
# Continue to loop
|
54
65
|
return True
|
55
66
|
|
67
|
+
def _process_packet(self, *args, **kwargs):
|
68
|
+
"""Intermediate callback so we can update the keepalive time."""
|
69
|
+
# Now call the 'real' packet processing for a RX'x packet
|
70
|
+
self.process_packet(*args, **kwargs)
|
71
|
+
|
56
72
|
@abc.abstractmethod
|
57
73
|
def process_packet(self, *args, **kwargs):
|
58
74
|
pass
|
@@ -80,7 +96,8 @@ class APRSDDupeRXThread(APRSDRXThread):
|
|
80
96
|
"""
|
81
97
|
packet = self._client.decode_packet(*args, **kwargs)
|
82
98
|
# LOG.debug(raw)
|
83
|
-
|
99
|
+
packet_log.log(packet)
|
100
|
+
pkt_list = packets.PacketList()
|
84
101
|
|
85
102
|
if isinstance(packet, packets.AckPacket):
|
86
103
|
# We don't need to drop AckPackets, those should be
|
@@ -91,7 +108,6 @@ class APRSDDupeRXThread(APRSDRXThread):
|
|
91
108
|
# For RF based APRS Clients we can get duplicate packets
|
92
109
|
# So we need to track them and not process the dupes.
|
93
110
|
found = False
|
94
|
-
pkt_list = packets.PacketList()
|
95
111
|
try:
|
96
112
|
# Find the packet in the list of already seen packets
|
97
113
|
# Based on the packet.key
|
@@ -100,14 +116,11 @@ class APRSDDupeRXThread(APRSDRXThread):
|
|
100
116
|
found = False
|
101
117
|
|
102
118
|
if not found:
|
103
|
-
#
|
104
|
-
|
105
|
-
# because it's a dupe within the time that
|
106
|
-
# we send the 3 acks for the packet.
|
107
|
-
pkt_list.rx(packet)
|
119
|
+
# We haven't seen this packet before, so we process it.
|
120
|
+
collector.PacketCollector().rx(packet)
|
108
121
|
self.packet_queue.put(packet)
|
109
122
|
elif packet.timestamp - found.timestamp < CONF.packet_dupe_timeout:
|
110
|
-
# If the packet came in within
|
123
|
+
# If the packet came in within N seconds of the
|
111
124
|
# Last time seeing the packet, then we drop it as a dupe.
|
112
125
|
LOG.warning(f"Packet {packet.from_call}:{packet.msgNo} already tracked, dropping.")
|
113
126
|
else:
|
@@ -115,7 +128,7 @@ class APRSDDupeRXThread(APRSDRXThread):
|
|
115
128
|
f"Packet {packet.from_call}:{packet.msgNo} already tracked "
|
116
129
|
f"but older than {CONF.packet_dupe_timeout} seconds. processing.",
|
117
130
|
)
|
118
|
-
|
131
|
+
collector.PacketCollector().rx(packet)
|
119
132
|
self.packet_queue.put(packet)
|
120
133
|
|
121
134
|
|
@@ -137,21 +150,24 @@ class APRSDProcessPacketThread(APRSDThread):
|
|
137
150
|
def __init__(self, packet_queue):
|
138
151
|
self.packet_queue = packet_queue
|
139
152
|
super().__init__("ProcessPKT")
|
140
|
-
self._loop_cnt = 1
|
141
153
|
|
142
154
|
def process_ack_packet(self, packet):
|
143
155
|
"""We got an ack for a message, no need to resend it."""
|
144
156
|
ack_num = packet.msgNo
|
145
|
-
LOG.
|
146
|
-
|
147
|
-
|
157
|
+
LOG.debug(f"Got ack for message {ack_num}")
|
158
|
+
collector.PacketCollector().rx(packet)
|
159
|
+
|
160
|
+
def process_piggyback_ack(self, packet):
|
161
|
+
"""We got an ack embedded in a packet."""
|
162
|
+
ack_num = packet.ackMsgNo
|
163
|
+
LOG.debug(f"Got PiggyBackAck for message {ack_num}")
|
164
|
+
collector.PacketCollector().rx(packet)
|
148
165
|
|
149
166
|
def process_reject_packet(self, packet):
|
150
167
|
"""We got a reject message for a packet. Stop sending the message."""
|
151
168
|
ack_num = packet.msgNo
|
152
|
-
LOG.
|
153
|
-
|
154
|
-
pkt_tracker.remove(ack_num)
|
169
|
+
LOG.debug(f"Got REJECT for message {ack_num}")
|
170
|
+
collector.PacketCollector().rx(packet)
|
155
171
|
|
156
172
|
def loop(self):
|
157
173
|
try:
|
@@ -160,12 +176,11 @@ class APRSDProcessPacketThread(APRSDThread):
|
|
160
176
|
self.process_packet(packet)
|
161
177
|
except queue.Empty:
|
162
178
|
pass
|
163
|
-
self._loop_cnt += 1
|
164
179
|
return True
|
165
180
|
|
166
181
|
def process_packet(self, packet):
|
167
182
|
"""Process a packet received from aprs-is server."""
|
168
|
-
LOG.debug(f"ProcessPKT-LOOP {self.
|
183
|
+
LOG.debug(f"ProcessPKT-LOOP {self.loop_count}")
|
169
184
|
our_call = CONF.callsign.lower()
|
170
185
|
|
171
186
|
from_call = packet.from_call
|
@@ -188,6 +203,10 @@ class APRSDProcessPacketThread(APRSDThread):
|
|
188
203
|
):
|
189
204
|
self.process_reject_packet(packet)
|
190
205
|
else:
|
206
|
+
if hasattr(packet, "ackMsgNo") and packet.ackMsgNo:
|
207
|
+
# we got an ack embedded in this packet
|
208
|
+
# we need to handle the ack
|
209
|
+
self.process_piggyback_ack(packet)
|
191
210
|
# Only ack messages that were sent directly to us
|
192
211
|
if isinstance(packet, packets.MessagePacket):
|
193
212
|
if to_call and to_call.lower() == our_call:
|
aprsd/threads/stats.py
ADDED
@@ -0,0 +1,44 @@
|
|
1
|
+
import logging
|
2
|
+
import threading
|
3
|
+
import time
|
4
|
+
|
5
|
+
from oslo_config import cfg
|
6
|
+
import wrapt
|
7
|
+
|
8
|
+
from aprsd.stats import collector
|
9
|
+
from aprsd.threads import APRSDThread
|
10
|
+
from aprsd.utils import objectstore
|
11
|
+
|
12
|
+
|
13
|
+
CONF = cfg.CONF
|
14
|
+
LOG = logging.getLogger("APRSD")
|
15
|
+
|
16
|
+
|
17
|
+
class StatsStore(objectstore.ObjectStoreMixin):
    """Container to save the stats from the collector.

    Persistence (save/load) is inherited from ObjectStoreMixin; this
    class only holds the most recent stats snapshot in ``data``.
    """

    # Guards concurrent replacement of ``data``.
    lock = threading.Lock()
    data = {}

    @wrapt.synchronized(lock)
    def add(self, stats: dict):
        """Replace the stored snapshot with the latest collected stats."""
        self.data = stats
|
25
|
+
|
26
|
+
|
27
|
+
class APRSDStatsStoreThread(APRSDThread):
    """Save APRSD Stats to disk periodically."""

    # Number of loop passes (~seconds) between writes to disk.
    save_interval = 10

    def __init__(self):
        super().__init__("StatsStore")

    def loop(self):
        # Every ``save_interval`` passes, grab a fresh snapshot from the
        # collector and persist it via the StatsStore object store.
        if self.loop_count % self.save_interval == 0:
            snapshot = collector.Collector().collect()
            store = StatsStore()
            store.add(snapshot)
            store.save()

        time.sleep(1)
        return True
|
aprsd/threads/tx.py
CHANGED
@@ -1,4 +1,5 @@
|
|
1
1
|
import logging
|
2
|
+
import threading
|
2
3
|
import time
|
3
4
|
|
4
5
|
from oslo_config import cfg
|
@@ -6,11 +7,14 @@ from rush import quota, throttle
|
|
6
7
|
from rush.contrib import decorator
|
7
8
|
from rush.limiters import periodic
|
8
9
|
from rush.stores import dictionary
|
10
|
+
import wrapt
|
9
11
|
|
10
12
|
from aprsd import client
|
11
13
|
from aprsd import conf # noqa
|
12
14
|
from aprsd import threads as aprsd_threads
|
13
|
-
from aprsd.packets import
|
15
|
+
from aprsd.packets import collector, core
|
16
|
+
from aprsd.packets import log as packet_log
|
17
|
+
from aprsd.packets import tracker
|
14
18
|
|
15
19
|
|
16
20
|
CONF = cfg.CONF
|
@@ -35,14 +39,19 @@ ack_t = throttle.Throttle(
|
|
35
39
|
|
36
40
|
msg_throttle_decorator = decorator.ThrottleDecorator(throttle=msg_t)
|
37
41
|
ack_throttle_decorator = decorator.ThrottleDecorator(throttle=ack_t)
|
42
|
+
s_lock = threading.Lock()
|
38
43
|
|
39
44
|
|
45
|
+
@wrapt.synchronized(s_lock)
|
40
46
|
@msg_throttle_decorator.sleep_and_retry
|
41
47
|
def send(packet: core.Packet, direct=False, aprs_client=None):
|
42
48
|
"""Send a packet either in a thread or directly to the client."""
|
43
49
|
# prepare the packet for sending.
|
44
50
|
# This constructs the packet.raw
|
45
51
|
packet.prepare()
|
52
|
+
# Have to call the collector to track the packet
|
53
|
+
# After prepare, as prepare assigns the msgNo
|
54
|
+
collector.PacketCollector().tx(packet)
|
46
55
|
if isinstance(packet, core.AckPacket):
|
47
56
|
_send_ack(packet, direct=direct, aprs_client=aprs_client)
|
48
57
|
else:
|
@@ -74,8 +83,12 @@ def _send_direct(packet, aprs_client=None):
|
|
74
83
|
cl = client.factory.create()
|
75
84
|
|
76
85
|
packet.update_timestamp()
|
77
|
-
|
78
|
-
|
86
|
+
packet_log.log(packet, tx=True)
|
87
|
+
try:
|
88
|
+
cl.send(packet)
|
89
|
+
except Exception as e:
|
90
|
+
LOG.error(f"Failed to send packet: {packet}")
|
91
|
+
LOG.error(e)
|
79
92
|
|
80
93
|
|
81
94
|
class SendPacketThread(aprsd_threads.APRSDThread):
|
@@ -83,10 +96,7 @@ class SendPacketThread(aprsd_threads.APRSDThread):
|
|
83
96
|
|
84
97
|
def __init__(self, packet):
|
85
98
|
self.packet = packet
|
86
|
-
|
87
|
-
super().__init__(f"TXPKT-{self.packet.msgNo}-{name}")
|
88
|
-
pkt_tracker = tracker.PacketTrack()
|
89
|
-
pkt_tracker.add(packet)
|
99
|
+
super().__init__(f"TX-{packet.to_call}-{self.packet.msgNo}")
|
90
100
|
|
91
101
|
def loop(self):
|
92
102
|
"""Loop until a message is acked or it gets delayed.
|
@@ -112,7 +122,7 @@ class SendPacketThread(aprsd_threads.APRSDThread):
|
|
112
122
|
return False
|
113
123
|
else:
|
114
124
|
send_now = False
|
115
|
-
if packet.send_count
|
125
|
+
if packet.send_count >= packet.retry_count:
|
116
126
|
# we reached the send limit, don't send again
|
117
127
|
# TODO(hemna) - Need to put this in a delayed queue?
|
118
128
|
LOG.info(
|
@@ -121,8 +131,7 @@ class SendPacketThread(aprsd_threads.APRSDThread):
|
|
121
131
|
"Message Send Complete. Max attempts reached"
|
122
132
|
f" {packet.retry_count}",
|
123
133
|
)
|
124
|
-
|
125
|
-
pkt_tracker.remove(packet.msgNo)
|
134
|
+
pkt_tracker.remove(packet.msgNo)
|
126
135
|
return False
|
127
136
|
|
128
137
|
# Message is still outstanding and needs to be acked.
|
@@ -141,7 +150,7 @@ class SendPacketThread(aprsd_threads.APRSDThread):
|
|
141
150
|
# no attempt time, so lets send it, and start
|
142
151
|
# tracking the time.
|
143
152
|
packet.last_send_time = int(round(time.time()))
|
144
|
-
|
153
|
+
_send_direct(packet)
|
145
154
|
packet.send_count += 1
|
146
155
|
|
147
156
|
time.sleep(1)
|
@@ -152,22 +161,24 @@ class SendPacketThread(aprsd_threads.APRSDThread):
|
|
152
161
|
|
153
162
|
class SendAckThread(aprsd_threads.APRSDThread):
|
154
163
|
loop_count: int = 1
|
164
|
+
max_retries = 3
|
155
165
|
|
156
166
|
def __init__(self, packet):
|
157
167
|
self.packet = packet
|
158
|
-
super().__init__(f"
|
168
|
+
super().__init__(f"TXAck-{packet.to_call}-{self.packet.msgNo}")
|
169
|
+
self.max_retries = CONF.default_ack_send_count
|
159
170
|
|
160
171
|
def loop(self):
|
161
172
|
"""Separate thread to send acks with retries."""
|
162
173
|
send_now = False
|
163
|
-
if self.packet.send_count == self.
|
174
|
+
if self.packet.send_count == self.max_retries:
|
164
175
|
# we reached the send limit, don't send again
|
165
176
|
# TODO(hemna) - Need to put this in a delayed queue?
|
166
|
-
LOG.
|
177
|
+
LOG.debug(
|
167
178
|
f"{self.packet.__class__.__name__}"
|
168
179
|
f"({self.packet.msgNo}) "
|
169
180
|
"Send Complete. Max attempts reached"
|
170
|
-
f" {self.
|
181
|
+
f" {self.max_retries}",
|
171
182
|
)
|
172
183
|
return False
|
173
184
|
|
@@ -188,7 +199,7 @@ class SendAckThread(aprsd_threads.APRSDThread):
|
|
188
199
|
send_now = True
|
189
200
|
|
190
201
|
if send_now:
|
191
|
-
|
202
|
+
_send_direct(self.packet)
|
192
203
|
self.packet.send_count += 1
|
193
204
|
self.packet.last_send_time = int(round(time.time()))
|
194
205
|
|
@@ -230,7 +241,15 @@ class BeaconSendThread(aprsd_threads.APRSDThread):
|
|
230
241
|
comment="APRSD GPS Beacon",
|
231
242
|
symbol=CONF.beacon_symbol,
|
232
243
|
)
|
233
|
-
|
244
|
+
try:
|
245
|
+
# Only send it once
|
246
|
+
pkt.retry_count = 1
|
247
|
+
send(pkt, direct=True)
|
248
|
+
except Exception as e:
|
249
|
+
LOG.error(f"Failed to send beacon: {e}")
|
250
|
+
client.factory.create().reset()
|
251
|
+
time.sleep(5)
|
252
|
+
|
234
253
|
self._loop_cnt += 1
|
235
254
|
time.sleep(1)
|
236
255
|
return True
|
aprsd/utils/__init__.py
CHANGED
@@ -1,6 +1,7 @@
|
|
1
1
|
"""Utilities and helper functions."""
|
2
2
|
|
3
3
|
import errno
|
4
|
+
import functools
|
4
5
|
import os
|
5
6
|
import re
|
6
7
|
import sys
|
@@ -22,6 +23,17 @@ else:
|
|
22
23
|
from collections.abc import MutableMapping
|
23
24
|
|
24
25
|
|
26
|
+
def singleton(cls):
    """Decorator restricting *cls* to a single shared instance.

    The first call constructs the object; every subsequent call returns
    that same object (any new constructor arguments are ignored).
    """
    @functools.wraps(cls)
    def get_instance(*args, **kwargs):
        # Lazily build the one-and-only instance on first use.
        if get_instance.instance is None:
            get_instance.instance = cls(*args, **kwargs)
        return get_instance.instance
    get_instance.instance = None
    return get_instance
|
35
|
+
|
36
|
+
|
25
37
|
def env(*vars, **kwargs):
|
26
38
|
"""This returns the first environment variable set.
|
27
39
|
if none are non-empty, defaults to '' or keyword arg default
|
aprsd/utils/counter.py
CHANGED
@@ -1,9 +1,13 @@
|
|
1
1
|
from multiprocessing import RawValue
|
2
|
+
import random
|
2
3
|
import threading
|
3
4
|
|
4
5
|
import wrapt
|
5
6
|
|
6
7
|
|
8
|
+
MAX_PACKET_ID = 9999
|
9
|
+
|
10
|
+
|
7
11
|
class PacketCounter:
|
8
12
|
"""
|
9
13
|
Global Packet id counter class.
|
@@ -17,19 +21,18 @@ class PacketCounter:
|
|
17
21
|
"""
|
18
22
|
|
19
23
|
_instance = None
|
20
|
-
max_count = 9999
|
21
24
|
lock = threading.Lock()
|
22
25
|
|
23
26
|
def __new__(cls, *args, **kwargs):
|
24
27
|
"""Make this a singleton class."""
|
25
28
|
if cls._instance is None:
|
26
29
|
cls._instance = super().__new__(cls, *args, **kwargs)
|
27
|
-
cls._instance.val = RawValue("i", 1)
|
30
|
+
cls._instance.val = RawValue("i", random.randint(1, MAX_PACKET_ID))
|
28
31
|
return cls._instance
|
29
32
|
|
30
33
|
@wrapt.synchronized(lock)
|
31
34
|
def increment(self):
|
32
|
-
if self.val.value ==
|
35
|
+
if self.val.value == MAX_PACKET_ID:
|
33
36
|
self.val.value = 1
|
34
37
|
else:
|
35
38
|
self.val.value += 1
|