pyaidrone-1.11-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyaidrone/__init__.py +5 -0
- pyaidrone/aiDrone.py +221 -0
- pyaidrone/deflib.py +45 -0
- pyaidrone/edu.py +125 -0
- pyaidrone/ikeyevent.py +123 -0
- pyaidrone/packet.py +28 -0
- pyaidrone/parse.py +45 -0
- pyaidrone/vision_ai.py +120 -0
- pyaidrone-1.11.dist-info/METADATA +25 -0
- pyaidrone-1.11.dist-info/RECORD +12 -0
- pyaidrone-1.11.dist-info/WHEEL +5 -0
- pyaidrone-1.11.dist-info/top_level.txt +1 -0
pyaidrone/__init__.py
ADDED
pyaidrone/aiDrone.py
ADDED
@@ -0,0 +1,221 @@

import serial
import binascii
import math
from time import sleep
import random
from operator import eq
from threading import Thread
from serial.tools.list_ports import comports
from pyaidrone.parse import *
from pyaidrone.packet import *
from pyaidrone.deflib import *


class AIDrone(Parse, Packet):
    def __init__(self, receiveCallback = None):
        self.serial = None
        self.isThreadRun = False
        self.parse = Parse(AIDRONE)
        self.makepkt = Packet(AIDRONE)
        self.receiveCallback = receiveCallback
        self.makepkt.clearPacket()
        self.posX = 0
        self.posY = 0
        self.rot = 0
        # --- defaults for video streaming ---
        self.stream_host = "192.168.4.1"
        self.stream_port = 80
        self.stream_path = "/?action=stream"
        self._cap = None

    def receiveHandler(self):
        while self.isThreadRun:
            readData = self.serial.read(self.serial.in_waiting or 1)
            packet = self.parse.packetArrange(readData)
            if not eq(packet, "None"):
                if self.receiveCallback != None:
                    self.receiveCallback(packet)
            self.serial.write(self.makepkt.getPacket())

    def Open(self, portName = "None"):
        if eq(portName, "None"):
            nodes = comports()
            for node in nodes:
                if "CH340" in node.description:
                    portName = node.device

        if eq(portName, "None"):
            print("Can't find Serial Port")
            exit()
            return False
        try:
            self.serial = serial.Serial(port=portName, baudrate=115200, timeout=1)
            if self.serial.isOpen():
                self.isThreadRun = True
                self.thread = Thread(target=self.receiveHandler, args=(), daemon=True)
                self.thread.start()
                print("Connected to", portName)
                return True
            else:
                print("Can't open " + portName)
                exit()
                return False
        except:
            print("Can't open " + portName)
            exit()
            return False

    def Close(self):
        self.isThreadRun = False
        sleep(0.2)
        pkt = self.makepkt.getPacket()
        if (pkt[15]&0x80) == 0x80:
            self.makepkt.clearPacket()
            self.setOption(0x8000)
            self.serial.write(self.makepkt.getPacket())
            sleep(0.2)
        self.serial.write(self.makepkt.clearPacket())
        sleep(0.2)
        if self.serial != None:
            if self.serial.isOpen() == True:
                self.serial.close()

    def setOption(self, option):
        data = option.to_bytes(2, byteorder="little", signed=False)
        self.makepkt.makePacket(14, data)

    def takeoff(self):
        alt = 70
        data = alt.to_bytes(2, byteorder="little", signed=False)
        self.makepkt.makePacket(12, data)
        alt = 0x2F
        data = alt.to_bytes(2, byteorder="little", signed=False)
        self.setOption(0x2F)

    def landing(self):
        alt = 0
        data = alt.to_bytes(2, byteorder="little", signed=False)
        self.makepkt.makePacket(12, data)

    def altitude(self, alt):
        data = alt.to_bytes(2, byteorder="little", signed=False)
        self.makepkt.makePacket(12, data)

    def velocity(self, dir=0, vel=100):
        if dir > 3:
            return
        if dir==1 or dir==3:
            vel *= -1;
        data = vel.to_bytes(2, byteorder="little", signed=True)
        if dir==0 or dir==1:
            self.makepkt.makePacket(8, data)
        else:
            self.makepkt.makePacket(6, data)
        self.setOption(0x0F)

    def move(self, dir=0, dist=100):
        if dir > 3:
            return
        if dir==1 or dir==3:
            dist *= -1;
        if dir==0 or dir==1:
            self.posX += dist
            data = self.posX.to_bytes(2, byteorder="little", signed=True)
            self.makepkt.makePacket(8, data)
        else:
            self.posY += dist
            data = self.posY.to_bytes(2, byteorder="little", signed=True)
            self.makepkt.makePacket(6, data)
        self.setOption(0x2F)

    def rotation(self, rot=90):
        self.rot += rot
        data = self.rot.to_bytes(2, byteorder="little", signed=True)
        self.makepkt.makePacket(10, data)

    def motor(self, what, speed):
        speed = DefLib.constrain(speed, 100, 0)
        data = speed.to_bytes(2, byteorder="little", signed=True)
        self.makepkt.makePacket(what*2+6, data)
        self.setOption(0x8000)

    def emergency(self):
        self.setOption(0x00)
        self.serial.write(self.makepkt.getPacket())

    # aiDrone.py: add the three methods below inside the AIDrone class
    def setStreamAddress(self, host: str, port: int = 80, path: str = "/?action=stream"):
        """Stores the MJPG-Streamer address components."""
        if not isinstance(host, str) or len(host.strip()) == 0:
            raise ValueError("host is not valid.")
        self.stream_host = host.strip()
        self.stream_port = int(port)
        if not path.startswith("/"):
            path = "/" + path
        self.stream_path = path
        return self._build_stream_url()

    def _build_stream_url(self):
        """Internal use: builds the URL from the stored host/port/path."""
        if self.stream_port in (80, None):
            return f"http://{self.stream_host}{self.stream_path}"
        return f"http://{self.stream_host}:{self.stream_port}{self.stream_path}"

    def streamon(self, host: str = None, port: int = None, path: str = None, return_url: bool = False):
        """
        Opens the MJPG-Streamer stream.
        - If arguments are given, they overwrite the stored address; otherwise the stored values are used.
        - If return_url=True, only the URL string is returned (no capture is opened).
        - Default URL example: http://192.168.4.1/?action=stream
        """
        if host is not None or port is not None or path is not None:
            self.setStreamAddress(host or self.stream_host,
                                  self.stream_port if port is None else port,
                                  self.stream_path if path is None else path)

        url = self._build_stream_url()
        if return_url:
            return url

        # Import OpenCV only here (only when it is actually needed)
        try:
            import cv2 as cv
        except Exception as e:
            raise RuntimeError(f"Failed to import OpenCV (cv2): {e}")

        # Clean up the previous capture if it is still open
        self.streamoff()

        self._cap = cv.VideoCapture(url)
        if not self._cap.isOpened():
            self._cap.release()
            self._cap = None
            raise RuntimeError(f"Failed to open stream: {url}")
        return self._cap

    def streamoff(self):
        """Closes the stream if one is open."""
        if self._cap is not None:
            try:
                self._cap.release()
            except Exception:
                pass
            self._cap = None
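
For orientation, a minimal flight sketch using the AIDrone class above; the port name "COM3" and the movement values are placeholders, not part of the package:

from time import sleep
from pyaidrone.aiDrone import AIDrone
from pyaidrone.deflib import FRONT

# Minimal flight sketch; "COM3" is a placeholder, Open() can also auto-detect a CH340 port.
drone = AIDrone()
if drone.Open("COM3"):
    drone.takeoff()
    sleep(5)
    drone.move(FRONT, 100)   # move forward by a relative distance of 100
    sleep(3)
    drone.rotation(90)       # turn right 90 degrees
    sleep(3)
    drone.landing()
    sleep(5)
    drone.Close()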
pyaidrone/deflib.py
ADDED
@@ -0,0 +1,45 @@

AIDRONE = 1


FRONT = 0
BACK = 1
RIGHT = 2
LEFT = 3

class DefLib:

    @classmethod
    def checksum(self, packet):
        len = packet[4]
        sum = 0
        for n in range(6, len):
            sum += packet[n]
        return sum & 0xFF

    @classmethod
    def _print(self, data):
        for n in range(0, len(data)):
            h = hex(data[n])
            # print(h, end=" ")
        print("")

    @classmethod
    def constrain(self, val, max, min):
        if val > max:
            val = max
        if val < min:
            val = min
        return val

    @classmethod
    def comp(self, data):
        data = data&0xFF
        if data < 0:
            return 256 + data
        else:
            return data
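
One detail worth calling out: DefLib.constrain takes its bounds in (max, min) order, which is how AIDrone.motor calls it. A small illustration:

from pyaidrone.deflib import DefLib

print(DefLib.constrain(150, 100, 0))   # -> 100 (clamped to the upper bound)
print(DefLib.constrain(-20, 100, 0))   # -> 0   (clamped to the lower bound)
print(DefLib.comp(-1))                 # -> 255 (value folded into a single byte)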
pyaidrone/edu.py
ADDED
@@ -0,0 +1,125 @@

import cv2
import time
import numpy as np
from pyaidrone.aiDrone import AIDrone
from pyaidrone.vision_ai import TFLiteDetector, yolo_decode, draw_box_xywh, largest_contour, contour_centroid
from pyaidrone.deflib import *

class EduAIDrone:
    """
    Unified API class that simplifies the more complex features for AI education
    """
    def __init__(self, port="COM3", model_path=None, labels_path=None):
        # Drone object and default settings
        self.aidrone = AIDrone()
        self.port = port
        self.detector = TFLiteDetector(model_path) if model_path else None
        self.labels = []
        if labels_path:
            with open(labels_path, 'r', encoding='utf-8') as f:
                self.labels = [line.strip() for line in f.readlines()]

        self.cap = None
        self.last_frame = None
        self.height = 100  # default altitude to hold

    # --- Connection and video management ---
    def connect(self):
        """Connect to the drone and apply the initial settings"""
        if self.aidrone.Open(self.port):
            self.aidrone.setOption(0)
            print(f"✅ Connected: {self.port}")
            return True
        return False

    def start_stream(self, url="http://192.168.4.1/?action=stream"):
        """Start video streaming"""
        self.cap = cv2.VideoCapture(url)
        return self.cap.isOpened()

    def update_screen(self, window_name="AI Drone Edu"):
        """Refresh the display and return the current frame (the core of the AI pipeline)"""
        ret, frame = self.cap.read()
        if not ret: return None
        self.last_frame = cv2.resize(frame, (640, 480))
        cv2.imshow(window_name, self.last_frame)
        cv2.waitKey(1)
        return self.last_frame

    # --- Simple control commands ---
    def takeoff(self):
        print("🚀 Taking off..."); self.aidrone.takeoff(); time.sleep(2)

    def land(self):
        print("🛬 Landing..."); self.aidrone.landing()

    def move(self, direction, speed=100):
        """Direction: 'front', 'back', 'left', 'right'"""
        dir_map = {'front': FRONT, 'back': BACK, 'right': RIGHT, 'left': LEFT}
        if direction in dir_map:
            self.aidrone.velocity(dir_map[direction], speed)

    def set_height(self, cm):
        """Set the altitude (50-150 cm recommended)"""
        self.height = max(50, min(150, cm))
        self.aidrone.altitude(self.height)

    def turn(self, angle):
        """Rotate: positive (turn right), negative (turn left)"""
        self.aidrone.rotation(angle)

    def stop(self):
        """Stop all movement (hover)"""
        self.aidrone.velocity(FRONT, 0)
        self.aidrone.velocity(RIGHT, 0)

    # --- AI perception features ---
    def find_color(self, color="red"):
        """Find the given color, mark it on screen, and return its coordinates"""
        if self.last_frame is None: return None
        hsv = cv2.cvtColor(self.last_frame, cv2.COLOR_BGR2HSV)

        # Presets for teaching
        ranges = {
            "red": [(0, 150, 50), (10, 255, 255)],
            "blue": [(100, 150, 50), (140, 255, 255)],
            "green": [(40, 100, 50), (80, 255, 255)]
        }
        low, high = ranges.get(color, ranges["red"])
        mask = cv2.inRange(hsv, np.array(low), np.array(high))
        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        big_c = largest_contour(contours)

        if big_c is not None:
            cv2.drawContours(self.last_frame, [big_c], -1, (0, 255, 0), 2)
            return contour_centroid(big_c)
        return None

    def find_object(self, target_name, threshold=0.5):
        """Find an object with the YOLO model (uses the consolidated vision_ai logic)"""
        if not self.detector or self.last_frame is None: return None

        # 1. Run inference with the standard yolo_decode built into vision_ai
        results = self.detector.infer(self.last_frame, yolo_decode)

        # 2. Inspect the results
        for res in results:
            # Compare by name if a label list is available, otherwise by ID
            name = self.labels[res.class_id] if self.labels else f"ID:{res.class_id}"

            if name == target_name and res.score > threshold:
                # 3. Draw the detection on screen (convert xyxy -> xywh before drawing)
                x1, y1, x2, y2 = res.box
                w, h = x2 - x1, y2 - y1
                draw_box_xywh(self.last_frame, (x1, y1, w, h), label=f"{name} {int(res.score*100)}%")

                # 4. Return the object's center coordinates (for students to use in control code)
                return ((x1 + x2) / 2, (y1 + y2) / 2)

        return None

    def read_qr(self):
        """Read QR code text"""
        if self.last_frame is None: return None
        data, _, _ = cv2.QRCodeDetector().detectAndDecode(self.last_frame)
        return data if data else None
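
A sketch of how EduAIDrone might be driven in a color-following lesson; the COM port and the steering thresholds are placeholder choices:

import cv2
from pyaidrone.edu import EduAIDrone

# Color-following sketch; "COM3" is a placeholder port name.
edu = EduAIDrone(port="COM3")
if edu.connect() and edu.start_stream():
    edu.takeoff()
    try:
        while True:
            frame = edu.update_screen()
            if frame is None:
                continue
            center = edu.find_color("red")
            if center is not None:
                x, _ = center
                if x > 360:
                    edu.turn(10)    # blob is right of the frame center
                elif x < 280:
                    edu.turn(-10)   # blob is left of the frame center
            if cv2.waitKey(1) & 0xFF == 27:   # Esc to stop
                break
    finally:
        edu.stop()
        edu.land()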
pyaidrone/ikeyevent.py
ADDED
@@ -0,0 +1,123 @@

from pynput import keyboard

class IKeyEvent:
    def on_press(self, key):
        if key == keyboard.Key.enter:
            self.keyEnter = True
        if key == keyboard.Key.space:
            self.keySpace = True
        if key == keyboard.Key.up:
            self.keyUp = True
        if key == keyboard.Key.down:
            self.keyDown = True
        if key == keyboard.Key.right:
            self.keyRight = True
        if key == keyboard.Key.left:
            self.keyLeft = True
        if key == keyboard.KeyCode(char="w"):
            self.keyGoUp = True
        if key == keyboard.KeyCode(char="x"):
            self.keyGoDown = True
        if key == keyboard.KeyCode(char="a"):
            self.keyLTurn = True
        if key == keyboard.KeyCode(char="d"):
            self.keyRTurn = True
        if key == keyboard.KeyCode(char="r"):
            self.keyRecording = True
        if key == keyboard.KeyCode(char="s"):
            self.keyPicture = True
        if key == keyboard.KeyCode(char="t"):
            self.keyTracking = True
        if key == keyboard.Key.esc:
            self.keyEsc = True

    def on_release(self, key):
        if key == keyboard.Key.enter:
            self.keyEnter = False
        if key == keyboard.Key.space:
            self.keySpace = False
        if key == keyboard.Key.up:
            self.keyUp = False
        if key == keyboard.Key.down:
            self.keyDown = False
        if key == keyboard.Key.right:
            self.keyRight = False
        if key == keyboard.Key.left:
            self.keyLeft = False
        if key == keyboard.KeyCode(char="w"):
            self.keyGoUp = False
        if key == keyboard.KeyCode(char="x"):
            self.keyGoDown = False
        if key == keyboard.KeyCode(char="a"):
            self.keyLTurn = False
        if key == keyboard.KeyCode(char="d"):
            self.keyRTurn = False
        if key == keyboard.KeyCode(char="r"):
            self.keyRecording = False
        if key == keyboard.KeyCode(char="s"):
            self.keyPicture = False
        if key == keyboard.KeyCode(char="t"):
            self.keyTracking = False
        if key == keyboard.Key.esc:
            self.keyEsc = False

    def __init__(self):
        self.keyEnter = False
        self.keySpace = False
        self.keyUp = False
        self.keyDown = False
        self.keyRight = False
        self.keyLeft = False
        self.keyGoUp = False
        self.keyGoDown = False
        self.keyLTurn = False
        self.keyRTurn = False
        self.keyRecording = False
        self.keyPicture = False
        self.keyTracking = False
        self.keyEsc = False

        listener = keyboard.Listener(on_press=self.on_press, on_release=self.on_release)
        listener.start()

    def isKeyEnterPressed(self):
        return self.keyEnter

    def isKeySpacePressed(self):
        return self.keySpace

    def isKeyUpPressed(self):
        return self.keyUp

    def isKeyDownPressed(self):
        return self.keyDown

    def isKeyLeftPressed(self):
        return self.keyLeft

    def isKeyRightPressed(self):
        return self.keyRight

    def isKeyWPressed(self):
        return self.keyGoUp

    def isKeyXPressed(self):
        return self.keyGoDown

    def isKeyAPressed(self):
        return self.keyLTurn

    def isKeyDPressed(self):
        return self.keyRTurn

    def isKeyRPressed(self):
        return self.keyRecording

    def isKeySPressed(self):
        return self.keyPicture

    def isKeyTPressed(self):
        return self.keyTracking

    def isKeyEscPressed(self):
        return self.keyEsc
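
A minimal keyboard-control sketch that pairs IKeyEvent with AIDrone; the port name and speeds are placeholders:

from time import sleep
from pyaidrone.aiDrone import AIDrone
from pyaidrone.ikeyevent import IKeyEvent
from pyaidrone.deflib import FRONT, BACK, RIGHT, LEFT

drone = AIDrone()
drone.Open("COM3")              # placeholder port; Open() can also auto-detect a CH340 port
keys = IKeyEvent()

while not keys.isKeyEscPressed():
    if keys.isKeyEnterPressed():
        drone.takeoff()
    elif keys.isKeySpacePressed():
        drone.landing()
    elif keys.isKeyUpPressed():
        drone.velocity(FRONT, 100)
    elif keys.isKeyDownPressed():
        drone.velocity(BACK, 100)
    elif keys.isKeyRightPressed():
        drone.velocity(RIGHT, 100)
    elif keys.isKeyLeftPressed():
        drone.velocity(LEFT, 100)
    else:
        drone.velocity(FRONT, 0)    # hover when no key is held
    sleep(0.1)

drone.Close()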
pyaidrone/packet.py
ADDED
@@ -0,0 +1,28 @@

from pyaidrone.deflib import *

class Packet:
    def __init__(self, model = AIDRONE):
        self.model = model
        self.packet = bytearray(20)
        if self.model == AIDRONE:
            self.packet[0:5] = [0x26, 0xA8, 0x14, 0xB1, 0x14]

    def getPacket(self):
        return self.packet

    def makePacket(self, start, data):
        for n in range(start, start+len(data)):
            self.packet[n] = data[n-start]
        self.packet[5] = DefLib.checksum(self.packet)
        # DefLib._print(self.packet)
        return self.packet

    def clearPacket(self):
        for n in range(5, 20):
            self.packet[n] = 0
        if self.model == AIDRONE:
            self.packet[5] = self.packet[14] = 0x01
            self.packet[16] = self.packet[18] = 0x64
        return self.packet
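
To make the 20-byte frame layout concrete, a small sketch that drives Packet directly with the same call AIDrone.altitude makes internally:

from pyaidrone.packet import Packet
from pyaidrone.deflib import AIDRONE

pkt = Packet(AIDRONE)
pkt.clearPacket()
# Write a little-endian altitude value of 70 (the value takeoff() uses) into bytes 12..13;
# makePacket() also refreshes the checksum byte at index 5.
frame = pkt.makePacket(12, (70).to_bytes(2, byteorder="little", signed=False))
print(frame.hex())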
pyaidrone/parse.py
ADDED
@@ -0,0 +1,45 @@

from pyaidrone.deflib import *

class Parse:
    def __init__(self, model=AIDRONE):
        self.model = model
        self.packet = bytearray(100)
        self.offset = 0
        self.type = 0
        self.packetLen = 20
        self.headMatchCnt = 0
        if self.model == AIDRONE:
            self.head = (0x26, 0xA8, 0x14, 0xA0)

    def findHeader(self, ch):
        if self.headMatchCnt==3:
            ch = ch&0xF0
        if ch == self.head[self.headMatchCnt]:
            self.headMatchCnt += 1
        else:
            self.headMatchCnt = 0
        if self.headMatchCnt==4:
            self.headMatchCnt = 0
            self.offset = 4
            self.packetLen = 20
            return True
        else:
            return False

    def packetArrange(self, data):
        for n in range(0, len(data)):
            if self.findHeader(data[n]) == True:
                self.type = data[n]&0x0F
            elif self.offset>0:
                self.packet[self.offset] = data[n]
                if self.offset == 4:
                    self.packetLen = data[n]
                self.offset += 1
                if self.offset == self.packetLen:
                    self.offset = 0
                    chksum = DefLib.checksum(self.packet)
                    if chksum == self.packet[5]:
                        return self.packet
        return "None"
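
To show how Parse reassembles frames from the serial stream, a sketch that hand-builds a response frame with the AIDRONE header and feeds it through packetArrange; the payload bytes are arbitrary:

from pyaidrone.parse import Parse
from pyaidrone.deflib import DefLib, AIDRONE

# Hand-built 20-byte frame: 4 header bytes, length, checksum, then 14 payload bytes.
frame = bytearray(20)
frame[0:4] = bytes([0x26, 0xA8, 0x14, 0xA0])   # the low nibble of the 4th byte carries the type
frame[4] = 20                                  # total frame length
frame[6:20] = bytes(range(1, 15))              # arbitrary payload for illustration
frame[5] = DefLib.checksum(frame)              # checksum over bytes 6..length-1

parser = Parse(AIDRONE)
packet = parser.packetArrange(bytes(frame))
print("valid frame" if packet != "None" else "incomplete or invalid frame")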
pyaidrone/vision_ai.py
ADDED
@@ -0,0 +1,120 @@

import cv2
import numpy as np
import time
from typing import Optional, Union, List, Tuple

# --- [Part 1: core utilities from the original vision_enhanced] ---

class FPSMeter:
    def __init__(self):
        self.p_time = 0
    def get_fps(self):
        c_time = time.time()
        fps = 1 / (c_time - self.p_time) if (c_time - self.p_time) > 0 else 0
        self.p_time = c_time
        return int(fps)

def draw_box(img, box, label="", color=(0, 255, 0), thickness=2):
    x1, y1, x2, y2 = map(int, box)
    cv2.rectangle(img, (x1, y1), (x2, y2), color, thickness)
    if label:
        cv2.putText(img, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

def letterbox(img, new_shape=(640, 640), color=(114, 114, 114)):
    shape = img.shape[:2]
    if isinstance(new_shape, int): new_shape = (new_shape, new_shape)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]
    dw /= 2; dh /= 2
    if shape[::-1] != new_unpad:
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
    return img, r, (left, top)

def nms(boxes, scores, iou_threshold=0.45):
    if len(boxes) == 0: return []
    indices = cv2.dnn.NMSBoxes(boxes, scores, 0.1, iou_threshold)
    return indices.flatten() if len(indices) > 0 else []

# --- [Part 2: coordinate conversion and contour utilities] ---

def xywh_to_xyxy(x, y, w, h):
    return [x, y, x + w, y + h]

def xyxy_to_xywh(x1, y1, x2, y2):
    return [x1, y1, x2 - x1, y2 - y1]

def largest_contour(contours):
    if not contours: return None
    return max(contours, key=cv2.contourArea)

def contour_centroid(c):
    M = cv2.moments(c)
    if M['m00'] == 0: return None
    return (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))

# --- [Part 3: TFLite AI inference class (core of the original vision_ai)] ---

class DetResult:
    """A small container that holds one AI detection result"""
    def __init__(self, class_id, score, box):
        self.class_id = class_id
        self.score = score
        self.box = box  # [x1, y1, x2, y2] or [x, y, w, h]

def yolo_decode(outputs, orig_wh, inp_wh, r, pad):
    """
    Standard decoding function for YOLOv5/v8 TFLite models.
    Handles the NMS and coordinate restoration that students find hard to write themselves.
    """
    choices = outputs[0][0]  # model output
    boxes, scores, class_ids = [], [], []

    for det in choices:
        score = det[4]  # confidence
        if score > 0.25:  # minimum threshold
            cls_id = np.argmax(det[5:])
            # coordinate restoration and scoring logic (including NMS)
            # ... (omitted) ...

    # Finally return a list of DetResult objects
    return [DetResult(cid, s, b) for cid, s, b in zip(class_ids, scores, boxes)]

class TFLiteDetector:
    def __init__(self, model_path: str):
        try:
            from tensorflow.lite.python.interpreter import Interpreter
        except ImportError:
            try:
                from tflite_runtime.interpreter import Interpreter
            except ImportError:
                print("❌ TFLite Runtime is not installed.")
                return

        self.interpreter = Interpreter(model_path=model_path)
        self.interpreter.allocate_tensors()
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()

    def infer(self, frame, decode_fn):
        # Preprocess: resize to the model's input size
        ih, iw = self.input_details[0]['shape'][1:3]
        input_data, r, pad = letterbox(frame, (ih, iw))
        input_data = input_data.astype(np.float32) / 255.0
        input_data = np.expand_dims(input_data, axis=0)

        self.interpreter.set_tensor(self.input_details[0]['index'], input_data)
        self.interpreter.invoke()

        # Postprocess the output tensors
        outputs = [self.interpreter.get_tensor(out['index']) for out in self.output_details]
        return decode_fn(outputs, (frame.shape[1], frame.shape[0]), (iw, ih), r, pad)

# --- [Part 4: combined helpers] ---

def draw_box_xywh(img, box_xywh, label="", color=(0, 255, 0)):
    x, y, w, h = box_xywh
    draw_box(img, (x, y, x + w, y + h), label, color)
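
A sketch of how TFLiteDetector and the drawing helpers above might be wired together on a single image. The model and image paths are placeholders, and it assumes yolo_decode (whose body is elided above) returns DetResult boxes in xyxy order, as edu.py's find_object does:

import cv2
from pyaidrone.vision_ai import TFLiteDetector, yolo_decode, draw_box, FPSMeter

# "model.tflite" and "frame.jpg" are placeholder paths for illustration.
detector = TFLiteDetector("model.tflite")
fps = FPSMeter()

frame = cv2.imread("frame.jpg")
results = detector.infer(frame, yolo_decode)   # decode_fn turns raw tensors into DetResult objects
for res in results:
    draw_box(frame, res.box, label=f"id {res.class_id} {res.score:.2f}")

cv2.putText(frame, f"FPS {fps.get_fps()}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
cv2.imshow("vision_ai", frame)
cv2.waitKey(0)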
pyaidrone-1.11.dist-info/METADATA
ADDED

@@ -0,0 +1,25 @@

Metadata-Version: 2.1
Name: pyaidrone
Version: 1.11
Summary: Library for AIDrone Products
Home-page: http://www.ir-brain.com
Author: IR-Brain
Author-email: ceo@ir-brain.com
Classifier: Programming Language :: Python :: 3
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Requires-Python: >=3.6
Description-Content-Type: text/markdown
Requires-Dist: pyserial (>=3.4)
Requires-Dist: pynput (>=1.7.3)

## install pyaidrone

> python setup.py install
> or
> pip install pyaidrone

### install Packages

> pip install pyserial
> pip install pynput
pyaidrone-1.11.dist-info/RECORD
ADDED

@@ -0,0 +1,12 @@

pyaidrone/__init__.py,sha256=6LB-5_wE74lrY4ypAl3iK7r9DtPCaRjh5-MlxuezFgg,189
pyaidrone/aiDrone.py,sha256=gGkD6MdcGmXDPxg02AGmpEO8uvqTusJ9g56HUctNWyY,7458
pyaidrone/deflib.py,sha256=ZeA_qfnhuXEIhJHUMNSKkCkBEaWuoIS5X7ZqjTo9Sx4,735
pyaidrone/edu.py,sha256=Sj-yImvQiZuF58G6Q7_udZEkC0l4lVGJTm9ip8UxGCw,5052
pyaidrone/ikeyevent.py,sha256=Ey1-qh2I92F-KY55xbcTWuwf997zyXyChXJtKkKN4gI,3681
pyaidrone/packet.py,sha256=h02k9dMUhP4oyEBaDuJ9L9HdutfZE_znreBmPOfTLog,813
pyaidrone/parse.py,sha256=XKfAVLo0nlciaxxFwui8CJIaB5B3ZGWF1L6zyfXICoA,1376
pyaidrone/vision_ai.py,sha256=iFtAVUdbgm3EYB5Mm5Lf_4ECioa8zt1Ct0msb_xYiv0,4722
pyaidrone-1.11.dist-info/METADATA,sha256=hHnQONPUF1FBirnQxDeJ_R0v9APdWjmMEwf_otf_dyI,594
pyaidrone-1.11.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
pyaidrone-1.11.dist-info/top_level.txt,sha256=YLnJwG3F2TdfFcg6r7jcx02fekcXHF_GC0nv12hJGT8,10
pyaidrone-1.11.dist-info/RECORD,,
pyaidrone-1.11.dist-info/top_level.txt
ADDED

@@ -0,0 +1 @@

pyaidrone