smartpi 1.1.3__py3-none-any.whl → 1.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (125) hide show
  1. smartpi/__init__.pyc +0 -0
  2. smartpi/_gui.pyc +0 -0
  3. smartpi/ai_asr.pyc +0 -0
  4. smartpi/ai_llm.pyc +0 -0
  5. smartpi/ai_tts.pyc +0 -0
  6. smartpi/ai_vad.pyc +0 -0
  7. smartpi/audio.pyc +0 -0
  8. smartpi/base_driver.pyc +0 -0
  9. smartpi/camera.pyc +0 -0
  10. smartpi/color_sensor.pyc +0 -0
  11. smartpi/cw2015.pyc +0 -0
  12. smartpi/flash.pyc +0 -0
  13. smartpi/humidity.pyc +0 -0
  14. smartpi/led.pyc +0 -0
  15. smartpi/light_sensor.pyc +0 -0
  16. smartpi/local_model.pyc +0 -0
  17. smartpi/mcp_client.pyc +0 -0
  18. smartpi/mcp_fastmcp.pyc +0 -0
  19. smartpi/mcp_intent_recognizer.pyc +0 -0
  20. smartpi/models/__init__.pyc +0 -0
  21. smartpi/models/snakers4_silero-vad/__init__.pyc +0 -0
  22. smartpi/models/snakers4_silero-vad/hubconf.pyc +0 -0
  23. smartpi/motor.pyc +0 -0
  24. smartpi/move.pyc +0 -0
  25. smartpi/onnx_hand_workflow.pyc +0 -0
  26. smartpi/onnx_image_workflow.pyc +0 -0
  27. smartpi/onnx_pose_workflow.pyc +0 -0
  28. smartpi/onnx_text_workflow.pyc +0 -0
  29. smartpi/onnx_voice_workflow.pyc +0 -0
  30. smartpi/posemodel/__init__.pyc +0 -0
  31. smartpi/posenet_utils.pyc +0 -0
  32. smartpi/rknn_hand_workflow.pyc +0 -0
  33. smartpi/rknn_image_workflow.pyc +0 -0
  34. smartpi/rknn_pose_workflow.pyc +0 -0
  35. smartpi/rknn_text_workflow.pyc +0 -0
  36. smartpi/rknn_voice_workflow.pyc +0 -0
  37. smartpi/servo.pyc +0 -0
  38. smartpi/temperature.pyc +0 -0
  39. smartpi/tencentcloud-speech-sdk-python/__init__.pyc +0 -0
  40. smartpi/tencentcloud-speech-sdk-python/asr/__init__.pyc +0 -0
  41. smartpi/tencentcloud-speech-sdk-python/asr/flash_recognizer.pyc +0 -0
  42. smartpi/tencentcloud-speech-sdk-python/asr/speech_recognizer.pyc +0 -0
  43. smartpi/tencentcloud-speech-sdk-python/common/__init__.pyc +0 -0
  44. smartpi/tencentcloud-speech-sdk-python/common/credential.pyc +0 -0
  45. smartpi/tencentcloud-speech-sdk-python/common/log.pyc +0 -0
  46. smartpi/tencentcloud-speech-sdk-python/common/utils.pyc +0 -0
  47. smartpi/tencentcloud-speech-sdk-python/soe/__init__.pyc +0 -0
  48. smartpi/tencentcloud-speech-sdk-python/soe/speaking_assessment.pyc +0 -0
  49. smartpi/tencentcloud-speech-sdk-python/tts/__init__.pyc +0 -0
  50. smartpi/tencentcloud-speech-sdk-python/tts/flowing_speech_synthesizer.pyc +0 -0
  51. smartpi/tencentcloud-speech-sdk-python/tts/speech_synthesizer.pyc +0 -0
  52. smartpi/tencentcloud-speech-sdk-python/tts/speech_synthesizer_ws.pyc +0 -0
  53. smartpi/tencentcloud-speech-sdk-python/vc/__init__.pyc +0 -0
  54. smartpi/tencentcloud-speech-sdk-python/vc/speech_convertor_ws.pyc +0 -0
  55. smartpi/text_gte_model/__init__.pyc +0 -0
  56. smartpi/text_gte_model/config/__init__.pyc +0 -0
  57. smartpi/text_gte_model/gte/__init__.pyc +0 -0
  58. smartpi/touch_sensor.pyc +0 -0
  59. smartpi/trace.pyc +0 -0
  60. smartpi/ultrasonic.pyc +0 -0
  61. {smartpi-1.1.3.dist-info → smartpi-1.1.4.dist-info}/METADATA +1 -1
  62. smartpi-1.1.4.dist-info/RECORD +77 -0
  63. smartpi/__init__.py +0 -8
  64. smartpi/_gui.py +0 -66
  65. smartpi/ai_asr.py +0 -1037
  66. smartpi/ai_llm.py +0 -934
  67. smartpi/ai_tts.py +0 -938
  68. smartpi/ai_vad.py +0 -83
  69. smartpi/audio.py +0 -125
  70. smartpi/base_driver.py +0 -618
  71. smartpi/camera.py +0 -84
  72. smartpi/color_sensor.py +0 -18
  73. smartpi/cw2015.py +0 -179
  74. smartpi/flash.py +0 -130
  75. smartpi/humidity.py +0 -20
  76. smartpi/led.py +0 -19
  77. smartpi/light_sensor.py +0 -72
  78. smartpi/local_model.py +0 -432
  79. smartpi/mcp_client.py +0 -100
  80. smartpi/mcp_fastmcp.py +0 -322
  81. smartpi/mcp_intent_recognizer.py +0 -408
  82. smartpi/models/__init__.py +0 -0
  83. smartpi/models/snakers4_silero-vad/__init__.py +0 -0
  84. smartpi/models/snakers4_silero-vad/hubconf.py +0 -56
  85. smartpi/motor.py +0 -177
  86. smartpi/move.py +0 -218
  87. smartpi/onnx_hand_workflow.py +0 -201
  88. smartpi/onnx_image_workflow.py +0 -176
  89. smartpi/onnx_pose_workflow.py +0 -482
  90. smartpi/onnx_text_workflow.py +0 -173
  91. smartpi/onnx_voice_workflow.py +0 -437
  92. smartpi/posemodel/__init__.py +0 -0
  93. smartpi/posenet_utils.py +0 -222
  94. smartpi/rknn_hand_workflow.py +0 -245
  95. smartpi/rknn_image_workflow.py +0 -405
  96. smartpi/rknn_pose_workflow.py +0 -592
  97. smartpi/rknn_text_workflow.py +0 -240
  98. smartpi/rknn_voice_workflow.py +0 -394
  99. smartpi/servo.py +0 -178
  100. smartpi/temperature.py +0 -18
  101. smartpi/tencentcloud-speech-sdk-python/__init__.py +0 -1
  102. smartpi/tencentcloud-speech-sdk-python/asr/__init__.py +0 -0
  103. smartpi/tencentcloud-speech-sdk-python/asr/flash_recognizer.py +0 -178
  104. smartpi/tencentcloud-speech-sdk-python/asr/speech_recognizer.py +0 -311
  105. smartpi/tencentcloud-speech-sdk-python/common/__init__.py +0 -1
  106. smartpi/tencentcloud-speech-sdk-python/common/credential.py +0 -6
  107. smartpi/tencentcloud-speech-sdk-python/common/log.py +0 -16
  108. smartpi/tencentcloud-speech-sdk-python/common/utils.py +0 -7
  109. smartpi/tencentcloud-speech-sdk-python/soe/__init__.py +0 -0
  110. smartpi/tencentcloud-speech-sdk-python/soe/speaking_assessment.py +0 -276
  111. smartpi/tencentcloud-speech-sdk-python/tts/__init__.py +0 -0
  112. smartpi/tencentcloud-speech-sdk-python/tts/flowing_speech_synthesizer.py +0 -294
  113. smartpi/tencentcloud-speech-sdk-python/tts/speech_synthesizer.py +0 -144
  114. smartpi/tencentcloud-speech-sdk-python/tts/speech_synthesizer_ws.py +0 -234
  115. smartpi/tencentcloud-speech-sdk-python/vc/__init__.py +0 -0
  116. smartpi/tencentcloud-speech-sdk-python/vc/speech_convertor_ws.py +0 -237
  117. smartpi/text_gte_model/__init__.py +0 -0
  118. smartpi/text_gte_model/config/__init__.py +0 -0
  119. smartpi/text_gte_model/gte/__init__.py +0 -0
  120. smartpi/touch_sensor.py +0 -16
  121. smartpi/trace.py +0 -120
  122. smartpi/ultrasonic.py +0 -20
  123. smartpi-1.1.3.dist-info/RECORD +0 -77
  124. {smartpi-1.1.3.dist-info → smartpi-1.1.4.dist-info}/WHEEL +0 -0
  125. {smartpi-1.1.3.dist-info → smartpi-1.1.4.dist-info}/top_level.txt +0 -0
@@ -1,408 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- """使用MCP优化的意图识别器"""
3
- import asyncio
4
- import json
5
- import re
6
- from typing import List, Dict, Any
7
- from .mcp_client import MCPClient
8
- import logging
9
-
10
- # 配置日志
11
- logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
12
- logger = logging.getLogger(__name__)
13
-
14
-
15
class MCPIntentRecognizer:
    """MCP-based intent recognizer.

    Sends user utterances to an MCP server tool (``recognize_intent``) and
    normalizes the tool's textual reply into a list of
    ``{"intent": code, "arg": [...]}`` dicts. When the server is unreachable,
    returns nothing usable, or raises, the recognizer falls back to local
    keyword matching over the configured ``global_intents``.
    """

    def __init__(self, global_intents=None):
        """
        Args:
            global_intents: mapping of numeric intent code -> intent info dict.
                Each info dict may carry ``"en_code"`` (str or list of str)
                and ``"keywords"`` (list of str) entries.
        """
        self.mcp_client = None
        self.is_connected = False
        self.server_url = "http://127.0.0.1:8000/mcp"
        self.intent_tool_name = "recognize_intent"
        self.initialize_task = None
        # Guard flag so overlapping calls do not re-enter initialize().
        self._initializing = False
        # Global intent configuration shared with the caller (e.g. ai_llm).
        self.global_intents = global_intents if global_intents is not None else {}

        # English-code -> numeric-code lookup derived from global_intents.
        self.en_code_to_intent_code = {}
        self._build_en_code_mapping()

        logger.info(f"MCPIntentRecognizer初始化,全局意图数量: {len(self.global_intents)}, 英文代码映射数量: {len(self.en_code_to_intent_code)}")

    def _build_en_code_mapping(self):
        """(Re)build ``en_code_to_intent_code`` from ``global_intents``."""
        self.en_code_to_intent_code.clear()
        for intent_code, intent_info in self.global_intents.items():
            if "en_code" in intent_info:
                en_code = intent_info["en_code"]
                # "en_code" may be a single string or a list of aliases.
                if isinstance(en_code, list):
                    for code in en_code:
                        self.en_code_to_intent_code[code.lower()] = intent_code
                else:
                    self.en_code_to_intent_code[en_code.lower()] = intent_code

    def _get_intent_code_from_en_code(self, en_code):
        """Translate an English intent code into its numeric code.

        Args:
            en_code: English intent code (matched case-insensitively).

        Returns:
            The numeric intent code, or None when unknown or not a string.
        """
        if not en_code or not isinstance(en_code, str):
            return None
        return self.en_code_to_intent_code.get(en_code.lower())

    def update_en_code_mapping(self):
        """Refresh the English-code mapping after ``global_intents`` changed."""
        self._build_en_code_mapping()
        logger.info(f"英文代码映射已更新,当前映射数量: {len(self.en_code_to_intent_code)}")

    async def initialize(self):
        """Connect the MCP client to the server; idempotent.

        Returns:
            True when connected, False otherwise.
        """
        # Skip when already connected or another initialize() is in flight.
        if self.is_connected or self._initializing:
            return self.is_connected

        self._initializing = True
        try:
            self.mcp_client = MCPClient()
            result = await self.mcp_client.connect(self.server_url)
            if result:
                logger.info("成功连接到MCP服务器")
                self.is_connected = True
                return True
            logger.warning("连接MCP服务器未成功返回")
            self.is_connected = False
            return False
        except Exception as e:
            logger.error(f"连接MCP服务器失败: {str(e)}")
            self.is_connected = False
            return False
        finally:
            self._initializing = False

    async def recognize_intent_async(self, user_input: str) -> List[Dict[str, Any]]:
        """Recognize intents in *user_input* via the MCP tool (async).

        Returns:
            A list of ``{"intent": code, "arg": [...]}`` dicts; empty when
            neither the server nor the local fallback finds anything.
        """
        # Lazily establish the connection on first use.
        if not self.is_connected:
            await self.initialize()
            if not self.is_connected:
                logger.error("无法连接到MCP服务器")
                return []

        try:
            # Build an OpenAI-style tool call for the MCP client.
            tool_call = {
                "id": "1",
                "type": "function",
                "function": {
                    "name": self.intent_tool_name,
                    "arguments": json.dumps({"user_input": user_input})
                }
            }

            results = await self.mcp_client.call_tool([tool_call])

            if results:
                # Find the tool-role response among the returned messages.
                for tool_result in results:
                    if tool_result.get("role") == "tool":
                        intent_result = tool_result.get("content", "[]")
                        logger.info(f"从MCP接收到的意图结果: {intent_result}")
                        parsed_result = self._parse_intent_result(intent_result)

                        enhanced_result = []
                        if parsed_result:
                            # Fill in missing parameters for intents that need them.
                            for intent_data in parsed_result:
                                if 'arg' not in intent_data or not intent_data['arg']:
                                    intent_code = intent_data['intent']
                                    # Only these intent codes take a numeric
                                    # parameter (volume=2, speed=7); extend the
                                    # list as parameterized intents are added.
                                    parameter_intents = ['2', '7']
                                    if intent_code in parameter_intents:
                                        intent_data['arg'] = self._extract_parameters(user_input, intent_code)
                                    else:
                                        # Parameterless intent: keep an empty list.
                                        intent_data['arg'] = []

                                enhanced_result.append(intent_data)
                        else:
                            # MCP produced nothing -> local keyword extraction.
                            enhanced_result = self._direct_extract_intents(user_input)

                        logger.info(f"增强后的意图识别结果: {enhanced_result}")
                        return enhanced_result
        except Exception as e:
            logger.error(f"通过MCP识别意图失败: {str(e)}")

        # Reached on error or when no tool-role message came back (the old
        # code could implicitly return None here); fall back to direct
        # extraction so the declared list return type always holds.
        return self._direct_extract_intents(user_input)

    def _extract_parameters(self, text, intent):
        """Extract the first numeric parameter found in *text* (as a string)."""
        text_lower = text.lower()

        # Integers or decimals; percentages match through their digits.
        numbers = re.findall(r'\d+(?:\.\d+)?', text_lower)
        if numbers:
            return [numbers[0]]  # first match wins

        return []

    def _direct_extract_intents(self, user_input):
        """Keyword-based fallback recognition using ``global_intents``."""
        text_lower = user_input.lower()
        result = []

        # Use the globally configured intents (passed in from ai_llm) so the
        # keyword lists stay centrally managed.
        for intent_code, intent_info in self.global_intents.items():
            # Skip intents already recognized to avoid duplicates.
            if any(item["intent"] == intent_code for item in result):
                continue

            for keyword in intent_info["keywords"]:
                if keyword in text_lower:
                    args = self._extract_parameters(text_lower, intent_code)
                    result.append({
                        "intent": intent_code,
                        "arg": args
                    })
                    break

        return result

    def recognize_intent(self, user_input: str) -> List[Dict[str, Any]]:
        """Synchronous wrapper around :meth:`recognize_intent_async`.

        MCP-only entry point for non-async callers; returns [] on failure.
        """
        try:
            logger.info(f"开始意图识别: {user_input}")

            # Reuse the current event loop when possible, otherwise make one.
            # NOTE(review): asyncio.get_event_loop() is deprecated outside a
            # running loop on modern Python; kept for behavioral parity.
            try:
                loop = asyncio.get_event_loop()
                if loop.is_closed():
                    raise RuntimeError("Event loop is closed")
            except (RuntimeError, AssertionError):
                loop = asyncio.new_event_loop()
                asyncio.set_event_loop(loop)

            result = loop.run_until_complete(self.recognize_intent_async(user_input))
            logger.info(f"意图识别完成,结果: {result}")
            return result
        except Exception as e:
            logger.error(f"同步调用MCP意图识别失败: {str(e)}")
            return []

    def _parse_intent_result(self, intent_text: str) -> List[Dict[str, Any]]:
        """Normalize the MCP tool's textual reply into a list of intent dicts.

        Tries several parsing strategies in order, converting ``intent_code``
        keys to ``intent`` for consistency with FastIntentRecognizer.
        Returns [] when nothing parses.
        """
        try:
            if not intent_text or intent_text.strip() == "[]":
                return []

            # Templates sometimes emit doubled braces; collapse them first.
            intent_text = intent_text.replace("{{", "{").replace("}}", "}")

            # Strategy 1: a single JSON object.
            try:
                if intent_text.strip().startswith("{"):
                    intent_data = json.loads(intent_text)
                    if "intent" in intent_data:
                        return [intent_data]
                    # Convert an "intent_code" field to "intent".
                    elif "intent_code" in intent_data:
                        converted = {"intent": intent_data["intent_code"]}
                        if "arg" in intent_data:
                            converted["arg"] = intent_data["arg"]
                        return [converted]
            except Exception as e1:
                logger.warning(f"策略1解析失败: {str(e1)}")

            # Strategy 2: a JSON array (wrapping in [] when missing).
            try:
                if not intent_text.strip().startswith("["):
                    formatted_text = "[" + intent_text + "]"
                    intents = json.loads(formatted_text)
                else:
                    intents = json.loads(intent_text)

                if isinstance(intents, list):
                    result = []
                    for intent in intents:
                        if isinstance(intent, dict):
                            if "intent" in intent:
                                result.append(intent)
                            elif "intent_code" in intent:
                                converted = {"intent": intent["intent_code"]}
                                if "arg" in intent:
                                    converted["arg"] = intent["arg"]
                                result.append(converted)
                    return result if result else intents
                elif isinstance(intents, dict):
                    if "intent" in intents:
                        return [intents]
                    elif "intent_code" in intents:
                        converted = {"intent": intents["intent_code"]}
                        if "arg" in intents:
                            converted["arg"] = intents["arg"]
                        return [converted]
                    return [intents]
            except Exception as e2:
                logger.warning(f"策略2解析失败: {str(e2)}")

            # Strategy 3: several comma-separated objects.
            try:
                clean_text = ''.join(intent_text.split())

                if ",{" in clean_text:
                    intent_parts = clean_text.split(",")
                    intents = []
                    for i, part in enumerate(intent_parts):
                        try:
                            # Re-add the braces lost by the naive comma split.
                            if not part.startswith("{"):
                                part = "{" + part
                            if not part.endswith("}"):
                                part = part + "}"
                            intent_data = json.loads(part)
                            if "intent" in intent_data:
                                intents.append(intent_data)
                            elif "intent_code" in intent_data:
                                converted = {"intent": intent_data["intent_code"]}
                                if "arg" in intent_data:
                                    converted["arg"] = intent_data["arg"]
                                intents.append(converted)
                        except Exception as e_inner:
                            logger.warning(f"解析第{i+1}个意图失败: {str(e_inner)}")
                    if intents:
                        return intents
            except Exception as e3:
                logger.warning(f"策略3解析失败: {str(e3)}")

            # Strategy 4: strip all whitespace, repair the braces, parse again.
            # Unlike the earlier strategies this one also maps English intent
            # codes to numeric ones via the lookup table.
            try:
                clean_text = ''.join(intent_text.split())

                if not clean_text.startswith("{") and not clean_text.startswith("["):
                    # Looks like bare key/value text: wrap it as an object.
                    if clean_text.startswith("intent") or ":" in clean_text:
                        clean_text = "{" + clean_text + "}"
                    intent_data = json.loads(clean_text)
                    if "intent" in intent_data:
                        return [intent_data]
                    elif "intent_code" in intent_data:
                        intent_code_value = intent_data["intent_code"]
                        if isinstance(intent_code_value, str) and intent_code_value.isalpha():
                            numeric_code = self._get_intent_code_from_en_code(intent_code_value)
                            if numeric_code:
                                intent_code_value = numeric_code
                        converted = {"intent": intent_code_value}
                        if "arg" in intent_data:
                            converted["arg"] = intent_data["arg"]
                        return [converted]
                else:
                    # Already object/array shaped: parse as-is.
                    intent_data = json.loads(clean_text)
                    if isinstance(intent_data, list):
                        # Map any English codes inside the list to numeric codes.
                        for item in intent_data:
                            if isinstance(item, dict) and "intent" in item:
                                intent_value = item["intent"]
                                if isinstance(intent_value, str) and intent_value.isalpha():
                                    numeric_code = self._get_intent_code_from_en_code(intent_value)
                                    if numeric_code:
                                        item["intent"] = numeric_code
                        return intent_data
                    else:
                        if isinstance(intent_data, dict) and "intent" in intent_data:
                            intent_value = intent_data["intent"]
                            if isinstance(intent_value, str) and intent_value.isalpha():
                                numeric_code = self._get_intent_code_from_en_code(intent_value)
                                if numeric_code:
                                    intent_data["intent"] = numeric_code
                        return [intent_data]
            except Exception as e4:
                logger.warning(f"策略4解析失败: {str(e4)}")

            # All strategies failed.
            logger.error("所有解析尝试都失败了,返回空列表")
            return []
        except Exception as e:
            logger.error(f"解析意图结果时发生严重错误: {str(e)}")
            return []

    async def close(self):
        """Close the MCP connection and release the client."""
        if self.mcp_client:
            try:
                # Prefer the exit stack when the client exposes one.
                if hasattr(self.mcp_client, 'exit_stack'):
                    await self.mcp_client.exit_stack.aclose()
                # Otherwise fall back to an explicit close() method.
                elif hasattr(self.mcp_client, 'close'):
                    await self.mcp_client.close()
                self.is_connected = False
                logger.info("MCP连接已关闭")
            except Exception as e:
                logger.error(f"关闭MCP连接时出错: {str(e)}")
- logger.error(f"关闭MCP连接时出错: {str(e)}")
398
-
399
-
400
- # 全局实例,在需要时才初始化
401
- global_mcp_intent_recognizer = None
402
-
403
-
404
- def get_mcp_intent_recognizer():
405
- global global_mcp_intent_recognizer
406
- if global_mcp_intent_recognizer is None:
407
- global_mcp_intent_recognizer = MCPIntentRecognizer()
408
- return global_mcp_intent_recognizer
File without changes
File without changes
@@ -1,56 +0,0 @@
1
- dependencies = ['torch', 'torchaudio']
2
- import torch
3
- import os
4
- import sys
5
- sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))
6
- from silero_vad.utils_vad import (init_jit_model,
7
- get_speech_timestamps,
8
- save_audio,
9
- read_audio,
10
- VADIterator,
11
- collect_chunks,
12
- OnnxWrapper)
13
-
14
-
15
def versiontuple(v):
    """Parse a version string like ``"1.12.0+cu113"`` into a tuple of ints.

    The local-build suffix after ``+`` is dropped; any non-numeric component
    (e.g. ``"dev"``) contributes 0 so tuple comparisons remain total.
    """
    parts = v.split('+')[0].split(".")
    version_list = []
    for part in parts:
        try:
            version_list.append(int(part))
        except ValueError:
            # Non-numeric fragment ("rc1", "dev", ...): count it as 0.
            version_list.append(0)
    return tuple(version_list)
24
-
25
-
26
def silero_vad(onnx=False, force_onnx_cpu=False, opset_version=16):
    """Silero Voice Activity Detector
    Returns a model with a set of utils
    Please see https://github.com/snakers4/silero-vad for usage examples
    """
    available_ops = [15, 16]
    if onnx and opset_version not in available_ops:
        raise Exception(f'Available ONNX opset_version: {available_ops}')

    # The JIT path needs a sufficiently recent torch build.
    if not onnx:
        installed_version = torch.__version__
        supported_version = '1.12.0'
        if versiontuple(installed_version) < versiontuple(supported_version):
            raise Exception(f'Please install torch {supported_version} or greater ({installed_version} installed)')

    model_dir = os.path.join(os.path.dirname(__file__), 'src', 'silero_vad', 'data')

    if onnx:
        # opset 16 uses the default artifact name; other opsets are 16 kHz-only builds.
        model_name = 'silero_vad.onnx' if opset_version == 16 else f'silero_vad_16k_op{opset_version}.onnx'
        model = OnnxWrapper(os.path.join(model_dir, model_name), force_onnx_cpu)
    else:
        model = init_jit_model(os.path.join(model_dir, 'silero_vad.jit'))

    utils = (get_speech_timestamps, save_audio, read_audio, VADIterator, collect_chunks)
    return model, utils
smartpi/motor.py DELETED
@@ -1,177 +0,0 @@
1
- # coding=utf-8
2
- import time
3
- import struct
4
- from typing import List, Optional
5
- from smartpi import base_driver
6
-
7
-
8
# Read the accumulated encoder count of the motor on the given M port.
def get_motor_encoder(port: int) -> Optional[int]:
    """Query the motor's encoder counter.

    Args:
        port: motor port index; added to the 0xA0 command base byte.

    Returns:
        The signed encoder count, or None when the driver did not answer.
    """
    # Frame: [0xA0+port, 0x01, opcode 0x01, terminator 0xBE]
    # (layout assumed from the sibling commands — confirm with base_driver).
    cmd = [0xA0 + port, 0x01, 0x01, 0xBE]
    # Short pause so back-to-back bus transactions do not collide.
    time.sleep(0.005)
    response = base_driver.single_operate_sensor(cmd, 0)
    if response is None:
        return None
    # Payload lies between the 4-byte header and the trailing byte.
    return int.from_bytes(response[4:-1], byteorder='big', signed=True)
20
-
21
# Reset the encoder counter of the motor on the given M port.
def reset_motor_encoder(port: int) -> int:
    """Reset the motor's encoder counter.

    Fire-and-forget: the command is written without reading a reply.

    Args:
        port: motor port index; added to the 0xA0 command base byte.

    Returns:
        0 always.
    """
    cmd = [0xA0 + port, 0x01, 0x03, 0xBE]
    # Short pause so back-to-back bus transactions do not collide.
    time.sleep(0.005)
    base_driver.write_data(0x01, 0x02, cmd)
    return 0
32
-
33
# Set the rotation direction of the motor on the given M port.
def set_motor_direction(port: int, direc: int) -> int:
    """Set the motor's rotation direction.

    Args:
        port: motor port index; added to the 0xA0 command base byte.
        direc: direction flag, 0 or 1.

    Returns:
        0 always (write-only command).
    """
    cmd = [0xA0 + port, 0x01, 0x06, 0x71, direc, 0xBE]
    # Short pause so back-to-back bus transactions do not collide.
    time.sleep(0.005)
    base_driver.write_data(0x01, 0x02, cmd)
    return 0
45
-
46
# Run the motor on the given M port at the given speed.
def set_motor(port: int, speed: int) -> int:
    """Run the motor at a given speed.

    Args:
        port: motor port index; added to the 0xA0 command base byte.
        speed: -100..100; values outside the range are clamped.

    Returns:
        0 always (write-only command).
    """
    # Clamp to [-100, 100]; negative speeds travel as two's-complement
    # bytes (e.g. -100 -> 156), which `% 256` yields directly.
    speed_byte = max(-100, min(100, speed)) % 256
    cmd = [0xA0 + port, 0x01, 0x02, 0x71, speed_byte, 0xBE]
    # Short pause so back-to-back bus transactions do not collide.
    time.sleep(0.005)
    base_driver.write_data(0x01, 0x02, cmd)
    return 0
68
-
69
# Stop the motor on the given M port.
def set_motor_stop(port: int) -> int:
    """Stop the motor.

    Args:
        port: motor port index; added to the 0xA0 command base byte.

    Returns:
        0 always (write-only command).
    """
    cmd = [0xA0 + port, 0x01, 0x0B, 0xBE]
    # Short pause so back-to-back bus transactions do not collide.
    time.sleep(0.005)
    base_driver.write_data(0x01, 0x02, cmd)
    return 0
80
-
81
# Rotate the motor on the given M port through an angle at a given speed.
def set_motor_angle(port: int, speed: int, degree: int) -> int:
    """Rotate the motor through a given angle.

    Args:
        port: motor port index; added to the 0xA0 command base byte.
        speed: -100..100; values outside the range are clamped.
        degree: target angle 0..65535, sent big-endian in two bytes.

    Returns:
        0 always (write-only command).
    """
    # Negative speeds travel as two's-complement bytes (-100 -> 156).
    speed_byte = max(-100, min(100, speed)) % 256
    deg_hi, deg_lo = divmod(degree, 256)
    cmd = [0xA0 + port, 0x01, 0x04, 0x81, speed_byte, 0x81, deg_hi, deg_lo, 0xBE]
    # Short pause so back-to-back bus transactions do not collide.
    time.sleep(0.005)
    base_driver.write_data(0x01, 0x02, cmd)
    return 0
105
-
106
# Run the motor on the given M port for a fixed duration.
def set_motor_second(port: int, speed: int, second: float) -> int:
    """Run the motor for a duration in seconds.

    Args:
        port: motor port index; added to the 0xA0 command base byte.
        speed: -100..100; values outside the range are clamped.
        second: duration in seconds, encoded as a 4-byte float.

    Returns:
        0 always (write-only command).
    """
    # Negative speeds travel as two's-complement bytes (-100 -> 156).
    speed_byte = max(-100, min(100, speed)) % 256
    # IEEE-754 float32 in native byte order ('f' format), as the
    # original implementation did — confirm the firmware's endianness.
    dur = struct.pack('f', second)
    cmd = [0xA0 + port, 0x01, 0x08, 0x81, speed_byte, 0x82,
           dur[0], dur[1], dur[2], dur[3], 0xBE]
    # Short pause so back-to-back bus transactions do not collide.
    time.sleep(0.005)
    base_driver.write_data(0x01, 0x02, cmd)
    return 0
137
-
138
# Run the motor on the given M port at a regulated constant speed.
def set_motor_constspeed(port: int, speed: int) -> int:
    """Run the motor at a regulated constant speed.

    Args:
        port: motor port index; added to the 0xA0 command base byte.
        speed: -100..100; values outside the range are clamped.

    Returns:
        0 always (write-only command).
    """
    # Negative speeds travel as two's-complement bytes (-100 -> 156).
    speed_byte = max(-100, min(100, speed)) % 256
    cmd = [0xA0 + port, 0x01, 0x09, 0x71, speed_byte, 0xBE]
    # Short pause so back-to-back bus transactions do not collide.
    time.sleep(0.005)
    base_driver.write_data(0x01, 0x02, cmd)
    return 0
161
-
162
# Read the current speed of the motor on the given M port.
def get_motor_speed(port: int) -> Optional[int]:
    """Query the motor's current speed.

    Args:
        port: motor port index; added to the 0xA0 command base byte.

    Returns:
        The signed speed value, or None when the driver did not answer.
    """
    cmd = [0xA0 + port, 0x01, 0x10, 0xBE]
    # Short pause so back-to-back bus transactions do not collide.
    time.sleep(0.005)
    response = base_driver.single_operate_sensor(cmd, 0)
    if response is None:
        return None
    # Payload lies between the 4-byte header and the trailing byte.
    return int.from_bytes(response[4:-1], byteorder='big', signed=True)
174
-
175
-
176
-
177
-