smartpi 1.1.4-py3-none-any.whl → 1.1.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (125)
  1. smartpi/__init__.py +8 -0
  2. smartpi/__init__.pyc +0 -0
  3. smartpi/_gui.py +66 -0
  4. smartpi/_gui.pyc +0 -0
  5. smartpi/ai_asr.py +1037 -0
  6. smartpi/ai_asr.pyc +0 -0
  7. smartpi/ai_llm.py +934 -0
  8. smartpi/ai_llm.pyc +0 -0
  9. smartpi/ai_tts.py +938 -0
  10. smartpi/ai_tts.pyc +0 -0
  11. smartpi/ai_vad.py +83 -0
  12. smartpi/ai_vad.pyc +0 -0
  13. smartpi/audio.py +125 -0
  14. smartpi/audio.pyc +0 -0
  15. smartpi/base_driver.py +618 -0
  16. smartpi/base_driver.pyc +0 -0
  17. smartpi/camera.py +84 -0
  18. smartpi/camera.pyc +0 -0
  19. smartpi/color_sensor.py +18 -0
  20. smartpi/color_sensor.pyc +0 -0
  21. smartpi/cw2015.py +179 -0
  22. smartpi/cw2015.pyc +0 -0
  23. smartpi/flash.py +130 -0
  24. smartpi/flash.pyc +0 -0
  25. smartpi/humidity.py +20 -0
  26. smartpi/humidity.pyc +0 -0
  27. smartpi/led.py +19 -0
  28. smartpi/led.pyc +0 -0
  29. smartpi/light_sensor.py +72 -0
  30. smartpi/light_sensor.pyc +0 -0
  31. smartpi/local_model.py +432 -0
  32. smartpi/local_model.pyc +0 -0
  33. smartpi/mcp_client.py +100 -0
  34. smartpi/mcp_client.pyc +0 -0
  35. smartpi/mcp_fastmcp.py +322 -0
  36. smartpi/mcp_fastmcp.pyc +0 -0
  37. smartpi/mcp_intent_recognizer.py +408 -0
  38. smartpi/mcp_intent_recognizer.pyc +0 -0
  39. smartpi/models/__init__.py +0 -0
  40. smartpi/models/__init__.pyc +0 -0
  41. smartpi/models/snakers4_silero-vad/__init__.py +0 -0
  42. smartpi/models/snakers4_silero-vad/__init__.pyc +0 -0
  43. smartpi/models/snakers4_silero-vad/hubconf.py +56 -0
  44. smartpi/models/snakers4_silero-vad/hubconf.pyc +0 -0
  45. smartpi/motor.py +177 -0
  46. smartpi/motor.pyc +0 -0
  47. smartpi/move.py +218 -0
  48. smartpi/move.pyc +0 -0
  49. smartpi/onnx_hand_workflow.py +201 -0
  50. smartpi/onnx_hand_workflow.pyc +0 -0
  51. smartpi/onnx_image_workflow.py +176 -0
  52. smartpi/onnx_image_workflow.pyc +0 -0
  53. smartpi/onnx_pose_workflow.py +482 -0
  54. smartpi/onnx_pose_workflow.pyc +0 -0
  55. smartpi/onnx_text_workflow.py +173 -0
  56. smartpi/onnx_text_workflow.pyc +0 -0
  57. smartpi/onnx_voice_workflow.py +437 -0
  58. smartpi/onnx_voice_workflow.pyc +0 -0
  59. smartpi/posemodel/__init__.py +0 -0
  60. smartpi/posemodel/__init__.pyc +0 -0
  61. smartpi/posenet_utils.py +222 -0
  62. smartpi/posenet_utils.pyc +0 -0
  63. smartpi/rknn_hand_workflow.py +245 -0
  64. smartpi/rknn_hand_workflow.pyc +0 -0
  65. smartpi/rknn_image_workflow.py +405 -0
  66. smartpi/rknn_image_workflow.pyc +0 -0
  67. smartpi/rknn_pose_workflow.py +592 -0
  68. smartpi/rknn_pose_workflow.pyc +0 -0
  69. smartpi/rknn_text_workflow.py +240 -0
  70. smartpi/rknn_text_workflow.pyc +0 -0
  71. smartpi/rknn_voice_workflow.py +394 -0
  72. smartpi/rknn_voice_workflow.pyc +0 -0
  73. smartpi/servo.py +178 -0
  74. smartpi/servo.pyc +0 -0
  75. smartpi/temperature.py +18 -0
  76. smartpi/temperature.pyc +0 -0
  77. smartpi/tencentcloud-speech-sdk-python/__init__.py +1 -0
  78. smartpi/tencentcloud-speech-sdk-python/__init__.pyc +0 -0
  79. smartpi/tencentcloud-speech-sdk-python/asr/__init__.py +0 -0
  80. smartpi/tencentcloud-speech-sdk-python/asr/__init__.pyc +0 -0
  81. smartpi/tencentcloud-speech-sdk-python/asr/flash_recognizer.py +178 -0
  82. smartpi/tencentcloud-speech-sdk-python/asr/flash_recognizer.pyc +0 -0
  83. smartpi/tencentcloud-speech-sdk-python/asr/speech_recognizer.py +311 -0
  84. smartpi/tencentcloud-speech-sdk-python/asr/speech_recognizer.pyc +0 -0
  85. smartpi/tencentcloud-speech-sdk-python/common/__init__.py +1 -0
  86. smartpi/tencentcloud-speech-sdk-python/common/__init__.pyc +0 -0
  87. smartpi/tencentcloud-speech-sdk-python/common/credential.py +6 -0
  88. smartpi/tencentcloud-speech-sdk-python/common/credential.pyc +0 -0
  89. smartpi/tencentcloud-speech-sdk-python/common/log.py +16 -0
  90. smartpi/tencentcloud-speech-sdk-python/common/log.pyc +0 -0
  91. smartpi/tencentcloud-speech-sdk-python/common/utils.py +7 -0
  92. smartpi/tencentcloud-speech-sdk-python/common/utils.pyc +0 -0
  93. smartpi/tencentcloud-speech-sdk-python/soe/__init__.py +0 -0
  94. smartpi/tencentcloud-speech-sdk-python/soe/__init__.pyc +0 -0
  95. smartpi/tencentcloud-speech-sdk-python/soe/speaking_assessment.py +276 -0
  96. smartpi/tencentcloud-speech-sdk-python/soe/speaking_assessment.pyc +0 -0
  97. smartpi/tencentcloud-speech-sdk-python/tts/__init__.py +0 -0
  98. smartpi/tencentcloud-speech-sdk-python/tts/__init__.pyc +0 -0
  99. smartpi/tencentcloud-speech-sdk-python/tts/flowing_speech_synthesizer.py +294 -0
  100. smartpi/tencentcloud-speech-sdk-python/tts/flowing_speech_synthesizer.pyc +0 -0
  101. smartpi/tencentcloud-speech-sdk-python/tts/speech_synthesizer.py +144 -0
  102. smartpi/tencentcloud-speech-sdk-python/tts/speech_synthesizer.pyc +0 -0
  103. smartpi/tencentcloud-speech-sdk-python/tts/speech_synthesizer_ws.py +234 -0
  104. smartpi/tencentcloud-speech-sdk-python/tts/speech_synthesizer_ws.pyc +0 -0
  105. smartpi/tencentcloud-speech-sdk-python/vc/__init__.py +0 -0
  106. smartpi/tencentcloud-speech-sdk-python/vc/__init__.pyc +0 -0
  107. smartpi/tencentcloud-speech-sdk-python/vc/speech_convertor_ws.py +237 -0
  108. smartpi/tencentcloud-speech-sdk-python/vc/speech_convertor_ws.pyc +0 -0
  109. smartpi/text_gte_model/__init__.py +0 -0
  110. smartpi/text_gte_model/__init__.pyc +0 -0
  111. smartpi/text_gte_model/config/__init__.py +0 -0
  112. smartpi/text_gte_model/config/__init__.pyc +0 -0
  113. smartpi/text_gte_model/gte/__init__.py +0 -0
  114. smartpi/text_gte_model/gte/__init__.pyc +0 -0
  115. smartpi/touch_sensor.py +16 -0
  116. smartpi/touch_sensor.pyc +0 -0
  117. smartpi/trace.py +120 -0
  118. smartpi/trace.pyc +0 -0
  119. smartpi/ultrasonic.py +20 -0
  120. smartpi/ultrasonic.pyc +0 -0
  121. {smartpi-1.1.4.dist-info → smartpi-1.1.5.dist-info}/METADATA +3 -2
  122. smartpi-1.1.5.dist-info/RECORD +137 -0
  123. smartpi-1.1.4.dist-info/RECORD +0 -77
  124. {smartpi-1.1.4.dist-info → smartpi-1.1.5.dist-info}/WHEEL +0 -0
  125. {smartpi-1.1.4.dist-info → smartpi-1.1.5.dist-info}/top_level.txt +0 -0
smartpi/local_model.py ADDED
@@ -0,0 +1,432 @@
+ import os
+ import sys
+ import numpy as np
+
+ class LocalModel:
+     """
+     Unified inference interface for RKNN and ONNX models.
+     Supports five classification types: 1 - image, 2 - gesture, 3 - pose, 4 - text, 5 - audio.
+     """
+     def __init__(self):
+         self.model_type = None  # Model type: onnx or rknn
+         self.classification_type = None  # Classification type: 1-5
+         self.workflow = None  # The workflow instance actually in use
+         self.last_result = None  # Cached inference result
+
+     def init_model(self, model_path, classification_type):
+         """
+         Initialize the model.
+
+         :param model_path: path to the model file (str, .onnx or .rknn format)
+         :param classification_type: classification type (int, 1-5)
+             1: image, 2: gesture, 3: pose, 4: text, 5: audio
+         :return: whether initialization succeeded (bool, True on success, False on failure)
+         """
+         try:
+             # Validate the classification type
+             if classification_type < 1 or classification_type > 5:
+                 raise ValueError("classification_type must be an integer between 1 and 5")
+
+             self.classification_type = classification_type
+
+             # Determine the model type from the file extension
+             ext = os.path.splitext(model_path)[1].lower()
+             if ext == '.onnx':
+                 self.model_type = 'onnx'
+             elif ext == '.rknn':
+                 self.model_type = 'rknn'
+             else:
+                 raise ValueError("Only .onnx and .rknn model files are supported")
+
+             # Load the workflow matching the model type and classification type
+             self._load_workflow(model_path)
+
+             return True
+         except Exception as e:
+             print(f"Model initialization failed: {e}")
+             return False
+
+     def _load_workflow(self, model_path):
+         """
+         Load the workflow matching the model type and classification type.
+         """
+         module_name = None
+         class_name = None
+
+         # Pick the module to load based on classification type and model type
+         if self.classification_type == 1:  # image
+             module_name = f"{self.model_type}_image_workflow"
+             class_name = "ImageWorkflow"
+         elif self.classification_type == 2:  # gesture
+             module_name = f"{self.model_type}_hand_workflow"
+             class_name = "GestureWorkflow"
+         elif self.classification_type == 3:  # pose
+             module_name = f"{self.model_type}_pose_workflow"
+             class_name = "PoseWorkflow"
+         elif self.classification_type == 4:  # text
+             module_name = f"{self.model_type}_text_workflow"
+             class_name = "TextClassificationWorkflow"
+         elif self.classification_type == 5:  # audio
+             module_name = f"{self.model_type}_voice_workflow"
+             class_name = "Workflow"
+
+         # Import the module dynamically
+         try:
+             # Make sure the smartpi package directory is importable
+             if os.path.abspath(os.path.dirname(__file__)) not in sys.path:
+                 sys.path.append(os.path.abspath(os.path.dirname(__file__)))
+
+             # Work around the logging module's handling of string level names
+             import logging
+
+             # Save the original _checkLevel function
+             original_check_level = logging._checkLevel
+
+             # Patch _checkLevel so it accepts string level names
+             def patched_check_level(level):
+                 if isinstance(level, str):
+                     # Map the string level name to the corresponding constant
+                     level = level.upper()
+                     if hasattr(logging, level):
+                         return getattr(logging, level)
+                 # For numbers or unknown strings, fall back to the original function
+                 return original_check_level(level)
+
+             # Apply the patch
+             logging._checkLevel = patched_check_level
+
+             # Make sure all standard log levels are available
+             for level_name, level_value in [
+                 ('DEBUG', 10),
+                 ('INFO', 20),
+                 ('WARNING', 30),
+                 ('ERROR', 40),
+                 ('CRITICAL', 50)
+             ]:
+                 if not hasattr(logging, level_name):
+                     logging.addLevelName(level_value, level_name)
+                     setattr(logging, level_name, level_value)
+
+             # Save the original logging configuration
+             original_logging_level = logging.getLogger().level
+             original_logging_handlers = logging.getLogger().handlers.copy()
+
+             module = __import__(module_name, fromlist=[class_name])
+             workflow_class = getattr(module, class_name)
+
+             # Reset the logging configuration to its original state
+             logging.getLogger().setLevel(original_logging_level)
+             logging.getLogger().handlers = original_logging_handlers
+
+             # Restore the original _checkLevel function
+             logging._checkLevel = original_check_level
+
+             # Initialize the workflow (text classification needs special handling)
+             if self.classification_type == 4:  # text classification
+                 # Text classification needs two model files: a feature-extraction model and a classification model.
+                 # The classification model path is assumed to contain "class", with the feature model in the same directory.
+                 if "class" in model_path.lower():
+                     # Build the feature-extraction model path
+                     feature_model_path = model_path.replace("class", "feature")
+                     if not os.path.exists(feature_model_path):
+                         # If no "feature" model is found, fall back to the default paths
+                         from onnx_text_workflow import default_feature_model, default_tokenizer_path
+                         feature_model_path = default_feature_model
+                         tokenizer_path = default_tokenizer_path
+                     else:
+                         tokenizer_path = None
+
+                     self.workflow = workflow_class(model_path, feature_model_path, tokenizer_path)
+                 else:
+                     # If no classification model is specified, use the default initialization
+                     self.workflow = workflow_class(model_path)
+             else:
+                 # All other types are initialized directly
+                 self.workflow = workflow_class(model_path)
+
+             print(f"Loaded the {self.model_type} {self._get_classification_name()} workflow")
+
+         except Exception as e:
+             print(f"Failed to load the workflow: {e}")
+             import traceback
+             traceback.print_exc()
+             raise
+
+     def predict(self, data):
+         """
+         Run inference.
+
+         :param data: input data, depending on the classification type
+             image/gesture/pose: image path (str) or frame data (numpy.ndarray)
+             text: a text string (str) or a list of texts (list[str])
+             audio: audio file path (str) or audio data (numpy.ndarray)
+         :return: inference result; the exact type depends on the workflow
+             (dict, tuple, list or other), None on failure
+         """
+         if not self.workflow:
+             raise RuntimeError("Model not initialized; call init_model first")
+
+         try:
+             result = None
+
+             # Choose the inference method based on the classification type
+             if self.classification_type in [1, 2, 3]:  # image/gesture/pose
+                 if isinstance(data, str) and os.path.isfile(data):
+                     # Input is a file path
+                     if hasattr(self.workflow, 'inference'):
+                         result = self.workflow.inference(data)
+                     elif hasattr(self.workflow, 'predict'):
+                         result = self.workflow.predict(data)
+                     else:
+                         raise RuntimeError("The current workflow does not support inference from a file path")
+                 else:
+                     # Input is frame data
+                     if hasattr(self.workflow, 'inference_frame'):
+                         result = self.workflow.inference_frame(data)
+                     else:
+                         raise RuntimeError("The current workflow does not support inference from frame data")
+
+             elif self.classification_type == 4:  # text
+                 if hasattr(self.workflow, 'predict'):
+                     result = self.workflow.predict(data)
+                 elif hasattr(self.workflow, 'inference'):
+                     result = self.workflow.inference(data)
+                 else:
+                     raise RuntimeError("The current workflow does not support text inference")
+
+             elif self.classification_type == 5:  # audio
+                 if isinstance(data, str) and os.path.isfile(data):
+                     # Input is a file path
+                     if hasattr(self.workflow, 'inference'):
+                         result = self.workflow.inference(data)
+                     else:
+                         raise RuntimeError("The current workflow does not support audio inference from a file path")
+                 else:
+                     # Input is audio data (ndarray)
+                     if hasattr(self.workflow, 'process_audio_segment'):
+                         # Follows the test_voice.py implementation
+
+                         # Process the audio data with process_audio_segment, consistent with test_voice.py
+                         block_results, final_result = self.workflow.process_audio_segment(data)
+
+                         # Prefer final_result (consistent with test_voice.py)
+                         if final_result is not None:
+                             result = final_result
+                         elif block_results and len(block_results) > 0:
+                             # If there is no final result but there are block results, use the first block's result
+                             result = block_results[0]['result']
+                         else:
+                             # No result at all
+                             result = None
+                     else:
+                         raise RuntimeError("The current workflow does not support inference from audio data")
+
+             # Cache the result
+             self.last_result = result
+
+             return result
+
+         except Exception as e:
+             print(f"Inference failed: {e}")
+             import traceback
+             traceback.print_exc()
+             return None
+
+     def predict_frame(self, frame):
+         """
+         Real-time frame inference (image, gesture and pose classification only).
+
+         :param frame: image frame data (numpy.ndarray)
+         :return: inference result; the exact type depends on the workflow
+             (dict, tuple, list or other), None on failure
+         """
+         if not self.workflow:
+             raise RuntimeError("Model not initialized; call init_model first")
+
+         if self.classification_type not in [1, 2, 3]:
+             raise ValueError("predict_frame only supports image, gesture and pose classification models")
+
+         try:
+             if hasattr(self.workflow, 'inference_frame'):
+                 result = self.workflow.inference_frame(frame)
+                 self.last_result = result
+                 return result
+             elif hasattr(self.workflow, 'predict_frame'):
+                 result = self.workflow.predict_frame(frame)
+                 self.last_result = result
+                 return result
+             else:
+                 raise RuntimeError("The current workflow does not support frame inference")
+
+         except Exception as e:
+             print(f"Frame inference failed: {e}")
+             import traceback
+             traceback.print_exc()
+             return None
+
+     def get_model_result(self, case=1):
+         """
+         Get the inference result.
+
+         :param case: result type (int, default 1)
+             1: the predicted class
+             2: the confidence of the predicted class
+         :return: the corresponding value; the exact type depends on the result format
+             case=1 usually returns str or int
+             case=2 usually returns float
+             None when there is no result
+         """
+         if not self.last_result:
+             return None
+
+         # Handle the different result formats produced by the workflows
+         try:
+             # Dict results (audio classification usually uses this format, consistent with test_voice.py)
+             if isinstance(self.last_result, dict):
+                 if case == 1:
+                     return self.last_result.get('class', None)
+                 elif case == 2:
+                     return self.last_result.get('confidence', None)
+
+             # Tuple results
+             elif isinstance(self.last_result, tuple):
+                 raw_result, formatted_result = self.last_result
+
+                 if case == 1:  # predicted class
+                     if isinstance(formatted_result, dict):
+                         return formatted_result.get('class', None)
+                     elif isinstance(formatted_result, list):
+                         return formatted_result[0].get('class', None) if formatted_result else None
+                     else:
+                         return formatted_result
+
+                 elif case == 2:  # confidence
+                     if isinstance(formatted_result, dict):
+                         return formatted_result.get('confidence', None)
+                     elif isinstance(formatted_result, list):
+                         return formatted_result[0].get('confidence', None) if formatted_result else None
+                     elif isinstance(raw_result, list):
+                         return max(raw_result[0]) if raw_result else None
+                     elif isinstance(raw_result, dict):
+                         return max(raw_result.values())
+                     else:
+                         return raw_result
+
+             # List results
+             elif isinstance(self.last_result, list):
+                 if case == 1:
+                     return self.last_result[0] if self.last_result else None
+                 elif case == 2:
+                     return max(self.last_result) if self.last_result else None
+
+             # By default, return the raw result
+             return self.last_result
+
+         except Exception as e:
+             print(f"Failed to get the result: {e}")
+             import traceback
+             traceback.print_exc()
+             return None
+
+     def clear_result(self):
+         """
+         Clear the cached result and release the workflow resources.
+
+         :return: no return value (None)
+         """
+         self.last_result = None
+
+         # Release the old workflow resources
+         if self.workflow:
+             try:
+                 if hasattr(self.workflow, 'release'):
+                     self.workflow.release()
+                 self.workflow = None
+             except Exception as e:
+                 print(f"Failed to release the workflow resources: {e}")
+
+     def _get_classification_name(self):
+         """
+         Get the name of the classification type.
+         """
+         names = {
+             1: "image",
+             2: "gesture",
+             3: "pose",
+             4: "text",
+             5: "audio"
+         }
+         return names.get(self.classification_type, "unknown")
+
+ # Global instance
+ _local_model_instance = None
+
+ def init_model(model_path, classification_type):
+     """
+     Module-level model initialization function.
+
+     :param model_path: path to the model file (str, .onnx or .rknn format)
+     :param classification_type: classification type (int, 1-5)
+         1: image, 2: gesture, 3: pose, 4: text, 5: audio
+     :return: whether initialization succeeded (bool, True on success, False on failure)
+     """
+     global _local_model_instance
+     if not _local_model_instance:
+         _local_model_instance = LocalModel()
+     return _local_model_instance.init_model(model_path, classification_type)
+
+ def predict(data):
+     """
+     Module-level inference function.
+
+     :param data: input data, depending on the classification type
+         image/gesture/pose: image path (str) or frame data (numpy.ndarray)
+         text: a text string (str) or a list of texts (list[str])
+         audio: audio file path (str) or audio data (numpy.ndarray)
+     :return: inference result; the exact type depends on the workflow
+         (dict, tuple, list or other), None on failure
+     """
+     global _local_model_instance
+     if not _local_model_instance:
+         raise RuntimeError("Model not initialized; call init_model first")
+     return _local_model_instance.predict(data)
+
+ def predict_frame(frame):
+     """
+     Module-level frame inference function (image, gesture and pose classification only).
+
+     :param frame: image frame data (numpy.ndarray)
+     :return: inference result; the exact type depends on the workflow
+         (dict, tuple, list or other), None on failure
+     """
+     global _local_model_instance
+     if not _local_model_instance:
+         raise RuntimeError("Model not initialized; call init_model first")
+     return _local_model_instance.predict_frame(frame)
+
+ def get_model_result(case=1):
+     """
+     Module-level result accessor.
+
+     :param case: result type (int, default 1)
+         1: the predicted class
+         2: the confidence of the predicted class
+     :return: the corresponding value; the exact type depends on the result format
+         case=1 usually returns str or int
+         case=2 usually returns float
+         None when there is no result
+     """
+     global _local_model_instance
+     if not _local_model_instance:
+         raise RuntimeError("Model not initialized; call init_model first")
+     return _local_model_instance.get_model_result(case)
+
+ def clear_result():
+     """
+     Module-level clear function: clears the cached result and releases the workflow resources.
+
+     :return: no return value (None)
+     """
+     global _local_model_instance
+     if _local_model_instance:
+         _local_model_instance.clear_result()
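For orientation, a minimal usage sketch of the module-level API that local_model.py adds (the sketch is editorial, not part of the diff): the model path, the sample image path, and the presence of a matching image workflow module on the device are assumptions for illustration only.

import smartpi.local_model as local_model

# Hypothetical model path; classification type 1 selects image (2 gesture, 3 pose, 4 text, 5 audio)
if local_model.init_model("/home/pi/models/fruit.onnx", 1):
    local_model.predict("/home/pi/images/apple.jpg")   # file path or numpy.ndarray frame
    print(local_model.get_model_result(1))             # predicted class
    print(local_model.get_model_result(2))             # confidence of that class
    local_model.clear_result()                         # drop the cache and release the workflow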
smartpi/local_model.pyc CHANGED
Binary file
smartpi/mcp_client.py ADDED
@@ -0,0 +1,100 @@
+
+ import json
+ import sys
+ import os
+ import asyncio
+ from typing import Optional
+
+ # Add the project root directory to the Python path
+ sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
+
+ from .mcp_fastmcp import Client, AsyncExitStack
+
+
+ class MCPClient:
+     """MCP client, responsible for communicating with the MCP server."""
+
+     def __init__(self):
+         self.exit_stack = AsyncExitStack()
+         self.session = None
+
+     async def connect(self, server_url: str):
+         try:
+             # Use our lightweight fastmcp implementation
+             self.session = Client()
+             await self.session.connect(server_url)
+             # Simulated ping
+             return True
+         except Exception as e:
+             print(f"Failed to connect to the MCP server: {str(e)}")
+             return False
+
+     async def get_tools(self):
+         if not self.session:
+             return []
+
+         # Fetch the tools via our lightweight implementation
+         try:
+             tools = await self.session.get_tools()
+             return tools
+         except Exception as e:
+             print(f"Failed to get the tool list: {str(e)}")
+             return []
+
+     async def call_tool(self, tool_calls):
+         GREEN = "\033[32m"
+         RESET = "\033[0m"
+         messages = []
+
+         try:
+             # Adapt the tool-call format
+             adapted_tool_calls = []
+             for tool_call in tool_calls:
+                 # Handle the different ways the arguments can be accessed
+                 if hasattr(tool_call, 'function'):
+                     tool_name = tool_call.function.name
+                     tool_args = json.loads(tool_call.function.arguments)
+                     tool_call_id = tool_call.id
+                 else:
+                     # Handle dict-style tool calls
+                     tool_name = tool_call.get("function", {}).get("name")
+                     tool_args = json.loads(tool_call.get("function", {}).get("arguments", "{}"))
+                     tool_call_id = tool_call.get("id", "1")
+
+                 print(f'\n{GREEN}Calling tool: {tool_name}, arguments: {tool_args}{RESET}\n')
+
+                 # Build a tool call suited to our lightweight implementation
+                 adapted_call = {
+                     "id": tool_call_id,
+                     "type": "function",
+                     "function": {
+                         "name": tool_name,
+                         "arguments": json.dumps(tool_args)
+                     }
+                 }
+                 adapted_tool_calls.append(adapted_call)
+
+             # Call our lightweight implementation
+             results = await self.session.call_tool(adapted_tool_calls)
+             return results
+         except Exception as e:
+             print(f"Tool call failed: {str(e)}")
+             # Return an error response
+             return [
+                 {"role": "system", "content": "Error processing tool call"},
+                 {"role": "tool", "content": "[]"}
+             ]
+
+
+
+
+
+ if __name__ == "__main__":
+     async def main():
+         client = MCPClient()
+         # await client.connect("http://127.0.0.1:8000/mcp")
+         await client.connect("./ezai/server.py")
+         print(await client.get_tools())
+
+
+     asyncio.run(main())
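Below is a short, hedged sketch of how the new MCPClient is meant to be driven, mirroring the __main__ block above; the server script path is illustrative, and whether that server actually exposes tools depends on the deployment.

import asyncio
from smartpi.mcp_client import MCPClient

async def demo():
    client = MCPClient()
    # Path to a local MCP server script (illustrative, as in the __main__ example above)
    if await client.connect("./ezai/server.py"):
        print(await client.get_tools())  # list of tool descriptors, [] on failure

asyncio.run(demo())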
smartpi/mcp_client.pyc CHANGED
Binary file