xiaozhi-sdk 0.2.4__py3-none-any.whl → 0.2.6__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
xiaozhi_sdk/__init__.py CHANGED
@@ -1,3 +1,3 @@
- __version__ = "0.2.4"
+ __version__ = "0.2.6"
  
  from xiaozhi_sdk.core import XiaoZhiWebsocket # noqa
xiaozhi_sdk/cli.py CHANGED
@@ -2,6 +2,7 @@ import asyncio
  import io
  import logging
  import time
+ import uuid
  from collections import deque
  from typing import Optional
  
@@ -12,7 +13,11 @@ import sounddevice as sd
  from PIL import ImageGrab
  
  from xiaozhi_sdk import XiaoZhiWebsocket
- from xiaozhi_sdk.config import INPUT_SERVER_AUDIO_SAMPLE_RATE
+ from xiaozhi_sdk.config import (
+     INPUT_SERVER_AUDIO_CHANNELS,
+     INPUT_SERVER_AUDIO_FRAME_DURATION,
+     INPUT_SERVER_AUDIO_SAMPLE_RATE,
+ )
  
  # Define custom log levels
  INFO1 = 21
@@ -73,6 +78,7 @@ logger.setLevel(logging.DEBUG)
  input_audio_buffer: deque[bytes] = deque()
  is_playing_audio = False
  is_end = False
+ human_speak_time = None
  
  
  def get_image_byte(data):
@@ -103,14 +109,25 @@ def get_image_byte(data):
  
  
  async def handle_message(message):
+     global is_playing_audio
+     global human_speak_time
+
      """Handle a received message"""
      global is_end
-     if message["type"] == "stt":  # human speech
+
+     if message["type"] == "tts" and message["state"] == "start":  # start
+         pass
+
+     elif message["type"] == "stt":  # human speech
+         human_speak_time = time.time()
          logger.info1("human: %s", message["text"])
+
      elif message["type"] == "tts" and message["state"] == "sentence_start":  # AI speech
+         is_playing_audio = True  # prevent interruption
          logger.info2("AI: %s", message["text"])
+
      elif message["type"] == "tts" and message["state"] == "stop":
-         pass
+         is_playing_audio = False
          # logger.info2("Playback finished")
          # logger.info("Listening...")
      elif message["type"] == "llm":  # expression
@@ -123,13 +140,14 @@ async def handle_message(message):
          is_end = True
  
  
- async def play_assistant_audio(audio_queue: deque[bytes], enable_audio):
+ async def play_assistant_audio(audio_queue: deque[bytes], enable_audio, audio_samplerate):
      """Play the audio stream"""
      global is_playing_audio
+     global human_speak_time
  
      stream = None
      if enable_audio:
-         stream = sd.OutputStream(samplerate=INPUT_SERVER_AUDIO_SAMPLE_RATE, channels=1, dtype=np.int16)
+         stream = sd.OutputStream(samplerate=audio_samplerate, channels=INPUT_SERVER_AUDIO_CHANNELS, dtype=np.int16)
          stream.start()
      last_audio_time = None
  
@@ -144,6 +162,10 @@ async def play_assistant_audio(audio_queue: deque[bytes], enable_audio):
              continue
  
          is_playing_audio = True
+         if human_speak_time:
+             logger.debug("Time to first audio packet: %s seconds", time.time() - human_speak_time)
+             human_speak_time = None
+
         pcm_data = audio_queue.popleft()
         if stream:
             stream.write(pcm_data)
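
The change above introduces a simple time-to-first-audio measurement: the timestamp is captured when the user's speech is transcribed (`stt`) and cleared when the first TTS packet is played. A minimal standalone sketch of that pattern, outside the SDK and with a dummy queue (names here are illustrative, not SDK API):

```python
# Minimal sketch of the time-to-first-audio measurement used in cli.py,
# with a dummy audio queue instead of the SDK's playback loop.
import asyncio
import time
from collections import deque
from typing import Optional

human_speak_time: Optional[float] = None


def on_stt(text: str) -> None:
    """Record when the server transcribed the user's speech."""
    global human_speak_time
    human_speak_time = time.time()


async def drain_audio(queue: deque) -> None:
    """Log the delay between the transcript and the first TTS packet."""
    global human_speak_time
    while True:
        if not queue:
            await asyncio.sleep(0.01)
            continue
        if human_speak_time is not None:
            print(f"first audio packet after {time.time() - human_speak_time:.3f} s")
            human_speak_time = None
        queue.popleft()  # the real client writes this PCM frame to the speaker
```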
@@ -165,10 +187,16 @@ class XiaoZhiClient:
          self.mac_address = ""
          self.wake_word = wake_word
  
-     async def start(self, mac_address: str, serial_number: str, license_key: str, enable_audio):
+     async def start(self, mac_address: str, serial_number: str, license_key: str, enable_audio, audio_samplerate):
          """Start the client connection"""
          self.mac_address = mac_address
-         self.xiaozhi = XiaoZhiWebsocket(handle_message, url=self.url, ota_url=self.ota_url, wake_word=self.wake_word)
+         self.xiaozhi = XiaoZhiWebsocket(
+             handle_message,
+             url=self.url,
+             ota_url=self.ota_url,
+             wake_word=self.wake_word,
+             audio_sample_rate=audio_samplerate,
+         )
          from xiaozhi_sdk.utils.mcp_tool import take_photo
  
          take_photo["tool_func"] = get_image_byte
@@ -178,7 +206,7 @@ class XiaoZhiClient:
              self.mac_address, aec=False, serial_number=serial_number, license_key=license_key
          )
  
-         asyncio.create_task(play_assistant_audio(self.xiaozhi.output_audio_queue, enable_audio))
+         asyncio.create_task(play_assistant_audio(self.xiaozhi.output_audio_queue, enable_audio, audio_samplerate))
  
      def audio_callback(self, indata, frames, time, status):
          """Audio input callback"""
@@ -202,30 +230,60 @@ class XiaoZhiClient:
  
  
  async def run_client(
-     mac_address: str, url: str, ota_url: str, serial_number: str, license_key: str, enable_audio: bool, wake_word: str
+     mac_address: str,
+     url: str,
+     ota_url: str,
+     serial_number: str,
+     license_key: str,
+     enable_audio: bool,
+     wake_word: str,
+     audio_samplerate: int,
  ):
      """Async function that runs the client"""
      logger.debug("Recording... Press Ctrl+C to stop.")
      client = XiaoZhiClient(url, ota_url, wake_word)
-     await client.start(mac_address, serial_number, license_key, enable_audio)
-
-     with sd.InputStream(callback=client.audio_callback, channels=1, samplerate=16000, blocksize=960):
+     await client.start(mac_address, serial_number, license_key, enable_audio, audio_samplerate)
+     blocksize = audio_samplerate * INPUT_SERVER_AUDIO_FRAME_DURATION // 1000
+     with sd.InputStream(
+         callback=client.audio_callback,
+         channels=INPUT_SERVER_AUDIO_CHANNELS,
+         samplerate=audio_samplerate,
+         blocksize=blocksize,
+     ):
+         logger.info("Listening...")
          await client.process_audio_input()
  
  
+ def get_mac_address():
+     mac = uuid.getnode()
+     mac_addr = ":".join(["%02x" % ((mac >> ele) & 0xFF) for ele in range(40, -8, -8)])
+     return mac_addr
+
+
  @click.command()
- @click.argument("mac_address")
+ @click.argument("mac_address", required=False)
  @click.option("--url", help="Server websocket URL")
  @click.option("--ota_url", help="OTA URL")
  @click.option("--serial_number", default="", help="Device serial number")
  @click.option("--license_key", default="", help="Device license key")
  @click.option("--enable_audio", default=True, help="Whether to enable audio playback")
  @click.option("--wake_word", default="", help="Wake word")
+ @click.option("--audio_samplerate", default=INPUT_SERVER_AUDIO_SAMPLE_RATE, help="Audio sample rate, default: 24000")
  def main(
-     mac_address: str, url: str, ota_url: str, serial_number: str, license_key: str, enable_audio: bool, wake_word: str
+     mac_address: str,
+     url: str,
+     ota_url: str,
+     serial_number: str,
+     license_key: str,
+     enable_audio: bool,
+     wake_word: str,
+     audio_samplerate: int,
  ):
      """XiaoZhi SDK client
  
      MAC_ADDRESS: the device MAC address (format: XX:XX:XX:XX:XX:XX)
      """
-     asyncio.run(run_client(mac_address, url, ota_url, serial_number, license_key, enable_audio, wake_word))
+     mac_address = mac_address or get_mac_address()
+     asyncio.run(
+         run_client(mac_address, url, ota_url, serial_number, license_key, enable_audio, wake_word, audio_samplerate)
+     )
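
Two details in this hunk are worth spelling out: the capture blocksize is derived as samplerate × frame_duration / 1000 (one Opus frame per callback), and the MAC address now defaults to the host's own, formatted from `uuid.getnode()`. A standalone illustration of that formatting (the value shown is an example, not a real device ID):

```python
# How the default MAC address is formatted from uuid.getnode(), high byte first.
import uuid

mac = uuid.getnode()  # 48-bit integer, e.g. 0x002244668800 (may be random if no MAC is found)
octets = [(mac >> shift) & 0xFF for shift in range(40, -8, -8)]  # shifts 40, 32, ..., 0
print(":".join(f"{o:02x}" for o in octets))  # e.g. "00:22:44:66:88:00"
```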
xiaozhi_sdk/config.py CHANGED
@@ -1,3 +1,5 @@
- INPUT_SERVER_AUDIO_SAMPLE_RATE = 16000
+ INPUT_SERVER_AUDIO_SAMPLE_RATE = 24000
+ INPUT_SERVER_AUDIO_CHANNELS = 1
+ INPUT_SERVER_AUDIO_FRAME_DURATION = 60
  
  OTA_URL = "https://api.tenclass.net/xiaozhi/ota"
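
With the new defaults, one audio frame covers 60 ms of mono audio at 24 kHz. A quick check of the frame size this implies (derived from the constants above, not code from the package):

```python
# samples per frame = sample_rate * frame_duration_ms / 1000
sample_rate = 24000      # INPUT_SERVER_AUDIO_SAMPLE_RATE
frame_duration_ms = 60   # INPUT_SERVER_AUDIO_FRAME_DURATION

samples_per_frame = sample_rate * frame_duration_ms // 1000
assert samples_per_frame == 1440      # was 960 at the old 16 kHz default
assert samples_per_frame * 2 == 2880  # bytes per frame of 16-bit mono PCM
```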
xiaozhi_sdk/core.py CHANGED
@@ -9,7 +9,11 @@ from typing import Any, Callable, Deque, Dict, Optional
  
  import websockets
  
- from xiaozhi_sdk.config import INPUT_SERVER_AUDIO_SAMPLE_RATE
+ from xiaozhi_sdk.config import (
+     INPUT_SERVER_AUDIO_CHANNELS,
+     INPUT_SERVER_AUDIO_FRAME_DURATION,
+     INPUT_SERVER_AUDIO_SAMPLE_RATE,
+ )
  from xiaozhi_sdk.iot import OtaDevice
  from xiaozhi_sdk.mcp import McpTool
  from xiaozhi_sdk.utils import get_wav_info, read_audio_file, setup_opus
@@ -27,15 +31,17 @@ class XiaoZhiWebsocket(McpTool):
          message_handler_callback: Optional[Callable] = None,
          url: Optional[str] = None,
          ota_url: Optional[str] = None,
-         audio_sample_rate: int = 16000,
-         audio_channels: int = 1,
+         audio_sample_rate: int = INPUT_SERVER_AUDIO_SAMPLE_RATE,
+         audio_channels: int = INPUT_SERVER_AUDIO_CHANNELS,
+         audio_frame_duration=INPUT_SERVER_AUDIO_FRAME_DURATION,
          wake_word: str = "",
      ):
          super().__init__()
          self.url = url
          self.ota_url = ota_url
          self.audio_channels = audio_channels
-         self.audio_opus = AudioOpus(audio_sample_rate, audio_channels)
+         self.audio_frame_duration = audio_frame_duration
+         self.audio_opus = AudioOpus(audio_sample_rate, audio_channels, audio_frame_duration)
          self.wake_word = wake_word
  
          # Client identifiers
@@ -70,13 +76,13 @@ class XiaoZhiWebsocket(McpTool):
          hello_message = {
              "type": "hello",
              "version": 1,
-             "features": {"mcp": True, "aec": aec},
+             "features": {"mcp": True, "aec": aec, "consistent_sample_rate": True},
              "transport": "websocket",
              "audio_params": {
                  "format": "opus",
-                 "sample_rate": 16000,
-                 "channels": 1,
-                 "frame_duration": 60,
+                 "sample_rate": self.audio_opus.sample_rate,
+                 "channels": self.audio_opus.channels,
+                 "frame_duration": self.audio_opus.frame_duration,
              },
          }
          await self.websocket.send(json.dumps(hello_message))
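
The client hello now advertises the configured audio parameters instead of hard-coded 16 kHz values. Serialized with the new defaults (24 kHz, mono, 60 ms frames), it would look roughly like this; `aec` depends on the argument passed in:

```python
# Approximate shape of the client "hello" sent above, at the new defaults.
client_hello = {
    "type": "hello",
    "version": 1,
    "features": {"mcp": True, "aec": False, "consistent_sample_rate": True},
    "transport": "websocket",
    "audio_params": {
        "format": "opus",
        "sample_rate": 24000,
        "channels": 1,
        "frame_duration": 60,
    },
}
```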
@@ -113,7 +119,7 @@ class XiaoZhiWebsocket(McpTool):
          current_dir = os.path.dirname(os.path.abspath(__file__))
          wav_path = os.path.join(current_dir, "../file/audio/greet.wav")
          framerate, channels = get_wav_info(wav_path)
-         audio_opus = AudioOpus(framerate, channels)
+         audio_opus = AudioOpus(framerate, channels, self.audio_frame_duration)
  
          for pcm_data in read_audio_file(wav_path):
              opus_data = await audio_opus.pcm_to_opus(pcm_data)
@@ -137,8 +143,8 @@ class XiaoZhiWebsocket(McpTool):
  
      async def send_silence_audio(self, duration_seconds: float = 1.2) -> None:
          """Send silent audio"""
-         frames_count = int(duration_seconds * 1000 / 60)
-         pcm_frame = b"\x00\x00" * int(INPUT_SERVER_AUDIO_SAMPLE_RATE / 1000 * 60)
+         frames_count = int(duration_seconds * 1000 / self.audio_opus.frame_duration)
+         pcm_frame = b"\x00\x00" * int(self.audio_opus.sample_rate / 1000 * self.audio_opus.frame_duration)
  
          for _ in range(frames_count):
              await self.send_audio(pcm_frame)
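
Worked numbers for `send_silence_audio(duration_seconds=1.2)` at the default settings (a sanity check, not package code):

```python
# 1.2 s of silence at 24 kHz / 60 ms frames.
frame_duration_ms, sample_rate = 60, 24000

frames_count = int(1.2 * 1000 / frame_duration_ms)            # 20 silent frames
frame_bytes = len(b"\x00\x00" * int(sample_rate / 1000 * frame_duration_ms))
assert (frames_count, frame_bytes) == (20, 2880)              # 57,600 bytes of PCM in total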
@@ -159,6 +165,7 @@ class XiaoZhiWebsocket(McpTool):
          data = json.loads(message)
          message_type = data["type"]
          if message_type == "hello":
+             self.audio_opus.set_out_audio_frame(data["audio_params"])
              self.hello_received.set()
              self.session_id = data["session_id"]
              return
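
The server's hello is now used to configure the decoder: its `audio_params` block is handed to `AudioOpus.set_out_audio_frame`. Based on the fields read here and in opus.py, the message has roughly this shape; the concrete values are illustrative only:

```python
# Illustrative server "hello"; the handler reads session_id and passes
# audio_params on, where sample_rate, frame_duration and channels are used.
server_hello = {
    "type": "hello",
    "session_id": "example-session-id",        # placeholder
    "audio_params": {
        "sample_rate": 24000,   # rate at which the server sends TTS audio
        "channels": 1,
        "frame_duration": 60,   # ms per Opus frame
    },
}
```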
@@ -219,7 +226,7 @@ class XiaoZhiWebsocket(McpTool):
  
          await self._send_hello(self.aec)
          await self._start_listen()
-         logger.debug("[websocket] Connection successful")
+         logger.debug("[websocket] Connection successful. mac_addr: %s", self.mac_addr)
          await asyncio.sleep(0.5)
  
      async def init_connection(
@@ -250,7 +257,9 @@ class XiaoZhiWebsocket(McpTool):
  
          if not await self.is_activate(ota_info):
              self.iot_task = asyncio.create_task(self._activate_iot_device(license_key, ota_info))
+             await self.send_wake_word("hi")
              logger.debug("[IOT] Device not activated")
+             return
  
          if self.wake_word:
              await self.send_wake_word(self.wake_word)
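
Pulling the pieces together, the CLI drives the SDK roughly as below. This is a condensed sketch assembled from the calls visible in this diff; the handler body, MAC address, and argument handling (e.g. whether `url=None` falls back to a built-in endpoint) are placeholders or assumptions, not documented API guarantees:

```python
# Condensed usage sketch based on cli.py in this diff; values are placeholders.
import asyncio
from xiaozhi_sdk import XiaoZhiWebsocket


async def handle_message(message: dict) -> None:
    print(message.get("type"), message.get("text", ""))


async def main() -> None:
    client = XiaoZhiWebsocket(
        handle_message,
        url=None,               # assumed to fall back to the SDK's defaults
        ota_url=None,
        wake_word="",
        audio_sample_rate=24000,
    )
    await client.init_connection("00:22:44:66:88:00", aec=False, serial_number="", license_key="")
    # Microphone PCM frames would then be pushed via client.send_audio(...)
    await asyncio.sleep(5)


if __name__ == "__main__":
    asyncio.run(main())
```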
xiaozhi_sdk/mcp.py CHANGED
@@ -12,24 +12,23 @@ from xiaozhi_sdk.utils.tool_func import _get_random_music_info
  
  logger = logging.getLogger("xiaozhi_sdk")
  
- mcp_initialize_payload: Dict[str, Any] = {
-     "jsonrpc": "2.0",
-     "id": 1,
-     "result": {
-         "protocolVersion": "2024-11-05",
-         "capabilities": {"tools": {}},
-         "serverInfo": {"name": "", "version": "0.0.1"},
-     },
- }
-
- mcp_tools_payload: Dict[str, Any] = {
-     "jsonrpc": "2.0",
-     "id": 2,
-     "result": {"tools": []},
- }
-
  
  class McpTool(object):
+     mcp_initialize_payload: Dict[str, Any] = {
+         "jsonrpc": "2.0",
+         "id": 1,
+         "result": {
+             "protocolVersion": "2024-11-05",
+             "capabilities": {"tools": {}},
+             "serverInfo": {"name": "", "version": "0.0.1"},
+         },
+     }
+
+     mcp_tools_payload: Dict[str, Any] = {
+         "id": 2,
+         "jsonrpc": "2.0",
+         "result": {"tools": []},
+     }
  
      def __init__(self):
          self.session_id = ""
@@ -131,8 +130,8 @@ class McpTool(object):
          # self.explain_url = "http://82.157.143.133:8000/vision/explain"
          self.explain_token = payload["params"]["capabilities"]["vision"]["token"]
  
-         mcp_initialize_payload["id"] = payload["id"]
-         await self.websocket.send(self.get_mcp_json(mcp_initialize_payload))
+         self.mcp_initialize_payload["id"] = payload["id"]
+         await self.websocket.send(self.get_mcp_json(self.mcp_initialize_payload))
  
      elif method == "notifications/initialized":
          # print("\nMCP tools initialized")
@@ -142,9 +141,9 @@ class McpTool(object):
          logger.error("[MCP] Failed to load tools")
  
      elif method == "tools/list":
-         mcp_tools_payload["id"] = payload["id"]
          tool_name_list = []
         mcp_tool_dict = copy.deepcopy(self.mcp_tool_dict)
+         mcp_tool_list = []
         for _, mcp_tool in mcp_tool_dict.items():
             tool_name_list.append(mcp_tool["name"])
             tool_func = mcp_tool.pop("tool_func", None)
@@ -152,9 +151,11 @@ class McpTool(object):
                 logger.error("[MCP] Tool %s has no tool_func", mcp_tool["name"])
                 return
             mcp_tool.pop("is_async", None)
-             mcp_tools_payload["result"]["tools"].append(mcp_tool)
+             mcp_tool_list.append(mcp_tool)
  
-         await self.websocket.send(self.get_mcp_json(mcp_tools_payload))
+         self.mcp_tools_payload["id"] = payload["id"]
+         self.mcp_tools_payload["result"]["tools"] = mcp_tool_list
+         await self.websocket.send(self.get_mcp_json(self.mcp_tools_payload))
          logger.debug("[MCP] Loaded successfully; available tools: %s", tool_name_list)
  
      elif method == "tools/call":
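
These payloads follow JSON-RPC 2.0. The refactor moves them onto the class and rebuilds the `tools/list` result for every request instead of appending to a shared module-level list, so repeated requests no longer accumulate duplicate tool entries. A filled-in reply would look roughly like this; the tool entry fields are illustrative, with only the name taken from this diff:

```python
# Illustrative tools/list response; "id" echoes the id of the incoming request.
tools_list_reply = {
    "jsonrpc": "2.0",
    "id": 3,  # copied from payload["id"]
    "result": {
        "tools": [
            {
                "name": "take_photo",                                        # from the diff
                "description": "Capture a photo from the device camera",     # illustrative
                "inputSchema": {"type": "object", "properties": {}},         # illustrative
            }
        ]
    },
}
```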
xiaozhi_sdk/opus.py CHANGED
@@ -1,61 +1,59 @@
- import math
-
  import av
  import numpy as np
  import opuslib
  
- from xiaozhi_sdk.config import INPUT_SERVER_AUDIO_SAMPLE_RATE
-
  
  class AudioOpus:
  
-     def __init__(self, sample_rate, channels):
+     def __init__(self, sample_rate, channels, frame_duration):
+         self.frame_duration = frame_duration
          self.sample_rate = sample_rate
          self.channels = channels
+         self.frame_size = self.sample_rate * self.frame_duration // 1000
  
          # Create the Opus encoder
-         self.opus_encoder = opuslib.Encoder(
-             fs=sample_rate, channels=channels, application=opuslib.APPLICATION_VOIP  # sample rate  # mono  # voice application
-         )
+         self.opus_encoder = opuslib.Encoder(fs=sample_rate, channels=channels, application=opuslib.APPLICATION_VOIP)
+
+         self.resampler = av.AudioResampler(format="s16", layout="mono", rate=sample_rate)
+
+     def set_out_audio_frame(self, audio_params):
+         # Audio parameters reported by the XiaoZhi server
+         self.out_frame_size = audio_params["sample_rate"] * audio_params["frame_duration"] // 1000
  
          # Create the Opus decoder
          self.opus_decoder = opuslib.Decoder(
-             fs=INPUT_SERVER_AUDIO_SAMPLE_RATE,  # sample rate
-             channels=1,  # mono
+             fs=audio_params["sample_rate"],  # sample rate
+             channels=audio_params["channels"],  # mono
          )
  
-         self.resampler = av.AudioResampler(format="s16", layout="mono", rate=sample_rate)
-
      async def pcm_to_opus(self, pcm):
          pcm_array = np.frombuffer(pcm, dtype=np.int16)
          pcm_bytes = pcm_array.tobytes()
-         return self.opus_encoder.encode(pcm_bytes, 960)
-
-     @staticmethod
-     def to_n_960(samples) -> np.ndarray:
-         n = math.ceil(samples.shape[0] / 960)
-         arr_padded = np.pad(samples, (0, 960 * n - samples.shape[0]), mode="constant", constant_values=0)
-         return arr_padded.reshape(n, 960)
+         return self.opus_encoder.encode(pcm_bytes, self.frame_size)
  
      async def change_sample_rate(self, pcm_array) -> np.ndarray:
-         if self.sample_rate == INPUT_SERVER_AUDIO_SAMPLE_RATE:
-             return self.to_n_960(pcm_array)
-
+         # Sample-rate conversion
          frame = av.AudioFrame.from_ndarray(np.array(pcm_array).reshape(1, -1), format="s16", layout="mono")
-         frame.sample_rate = INPUT_SERVER_AUDIO_SAMPLE_RATE  # Assuming input is 16kHz
+         frame.sample_rate = self.opus_decoder._fs
          resampled_frames = self.resampler.resample(frame)
         samples = resampled_frames[0].to_ndarray().flatten()
         new_frame = av.AudioFrame.from_ndarray(
             samples.reshape(1, -1),
             format="s16",
             layout="mono",
+             # layout="stereo",
         )
         new_frame.sample_rate = self.sample_rate
         new_samples = new_frame.to_ndarray().flatten()
-         return self.to_n_960(new_samples)
+
+         # Zero-pad up to self.frame_size
+         samples_padded = np.pad(
+             new_samples, (0, self.frame_size - new_samples.size), mode="constant", constant_values=0
+         )
+         return samples_padded.reshape(1, self.frame_size)
  
      async def opus_to_pcm(self, opus) -> np.ndarray:
-         pcm_data = self.opus_decoder.decode(opus, 960)
+         pcm_data = self.opus_decoder.decode(opus, frame_size=self.out_frame_size)
          pcm_array = np.frombuffer(pcm_data, dtype=np.int16)
          samples = await self.change_sample_rate(pcm_array)
          return samples
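
The rework replaces the fixed 960-sample frame (16 kHz × 60 ms) with two sizes derived from the negotiated parameters: `frame_size` at the client's rate for encoding and playback, and `out_frame_size` at the server's rate for decoding, with zero-padding after resampling because the converter can return slightly short frames. A standalone check of that arithmetic, using example rates rather than values from the package:

```python
# Frame-size arithmetic and zero-padding as used above (example rates only).
import numpy as np

client_rate, server_rate, frame_ms = 16000, 24000, 60
frame_size = client_rate * frame_ms // 1000       # 960 samples per local frame
out_frame_size = server_rate * frame_ms // 1000   # 1440 samples per decoded server frame

resampled = np.zeros(952, dtype=np.int16)         # a resampled frame that came out short
padded = np.pad(resampled, (0, frame_size - resampled.size), mode="constant", constant_values=0)
assert padded.reshape(1, frame_size).shape == (1, 960)
```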
xiaozhi_sdk-0.2.4.dist-info/METADATA → xiaozhi_sdk-0.2.6.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: xiaozhi-sdk
- Version: 0.2.4
+ Version: 0.2.6
  Summary: A Python SDK for connecting to and controlling XiaoZhi smart devices, with support for real-time audio communication, MCP tool integration, and device management.
  Author-email: dairoot <623815825@qq.com>
  License-Expression: MIT
@@ -63,6 +63,10 @@ python -m xiaozhi_sdk --help
  #### Connect a device (MAC address)
  
  ```bash
+ # Use the local machine's MAC address by default
+ python -m xiaozhi_sdk
+
+ # Specify a MAC address
  python -m xiaozhi_sdk 00:22:44:66:88:00
  ```
  
xiaozhi_sdk-0.2.4.dist-info/RECORD → xiaozhi_sdk-0.2.6.dist-info/RECORD CHANGED
@@ -8,19 +8,19 @@ file/opus/linux-x64-libopus.so,sha256=FmXJqkxLpDzNFOHYkmOzmsp1hP0eIS5b6x_XfOs-IQ
  file/opus/macos-arm64-libopus.dylib,sha256=H7wXwkrGwb-hesMMZGFxWb0Ri1Y4m5GWiKsd8CfOhE8,357584
  file/opus/macos-x64-libopus.dylib,sha256=MqyL_OjwSACF4Xs_-KrGbcScy4IEprr5Rlkk3ddZye8,550856
  file/opus/windows-opus.dll,sha256=kLfhioMvbJhOgNMAldpWk3DCZqC5Xd70LRbHnACvAnw,463360
- xiaozhi_sdk/__init__.py,sha256=Id2mMKG5QTHZk_W2nXnzQG1BpuG4QwUaMpinb6kS4T0,77
+ xiaozhi_sdk/__init__.py,sha256=hszHgr7Kf8Xfmvits8MjRxs-JmoGZq1Xc6fL6ChJipg,77
  xiaozhi_sdk/__main__.py,sha256=i0ZJdHUqAKg9vwZrK_w0TJkzdotTYTK8aUeSPcJc1ks,210
- xiaozhi_sdk/cli.py,sha256=CEpjXW-vnT8DieaK1MV0AYCG4bmK2_vpIz-9zhWHVgE,7001
- xiaozhi_sdk/config.py,sha256=h4mpMeBf2vT9qYAqCCbGVGmMemkgk98pcXP2Rh4TEFc,89
- xiaozhi_sdk/core.py,sha256=JfYH7dyKxb1ikVkam74V_SMvn07MTdBfY7u3X6OdUxk,10723
+ xiaozhi_sdk/cli.py,sha256=7qL3-y_h5M00og2iqdaGKZx4-z5M3TKlhBAz1KeWBAY,8427
+ xiaozhi_sdk/config.py,sha256=tIQMBEoCkHXS-lR2IUSFNRkUEJ6EHF2rc_stx0gEIZU,160
+ xiaozhi_sdk/core.py,sha256=bpGInL2mPK-Y_n4IfJmSdeXnpAa9X2rqKJLxIgE8pNk,11345
  xiaozhi_sdk/iot.py,sha256=VVAheynp1iV4GCaoPywQWpKtlyoACDLswH8yfV_JZgI,2699
- xiaozhi_sdk/mcp.py,sha256=TVSHrWIy7qDy8DQOjQ9EAzVZ1SFtXkSgTDYysL-dRhk,6559
- xiaozhi_sdk/opus.py,sha256=r3nnYg0ZKAJTreb_3nKgfHJh06MJiMvnNMPO1SWdoMM,2224
+ xiaozhi_sdk/mcp.py,sha256=g80dijPgWQ-npx7sD4it4_kUMCTO40gIcbXoLgF1ksE,6712
+ xiaozhi_sdk/opus.py,sha256=f0nus9vxNo7TOfVEvrbJbpggWRCQlK2UubJF_Nu0Ups,2329
  xiaozhi_sdk/utils/__init__.py,sha256=XKSHWoFmuSkpwaIr308HybRzfFIXoT1Fd-eUKo_im6Y,1705
  xiaozhi_sdk/utils/mcp_tool.py,sha256=T6OIrSqcyAHQ85sduz5Klx646SoEnGD5ROBTKoX6NhE,4207
  xiaozhi_sdk/utils/tool_func.py,sha256=imwehfUlENjelYmGbGYgb6C82-ijs53XCxrtCpqrJps,3152
- xiaozhi_sdk-0.2.4.dist-info/licenses/LICENSE,sha256=Vwgps1iODKl43cAtME_0dawTjAzNW-O2BWiN5BHggww,1085
- xiaozhi_sdk-0.2.4.dist-info/METADATA,sha256=Z9KG9zhvIfBFNA5G95hFRIrEfw-dNVkEUN0fWmmDpaA,2121
- xiaozhi_sdk-0.2.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- xiaozhi_sdk-0.2.4.dist-info/top_level.txt,sha256=nBpue4hU5Ykm5CtYPsAdxSa_yqbtZsIT_gF_EkBaJPM,12
- xiaozhi_sdk-0.2.4.dist-info/RECORD,,
+ xiaozhi_sdk-0.2.6.dist-info/licenses/LICENSE,sha256=Vwgps1iODKl43cAtME_0dawTjAzNW-O2BWiN5BHggww,1085
+ xiaozhi_sdk-0.2.6.dist-info/METADATA,sha256=vgOY2f3vdRtAtlsLiErTlOHXlNS-yd2HetBAONAa72g,2190
+ xiaozhi_sdk-0.2.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ xiaozhi_sdk-0.2.6.dist-info/top_level.txt,sha256=nBpue4hU5Ykm5CtYPsAdxSa_yqbtZsIT_gF_EkBaJPM,12
+ xiaozhi_sdk-0.2.6.dist-info/RECORD,,