agora-python-server-sdk 2.1.6__tar.gz → 2.2.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {agora_python_server_sdk-2.1.6/agora_python_server_sdk.egg-info → agora_python_server_sdk-2.2.0}/PKG-INFO +58 -1
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/README.md +58 -1
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/__init__.py +14 -4
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/_ctypes_handle/_ctypes_data.py +34 -5
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/_ctypes_handle/_local_user_observer.py +9 -1
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/agora_base.py +17 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/agora_service.py +4 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/local_user.py +52 -9
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/local_user_observer.py +3 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/utils/audio_consumer.py +1 -1
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0/agora_python_server_sdk.egg-info}/PKG-INFO +58 -1
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/setup.py +9 -4
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/MANIFEST.in +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/_ctypes_handle/_audio_frame_observer.py +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/_ctypes_handle/_rtc_connection_observer.py +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/_ctypes_handle/_video_encoded_frame_observer.py +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/_ctypes_handle/_video_frame_observer.py +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/_utils/globals.py +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/agora_parameter.py +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/audio_encoded_frame_sender.py +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/audio_frame_observer.py +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/audio_pcm_data_sender.py +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/audio_sessionctrl.py +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/audio_vad_manager.py +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/local_audio_track.py +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/local_video_track.py +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/media_node_factory.py +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/remote_audio_track.py +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/remote_video_track.py +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/rtc_connection.py +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/rtc_connection_observer.py +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/utils/vad_dump.py +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/video_encoded_frame_observer.py +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/video_encoded_image_sender.py +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/video_frame_observer.py +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/video_frame_sender.py +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/voice_detection.py +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora_python_server_sdk.egg-info/SOURCES.txt +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora_python_server_sdk.egg-info/dependency_links.txt +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora_python_server_sdk.egg-info/top_level.txt +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/pyproject.toml +0 -0
- {agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/setup.cfg +0 -0
{agora_python_server_sdk-2.1.6/agora_python_server_sdk.egg-info → agora_python_server_sdk-2.2.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: agora_python_server_sdk
-Version: 2.1.6
+Version: 2.2.0
 Summary: A Python SDK for Agora Server
 Home-page: https://github.com/AgoraIO-Extensions/Agora-Python-Server-SDK
 Classifier: Intended Audience :: Developers
@@ -51,6 +51,25 @@ python agora_rtc/examples/example_audio_pcm_send.py --appId=xxx --channelId=xxx
 
 # Change log
 
+2025.01.08 Release 2.2.0
+-- Updates:
+- Update the SDK version from 4.4.30 to 4.4.31. Done.
+-- FEAT:
+- Add serviceconfigure.
+- Add domain_limit. Done.
+- Add should_callback_when_muted. Done.
+- Add colorspacetype to ExternalVideoFrame to support the encoding of solid-color backgrounds in virtual human scenarios. Done.
+-- FEAT:
+- Add the AudioMetaData interface: localuser::send_audio_meta_data. Done.
+- Add the OnAudioMetaDataReceived callback to localuserObserver::on_audio_meta_data_received. Done.
+-- Sample modifications.
+
+2024.12.17 Release 2.1.7
+--Changes:
+
+Fixed the typeError issue in LocalUser::sub/unsub audio/video.
+Adjusted the default stopRecogCount for VAD from 30 to 50.
+Modified sample_vad.
 ## 2024.12.09 Release 2.1.6
 - New Features:
 -- Added AudioVadManager to manage VAD (Voice Activity Detection) instances.
@@ -279,3 +298,41 @@ Store the LLM results in a cache as they are received.
 Perform a reverse scan of the cached data to find the most recent punctuation mark.
 Truncate the data from the start to the most recent punctuation mark and pass it to TTS for synthesis.
 Remove the truncated data from the cache. The remaining data should be moved to the beginning of the cache and continue waiting for additional data from the LLM.
+
+##VAD Configuration Parameters
+AgoraAudioVadConfigV2 Properties
+
+Property Name  Type  Description  Default Value  Value Range
+preStartRecognizeCount  int  Number of audio frames saved before detecting speech  16  [0, ]
+startRecognizeCount  int  Total number of audio frames to detect speech start  30  [1, max]
+stopRecognizeCount  int  Number of audio frames to detect speech stop  50  [1, max]
+activePercent  float  Percentage of active frames in startRecognizeCount frames  0.7  [0.0, 1.0]
+inactivePercent  float  Percentage of inactive frames in stopRecognizeCount frames  0.5  [0.0, 1.0]
+startVoiceProb  int  Probability that an audio frame contains human voice  70  [0, 100]
+stopVoiceProb  int  Probability that an audio frame contains human voice  70  [0, 100]
+startRmsThreshold  int  Energy dB threshold for detecting speech start  -50  [-100, 0]
+stopRmsThreshold  int  Energy dB threshold for detecting speech stop  -50  [-100, 0]
+Notes:
+startRmsThreshold and stopRmsThreshold:
+
+The higher the value, the louder the speaker's voice needs to be compared to the surrounding background noise.
+In quiet environments, it is recommended to use the default value of -50.
+In noisy environments, you can increase the threshold to between -40 and -30 to reduce false positives.
+Adjusting these thresholds based on the actual use case and audio characteristics can achieve optimal performance.
+stopRecognizeCount:
+
+This value reflects how long to wait after detecting non-human voice before concluding that the user has stopped speaking. It controls the gap between consecutive speech utterances. Within this gap, VAD will treat adjacent sentences as part of the same speech.
+A shorter gap will increase the likelihood of adjacent sentences being recognized as separate speech segments. Typically, it is recommended to set this value between 50 and 80.
+For example: "Good afternoon, [interval_between_sentences] what are some fun places to visit in Beijing?"
+
+If the interval_between_sentences between the speaker's phrases is greater than the stopRecognizeCount, the VAD will recognize the above as two separate VADs:
+
+VAD1: Good afternoon
+VAD2: What are some fun places to visit in Beijing?
+If the interval_between_sentences is less than stopRecognizeCount, the VAD will recognize the above as a single VAD:
+
+VAD: Good afternoon, what are some fun places to visit in Beijing?
+
+
+
+If latency is a concern, you can lower this value, or consult with the development team to determine how to manage latency while ensuring semantic continuity in speech recognition. This will help avoid the AI being interrupted too sensitively.
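To make the new VAD table concrete, here is a minimal sketch of the documented defaults and the noisy-room tuning suggested in the notes above. The field names simply mirror the table; the SDK's own AgoraAudioVadConfigV2 (see agora/rtc/voice_detection.py and audio_vad_manager.py in the file list) is the real type, and its exact constructor is not shown in this diff.

```python
from dataclasses import dataclass

# Illustrative stand-in: field names mirror the table above, not the SDK's source.
@dataclass
class VadTuning:
    preStartRecognizeCount: int = 16
    startRecognizeCount: int = 30
    stopRecognizeCount: int = 50      # default raised from 30 to 50 in release 2.1.7
    activePercent: float = 0.7
    inactivePercent: float = 0.5
    startVoiceProb: int = 70
    stopVoiceProb: int = 70
    startRmsThreshold: int = -50
    stopRmsThreshold: int = -50

# Noisy environment: raise the RMS thresholds and widen the stop gap, per the notes above.
noisy_room = VadTuning(startRmsThreshold=-40, stopRmsThreshold=-40, stopRecognizeCount=80)
```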
{agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/README.md

@@ -36,6 +36,25 @@ python agora_rtc/examples/example_audio_pcm_send.py --appId=xxx --channelId=xxx
 
 # Change log
 
+2025.01.08 Release 2.2.0
+-- Updates:
+- Update the SDK version from 4.4.30 to 4.4.31. Done.
+-- FEAT:
+- Add serviceconfigure.
+- Add domain_limit. Done.
+- Add should_callback_when_muted. Done.
+- Add colorspacetype to ExternalVideoFrame to support the encoding of solid-color backgrounds in virtual human scenarios. Done.
+-- FEAT:
+- Add the AudioMetaData interface: localuser::send_audio_meta_data. Done.
+- Add the OnAudioMetaDataReceived callback to localuserObserver::on_audio_meta_data_received. Done.
+-- Sample modifications.
+
+2024.12.17 Release 2.1.7
+--Changes:
+
+Fixed the typeError issue in LocalUser::sub/unsub audio/video.
+Adjusted the default stopRecogCount for VAD from 30 to 50.
+Modified sample_vad.
 ## 2024.12.09 Release 2.1.6
 - New Features:
 -- Added AudioVadManager to manage VAD (Voice Activity Detection) instances.
@@ -263,4 +282,42 @@ To achieve a balance between clarity and minimal delay, the following steps shou
 Store the LLM results in a cache as they are received.
 Perform a reverse scan of the cached data to find the most recent punctuation mark.
 Truncate the data from the start to the most recent punctuation mark and pass it to TTS for synthesis.
-Remove the truncated data from the cache. The remaining data should be moved to the beginning of the cache and continue waiting for additional data from the LLM.
+Remove the truncated data from the cache. The remaining data should be moved to the beginning of the cache and continue waiting for additional data from the LLM.
+
+##VAD Configuration Parameters
+AgoraAudioVadConfigV2 Properties
+
+Property Name  Type  Description  Default Value  Value Range
+preStartRecognizeCount  int  Number of audio frames saved before detecting speech  16  [0, ]
+startRecognizeCount  int  Total number of audio frames to detect speech start  30  [1, max]
+stopRecognizeCount  int  Number of audio frames to detect speech stop  50  [1, max]
+activePercent  float  Percentage of active frames in startRecognizeCount frames  0.7  [0.0, 1.0]
+inactivePercent  float  Percentage of inactive frames in stopRecognizeCount frames  0.5  [0.0, 1.0]
+startVoiceProb  int  Probability that an audio frame contains human voice  70  [0, 100]
+stopVoiceProb  int  Probability that an audio frame contains human voice  70  [0, 100]
+startRmsThreshold  int  Energy dB threshold for detecting speech start  -50  [-100, 0]
+stopRmsThreshold  int  Energy dB threshold for detecting speech stop  -50  [-100, 0]
+Notes:
+startRmsThreshold and stopRmsThreshold:
+
+The higher the value, the louder the speaker's voice needs to be compared to the surrounding background noise.
+In quiet environments, it is recommended to use the default value of -50.
+In noisy environments, you can increase the threshold to between -40 and -30 to reduce false positives.
+Adjusting these thresholds based on the actual use case and audio characteristics can achieve optimal performance.
+stopRecognizeCount:
+
+This value reflects how long to wait after detecting non-human voice before concluding that the user has stopped speaking. It controls the gap between consecutive speech utterances. Within this gap, VAD will treat adjacent sentences as part of the same speech.
+A shorter gap will increase the likelihood of adjacent sentences being recognized as separate speech segments. Typically, it is recommended to set this value between 50 and 80.
+For example: "Good afternoon, [interval_between_sentences] what are some fun places to visit in Beijing?"
+
+If the interval_between_sentences between the speaker's phrases is greater than the stopRecognizeCount, the VAD will recognize the above as two separate VADs:
+
+VAD1: Good afternoon
+VAD2: What are some fun places to visit in Beijing?
+If the interval_between_sentences is less than stopRecognizeCount, the VAD will recognize the above as a single VAD:
+
+VAD: Good afternoon, what are some fun places to visit in Beijing?
+
+
+
+If latency is a concern, you can lower this value, or consult with the development team to determine how to manage latency while ensuring semantic continuity in speech recognition. This will help avoid the AI being interrupted too sensitively.
{agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/__init__.py

@@ -26,13 +26,23 @@ def _check_download_and_extract_sdk():
     sdk_dir = os.path.join(agora_service_path, "agora_sdk")
     zip_path = os.path.join(agora_service_path, "agora_rtc_sdk.zip")
 
-    url = "https://download.agora.io/sdk/release/agora_rtc_sdk-x86_64-linux-gnu-v4.4.30-20241024_101940-398537.zip"
+    #url = "https://download.agora.io/sdk/release/agora_rtc_sdk-x86_64-linux-gnu-v4.4.30-20241024_101940-398537.zip"
+    # version 2.2.0 for linux
+    url = "https://download.agora.io/sdk/release/agora_rtc_sdk-x86_64-linux-gnu-v4.4.31-20241223_111509-491956.zip"
+
+
     libagora_rtc_sdk_path = os.path.join(sdk_dir, "libagora_rtc_sdk.so")
-    rtc_md5 = "7031dd10d1681cd88fd89d68c5b54282"
+    #rtc_md5 = "7031dd10d1681cd88fd89d68c5b54282"
+    rtc_md5 = "f2a9e3ed15f872cb7e3b62d1528ac5cb"
     if sys.platform == 'darwin':
-        url = "https://download.agora.io/sdk/release/agora_rtc_sdk_mac_rel.v4.4.30_22472_FULL_20241024_1224_398653.zip"
+        #url = "https://download.agora.io/sdk/release/agora_rtc_sdk_mac_rel.v4.4.30_22472_FULL_20241024_1224_398653.zip"
+        # version 2.2.0 for mac
+        url = "https://download.agora.io/sdk/release/agora_sdk_mac_v4.4.31_23136_FULL_20241223_1245_492039.zip"
+
         libagora_rtc_sdk_path = os.path.join(sdk_dir, "libAgoraRtcKit.dylib")
-        rtc_md5 = "ca3ca14f9e2b7d97eb2594d1f32dab9f"
+        #rtc_md5 = "ca3ca14f9e2b7d97eb2594d1f32dab9f"
+        rtc_md5 = "6821cae218c8f31f8d720ac0c77edab0"
+
 
     if os.path.exists(libagora_rtc_sdk_path) and get_file_md5(libagora_rtc_sdk_path) == rtc_md5:
         return
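The block above re-pins the 4.4.31 native libraries by download URL and MD5. As a reference for what that guard does, here is a small standard-library sketch of a streamed MD5 check; the helper names are illustrative, not the SDK's own get_file_md5.

```python
import hashlib
import os

def file_md5(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash a file in chunks so large SDK archives are not read into memory at once."""
    digest = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

def sdk_is_current(lib_path: str, expected_md5: str) -> bool:
    """True when the native library already on disk matches the pinned hash."""
    return os.path.exists(lib_path) and file_md5(lib_path) == expected_md5
```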
{agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/_ctypes_handle/_ctypes_data.py

@@ -718,6 +718,29 @@ class RemoteVideoTrackStatsInner(ctypes.Structure):
             stats.total_active_time,
             stats.publish_duration
         )
+
+class ColorSpaceTypeInner(ctypes.Structure):
+    _fields_ = [
+        ("primaries_id", ctypes.c_int),
+        ("transfer_id", ctypes.c_int),
+        ("matrix_id", ctypes.c_int),
+        ("range_id", ctypes.c_int)
+    ]
+    def get(self):
+        return ColorSpaceType(
+            primaries_id=self.primaries_id,
+            transfer_id=self.transfer_id,
+            matrix_id=self.matrix_id,
+            range_id=self.range_id
+        )
+    @staticmethod
+    def create(colorspace:ColorSpaceType) -> 'ColorSpaceTypeInner':
+        return ColorSpaceTypeInner(
+            primaries_id=colorspace.primaries_id,
+            transfer_id=colorspace.transfer_id,
+            matrix_id=colorspace.matrix_id,
+            range_id=colorspace.range_id
+        )
 
 
 class ExternalVideoFrameInner(ctypes.Structure):
@@ -741,7 +764,8 @@ class ExternalVideoFrameInner(ctypes.Structure):
         ("metadata_size", ctypes.c_int),
         ("alpha_buffer", ctypes.c_void_p),
         ("fill_alpha_buffer", ctypes.c_uint8),
-        ("alpha_mode", ctypes.c_int)
+        ("alpha_mode", ctypes.c_int),
+        ("color_space", ColorSpaceTypeInner)
     ]
 
     def get(self):
@@ -765,7 +789,8 @@ class ExternalVideoFrameInner(ctypes.Structure):
             metadata_size=self.metadata_size,
             alpha_buffer=self.alpha_buffer,
             fill_alpha_buffer=self.fill_alpha_buffer,
-            alpha_mode=self.alpha_mode
+            alpha_mode=self.alpha_mode,
+            color_space=self.color_space.get()
         )
 
     @staticmethod
@@ -815,7 +840,8 @@ class ExternalVideoFrameInner(ctypes.Structure):
             c_metadata_size,
             c_alpha_buffer_ptr,
             frame.fill_alpha_buffer,
-            frame.alpha_mode
+            frame.alpha_mode,
+            ColorSpaceTypeInner.create(frame.color_space)
         )
 
 
@@ -1105,6 +1131,7 @@ class AgoraServiceConfigInner(ctypes.Structure):
         ('audio_scenario', ctypes.c_int),
 
         ('use_string_uid', ctypes.c_int),
+        ('domain_limit', ctypes.c_int),
     ]
 
     def get(self):
@@ -1117,7 +1144,8 @@ class AgoraServiceConfigInner(ctypes.Structure):
             area_code=self.area_code,
             channel_profile=self.channel_profile,
             audio_scenario=self.audio_scenario,
-            use_string_uid=self.use_string_uid
+            use_string_uid=self.use_string_uid,
+            domain_limit=self.domain_limit
         )
 
     @staticmethod
@@ -1131,7 +1159,8 @@ class AgoraServiceConfigInner(ctypes.Structure):
             config.area_code,
             config.channel_profile,
             config.audio_scenario,
-            config.use_string_uid
+            config.use_string_uid,
+            config.domain_limit
         )
 
 
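ColorSpaceTypeInner and the AgoraServiceConfigInner change both follow this module's existing pattern: a public dataclass is mirrored by a ctypes.Structure, with get() converting C to Python and a static create() converting Python to C so new fields can cross the FFI boundary. A self-contained sketch of that pattern (standalone names, not the SDK's classes):

```python
import ctypes
from dataclasses import dataclass

@dataclass(kw_only=True)
class ColorSpace:                 # stand-in for the public dataclass side
    primaries_id: int = 0
    transfer_id: int = 0
    matrix_id: int = 0
    range_id: int = 0

class ColorSpaceInner(ctypes.Structure):   # stand-in for the ctypes mirror
    _fields_ = [
        ("primaries_id", ctypes.c_int),
        ("transfer_id", ctypes.c_int),
        ("matrix_id", ctypes.c_int),
        ("range_id", ctypes.c_int),
    ]

    def get(self) -> ColorSpace:
        # C -> Python: copy the struct fields into the dataclass
        return ColorSpace(primaries_id=self.primaries_id, transfer_id=self.transfer_id,
                          matrix_id=self.matrix_id, range_id=self.range_id)

    @staticmethod
    def create(cs: ColorSpace) -> "ColorSpaceInner":
        # Python -> C: build the struct that is embedded in the outer frame struct
        return ColorSpaceInner(primaries_id=cs.primaries_id, transfer_id=cs.transfer_id,
                               matrix_id=cs.matrix_id, range_id=cs.range_id)

inner = ColorSpaceInner.create(ColorSpace(range_id=2))
assert inner.get().range_id == 2
```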
{agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/_ctypes_handle/_local_user_observer.py

@@ -44,6 +44,7 @@ ON_INTRA_REQUEST_RECEIVED_CALLBACK = ctypes.CFUNCTYPE(None, AGORA_HANDLE)
 ON_REMOTE_SUBSCRIBE_FALLBACK_TO_AUDIO_ONLY_CALLBACK = ctypes.CFUNCTYPE(None, AGORA_HANDLE, user_id_t, ctypes.c_int)
 ON_STREAM_MESSAGE_CALLBACK = ctypes.CFUNCTYPE(None, AGORA_HANDLE, user_id_t, ctypes.c_int, ctypes.c_char_p, ctypes.c_size_t)
 ON_USER_STATE_CHANGED_CALLBACK = ctypes.CFUNCTYPE(None, AGORA_HANDLE, user_id_t, ctypes.c_uint32)
+ON_AUDIO_META_DATA_RECEIVED_CALLBACK = ctypes.CFUNCTYPE(None, AGORA_HANDLE, user_id_t, ctypes.c_char_p, ctypes.c_size_t)
 
 
 class RTCLocalUserObserverInner(ctypes.Structure):
@@ -85,7 +86,8 @@ class RTCLocalUserObserverInner(ctypes.Structure):
         ("on_intra_request_received", ON_INTRA_REQUEST_RECEIVED_CALLBACK),
         ("on_remote_subscribe_fallback_to_audio_only", ON_REMOTE_SUBSCRIBE_FALLBACK_TO_AUDIO_ONLY_CALLBACK),
         ("on_stream_message", ON_STREAM_MESSAGE_CALLBACK),
-        ("on_user_state_changed", ON_USER_STATE_CHANGED_CALLBACK)
+        ("on_user_state_changed", ON_USER_STATE_CHANGED_CALLBACK),
+        ("on_audio_meta_data_received", ON_AUDIO_META_DATA_RECEIVED_CALLBACK)
     ]
 
     def __init__(self, local_user_observer: IRTCLocalUserObserver, local_user: 'LocalUser') -> None:
@@ -128,6 +130,7 @@ class RTCLocalUserObserverInner(ctypes.Structure):
         self.on_remote_subscribe_fallback_to_audio_only = ON_REMOTE_SUBSCRIBE_FALLBACK_TO_AUDIO_ONLY_CALLBACK(self._on_remote_subscribe_fallback_to_audio_only)
         self.on_stream_message = ON_STREAM_MESSAGE_CALLBACK(self._on_stream_message)
         self.on_user_state_changed = ON_USER_STATE_CHANGED_CALLBACK(self._on_user_state_changed)
+        self.on_audio_meta_data_received = ON_AUDIO_META_DATA_RECEIVED_CALLBACK(self._on_audio_meta_data_received)
 
         """
         it seems that this interface does not provide much value to the user's business,
@@ -348,3 +351,8 @@ class RTCLocalUserObserverInner(ctypes.Structure):
         logger.debug(f"LocalUserCB _on_user_state_changed: {local_user_handle}, {user_id}, {state}")
         user_id_str = user_id.decode('utf-8') if user_id else ""
         self.local_user_observer.on_user_state_changed(self.local_user, user_id_str, state)
+    def _on_audio_meta_data_received(self, local_user_handle, user_id, audio_meta_data, size):
+        user_id_str = user_id.decode('utf-8') if user_id else ""
+        bytes_from_c = bytearray(ctypes.string_at(audio_meta_data, size))
+
+        self.local_user_observer.on_audio_meta_data_received(self.local_user, user_id_str, bytes_from_c)
{agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/agora_base.py

@@ -307,6 +307,15 @@ class AgoraServiceConfig:
     channel_profile: ChannelProfileType = ChannelProfileType.CHANNEL_PROFILE_LIVE_BROADCASTING
     audio_scenario: AudioScenarioType = AudioScenarioType.AUDIO_SCENARIO_CHORUS
     use_string_uid: int = 0
+    #version 2.2.0
+    # default to 0
+    domain_limit: int = 0
+    '''
+    // if >0, when remote user muted itself, the onplaybackbeforemixing will be still called badk with active pacakage
+    // if <=0, when remote user muted itself, the onplaybackbeforemixing will be no longer called back
+    // default to 0, i.e when muted, no callback will be triggered
+    '''
+    should_callbck_when_muted: int = 0
 
 
 @dataclass(kw_only=True)
@@ -423,6 +432,13 @@ class VideoEncoderConfiguration:
     mirror_mode: int = 0
     encode_alpha: int = 0
 
+@dataclass(kw_only=True)
+class ColorSpaceType:
+    primaries_id: int = 0
+    transfer_id: int = 0
+    matrix_id: int = 0
+    range_id: int = 0
+
 
 @dataclass(kw_only=True)
 class ExternalVideoFrame:
@@ -445,6 +461,7 @@ class ExternalVideoFrame:
     alpha_buffer: bytearray = None
     fill_alpha_buffer: int = 0
    alpha_mode: int = 0
+    color_space: ColorSpaceType = field(default_factory=ColorSpaceType)
 
 
 @dataclass(kw_only=True)
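A hedged sketch of using the new field from application code: the import follows the file above, while the numeric IDs are placeholders for whatever values the native SDK's color-space enums expect (they are not enumerated in this diff).

```python
from agora.rtc.agora_base import ColorSpaceType  # dataclass added in 2.2.0, see the hunk above

def tag_color_space(frame, primaries=1, transfer=1, matrix=1, video_range=2):
    """Attach an explicit color space to an ExternalVideoFrame before it is pushed.

    `frame` is an agora.rtc.agora_base.ExternalVideoFrame built the usual way
    (buffer, size, timestamp, ...); only the new 2.2.0 field is shown here, and
    the default IDs are placeholders rather than documented values.
    """
    frame.color_space = ColorSpaceType(
        primaries_id=primaries,
        transfer_id=transfer,
        matrix_id=matrix,
        range_id=video_range,
    )
    return frame
```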
{agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/agora_service.py

@@ -92,6 +92,10 @@ class AgoraService:
         # force audio vad v2 to be enabled
         agora_parameter.set_parameters("{\"che.audio.label.enable\": true}")
 
+        #versio 2.2.0 for callback when muted
+        if config.should_callbck_when_muted > 0:
+            agora_parameter.set_parameters("{\"rtc.audio.enable_user_silence_packet\": true}")
+
         if config.log_path:
             log_size = 512 * 1024
             if config.log_size > 0:
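A sketch of opting into the muted-callback behaviour from a service configuration, assuming the usual AgoraService().initialize(config) flow from the SDK samples; required settings such as the app ID and log path are omitted here.

```python
from agora.rtc.agora_service import AgoraService          # assumed import locations
from agora.rtc.agora_base import AgoraServiceConfig

config = AgoraServiceConfig()          # app ID, log path, etc. omitted in this sketch
config.domain_limit = 0                # new in 2.2.0; 0 keeps the default behaviour
# Field name is spelled `should_callbck_when_muted` in agora_base.py; a value > 0 keeps
# the playback-before-mixing audio callback firing while the remote user is muted.
config.should_callbck_when_muted = 1

service = AgoraService()
service.initialize(config)   # initialize() is where the silence-packet parameter above is applied
```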
{agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/local_user.py

@@ -62,7 +62,7 @@ agora_local_user_subscribe_all_audio.argtypes = [AGORA_HANDLE]
 
 agora_local_user_unsubscribe_audio = agora_lib.agora_local_user_unsubscribe_audio
 agora_local_user_unsubscribe_audio.restype = AGORA_API_C_INT
-agora_local_user_unsubscribe_audio.argtypes = [AGORA_HANDLE,
+agora_local_user_unsubscribe_audio.argtypes = [AGORA_HANDLE, user_id_t]
 
 agora_local_user_unsubscribe_all_audio = agora_lib.agora_local_user_unsubscribe_all_audio
 agora_local_user_unsubscribe_all_audio.restype = AGORA_API_C_INT
@@ -184,7 +184,7 @@ agora_local_user_subscribe_all_video.argtypes = [AGORA_HANDLE, ctypes.POINTER(Vi
 
 agora_local_user_unsubscribe_video = agora_lib.agora_local_user_unsubscribe_video
 agora_local_user_unsubscribe_video.restype = AGORA_API_C_INT
-agora_local_user_unsubscribe_video.argtypes = [AGORA_HANDLE,
+agora_local_user_unsubscribe_video.argtypes = [AGORA_HANDLE, user_id_t]
 
 agora_local_user_unsubscribe_all_video = agora_lib.agora_local_user_unsubscribe_all_video
 agora_local_user_unsubscribe_all_video.restype = AGORA_API_C_INT
@@ -223,6 +223,15 @@ agora_local_user_set_audio_scenario = agora_lib.agora_local_user_set_audio_scena
 agora_local_user_set_audio_scenario.restype = AGORA_API_C_INT
 agora_local_user_set_audio_scenario.argtypes = [AGORA_HANDLE, ctypes.c_int]
 
+#verison 2.2.0
+
+#AGORA_API_C_INT agora_local_user_send_audio_meta_data(AGORA_HANDLE agora_local_user, const char* meta_data, size_t length);
+agora_local_user_send_aduio_meta_data = agora_lib.agora_local_user_send_audio_meta_data
+agora_local_user_send_aduio_meta_data.restype = AGORA_API_C_INT
+agora_local_user_send_aduio_meta_data.argtypes = [AGORA_HANDLE, ctypes.c_char_p, ctypes.c_size_t]
+
+
+
 
 class LocalUser:
     def __init__(self, local_user_handle, connection):
@@ -361,7 +370,13 @@ class LocalUser:
         return ret
 
     def subscribe_audio(self, user_id):
-
+        if user_id is None:
+            return -1
+        uid_str = user_id.encode('utf-8')
+        #ret = agora_local_user_subscribe_audio(self.user_handle, ctypes.create_string_buffer(uid_str))
+        # note:both ctypes.create_string_buffer and ctypes.c_char_p are all can change python's str to c_char_p
+        # but ctypes.c_char_p is more suitable for this case for the c api never change the content of c_char_p
+        ret = agora_local_user_subscribe_audio(self.user_handle, ctypes.c_char_p(uid_str))
         return ret
 
     def subscribe_all_audio(self):
@@ -369,7 +384,11 @@ class LocalUser:
         return ret
 
     def unsubscribe_audio(self, user_id):
-
+        #validity check
+        if user_id is None:
+            return -1
+        uid_str = user_id.encode('utf-8')
+        ret = agora_local_user_unsubscribe_audio(self.user_handle, ctypes.c_char_p(uid_str))
         if ret < 0:
             logger.error("Failed to unsubscribe audio")
         else:
@@ -485,18 +504,33 @@ class LocalUser:
     # return ret
 
     def subscribe_video(self, user_id, options: VideoSubscriptionOptions):
-
+        if user_id is None:
+            return -1
         uid_str = user_id.encode('utf-8')
-
+
+        if options is None:
+            inner = VideoSubscriptionOptionsInner()
+        else:
+            inner = VideoSubscriptionOptionsInner.create(options)
+
+        c_ptr = ctypes.byref(inner)
+        ret = agora_local_user_subscribe_video(self.user_handle, ctypes.c_char_p(uid_str), c_ptr)
         return ret
 
     def subscribe_all_video(self, options: VideoSubscriptionOptions):
-
+        if options is None:
+            inner = VideoSubscriptionOptionsInner()
+        else:
+            inner = VideoSubscriptionOptionsInner.create(options)
+        ret = agora_local_user_subscribe_all_video(self.user_handle, ctypes.byref(inner))
         return ret
 
     def unsubscribe_video(self, user_id):
-
-
+        if user_id is None:
+            return -1
+        uid_str = user_id.encode('utf-8')
+        ret = agora_local_user_unsubscribe_video(self.user_handle, ctypes.c_char_p(uid_str))
         if ret < 0:
             logger.error("Failed to unsubscribe video")
         else:
@@ -569,3 +603,12 @@ class LocalUser:
     def set_audio_scenario(self, scenario_type: AudioScenarioType):
         ret = agora_local_user_set_audio_scenario(self.user_handle, scenario_type.value)
         return ret
+    # data can be str or bytes/bytearray object,is diff to send_sream_message which is a str object
+    def send_audio_meta_data(self, data):
+        # chang to ctypes.c_char_p
+        if isinstance(data, str):
+            data = data.encode('utf-8')
+        c_data = ctypes.create_string_buffer(bytes(data))
+        size = len(data)
+        ret = agora_local_user_send_aduio_meta_data(self.user_handle, c_data, ctypes.c_size_t(size))
+        return ret
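A short usage sketch for the new sender. It assumes a LocalUser object obtained from an RTC connection in the usual way (not shown in this diff); send_audio_meta_data itself accepts str or bytes/bytearray, per the comment above.

```python
def broadcast_caption(local_user, text: str) -> int:
    """Send per-utterance metadata alongside the audio stream.

    `local_user` is an agora.rtc.local_user.LocalUser; str payloads are UTF-8
    encoded before the C call, so passing bytes explicitly is equivalent.
    """
    ret = local_user.send_audio_meta_data(text.encode("utf-8"))
    if ret < 0:
        raise RuntimeError(f"send_audio_meta_data failed: {ret}")
    return ret
```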
{agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/agora/rtc/local_user_observer.py
RENAMED

@@ -117,3 +117,6 @@ class IRTCLocalUserObserver():
 
     def on_user_state_changed(self, agora_local_user, user_id, state):
         pass
+    # data is bytearray object, is diff to on_stream_msg which is str object
+    def on_audio_meta_data_received(self, agora_local_user, user_id, data):
+        pass
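On the receiving side the metadata surfaces through this new observer callback; data arrives as a bytearray, unlike the stream-message callback, which delivers str. A minimal sketch of overriding it (register the observer with the local user as in the existing samples):

```python
from agora.rtc.local_user_observer import IRTCLocalUserObserver

class MyLocalUserObserver(IRTCLocalUserObserver):
    def on_audio_meta_data_received(self, agora_local_user, user_id, data):
        # `data` is a bytearray copied out of the C buffer; decode it only if the
        # sender transmitted text.
        try:
            payload = data.decode("utf-8")
        except UnicodeDecodeError:
            payload = repr(bytes(data))
        print(f"audio meta data from {user_id}: {payload}")
```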
{agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0/agora_python_server_sdk.egg-info}/PKG-INFO

(The diff for this file is identical to the PKG-INFO diff at the top of this page: the Version field changes from 2.1.6 to 2.2.0, and the 2025.01.08 Release 2.2.0 / 2024.12.17 Release 2.1.7 changelog entries plus the "VAD Configuration Parameters" section are appended.)
{agora_python_server_sdk-2.1.6 → agora_python_server_sdk-2.2.0}/setup.py

@@ -20,10 +20,15 @@ class CustomInstallCommand(install):
         agora_service_path = os.path.join(site.getsitepackages()[0], 'agora', 'rtc')
         sdk_dir = os.path.join(agora_service_path, "agora_sdk")
         zip_path = os.path.join(agora_service_path, "agora_rtc_sdk.zip")
-
-        url = "https://download.agora.io/sdk/release/agora_rtc_sdk-x86_64-linux-gnu-v4.4.30-20241024_101940-398537.zip"
+        '''# version before 2.2.0
+        #url = "https://download.agora.io/sdk/release/agora_rtc_sdk-x86_64-linux-gnu-v4.4.30-20241024_101940-398537.zip"
+        #url = "https://download.agora.io/sdk/release/agora_rtc_sdk_mac_rel.v4.4.30_22472_FULL_20241024_1224_398653.zip"
+        '''
+        # verison 2.2.0
+        url = "https://download.agora.io/sdk/release/agora_rtc_sdk-x86_64-linux-gnu-v4.4.31-20241223_111509-491956.zip"
         if sys.platform == 'darwin':
-            url = "https://download.agora.io/sdk/release/
+            url = "https://download.agora.io/sdk/release/agora_sdk_mac_v4.4.31_23136_FULL_20241223_1245_492039.zip"
+
 
         if os.path.exists(sdk_dir):
             os.system(f"rm -rf {sdk_dir}")
@@ -45,7 +50,7 @@ class CustomInstallCommand(install):
 
 setup(
     name='agora_python_server_sdk',
-    version='2.1.6',
+    version='2.2.0',
     description='A Python SDK for Agora Server',
     long_description=open('README.md').read(),
     long_description_content_type='text/markdown',
All remaining files in the listing above (MANIFEST.in, the untouched agora/rtc modules, the egg-info metadata files, pyproject.toml, and setup.cfg) were moved to the 2.2.0 directory without content changes.