sarvamai 0.1.18a0__tar.gz → 0.1.19a1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (220)
  1. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/PKG-INFO +1 -1
  2. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/pyproject.toml +1 -1
  3. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/__init__.py +8 -0
  4. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/core/client_wrapper.py +2 -2
  5. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/__init__.py +2 -0
  6. sarvamai-0.1.19a1/src/sarvamai/requests/stt_flush_signal.py +21 -0
  7. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_streaming/__init__.py +2 -0
  8. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_streaming/client.py +13 -0
  9. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_streaming/raw_client.py +13 -0
  10. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_streaming/socket_client.py +68 -13
  11. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_streaming/types/__init__.py +2 -0
  12. sarvamai-0.1.19a1/src/sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_flush_signal.py +5 -0
  13. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_translate_streaming/__init__.py +2 -0
  14. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_translate_streaming/client.py +13 -0
  15. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_translate_streaming/raw_client.py +13 -0
  16. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_translate_streaming/types/__init__.py +2 -0
  17. sarvamai-0.1.19a1/src/sarvamai/speech_to_text_translate_streaming/types/speech_to_text_translate_streaming_flush_signal.py +5 -0
  18. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/__init__.py +2 -0
  19. sarvamai-0.1.19a1/src/sarvamai/types/stt_flush_signal.py +31 -0
  20. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/README.md +0 -0
  21. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/chat/__init__.py +0 -0
  22. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/chat/client.py +0 -0
  23. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/chat/raw_client.py +0 -0
  24. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/client.py +0 -0
  25. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/core/__init__.py +0 -0
  26. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/core/api_error.py +0 -0
  27. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/core/datetime_utils.py +0 -0
  28. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/core/events.py +0 -0
  29. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/core/file.py +0 -0
  30. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/core/force_multipart.py +0 -0
  31. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/core/http_client.py +0 -0
  32. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/core/http_response.py +0 -0
  33. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/core/jsonable_encoder.py +0 -0
  34. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/core/pydantic_utilities.py +0 -0
  35. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/core/query_encoder.py +0 -0
  36. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/core/remove_none_from_dict.py +0 -0
  37. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/core/request_options.py +0 -0
  38. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/core/serialization.py +0 -0
  39. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/environment.py +0 -0
  40. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/errors/__init__.py +0 -0
  41. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/errors/bad_request_error.py +0 -0
  42. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/errors/forbidden_error.py +0 -0
  43. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/errors/internal_server_error.py +0 -0
  44. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/errors/service_unavailable_error.py +0 -0
  45. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/errors/too_many_requests_error.py +0 -0
  46. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/errors/unprocessable_entity_error.py +0 -0
  47. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/play.py +0 -0
  48. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/py.typed +0 -0
  49. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/audio_data.py +0 -0
  50. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/audio_message.py +0 -0
  51. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/audio_output.py +0 -0
  52. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/audio_output_data.py +0 -0
  53. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/base_job_parameters.py +0 -0
  54. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/bulk_job_callback.py +0 -0
  55. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/bulk_job_init_response_v_1.py +0 -0
  56. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/chat_completion_request_assistant_message.py +0 -0
  57. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/chat_completion_request_message.py +0 -0
  58. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/chat_completion_request_system_message.py +0 -0
  59. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/chat_completion_request_user_message.py +0 -0
  60. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/chat_completion_response_message.py +0 -0
  61. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/choice.py +0 -0
  62. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/completion_usage.py +0 -0
  63. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/config_message.py +0 -0
  64. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/configure_connection.py +0 -0
  65. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/configure_connection_data.py +0 -0
  66. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/create_chat_completion_response.py +0 -0
  67. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/diarized_entry.py +0 -0
  68. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/diarized_transcript.py +0 -0
  69. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/error_data.py +0 -0
  70. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/error_details.py +0 -0
  71. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/error_message.py +0 -0
  72. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/error_response.py +0 -0
  73. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/error_response_data.py +0 -0
  74. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/events_data.py +0 -0
  75. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/file_signed_url_details.py +0 -0
  76. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/files_download_response.py +0 -0
  77. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/files_request.py +0 -0
  78. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/files_upload_response.py +0 -0
  79. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/flush_signal.py +0 -0
  80. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/job_status_v_1_response.py +0 -0
  81. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/language_identification_response.py +0 -0
  82. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/ping_signal.py +0 -0
  83. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/send_text.py +0 -0
  84. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/send_text_data.py +0 -0
  85. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/speech_to_text_job_parameters.py +0 -0
  86. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/speech_to_text_response.py +0 -0
  87. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/speech_to_text_response_data.py +0 -0
  88. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/speech_to_text_streaming_response.py +0 -0
  89. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/speech_to_text_transcription_data.py +0 -0
  90. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/speech_to_text_translate_job_parameters.py +0 -0
  91. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/speech_to_text_translate_response.py +0 -0
  92. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/speech_to_text_translate_response_data.py +0 -0
  93. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/speech_to_text_translate_streaming_response.py +0 -0
  94. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/speech_to_text_translate_transcription_data.py +0 -0
  95. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/stop_configuration.py +0 -0
  96. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/task_detail_v_1.py +0 -0
  97. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/task_file_details.py +0 -0
  98. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/text_to_speech_response.py +0 -0
  99. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/timestamps_model.py +0 -0
  100. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/transcription_metrics.py +0 -0
  101. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/translation_response.py +0 -0
  102. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/transliteration_response.py +0 -0
  103. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text/__init__.py +0 -0
  104. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text/client.py +0 -0
  105. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text/raw_client.py +0 -0
  106. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_job/__init__.py +0 -0
  107. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_job/client.py +0 -0
  108. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_job/job.py +0 -0
  109. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_job/raw_client.py +0 -0
  110. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_high_vad_sensitivity.py +0 -0
  111. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_language_code.py +0 -0
  112. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_model.py +0 -0
  113. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_vad_signals.py +0 -0
  114. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_translate_job/__init__.py +0 -0
  115. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_translate_job/client.py +0 -0
  116. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_translate_job/job.py +0 -0
  117. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_translate_job/raw_client.py +0 -0
  118. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_translate_streaming/socket_client.py +0 -0
  119. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_translate_streaming/types/speech_to_text_translate_streaming_high_vad_sensitivity.py +0 -0
  120. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_translate_streaming/types/speech_to_text_translate_streaming_model.py +0 -0
  121. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_translate_streaming/types/speech_to_text_translate_streaming_vad_signals.py +0 -0
  122. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/text/__init__.py +0 -0
  123. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/text/client.py +0 -0
  124. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/text/raw_client.py +0 -0
  125. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/text_to_speech/__init__.py +0 -0
  126. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/text_to_speech/client.py +0 -0
  127. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/text_to_speech/raw_client.py +0 -0
  128. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/text_to_speech_streaming/__init__.py +0 -0
  129. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/text_to_speech_streaming/client.py +0 -0
  130. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/text_to_speech_streaming/raw_client.py +0 -0
  131. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/text_to_speech_streaming/socket_client.py +0 -0
  132. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/audio_data.py +0 -0
  133. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/audio_message.py +0 -0
  134. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/audio_output.py +0 -0
  135. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/audio_output_data.py +0 -0
  136. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/base_job_parameters.py +0 -0
  137. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/bulk_job_callback.py +0 -0
  138. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/bulk_job_init_response_v_1.py +0 -0
  139. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/chat_completion_request_assistant_message.py +0 -0
  140. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/chat_completion_request_message.py +0 -0
  141. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/chat_completion_request_system_message.py +0 -0
  142. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/chat_completion_request_user_message.py +0 -0
  143. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/chat_completion_response_message.py +0 -0
  144. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/choice.py +0 -0
  145. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/completion_usage.py +0 -0
  146. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/config_message.py +0 -0
  147. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/configure_connection.py +0 -0
  148. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/configure_connection_data.py +0 -0
  149. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/configure_connection_data_output_audio_bitrate.py +0 -0
  150. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/configure_connection_data_output_audio_codec.py +0 -0
  151. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/configure_connection_data_speaker.py +0 -0
  152. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/configure_connection_data_target_language_code.py +0 -0
  153. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/create_chat_completion_response.py +0 -0
  154. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/diarized_entry.py +0 -0
  155. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/diarized_transcript.py +0 -0
  156. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/error_code.py +0 -0
  157. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/error_data.py +0 -0
  158. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/error_details.py +0 -0
  159. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/error_message.py +0 -0
  160. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/error_response.py +0 -0
  161. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/error_response_data.py +0 -0
  162. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/events_data.py +0 -0
  163. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/events_data_signal_type.py +0 -0
  164. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/file_signed_url_details.py +0 -0
  165. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/files_download_response.py +0 -0
  166. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/files_request.py +0 -0
  167. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/files_upload_response.py +0 -0
  168. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/finish_reason.py +0 -0
  169. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/flush_signal.py +0 -0
  170. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/input_audio_codec.py +0 -0
  171. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/job_state.py +0 -0
  172. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/job_status_v_1_response.py +0 -0
  173. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/language_identification_response.py +0 -0
  174. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/numerals_format.py +0 -0
  175. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/ping_signal.py +0 -0
  176. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/reasoning_effort.py +0 -0
  177. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/response_type.py +0 -0
  178. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/role.py +0 -0
  179. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/sarvam_model_ids.py +0 -0
  180. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/send_text.py +0 -0
  181. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/send_text_data.py +0 -0
  182. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/speech_sample_rate.py +0 -0
  183. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/speech_to_text_job_parameters.py +0 -0
  184. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/speech_to_text_language.py +0 -0
  185. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/speech_to_text_model.py +0 -0
  186. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/speech_to_text_response.py +0 -0
  187. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/speech_to_text_response_data.py +0 -0
  188. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/speech_to_text_streaming_response.py +0 -0
  189. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/speech_to_text_transcription_data.py +0 -0
  190. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/speech_to_text_translate_job_parameters.py +0 -0
  191. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/speech_to_text_translate_language.py +0 -0
  192. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/speech_to_text_translate_model.py +0 -0
  193. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/speech_to_text_translate_response.py +0 -0
  194. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/speech_to_text_translate_response_data.py +0 -0
  195. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/speech_to_text_translate_streaming_response.py +0 -0
  196. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/speech_to_text_translate_transcription_data.py +0 -0
  197. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/spoken_form_numerals_format.py +0 -0
  198. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/stop_configuration.py +0 -0
  199. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/storage_container_type.py +0 -0
  200. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/task_detail_v_1.py +0 -0
  201. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/task_file_details.py +0 -0
  202. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/task_state.py +0 -0
  203. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/text_to_speech_language.py +0 -0
  204. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/text_to_speech_model.py +0 -0
  205. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/text_to_speech_output_audio_codec.py +0 -0
  206. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/text_to_speech_response.py +0 -0
  207. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/text_to_speech_speaker.py +0 -0
  208. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/timestamps_model.py +0 -0
  209. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/transcription_metrics.py +0 -0
  210. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/translate_mode.py +0 -0
  211. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/translate_model.py +0 -0
  212. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/translate_source_language.py +0 -0
  213. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/translate_speaker_gender.py +0 -0
  214. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/translate_target_language.py +0 -0
  215. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/translation_response.py +0 -0
  216. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/translatiterate_target_language.py +0 -0
  217. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/transliterate_mode.py +0 -0
  218. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/transliterate_source_language.py +0 -0
  219. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/transliteration_response.py +0 -0
  220. {sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/version.py +0 -0
{sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: sarvamai
- Version: 0.1.18a0
+ Version: 0.1.19a1
  Summary:
  Requires-Python: >=3.8,<4.0
  Classifier: Intended Audience :: Developers
{sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/pyproject.toml
@@ -3,7 +3,7 @@ name = "sarvamai"

  [tool.poetry]
  name = "sarvamai"
- version = "0.1.18a0"
+ version = "0.1.19a1"
  description = ""
  readme = "README.md"
  authors = []
{sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/__init__.py
@@ -74,6 +74,7 @@ from .types import (
  SpokenFormNumeralsFormat,
  StopConfiguration,
  StorageContainerType,
+ SttFlushSignal,
  TaskDetailV1,
  TaskFileDetails,
  TaskState,
@@ -167,6 +168,7 @@ from .requests import (
  SpeechToTextTranslateStreamingResponseParams,
  SpeechToTextTranslateTranscriptionDataParams,
  StopConfigurationParams,
+ SttFlushSignalParams,
  TaskDetailV1Params,
  TaskFileDetailsParams,
  TextToSpeechResponseParams,
@@ -176,12 +178,14 @@ from .requests import (
  TransliterationResponseParams,
  )
  from .speech_to_text_streaming import (
+ SpeechToTextStreamingFlushSignal,
  SpeechToTextStreamingHighVadSensitivity,
  SpeechToTextStreamingLanguageCode,
  SpeechToTextStreamingModel,
  SpeechToTextStreamingVadSignals,
  )
  from .speech_to_text_translate_streaming import (
+ SpeechToTextTranslateStreamingFlushSignal,
  SpeechToTextTranslateStreamingHighVadSensitivity,
  SpeechToTextTranslateStreamingModel,
  SpeechToTextTranslateStreamingVadSignals,
@@ -297,6 +301,7 @@ __all__ = [
  "SpeechToTextResponseData",
  "SpeechToTextResponseDataParams",
  "SpeechToTextResponseParams",
+ "SpeechToTextStreamingFlushSignal",
  "SpeechToTextStreamingHighVadSensitivity",
  "SpeechToTextStreamingLanguageCode",
  "SpeechToTextStreamingModel",
@@ -313,6 +318,7 @@ __all__ = [
  "SpeechToTextTranslateResponseData",
  "SpeechToTextTranslateResponseDataParams",
  "SpeechToTextTranslateResponseParams",
+ "SpeechToTextTranslateStreamingFlushSignal",
  "SpeechToTextTranslateStreamingHighVadSensitivity",
  "SpeechToTextTranslateStreamingModel",
  "SpeechToTextTranslateStreamingResponse",
@@ -324,6 +330,8 @@ __all__ = [
  "StopConfiguration",
  "StopConfigurationParams",
  "StorageContainerType",
+ "SttFlushSignal",
+ "SttFlushSignalParams",
  "TaskDetailV1",
  "TaskDetailV1Params",
  "TaskFileDetails",
{sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/core/client_wrapper.py
@@ -23,10 +23,10 @@ class BaseClientWrapper:

  def get_headers(self) -> typing.Dict[str, str]:
  headers: typing.Dict[str, str] = {
- "User-Agent": "sarvamai/0.1.18a0",
+ "User-Agent": "sarvamai/0.1.19a1",
  "X-Fern-Language": "Python",
  "X-Fern-SDK-Name": "sarvamai",
- "X-Fern-SDK-Version": "0.1.18a0",
+ "X-Fern-SDK-Version": "0.1.19a1",
  **(self.get_custom_headers() or {}),
  }
  headers["api-subscription-key"] = self.api_subscription_key
{sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/requests/__init__.py
@@ -54,6 +54,7 @@ from .speech_to_text_translate_response_data import SpeechToTextTranslateRespons
  from .speech_to_text_translate_streaming_response import SpeechToTextTranslateStreamingResponseParams
  from .speech_to_text_translate_transcription_data import SpeechToTextTranslateTranscriptionDataParams
  from .stop_configuration import StopConfigurationParams
+ from .stt_flush_signal import SttFlushSignalParams
  from .task_detail_v_1 import TaskDetailV1Params
  from .task_file_details import TaskFileDetailsParams
  from .text_to_speech_response import TextToSpeechResponseParams
@@ -113,6 +114,7 @@ __all__ = [
  "SpeechToTextTranslateStreamingResponseParams",
  "SpeechToTextTranslateTranscriptionDataParams",
  "StopConfigurationParams",
+ "SttFlushSignalParams",
  "TaskDetailV1Params",
  "TaskFileDetailsParams",
  "TextToSpeechResponseParams",
sarvamai-0.1.19a1/src/sarvamai/requests/stt_flush_signal.py (new file)
@@ -0,0 +1,21 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ import typing_extensions
+
+
+ class SttFlushSignalParams(typing_extensions.TypedDict):
+     """
+     Signal to flush the audio buffer and force finalize partial transcriptions/translations
+     """
+
+     type: typing.Literal["flush"]
+     """
+     Type identifier for flush signal
+     """
+
+     reason: typing_extensions.NotRequired[str]
+     """
+     Optional reason for flushing (e.g., "end_of_segment", "manual_flush")
+     """
{sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_streaming/__init__.py
@@ -3,6 +3,7 @@
  # isort: skip_file

  from .types import (
+ SpeechToTextStreamingFlushSignal,
  SpeechToTextStreamingHighVadSensitivity,
  SpeechToTextStreamingLanguageCode,
  SpeechToTextStreamingModel,
@@ -10,6 +11,7 @@ from .types import (
  )

  __all__ = [
+ "SpeechToTextStreamingFlushSignal",
  "SpeechToTextStreamingHighVadSensitivity",
  "SpeechToTextStreamingLanguageCode",
  "SpeechToTextStreamingModel",
{sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_streaming/client.py
@@ -11,6 +11,7 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
  from ..core.request_options import RequestOptions
  from .raw_client import AsyncRawSpeechToTextStreamingClient, RawSpeechToTextStreamingClient
  from .socket_client import AsyncSpeechToTextStreamingSocketClient, SpeechToTextStreamingSocketClient
+ from .types.speech_to_text_streaming_flush_signal import SpeechToTextStreamingFlushSignal
  from .types.speech_to_text_streaming_high_vad_sensitivity import SpeechToTextStreamingHighVadSensitivity
  from .types.speech_to_text_streaming_language_code import SpeechToTextStreamingLanguageCode
  from .types.speech_to_text_streaming_model import SpeechToTextStreamingModel
@@ -45,6 +46,7 @@ class SpeechToTextStreamingClient:
  model: typing.Optional[SpeechToTextStreamingModel] = None,
  high_vad_sensitivity: typing.Optional[SpeechToTextStreamingHighVadSensitivity] = None,
  vad_signals: typing.Optional[SpeechToTextStreamingVadSignals] = None,
+ flush_signal: typing.Optional[SpeechToTextStreamingFlushSignal] = None,
  api_subscription_key: typing.Optional[str] = None,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> typing.Iterator[SpeechToTextStreamingSocketClient]:
@@ -65,6 +67,9 @@ class SpeechToTextStreamingClient:
  vad_signals : typing.Optional[SpeechToTextStreamingVadSignals]
  Enable VAD signals in response

+ flush_signal : typing.Optional[SpeechToTextStreamingFlushSignal]
+ Signal to flush the audio buffer and finalize transcription
+
  api_subscription_key : typing.Optional[str]
  API subscription key for authentication

@@ -85,6 +90,8 @@ class SpeechToTextStreamingClient:
  query_params = query_params.add("high_vad_sensitivity", high_vad_sensitivity)
  if vad_signals is not None:
  query_params = query_params.add("vad_signals", vad_signals)
+ if flush_signal is not None:
+ query_params = query_params.add("flush_signal", flush_signal)
  ws_url = ws_url + f"?{query_params}"
  headers = self._raw_client._client_wrapper.get_headers()
  if api_subscription_key is not None:
@@ -132,6 +139,7 @@ class AsyncSpeechToTextStreamingClient:
  model: typing.Optional[SpeechToTextStreamingModel] = None,
  high_vad_sensitivity: typing.Optional[SpeechToTextStreamingHighVadSensitivity] = None,
  vad_signals: typing.Optional[SpeechToTextStreamingVadSignals] = None,
+ flush_signal: typing.Optional[SpeechToTextStreamingFlushSignal] = None,
  api_subscription_key: typing.Optional[str] = None,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> typing.AsyncIterator[AsyncSpeechToTextStreamingSocketClient]:
@@ -152,6 +160,9 @@ class AsyncSpeechToTextStreamingClient:
  vad_signals : typing.Optional[SpeechToTextStreamingVadSignals]
  Enable VAD signals in response

+ flush_signal : typing.Optional[SpeechToTextStreamingFlushSignal]
+ Signal to flush the audio buffer and finalize transcription
+
  api_subscription_key : typing.Optional[str]
  API subscription key for authentication

@@ -172,6 +183,8 @@ class AsyncSpeechToTextStreamingClient:
  query_params = query_params.add("high_vad_sensitivity", high_vad_sensitivity)
  if vad_signals is not None:
  query_params = query_params.add("vad_signals", vad_signals)
+ if flush_signal is not None:
+ query_params = query_params.add("flush_signal", flush_signal)
  ws_url = ws_url + f"?{query_params}"
  headers = self._raw_client._client_wrapper.get_headers()
  if api_subscription_key is not None:
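With this change (mirrored in raw_client.py below), callers can pass `flush_signal` when opening the streaming websocket; it is appended to the connection URL as a query parameter, exactly like `vad_signals`. A hedged usage sketch — the `SarvamAI` client class, the `connect` method name, and the `language_code` keyword are assumptions based on the generated client and are not shown in this diff:

```python
from sarvamai import SarvamAI  # assumed top-level client class

client = SarvamAI(api_subscription_key="YOUR_API_SUBSCRIPTION_KEY")

# flush_signal accepts "true"/"false" per the type alias added further down.
with client.speech_to_text_streaming.connect(  # assumed method name
    language_code="hi-IN",
    flush_signal="true",
) as socket:
    ...  # stream audio chunks, then read transcription responses
```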
{sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_streaming/raw_client.py
@@ -10,6 +10,7 @@ from ..core.api_error import ApiError
  from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
  from ..core.request_options import RequestOptions
  from .socket_client import AsyncSpeechToTextStreamingSocketClient, SpeechToTextStreamingSocketClient
+ from .types.speech_to_text_streaming_flush_signal import SpeechToTextStreamingFlushSignal
  from .types.speech_to_text_streaming_high_vad_sensitivity import SpeechToTextStreamingHighVadSensitivity
  from .types.speech_to_text_streaming_language_code import SpeechToTextStreamingLanguageCode
  from .types.speech_to_text_streaming_model import SpeechToTextStreamingModel
@@ -33,6 +34,7 @@ class RawSpeechToTextStreamingClient:
  model: typing.Optional[SpeechToTextStreamingModel] = None,
  high_vad_sensitivity: typing.Optional[SpeechToTextStreamingHighVadSensitivity] = None,
  vad_signals: typing.Optional[SpeechToTextStreamingVadSignals] = None,
+ flush_signal: typing.Optional[SpeechToTextStreamingFlushSignal] = None,
  api_subscription_key: typing.Optional[str] = None,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> typing.Iterator[SpeechToTextStreamingSocketClient]:
@@ -53,6 +55,9 @@ class RawSpeechToTextStreamingClient:
  vad_signals : typing.Optional[SpeechToTextStreamingVadSignals]
  Enable VAD signals in response

+ flush_signal : typing.Optional[SpeechToTextStreamingFlushSignal]
+ Signal to flush the audio buffer and finalize transcription
+
  api_subscription_key : typing.Optional[str]
  API subscription key for authentication

@@ -73,6 +78,8 @@ class RawSpeechToTextStreamingClient:
  query_params = query_params.add("high_vad_sensitivity", high_vad_sensitivity)
  if vad_signals is not None:
  query_params = query_params.add("vad_signals", vad_signals)
+ if flush_signal is not None:
+ query_params = query_params.add("flush_signal", flush_signal)
  ws_url = ws_url + f"?{query_params}"
  headers = self._client_wrapper.get_headers()
  if api_subscription_key is not None:
@@ -109,6 +116,7 @@ class AsyncRawSpeechToTextStreamingClient:
  model: typing.Optional[SpeechToTextStreamingModel] = None,
  high_vad_sensitivity: typing.Optional[SpeechToTextStreamingHighVadSensitivity] = None,
  vad_signals: typing.Optional[SpeechToTextStreamingVadSignals] = None,
+ flush_signal: typing.Optional[SpeechToTextStreamingFlushSignal] = None,
  api_subscription_key: typing.Optional[str] = None,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> typing.AsyncIterator[AsyncSpeechToTextStreamingSocketClient]:
@@ -129,6 +137,9 @@ class AsyncRawSpeechToTextStreamingClient:
  vad_signals : typing.Optional[SpeechToTextStreamingVadSignals]
  Enable VAD signals in response

+ flush_signal : typing.Optional[SpeechToTextStreamingFlushSignal]
+ Signal to flush the audio buffer and finalize transcription
+
  api_subscription_key : typing.Optional[str]
  API subscription key for authentication

@@ -149,6 +160,8 @@ class AsyncRawSpeechToTextStreamingClient:
  query_params = query_params.add("high_vad_sensitivity", high_vad_sensitivity)
  if vad_signals is not None:
  query_params = query_params.add("vad_signals", vad_signals)
+ if flush_signal is not None:
+ query_params = query_params.add("flush_signal", flush_signal)
  ws_url = ws_url + f"?{query_params}"
  headers = self._client_wrapper.get_headers()
  if api_subscription_key is not None:
{sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_streaming/socket_client.py
@@ -9,7 +9,10 @@ from ..core.events import EventEmitterMixin, EventType
  from ..core.pydantic_utilities import parse_obj_as
  from ..types.audio_data import AudioData
  from ..types.audio_message import AudioMessage
- from ..types.speech_to_text_streaming_response import SpeechToTextStreamingResponse
+ from ..types.speech_to_text_streaming_response import (
+ SpeechToTextStreamingResponse,
+ )
+ from ..types.stt_flush_signal import SttFlushSignal

  SpeechToTextStreamingSocketClientResponse = typing.Union[SpeechToTextStreamingResponse]

@@ -22,7 +25,9 @@ class AsyncSpeechToTextStreamingSocketClient:
  async def __aiter__(self):
  async for message in self._websocket:
  message = json.loads(message) if isinstance(message, str) else message
- yield parse_obj_as(SpeechToTextStreamingSocketClientResponse, message) # type: ignore
+ yield parse_obj_as(
+ SpeechToTextStreamingSocketClientResponse, message
+ ) # type: ignore

  async def start_listening(self):
  """
@@ -37,8 +42,14 @@ class AsyncSpeechToTextStreamingSocketClient(EventEmitterMixin):
  self._emit(EventType.OPEN, None)
  try:
  async for raw_message in self._websocket:
- raw_message = json.loads(raw_message) if isinstance(raw_message, str) else raw_message
- parsed = parse_obj_as(SpeechToTextStreamingSocketClientResponse, raw_message) # type: ignore
+ raw_message = (
+ json.loads(raw_message)
+ if isinstance(raw_message, str)
+ else raw_message
+ )
+ parsed = parse_obj_as(
+ SpeechToTextStreamingSocketClientResponse, raw_message
+ ) # type: ignore
  self._emit(EventType.MESSAGE, parsed)
  except websockets.WebSocketException as exc:
  self._emit(EventType.ERROR, exc)
@@ -54,10 +65,26 @@ class AsyncSpeechToTextStreamingSocketClient(EventEmitterMixin):
  """

  return await self._send_speech_to_text_streaming_audio_message(
- message=AudioMessage(audio=AudioData(data=audio, sample_rate=sample_rate, encoding=encoding))
+ message=AudioMessage(
+ audio=AudioData(data=audio, sample_rate=sample_rate, encoding=encoding)
+ )
  )

- async def _send_speech_to_text_streaming_audio_message(self, message: AudioMessage) -> None:
+ async def flush(self, reason: typing.Optional[str] = None) -> None:
+ """
+ Signal to flush the audio buffer and force finalize partial transcriptions.
+ Use this to force processing of any remaining audio that hasn't been
+ transcribed yet.
+
+ :param reason: Optional reason for flushing (e.g., "end_of_segment",
+ "manual_flush")
+ """
+ message = SttFlushSignal(reason=reason)
+ await self._send_model(message)
+
+ async def _send_speech_to_text_streaming_audio_message(
+ self, message: AudioMessage
+ ) -> None:
  """
  Send a message to the websocket connection.
  The message will be sent as a AudioMessage.
@@ -70,7 +97,9 @@ class AsyncSpeechToTextStreamingSocketClient(EventEmitterMixin):
  """
  data = await self._websocket.recv()
  data = json.loads(data) if isinstance(data, str) else data
- return parse_obj_as(SpeechToTextStreamingSocketClientResponse, data) # type: ignore
+ return parse_obj_as(
+ SpeechToTextStreamingSocketClientResponse, data
+ ) # type: ignore

  async def _send(self, data: typing.Any) -> None:
  """
@@ -95,7 +124,9 @@ class SpeechToTextStreamingSocketClient(EventEmitterMixin):
  def __iter__(self):
  for message in self._websocket:
  message = json.loads(message) if isinstance(message, str) else message
- yield parse_obj_as(SpeechToTextStreamingSocketClientResponse, message) # type: ignore
+ yield parse_obj_as(
+ SpeechToTextStreamingSocketClientResponse, message
+ ) # type: ignore

  def start_listening(self):
  """
@@ -110,8 +141,14 @@ class SpeechToTextStreamingSocketClient(EventEmitterMixin):
  self._emit(EventType.OPEN, None)
  try:
  for raw_message in self._websocket:
- raw_message = json.loads(raw_message) if isinstance(raw_message, str) else raw_message
- parsed = parse_obj_as(SpeechToTextStreamingSocketClientResponse, raw_message) # type: ignore
+ raw_message = (
+ json.loads(raw_message)
+ if isinstance(raw_message, str)
+ else raw_message
+ )
+ parsed = parse_obj_as(
+ SpeechToTextStreamingSocketClientResponse, raw_message
+ ) # type: ignore
  self._emit(EventType.MESSAGE, parsed)
  except websockets.WebSocketException as exc:
  self._emit(EventType.ERROR, exc)
@@ -126,18 +163,36 @@ class SpeechToTextStreamingSocketClient(EventEmitterMixin):
  :param sample_rate (Optional): Audio sample rate in Hz (default is 16000)
  """
  return self._send_speech_to_text_streaming_audio_message(
- message=AudioMessage(audio=AudioData(data=audio, sample_rate=sample_rate, encoding=encoding))
+ message=AudioMessage(
+ audio=AudioData(data=audio, sample_rate=sample_rate, encoding=encoding)
+ )
  )

+ def flush(self, reason: typing.Optional[str] = None) -> None:
+ """
+ Signal to flush the audio buffer and force finalize partial transcriptions.
+ Use this to force processing of any remaining audio that hasn't been
+ transcribed yet.
+
+ :param reason: Optional reason for flushing (e.g., "end_of_segment",
+ "manual_flush")
+ """
+ message = SttFlushSignal(reason=reason)
+ self._send_model(message)
+
  def recv(self) -> SpeechToTextStreamingSocketClientResponse:
  """
  Receive a message from the websocket connection.
  """
  data = self._websocket.recv()
  data = json.loads(data) if isinstance(data, str) else data
- return parse_obj_as(SpeechToTextStreamingSocketClientResponse, data) # type: ignore
+ return parse_obj_as(
+ SpeechToTextStreamingSocketClientResponse, data
+ ) # type: ignore

- def _send_speech_to_text_streaming_audio_message(self, message: AudioMessage) -> None:
+ def _send_speech_to_text_streaming_audio_message(
+ self, message: AudioMessage
+ ) -> None:
  """
  Send a message to the websocket connection.
  The message will be sent as a AudioMessage.
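Beyond Black-style reformatting of existing calls, the net effect of the socket_client.py changes is a new public `flush()` method on both the sync and async socket clients, which sends an `SttFlushSignal` frame over the open websocket. A minimal sketch of how it might be used; the `SarvamAI` class, `connect` method, and `language_code` keyword are assumptions carried over from the earlier sketch, while `flush()` and `recv()` appear in this hunk:

```python
from sarvamai import SarvamAI  # assumed client class, as in the earlier sketch

client = SarvamAI(api_subscription_key="YOUR_API_SUBSCRIPTION_KEY")

with client.speech_to_text_streaming.connect(language_code="hi-IN") as socket:  # assumed
    # ... send audio chunks over `socket` here ...

    # New in 0.1.19a1: ask the server to finalize any partial transcript for
    # the audio sent so far; flush() sends an SttFlushSignal frame internally.
    socket.flush(reason="manual_flush")

    response = socket.recv()  # parsed as a SpeechToTextStreamingResponse
```

The async socket client gains the same method as a coroutine (`await socket.flush(...)`); both variants delegate to `self._send_model(...)`, which is defined outside this diff.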
{sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_streaming/types/__init__.py
@@ -2,12 +2,14 @@

  # isort: skip_file

+ from .speech_to_text_streaming_flush_signal import SpeechToTextStreamingFlushSignal
  from .speech_to_text_streaming_high_vad_sensitivity import SpeechToTextStreamingHighVadSensitivity
  from .speech_to_text_streaming_language_code import SpeechToTextStreamingLanguageCode
  from .speech_to_text_streaming_model import SpeechToTextStreamingModel
  from .speech_to_text_streaming_vad_signals import SpeechToTextStreamingVadSignals

  __all__ = [
+ "SpeechToTextStreamingFlushSignal",
  "SpeechToTextStreamingHighVadSensitivity",
  "SpeechToTextStreamingLanguageCode",
  "SpeechToTextStreamingModel",
sarvamai-0.1.19a1/src/sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_flush_signal.py (new file)
@@ -0,0 +1,5 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ SpeechToTextStreamingFlushSignal = typing.Union[typing.Literal["true", "false"], typing.Any]
{sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_translate_streaming/__init__.py
@@ -3,12 +3,14 @@
  # isort: skip_file

  from .types import (
+ SpeechToTextTranslateStreamingFlushSignal,
  SpeechToTextTranslateStreamingHighVadSensitivity,
  SpeechToTextTranslateStreamingModel,
  SpeechToTextTranslateStreamingVadSignals,
  )

  __all__ = [
+ "SpeechToTextTranslateStreamingFlushSignal",
  "SpeechToTextTranslateStreamingHighVadSensitivity",
  "SpeechToTextTranslateStreamingModel",
  "SpeechToTextTranslateStreamingVadSignals",
{sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_translate_streaming/client.py
@@ -11,6 +11,7 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
  from ..core.request_options import RequestOptions
  from .raw_client import AsyncRawSpeechToTextTranslateStreamingClient, RawSpeechToTextTranslateStreamingClient
  from .socket_client import AsyncSpeechToTextTranslateStreamingSocketClient, SpeechToTextTranslateStreamingSocketClient
+ from .types.speech_to_text_translate_streaming_flush_signal import SpeechToTextTranslateStreamingFlushSignal
  from .types.speech_to_text_translate_streaming_high_vad_sensitivity import (
  SpeechToTextTranslateStreamingHighVadSensitivity,
  )
@@ -45,6 +46,7 @@ class SpeechToTextTranslateStreamingClient:
  model: typing.Optional[SpeechToTextTranslateStreamingModel] = None,
  high_vad_sensitivity: typing.Optional[SpeechToTextTranslateStreamingHighVadSensitivity] = None,
  vad_signals: typing.Optional[SpeechToTextTranslateStreamingVadSignals] = None,
+ flush_signal: typing.Optional[SpeechToTextTranslateStreamingFlushSignal] = None,
  api_subscription_key: typing.Optional[str] = None,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> typing.Iterator[SpeechToTextTranslateStreamingSocketClient]:
@@ -62,6 +64,9 @@
  vad_signals : typing.Optional[SpeechToTextTranslateStreamingVadSignals]
  Enable VAD signals in response

+ flush_signal : typing.Optional[SpeechToTextTranslateStreamingFlushSignal]
+ Signal to flush the audio buffer and finalize transcription and translation
+
  api_subscription_key : typing.Optional[str]
  API subscription key for authentication

@@ -80,6 +85,8 @@
  query_params = query_params.add("high_vad_sensitivity", high_vad_sensitivity)
  if vad_signals is not None:
  query_params = query_params.add("vad_signals", vad_signals)
+ if flush_signal is not None:
+ query_params = query_params.add("flush_signal", flush_signal)
  ws_url = ws_url + f"?{query_params}"
  headers = self._raw_client._client_wrapper.get_headers()
  if api_subscription_key is not None:
@@ -126,6 +133,7 @@ class AsyncSpeechToTextTranslateStreamingClient:
  model: typing.Optional[SpeechToTextTranslateStreamingModel] = None,
  high_vad_sensitivity: typing.Optional[SpeechToTextTranslateStreamingHighVadSensitivity] = None,
  vad_signals: typing.Optional[SpeechToTextTranslateStreamingVadSignals] = None,
+ flush_signal: typing.Optional[SpeechToTextTranslateStreamingFlushSignal] = None,
  api_subscription_key: typing.Optional[str] = None,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> typing.AsyncIterator[AsyncSpeechToTextTranslateStreamingSocketClient]:
@@ -143,6 +151,9 @@
  vad_signals : typing.Optional[SpeechToTextTranslateStreamingVadSignals]
  Enable VAD signals in response

+ flush_signal : typing.Optional[SpeechToTextTranslateStreamingFlushSignal]
+ Signal to flush the audio buffer and finalize transcription and translation
+
  api_subscription_key : typing.Optional[str]
  API subscription key for authentication

@@ -161,6 +172,8 @@
  query_params = query_params.add("high_vad_sensitivity", high_vad_sensitivity)
  if vad_signals is not None:
  query_params = query_params.add("vad_signals", vad_signals)
+ if flush_signal is not None:
+ query_params = query_params.add("flush_signal", flush_signal)
  ws_url = ws_url + f"?{query_params}"
  headers = self._raw_client._client_wrapper.get_headers()
  if api_subscription_key is not None:
{sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_translate_streaming/raw_client.py
@@ -10,6 +10,7 @@ from ..core.api_error import ApiError
  from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
  from ..core.request_options import RequestOptions
  from .socket_client import AsyncSpeechToTextTranslateStreamingSocketClient, SpeechToTextTranslateStreamingSocketClient
+ from .types.speech_to_text_translate_streaming_flush_signal import SpeechToTextTranslateStreamingFlushSignal
  from .types.speech_to_text_translate_streaming_high_vad_sensitivity import (
  SpeechToTextTranslateStreamingHighVadSensitivity,
  )
@@ -33,6 +34,7 @@ class RawSpeechToTextTranslateStreamingClient:
  model: typing.Optional[SpeechToTextTranslateStreamingModel] = None,
  high_vad_sensitivity: typing.Optional[SpeechToTextTranslateStreamingHighVadSensitivity] = None,
  vad_signals: typing.Optional[SpeechToTextTranslateStreamingVadSignals] = None,
+ flush_signal: typing.Optional[SpeechToTextTranslateStreamingFlushSignal] = None,
  api_subscription_key: typing.Optional[str] = None,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> typing.Iterator[SpeechToTextTranslateStreamingSocketClient]:
@@ -50,6 +52,9 @@
  vad_signals : typing.Optional[SpeechToTextTranslateStreamingVadSignals]
  Enable VAD signals in response

+ flush_signal : typing.Optional[SpeechToTextTranslateStreamingFlushSignal]
+ Signal to flush the audio buffer and finalize transcription and translation
+
  api_subscription_key : typing.Optional[str]
  API subscription key for authentication

@@ -68,6 +73,8 @@
  query_params = query_params.add("high_vad_sensitivity", high_vad_sensitivity)
  if vad_signals is not None:
  query_params = query_params.add("vad_signals", vad_signals)
+ if flush_signal is not None:
+ query_params = query_params.add("flush_signal", flush_signal)
  ws_url = ws_url + f"?{query_params}"
  headers = self._client_wrapper.get_headers()
  if api_subscription_key is not None:
@@ -103,6 +110,7 @@ class AsyncRawSpeechToTextTranslateStreamingClient:
  model: typing.Optional[SpeechToTextTranslateStreamingModel] = None,
  high_vad_sensitivity: typing.Optional[SpeechToTextTranslateStreamingHighVadSensitivity] = None,
  vad_signals: typing.Optional[SpeechToTextTranslateStreamingVadSignals] = None,
+ flush_signal: typing.Optional[SpeechToTextTranslateStreamingFlushSignal] = None,
  api_subscription_key: typing.Optional[str] = None,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> typing.AsyncIterator[AsyncSpeechToTextTranslateStreamingSocketClient]:
@@ -120,6 +128,9 @@
  vad_signals : typing.Optional[SpeechToTextTranslateStreamingVadSignals]
  Enable VAD signals in response

+ flush_signal : typing.Optional[SpeechToTextTranslateStreamingFlushSignal]
+ Signal to flush the audio buffer and finalize transcription and translation
+
  api_subscription_key : typing.Optional[str]
  API subscription key for authentication

@@ -138,6 +149,8 @@
  query_params = query_params.add("high_vad_sensitivity", high_vad_sensitivity)
  if vad_signals is not None:
  query_params = query_params.add("vad_signals", vad_signals)
+ if flush_signal is not None:
+ query_params = query_params.add("flush_signal", flush_signal)
  ws_url = ws_url + f"?{query_params}"
  headers = self._client_wrapper.get_headers()
  if api_subscription_key is not None:
{sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/speech_to_text_translate_streaming/types/__init__.py
@@ -2,11 +2,13 @@

  # isort: skip_file

+ from .speech_to_text_translate_streaming_flush_signal import SpeechToTextTranslateStreamingFlushSignal
  from .speech_to_text_translate_streaming_high_vad_sensitivity import SpeechToTextTranslateStreamingHighVadSensitivity
  from .speech_to_text_translate_streaming_model import SpeechToTextTranslateStreamingModel
  from .speech_to_text_translate_streaming_vad_signals import SpeechToTextTranslateStreamingVadSignals

  __all__ = [
+ "SpeechToTextTranslateStreamingFlushSignal",
  "SpeechToTextTranslateStreamingHighVadSensitivity",
  "SpeechToTextTranslateStreamingModel",
  "SpeechToTextTranslateStreamingVadSignals",
sarvamai-0.1.19a1/src/sarvamai/speech_to_text_translate_streaming/types/speech_to_text_translate_streaming_flush_signal.py (new file)
@@ -0,0 +1,5 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ SpeechToTextTranslateStreamingFlushSignal = typing.Union[typing.Literal["true", "false"], typing.Any]
{sarvamai-0.1.18a0 → sarvamai-0.1.19a1}/src/sarvamai/types/__init__.py
@@ -75,6 +75,7 @@ from .speech_to_text_translate_transcription_data import SpeechToTextTranslateTr
  from .spoken_form_numerals_format import SpokenFormNumeralsFormat
  from .stop_configuration import StopConfiguration
  from .storage_container_type import StorageContainerType
+ from .stt_flush_signal import SttFlushSignal
  from .task_detail_v_1 import TaskDetailV1
  from .task_file_details import TaskFileDetails
  from .task_state import TaskState
@@ -168,6 +169,7 @@ __all__ = [
  "SpokenFormNumeralsFormat",
  "StopConfiguration",
  "StorageContainerType",
+ "SttFlushSignal",
  "TaskDetailV1",
  "TaskFileDetails",
  "TaskState",
sarvamai-0.1.19a1/src/sarvamai/types/stt_flush_signal.py (new file)
@@ -0,0 +1,31 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ import pydantic
+ from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+ class SttFlushSignal(UniversalBaseModel):
+     """
+     Signal to flush the audio buffer and force finalize partial transcriptions/translations
+     """
+
+     type: typing.Literal["flush"] = pydantic.Field(default="flush")
+     """
+     Type identifier for flush signal
+     """
+
+     reason: typing.Optional[str] = pydantic.Field(default=None)
+     """
+     Optional reason for flushing (e.g., "end_of_segment", "manual_flush")
+     """
+
+     if IS_PYDANTIC_V2:
+         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+     else:
+
+         class Config:
+             frozen = True
+             smart_union = True
+             extra = pydantic.Extra.allow
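For reference, a sketch of what this model serializes to when `flush()` sends it; `UniversalBaseModel` is Fern's shared pydantic base, so the exact dump helper (`.dict()` vs `.model_dump()`) depends on the installed pydantic major version and is an assumption here:

```python
from sarvamai import SttFlushSignal  # exported from the top-level package per __init__.py above

signal = SttFlushSignal(reason="end_of_segment")
# `type` defaults to "flush", so the wire payload is:
#   {"type": "flush", "reason": "end_of_segment"}
print(signal.dict())
```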