gcore 0.6.0__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of gcore might be problematic.

Files changed (181)
  1. gcore/_client.py +9 -0
  2. gcore/_models.py +24 -3
  3. gcore/_version.py +1 -1
  4. gcore/pagination.py +101 -1
  5. gcore/resources/__init__.py +14 -0
  6. gcore/resources/cloud/__init__.py +28 -0
  7. gcore/resources/cloud/cloud.py +64 -0
  8. gcore/resources/cloud/cost_reports.py +811 -0
  9. gcore/resources/cloud/file_shares/file_shares.py +10 -0
  10. gcore/resources/cloud/gpu_baremetal_clusters/gpu_baremetal_clusters.py +61 -26
  11. gcore/resources/cloud/inference/registry_credentials.py +4 -6
  12. gcore/resources/cloud/usage_reports.py +337 -0
  13. gcore/resources/streaming/__init__.py +159 -0
  14. gcore/resources/streaming/ai_tasks.py +1288 -0
  15. gcore/resources/streaming/broadcasts.py +579 -0
  16. gcore/resources/streaming/directories.py +515 -0
  17. gcore/resources/streaming/players.py +577 -0
  18. gcore/resources/streaming/playlists.py +1059 -0
  19. gcore/resources/streaming/quality_sets.py +331 -0
  20. gcore/resources/streaming/restreams.py +484 -0
  21. gcore/resources/streaming/statistics.py +3224 -0
  22. gcore/resources/streaming/streaming.py +390 -0
  23. gcore/resources/streaming/streams/__init__.py +33 -0
  24. gcore/resources/streaming/streams/overlays.py +716 -0
  25. gcore/resources/streaming/streams/streams.py +1592 -0
  26. gcore/resources/streaming/videos/__init__.py +33 -0
  27. gcore/resources/streaming/videos/subtitles.py +623 -0
  28. gcore/resources/streaming/videos/videos.py +1553 -0
  29. gcore/resources/waap/ip_info.py +28 -10
  30. gcore/types/cloud/__init__.py +10 -0
  31. gcore/types/cloud/cost_report_aggregated.py +865 -0
  32. gcore/types/cloud/cost_report_aggregated_monthly.py +865 -0
  33. gcore/types/cloud/cost_report_detailed.py +1343 -0
  34. gcore/types/cloud/cost_report_get_aggregated_monthly_params.py +394 -0
  35. gcore/types/cloud/cost_report_get_aggregated_params.py +409 -0
  36. gcore/types/cloud/cost_report_get_detailed_params.py +435 -0
  37. gcore/types/cloud/file_share.py +37 -3
  38. gcore/types/cloud/file_share_create_params.py +16 -0
  39. gcore/types/cloud/gpu_baremetal_cluster.py +2 -2
  40. gcore/types/cloud/gpu_baremetal_cluster_create_params.py +9 -0
  41. gcore/types/cloud/quota_get_all_response.py +24 -2
  42. gcore/types/cloud/quota_get_by_region_response.py +24 -2
  43. gcore/types/cloud/quotas/request_create_params.py +12 -1
  44. gcore/types/cloud/quotas/request_get_response.py +12 -1
  45. gcore/types/cloud/quotas/request_list_response.py +12 -1
  46. gcore/types/cloud/usage_report.py +1612 -0
  47. gcore/types/cloud/usage_report_get_params.py +432 -0
  48. gcore/types/streaming/__init__.py +143 -0
  49. gcore/types/streaming/ai_contentmoderation_casm.py +39 -0
  50. gcore/types/streaming/ai_contentmoderation_hardnudity.py +54 -0
  51. gcore/types/streaming/ai_contentmoderation_nsfw.py +39 -0
  52. gcore/types/streaming/ai_contentmoderation_softnudity.py +66 -0
  53. gcore/types/streaming/ai_contentmoderation_sport.py +39 -0
  54. gcore/types/streaming/ai_contentmoderation_weapon.py +39 -0
  55. gcore/types/streaming/ai_task.py +205 -0
  56. gcore/types/streaming/ai_task_cancel_response.py +12 -0
  57. gcore/types/streaming/ai_task_create_params.py +168 -0
  58. gcore/types/streaming/ai_task_create_response.py +10 -0
  59. gcore/types/streaming/ai_task_get_ai_settings_params.py +27 -0
  60. gcore/types/streaming/ai_task_get_ai_settings_response.py +12 -0
  61. gcore/types/streaming/ai_task_get_response.py +313 -0
  62. gcore/types/streaming/ai_task_list_params.py +50 -0
  63. gcore/types/streaming/broadcast.py +71 -0
  64. gcore/types/streaming/broadcast_create_params.py +76 -0
  65. gcore/types/streaming/broadcast_list_params.py +12 -0
  66. gcore/types/streaming/broadcast_spectators_count.py +12 -0
  67. gcore/types/streaming/broadcast_update_params.py +76 -0
  68. gcore/types/streaming/clip.py +78 -0
  69. gcore/types/streaming/create_video_param.py +214 -0
  70. gcore/types/streaming/direct_upload_parameters.py +33 -0
  71. gcore/types/streaming/directories_tree.py +19 -0
  72. gcore/types/streaming/directory_base.py +31 -0
  73. gcore/types/streaming/directory_create_params.py +15 -0
  74. gcore/types/streaming/directory_get_response.py +19 -0
  75. gcore/types/streaming/directory_item.py +13 -0
  76. gcore/types/streaming/directory_update_params.py +18 -0
  77. gcore/types/streaming/directory_video.py +13 -0
  78. gcore/types/streaming/ffprobes.py +25 -0
  79. gcore/types/streaming/max_stream_series.py +21 -0
  80. gcore/types/streaming/meet_series.py +23 -0
  81. gcore/types/streaming/player.py +114 -0
  82. gcore/types/streaming/player_create_params.py +18 -0
  83. gcore/types/streaming/player_list_params.py +12 -0
  84. gcore/types/streaming/player_param.py +114 -0
  85. gcore/types/streaming/player_update_params.py +18 -0
  86. gcore/types/streaming/playlist.py +102 -0
  87. gcore/types/streaming/playlist_create.py +12 -0
  88. gcore/types/streaming/playlist_create_params.py +102 -0
  89. gcore/types/streaming/playlist_list_params.py +12 -0
  90. gcore/types/streaming/playlist_list_videos_response.py +10 -0
  91. gcore/types/streaming/playlist_update_params.py +102 -0
  92. gcore/types/streaming/playlist_video.py +215 -0
  93. gcore/types/streaming/popular_videos.py +17 -0
  94. gcore/types/streaming/quality_set_set_default_params.py +23 -0
  95. gcore/types/streaming/quality_sets.py +57 -0
  96. gcore/types/streaming/restream.py +37 -0
  97. gcore/types/streaming/restream_create_params.py +41 -0
  98. gcore/types/streaming/restream_list_params.py +12 -0
  99. gcore/types/streaming/restream_update_params.py +41 -0
  100. gcore/types/streaming/statistic_get_ffprobes_params.py +22 -0
  101. gcore/types/streaming/statistic_get_live_unique_viewers_params.py +26 -0
  102. gcore/types/streaming/statistic_get_live_unique_viewers_response.py +25 -0
  103. gcore/types/streaming/statistic_get_live_watch_time_cdn_params.py +32 -0
  104. gcore/types/streaming/statistic_get_live_watch_time_total_cdn_params.py +30 -0
  105. gcore/types/streaming/statistic_get_max_streams_series_params.py +20 -0
  106. gcore/types/streaming/statistic_get_meet_series_params.py +20 -0
  107. gcore/types/streaming/statistic_get_popular_videos_params.py +15 -0
  108. gcore/types/streaming/statistic_get_storage_series_params.py +20 -0
  109. gcore/types/streaming/statistic_get_stream_series_params.py +20 -0
  110. gcore/types/streaming/statistic_get_unique_viewers_cdn_params.py +27 -0
  111. gcore/types/streaming/statistic_get_unique_viewers_params.py +34 -0
  112. gcore/types/streaming/statistic_get_views_by_browsers_params.py +15 -0
  113. gcore/types/streaming/statistic_get_views_by_country_params.py +15 -0
  114. gcore/types/streaming/statistic_get_views_by_hostname_params.py +15 -0
  115. gcore/types/streaming/statistic_get_views_by_operating_system_params.py +15 -0
  116. gcore/types/streaming/statistic_get_views_by_referer_params.py +15 -0
  117. gcore/types/streaming/statistic_get_views_by_region_params.py +15 -0
  118. gcore/types/streaming/statistic_get_views_heatmap_params.py +21 -0
  119. gcore/types/streaming/statistic_get_views_params.py +34 -0
  120. gcore/types/streaming/statistic_get_vod_storage_volume_params.py +17 -0
  121. gcore/types/streaming/statistic_get_vod_transcoding_duration_params.py +17 -0
  122. gcore/types/streaming/statistic_get_vod_unique_viewers_cdn_params.py +26 -0
  123. gcore/types/streaming/statistic_get_vod_watch_time_cdn_params.py +32 -0
  124. gcore/types/streaming/statistic_get_vod_watch_time_total_cdn_params.py +30 -0
  125. gcore/types/streaming/statistic_get_vod_watch_time_total_cdn_response.py +22 -0
  126. gcore/types/streaming/storage_series.py +23 -0
  127. gcore/types/streaming/stream.py +420 -0
  128. gcore/types/streaming/stream_create_clip_params.py +48 -0
  129. gcore/types/streaming/stream_create_params.py +165 -0
  130. gcore/types/streaming/stream_list_clips_response.py +10 -0
  131. gcore/types/streaming/stream_list_params.py +18 -0
  132. gcore/types/streaming/stream_series.py +21 -0
  133. gcore/types/streaming/stream_start_recording_response.py +76 -0
  134. gcore/types/streaming/stream_update_params.py +169 -0
  135. gcore/types/streaming/streams/__init__.py +11 -0
  136. gcore/types/streaming/streams/overlay.py +43 -0
  137. gcore/types/streaming/streams/overlay_create_params.py +36 -0
  138. gcore/types/streaming/streams/overlay_create_response.py +10 -0
  139. gcore/types/streaming/streams/overlay_list_response.py +10 -0
  140. gcore/types/streaming/streams/overlay_update_multiple_params.py +39 -0
  141. gcore/types/streaming/streams/overlay_update_multiple_response.py +10 -0
  142. gcore/types/streaming/streams/overlay_update_params.py +33 -0
  143. gcore/types/streaming/subtitle.py +12 -0
  144. gcore/types/streaming/subtitle_base.py +18 -0
  145. gcore/types/streaming/subtitle_base_param.py +18 -0
  146. gcore/types/streaming/unique_viewers.py +35 -0
  147. gcore/types/streaming/unique_viewers_cdn.py +17 -0
  148. gcore/types/streaming/video.py +444 -0
  149. gcore/types/streaming/video_create_multiple_params.py +28 -0
  150. gcore/types/streaming/video_create_multiple_response.py +10 -0
  151. gcore/types/streaming/video_create_params.py +13 -0
  152. gcore/types/streaming/video_create_response.py +10 -0
  153. gcore/types/streaming/video_list_names_params.py +13 -0
  154. gcore/types/streaming/video_list_params.py +59 -0
  155. gcore/types/streaming/video_update_params.py +214 -0
  156. gcore/types/streaming/videos/__init__.py +7 -0
  157. gcore/types/streaming/videos/subtitle_create_params.py +17 -0
  158. gcore/types/streaming/videos/subtitle_list_response.py +10 -0
  159. gcore/types/streaming/videos/subtitle_update_params.py +20 -0
  160. gcore/types/streaming/views.py +35 -0
  161. gcore/types/streaming/views_by_browser.py +17 -0
  162. gcore/types/streaming/views_by_country.py +19 -0
  163. gcore/types/streaming/views_by_hostname.py +17 -0
  164. gcore/types/streaming/views_by_operating_system.py +17 -0
  165. gcore/types/streaming/views_by_referer.py +17 -0
  166. gcore/types/streaming/views_by_region.py +19 -0
  167. gcore/types/streaming/views_heatmap.py +19 -0
  168. gcore/types/streaming/vod_statistics_series.py +21 -0
  169. gcore/types/streaming/vod_total_stream_duration_series.py +22 -0
  170. gcore/types/waap/domains/custom_rule_create_params.py +4 -4
  171. gcore/types/waap/domains/custom_rule_update_params.py +4 -4
  172. gcore/types/waap/ip_info_get_blocked_requests_params.py +5 -1
  173. gcore/types/waap/ip_info_get_counts_params.py +2 -1
  174. gcore/types/waap/ip_info_get_top_sessions_params.py +5 -1
  175. gcore/types/waap/ip_info_get_top_urls_params.py +5 -1
  176. gcore/types/waap/ip_info_get_top_user_agents_params.py +5 -1
  177. gcore/types/waap/waap_custom_rule.py +4 -4
  178. {gcore-0.6.0.dist-info → gcore-0.7.0.dist-info}/METADATA +1 -1
  179. {gcore-0.6.0.dist-info → gcore-0.7.0.dist-info}/RECORD +181 -33
  180. {gcore-0.6.0.dist-info → gcore-0.7.0.dist-info}/WHEEL +0 -0
  181. {gcore-0.6.0.dist-info → gcore-0.7.0.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,1288 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing_extensions import Literal
6
+
7
+ import httpx
8
+
9
+ from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
10
+ from ..._utils import maybe_transform, async_maybe_transform
11
+ from ..._compat import cached_property
12
+ from ..._resource import SyncAPIResource, AsyncAPIResource
13
+ from ..._response import (
14
+ to_raw_response_wrapper,
15
+ to_streamed_response_wrapper,
16
+ async_to_raw_response_wrapper,
17
+ async_to_streamed_response_wrapper,
18
+ )
19
+ from ...pagination import SyncPageStreamingAI, AsyncPageStreamingAI
20
+ from ..._base_client import AsyncPaginator, make_request_options
21
+ from ...types.streaming import ai_task_list_params, ai_task_create_params, ai_task_get_ai_settings_params
22
+ from ...types.streaming.ai_task import AITask
23
+ from ...types.streaming.ai_task_get_response import AITaskGetResponse
24
+ from ...types.streaming.ai_task_cancel_response import AITaskCancelResponse
25
+ from ...types.streaming.ai_task_create_response import AITaskCreateResponse
26
+ from ...types.streaming.ai_task_get_ai_settings_response import AITaskGetAISettingsResponse
27
+
28
+ __all__ = ["AITasksResource", "AsyncAITasksResource"]
29
+
30
+
31
+ class AITasksResource(SyncAPIResource):
32
+ @cached_property
33
+ def with_raw_response(self) -> AITasksResourceWithRawResponse:
34
+ """
35
+ This property can be used as a prefix for any HTTP method call to return
36
+ the raw response object instead of the parsed content.
37
+
38
+ For more information, see https://www.github.com/G-Core/gcore-python#accessing-raw-response-data-eg-headers
39
+ """
40
+ return AITasksResourceWithRawResponse(self)
41
+
42
+ @cached_property
43
+ def with_streaming_response(self) -> AITasksResourceWithStreamingResponse:
44
+ """
45
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
46
+
47
+ For more information, see https://www.github.com/G-Core/gcore-python#with_streaming_response
48
+ """
49
+ return AITasksResourceWithStreamingResponse(self)
50
+
51
+ def create(
52
+ self,
53
+ *,
54
+ task_name: Literal["transcription", "content-moderation"],
55
+ url: str,
56
+ audio_language: str | NotGiven = NOT_GIVEN,
57
+ category: Literal["sport", "weapon", "nsfw", "hard_nudity", "soft_nudity", "child_pornography"]
58
+ | NotGiven = NOT_GIVEN,
59
+ client_entity_data: str | NotGiven = NOT_GIVEN,
60
+ client_user_id: str | NotGiven = NOT_GIVEN,
61
+ subtitles_language: str | NotGiven = NOT_GIVEN,
62
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
63
+ # The extra values given here take precedence over values defined on the client or passed to this method.
64
+ extra_headers: Headers | None = None,
65
+ extra_query: Query | None = None,
66
+ extra_body: Body | None = None,
67
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
68
+ ) -> AITaskCreateResponse:
69
+ """Creating an AI task.
70
+
71
+ This method allows you to create an AI task for VOD video
72
+ processing:
73
+
74
+ - ASR: Transcribe video
75
+ - ASR: Translate subtitles
76
+ - CM: Sports detection
77
+ - CM: Weapon detection
78
+ - CM: Not Safe For Work (NSFW) content detection
79
+ - CM: Soft nudity detection
80
+ - CM: Hard nudity detection
81
+ - CM: Child Sexual Abuse Material (CSAM) detection
82
+ - CM: Objects recognition (soon)
83
+ ![Auto generated subtitles example](https://demo-files.gvideo.io/apidocs/captions.gif)
84
+ How to use:
85
+ - Create an AI task, specify the algorithm to use
86
+ - Get `task_id`
87
+ - Check a result using the `` .../ai/tasks/{`task_id`} `` method. For more detailed
88
+ information, see the description of each method separately.
89
+
90
+ **AI Automatic Speech Recognition (ASR)** AI is instrumental in automatic video
91
+ processing for subtitles creation by using Automatic Speech Recognition (ASR)
92
+ technology to transcribe spoken words into text, which can then be translated
93
+ into multiple languages for broader accessibility. Categories:
94
+
95
+ - `transcription` – to create subtitles/captions from audio in the original
96
+ language.
97
+ - `translation` – to translate subtitles/captions from the original language to
98
+ 99+ other languages. AI subtitle transcription and translation tools are
99
+ highly efficient, processing large volumes of audio-visual content quickly and
100
+ providing accurate transcriptions and translations with minimal human
101
+ intervention. Additionally, AI-driven solutions can significantly reduce costs
102
+ and turnaround times compared to traditional methods, making them an
103
+ invaluable resource for content creators and broadcasters aiming to reach
104
+ global audiences. Example response with positive result:
105
+
106
+ ```
107
+ {
108
+ "status": "SUCCESS",
109
+ "result": {
110
+ "subtitles": [
111
+ {
112
+ "`start_time`": "00:00:00.031",
113
+ "`end_time`": "00:00:03.831",
114
+ "text": "Come on team, ..."
115
+ }, ...
116
+ ]
117
+ "vttContent": "WEBVTT\n\n1\n00:00:00.031 --> 00:00:03.831\nCome on team, ...",
118
+ "`concatenated_text`": "Come on team, ...",
119
+ "languages": [ "eng" ],
120
+ "`speech_detected`": true
121
+ }
122
+ }, ...
123
+ }
124
+ ```
125
+
126
+ **AI Content Moderation (CM)** The AI Content Moderation API offers a powerful
127
+ solution for analyzing video content to detect various categories of
128
+ inappropriate material. Leveraging state-of-the-art AI models, this API ensures
129
+ real-time analysis and flagging of sensitive or restricted content types, making
130
+ it an essential tool for platforms requiring stringent content moderation.
131
+ Categories:
132
+
133
+ - `nsfw`: Quick algorithm to detect pornographic material, ensuring content is
134
+ "not-safe-for-work" or normal.
135
+ - `hard_nudity`: Detailed analysis of video which detects explicit nudity
136
+ involving genitalia.
137
+ - `soft_nudity`: Detailed video analysis that reveals both explicit and partial
138
+ nudity, including the presence of male and female faces and other uncovered
139
+ body parts.
140
+ - `child_pornography`: Detects child sexual abuse materials (CSAM).
141
+ - `sport`: Recognizes various sporting activities.
142
+ - `weapon`: Identifies the presence of weapons in the video content. The AI
143
+ Content Moderation API is an invaluable tool for managing and controlling the
144
+ type of content being shared or streamed on your platform. By implementing
145
+ this API, you can ensure compliance with community guidelines and legal
146
+ requirements, as well as provide a safer environment for your users. Important
147
+ notes:
148
+ - It's allowed to analyse still images too (where applicable). Format of image:
149
+ JPEG, PNG. In that case one image is the same as video of 1 second duration.
150
+ - Not all frames in the video are used for analysis, but only key frames
151
+ (Iframe). For example, if a key frame in a video is set every ±2 seconds, then
152
+ detection will only occur at these timestamps. If an object appears and
153
+ disappears between these time stamps, it will not be detected. We are working
154
+ on a version to analyze more frames, please contact your manager or our
155
+ support team to enable this method. Example response with positive result:
156
+
157
+ ```
158
+ {
159
+ "status": "SUCCESS",
160
+ "result": {
161
+ "`nsfw_detected`": true,
162
+ "`detection_results`": ["nsfw"],
163
+ "frames": [{"label": "nsfw", "confidence": 1.0, "`frame_number`": 24}, ...],
164
+ },
165
+ }
166
+ ```
167
+
168
+ **Additional information** Billing takes into account the duration of the
169
+ analyzed video, or the duration until the stop tag (where applicable) if the
170
+ condition was triggered during the analysis.
171
+
172
+ The heart of content moderation is AI, with additional services. They run on our
173
+ own infrastructure, so the files/data are not transferred anywhere to external
174
+ services. After processing, original files are also deleted from local storage
175
+ of AI.
176
+
177
+ Read more detailed information about our solution, architecture, and
178
+ benefits in the knowledge base and blog.
179
+
180
+ Args:
181
+ task_name: Name of the task to be performed
182
+
183
+ url: URL to the MP4 file to analyse. File must be publicly accessible via HTTP/HTTPS.
184
+
185
+ audio_language: Language in original audio (transcription only). This value is used to determine
186
+ the language from which to transcribe. If this is not set, the system will run
187
+ auto language identification and the subtitles will be in the detected language.
188
+ The method also works based on AI analysis. It's fairly accurate, but if it's
189
+ wrong, then set the language explicitly. Additionally, when this is not set, we
190
+ also support recognition of alternate languages in the video (language
191
+ code-switching). Language is set by 3-letter language code according to
192
+ ISO-639-2 (bibliographic code). We can process languages:
193
+
194
+ - 'afr': Afrikaans
195
+ - 'alb': Albanian
196
+ - 'amh': Amharic
197
+ - 'ara': Arabic
198
+ - 'arm': Armenian
199
+ - 'asm': Assamese
200
+ - 'aze': Azerbaijani
201
+ - 'bak': Bashkir
202
+ - 'baq': Basque
203
+ - 'bel': Belarusian
204
+ - 'ben': Bengali
205
+ - 'bos': Bosnian
206
+ - 'bre': Breton
207
+ - 'bul': Bulgarian
208
+ - 'bur': Myanmar
209
+ - 'cat': Catalan
210
+ - 'chi': Chinese
211
+ - 'cze': Czech
212
+ - 'dan': Danish
213
+ - 'nno': Nynorsk
214
+ - 'eng': English
215
+ - 'est': Estonian
216
+ - 'fao': Faroese
217
+ - 'fin': Finnish
218
+ - 'fre': French
219
+ - 'geo': Georgian
220
+ - 'ger': German
221
+ - 'glg': Galician
222
+ - 'gre': Greek
223
+ - 'guj': Gujarati
224
+ - 'hat': Haitian creole
225
+ - 'hau': Hausa
226
+ - 'haw': Hawaiian
227
+ - 'heb': Hebrew
228
+ - 'hin': Hindi
229
+ - 'hrv': Croatian
230
+ - 'hun': Hungarian
231
+ - 'ice': Icelandic
232
+ - 'ind': Indonesian
233
+ - 'ita': Italian
234
+ - 'jav': Javanese
235
+ - 'jpn': Japanese
236
+ - 'kan': Kannada
237
+ - 'kaz': Kazakh
238
+ - 'khm': Khmer
239
+ - 'kor': Korean
240
+ - 'lao': Lao
241
+ - 'lat': Latin
242
+ - 'lav': Latvian
243
+ - 'lin': Lingala
244
+ - 'lit': Lithuanian
245
+ - 'ltz': Luxembourgish
246
+ - 'mac': Macedonian
247
+ - 'mal': Malayalam
248
+ - 'mao': Maori
249
+ - 'mar': Marathi
250
+ - 'may': Malay
251
+ - 'mlg': Malagasy
252
+ - 'mlt': Maltese
253
+ - 'mon': Mongolian
254
+ - 'nep': Nepali
255
+ - 'dut': Dutch
256
+ - 'nor': Norwegian
257
+ - 'oci': Occitan
258
+ - 'pan': Punjabi
259
+ - 'per': Persian
260
+ - 'pol': Polish
261
+ - 'por': Portuguese
262
+ - 'pus': Pashto
263
+ - 'rum': Romanian
264
+ - 'rus': Russian
265
+ - 'san': Sanskrit
266
+ - 'sin': Sinhala
267
+ - 'slo': Slovak
268
+ - 'slv': Slovenian
269
+ - 'sna': Shona
270
+ - 'snd': Sindhi
271
+ - 'som': Somali
272
+ - 'spa': Spanish
273
+ - 'srp': Serbian
274
+ - 'sun': Sundanese
275
+ - 'swa': Swahili
276
+ - 'swe': Swedish
277
+ - 'tam': Tamil
278
+ - 'tat': Tatar
279
+ - 'tel': Telugu
280
+ - 'tgk': Tajik
281
+ - 'tgl': Tagalog
282
+ - 'tha': Thai
283
+ - 'tib': Tibetan
284
+ - 'tuk': Turkmen
285
+ - 'tur': Turkish
286
+ - 'ukr': Ukrainian
287
+ - 'urd': Urdu
288
+ - 'uzb': Uzbek
289
+ - 'vie': Vietnamese
290
+ - 'wel': Welsh
291
+ - 'yid': Yiddish
292
+ - 'yor': Yoruba
293
+
294
+ category: Model for analysis (content-moderation only). Determines what exactly needs to
295
+ be found in the video.
296
+
297
+ client_entity_data: Meta parameter, designed to store your own extra information about a video
298
+ entity: video source, video id, etc. It is not used in any way in video
299
+ processing. For example, if an AI-task was created automatically when you
300
+ uploaded a video with the AI auto-processing option (nudity detection, etc),
301
+ then the ID of the associated video for which the task was performed will be
302
+ explicitly indicated here.
303
+
304
+ client_user_id: Meta parameter, designed to store your own identifier. Can be used by you to tag
305
+ requests from different end-users. It is not used in any way in video
306
+ processing.
307
+
308
+ subtitles_language: Indicates the language to translate the subtitles into. If this is
309
+ not set, the original language will be used from attribute "`audio_language`".
310
+ Please note that:
311
+
312
+ - transcription into the original language is a free procedure,
313
+ - and translation from the original language into any other languages is a
314
+ "translation" procedure and is paid. More details in
315
+ [POST /ai/tasks#transcribe](https://api.gcore.com/docs/streaming/docs/api-reference/streaming/ai/create-ai-asr-task).
316
+ Language is set by 3-letter language code according to ISO-639-2
317
+ (bibliographic code).
318
+
319
+ extra_headers: Send extra headers
320
+
321
+ extra_query: Add additional query parameters to the request
322
+
323
+ extra_body: Add additional JSON properties to the request
324
+
325
+ timeout: Override the client-level default timeout for this request, in seconds
326
+ """
327
+ return self._post(
328
+ "/streaming/ai/tasks",
329
+ body=maybe_transform(
330
+ {
331
+ "task_name": task_name,
332
+ "url": url,
333
+ "audio_language": audio_language,
334
+ "category": category,
335
+ "client_entity_data": client_entity_data,
336
+ "client_user_id": client_user_id,
337
+ "subtitles_language": subtitles_language,
338
+ },
339
+ ai_task_create_params.AITaskCreateParams,
340
+ ),
341
+ options=make_request_options(
342
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
343
+ ),
344
+ cast_to=AITaskCreateResponse,
345
+ )
346
+
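For orientation, a minimal usage sketch of the `create` method above (illustration only, not part of the released file). The `client.streaming.ai_tasks` attribute path and environment-based client construction are assumptions inferred from the resource layout added in this diff.

```python
from gcore import Gcore

client = Gcore()  # assumption: credentials picked up from the environment

# Start an ASR transcription task for a publicly accessible MP4 file.
created = client.streaming.ai_tasks.create(
    task_name="transcription",
    url="https://example.com/video.mp4",  # hypothetical URL
    audio_language="eng",  # optional; omit to let the service auto-detect
)
print(created)  # expected to carry the task_id used by get()/cancel()
```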
347
+ def list(
348
+ self,
349
+ *,
350
+ date_created: str | NotGiven = NOT_GIVEN,
351
+ limit: int | NotGiven = NOT_GIVEN,
352
+ ordering: Literal["task_id", "status", "task_name", "started_at"] | NotGiven = NOT_GIVEN,
353
+ page: int | NotGiven = NOT_GIVEN,
354
+ search: str | NotGiven = NOT_GIVEN,
355
+ status: Literal["FAILURE", "PENDING", "RECEIVED", "RETRY", "REVOKED", "STARTED", "SUCCESS"]
356
+ | NotGiven = NOT_GIVEN,
357
+ task_id: str | NotGiven = NOT_GIVEN,
358
+ task_name: Literal["transcription", "content-moderation"] | NotGiven = NOT_GIVEN,
359
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
360
+ # The extra values given here take precedence over values defined on the client or passed to this method.
361
+ extra_headers: Headers | None = None,
362
+ extra_query: Query | None = None,
363
+ extra_body: Body | None = None,
364
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
365
+ ) -> SyncPageStreamingAI[AITask]:
366
+ """Returns a list of previously created and processed AI tasks.
367
+
368
+ The list contains
369
+ brief information about the task and its execution status. Data is displayed
370
+ page by page.
371
+
372
+ Args:
373
+ date_created: Time when task was created. Datetime in ISO 8601 format.
374
+
375
+ limit: Number of results to return per page.
376
+
377
+ ordering: Which field to use when ordering the results: `task_id`, status, and
378
+ `task_name`. Sorting is done in ascending (ASC) order. If the parameter is omitted
379
+ then "`started_at` DESC" is used for ordering by default.
380
+
381
+ page: Page to view from task list, starting from 1
382
+
383
+ search: This is a field for combined text search in the following fields: `task_id`,
384
+ `task_name`, status, and `task_data`. Both full and partial searches are
385
+ possible inside the fields specified above. For example, you can filter tasks of a
386
+ certain category, or tasks by a specific original file. Example:
387
+
388
+ - To filter tasks of Content Moderation NSFW method:
389
+ `GET /streaming/ai/tasks?search=nsfw`
390
+ - To filter tasks of processing video from a specific origin:
391
+ `GET /streaming/ai/tasks?search=s3.eu-west-1.amazonaws.com`
392
+
393
+ status: Task status
394
+
395
+ task_id: The unique identifier of the task to find
396
+
397
+ task_name: Type of the AI task. Reflects the original API method that was used to create
398
+ the AI task.
399
+
400
+ extra_headers: Send extra headers
401
+
402
+ extra_query: Add additional query parameters to the request
403
+
404
+ extra_body: Add additional JSON properties to the request
405
+
406
+ timeout: Override the client-level default timeout for this request, in seconds
407
+ """
408
+ return self._get_api_list(
409
+ "/streaming/ai/tasks",
410
+ page=SyncPageStreamingAI[AITask],
411
+ options=make_request_options(
412
+ extra_headers=extra_headers,
413
+ extra_query=extra_query,
414
+ extra_body=extra_body,
415
+ timeout=timeout,
416
+ query=maybe_transform(
417
+ {
418
+ "date_created": date_created,
419
+ "limit": limit,
420
+ "ordering": ordering,
421
+ "page": page,
422
+ "search": search,
423
+ "status": status,
424
+ "task_id": task_id,
425
+ "task_name": task_name,
426
+ },
427
+ ai_task_list_params.AITaskListParams,
428
+ ),
429
+ ),
430
+ model=AITask,
431
+ )
432
+
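A sketch of paginated listing, assuming the returned `SyncPageStreamingAI` can be iterated directly over `AITask` items with automatic page fetching (the usual behaviour of Stainless-generated pages):

```python
from gcore import Gcore

client = Gcore()  # assumption: credentials picked up from the environment

# Iterate completed tasks; iterating the page object is assumed to follow
# the page/limit pagination transparently.
for task in client.streaming.ai_tasks.list(status="SUCCESS", limit=50):
    print(task)
```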
433
+ def cancel(
434
+ self,
435
+ task_id: str,
436
+ *,
437
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
438
+ # The extra values given here take precedence over values defined on the client or passed to this method.
439
+ extra_headers: Headers | None = None,
440
+ extra_query: Query | None = None,
441
+ extra_body: Body | None = None,
442
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
443
+ ) -> AITaskCancelResponse:
444
+ """
445
+ Stopping a previously launched AI-task without waiting for it to be fully
446
+ completed. The task will be moved to "REVOKED" status.
447
+
448
+ Args:
449
+ extra_headers: Send extra headers
450
+
451
+ extra_query: Add additional query parameters to the request
452
+
453
+ extra_body: Add additional JSON properties to the request
454
+
455
+ timeout: Override the client-level default timeout for this request, in seconds
456
+ """
457
+ if not task_id:
458
+ raise ValueError(f"Expected a non-empty value for `task_id` but received {task_id!r}")
459
+ return self._post(
460
+ f"/streaming/ai/tasks/{task_id}/cancel",
461
+ options=make_request_options(
462
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
463
+ ),
464
+ cast_to=AITaskCancelResponse,
465
+ )
466
+
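A sketch of revoking a running task by its ID (the ID value is hypothetical):

```python
from gcore import Gcore

client = Gcore()  # assumption: credentials picked up from the environment

# Stop a previously launched AI task; the service moves it to "REVOKED".
cancelled = client.streaming.ai_tasks.cancel("2f1e3c1e-hypothetical-id")
print(cancelled)
```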
467
+ def get(
468
+ self,
469
+ task_id: str,
470
+ *,
471
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
472
+ # The extra values given here take precedence over values defined on the client or passed to this method.
473
+ extra_headers: Headers | None = None,
474
+ extra_query: Query | None = None,
475
+ extra_body: Body | None = None,
476
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
477
+ ) -> AITaskGetResponse:
478
+ """
479
+ This is the single method to check the execution status of an AI task, and
480
+ obtain the result of any type of AI task. Based on the results of processing,
481
+ the “result” field will contain an answer corresponding to the type of the
482
+ initially created task:
483
+
484
+ - ASR: Transcribe video
485
+ - ASR: Translate subtitles
486
+ - CM: Sports detection
487
+ - CM: Weapon detection
488
+ - CM: Not Safe For Work (NSFW) content detection
489
+ - CM: Soft nudity detection
490
+ - CM: Hard nudity detection
491
+ - CM: Child Sexual Abuse Material (CSAM) detection
492
+ - CM: Objects recognition (soon)
493
+ - etc... (see other methods from /ai/ domain)
494
+
495
+ A queue is used to process videos. The waiting time depends on the total number
496
+ of requests in the system, so sometimes you will have to wait. Statuses:
497
+
498
+ - PENDING – the task has been received and is waiting for available resources
499
+ - STARTED – processing has started
500
+ - SUCCESS – processing has completed successfully
501
+ - FAILURE – processing failed
502
+ - REVOKED – processing was cancelled by the user (or the system)
503
+ - RETRY – the task execution failed due to internal reasons, the task is queued
504
+ for re-execution (up to 3 times). Each task is processed in sub-stages; for
505
+ example, original language is first determined in a video, and then
506
+ transcription is performed. In such cases, the video processing status may
507
+ change from "STARTED" to "PENDING", and back. This is due to waiting for
508
+ resources for a specific processing sub-stage. In this case, the overall
509
+ percentage "progress" of video processing will reflect the full picture.
510
+
511
+ The result data is stored for 1 month, after which it is deleted.
512
+
513
+ For billing conditions see the corresponding methods in /ai/ domain. The task is
514
+ billed only after successful completion of the task and transition to "SUCCESS"
515
+ status.
516
+
517
+ Args:
518
+ extra_headers: Send extra headers
519
+
520
+ extra_query: Add additional query parameters to the request
521
+
522
+ extra_body: Add additional JSON properties to the request
523
+
524
+ timeout: Override the client-level default timeout for this request, in seconds
525
+ """
526
+ if not task_id:
527
+ raise ValueError(f"Expected a non-empty value for `task_id` but received {task_id!r}")
528
+ return self._get(
529
+ f"/streaming/ai/tasks/{task_id}",
530
+ options=make_request_options(
531
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
532
+ ),
533
+ cast_to=AITaskGetResponse,
534
+ )
535
+
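A polling sketch built on the `get` method above; the `status` and `result` attribute names are assumptions based on the statuses and fields described in the docstring:

```python
import time

from gcore import Gcore

client = Gcore()  # assumption: credentials picked up from the environment

task_id = "2f1e3c1e-hypothetical-id"  # as returned by create()
while True:
    task = client.streaming.ai_tasks.get(task_id)
    status = getattr(task, "status", None)  # attribute name assumed
    if status in ("SUCCESS", "FAILURE", "REVOKED"):
        break
    time.sleep(10)  # tasks are queued, so poll with generous intervals

print(status, getattr(task, "result", None))  # `result` attribute assumed
```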
536
+ def get_ai_settings(
537
+ self,
538
+ *,
539
+ type: Literal["language_support"],
540
+ audio_language: str | NotGiven = NOT_GIVEN,
541
+ subtitles_language: str | NotGiven = NOT_GIVEN,
542
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
543
+ # The extra values given here take precedence over values defined on the client or passed to this method.
544
+ extra_headers: Headers | None = None,
545
+ extra_query: Query | None = None,
546
+ extra_body: Body | None = None,
547
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
548
+ ) -> AITaskGetAISettingsResponse:
549
+ """
550
+ The method reveals basic information and the advanced underlying settings that
551
+ are used when performing AI tasks.
552
+
553
+ Parameter sections:
554
+
555
+ - "`language_support`" – AI Translation: check if a language pair is supported
556
+ or not for AI translation.
557
+ - this list will expand as new AI methods are added.
558
+
559
+ **`language_support`** There are many languages available for transcription. But
560
+ not all languages can be automatically translated to and from with good quality.
561
+ In order to determine the availability of translation from the audio language to
562
+ the desired subtitle language, you can use this type of "`language_support`". AI
563
+ models are constantly improving, so this method can be used for dynamic
564
+ determination. Example:
565
+
566
+ ```
567
+ curl -L 'https://api.gcore.com/streaming/ai/info?type=language_support&audio_language=eng&subtitles_language=fre'
568
+ { "supported": true }
569
+ ```
570
+
571
+ Today we provide the following capabilities. These are the 100
572
+ languages for which we support only transcription and translation to English.
573
+ The iso639-2b codes for these are:
574
+ `afr, sqi, amh, ara, hye, asm, aze, bak, eus, bel, ben, bos, bre, bul, mya, cat, zho, hrv, ces, dan, nld, eng, est, fao, fin, fra, glg, kat, deu, guj, hat, hau, haw, heb, hin, hun, isl, ind, ita, jpn, jav, kan, kaz, khm, kor, lao, lat, lav, lin, lit, ltz, mkd, mlg, msa, mal, mlt, mri, mar, ell, mon, nep, nor, nno, oci, pan, fas, pol, por, pus, ron, rus, san, srp, sna, snd, sin, slk, slv, som, spa, sun, swa, swe, tgl, tgk, tam, tat, tel, tha, bod, tur, tuk, ukr, urd, uzb, vie, cym, yid, yor`.
575
+ These are the 77 languages for which we support translation to other languages
576
+ and translation to:
577
+ `afr, amh, ara, hye, asm, aze, eus, bel, ben, bos, bul, mya, cat, zho, hrv, ces, dan, nld, eng, est, fin, fra, glg, kat, deu, guj, heb, hin, hun, isl, ind, ita, jpn, jav, kan, kaz, khm, kor, lao, lav, lit, mkd, mal, mlt, mar, ell, mon, nep, nno, pan, fas, pol, por, pus, ron, rus, srp, sna, snd, slk, slv, som, spa, swa, swe, tgl, tgk, tam, tel, tha, tur, ukr, urd, vie, cym, yor`.
578
+
579
+ Args:
580
+ type: The parameters section for which parameters are requested
581
+
582
+ audio_language: The source language from which the audio will be transcribed. Required when
583
+ `type=language_support`. Value is 3-letter language code according to ISO-639-2
584
+ (bibliographic code), (e.g., fre for French).
585
+
586
+ subtitles_language: The target language the text will be translated into. If omitted, the API will
587
+ return whether the `audio_language` is supported for transcription only, instead
588
+ of translation. Value is 3-letter language code according to ISO-639-2
589
+ (bibliographic code), (e.g., fre for French).
590
+
591
+ extra_headers: Send extra headers
592
+
593
+ extra_query: Add additional query parameters to the request
594
+
595
+ extra_body: Add additional JSON properties to the request
596
+
597
+ timeout: Override the client-level default timeout for this request, in seconds
598
+ """
599
+ return self._get(
600
+ "/streaming/ai/info",
601
+ options=make_request_options(
602
+ extra_headers=extra_headers,
603
+ extra_query=extra_query,
604
+ extra_body=extra_body,
605
+ timeout=timeout,
606
+ query=maybe_transform(
607
+ {
608
+ "type": type,
609
+ "audio_language": audio_language,
610
+ "subtitles_language": subtitles_language,
611
+ },
612
+ ai_task_get_ai_settings_params.AITaskGetAISettingsParams,
613
+ ),
614
+ ),
615
+ cast_to=AITaskGetAISettingsResponse,
616
+ )
617
+
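A sketch of the language-pair check, mirroring the curl example in the docstring above; exposing the documented `{ "supported": true }` payload as a `supported` attribute is an assumption:

```python
from gcore import Gcore

client = Gcore()  # assumption: credentials picked up from the environment

settings = client.streaming.ai_tasks.get_ai_settings(
    type="language_support",
    audio_language="eng",
    subtitles_language="fre",
)
print(getattr(settings, "supported", settings))  # attribute name assumed
```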
618
+
619
+ class AsyncAITasksResource(AsyncAPIResource):
620
+ @cached_property
621
+ def with_raw_response(self) -> AsyncAITasksResourceWithRawResponse:
622
+ """
623
+ This property can be used as a prefix for any HTTP method call to return
624
+ the raw response object instead of the parsed content.
625
+
626
+ For more information, see https://www.github.com/G-Core/gcore-python#accessing-raw-response-data-eg-headers
627
+ """
628
+ return AsyncAITasksResourceWithRawResponse(self)
629
+
630
+ @cached_property
631
+ def with_streaming_response(self) -> AsyncAITasksResourceWithStreamingResponse:
632
+ """
633
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
634
+
635
+ For more information, see https://www.github.com/G-Core/gcore-python#with_streaming_response
636
+ """
637
+ return AsyncAITasksResourceWithStreamingResponse(self)
638
+
639
+ async def create(
640
+ self,
641
+ *,
642
+ task_name: Literal["transcription", "content-moderation"],
643
+ url: str,
644
+ audio_language: str | NotGiven = NOT_GIVEN,
645
+ category: Literal["sport", "weapon", "nsfw", "hard_nudity", "soft_nudity", "child_pornography"]
646
+ | NotGiven = NOT_GIVEN,
647
+ client_entity_data: str | NotGiven = NOT_GIVEN,
648
+ client_user_id: str | NotGiven = NOT_GIVEN,
649
+ subtitles_language: str | NotGiven = NOT_GIVEN,
650
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
651
+ # The extra values given here take precedence over values defined on the client or passed to this method.
652
+ extra_headers: Headers | None = None,
653
+ extra_query: Query | None = None,
654
+ extra_body: Body | None = None,
655
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
656
+ ) -> AITaskCreateResponse:
657
+ """Creating an AI task.
658
+
659
+ This method allows you to create an AI task for VOD video
660
+ processing:
661
+
662
+ - ASR: Transcribe video
663
+ - ASR: Translate subtitles
664
+ - CM: Sports detection
665
+ - CM: Weapon detection
666
+ - CM: Not Safe For Work (NSFW) content detection
667
+ - CM: Soft nudity detection
668
+ - CM: Hard nudity detection
669
+ - CM: Child Sexual Abuse Material (CSAM) detection
670
+ - CM: Objects recognition (soon)
671
+ ![Auto generated subtitles example](https://demo-files.gvideo.io/apidocs/captions.gif)
672
+ How to use:
673
+ - Create an AI task, specify the algorithm to use
674
+ - Get `task_id`
675
+ - Check a result using the `` .../ai/tasks/{`task_id`} `` method. For more detailed
676
+ information, see the description of each method separately.
677
+
678
+ **AI Automatic Speech Recognition (ASR)** AI is instrumental in automatic video
679
+ processing for subtitles creation by using Automatic Speech Recognition (ASR)
680
+ technology to transcribe spoken words into text, which can then be translated
681
+ into multiple languages for broader accessibility. Categories:
682
+
683
+ - `transcription` – to create subtitles/captions from audio in the original
684
+ language.
685
+ - `translation` – to translate subtitles/captions from the original language to
686
+ 99+ other languages. AI subtitle transcription and translation tools are
687
+ highly efficient, processing large volumes of audio-visual content quickly and
688
+ providing accurate transcriptions and translations with minimal human
689
+ intervention. Additionally, AI-driven solutions can significantly reduce costs
690
+ and turnaround times compared to traditional methods, making them an
691
+ invaluable resource for content creators and broadcasters aiming to reach
692
+ global audiences. Example response with positive result:
693
+
694
+ ```
695
+ {
696
+ "status": "SUCCESS",
697
+ "result": {
698
+ "subtitles": [
699
+ {
700
+ "`start_time`": "00:00:00.031",
701
+ "`end_time`": "00:00:03.831",
702
+ "text": "Come on team, ..."
703
+ }, ...
704
+ ]
705
+ "vttContent": "WEBVTT\n\n1\n00:00:00.031 --> 00:00:03.831\nCome on team, ...",
706
+ "`concatenated_text`": "Come on team, ...",
707
+ "languages": [ "eng" ],
708
+ "`speech_detected`": true
709
+ }
710
+ }, ...
711
+ }
712
+ ```
713
+
714
+ **AI Content Moderation (CM)** The AI Content Moderation API offers a powerful
715
+ solution for analyzing video content to detect various categories of
716
+ inappropriate material. Leveraging state-of-the-art AI models, this API ensures
717
+ real-time analysis and flagging of sensitive or restricted content types, making
718
+ it an essential tool for platforms requiring stringent content moderation.
719
+ Categories:
720
+
721
+ - `nsfw`: Quick algorithm to detect pornographic material, ensuring content is
722
+ "not-safe-for-work" or normal.
723
+ - `hard_nudity`: Detailed analysis of video which detects explicit nudity
724
+ involving genitalia.
725
+ - `soft_nudity`: Detailed video analysis that reveals both explicit and partial
726
+ nudity, including the presence of male and female faces and other uncovered
727
+ body parts.
728
+ - `child_pornography`: Detects child sexual abuse materials (CSAM).
729
+ - `sport`: Recognizes various sporting activities.
730
+ - `weapon`: Identifies the presence of weapons in the video content. The AI
731
+ Content Moderation API is an invaluable tool for managing and controlling the
732
+ type of content being shared or streamed on your platform. By implementing
733
+ this API, you can ensure compliance with community guidelines and legal
734
+ requirements, as well as provide a safer environment for your users. Important
735
+ notes:
736
+ - It's allowed to analyse still images too (where applicable). Format of image:
737
+ JPEG, PNG. In that case one image is the same as video of 1 second duration.
738
+ - Not all frames in the video are used for analysis, but only key frames
739
+ (Iframe). For example, if a key frame in a video is set every ±2 seconds, then
740
+ detection will only occur at these timestamps. If an object appears and
741
+ disappears between these time stamps, it will not be detected. We are working
742
+ on a version to analyze more frames, please contact your manager or our
743
+ support team to enable this method. Example response with positive result:
744
+
745
+ ```
746
+ {
747
+ "status": "SUCCESS",
748
+ "result": {
749
+ "`nsfw_detected`": true,
750
+ "`detection_results`": ["nsfw"],
751
+ "frames": [{"label": "nsfw", "confidence": 1.0, "`frame_number`": 24}, ...],
752
+ },
753
+ }
754
+ ```
755
+
756
+ **Additional information** Billing takes into account the duration of the
757
+ analyzed video, or the duration until the stop tag (where applicable) if the
758
+ condition was triggered during the analysis.
759
+
760
+ The heart of content moderation is AI, with additional services. They run on our
761
+ own infrastructure, so the files/data are not transferred anywhere to external
762
+ services. After processing, original files are also deleted from local storage
763
+ of AI.
764
+
765
+ Read more detailed information about our solution, architecture, and
766
+ benefits in the knowledge base and blog.
767
+
768
+ Args:
769
+ task_name: Name of the task to be performed
770
+
771
+ url: URL to the MP4 file to analyse. File must be publicly accessible via HTTP/HTTPS.
772
+
773
+ audio_language: Language in original audio (transcription only). This value is used to determine
774
+ the language from which to transcribe. If this is not set, the system will run
775
+ auto language identification and the subtitles will be in the detected language.
776
+ The method also works based on AI analysis. It's fairly accurate, but if it's
777
+ wrong, then set the language explicitly. Additionally, when this is not set, we
778
+ also support recognition of alternate languages in the video (language
779
+ code-switching). Language is set by 3-letter language code according to
780
+ ISO-639-2 (bibliographic code). We can process languages:
781
+
782
+ - 'afr': Afrikaans
783
+ - 'alb': Albanian
784
+ - 'amh': Amharic
785
+ - 'ara': Arabic
786
+ - 'arm': Armenian
787
+ - 'asm': Assamese
788
+ - 'aze': Azerbaijani
789
+ - 'bak': Bashkir
790
+ - 'baq': Basque
791
+ - 'bel': Belarusian
792
+ - 'ben': Bengali
793
+ - 'bos': Bosnian
794
+ - 'bre': Breton
795
+ - 'bul': Bulgarian
796
+ - 'bur': Myanmar
797
+ - 'cat': Catalan
798
+ - 'chi': Chinese
799
+ - 'cze': Czech
800
+ - 'dan': Danish
801
+ - 'nno': Nynorsk
802
+ - 'eng': English
803
+ - 'est': Estonian
804
+ - 'fao': Faroese
805
+ - 'fin': Finnish
806
+ - 'fre': French
807
+ - 'geo': Georgian
808
+ - 'ger': German
809
+ - 'glg': Galician
810
+ - 'gre': Greek
811
+ - 'guj': Gujarati
812
+ - 'hat': Haitian creole
813
+ - 'hau': Hausa
814
+ - 'haw': Hawaiian
815
+ - 'heb': Hebrew
816
+ - 'hin': Hindi
817
+ - 'hrv': Croatian
818
+ - 'hun': Hungarian
819
+ - 'ice': Icelandic
820
+ - 'ind': Indonesian
821
+ - 'ita': Italian
822
+ - 'jav': Javanese
823
+ - 'jpn': Japanese
824
+ - 'kan': Kannada
825
+ - 'kaz': Kazakh
826
+ - 'khm': Khmer
827
+ - 'kor': Korean
828
+ - 'lao': Lao
829
+ - 'lat': Latin
830
+ - 'lav': Latvian
831
+ - 'lin': Lingala
832
+ - 'lit': Lithuanian
833
+ - 'ltz': Luxembourgish
834
+ - 'mac': Macedonian
835
+ - 'mal': Malayalam
836
+ - 'mao': Maori
837
+ - 'mar': Marathi
838
+ - 'may': Malay
839
+ - 'mlg': Malagasy
840
+ - 'mlt': Maltese
841
+ - 'mon': Mongolian
842
+ - 'nep': Nepali
843
+ - 'dut': Dutch
844
+ - 'nor': Norwegian
845
+ - 'oci': Occitan
846
+ - 'pan': Punjabi
847
+ - 'per': Persian
848
+ - 'pol': Polish
849
+ - 'por': Portuguese
850
+ - 'pus': Pashto
851
+ - 'rum': Romanian
852
+ - 'rus': Russian
853
+ - 'san': Sanskrit
854
+ - 'sin': Sinhala
855
+ - 'slo': Slovak
856
+ - 'slv': Slovenian
857
+ - 'sna': Shona
858
+ - 'snd': Sindhi
859
+ - 'som': Somali
860
+ - 'spa': Spanish
861
+ - 'srp': Serbian
862
+ - 'sun': Sundanese
863
+ - 'swa': Swahili
864
+ - 'swe': Swedish
865
+ - 'tam': Tamil
866
+ - 'tat': Tatar
867
+ - 'tel': Telugu
868
+ - 'tgk': Tajik
869
+ - 'tgl': Tagalog
870
+ - 'tha': Thai
871
+ - 'tib': Tibetan
872
+ - 'tuk': Turkmen
873
+ - 'tur': Turkish
874
+ - 'ukr': Ukrainian
875
+ - 'urd': Urdu
876
+ - 'uzb': Uzbek
877
+ - 'vie': Vietnamese
878
+ - 'wel': Welsh
879
+ - 'yid': Yiddish
880
+ - 'yor': Yoruba
881
+
882
+ category: Model for analysis (content-moderation only). Determines what exactly needs to
883
+ be found in the video.
884
+
885
+ client_entity_data: Meta parameter, designed to store your own extra information about a video
886
+ entity: video source, video id, etc. It is not used in any way in video
887
+ processing. For example, if an AI-task was created automatically when you
888
+ uploaded a video with the AI auto-processing option (nudity detection, etc),
889
+ then the ID of the associated video for which the task was performed will be
890
+ explicitly indicated here.
891
+
892
+ client_user_id: Meta parameter, designed to store your own identifier. Can be used by you to tag
893
+ requests from different end-users. It is not used in any way in video
894
+ processing.
895
+
896
+ subtitles_language: Indicates the language to translate the subtitles into. If this is
897
+ not set, the original language will be used from attribute "`audio_language`".
898
+ Please note that:
899
+
900
+ - transcription into the original language is a free procedure,
901
+ - and translation from the original language into any other languages is a
902
+ "translation" procedure and is paid. More details in
903
+ [POST /ai/tasks#transcribe](https://api.gcore.com/docs/streaming/docs/api-reference/streaming/ai/create-ai-asr-task).
904
+ Language is set by 3-letter language code according to ISO-639-2
905
+ (bibliographic code).
906
+
907
+ extra_headers: Send extra headers
908
+
909
+ extra_query: Add additional query parameters to the request
910
+
911
+ extra_body: Add additional JSON properties to the request
912
+
913
+ timeout: Override the client-level default timeout for this request, in seconds
914
+ """
915
+ return await self._post(
916
+ "/streaming/ai/tasks",
917
+ body=await async_maybe_transform(
918
+ {
919
+ "task_name": task_name,
920
+ "url": url,
921
+ "audio_language": audio_language,
922
+ "category": category,
923
+ "client_entity_data": client_entity_data,
924
+ "client_user_id": client_user_id,
925
+ "subtitles_language": subtitles_language,
926
+ },
927
+ ai_task_create_params.AITaskCreateParams,
928
+ ),
929
+ options=make_request_options(
930
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
931
+ ),
932
+ cast_to=AITaskCreateResponse,
933
+ )
934
+
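The async resource mirrors the sync one; a sketch assuming the async client is exported as `AsyncGcore` and exposes the same `streaming.ai_tasks` path:

```python
import asyncio

from gcore import AsyncGcore  # assumed export name for the async client


async def main() -> None:
    client = AsyncGcore()  # assumption: credentials from the environment
    created = await client.streaming.ai_tasks.create(
        task_name="content-moderation",
        category="nsfw",
        url="https://example.com/video.mp4",  # hypothetical URL
    )
    print(created)


asyncio.run(main())
```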
935
+ def list(
936
+ self,
937
+ *,
938
+ date_created: str | NotGiven = NOT_GIVEN,
939
+ limit: int | NotGiven = NOT_GIVEN,
940
+ ordering: Literal["task_id", "status", "task_name", "started_at"] | NotGiven = NOT_GIVEN,
941
+ page: int | NotGiven = NOT_GIVEN,
942
+ search: str | NotGiven = NOT_GIVEN,
943
+ status: Literal["FAILURE", "PENDING", "RECEIVED", "RETRY", "REVOKED", "STARTED", "SUCCESS"]
944
+ | NotGiven = NOT_GIVEN,
945
+ task_id: str | NotGiven = NOT_GIVEN,
946
+ task_name: Literal["transcription", "content-moderation"] | NotGiven = NOT_GIVEN,
947
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
948
+ # The extra values given here take precedence over values defined on the client or passed to this method.
949
+ extra_headers: Headers | None = None,
950
+ extra_query: Query | None = None,
951
+ extra_body: Body | None = None,
952
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
953
+ ) -> AsyncPaginator[AITask, AsyncPageStreamingAI[AITask]]:
954
+ """Returns a list of previously created and processed AI tasks.
955
+
956
+ The list contains
957
+ brief information about the task and its execution status. Data is displayed
958
+ page by page.
959
+
960
+ Args:
961
+ date_created: Time when task was created. Datetime in ISO 8601 format.
962
+
963
+ limit: Number of results to return per page.
964
+
965
+ ordering: Which field to use when ordering the results: `task_id`, status, and
966
+ `task_name`. Sorting is done in ascending (ASC) order. If the parameter is omitted
967
+ then "`started_at` DESC" is used for ordering by default.
968
+
969
+ page: Page to view from task list, starting from 1
970
+
971
+ search: This is a field for combined text search in the following fields: `task_id`,
972
+ `task_name`, status, and `task_data`. Both full and partial searches are
973
+ possible inside the fields specified above. For example, you can filter tasks of a
974
+ certain category, or tasks by a specific original file. Example:
975
+
976
+ - To filter tasks of Content Moderation NSFW method:
977
+ `GET /streaming/ai/tasks?search=nsfw`
978
+ - To filter tasks of processing video from a specific origin:
979
+ `GET /streaming/ai/tasks?search=s3.eu-west-1.amazonaws.com`
980
+
981
+ status: Task status
982
+
983
+ task_id: The unique identifier of the task to find
984
+
985
+ task_name: Type of the AI task. Reflects the original API method that was used to create
986
+ the AI task.
987
+
988
+ extra_headers: Send extra headers
989
+
990
+ extra_query: Add additional query parameters to the request
991
+
992
+ extra_body: Add additional JSON properties to the request
993
+
994
+ timeout: Override the client-level default timeout for this request, in seconds
995
+ """
996
+ return self._get_api_list(
997
+ "/streaming/ai/tasks",
998
+ page=AsyncPageStreamingAI[AITask],
999
+ options=make_request_options(
1000
+ extra_headers=extra_headers,
1001
+ extra_query=extra_query,
1002
+ extra_body=extra_body,
1003
+ timeout=timeout,
1004
+ query=maybe_transform(
1005
+ {
1006
+ "date_created": date_created,
1007
+ "limit": limit,
1008
+ "ordering": ordering,
1009
+ "page": page,
1010
+ "search": search,
1011
+ "status": status,
1012
+ "task_id": task_id,
1013
+ "task_name": task_name,
1014
+ },
1015
+ ai_task_list_params.AITaskListParams,
1016
+ ),
1017
+ ),
1018
+ model=AITask,
1019
+ )
1020
+
1021
+ async def cancel(
1022
+ self,
1023
+ task_id: str,
1024
+ *,
1025
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
1026
+ # The extra values given here take precedence over values defined on the client or passed to this method.
1027
+ extra_headers: Headers | None = None,
1028
+ extra_query: Query | None = None,
1029
+ extra_body: Body | None = None,
1030
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
1031
+ ) -> AITaskCancelResponse:
1032
+ """
1033
+ Stopping a previously launched AI-task without waiting for it to be fully
1034
+ completed. The task will be moved to "REVOKED" status.
1035
+
1036
+ Args:
1037
+ extra_headers: Send extra headers
1038
+
1039
+ extra_query: Add additional query parameters to the request
1040
+
1041
+ extra_body: Add additional JSON properties to the request
1042
+
1043
+ timeout: Override the client-level default timeout for this request, in seconds
1044
+ """
1045
+ if not task_id:
1046
+ raise ValueError(f"Expected a non-empty value for `task_id` but received {task_id!r}")
1047
+ return await self._post(
1048
+ f"/streaming/ai/tasks/{task_id}/cancel",
1049
+ options=make_request_options(
1050
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
1051
+ ),
1052
+ cast_to=AITaskCancelResponse,
1053
+ )
1054
+
1055
+ async def get(
1056
+ self,
1057
+ task_id: str,
1058
+ *,
1059
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
1060
+ # The extra values given here take precedence over values defined on the client or passed to this method.
1061
+ extra_headers: Headers | None = None,
1062
+ extra_query: Query | None = None,
1063
+ extra_body: Body | None = None,
1064
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
1065
+ ) -> AITaskGetResponse:
1066
+ """
1067
+ This is the single method to check the execution status of an AI task, and
1068
+ obtain the result of any type of AI task. Based on the results of processing,
1069
+ the “result” field will contain an answer corresponding to the type of the
1070
+ initially created task:
1071
+
1072
+ - ASR: Transcribe video
1073
+ - ASR: Translate subtitles
1074
+ - CM: Sports detection
1075
+ - CM: Weapon detection
1076
+ - CM: Not Safe For Work (NSFW) content detection
1077
+ - CM: Soft nudity detection
1078
+ - CM: Hard nudity detection
1079
+ - CM: Child Sexual Abuse Material (CSAM) detection
1080
+ - CM: Objects recognition (soon)
1081
+ - etc... (see other methods from /ai/ domain)
1082
+
1083
+ A queue is used to process videos. The waiting time depends on the total number
1084
+ of requests in the system, so sometimes you will have to wait. Statuses:
1085
+
1086
+ - PENDING – the task has been received and is waiting for available resources
1087
+ - STARTED – processing has started
1088
+ - SUCCESS – processing has completed successfully
1089
+ - FAILURE – processing failed
1090
+ - REVOKED – processing was cancelled by the user (or the system)
1091
+ - RETRY – the task execution failed due to internal reasons, the task is queued
1092
+ for re-execution (up to 3 times). Each task is processed in sub-stages; for
1093
+ example, original language is first determined in a video, and then
1094
+ transcription is performed. In such cases, the video processing status may
1095
+ change from "STARTED" to "PENDING", and back. This is due to waiting for
1096
+ resources for a specific processing sub-stage. In this case, the overall
1097
+ percentage "progress" of video processing will reflect the full picture.
1098
+
1099
+ The result data is stored for 1 month, after which it is deleted.
1100
+
1101
+ For billing conditions see the corresponding methods in /ai/ domain. The task is
1102
+ billed only after successful completion of the task and transition to "SUCCESS"
1103
+ status.
1104
+
1105
+ Args:
1106
+ extra_headers: Send extra headers
1107
+
1108
+ extra_query: Add additional query parameters to the request
1109
+
1110
+ extra_body: Add additional JSON properties to the request
1111
+
1112
+ timeout: Override the client-level default timeout for this request, in seconds
1113
+ """
1114
+ if not task_id:
1115
+ raise ValueError(f"Expected a non-empty value for `task_id` but received {task_id!r}")
1116
+ return await self._get(
1117
+ f"/streaming/ai/tasks/{task_id}",
1118
+ options=make_request_options(
1119
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
1120
+ ),
1121
+ cast_to=AITaskGetResponse,
1122
+ )
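The status list in the `get` docstring suggests a simple polling loop. Below is a sketch under the same assumptions as the earlier example (`AsyncGcore` client, `client.streaming.ai_tasks` accessor); the `status` attribute on the response is likewise an assumption based on the documented statuses.

```python
import asyncio

from gcore import AsyncGcore  # assumed export


async def wait_for_task(client: AsyncGcore, task_id: str, poll_interval: float = 10.0):
    """Poll an AI task until it reaches a terminal status (SUCCESS/FAILURE/REVOKED)."""
    terminal = {"SUCCESS", "FAILURE", "REVOKED"}
    while True:
        task = await client.streaming.ai_tasks.get(task_id=task_id)
        # `status` is assumed to be exposed on the response model, per the docstring.
        if getattr(task, "status", None) in terminal:
            return task
        await asyncio.sleep(poll_interval)


async def main() -> None:
    client = AsyncGcore(api_key="YOUR_API_KEY")  # api_key name is an assumption
    result = await wait_for_task(client, "<task-id>")
    print(result)


asyncio.run(main())
```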
+
+    async def get_ai_settings(
+        self,
+        *,
+        type: Literal["language_support"],
+        audio_language: str | NotGiven = NOT_GIVEN,
+        subtitles_language: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AITaskGetAISettingsResponse:
+        """
+        This method reveals basic information and the advanced underlying settings that
+        are used when performing AI tasks.
+
+        Parameter sections:
+
+        - `language_support` – AI Translation: check whether a language pair is
+          supported for AI translation.
+        - this list will expand as new AI methods are added.
+
+        **`language_support`**: many languages are available for transcription, but not
+        all of them can be automatically translated to and from with good quality. To
+        determine whether translation from the audio language to the desired subtitle
+        language is available, use the `language_support` type. AI models are
+        constantly improving, so this method can be used for dynamic determination.
+        Example:
+
+        ```
+        curl -L 'https://api.gcore.com/streaming/ai/info?type=language_support&audio_language=eng&subtitles_language=fre'
+        { "supported": true }
+        ```
+
+        Today we provide the capabilities described below. These are the 100 languages
+        for which we support only transcription and translation to English. Their
+        ISO 639-2/B codes are:
+        `afr, sqi, amh, ara, hye, asm, aze, bak, eus, bel, ben, bos, bre, bul, mya, cat, zho, hrv, ces, dan, nld, eng, est, fao, fin, fra, glg, kat, deu, guj, hat, hau, haw, heb, hin, hun, isl, ind, ita, jpn, jav, kan, kaz, khm, kor, lao, lat, lav, lin, lit, ltz, mkd, mlg, msa, mal, mlt, mri, mar, ell, mon, nep, nor, nno, oci, pan, fas, pol, por, pus, ron, rus, san, srp, sna, snd, sin, slk, slv, som, spa, sun, swa, swe, tgl, tgk, tam, tat, tel, tha, bod, tur, tuk, ukr, urd, uzb, vie, cym, yid, yor`.
+        These are the 77 languages for which we support translation both to and from
+        other languages:
+        `afr, amh, ara, hye, asm, aze, eus, bel, ben, bos, bul, mya, cat, zho, hrv, ces, dan, nld, eng, est, fin, fra, glg, kat, deu, guj, heb, hin, hun, isl, ind, ita, jpn, jav, kan, kaz, khm, kor, lao, lav, lit, mkd, mal, mlt, mar, ell, mon, nep, nno, pan, fas, pol, por, pus, ron, rus, srp, sna, snd, slk, slv, som, spa, swa, swe, tgl, tgk, tam, tel, tha, tur, ukr, urd, vie, cym, yor`.
+
+        Args:
+          type: The parameters section for which parameters are requested
+
+          audio_language: The source language from which the audio will be transcribed. Required when
+              `type=language_support`. Value is a 3-letter language code according to ISO 639-2
+              (bibliographic code), e.g. fre for French.
+
+          subtitles_language: The target language the text will be translated into. If omitted, the API will
+              return whether the `audio_language` is supported for transcription only, instead
+              of translation. Value is a 3-letter language code according to ISO 639-2
+              (bibliographic code), e.g. fre for French.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            "/streaming/ai/info",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {
+                        "type": type,
+                        "audio_language": audio_language,
+                        "subtitles_language": subtitles_language,
+                    },
+                    ai_task_get_ai_settings_params.AITaskGetAISettingsParams,
+                ),
+            ),
+            cast_to=AITaskGetAISettingsResponse,
+        )
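A sketch of the language-support check from the docstring above, mirroring its curl example (same naming assumptions as the earlier sketches):

```python
import asyncio

from gcore import AsyncGcore  # assumed export


async def main() -> None:
    client = AsyncGcore(api_key="YOUR_API_KEY")  # api_key name is an assumption
    # Ask whether English audio can be translated into French subtitles.
    info = await client.streaming.ai_tasks.get_ai_settings(
        type="language_support",
        audio_language="eng",
        subtitles_language="fre",
    )
    print(info)  # expected shape per the docstring: {"supported": true}


asyncio.run(main())
```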
+
+
+class AITasksResourceWithRawResponse:
+    def __init__(self, ai_tasks: AITasksResource) -> None:
+        self._ai_tasks = ai_tasks
+
+        self.create = to_raw_response_wrapper(
+            ai_tasks.create,
+        )
+        self.list = to_raw_response_wrapper(
+            ai_tasks.list,
+        )
+        self.cancel = to_raw_response_wrapper(
+            ai_tasks.cancel,
+        )
+        self.get = to_raw_response_wrapper(
+            ai_tasks.get,
+        )
+        self.get_ai_settings = to_raw_response_wrapper(
+            ai_tasks.get_ai_settings,
+        )
+
+
+class AsyncAITasksResourceWithRawResponse:
+    def __init__(self, ai_tasks: AsyncAITasksResource) -> None:
+        self._ai_tasks = ai_tasks
+
+        self.create = async_to_raw_response_wrapper(
+            ai_tasks.create,
+        )
+        self.list = async_to_raw_response_wrapper(
+            ai_tasks.list,
+        )
+        self.cancel = async_to_raw_response_wrapper(
+            ai_tasks.cancel,
+        )
+        self.get = async_to_raw_response_wrapper(
+            ai_tasks.get,
+        )
+        self.get_ai_settings = async_to_raw_response_wrapper(
+            ai_tasks.get_ai_settings,
+        )
+
+
+class AITasksResourceWithStreamingResponse:
+    def __init__(self, ai_tasks: AITasksResource) -> None:
+        self._ai_tasks = ai_tasks
+
+        self.create = to_streamed_response_wrapper(
+            ai_tasks.create,
+        )
+        self.list = to_streamed_response_wrapper(
+            ai_tasks.list,
+        )
+        self.cancel = to_streamed_response_wrapper(
+            ai_tasks.cancel,
+        )
+        self.get = to_streamed_response_wrapper(
+            ai_tasks.get,
+        )
+        self.get_ai_settings = to_streamed_response_wrapper(
+            ai_tasks.get_ai_settings,
+        )
+
+
+class AsyncAITasksResourceWithStreamingResponse:
+    def __init__(self, ai_tasks: AsyncAITasksResource) -> None:
+        self._ai_tasks = ai_tasks
+
+        self.create = async_to_streamed_response_wrapper(
+            ai_tasks.create,
+        )
+        self.list = async_to_streamed_response_wrapper(
+            ai_tasks.list,
+        )
+        self.cancel = async_to_streamed_response_wrapper(
+            ai_tasks.cancel,
+        )
+        self.get = async_to_streamed_response_wrapper(
+            ai_tasks.get,
+        )
+        self.get_ai_settings = async_to_streamed_response_wrapper(
+            ai_tasks.get_ai_settings,
+        )
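These wrapper classes implement the usual generated-SDK pattern of raw and streamed response views. A sketch of how such a view is typically reached, assuming the resource exposes a `with_raw_response` property wired to these classes and that the raw response offers a `parse()` helper (both are assumptions, not confirmed by this diff):

```python
import asyncio

from gcore import AsyncGcore  # assumed export


async def main() -> None:
    client = AsyncGcore(api_key="YOUR_API_KEY")  # api_key name is an assumption
    # Call through the raw-response view to inspect HTTP details alongside the data.
    response = await client.streaming.ai_tasks.with_raw_response.get(task_id="<task-id>")
    task = response.parse()  # parse() is assumed from the raw-response wrapper pattern
    print(task)


asyncio.run(main())
```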