webscout 5.0__py3-none-any.whl → 5.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

@@ -36,11 +36,12 @@ class OLLAMA(Provider):
36
36
  proxies: dict = {},
37
37
  history_offset: int = 10250,
38
38
  act: str = None,
39
+ system_prompt: str = "You are a helpful and friendly AI assistant.",
39
40
  ):
40
41
  """Instantiates Ollama
41
42
 
42
43
  Args:
43
- model (str, optional): Model name. Defaults to 'llama2'.
44
+ model (str, optional): Model name. Defaults to 'qwen2:0.5b'.
44
45
  is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
45
46
  max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
46
47
  timeout (int, optional): Http request timeout. Defaults to 30.
@@ -50,12 +51,14 @@ class OLLAMA(Provider):
50
51
  proxies (dict, optional): Http request proxies. Defaults to {}.
51
52
  history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
52
53
  act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
54
+ system_prompt (str, optional): System prompt for Ollama. Defaults to "You are a helpful and friendly AI assistant.".
53
55
  """
54
56
  self.model = model
55
57
  self.is_conversation = is_conversation
56
58
  self.max_tokens_to_sample = max_tokens
57
59
  self.timeout = timeout
58
60
  self.last_response = {}
61
+ self.system_prompt = system_prompt
59
62
 
60
63
  self.__available_optimizers = (
61
64
  method
@@ -110,21 +113,19 @@ class OLLAMA(Provider):
110
113
  )
111
114
 
112
115
  def for_stream():
116
+ # Correctly call ollama.chat with stream=True
113
117
  stream = ollama.chat(model=self.model, messages=[
118
+ {'role': 'system', 'content': self.system_prompt},
114
119
  {'role': 'user', 'content': conversation_prompt}
115
120
  ], stream=True)
116
121
 
117
- message_load = ""
122
+ # Yield each chunk directly
118
123
  for chunk in stream:
119
- message_load += chunk['message']['content']
120
- yield chunk['message']['content'] if raw else dict(text=message_load)
121
- self.last_response.update(dict(text=message_load))
122
- self.conversation.update_chat_history(
123
- prompt, self.get_message(self.last_response)
124
- )
124
+ yield chunk['message']['content'] if raw else dict(text=chunk['message']['content'])
125
125
 
126
126
  def for_non_stream():
127
127
  response = ollama.chat(model=self.model, messages=[
128
+ {'role': 'system', 'content': self.system_prompt}, # Add system message
128
129
  {'role': 'user', 'content': conversation_prompt}
129
130
  ])
130
131
  self.last_response.update(dict(text=response['message']['content']))
@@ -183,6 +184,6 @@ class OLLAMA(Provider):
183
184
  return response["text"]
184
185
  if __name__ == "__main__":
185
186
  ollama_provider = OLLAMA(model="qwen:0.5b")
186
- response = ollama_provider.chat("hi")
187
+ response = ollama_provider.chat("hi", stream=True)
187
188
  for r in response:
188
- print(r, end="", flush=True)
189
+ print(r, end="", flush=True)
@@ -0,0 +1,138 @@
1
+ import requests
2
+ import os
3
+ import time
4
+ from typing import List
5
+ from string import punctuation
6
+ from random import choice
7
+ from requests.exceptions import RequestException
8
+
9
+ from webscout.AIbase import ImageProvider
10
+
11
+ class PollinationsAI(ImageProvider):
12
+ """Image provider for pollinations.ai"""
13
+
14
+ def __init__(self, timeout: int = 60, proxies: dict = {}):
15
+ """Initializes the PollinationsAI class.
16
+
17
+ Args:
18
+ timeout (int, optional): HTTP request timeout in seconds. Defaults to 60.
19
+ proxies (dict, optional): HTTP request proxies (socks). Defaults to {}.
20
+ """
21
+ self.image_gen_endpoint = "https://image.pollinations.ai/prompt/{prompt}?width={width}&height={height}&model={model}"
22
+ self.headers = {
23
+ "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
24
+ "Accept-Language": "en-US,en;q=0.5",
25
+ "Accept-Encoding": "gzip, deflate",
26
+ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
27
+ }
28
+ self.session = requests.Session()
29
+ self.session.headers.update(self.headers)
30
+ self.session.proxies.update(proxies)
31
+ self.timeout = timeout
32
+ self.prompt: str = "AI-generated image - webscout"
33
+ self.image_extension: str = "jpeg"
34
+
35
+ def generate(
36
+ self, prompt: str, amount: int = 1, additives: bool = True,
37
+ width: int = 768, height: int = 768, model: str = "flux",
38
+ max_retries: int = 3, retry_delay: int = 5
39
+ ) -> List[bytes]:
40
+ """Generate image from prompt
41
+
42
+ Args:
43
+ prompt (str): Image description.
44
+ amount (int): Total images to be generated. Defaults to 1.
45
+ additives (bool, optional): Try to make each prompt unique. Defaults to True.
46
+ width (int, optional): Width of the generated image. Defaults to 768.
47
+ height (int, optional): Height of the generated image. Defaults to 768.
48
+ model (str, optional): The model to use for image generation. Defaults to "flux".
49
+ max_retries (int, optional): Maximum number of retry attempts. Defaults to 3.
50
+ retry_delay (int, optional): Delay between retries in seconds. Defaults to 5.
51
+
52
+ Returns:
53
+ List[bytes]: List of generated images as bytes.
54
+ """
55
+ assert bool(prompt), "Prompt cannot be null"
56
+ assert isinstance(amount, int), f"Amount should be an integer only not {type(amount)}"
57
+ assert amount > 0, "Amount should be greater than 0"
58
+
59
+ ads = lambda: (
60
+ ""
61
+ if not additives
62
+ else choice(punctuation)
63
+ + choice(punctuation)
64
+ + choice(punctuation)
65
+ + choice(punctuation)
66
+ + choice(punctuation)
67
+ )
68
+
69
+ self.prompt = prompt
70
+ response = []
71
+ for _ in range(amount):
72
+ url = self.image_gen_endpoint.format(
73
+ prompt=prompt + ads(), width=width, height=height, model=model
74
+ )
75
+
76
+ for attempt in range(max_retries):
77
+ try:
78
+ resp = self.session.get(url, timeout=self.timeout)
79
+ resp.raise_for_status()
80
+ response.append(resp.content)
81
+ break
82
+ except RequestException as e:
83
+ if attempt == max_retries - 1:
84
+ print(f"Failed to generate image after {max_retries} attempts: {e}")
85
+ raise
86
+ else:
87
+ print(f"Attempt {attempt + 1} failed. Retrying in {retry_delay} seconds...")
88
+ time.sleep(retry_delay)
89
+
90
+ return response
91
+
92
+ def save(
93
+ self,
94
+ response: List[bytes],
95
+ name: str = None,
96
+ dir: str = os.getcwd(),
97
+ filenames_prefix: str = "",
98
+ ) -> List[str]:
99
+ """Save generated images
100
+
101
+ Args:
102
+ response (List[bytes]): List of generated images as bytes.
103
+ name (str): Filename for the images. Defaults to the last prompt.
104
+ dir (str, optional): Directory for saving images. Defaults to os.getcwd().
105
+ filenames_prefix (str, optional): String to be prefixed at each filename to be returned.
106
+
107
+ Returns:
108
+ List[str]: List of saved filenames.
109
+ """
110
+ assert isinstance(response, list), f"Response should be of {list} not {type(response)}"
111
+ name = self.prompt if name is None else name
112
+
113
+ filenames = []
114
+ count = 0
115
+ for image in response:
116
+ def complete_path():
117
+ count_value = "" if count == 0 else f"_{count}"
118
+ return os.path.join(dir, name + count_value + "." + self.image_extension)
119
+
120
+ while os.path.isfile(complete_path()):
121
+ count += 1
122
+
123
+ absolute_path_to_file = complete_path()
124
+ filenames.append(filenames_prefix + os.path.split(absolute_path_to_file)[1])
125
+
126
+ with open(absolute_path_to_file, "wb") as fh:
127
+ fh.write(image)
128
+
129
+ return filenames
130
+
131
+
132
+ if __name__ == "__main__":
133
+ bot = PollinationsAI()
134
+ try:
135
+ resp = bot.generate("AI-generated image - webscout", 1)
136
+ print(bot.save(resp))
137
+ except Exception as e:
138
+ print(f"An error occurred: {e}")
@@ -0,0 +1,2 @@
1
+ from .deepinfra import *
2
+ from .PollinationsAI import *
@@ -0,0 +1,148 @@
1
+ import requests
2
+ import os
3
+ from typing import List
4
+ from string import punctuation
5
+ from random import choice
6
+ from random import randint
7
+ import base64
8
+
9
+ from webscout.AIbase import ImageProvider
10
+
11
+ class DeepInfraImager(ImageProvider):
12
+ """DeepInfra Image provider"""
13
+
14
+ def __init__(
15
+ self,
16
+ model: str = "black-forest-labs/FLUX-1-dev",
17
+ timeout: int = 60,
18
+ proxies: dict = {},
19
+ ):
20
+ """Initializes `DeepInfraImager`
21
+
22
+ Args:
23
+ model (str, optional): The name of the DeepInfra model to use.
24
+ Defaults to "black-forest-labs/FLUX-1-dev".
25
+ timeout (int, optional): Http request timeout. Defaults to 60 seconds.
26
+ proxies (dict, optional): Http request proxies (socks). Defaults to {}.
27
+ """
28
+ self.image_gen_endpoint: str = f"https://api.deepinfra.com/v1/inference/{model}"
29
+ self.headers = {
30
+ "Accept": "application/json, text/plain, */*",
31
+ "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
32
+ "Accept-Encoding": "gzip, deflate, br, zstd",
33
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0",
34
+ "DNT": "1",
35
+ "Origin": "https://deepinfra.com",
36
+ "Referer": "https://deepinfra.com/",
37
+ "Sec-CH-UA": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
38
+ "Sec-CH-UA-Mobile": "?0",
39
+ "Sec-CH-UA-Platform": '"Windows"',
40
+ "Sec-Fetch-Dest": "empty",
41
+ "Sec-Fetch-Mode": "cors",
42
+ "Sec-Fetch-Site": "same-site"
43
+ }
44
+ self.session = requests.Session()
45
+ self.session.headers.update(self.headers)
46
+ self.session.proxies.update(proxies)
47
+ self.timeout = timeout
48
+ self.prompt: str = "AI-generated image - webscout"
49
+ self.image_extension: str = "png"
50
+
51
+ def generate(
52
+ self, prompt: str, amount: int = 1, additives: bool = True,
53
+ num_inference_steps: int = 25, guidance_scale: float = 7.5,
54
+ width: int = 1024, height: int = 1024, seed: int = None
55
+ ) -> list[bytes]:
56
+ """Generate image from prompt
57
+
58
+ Args:
59
+ prompt (str): Image description.
60
+ amount (int): Total images to be generated. Defaults to 1.
61
+ additives (bool, optional): Try to make each prompt unique. Defaults to True.
62
+ num_inference_steps (int, optional): Number of inference steps. Defaults to 25.
63
+ guidance_scale (float, optional): Guidance scale for image generation. Defaults to 7.5.
64
+ width (int, optional): Width of the generated image. Defaults to 1024.
65
+ height (int, optional): Height of the generated image. Defaults to 1024.
66
+ seed (int, optional): Random seed for image generation. If None, a random seed is used.
67
+ Defaults to None.
68
+
69
+ Returns:
70
+ list[bytes]: List of generated images as bytes.
71
+ """
72
+ assert bool(prompt), "Prompt cannot be null"
73
+ assert isinstance(amount, int), f"Amount should be an integer only not {type(amount)}"
74
+ assert amount > 0, "Amount should be greater than 0"
75
+
76
+ ads = lambda: (
77
+ ""
78
+ if not additives
79
+ else choice(punctuation)
80
+ + choice(punctuation)
81
+ + choice(punctuation)
82
+ + choice(punctuation)
83
+ + choice(punctuation)
84
+ )
85
+
86
+ self.prompt = prompt
87
+ response = []
88
+ for _ in range(amount):
89
+ payload = {
90
+ "prompt": prompt + ads(),
91
+ "num_inference_steps": num_inference_steps,
92
+ "guidance_scale": guidance_scale,
93
+ "width": width,
94
+ "height": height,
95
+ "seed": seed if seed is not None else randint(1, 10000),
96
+ }
97
+ resp = self.session.post(url=self.image_gen_endpoint, json=payload, timeout=self.timeout)
98
+ resp.raise_for_status()
99
+ # Extract base64 encoded image data and decode it
100
+ image_data = resp.json()['images'][0].split(",")[1]
101
+ image_bytes = base64.b64decode(image_data)
102
+ response.append(image_bytes)
103
+
104
+ return response
105
+
106
+ def save(
107
+ self,
108
+ response: list[bytes],
109
+ name: str = None,
110
+ dir: str = os.getcwd(),
111
+ filenames_prefix: str = "",
112
+ ) -> list[str]:
113
+ """Save generated images
114
+
115
+ Args:
116
+ response (list[bytes]): List of generated images as bytes.
117
+ name (str): Filename for the images. Defaults to last prompt.
118
+ dir (str, optional): Directory for saving images. Defaults to os.getcwd().
119
+ filenames_prefix (str, optional): String to be prefixed at each filename to be returned.
120
+ """
121
+ assert isinstance(response, list), f"Response should be of {list} not {type(response)}"
122
+ name = self.prompt if name is None else name
123
+
124
+ filenames: list = []
125
+ count = 0
126
+ for image in response:
127
+ def complete_path():
128
+ count_value = "" if count == 0 else f"_{count}"
129
+ return os.path.join(
130
+ dir, name + count_value + "." + self.image_extension
131
+ )
132
+
133
+ while os.path.isfile(complete_path()):
134
+ count += 1
135
+
136
+ absolute_path_to_file = complete_path()
137
+ filenames.append(filenames_prefix + os.path.split(absolute_path_to_file)[1])
138
+
139
+ with open(absolute_path_to_file, "wb") as fh:
140
+ fh.write(image)
141
+
142
+ return filenames
143
+
144
+
145
+ if __name__ == "__main__":
146
+ bot = DeepInfraImager()
147
+ resp = bot.generate("AI-generated image - webscout", 1)
148
+ print(bot.save(resp))
@@ -0,0 +1,2 @@
1
+ from .streamElements import *
2
+ from .voicepod import *
@@ -0,0 +1,296 @@
1
+ import time
2
+ import pygame
3
+ import requests
4
+ import pathlib
5
+ import urllib.parse
6
+ from typing import Union, Generator
7
+
8
+ from webscout import exceptions
9
+ from webscout.AIbase import TTSProvider
10
+
11
+ class StreamElements(TTSProvider):
12
+ """
13
+ Text-to-speech provider using the StreamElements API.
14
+ """
15
+
16
+ # Request headers
17
+ headers: dict[str, str] = {
18
+ "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36"
19
+ }
20
+ cache_dir = pathlib.Path("./audio_cache")
21
+ all_voices: list[str] = [
22
+ "Filiz",
23
+ "Astrid",
24
+ "Tatyana",
25
+ "Maxim",
26
+ "Carmen",
27
+ "Ines",
28
+ "Cristiano",
29
+ "Vitoria",
30
+ "Ricardo",
31
+ "Maja",
32
+ "Jan",
33
+ "Jacek",
34
+ "Ewa",
35
+ "Ruben",
36
+ "Lotte",
37
+ "Liv",
38
+ "Seoyeon",
39
+ "Takumi",
40
+ "Mizuki",
41
+ "Giorgio",
42
+ "Carla",
43
+ "Bianca",
44
+ "Karl",
45
+ "Dora",
46
+ "Mathieu",
47
+ "Celine",
48
+ "Chantal",
49
+ "Penelope",
50
+ "Miguel",
51
+ "Mia",
52
+ "Enrique",
53
+ "Conchita",
54
+ "Geraint",
55
+ "Salli",
56
+ "Matthew",
57
+ "Kimberly",
58
+ "Kendra",
59
+ "Justin",
60
+ "Joey",
61
+ "Joanna",
62
+ "Ivy",
63
+ "Raveena",
64
+ "Aditi",
65
+ "Emma",
66
+ "Brian",
67
+ "Amy",
68
+ "Russell",
69
+ "Nicole",
70
+ "Vicki",
71
+ "Marlene",
72
+ "Hans",
73
+ "Naja",
74
+ "Mads",
75
+ "Gwyneth",
76
+ "Zhiyu",
77
+ "es-ES-Standard-A",
78
+ "it-IT-Standard-A",
79
+ "it-IT-Wavenet-A",
80
+ "ja-JP-Standard-A",
81
+ "ja-JP-Wavenet-A",
82
+ "ko-KR-Standard-A",
83
+ "ko-KR-Wavenet-A",
84
+ "pt-BR-Standard-A",
85
+ "tr-TR-Standard-A",
86
+ "sv-SE-Standard-A",
87
+ "nl-NL-Standard-A",
88
+ "nl-NL-Wavenet-A",
89
+ "en-US-Wavenet-A",
90
+ "en-US-Wavenet-B",
91
+ "en-US-Wavenet-C",
92
+ "en-US-Wavenet-D",
93
+ "en-US-Wavenet-E",
94
+ "en-US-Wavenet-F",
95
+ "en-GB-Standard-A",
96
+ "en-GB-Standard-B",
97
+ "en-GB-Standard-C",
98
+ "en-GB-Standard-D",
99
+ "en-GB-Wavenet-A",
100
+ "en-GB-Wavenet-B",
101
+ "en-GB-Wavenet-C",
102
+ "en-GB-Wavenet-D",
103
+ "en-US-Standard-B",
104
+ "en-US-Standard-C",
105
+ "en-US-Standard-D",
106
+ "en-US-Standard-E",
107
+ "de-DE-Standard-A",
108
+ "de-DE-Standard-B",
109
+ "de-DE-Wavenet-A",
110
+ "de-DE-Wavenet-B",
111
+ "de-DE-Wavenet-C",
112
+ "de-DE-Wavenet-D",
113
+ "en-AU-Standard-A",
114
+ "en-AU-Standard-B",
115
+ "en-AU-Wavenet-A",
116
+ "en-AU-Wavenet-B",
117
+ "en-AU-Wavenet-C",
118
+ "en-AU-Wavenet-D",
119
+ "en-AU-Standard-C",
120
+ "en-AU-Standard-D",
121
+ "fr-CA-Standard-A",
122
+ "fr-CA-Standard-B",
123
+ "fr-CA-Standard-C",
124
+ "fr-CA-Standard-D",
125
+ "fr-FR-Standard-C",
126
+ "fr-FR-Standard-D",
127
+ "fr-FR-Wavenet-A",
128
+ "fr-FR-Wavenet-B",
129
+ "fr-FR-Wavenet-C",
130
+ "fr-FR-Wavenet-D",
131
+ "da-DK-Wavenet-A",
132
+ "pl-PL-Wavenet-A",
133
+ "pl-PL-Wavenet-B",
134
+ "pl-PL-Wavenet-C",
135
+ "pl-PL-Wavenet-D",
136
+ "pt-PT-Wavenet-A",
137
+ "pt-PT-Wavenet-B",
138
+ "pt-PT-Wavenet-C",
139
+ "pt-PT-Wavenet-D",
140
+ "ru-RU-Wavenet-A",
141
+ "ru-RU-Wavenet-B",
142
+ "ru-RU-Wavenet-C",
143
+ "ru-RU-Wavenet-D",
144
+ "sk-SK-Wavenet-A",
145
+ "tr-TR-Wavenet-A",
146
+ "tr-TR-Wavenet-B",
147
+ "tr-TR-Wavenet-C",
148
+ "tr-TR-Wavenet-D",
149
+ "tr-TR-Wavenet-E",
150
+ "uk-UA-Wavenet-A",
151
+ "ar-XA-Wavenet-A",
152
+ "ar-XA-Wavenet-B",
153
+ "ar-XA-Wavenet-C",
154
+ "cs-CZ-Wavenet-A",
155
+ "nl-NL-Wavenet-B",
156
+ "nl-NL-Wavenet-C",
157
+ "nl-NL-Wavenet-D",
158
+ "nl-NL-Wavenet-E",
159
+ "en-IN-Wavenet-A",
160
+ "en-IN-Wavenet-B",
161
+ "en-IN-Wavenet-C",
162
+ "fil-PH-Wavenet-A",
163
+ "fi-FI-Wavenet-A",
164
+ "el-GR-Wavenet-A",
165
+ "hi-IN-Wavenet-A",
166
+ "hi-IN-Wavenet-B",
167
+ "hi-IN-Wavenet-C",
168
+ "hu-HU-Wavenet-A",
169
+ "id-ID-Wavenet-A",
170
+ "id-ID-Wavenet-B",
171
+ "id-ID-Wavenet-C",
172
+ "it-IT-Wavenet-B",
173
+ "it-IT-Wavenet-C",
174
+ "it-IT-Wavenet-D",
175
+ "ja-JP-Wavenet-B",
176
+ "ja-JP-Wavenet-C",
177
+ "ja-JP-Wavenet-D",
178
+ "cmn-CN-Wavenet-A",
179
+ "cmn-CN-Wavenet-B",
180
+ "cmn-CN-Wavenet-C",
181
+ "cmn-CN-Wavenet-D",
182
+ "nb-no-Wavenet-E",
183
+ "nb-no-Wavenet-A",
184
+ "nb-no-Wavenet-B",
185
+ "nb-no-Wavenet-C",
186
+ "nb-no-Wavenet-D",
187
+ "vi-VN-Wavenet-A",
188
+ "vi-VN-Wavenet-B",
189
+ "vi-VN-Wavenet-C",
190
+ "vi-VN-Wavenet-D",
191
+ "sr-rs-Standard-A",
192
+ "lv-lv-Standard-A",
193
+ "is-is-Standard-A",
194
+ "bg-bg-Standard-A",
195
+ "af-ZA-Standard-A",
196
+ "Tracy",
197
+ "Danny",
198
+ "Huihui",
199
+ "Yaoyao",
200
+ "Kangkang",
201
+ "HanHan",
202
+ "Zhiwei",
203
+ "Asaf",
204
+ "An",
205
+ "Stefanos",
206
+ "Filip",
207
+ "Ivan",
208
+ "Heidi",
209
+ "Herena",
210
+ "Kalpana",
211
+ "Hemant",
212
+ "Matej",
213
+ "Andika",
214
+ "Rizwan",
215
+ "Lado",
216
+ "Valluvar",
217
+ "Linda",
218
+ "Heather",
219
+ "Sean",
220
+ "Michael",
221
+ "Karsten",
222
+ "Guillaume",
223
+ "Pattara",
224
+ "Jakub",
225
+ "Szabolcs",
226
+ "Hoda",
227
+ "Naayf",
228
+ ]
229
+
230
+ def __init__(self, timeout: int = 20, proxies: dict = None):
231
+ """Initializes the StreamElements TTS client."""
232
+ self.session = requests.Session()
233
+ self.session.headers.update(self.headers)
234
+ if proxies:
235
+ self.session.proxies.update(proxies)
236
+ self.timeout = timeout
237
+
238
+ def tts(self, text: str, voice: str = "Brian") -> str:
239
+ """
240
+ Converts text to speech using the StreamElements API and saves it to a file.
241
+ """
242
+ assert (
243
+ voice in self.all_voices
244
+ ), f"Voice '{voice}' not one of [{', '.join(self.all_voices)}]"
245
+
246
+ url = f"https://api.streamelements.com/kappa/v2/speech?voice={voice}&text={{{urllib.parse.quote(text)}}}"
247
+ filename = self.cache_dir / f"{int(time.time())}.mp3"
248
+
249
+ try:
250
+ response = self.session.get(url=url, headers=self.headers, stream=True, timeout=self.timeout)
251
+ response.raise_for_status()
252
+
253
+ # Create the audio_cache directory if it doesn't exist
254
+ self.cache_dir.mkdir(parents=True, exist_ok=True)
255
+
256
+ with open(filename, "wb") as f:
257
+ for chunk in response.iter_content(chunk_size=512):
258
+ if chunk:
259
+ f.write(chunk)
260
+
261
+ return filename.as_posix()
262
+
263
+ except requests.exceptions.RequestException as e:
264
+ raise exceptions.FailedToGenerateResponseError(
265
+ f"Failed to perform the operation: {e}"
266
+ )
267
+
268
+ def play_audio(self, filename: str):
269
+ """
270
+ Plays an audio file using pygame.
271
+
272
+ Args:
273
+ filename (str): The path to the audio file.
274
+
275
+ Raises:
276
+ RuntimeError: If there is an error playing the audio.
277
+ """
278
+ try:
279
+ pygame.mixer.init()
280
+ pygame.mixer.music.load(filename)
281
+ pygame.mixer.music.play()
282
+ while pygame.mixer.music.get_busy():
283
+ pygame.time.Clock().tick(10)
284
+ except Exception as e:
285
+ raise RuntimeError(f"Error playing audio: {e}")
286
+
287
+ # Example usage
288
+ if __name__ == "__main__":
289
+ streamelements = StreamElements()
290
+ text = "This is a test of the StreamElements text-to-speech API."
291
+
292
+ print("Generating audio...")
293
+ audio_file = streamelements.tts(text, voice="Brian")
294
+
295
+ print("Playing audio...")
296
+ streamelements.play_audio(audio_file)