webscout 6.3-py3-none-any.whl → 6.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (85)
  1. webscout/AIauto.py +191 -176
  2. webscout/AIbase.py +0 -197
  3. webscout/AIutel.py +488 -1130
  4. webscout/Bing_search.py +250 -153
  5. webscout/DWEBS.py +151 -19
  6. webscout/Extra/__init__.py +2 -1
  7. webscout/Extra/autocoder/__init__.py +9 -0
  8. webscout/Extra/autocoder/autocoder_utiles.py +121 -0
  9. webscout/Extra/autocoder/rawdog.py +681 -0
  10. webscout/Extra/autollama.py +246 -195
  11. webscout/Extra/gguf.py +441 -416
  12. webscout/LLM.py +206 -43
  13. webscout/Litlogger/__init__.py +681 -0
  14. webscout/Provider/DARKAI.py +1 -1
  15. webscout/Provider/EDITEE.py +1 -1
  16. webscout/Provider/NinjaChat.py +1 -1
  17. webscout/Provider/PI.py +221 -207
  18. webscout/Provider/Perplexity.py +598 -598
  19. webscout/Provider/RoboCoders.py +206 -0
  20. webscout/Provider/TTI/AiForce/__init__.py +22 -0
  21. webscout/Provider/TTI/AiForce/async_aiforce.py +257 -0
  22. webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -0
  23. webscout/Provider/TTI/Nexra/__init__.py +22 -0
  24. webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
  25. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
  26. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
  27. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -0
  28. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -0
  29. webscout/Provider/TTI/__init__.py +2 -4
  30. webscout/Provider/TTI/artbit/__init__.py +22 -0
  31. webscout/Provider/TTI/artbit/async_artbit.py +184 -0
  32. webscout/Provider/TTI/artbit/sync_artbit.py +176 -0
  33. webscout/Provider/TTI/blackbox/__init__.py +4 -0
  34. webscout/Provider/TTI/blackbox/async_blackbox.py +212 -0
  35. webscout/Provider/TTI/{blackboximage.py → blackbox/sync_blackbox.py} +199 -153
  36. webscout/Provider/TTI/deepinfra/__init__.py +4 -0
  37. webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -0
  38. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -0
  39. webscout/Provider/TTI/huggingface/__init__.py +22 -0
  40. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
  41. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
  42. webscout/Provider/TTI/imgninza/__init__.py +4 -0
  43. webscout/Provider/TTI/imgninza/async_ninza.py +214 -0
  44. webscout/Provider/TTI/{imgninza.py → imgninza/sync_ninza.py} +209 -136
  45. webscout/Provider/TTI/talkai/__init__.py +4 -0
  46. webscout/Provider/TTI/talkai/async_talkai.py +229 -0
  47. webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
  48. webscout/Provider/__init__.py +146 -139
  49. webscout/Provider/askmyai.py +2 -2
  50. webscout/Provider/cerebras.py +227 -219
  51. webscout/Provider/llama3mitril.py +0 -1
  52. webscout/Provider/mhystical.py +176 -0
  53. webscout/Provider/perplexitylabs.py +265 -0
  54. webscout/Provider/twitterclone.py +251 -245
  55. webscout/Provider/typegpt.py +359 -0
  56. webscout/__init__.py +28 -23
  57. webscout/__main__.py +5 -5
  58. webscout/cli.py +252 -280
  59. webscout/conversation.py +227 -0
  60. webscout/exceptions.py +161 -29
  61. webscout/litagent/__init__.py +172 -0
  62. webscout/litprinter/__init__.py +831 -0
  63. webscout/optimizers.py +270 -0
  64. webscout/prompt_manager.py +279 -0
  65. webscout/swiftcli/__init__.py +810 -0
  66. webscout/transcriber.py +479 -551
  67. webscout/update_checker.py +125 -0
  68. webscout/version.py +1 -1
  69. {webscout-6.3.dist-info → webscout-6.4.dist-info}/METADATA +26 -45
  70. {webscout-6.3.dist-info → webscout-6.4.dist-info}/RECORD +75 -45
  71. webscout/Provider/TTI/AIuncensoredimage.py +0 -103
  72. webscout/Provider/TTI/Nexra.py +0 -120
  73. webscout/Provider/TTI/PollinationsAI.py +0 -138
  74. webscout/Provider/TTI/WebSimAI.py +0 -142
  75. webscout/Provider/TTI/aiforce.py +0 -160
  76. webscout/Provider/TTI/artbit.py +0 -141
  77. webscout/Provider/TTI/deepinfra.py +0 -148
  78. webscout/Provider/TTI/huggingface.py +0 -155
  79. webscout/Provider/TTI/talkai.py +0 -116
  80. webscout/models.py +0 -23
  81. /webscout/{g4f.py → gpt4free.py} +0 -0
  82. {webscout-6.3.dist-info → webscout-6.4.dist-info}/LICENSE.md +0 -0
  83. {webscout-6.3.dist-info → webscout-6.4.dist-info}/WHEEL +0 -0
  84. {webscout-6.3.dist-info → webscout-6.4.dist-info}/entry_points.txt +0 -0
  85. {webscout-6.3.dist-info → webscout-6.4.dist-info}/top_level.txt +0 -0
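
The file list above can be reproduced locally. Below is a minimal sketch, assuming both wheels have been downloaded from PyPI under their usual filenames (webscout-6.3-py3-none-any.whl and webscout-6.4-py3-none-any.whl); it compares only archive member names and uncompressed sizes, not full file contents, so files changed without a size change would be missed.

```python
# Sketch: file-level comparison of two locally downloaded wheels (stdlib only).
import zipfile

def wheel_files(path):
    """Map archive member name -> uncompressed size."""
    with zipfile.ZipFile(path) as zf:
        return {info.filename: info.file_size for info in zf.infolist()}

old = wheel_files("webscout-6.3-py3-none-any.whl")  # assumed local filename
new = wheel_files("webscout-6.4-py3-none-any.whl")  # assumed local filename

added   = sorted(set(new) - set(old))
removed = sorted(set(old) - set(new))
changed = sorted(f for f in set(old) & set(new) if old[f] != new[f])

print(f"added: {len(added)}, removed: {len(removed)}, size-changed: {len(changed)}")
for name in added:
    print("A", name)
for name in removed:
    print("D", name)
```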
webscout/AIauto.py CHANGED
@@ -1,176 +1,191 @@
- from webscout.AIbase import Provider
- from webscout.g4f import GPT4FREE, TestProviders
- from webscout.exceptions import AllProvidersFailure
- from typing import Union, Any, Dict, Generator
- import importlib
- import pkgutil
- import logging
- import random
- import inspect
-
- def load_providers():
-     provider_map = {}
-     api_key_providers = set()
-     provider_package = importlib.import_module("webscout.Provider")
-
-     for _, module_name, _ in pkgutil.iter_modules(provider_package.__path__):
-         try:
-             module = importlib.import_module(f"webscout.Provider.{module_name}")
-             for attr_name in dir(module):
-                 attr = getattr(module, attr_name)
-                 if isinstance(attr, type) and issubclass(attr, Provider) and attr != Provider:
-                     provider_map[attr_name.upper()] = attr
-                     # Check if the provider needs an API key
-                     if 'api_key' in inspect.signature(attr.__init__).parameters:
-                         api_key_providers.add(attr_name.upper())
-         except Exception as e:
-             logging.warning(f"Failed to load provider {module_name}: {e}")
-
-     return provider_map, api_key_providers
-
- provider_map, api_key_providers = load_providers()
-
- class AUTO(Provider):
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         exclude: list[str] = [],
-     ):
-         self.provider = None
-         self.provider_name = None
-         self.is_conversation = is_conversation
-         self.max_tokens = max_tokens
-         self.timeout = timeout
-         self.intro = intro
-         self.filepath = filepath
-         self.update_file = update_file
-         self.proxies = proxies
-         self.history_offset = history_offset
-         self.act = act
-         self.exclude = [e.upper() for e in exclude]
-
-     @property
-     def last_response(self) -> dict[str, Any]:
-         return self.provider.last_response if self.provider else {}
-
-     @property
-     def conversation(self) -> object:
-         return self.provider.conversation if self.provider else None
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-         run_new_test: bool = False,
-     ) -> Union[Dict, Generator]:
-         ask_kwargs = {
-             "prompt": prompt,
-             "stream": stream,
-             "raw": raw,
-             "optimizer": optimizer,
-             "conversationally": conversationally,
-         }
-
-         # Filter out API key required providers and excluded providers
-         available_providers = [
-             (name, cls) for name, cls in provider_map.items()
-             if name not in api_key_providers and name not in self.exclude
-         ]
-
-         # Shuffle the list of available providers
-         random.shuffle(available_providers)
-
-         # Try webscout-based providers
-         for provider_name, provider_class in available_providers:
-             try:
-                 self.provider_name = f"webscout-{provider_name}"
-                 self.provider = provider_class(
-                     is_conversation=self.is_conversation,
-                     max_tokens=self.max_tokens,
-                     timeout=self.timeout,
-                     intro=self.intro,
-                     filepath=self.filepath,
-                     update_file=self.update_file,
-                     proxies=self.proxies,
-                     history_offset=self.history_offset,
-                     act=self.act,
-                 )
-
-                 return self.provider.ask(**ask_kwargs)
-
-             except Exception as e:
-                 logging.debug(
-                     f"Failed to generate response using provider {provider_name} - {e}"
-                 )
-
-         # Try GPT4FREE providers
-         gpt4free_providers = TestProviders(timeout=self.timeout).get_results(run=run_new_test)
-         random.shuffle(gpt4free_providers)
-
-         for provider_info in gpt4free_providers:
-             if provider_info["name"].upper() in self.exclude:
-                 continue
-             try:
-                 self.provider_name = f"g4f-{provider_info['name']}"
-                 self.provider = GPT4FREE(
-                     provider=provider_info["name"],
-                     is_conversation=self.is_conversation,
-                     max_tokens=self.max_tokens,
-                     intro=self.intro,
-                     filepath=self.filepath,
-                     update_file=self.update_file,
-                     proxies=self.proxies,
-                     history_offset=self.history_offset,
-                     act=self.act,
-                 )
-
-                 print(f"Using provider: {self.provider_name}")
-                 return self.provider.ask(**ask_kwargs)
-
-             except Exception as e:
-                 logging.debug(
-                     f"Failed to generate response using GPT4FREE-based provider {provider_info['name']} - {e}"
-                 )
-
-         raise AllProvidersFailure("None of the providers generated response successfully.")
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-         run_new_test: bool = False,
-     ) -> Union[str, Generator[str, None, None]]:
-         response = self.ask(
-             prompt,
-             stream,
-             optimizer=optimizer,
-             conversationally=conversationally,
-             run_new_test=run_new_test,
-         )
-
-         if stream:
-             return (self.get_message(chunk) for chunk in response)
-         else:
-             return self.get_message(response)
-
-     def get_message(self, response: dict) -> str:
-         assert self.provider is not None, "Chat with AI first"
-         return self.provider.get_message(response)
- if __name__ == "__main__":
-     auto = AUTO()
-
-     response = auto.chat("Hello, how are you?")
-     print(response)
+ from webscout.AIbase import Provider
+ from webscout.g4f import GPT4FREE, TestProviders
+ from webscout.exceptions import AllProvidersFailure
+ from webscout.Litlogger import Litlogger, LogFormat, ColorScheme
+ from typing import Union, Any, Dict, Generator
+ import importlib
+ import pkgutil
+ import random
+ import inspect
+
+ # Initialize LitLogger with cyberpunk theme
+ logger = Litlogger(
+     name="AIauto",
+     format=LogFormat.DETAILED,
+     color_scheme=ColorScheme.CYBERPUNK
+ )
+
+ def load_providers():
+     provider_map = {}
+     api_key_providers = set()
+     provider_package = importlib.import_module("webscout.Provider")
+
+     for _, module_name, _ in pkgutil.iter_modules(provider_package.__path__):
+         try:
+             module = importlib.import_module(f"webscout.Provider.{module_name}")
+             for attr_name in dir(module):
+                 attr = getattr(module, attr_name)
+                 if isinstance(attr, type) and issubclass(attr, Provider) and attr != Provider:
+                     provider_map[attr_name.upper()] = attr
+                     # Check if the provider needs an API key
+                     if 'api_key' in inspect.signature(attr.__init__).parameters:
+                         api_key_providers.add(attr_name.upper())
+                         logger.debug(f"Provider {attr_name} requires API key")
+             logger.success(f"Loaded provider module: {module_name}")
+         except Exception as e:
+             logger.error(f"Failed to load provider {module_name}: {str(e)}")
+
+     logger.info(f"Total providers loaded: {len(provider_map)}")
+     logger.info(f"API key providers: {len(api_key_providers)}")
+     return provider_map, api_key_providers
+
+ provider_map, api_key_providers = load_providers()
+
+ class AUTO(Provider):
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         exclude: list[str] = [],
+     ):
+         self.provider = None
+         self.provider_name = None
+         self.is_conversation = is_conversation
+         self.max_tokens = max_tokens
+         self.timeout = timeout
+         self.intro = intro
+         self.filepath = filepath
+         self.update_file = update_file
+         self.proxies = proxies
+         self.history_offset = history_offset
+         self.act = act
+         self.exclude = [e.upper() for e in exclude]
+
+     @property
+     def last_response(self) -> dict[str, Any]:
+         return self.provider.last_response if self.provider else {}
+
+     @property
+     def conversation(self) -> object:
+         return self.provider.conversation if self.provider else None
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         run_new_test: bool = False,
+     ) -> Union[Dict, Generator]:
+         ask_kwargs = {
+             "prompt": prompt,
+             "stream": stream,
+             "raw": raw,
+             "optimizer": optimizer,
+             "conversationally": conversationally,
+         }
+
+         # Filter out API key required providers and excluded providers
+         available_providers = [
+             (name, cls) for name, cls in provider_map.items()
+             if name not in api_key_providers and name not in self.exclude
+         ]
+
+         # Shuffle the list of available providers
+         random.shuffle(available_providers)
+
+         # Try webscout-based providers
+         for provider_name, provider_class in available_providers:
+             try:
+                 self.provider_name = f"webscout-{provider_name}"
+                 logger.info(f"Trying provider: {self.provider_name}")
+
+                 self.provider = provider_class(
+                     is_conversation=self.is_conversation,
+                     max_tokens=self.max_tokens,
+                     timeout=self.timeout,
+                     intro=self.intro,
+                     filepath=self.filepath,
+                     update_file=self.update_file,
+                     proxies=self.proxies,
+                     history_offset=self.history_offset,
+                     act=self.act,
+                 )
+                 response = self.provider.ask(**ask_kwargs)
+                 logger.success(f"Successfully used provider: {self.provider_name}")
+                 return response
+             except Exception as e:
+                 logger.warning(f"Provider {self.provider_name} failed: {str(e)}")
+                 continue
+
+         # Try GPT4FREE providers
+         gpt4free_providers = TestProviders(timeout=self.timeout).get_results(run=run_new_test)
+         random.shuffle(gpt4free_providers)
+
+         for provider_info in gpt4free_providers:
+             if provider_info["name"].upper() in self.exclude:
+                 continue
+             try:
+                 self.provider_name = f"g4f-{provider_info['name']}"
+                 logger.info(f"Trying provider: {self.provider_name}")
+
+                 self.provider = GPT4FREE(
+                     provider=provider_info["name"],
+                     is_conversation=self.is_conversation,
+                     max_tokens=self.max_tokens,
+                     intro=self.intro,
+                     filepath=self.filepath,
+                     update_file=self.update_file,
+                     proxies=self.proxies,
+                     history_offset=self.history_offset,
+                     act=self.act,
+                 )
+
+                 response = self.provider.ask(**ask_kwargs)
+                 logger.success(f"Successfully used provider: {self.provider_name}")
+                 return response
+             except Exception as e:
+                 logger.warning(f"Provider {self.provider_name} failed: {str(e)}")
+                 continue
+
+         # If we get here, all providers failed
+         logger.error("All providers failed to process the request")
+         raise AllProvidersFailure("All providers failed to process the request")
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         run_new_test: bool = False,
+     ) -> Union[str, Generator[str, None, None]]:
+         response = self.ask(
+             prompt,
+             stream,
+             optimizer=optimizer,
+             conversationally=conversationally,
+             run_new_test=run_new_test,
+         )
+
+         if stream:
+             return (self.get_message(chunk) for chunk in response)
+         else:
+             return self.get_message(response)
+
+     def get_message(self, response: dict) -> str:
+         assert self.provider is not None, "Chat with AI first"
+         return self.provider.get_message(response)
+ if __name__ == "__main__":
+     auto = AUTO()
+
+     response = auto.chat("Hello, how are you?")
+     print(response)
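
The rewritten AIauto.py keeps the same fallback flow (shuffle keyless webscout providers, then try g4f providers) but replaces the logging module with Litlogger. A minimal usage sketch based on the class above; the import path follows the file path shown in this diff, the excluded provider name is hypothetical, and whether streamed chunks arrive incrementally depends on whichever provider ends up answering.

```python
from webscout.AIauto import AUTO  # module path as shown in this diff

# Names passed in `exclude` are upper-cased internally, so matching is case-insensitive.
auto = AUTO(timeout=30, exclude=["cerebras"])  # hypothetical exclusion

# Non-streaming: returns the first successful provider's reply as a string.
print(auto.chat("Hello, how are you?"))
print("answered by:", auto.provider_name)

# Streaming: chat() returns a generator of message chunks.
for chunk in auto.chat("Count from 1 to 5.", stream=True):
    print(chunk, end="", flush=True)
```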
webscout/AIbase.py CHANGED
@@ -10,16 +10,9 @@ ImageData: TypeAlias = Union[bytes, str, Generator[bytes, None, None]]
  AsyncImageData: TypeAlias = Union[bytes, str, AsyncGenerator[bytes, None]]

  class AIProviderError(Exception):
-     """Base exception for AI provider errors"""
      pass

  class Provider(ABC):
-     """Base class for text-based AI providers.
-
-     This class defines the interface for synchronous AI text generation providers.
-     All text-based AI providers should inherit from this class and implement
-     its abstract methods.
-     """

      @abstractmethod
      def ask(
@@ -30,30 +23,6 @@ class Provider(ABC):
          optimizer: Optional[str] = None,
          conversationally: bool = False,
      ) -> Response:
-         """Chat with AI and get detailed response.
-
-         Args:
-             prompt: The input text to send to the AI
-             stream: Whether to stream the response. Defaults to False
-             raw: Whether to return raw response as received. Defaults to False
-             optimizer: Optional prompt optimizer - choices: ['code', 'shell_command']
-             conversationally: Whether to maintain conversation context. Defaults to False
-
-         Returns:
-             A dictionary containing response details:
-             {
-                 "completion": str, # The AI's response
-                 "stop_reason": str|None, # Reason for response termination
-                 "truncated": bool, # Whether response was truncated
-                 "stop": str|None, # Stop token if any
-                 "model": str, # Model used for generation
-                 "log_id": str, # Unique log identifier
-                 "exception": str|None # Error message if any
-             }
-
-         Raises:
-             AIProviderError: If there's an error communicating with the AI provider
-         """
          raise NotImplementedError("Method needs to be implemented in subclass")

      @abstractmethod
@@ -64,39 +33,13 @@ class Provider(ABC):
          optimizer: Optional[str] = None,
          conversationally: bool = False,
      ) -> str:
-         """Generate a simple text response from the AI.
-
-         Args:
-             prompt: The input text to send to the AI
-             stream: Whether to stream the response. Defaults to False
-             optimizer: Optional prompt optimizer - choices: ['code', 'shell_command']
-             conversationally: Whether to maintain conversation context. Defaults to False
-
-         Returns:
-             The AI's text response
-
-         Raises:
-             AIProviderError: If there's an error communicating with the AI provider
-         """
          raise NotImplementedError("Method needs to be implemented in subclass")

      @abstractmethod
      def get_message(self, response: Response) -> str:
-         """Extract the message content from a response dictionary.
-
-         Args:
-             response: Response dictionary from ask() method
-
-         Returns:
-             The extracted message text
-
-         Raises:
-             AIProviderError: If message cannot be extracted from response
-         """
          raise NotImplementedError("Method needs to be implemented in subclass")

  class AsyncProvider(ABC):
-     """Asynchronous base class for text-based AI providers"""

      @abstractmethod
      async def ask(
@@ -107,30 +50,6 @@ class AsyncProvider(ABC):
          optimizer: Optional[str] = None,
          conversationally: bool = False,
      ) -> Response:
-         """Asynchronously chat with AI and get detailed response.
-
-         Args:
-             prompt: The input text to send to the AI
-             stream: Whether to stream the response. Defaults to False
-             raw: Whether to return raw response as received. Defaults to False
-             optimizer: Optional prompt optimizer - choices: ['code', 'shell_command']
-             conversationally: Whether to maintain conversation context. Defaults to False
-
-         Returns:
-             A dictionary containing response details:
-             {
-                 "completion": str, # The AI's response
-                 "stop_reason": str|None, # Reason for response termination
-                 "truncated": bool, # Whether response was truncated
-                 "stop": str|None, # Stop token if any
-                 "model": str, # Model used for generation
-                 "log_id": str, # Unique log identifier
-                 "exception": str|None # Error message if any
-             }
-
-         Raises:
-             AIProviderError: If there's an error communicating with the AI provider
-         """
          raise NotImplementedError("Method needs to be implemented in subclass")

      @abstractmethod
@@ -141,100 +60,28 @@ class AsyncProvider(ABC):
          optimizer: Optional[str] = None,
          conversationally: bool = False,
      ) -> str:
-         """Asynchronously generate a simple text response from the AI.
-
-         Args:
-             prompt: The input text to send to the AI
-             stream: Whether to stream the response. Defaults to False
-             optimizer: Optional prompt optimizer - choices: ['code', 'shell_command']
-             conversationally: Whether to maintain conversation context. Defaults to False
-
-         Returns:
-             The AI's text response
-
-         Raises:
-             AIProviderError: If there's an error communicating with the AI provider
-         """
          raise NotImplementedError("Method needs to be implemented in subclass")

      @abstractmethod
      async def get_message(self, response: Response) -> str:
-         """Asynchronously extract the message content from a response dictionary.
-
-         Args:
-             response: Response dictionary from ask() method
-
-         Returns:
-             The extracted message text
-
-         Raises:
-             AIProviderError: If message cannot be extracted from response
-         """
          raise NotImplementedError("Method needs to be implemented in subclass")

  class TTSProvider(ABC):
-     """Base class for text-to-speech providers.
-
-     This class defines the interface for synchronous text-to-speech providers.
-     """

      @abstractmethod
      def tts(self, text: str) -> ImageData:
-         """Convert text to speech.
-
-         Args:
-             text: The text to convert to speech
-
-         Returns:
-             One of:
-             - Raw audio bytes
-             - Path to saved audio file
-             - Generator yielding audio chunks
-
-         Raises:
-             AIProviderError: If text-to-speech conversion fails
-         """
          raise NotImplementedError("Method needs to be implemented in subclass")

  class AsyncTTSProvider(ABC):
-     """Base class for asynchronous text-to-speech providers."""

      @abstractmethod
      async def tts(self, text: str) -> AsyncImageData:
-         """Asynchronously convert text to speech.
-
-         Args:
-             text: The text to convert to speech
-
-         Returns:
-             One of:
-             - Raw audio bytes
-             - Path to saved audio file
-             - AsyncGenerator yielding audio chunks
-
-         Raises:
-             AIProviderError: If text-to-speech conversion fails
-         """
          raise NotImplementedError("Method needs to be implemented in subclass")

  class ImageProvider(ABC):
-     """Base class for text-to-image generation providers."""

      @abstractmethod
      def generate(self, prompt: str, amount: int = 1) -> List[bytes]:
-         """Generate images from a text description.
-
-         Args:
-             prompt: Text description of desired image
-             amount: Number of images to generate (default: 1)
-
-         Returns:
-             List of generated images as bytes
-
-         Raises:
-             AIProviderError: If image generation fails
-             ValueError: If amount is less than 1
-         """
          raise NotImplementedError("Method needs to be implemented in subclass")

      @abstractmethod
@@ -244,24 +91,9 @@ class ImageProvider(ABC):
          name: Optional[str] = None,
          dir: Optional[Union[str, Path]] = None
      ) -> List[str]:
-         """Save generated images to disk.
-
-         Args:
-             response: List of image data in bytes
-             name: Base filename for saved images (default: auto-generated)
-             dir: Directory to save images (default: current directory)
-
-         Returns:
-             List of paths to saved image files
-
-         Raises:
-             AIProviderError: If saving images fails
-             ValueError: If response is empty
-         """
          raise NotImplementedError("Method needs to be implemented in subclass")

  class AsyncImageProvider(ABC):
-     """Base class for asynchronous text-to-image generation providers."""

      @abstractmethod
      async def generate(
@@ -269,21 +101,6 @@ class AsyncImageProvider(ABC):
          prompt: str,
          amount: int = 1
      ) -> Union[AsyncGenerator[bytes, None], List[bytes]]:
-         """Asynchronously generate images from text.
-
-         Args:
-             prompt: Text description of desired image
-             amount: Number of images to generate (default: 1)
-
-         Returns:
-             Either:
-             - AsyncGenerator yielding image bytes for streaming
-             - List of image bytes if not streaming
-
-         Raises:
-             AIProviderError: If image generation fails
-             ValueError: If amount is less than 1
-         """
          raise NotImplementedError("Method needs to be implemented in subclass")

      @abstractmethod
@@ -293,18 +110,4 @@ class AsyncImageProvider(ABC):
          name: Optional[str] = None,
          dir: Optional[Union[str, Path]] = None
      ) -> List[str]:
-         """Asynchronously save generated images.
-
-         Args:
-             response: Either AsyncGenerator yielding images or List of image bytes
-             name: Base filename for saved images (default: auto-generated)
-             dir: Directory to save images (default: current directory)
-
-         Returns:
-             List of paths to saved image files
-
-         Raises:
-             AIProviderError: If saving images fails
-             ValueError: If response is empty
-         """
          raise NotImplementedError("Method needs to be implemented in subclass")
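
With the docstrings removed, AIbase.py now documents the provider contract only through method signatures. Below is a minimal sketch of a conforming subclass, under the assumption (taken from the removed docstrings) that ask() returns a dict whose "completion" key holds the reply text; EchoProvider and its canned response are hypothetical.

```python
from webscout.AIbase import Provider

class EchoProvider(Provider):
    """Toy provider that echoes the prompt; a real provider would call a backend here."""

    def ask(self, prompt, stream=False, raw=False, optimizer=None, conversationally=False):
        # Same response-dict shape the removed docstrings documented.
        return {"completion": f"echo: {prompt}", "stop_reason": None, "model": "echo"}

    def chat(self, prompt, stream=False, optimizer=None, conversationally=False):
        return self.get_message(
            self.ask(prompt, stream, optimizer=optimizer, conversationally=conversationally)
        )

    def get_message(self, response):
        return response["completion"]

print(EchoProvider().chat("hello"))  # -> "echo: hello"
```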