askai-python 0.1.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2026 Hossein Ghorbani
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
@@ -0,0 +1,129 @@
+ Metadata-Version: 2.4
+ Name: askai-python
+ Version: 0.1.2
+ Summary: AI Made Stupid Simple. Unified API for OpenAI, Groq, Google, and more.
+ Author-email: Hossein Ghorbani <hosseingh1068@gmail.com>
+ License: MIT
+ Project-URL: Homepage, https://hosseinghorbani0.ir/
+ Project-URL: Bug Tracker, https://github.com/Hosseinghorbani0/ask-ai/issues
+ Requires-Python: >=3.8
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: openai>=1.0.0
+ Requires-Dist: groq
+ Requires-Dist: google-generativeai
+ Requires-Dist: anthropic
+ Requires-Dist: requests
+ Dynamic: license-file
+
+ # ask-ai
+
+ <p align="center">
+ 🌍 <b>Readme:</b>
+ <a href="README.md"><img src="https://flagcdn.com/20x15/us.png" alt="English"> English</a> ·
+ <a href="docs/README_fa.md"><img src="https://flagcdn.com/20x15/ir.png" alt="Persian"> فارسی</a> ·
+ <a href="docs/README_zh.md"><img src="https://flagcdn.com/20x15/cn.png" alt="Chinese"> 中文</a> ·
+ <a href="docs/README_tr.md"><img src="https://flagcdn.com/20x15/tr.png" alt="Turkish"> Türkçe</a> ·
+ <a href="docs/README_ar.md"><img src="https://flagcdn.com/20x15/sa.png" alt="Arabic"> العربية</a> ·
+ <a href="docs/README_ru.md"><img src="https://flagcdn.com/20x15/ru.png" alt="Russian"> Русский</a> ·
+ <a href="docs/README_es.md"><img src="https://flagcdn.com/20x15/es.png" alt="Spanish"> Español</a> ·
+ <a href="docs/README_ja.md"><img src="https://flagcdn.com/20x15/jp.png" alt="Japanese"> 日本語</a>
+ </p>
+
+ <p align="center">
+ <b>AI Made Simple.</b><br/>
+ One unified Python client for OpenAI, Claude, Gemini, Groq & more.
+ </p>
+
+ > Stop rewriting AI code for every provider. Use one line. Switch models anytime.
+
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
+ [![Python 3.8+](https://img.shields.io/badge/python-3.8+-blue.svg)](https://www.python.org/downloads/)
+
+ ---
+
+ ## ⚡ Why ask-ai?
+
+ - **No SDK lock-in**: Don't learn 5 different libraries. Learn one.
+ - **Unified API**: `ai.ask()` works for text, images, and audio across all providers.
+ - **Production Ready**: Built-in type safety, error handling, and environment management.
+ - **Zero-Config**: Auto-detects API keys from your environment.
+
+ ---
+
+ ## 🚀 The Golden Example
+
+ See the power of `ask-ai` in a few lines of code:
+
+ ```python
+ from ask_ai import OpenAI, Anthropic, Google
+
+ prompt = "Explain quantum computing in one sentence."
+
+ # Switch providers instantly
+ print("OpenAI: ", OpenAI().ask(prompt))
+ print("Claude: ", Anthropic().ask(prompt))
+ print("Gemini: ", Google().ask(prompt))
+ ```
+
+ ---
+
+ ## 📦 Installation
+
+ ```bash
+ pip install askai-python
+ ```
+
+ ---
+
+ ## 📖 Quick Start
+
+ ### 1. Setup
+ Export your API keys (or pass them explicitly).
+ ```bash
+ export OPENAI_API_KEY="sk-..."
+ export ANTHROPIC_API_KEY="sk-ant-..."
+ ```
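If you would rather not rely on environment variables, each provider constructor also accepts the key directly. This is a minimal sketch based on the `api_key` parameter of `BaseProvider.__init__` in `base.py`; the key value is a placeholder:

```python
from ask_ai import OpenAI

# Pass the key explicitly instead of reading OPENAI_API_KEY from the environment.
# "sk-..." is a placeholder, not a real key.
ai = OpenAI(api_key="sk-...")
print(ai.ask("Hello!"))
```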
+
+ ### 2. Standard Usage
+ ```python
+ from ask_ai import OpenAI
+
+ ai = OpenAI()
+ print(ai.ask("Hello, World!"))
+ ```
+
+ ### 3. Advanced Usage
+ Control `temperature`, `top_p`, and system personas for professional results.
+ ```python
+ ai.advanced(
+     temperature=0.7,
+     prompt="You are a senior DevOps engineer."
+ )
+
+ print(ai.ask("How do I optimize a Dockerfile?"))
+ ```
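Settings can also be passed per call: `ask()` merges request-level keyword arguments over the instance-level configuration (see `BaseProvider.ask` and `AdvancedConfig.merge` in the package sources further down). A sketch of a one-off override, with illustrative values:

```python
# The instance default was set via advanced(); this single call runs cooler and shorter.
answer = ai.ask(
    "Summarize those Dockerfile tips in three bullets.",
    temperature=0.2,   # overrides the instance-level 0.7 for this call only
    max_tokens=200,    # AdvancedConfig field; value chosen for illustration
)
print(answer)
```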
+
+ ---
+
+ ## 🔌 Supported Providers
+
+ | Provider | Class | Feature Set |
+ |----------|-------|-------------|
+ | **OpenAI** | `OpenAI` | All Models (GPT-4o, o1, etc.) |
+ | **Anthropic** | `Anthropic` | All Models (Claude 3.5, Opus) |
+ | **Google** | `Google` | All Models (Gemini 1.5 Pro/Flash) |
+ | **Groq** | `Groq` | All Models (Llama 3, Mixtral) |
+ | **Azure** | `Azure` | All Deployments |
+ | **OpenRouter**| `OpenRouter`| All Models (100+) |
+
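Every class in the table accepts an optional `model` argument (see `BaseProvider.__init__`), so pinning a specific model is a one-liner. The model id below is only an example taken from the table:

```python
from ask_ai import OpenAI, Groq

gpt = OpenAI(model="gpt-4o")   # pin a specific model id
llama = Groq()                 # falls back to the provider's default model
print(gpt.ask("One-line haiku about containers."))
```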
+ ---
+
+ ## ⭐ Support the Project
+
+ If this project saved you time, please consider giving it a star on GitHub! It helps us grow.
+
+ **[Give it a Star!](https://github.com/Hosseinghorbani0/ask-ai)**
+
+ ---
+ *Built by [Hossein Ghorbani](https://hosseinghorbani0.ir/) | [GitHub](https://github.com/Hosseinghorbani0).*
@@ -0,0 +1,111 @@
+ # ask-ai
+
+ <p align="center">
+ 🌍 <b>Readme:</b>
+ <a href="README.md"><img src="https://flagcdn.com/20x15/us.png" alt="English"> English</a> ·
+ <a href="docs/README_fa.md"><img src="https://flagcdn.com/20x15/ir.png" alt="Persian"> فارسی</a> ·
+ <a href="docs/README_zh.md"><img src="https://flagcdn.com/20x15/cn.png" alt="Chinese"> 中文</a> ·
+ <a href="docs/README_tr.md"><img src="https://flagcdn.com/20x15/tr.png" alt="Turkish"> Türkçe</a> ·
+ <a href="docs/README_ar.md"><img src="https://flagcdn.com/20x15/sa.png" alt="Arabic"> العربية</a> ·
+ <a href="docs/README_ru.md"><img src="https://flagcdn.com/20x15/ru.png" alt="Russian"> Русский</a> ·
+ <a href="docs/README_es.md"><img src="https://flagcdn.com/20x15/es.png" alt="Spanish"> Español</a> ·
+ <a href="docs/README_ja.md"><img src="https://flagcdn.com/20x15/jp.png" alt="Japanese"> 日本語</a>
+ </p>
+
+ <p align="center">
+ <b>AI Made Simple.</b><br/>
+ One unified Python client for OpenAI, Claude, Gemini, Groq & more.
+ </p>
+
+ > Stop rewriting AI code for every provider. Use one line. Switch models anytime.
+
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
+ [![Python 3.8+](https://img.shields.io/badge/python-3.8+-blue.svg)](https://www.python.org/downloads/)
+
+ ---
+
+ ## ⚡ Why ask-ai?
+
+ - **No SDK lock-in**: Don't learn 5 different libraries. Learn one.
+ - **Unified API**: `ai.ask()` works for text, images, and audio across all providers.
+ - **Production Ready**: Built-in type safety, error handling, and environment management.
+ - **Zero-Config**: Auto-detects API keys from your environment.
+
+ ---
+
+ ## 🚀 The Golden Example
+
+ See the power of `ask-ai` in a few lines of code:
+
+ ```python
+ from ask_ai import OpenAI, Anthropic, Google
+
+ prompt = "Explain quantum computing in one sentence."
+
+ # Switch providers instantly
+ print("OpenAI: ", OpenAI().ask(prompt))
+ print("Claude: ", Anthropic().ask(prompt))
+ print("Gemini: ", Google().ask(prompt))
+ ```
+
+ ---
+
+ ## 📦 Installation
+
+ ```bash
+ pip install askai-python
+ ```
+
+ ---
+
+ ## 📖 Quick Start
+
+ ### 1. Setup
+ Export your API keys (or pass them explicitly).
+ ```bash
+ export OPENAI_API_KEY="sk-..."
+ export ANTHROPIC_API_KEY="sk-ant-..."
+ ```
+
+ ### 2. Standard Usage
+ ```python
+ from ask_ai import OpenAI
+
+ ai = OpenAI()
+ print(ai.ask("Hello, World!"))
+ ```
+
+ ### 3. Advanced Usage
+ Control `temperature`, `top_p`, and system personas for professional results.
+ ```python
+ ai.advanced(
+     temperature=0.7,
+     prompt="You are a senior DevOps engineer."
+ )
+
+ print(ai.ask("How do I optimize a Dockerfile?"))
+ ```
+
+ ---
+
+ ## 🔌 Supported Providers
+
+ | Provider | Class | Feature Set |
+ |----------|-------|-------------|
+ | **OpenAI** | `OpenAI` | All Models (GPT-4o, o1, etc.) |
+ | **Anthropic** | `Anthropic` | All Models (Claude 3.5, Opus) |
+ | **Google** | `Google` | All Models (Gemini 1.5 Pro/Flash) |
+ | **Groq** | `Groq` | All Models (Llama 3, Mixtral) |
+ | **Azure** | `Azure` | All Deployments |
+ | **OpenRouter**| `OpenRouter`| All Models (100+) |
+
+ ---
+
+ ## ⭐ Support the Project
+
+ If this project saved you time, please consider giving it a star on GitHub! It helps us grow.
+
+ **[Give it a Star!](https://github.com/Hosseinghorbani0/ask-ai)**
+
+ ---
+ *Built by [Hossein Ghorbani](https://hosseinghorbani0.ir/) | [GitHub](https://github.com/Hosseinghorbani0).*
@@ -0,0 +1,7 @@
+ from .providers import OpenAI, Groq, Google, OpenRouter, Azure, Anthropic
+
+
+ from .config import AdvancedConfig
+ from .base import Response
+ from .media import ImageObject, AudioObject
+ from .exceptions import AskAIError, APIKeyError, ProviderError
@@ -0,0 +1,4 @@
+ from .cli import main
+
+ if __name__ == "__main__":
+     main()
@@ -0,0 +1,141 @@
+ from typing import Optional, Union, List, Dict, Any
+ import os
+ from .config import AdvancedConfig
+ from .media import ImageObject, AudioObject
+ from .exceptions import APIKeyError
+
+ class Response:
+     """
+     Unified response object for all ask-ai requests.
+     """
+     def __init__(self, text: str = "", media: Union[ImageObject, AudioObject, None] = None):
+         self.text = text
+         self.media = media
+
+     def __str__(self):
+         return self.text
+
+     def save(self, path: str):
+         """Smart save based on content type."""
+         if self.media:
+             self.media.save(path)
+         else:
+             with open(path, "w", encoding="utf-8") as f:
+                 f.write(self.text)
+             print(f"Text saved to {path}")
+
+ class BaseProvider:
+     """
+     Abstract base class for all AI providers.
+     Implements the core 'ask' logic and configuration management.
+     """
+     def __init__(self, api_key: Optional[str] = None, model: Optional[str] = None, persona: Optional[str] = None, **kwargs):
+         # 1. Zero-Config: fall back to the provider's environment variable when no key is passed
+         self.api_key = api_key or os.environ.get(self._get_api_key_env_var())
+         # Some providers (e.g. local ones) may not need a key; granular validation
+         # is left to the subclass, which can raise APIKeyError when appropriate.
+
+         self.model = model or self._get_default_model()
+         self.persona = persona
+
+         # Global Advanced Config
+         self.config = AdvancedConfig(**kwargs)
+
+     def _get_api_key_env_var(self) -> str:
+         """Subclasses should return the env var name, e.g. 'OPENAI_API_KEY'"""
+         raise NotImplementedError
+
+     def _get_default_model(self) -> str:
+         """Subclasses should return a default model"""
+         raise NotImplementedError
+
+     def advanced(self, **kwargs):
+         """
+         Update global advanced settings for this instance.
+         Merges new settings with existing ones.
+         """
+         new_conf = AdvancedConfig(**kwargs)
+         # primitive merge: update self.config with new values
+         for k, v in new_conf.__dict__.items():
+             if v is not None:
+                 setattr(self.config, k, v)
+
+         # Handle extra kwargs
+         if new_conf.extra:
+             self.config.extra.update(new_conf.extra)
+
+     def ask(self, query: str, **kwargs) -> Response:
+         """
+         The main entry point.
+         Detects intent, manages config, and returns a unified Response.
+         """
+         # 1. Merge Request Config with Global Config
+         request_config = AdvancedConfig(**kwargs)
+         final_config = self._merge_configs(self.config, request_config)
+
+         # 2. Add System/Persona Message
+         messages = self._prepare_messages(query, final_config)
+
+         # 3. Check for specific output_type override (e.g. user forced image)
+         output_type = kwargs.get('output_type')
+
+         # 4. Call Provider Implementation
+         return self._send_request(messages, final_config, output_type)
+
+     def _merge_configs(self, global_conf: AdvancedConfig, req_conf: AdvancedConfig) -> AdvancedConfig:
+         return global_conf.merge(req_conf)
+
+     def _prepare_messages(self, query: str, config: AdvancedConfig) -> List[Dict[str, str]]:
+         messages = []
+         if config.system_message:
+             messages.append({"role": "system", "content": config.system_message})
+         elif self.persona:
+             messages.append({"role": "system", "content": self.persona})
+
+         messages.append({"role": "user", "content": query})
+         return messages
+
+     def _send_request(self, messages: List[Dict[str, str]], config: AdvancedConfig, output_type: str = None) -> Response:
+         """Subclasses must implement this."""
+         raise NotImplementedError
+
+     # --- Tool Definitions for Smart Intent ---
+     def _get_media_tools(self) -> List[Dict[str, Any]]:
+         return [
+             {
+                 "type": "function",
+                 "function": {
+                     "name": "generate_image",
+                     "description": "Generate an image based on a prompt. Use this when the user asks to draw, create, or show an image.",
+                     "parameters": {
+                         "type": "object",
+                         "properties": {
+                             "prompt": {
+                                 "type": "string",
+                                 "description": "The detailed description of the image to generate."
+                             }
+                         },
+                         "required": ["prompt"]
+                     }
+                 }
+             },
+             {
+                 "type": "function",
+                 "function": {
+                     "name": "generate_speech",
+                     "description": "Generate audio speech from text. Use this when the user asks to say something, speak, or read aloud.",
+                     "parameters": {
+                         "type": "object",
+                         "properties": {
+                             "text": {
+                                 "type": "string",
+                                 "description": "The text to speak."
+                             }
+                         },
+                         "required": ["text"]
+                     }
+                 }
+             }
+         ]
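To see how the pieces above fit together, here is a minimal, hypothetical subclass that satisfies the three abstract hooks (`_get_api_key_env_var`, `_get_default_model`, `_send_request`) without calling any real API; the actual provider classes live in the `providers/` modules and are more involved:

```python
from ask_ai.base import BaseProvider, Response
from ask_ai.config import AdvancedConfig


class EchoProvider(BaseProvider):
    """Toy provider used only to illustrate the extension points."""

    def _get_api_key_env_var(self) -> str:
        return "ECHO_API_KEY"   # hypothetical variable; never sent anywhere

    def _get_default_model(self) -> str:
        return "echo-1"         # hypothetical model name

    def _send_request(self, messages, config: AdvancedConfig, output_type=None) -> Response:
        # A real provider would call its SDK here, mapping config.to_dict() to API kwargs;
        # this sketch simply echoes the last user message back.
        return Response(text=f"echo: {messages[-1]['content']}")


if __name__ == "__main__":
    print(EchoProvider(api_key="unused").ask("hello"))   # -> "echo: hello"
```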
@@ -0,0 +1,77 @@
+ import sys
+ import argparse
+
+ from . import providers
+ from .exceptions import AskAIError
+
+ def main():
+     parser = argparse.ArgumentParser(description="ask-ai CLI: Ask AI anything directly from your terminal.")
+     parser.add_argument("query", help="The text query to ask the AI.")
+     parser.add_argument("--provider", default="openai", choices=["openai", "groq", "google", "openrouter", "azure", "anthropic"], help="The AI provider to use.")
+     parser.add_argument("--model", help="Specific model to use (optional).")
+     parser.add_argument("--temp", type=float, help="Temperature setting (optional).")
+
+     args = parser.parse_args()
+
+     # Map CLI provider names to the classes exported by ask_ai.providers
+     provider_classes = {
+         "openai": providers.OpenAI,
+         "groq": providers.Groq,
+         "google": providers.Google,
+         "openrouter": providers.OpenRouter,
+         "azure": providers.Azure,
+         "anthropic": providers.Anthropic,
+     }
+
+     provider_class = provider_classes.get(args.provider)
+     if not provider_class:
+         print(f"Error: Provider '{args.provider}' not found.")
+         sys.exit(1)
+
+     try:
+         # Initialize the provider (model is optional; each provider has a default)
+         kwargs = {}
+         if args.model:
+             kwargs["model"] = args.model
+
+         ai = provider_class(**kwargs)
+
+         # Per-request options
+         ask_kwargs = {}
+         if args.temp is not None:
+             ask_kwargs["temperature"] = args.temp
+
+         print(f"[{args.provider.upper()}] Thinking...")
+         response = ai.ask(args.query, **ask_kwargs)
+
+         if response.media:
+             print(f"Generated Media: {response.media.type}")
+             if response.media.type == "image":
+                 response.media.show()
+             # Auto-save to the current directory
+             filename = f"output.{'png' if response.media.type == 'image' else 'mp3'}"
+             response.media.save(filename)
+             print(f"Saved to {filename}")
+         else:
+             print(response.text)
+
+     except AskAIError as e:
+         print(f"ask-ai Error: {e}")
+     except Exception as e:
+         print(f"Unexpected Error: {e}")
+
+ if __name__ == "__main__":
+     main()
@@ -0,0 +1,56 @@
+ from typing import Optional, Dict, Any
+
+ class AdvancedConfig:
+     """
+     Configuration class for advanced AI settings.
+     """
+     def __init__(
+         self,
+         temperature: Optional[float] = None,
+         max_tokens: Optional[int] = None,
+         top_p: Optional[float] = None,
+         frequency_penalty: Optional[float] = None,
+         presence_penalty: Optional[float] = None,
+         stop: Optional[list] = None,
+         system_message: Optional[str] = None,
+         safe_mode: bool = False,
+         **kwargs
+     ):
+         self.temperature = temperature
+         self.max_tokens = max_tokens
+         self.top_p = top_p
+         self.frequency_penalty = frequency_penalty
+         self.presence_penalty = presence_penalty
+         self.stop = stop
+
+         # Alias: treat a 'prompt' or 'system' kwarg as 'system_message'
+         if system_message is None:
+             system_message = kwargs.get("prompt") or kwargs.get("system")
+
+         self.system_message = system_message
+         self.safe_mode = safe_mode
+         self.extra = kwargs
+
+     def to_dict(self) -> Dict[str, Any]:
+         """Convert config to a dictionary, filtering None values."""
+         return {k: v for k, v in self.__dict__.items() if v is not None and k != 'extra'}
+
+     def merge(self, other_config: 'AdvancedConfig') -> 'AdvancedConfig':
+         """Merge another config into this one (other overrides self)."""
+         new_config = AdvancedConfig()
+         # Copy self
+         for k, v in self.__dict__.items():
+             setattr(new_config, k, v)
+
+         # Override with other
+         for k, v in other_config.__dict__.items():
+             if v is not None:
+                 setattr(new_config, k, v)
+
+         return new_config
+
+     @classmethod
+     def from_dict(cls, data: Dict[str, Any]) -> 'AdvancedConfig':
+         return cls(**data)
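For reference, the merge semantics above mean request-level values win over instance-level defaults, while fields left as `None` fall through. A quick illustration derived directly from the class shown:

```python
from ask_ai import AdvancedConfig

base = AdvancedConfig(temperature=0.9, system_message="You are terse.")
override = AdvancedConfig(temperature=0.2)          # only temperature set

merged = base.merge(override)
assert merged.temperature == 0.2                    # overridden by the request config
assert merged.system_message == "You are terse."    # None in override, so the base value is kept
```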
@@ -0,0 +1,15 @@
+ class AskAIError(Exception):
+     """Base exception for all ask-ai errors."""
+     pass
+
+ class APIKeyError(AskAIError):
+     """Raised when API key is missing or invalid."""
+     pass
+
+ class ProviderError(AskAIError):
+     """Raised when the provider API fails (e.g. 500 error)."""
+     pass
+
+ class MediaTypeNotSupportedError(AskAIError):
+     """Raised when a provider can't handle the requested media type."""
+     pass
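These classes are re-exported from the package root, so callers can catch them without importing submodules. Whether a given provider raises `APIKeyError` for a missing key is up to that provider's implementation; the sketch below only shows the intended pattern:

```python
from ask_ai import OpenAI, APIKeyError, ProviderError

try:
    print(OpenAI().ask("ping"))
except APIKeyError:
    print("Set OPENAI_API_KEY or pass api_key= explicitly.")
except ProviderError as exc:
    print(f"The upstream API failed: {exc}")
```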
@@ -0,0 +1,63 @@
+ import os
+ import base64
+ from PIL import Image
+ from io import BytesIO
+ import platform
+ import subprocess
+
+ class MediaObject:
+     def __init__(self, data: bytes, media_type: str):
+         self.data = data
+         self.type = media_type
+
+     @property
+     def bytes(self) -> bytes:
+         return self.data
+
+ class ImageObject(MediaObject):
+     def __init__(self, data: bytes):
+         super().__init__(data, "image")
+
+     def save(self, path: str):
+         """Save the image to a file."""
+         with open(path, "wb") as f:
+             f.write(self.data)
+         print(f"Image saved to {path}")
+
+     def show(self):
+         """Display the image using the default OS viewer."""
+         try:
+             image = Image.open(BytesIO(self.data))
+             image.show()
+         except Exception as e:
+             print(f"Error showing image: {e}")
+
+ class AudioObject(MediaObject):
+     def __init__(self, data: bytes):
+         super().__init__(data, "audio")
+
+     def save(self, path: str):
+         """Save the audio to a file."""
+         with open(path, "wb") as f:
+             f.write(self.data)
+         print(f"Audio saved to {path}")
+
+     def play(self):
+         """Play the audio using the default OS player."""
+         # Simple cross-platform play attempt
+         import tempfile
+
+         # Save to temp file
+         with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as f:
+             f.write(self.data)
+             temp_path = f.name
+
+         try:
+             if platform.system() == "Windows":
+                 os.startfile(temp_path)
+             elif platform.system() == "Darwin":
+                 subprocess.call(["open", temp_path])
+             else:
+                 subprocess.call(["xdg-open", temp_path])
+         except Exception as e:
+             print(f"Error playing audio: {e}")
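When a provider returns media instead of text, the unified `Response` exposes it through these objects. A short sketch of the intended flow (whether a given request actually yields an image depends on the provider implementation, which is not shown in full here):

```python
from ask_ai import OpenAI

resp = OpenAI().ask("Draw a lighthouse at dusk")  # may come back as text or media
if resp.media and resp.media.type == "image":
    resp.media.save("lighthouse.png")             # ImageObject.save writes the raw bytes
    resp.media.show()                             # opens the default image viewer
else:
    print(resp.text)
```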
@@ -0,0 +1,11 @@
+ from .openai_provider import OpenAIProvider as OpenAI
+ from .groq_provider import GroqProvider as Groq
+ from .google_provider import GoogleProvider as Google
+ from .openrouter_provider import OpenRouterProvider as OpenRouter
+ from .azure_provider import AzureProvider as Azure
+ from .anthropic_provider import AnthropicProvider as Anthropic
+
+
+
+
+