askai-python 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ask_ai/__init__.py ADDED
@@ -0,0 +1,7 @@
1
+ from .providers import OpenAI, Groq, Google, OpenRouter, Azure, Anthropic
2
+
3
+
4
+ from .config import AdvancedConfig
5
+ from .base import Response
6
+ from .media import ImageObject, AudioObject
7
+ from .exceptions import AskAIError, APIKeyError, ProviderError
ask_ai/__main__.py ADDED
@@ -0,0 +1,4 @@
1
# Module runner: enables `python -m ask_ai "..."` by delegating to the CLI.
from .cli import main

if __name__ == "__main__":
    main()
ask_ai/base.py ADDED
@@ -0,0 +1,141 @@
1
+ from typing import Optional, Union, List, Dict, Any
2
+ import os
3
+ from .config import AdvancedConfig
4
+ from .media import ImageObject, AudioObject
5
+ from .exceptions import APIKeyError
6
+
7
class Response:
    """Unified result wrapper returned by every provider request.

    Holds the generated text and, when a provider produced media, an
    image/audio payload alongside it.
    """
    def __init__(self, text: str = "", media: Union[ImageObject, AudioObject, None] = None):
        # Text is always present (possibly empty); media stays None for
        # plain text answers.
        self.text = text
        self.media = media

    def __str__(self):
        # Printing a Response shows only its textual content.
        return self.text

    def save(self, path: str):
        """Smart save based on content type."""
        # Media payloads know how to persist themselves; otherwise write
        # the text as UTF-8.
        if self.media:
            self.media.save(path)
            return
        with open(path, "w", encoding="utf-8") as f:
            f.write(self.text)
        print(f"Text saved to {path}")
26
+
27
class BaseProvider:
    """
    Abstract base class for all AI providers.
    Implements the core 'ask' logic and configuration management.

    Subclasses must implement _get_api_key_env_var, _get_default_model and
    _send_request; config merging, message assembly and the smart-intent
    tool schema are shared here.
    """
    def __init__(self, api_key: str = None, model: str = None, persona: str = None, **kwargs):
        # Zero-config: fall back to the provider-specific env var when no
        # key is passed explicitly.
        self.api_key = api_key or os.environ.get(self._get_api_key_env_var())
        if not self.api_key:
            # Deliberately not fatal here: each subclass decides whether a
            # missing key is an error (most raise APIKeyError in __init__).
            pass

        self.model = model or self._get_default_model()
        # Standing system prompt, used only when no explicit system_message
        # is configured (see _prepare_messages).
        self.persona = persona

        # Instance-wide defaults; per-request kwargs are merged on top in ask().
        self.config = AdvancedConfig(**kwargs)

    def _get_api_key_env_var(self) -> str:
        """Subclasses should return the env var name, e.g. 'OPENAI_API_KEY'"""
        raise NotImplementedError

    def _get_default_model(self) -> str:
        """Subclasses should return a default model"""
        raise NotImplementedError

    def advanced(self, **kwargs):
        """
        Update global advanced settings for this instance.
        Merges new settings with existing ones.

        NOTE(review): fields with non-None defaults (e.g. safe_mode=False)
        always pass the `is not None` check, so calling advanced() without
        safe_mode resets a previously-enabled safe_mode — confirm intended.
        """
        new_conf = AdvancedConfig(**kwargs)
        # Shallow field-by-field merge: only non-None values overwrite.
        for k, v in new_conf.__dict__.items():
            if v is not None:
                setattr(self.config, k, v)

        # Unknown kwargs accumulate in 'extra' instead of replacing it.
        if new_conf.extra:
            self.config.extra.update(new_conf.extra)

    def ask(self, query: str, **kwargs) -> Response:
        """
        The main entry point.
        Detects intent, manages config, and returns a unified Response.

        Per-call kwargs override the instance-wide config for this request only.
        """
        # 1. Merge the per-request config over the global config.
        request_config = AdvancedConfig(**kwargs)
        final_config = self._merge_configs(self.config, request_config)

        # 2. Build the message list (system/persona prompt + user query).
        messages = self._prepare_messages(query, final_config)

        # 3. Explicit 'image'/'audio' override bypasses smart-intent detection.
        output_type = kwargs.get('output_type')

        # 4. Delegate to the concrete provider implementation.
        return self._send_request(messages, final_config, output_type)

    def _merge_configs(self, global_conf: AdvancedConfig, req_conf: AdvancedConfig) -> AdvancedConfig:
        # Thin hook so subclasses can customise merge behaviour if needed.
        return global_conf.merge(req_conf)

    def _prepare_messages(self, query: str, config: AdvancedConfig) -> List[Dict[str, str]]:
        """Assemble OpenAI-style messages; explicit system_message beats persona."""
        messages = []
        if config.system_message:
            messages.append({"role": "system", "content": config.system_message})
        elif self.persona:
            messages.append({"role": "system", "content": self.persona})

        messages.append({"role": "user", "content": query})
        return messages

    def _send_request(self, messages: List[Dict[str, str]], config: AdvancedConfig, output_type: str = None) -> Response:
        """Subclasses must implement this."""
        raise NotImplementedError

    # --- Tool Definitions for Smart Intent ---
    def _get_media_tools(self) -> List[Dict[str, Any]]:
        # OpenAI-style function-tool schema advertised to the model so it can
        # signal that the user actually wants an image or spoken audio.
        return [
            {
                "type": "function",
                "function": {
                    "name": "generate_image",
                    "description": "Generate an image based on a prompt. Use this when the user asks to draw, create, or show an image.",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "prompt": {
                                "type": "string",
                                "description": "The detailed description of the image to generate."
                            }
                        },
                        "required": ["prompt"]
                    }
                }
            },
            {
                "type": "function",
                "function": {
                    "name": "generate_speech",
                    "description": "Generate audio speech from text. Use this when the user asks to say something, speak, or read aloud.",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "text": {
                                "type": "string",
                                "description": "The text to speak."
                            }
                        },
                        "required": ["text"]
                    }
                }
            }
        ]
ask_ai/cli.py ADDED
@@ -0,0 +1,77 @@
1
import argparse
import sys

from .exceptions import AskAIError
from .providers import Anthropic, Azure, Google, Groq, OpenAI, OpenRouter
5
+
6
def main():
    """CLI entry point: parse arguments, dispatch to a provider, print the result.

    Exits with status 1 when the provider name cannot be resolved; API errors
    and unexpected errors are reported without a traceback.
    """
    parser = argparse.ArgumentParser(description="easyai CLI: Ask AI anything directly from your terminal.")
    parser.add_argument("query", help="The text query to ask the AI.")
    parser.add_argument("--provider", default="openai", choices=["openai", "groq", "google", "openrouter", "azure", "anthropic"], help="The AI provider to use.")
    parser.add_argument("--model", help="Specific model to use (optional).")
    parser.add_argument("--temp", type=float, help="Temperature setting (optional).")

    args = parser.parse_args()

    # Static name -> class map. Replaces the previous broken dispatch, which
    # did `import easyai` (the package is `ask_ai`, so that always failed)
    # and a getattr on a capitalize()d name that never matched.
    providers = {
        "openai": OpenAI,
        "groq": Groq,
        "google": Google,
        "openrouter": OpenRouter,
        "azure": Azure,
        "anthropic": Anthropic,
    }

    provider_class = providers.get(args.provider)
    if not provider_class:
        print(f"Error: Provider '{args.provider}' not found.")
        sys.exit(1)

    try:
        # Build constructor kwargs from optional CLI flags only.
        kwargs = {}
        if args.model:
            kwargs["model"] = args.model

        ai = provider_class(**kwargs)

        ask_kwargs = {}
        if args.temp is not None:
            ask_kwargs["temperature"] = args.temp

        print(f"[{args.provider.upper()}] Thinking...")
        response = ai.ask(args.query, **ask_kwargs)

        if response.media:
            print(f"Generated Media: {response.media.type}")
            response.media.show()
            # Auto-save into the working directory with a sensible extension.
            filename = f"output.{'png' if response.media.type == 'image' else 'mp3'}"
            response.media.save(filename)
            # Bug fix: previously printed the literal "(unknown)" instead of
            # the actual filename.
            print(f"Saved to {filename}")
        else:
            print(response.text)

    except AskAIError as e:
        # Bug fix: the module previously imported a nonexistent `EasyAIError`.
        print(f"EasyAI Error: {e}")
    except Exception as e:
        print(f"Unexpected Error: {e}")

if __name__ == "__main__":
    main()
ask_ai/config.py ADDED
@@ -0,0 +1,56 @@
1
+ from typing import Optional, Dict, Any
2
+
3
class AdvancedConfig:
    """
    Configuration class for advanced AI settings.

    Known options are explicit parameters; any unrecognised keyword is kept
    in ``extra`` so providers can forward it verbatim.
    """
    def __init__(
        self,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        top_p: Optional[float] = None,
        frequency_penalty: Optional[float] = None,
        presence_penalty: Optional[float] = None,
        stop: Optional[list] = None,
        system_message: Optional[str] = None,
        safe_mode: bool = False,
        **kwargs
    ):
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.top_p = top_p
        self.frequency_penalty = frequency_penalty
        self.presence_penalty = presence_penalty
        self.stop = stop  # fix: was assigned twice in the original

        # Smart aliases: accept 'prompt' or 'system' as the system message.
        if system_message is None:
            system_message = kwargs.get("prompt") or kwargs.get("system")

        self.system_message = system_message
        self.safe_mode = safe_mode
        self.extra = kwargs

    def to_dict(self) -> Dict[str, Any]:
        """Convert config to a dictionary, filtering None values and 'extra'."""
        return {k: v for k, v in self.__dict__.items() if v is not None and k != 'extra'}

    def merge(self, other_config: 'AdvancedConfig') -> 'AdvancedConfig':
        """Merge another config into this one (other overrides self).

        Returns a new config; neither input is mutated. ``extra`` dicts are
        combined (other wins on key conflicts) — previously the other
        config's ``extra`` replaced and aliased this one's, silently
        dropping global extra settings.
        """
        new_config = AdvancedConfig()
        # Start from self's values.
        for k, v in self.__dict__.items():
            setattr(new_config, k, v)

        # Override with other's explicitly-set (non-None) values.
        for k, v in other_config.__dict__.items():
            if v is not None and k != 'extra':
                setattr(new_config, k, v)

        # Combine extras into a fresh dict to avoid aliasing either input.
        new_config.extra = {**self.extra, **other_config.extra}

        return new_config

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'AdvancedConfig':
        """Alternate constructor from a plain dict of settings."""
        return cls(**data)
ask_ai/exceptions.py ADDED
@@ -0,0 +1,15 @@
1
class AskAIError(Exception):
    """Base exception for all ask-ai errors."""
    pass

class APIKeyError(AskAIError):
    """Raised when API key is missing or invalid."""
    pass

class ProviderError(AskAIError):
    """Raised when the provider API fails (e.g. 500 error)."""
    pass

class MediaTypeNotSupportedError(AskAIError):
    """Raised when a provider can't handle the requested media type."""
    pass

# Compatibility alias: cli.py imports `EasyAIError` (the project's earlier
# name). Without this alias that import raises ImportError at startup.
EasyAIError = AskAIError
ask_ai/media.py ADDED
@@ -0,0 +1,63 @@
1
+ import os
2
+ import base64
3
+ from PIL import Image
4
+ from io import BytesIO
5
+ import platform
6
+ import subprocess
7
+
8
class MediaObject:
    """Base wrapper pairing raw media bytes with a type tag."""

    def __init__(self, data: bytes, media_type: str):
        # Raw payload plus its category tag (e.g. "image" / "audio").
        self.data = data
        self.type = media_type

    @property
    def bytes(self) -> bytes:
        """Raw bytes of the media payload."""
        return self.data
16
+
17
class ImageObject(MediaObject):
    """Generated image payload."""

    def __init__(self, data: bytes):
        super().__init__(data, "image")

    def save(self, path: str):
        """Save the image to a file."""
        # Bytes are written verbatim; the caller chooses the extension.
        with open(path, "wb") as f:
            f.write(self.data)
        print(f"Image saved to {path}")

    def show(self):
        """Display the image using the default OS viewer."""
        # Best-effort: decoding or viewer failures are reported, not raised.
        try:
            Image.open(BytesIO(self.data)).show()
        except Exception as e:
            print(f"Error showing image: {e}")
34
+
35
class AudioObject(MediaObject):
    """Generated audio payload."""

    def __init__(self, data: bytes):
        super().__init__(data, "audio")

    def save(self, path: str):
        """Save the audio to a file."""
        with open(path, "wb") as f:
            f.write(self.data)
        print(f"Audio saved to {path}")

    def play(self):
        """Play the audio using the default OS player.

        Writes a temporary .mp3 (kept on disk so the external player can
        open it) and hands it to the platform's default opener.
        """
        import tempfile

        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp:
            tmp.write(self.data)
            temp_path = tmp.name

        try:
            system = platform.system()
            if system == "Windows":
                os.startfile(temp_path)
            elif system == "Darwin":
                subprocess.call(["open", temp_path])
            else:
                subprocess.call(["xdg-open", temp_path])
        except Exception as e:
            print(f"Error playing audio: {e}")
@@ -0,0 +1,11 @@
1
+ from .openai_provider import OpenAIProvider as OpenAI
2
+ from .groq_provider import GroqProvider as Groq
3
+ from .google_provider import GoogleProvider as Google
4
+ from .openrouter_provider import OpenRouterProvider as OpenRouter
5
+ from .azure_provider import AzureProvider as Azure
6
+ from .anthropic_provider import AnthropicProvider as Anthropic
7
+
8
+
9
+
10
+
11
+
@@ -0,0 +1,87 @@
1
+ from typing import List, Dict, Any, Optional
2
+ import os
3
+ try:
4
+ import anthropic
5
+ except ImportError:
6
+ anthropic = None
7
+
8
+ from ..base import BaseProvider, Response
9
+ from ..config import AdvancedConfig
10
+ from ..exceptions import APIKeyError, ProviderError, MediaTypeNotSupportedError
11
+
12
class AnthropicProvider(BaseProvider):
    """Provider backed by Anthropic's Messages API (Claude models).

    Text-only in this library: image/audio requests raise
    MediaTypeNotSupportedError. Smart-intent media tools are deliberately
    not registered here — Anthropic's tool schema differs from the
    OpenAI-style schema _get_media_tools produces.
    """
    def __init__(self, api_key: str = None, model: str = None, **kwargs):
        super().__init__(api_key, model, **kwargs)
        if not self.api_key:
            raise APIKeyError("Anthropic API Key is missing. Set ANTHROPIC_API_KEY env var or pass api_key=")

        if anthropic is None:
            raise ProviderError("Anthropic client library not installed. Please install 'anthropic' package.")

        self.client = anthropic.Anthropic(api_key=self.api_key)

    def _get_api_key_env_var(self):
        return "ANTHROPIC_API_KEY"

    def _get_default_model(self):
        return "claude-3-5-sonnet-20240620"

    def _send_request(self, messages: List[Dict[str, str]], config: AdvancedConfig, output_type: str = None) -> Response:
        """Translate OpenAI-style messages to Anthropic's format and call the API.

        Anthropic takes the system prompt as a separate `system` argument and
        accepts only user/assistant roles in `messages`; `max_tokens` is
        mandatory, so a default of 1024 is applied when unset.
        """
        if output_type in ["image", "audio"]:
            raise MediaTypeNotSupportedError(f"Anthropic provider currently does not support {output_type} generation.")

        try:
            # Split the system prompt out of the OpenAI-style message list.
            anthropic_messages = []
            system_instruction = None

            for m in messages:
                role = m["role"]
                content = m["content"]
                if role == "system":
                    system_instruction = content
                elif role in ["user", "assistant"]:
                    anthropic_messages.append({"role": role, "content": content})

            kwargs = {
                "model": self.model,
                "messages": anthropic_messages,
                "max_tokens": config.max_tokens or 1024,  # required by Anthropic
                "temperature": config.temperature,
                "top_p": config.top_p,
            }
            if system_instruction:
                kwargs["system"] = system_instruction

            # Drop unset options so the client applies its own defaults.
            kwargs = {k: v for k, v in kwargs.items() if v is not None}

            response = self.client.messages.create(**kwargs)

            # response.content is a list of blocks. Bug fix: previously this
            # read content[0].text unconditionally, which raises
            # AttributeError when the first block is not a text block.
            # Collect the text of every text-bearing block instead.
            text_content = "".join(
                block.text for block in (response.content or []) if hasattr(block, "text")
            )

            return Response(text=text_content)

        except Exception as e:
            raise ProviderError(f"Anthropic API Error: {e}") from e
@@ -0,0 +1,47 @@
1
+ from typing import List, Dict, Any, Optional
2
+ import os
3
+ from ..base import BaseProvider, Response # Import base classes
4
+ from ..config import AdvancedConfig
5
+ from ..exceptions import APIKeyError, ProviderError
6
+ from .openai_provider import OpenAIProvider
7
+
8
class AzureProvider(OpenAIProvider):
    """Azure OpenAI provider.

    Reuses OpenAIProvider's request logic but authenticates against an
    Azure endpoint, where `model` is the Azure *deployment name*.

    NOTE(review): __init__ intentionally does not call super().__init__()
    (the parent would look up OPENAI_API_KEY); every base attribute
    (api_key, client, model, persona, config) is set here directly — keep
    in sync if BaseProvider's fields change.
    """
    def __init__(self, api_key: str = None, model: str = None, endpoint: str = None, api_version: str = None, **kwargs):

        self.api_key = api_key or os.environ.get("AZURE_OPENAI_API_KEY")
        if not self.api_key:
            raise APIKeyError("Azure API Key is missing. Set AZURE_OPENAI_API_KEY env var or pass api_key=")

        self.endpoint = endpoint or os.environ.get("AZURE_OPENAI_ENDPOINT")
        if not self.endpoint:
            raise ProviderError("Azure Endpoint is missing. Set AZURE_OPENAI_ENDPOINT env var or pass endpoint=")

        # Pinned preview API version unless overridden via arg or env.
        self.api_version = api_version or os.environ.get("AZURE_OPENAI_API_VERSION", "2024-02-15-preview")

        # Azure-specific client; its method surface mirrors openai.OpenAI,
        # which is what lets the inherited _send_request work unchanged.
        import openai
        self.client = openai.AzureOpenAI(
            api_key=self.api_key,
            api_version=self.api_version,
            azure_endpoint=self.endpoint
        )

        self.model = model or os.environ.get("AZURE_OPENAI_DEPLOYMENT_NAME")
        if not self.model:
            # Azure addresses models by deployment name; no sane default exists.
            raise ProviderError("Azure Model (Deployment Name) is missing. Set AZURE_OPENAI_DEPLOYMENT_NAME env var or pass model=")

        self.persona = None
        self.config = AdvancedConfig(**kwargs)

    def _get_api_key_env_var(self):
        return "AZURE_OPENAI_API_KEY"

    def _get_default_model(self):
        return None # Must be provided for Azure as it's deployment specific

    # _send_request (and with it _generate_image/_generate_audio) is
    # inherited from OpenAIProvider.
    # NOTE(review): Azure image/TTS presumably require their own deployment
    # names rather than the hard-coded "dall-e-3"/"tts-1" — confirm before
    # relying on the inherited media paths.
@@ -0,0 +1,87 @@
1
+ from typing import List, Dict, Any, Optional
2
+ import os
3
+ try:
4
+ import google.generativeai as genai
5
+ except ImportError:
6
+ genai = None
7
+
8
+ from ..base import BaseProvider, Response
9
+ from ..media import ImageObject, AudioObject
10
+ from ..config import AdvancedConfig
11
+ from ..exceptions import APIKeyError, ProviderError, MediaTypeNotSupportedError
12
+
13
class GoogleProvider(BaseProvider):
    """Provider backed by Google's Gemini API (text generation only here)."""
    def __init__(self, api_key: str = None, model: str = None, **kwargs):
        super().__init__(api_key, model, **kwargs)
        if not self.api_key:
            raise APIKeyError("Google API Key is missing. Set GOOGLE_API_KEY env var or pass api_key=")

        if genai is None:
            raise ProviderError("Google Generative AI library not installed. Please install 'google-generativeai'.")

        genai.configure(api_key=self.api_key)
        # Base class already resolved the default ("gemini-1.5-flash").
        self.model_name = self.model

    def _get_api_key_env_var(self):
        return "GOOGLE_API_KEY"

    def _get_default_model(self):
        return "gemini-1.5-flash"

    def _send_request(self, messages: List[Dict[str, str]], config: AdvancedConfig, output_type: str = None) -> Response:
        """Convert OpenAI-style messages to Gemini's chat format and send.

        Gemini uses 'user'/'model' roles and takes the system prompt as a
        separate system_instruction. The call is stateless per request: all
        but the last message are replayed as chat history, then the last
        message is sent.
        """
        if output_type in ["image", "audio"]:
            raise MediaTypeNotSupportedError(f"Google provider currently supports text generation primarily in this library.")

        try:
            # Role mapping: system -> system_instruction, assistant -> 'model'.
            gemini_messages = []
            system_instruction = None

            for m in messages:
                role = m["role"]
                content = m["content"]
                if role == "system":
                    system_instruction = content # Gemini 1.5 supports system instruction
                elif role == "user":
                    gemini_messages.append({"role": "user", "parts": [content]})
                elif role == "assistant":
                    gemini_messages.append({"role": "model", "parts": [content]})

            # Unset options pass through as None; the client applies defaults.
            generation_config = genai.types.GenerationConfig(
                candidate_count=1,
                max_output_tokens=config.max_tokens,
                temperature=config.temperature,
                top_p=config.top_p,
            )

            # A model instance is built per request so the system instruction
            # can vary between calls.
            model = genai.GenerativeModel(
                model_name=self.model_name,
                system_instruction=system_instruction
            )

            # Nothing to send (e.g. only a system message was supplied).
            if not gemini_messages:
                return Response(text="")

            # Replay history, then send the final message.
            last_msg = gemini_messages[-1]
            history = gemini_messages[:-1]

            chat = model.start_chat(history=history)
            response = chat.send_message(last_msg["parts"][0], generation_config=generation_config)

            return Response(text=response.text)

        except Exception as e:
            raise ProviderError(f"Google API Error: {e}")
@@ -0,0 +1,75 @@
1
+ from typing import List, Dict, Any
2
+ import os
3
+ try:
4
+ from groq import Groq
5
+ except ImportError:
6
+ Groq = None
7
+
8
+ from ..base import BaseProvider, Response
9
+ from ..media import ImageObject, AudioObject
10
+ from ..config import AdvancedConfig
11
+ from ..exceptions import APIKeyError, ProviderError, MediaTypeNotSupportedError
12
+
13
class GroqProvider(BaseProvider):
    """Provider backed by Groq's OpenAI-compatible chat API (text only).

    Groq cannot generate images or audio, so explicit media requests raise
    MediaTypeNotSupportedError and no smart-intent media tools are
    advertised to the model.
    """
    def __init__(self, api_key: str = None, model: str = None, **kwargs):
        super().__init__(api_key, model, **kwargs)
        if not self.api_key:
            raise APIKeyError("Groq API Key is missing. Set GROQ_API_KEY env var or pass api_key=")

        if Groq is None:
            raise ProviderError("Groq client library not installed. Please install 'groq' package.")

        self.client = Groq(api_key=self.api_key)

    def _get_api_key_env_var(self):
        return "GROQ_API_KEY"

    def _get_default_model(self):
        return "llama3-8b-8192"

    def _get_media_tools(self) -> List[Dict[str, Any]]:
        # Fix: Groq cannot fulfil generate_image/generate_speech, yet the
        # previous implementation still advertised them, so the model could
        # "call" a tool and the user only got an apology string back.
        # Disable the tools entirely — same approach as OpenRouterProvider.
        return []

    def _send_request(self, messages: List[Dict[str, str]], config: AdvancedConfig, output_type: str = None) -> Response:
        """Send a chat completion request to Groq and wrap the text reply."""
        if output_type in ["image", "audio"]:
            raise MediaTypeNotSupportedError(f"Groq provider currently does not support {output_type} generation.")

        try:
            kwargs = {
                "model": self.model,
                "messages": messages,
                "temperature": config.temperature,
                "max_tokens": config.max_tokens,
                "top_p": config.top_p,
                "frequency_penalty": config.frequency_penalty,
                "presence_penalty": config.presence_penalty,
            }
            # Drop unset options so Groq's client applies its own defaults.
            kwargs = {k: v for k, v in kwargs.items() if v is not None}

            response = self.client.chat.completions.create(**kwargs)
            message = response.choices[0].message

            # Defensive: no tools are sent, but if the model still emits a
            # tool call, report it instead of returning a None content.
            if message.tool_calls:
                op = message.tool_calls[0].function.name
                return Response(text=f"[System] The model attempted to call {op} but Groq provider does not support media generation.")

            return Response(text=message.content)

        except Exception as e:
            raise ProviderError(f"Groq API Error: {e}") from e
@@ -0,0 +1,122 @@
1
+ import os
2
+ import json
3
+ import requests
4
+ from typing import List, Dict, Any, Optional
5
+ import openai
6
+ from ..base import BaseProvider, Response
7
+ from ..media import ImageObject, AudioObject
8
+ from ..config import AdvancedConfig
9
+ from ..exceptions import APIKeyError, ProviderError
10
+
11
class OpenAIProvider(BaseProvider): # Renamed to avoid name collision in imports
    """Provider backed by OpenAI: chat, DALL-E 3 images and TTS audio.

    Implements "smart intent": the chat model is offered function tools
    (see BaseProvider._get_media_tools) and when it calls one, the request
    is rerouted to the matching image/audio endpoint.
    """
    def __init__(self, api_key: str = None, model: str = None, **kwargs):
        super().__init__(api_key, model, **kwargs)
        if not self.api_key:
            raise APIKeyError("OpenAI API Key is missing. Set OPENAI_API_KEY env var or pass api_key=")

        self.client = openai.OpenAI(api_key=self.api_key)

    def _get_api_key_env_var(self):
        return "OPENAI_API_KEY"

    def _get_default_model(self):
        return "gpt-4o"

    def _send_request(self, messages: List[Dict[str, str]], config: AdvancedConfig, output_type: str = None) -> Response:
        """
        Handles the logic for OpenAI API.
        Prioritizes:
        1. Explicit output_type (image/audio)
        2. Text generation (with function calling for smart intent)
        """

        # 1. Explicit image request skips the chat model entirely.
        if output_type == "image":
            return self._generate_image(messages, config)

        # 2. Explicit audio request likewise.
        if output_type == "audio":
            return self._generate_audio(messages, config)

        # 3. Default path: text, with tool-based media intent detection.
        return self._generate_text_or_smart(messages, config)

    def _generate_text_or_smart(self, messages: List[Dict[str, str]], config: AdvancedConfig) -> Response:
        """Chat completion; reroutes to image/audio if the model calls a media tool."""
        try:
            kwargs = {
                "model": self.model,
                "messages": messages,
                "temperature": config.temperature,
                "max_tokens": config.max_tokens,
                "top_p": config.top_p,
                "frequency_penalty": config.frequency_penalty,
                "presence_penalty": config.presence_penalty,
                "tools": self._get_media_tools(),
                "tool_choice": "auto"
            }
            # Drop unset options so the client applies its own defaults.
            kwargs = {k: v for k, v in kwargs.items() if v is not None}

            response = self.client.chat.completions.create(**kwargs)
            message = response.choices[0].message

            # Smart intent: only the first tool call is honoured.
            if message.tool_calls:
                tool_call = message.tool_calls[0]
                function_name = tool_call.function.name
                arguments = json.loads(tool_call.function.arguments)

                if function_name == "generate_image":
                    print(f"[Smart Intent] Detecting Image Generation: {arguments.get('prompt')}")
                    # Re-route with the model's own refined prompt.
                    return self._generate_image([{"role": "user", "content": arguments.get("prompt")}], config)

                elif function_name == "generate_speech":
                    print(f"[Smart Intent] Detecting Speech Generation")
                    return self._generate_audio([{"role": "user", "content": arguments.get("text")}], config)

            # Normal text response (no tool call made).
            return Response(text=message.content)

        except Exception as e:
            raise ProviderError(f"OpenAI API Error: {e}")

    def _generate_image(self, messages: List[Dict[str, str]], config: AdvancedConfig) -> Response:
        """Generate an image with DALL-E 3 from the last user message."""
        prompt = messages[-1]["content"]
        try:
            # Model/size/quality are fixed; config is currently unused here.
            response = self.client.images.generate(
                model="dall-e-3", # Defaulting to d3
                prompt=prompt,
                size="1024x1024",
                quality="standard",
                n=1,
            )

            image_url = response.data[0].url
            # NOTE(review): no timeout and no status check on this download —
            # a failed fetch yields error-page bytes as "image" data. Confirm.
            img_data = requests.get(image_url).content

            return Response(
                text=f"Generated image for: {prompt}",
                media=ImageObject(img_data)
            )
        except Exception as e:
            raise ProviderError(f"OpenAI Image Error: {e}")

    def _generate_audio(self, messages: List[Dict[str, str]], config: AdvancedConfig) -> Response:
        """Generate speech audio (tts-1, voice "alloy") from the last message."""
        text = messages[-1]["content"]
        try:
            response = self.client.audio.speech.create(
                model="tts-1",
                voice="alloy",
                input=text
            )
            # response.content holds the raw audio bytes.
            return Response(
                text=f"Generated audio for: {text}",
                media=AudioObject(response.content)
            )
        except Exception as e:
            raise ProviderError(f"OpenAI Audio Error: {e}")
@@ -0,0 +1,44 @@
1
+ from typing import List, Dict, Any, Optional
2
+ import os
3
+ from ..base import BaseProvider, Response # Import base classes
4
+ from ..config import AdvancedConfig
5
+ from ..exceptions import APIKeyError, ProviderError
6
+ from .openai_provider import OpenAIProvider
7
+
8
class OpenRouterProvider(OpenAIProvider):
    """OpenAI-compatible provider routed through openrouter.ai (text only).

    NOTE(review): __init__ intentionally does not call super().__init__()
    (the parent would look up OPENAI_API_KEY); all base attributes are set
    here directly — keep in sync if BaseProvider's fields change.
    """
    def __init__(self, api_key: str = None, model: str = None, **kwargs):
        # Resolve the OpenRouter-specific key ourselves instead of letting
        # OpenAIProvider check OPENAI_API_KEY.
        self.api_key = api_key or os.environ.get("OPENROUTER_API_KEY")
        if not self.api_key:
            raise APIKeyError("OpenRouter API Key is missing. Set OPENROUTER_API_KEY env var or pass api_key=")

        # Standard OpenAI client pointed at the OpenRouter gateway.
        import openai
        self.client = openai.OpenAI(
            base_url="https://openrouter.ai/api/v1",
            api_key=self.api_key,
        )

        # OpenRouter model ids are vendor-prefixed, e.g. "openai/gpt-3.5-turbo".
        self.model = model or "openai/gpt-3.5-turbo"
        self.persona = None
        self.config = AdvancedConfig(**kwargs)

    def _get_api_key_env_var(self):
        return "OPENROUTER_API_KEY"

    def _get_default_model(self):
        return "openai/gpt-3.5-turbo"

    def _get_media_tools(self) -> List[Dict[str, Any]]:
        # OpenRouter exposes no OpenAI image/audio endpoints, so advertise
        # no tools — otherwise the model could request media we can't make.
        return []

    def _generate_image(self, messages, config):
        # Guard against the inherited OpenAI image path being reached.
        raise ProviderError("OpenRouter does not support Image Generation via this library yet.")

    def _generate_audio(self, messages, config):
        # Guard against the inherited OpenAI audio path being reached.
        raise ProviderError("OpenRouter does not support Audio Generation via this library yet.")
@@ -0,0 +1,129 @@
1
+ Metadata-Version: 2.4
2
+ Name: askai-python
3
+ Version: 0.1.2
4
+ Summary: AI Made Stupid Simple. Unified API for OpenAI, Groq, Google, and more.
5
+ Author-email: Hossein Ghorbani <hosseingh1068@gmail.com>
6
+ License: MIT
7
+ Project-URL: Homepage, https://hosseinghorbani0.ir/
8
+ Project-URL: Bug Tracker, https://github.com/Hosseinghorbani0/ask-ai/issues
9
+ Requires-Python: >=3.8
10
+ Description-Content-Type: text/markdown
11
+ License-File: LICENSE
12
+ Requires-Dist: openai>=1.0.0
13
+ Requires-Dist: groq
14
+ Requires-Dist: google-generativeai
15
+ Requires-Dist: anthropic
16
+ Requires-Dist: requests
17
+ Dynamic: license-file
18
+
19
+ # ask-ai
20
+
21
+ <p align="center">
22
+ 🌍 <b>Readme:</b>
23
+ <a href="README.md"><img src="https://flagcdn.com/20x15/us.png" alt="English"> English</a> ·
24
+ <a href="docs/README_fa.md"><img src="https://flagcdn.com/20x15/ir.png" alt="Persian"> فارسی</a> ·
25
+ <a href="docs/README_zh.md"><img src="https://flagcdn.com/20x15/cn.png" alt="Chinese"> 中文</a> ·
26
+ <a href="docs/README_tr.md"><img src="https://flagcdn.com/20x15/tr.png" alt="Turkish"> Türkçe</a> ·
27
+ <a href="docs/README_ar.md"><img src="https://flagcdn.com/20x15/sa.png" alt="Arabic"> العربية</a> ·
28
+ <a href="docs/README_ru.md"><img src="https://flagcdn.com/20x15/ru.png" alt="Russian"> Русский</a> ·
29
+ <a href="docs/README_es.md"><img src="https://flagcdn.com/20x15/es.png" alt="Spanish"> Español</a> ·
30
+ <a href="docs/README_ja.md"><img src="https://flagcdn.com/20x15/jp.png" alt="Japanese"> 日本語</a>
31
+ </p>
32
+
33
+ <p align="center">
34
+ <b>AI Made Simple.</b><br/>
35
+ One unified Python client for OpenAI, Claude, Gemini, Groq & more.
36
+ </p>
37
+
38
+ > Stop rewriting AI code for every provider. Use one line. Switch models anytime.
39
+
40
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
41
+ [![Python 3.8+](https://img.shields.io/badge/python-3.8+-blue.svg)](https://www.python.org/downloads/)
42
+
43
+ ---
44
+
45
+ ## ⚡ Why ask-ai?
46
+
47
+ - **No SDK lock-in**: Don't learn 5 different libraries. Learn one.
48
+ - **Unified API**: `ai.ask()` works for text, images, and audio across all providers.
49
+ - **Production Ready**: Built-in type safety, error handling, and environment management.
50
+ - **Zero-Config**: Auto-detects API keys from your environment.
51
+
52
+ ---
53
+
54
+ ## 🚀 The Golden Example
55
+
56
 + See the power of `ask-ai` in just a few lines of code:
57
+
58
+ ```python
59
+ from ask_ai import OpenAI, Anthropic, Google
60
+
61
+ prompt = "Explain quantum computing in one sentence."
62
+
63
+ # Switch providers instantly
64
+ print("OpenAI: ", OpenAI().ask(prompt))
65
+ print("Claude: ", Anthropic().ask(prompt))
66
+ print("Gemini: ", Google().ask(prompt))
67
+ ```
68
+
69
+ ---
70
+
71
+ ## 📦 Installation
72
+
73
+ ```bash
74
+ pip install askai-python
75
+ ```
76
+
77
+ ---
78
+
79
+ ## 📖 Quick Start
80
+
81
+ ### 1. Setup
82
+ Export your API keys (or pass them explicitly).
83
+ ```bash
84
+ export OPENAI_API_KEY="sk-..."
85
+ export ANTHROPIC_API_KEY="sk-ant-..."
86
+ ```
87
+
88
+ ### 2. Standard Usage
89
+ ```python
90
+ from ask_ai import OpenAI
91
+
92
+ ai = OpenAI()
93
+ print(ai.ask("Hello, World!"))
94
+ ```
95
+
96
+ ### 3. Advanced Usage
97
+ Control `temperature`, `top_p`, and system personas for professional results.
98
+ ```python
99
+ ai.advanced(
100
+ temperature=0.7,
101
+ prompt="You are a senior DevOps engineer."
102
+ )
103
+
104
+ print(ai.ask("How do I optimize a Dockerfile?"))
105
+ ```
106
+
107
+ ---
108
+
109
+ ## 🔌 Supported Providers
110
+
111
+ | Provider | Class | Feature Set |
112
+ |----------|-------|-------------|
113
+ | **OpenAI** | `OpenAI` | All Models (GPT-4o, o1, etc.) |
114
+ | **Anthropic** | `Anthropic` | All Models (Claude 3.5, Opus) |
115
+ | **Google** | `Google` | All Models (Gemini 1.5 Pro/Flash) |
116
+ | **Groq** | `Groq` | All Models (Llama 3, Mixtral) |
117
+ | **Azure** | `Azure` | All Deployments |
118
+ | **OpenRouter**| `OpenRouter`| All Models (100+) |
119
+
120
+ ---
121
+
122
+ ## ⭐ Support the Project
123
+
124
+ If this project saved you time, please consider giving it a star on GitHub! It helps us grow.
125
+
126
+ **[Give it a Star!](https://github.com/Hosseinghorbani0/ask-ai)**
127
+
128
+ ---
129
+ *Built by [Hossein Ghorbani](https://hosseinghorbani0.ir/) | [GitHub](https://github.com/Hosseinghorbani0).*
@@ -0,0 +1,19 @@
1
+ ask_ai/__init__.py,sha256=T9VyhKGoRGmxnfM4FtL8SHb3SVgUy1Vr3-pi6s5QnvE,245
2
+ ask_ai/__main__.py,sha256=MSmt_5Xg84uHqzTN38JwgseJK8rsJn_11A8WD99VtEo,61
3
+ ask_ai/base.py,sha256=1oOhCxXhi1jYUgpNTNwt5G1KR3GxTlSjXbDpl3xJJxM,5423
4
+ ask_ai/cli.py,sha256=8oeRLsT2lrcuG0vDq_a4CVqvzzmwDG3HxHW4BYg3IxM,2708
5
+ ask_ai/config.py,sha256=oP5RjdbbdDve8G-FlcbNaMXPorBw96GxhCbfFWnfbIw,1934
6
+ ask_ai/exceptions.py,sha256=JT2XbhsaWCFn3bPJD37RVH9HehV1ngus5s05v6xc1Tg,414
7
+ ask_ai/media.py,sha256=cxPZ0XoY0iU3Y8bY-YV3MGPlNFOJ_FjjwMATgo-jcFw,1821
8
+ ask_ai/providers/__init__.py,sha256=tVSW-FfSMPzCYK_TZy93W5ckdVe2PBPyzogpKXH_jhg,341
9
+ ask_ai/providers/anthropic_provider.py,sha256=NL_6E7eNTYAoYpym4ewD8Vj8tF_ewuFjFkRR0Jj0L-U,3670
10
+ ask_ai/providers/azure_provider.py,sha256=Diq8Wla_2lWWsE3O9rFuNe_SrWLRG4MOxGBeZ-SKvqI,2097
11
+ ask_ai/providers/google_provider.py,sha256=DsIgu_cBC7ta0lKclAlge1PYfcp3zXKPLjWSedXy5cU,3505
12
+ ask_ai/providers/groq_provider.py,sha256=iU7BTpvmBniE78VdTQETrCXrJzdXTpWH2oN9_sUgnFk,3268
13
+ ask_ai/providers/openai_provider.py,sha256=b6grPbsUIY19HkABDrUIAZAIcJCKQup5tooU2LFlkvo,4793
14
+ ask_ai/providers/openrouter_provider.py,sha256=LCGK-vNxy_maY2Fle6xh_i8MVm7a_bhjCaF7Uj0H3AM,1986
15
+ askai_python-0.1.2.dist-info/licenses/LICENSE,sha256=SUSbAUsh_Uqw7NDEKwAGRVPS7sojihBHIBGBx6WZxvg,1073
16
+ askai_python-0.1.2.dist-info/METADATA,sha256=xYcPCIs4mKxUACKlu9-eItSlAIi1Z0mRVsTLMBoyVEI,3923
17
+ askai_python-0.1.2.dist-info/WHEEL,sha256=YLJXdYXQ2FQ0Uqn2J-6iEIC-3iOey8lH3xCtvFLkd8Q,91
18
+ askai_python-0.1.2.dist-info/top_level.txt,sha256=fW2GapXaz6f-2UAuzVVPUPNfZUK_ZS1dpLyCllcE03s,7
19
+ askai_python-0.1.2.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (81.0.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Hossein Ghorbani
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1 @@
1
+ ask_ai