msgmodel-3.2.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.
- msgmodel/__init__.py +81 -0
- msgmodel/__main__.py +224 -0
- msgmodel/config.py +159 -0
- msgmodel/core.py +506 -0
- msgmodel/exceptions.py +93 -0
- msgmodel/providers/__init__.py +11 -0
- msgmodel/providers/gemini.py +325 -0
- msgmodel/providers/openai.py +350 -0
- msgmodel/py.typed +0 -0
- msgmodel/security.py +165 -0
- msgmodel-3.2.1.dist-info/METADATA +416 -0
- msgmodel-3.2.1.dist-info/RECORD +16 -0
- msgmodel-3.2.1.dist-info/WHEEL +5 -0
- msgmodel-3.2.1.dist-info/entry_points.txt +2 -0
- msgmodel-3.2.1.dist-info/licenses/LICENSE +21 -0
- msgmodel-3.2.1.dist-info/top_level.txt +1 -0
msgmodel/__init__.py
ADDED
@@ -0,0 +1,81 @@
"""
msgmodel
~~~~~~~~

A unified Python library for interacting with LLM providers.

Supports OpenAI and Google Gemini with a single, consistent interface.

Basic usage:
    >>> from msgmodel import query
    >>> response = query("openai", "Hello, world!")
    >>> print(response.text)

Streaming:
    >>> from msgmodel import stream
    >>> for chunk in stream("openai", "Tell me a story"):
    ...     print(chunk, end="", flush=True)

With custom configuration:
    >>> from msgmodel import query, OpenAIConfig
    >>> config = OpenAIConfig(model="gpt-4o-mini", temperature=0.7)
    >>> response = query("openai", "Hello!", config=config)
"""

__version__ = "3.2.1"
__author__ = "Leo Dias"

# Core API
from .core import query, stream, LLMResponse

# Configuration
from .config import (
    Provider,
    OpenAIConfig,
    GeminiConfig,
    get_default_config,
)

# Exceptions
from .exceptions import (
    MsgModelError,
    ConfigurationError,
    AuthenticationError,
    FileError,
    APIError,
    ProviderError,
    StreamingError,
)

# Providers (for advanced usage)
from .providers import OpenAIProvider, GeminiProvider

# Security (v3.2.1+)
from .security import RequestSigner

__all__ = [
    # Version
    "__version__",
    # Core API
    "query",
    "stream",
    "LLMResponse",
    # Configuration
    "Provider",
    "OpenAIConfig",
    "GeminiConfig",
    "get_default_config",
    # Exceptions
    "MsgModelError",
    "ConfigurationError",
    "AuthenticationError",
    "FileError",
    "APIError",
    "ProviderError",
    "StreamingError",
    # Providers
    "OpenAIProvider",
    "GeminiProvider",
    # Security
    "RequestSigner",
]
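The exports above are the package's entire public surface. As a quick orientation, a minimal, hypothetical usage sketch based only on the docstring examples and the exception names re-exported here (the actual query()/LLMResponse definitions live in msgmodel/core.py, not shown in this section):

# Hypothetical sketch; the query() signature is taken from the docstring
# above, and MsgModelError appears to be the base class of the other errors
# (the CLI below catches it last, as a catch-all).
from msgmodel import query, OpenAIConfig, MsgModelError

config = OpenAIConfig(model="gpt-4o-mini", temperature=0.7)
try:
    response = query("openai", "Hello, world!", config=config)
    print(response.text)
except MsgModelError as exc:
    print(f"request failed: {exc}")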
msgmodel/__main__.py
ADDED
@@ -0,0 +1,224 @@
"""
msgmodel CLI
~~~~~~~~~~~~

Command-line interface for msgmodel.

Usage:
    python -m msgmodel --provider openai "Hello, world!"
    python -m msgmodel -p g -f prompt.txt --stream
"""

import argparse
import sys
import logging
from pathlib import Path

from . import (
    __version__,
    query,
    stream,
    Provider,
    OpenAIConfig,
    GeminiConfig,
    MsgModelError,
    ConfigurationError,
    AuthenticationError,
    FileError,
    APIError,
)

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format="%(levelname)s: %(message)s",
    stream=sys.stderr,
)
logger = logging.getLogger(__name__)


def parse_args() -> argparse.Namespace:
    """Parse command-line arguments."""
    parser = argparse.ArgumentParser(
        prog="msgmodel",
        description="Unified LLM API - Query OpenAI or Gemini from the command line.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  %(prog)s -p openai "Hello, world!"
  %(prog)s -p gemini -f prompt.txt
  %(prog)s -p g "Tell me a story" --stream
  %(prog)s -p o "Analyze this" -i instructions.txt -b image.jpg
""",
    )

    parser.add_argument(
        "-p", "--provider",
        required=True,
        help="LLM provider: 'openai'/'o' or 'gemini'/'g'",
    )

    parser.add_argument(
        "prompt",
        nargs="?",
        help="The prompt text (or use -f for file input)",
    )

    parser.add_argument(
        "-f", "--prompt-file",
        help="Read prompt from file instead of command line",
    )

    parser.add_argument(
        "-i", "--instruction",
        help="System instruction (text or path to file)",
    )

    parser.add_argument(
        "-b", "--binary-file",
        help="Path to binary file (image, PDF, etc.)",
    )

    parser.add_argument(
        "-k", "--api-key",
        help="API key (overrides environment variable and key file)",
    )

    parser.add_argument(
        "-m", "--model",
        help="Model to use (overrides default)",
    )

    parser.add_argument(
        "-t", "--max-tokens",
        type=int,
        default=1000,
        help="Maximum tokens to generate (default: 1000)",
    )

    parser.add_argument(
        "--temperature",
        type=float,
        help="Sampling temperature (0.0 to 2.0)",
    )

    parser.add_argument(
        "-s", "--stream",
        action="store_true",
        help="Stream the response",
    )

    parser.add_argument(
        "--json",
        action="store_true",
        help="Output raw JSON response",
    )

    parser.add_argument(
        "-v", "--verbose",
        action="store_true",
        help="Enable verbose logging",
    )

    parser.add_argument(
        "--version",
        action="version",
        version=f"%(prog)s {__version__}",
    )

    return parser.parse_args()


def read_file_content(path: str) -> str:
    """Read content from a file."""
    try:
        with open(path, "r", encoding="utf-8") as f:
            return f.read()
    except IOError as e:
        raise FileError(f"Cannot read file {path}: {e}") from e


def main() -> int:
    """Main entry point for the CLI."""
    args = parse_args()

    if args.verbose:
        logging.getLogger().setLevel(logging.DEBUG)

    try:
        # Get the prompt
        if args.prompt:
            prompt = args.prompt
        elif args.prompt_file:
            prompt = read_file_content(args.prompt_file)
        else:
            logger.error("Either prompt text or -f/--prompt-file is required")
            return 1

        # Get system instruction
        system_instruction = None
        if args.instruction:
            # Check if it's a file path
            if Path(args.instruction).exists():
                system_instruction = read_file_content(args.instruction)
            else:
                system_instruction = args.instruction

        # Common kwargs
        kwargs = {
            "provider": args.provider,
            "prompt": prompt,
            "api_key": args.api_key,
            "system_instruction": system_instruction,
            "file_path": args.binary_file,
            "max_tokens": args.max_tokens,
            "model": args.model,
            "temperature": args.temperature,
        }

        if args.stream:
            # Streaming mode
            for chunk in stream(**kwargs):
                print(chunk, end="", flush=True)
            print()  # Final newline
        else:
            # Non-streaming mode
            response = query(**kwargs)

            if args.json:
                import json
                print(json.dumps(response.raw_response, indent=2))
            else:
                print(response.text)

            if args.verbose:
                logger.info(f"Model: {response.model}")
                logger.info(f"Provider: {response.provider}")
                if response.usage:
                    logger.info(f"Usage: {response.usage}")

        return 0

    except ConfigurationError as e:
        logger.error(f"Configuration error: {e}")
        return 1
    except AuthenticationError as e:
        logger.error(f"Authentication error: {e}")
        return 2
    except FileError as e:
        logger.error(f"File error: {e}")
        return 3
    except APIError as e:
        logger.error(f"API error: {e}")
        return 4
    except MsgModelError as e:
        logger.error(f"Error: {e}")
        return 5
    except KeyboardInterrupt:
        logger.info("\nOperation cancelled")
        return 0


if __name__ == "__main__":
    sys.exit(main())
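main() maps each exception class to a distinct exit code (1 configuration, 2 authentication, 3 file, 4 API, 5 other library errors), which makes the CLI scriptable. A small illustrative sketch of a wrapper that branches on those codes; the subprocess invocation is my own example, not part of the package:

# Illustrative wrapper only; exit-code meanings are taken from main() above.
import subprocess
import sys

proc = subprocess.run(
    [sys.executable, "-m", "msgmodel", "-p", "openai", "Hello, world!"],
    capture_output=True,
    text=True,
)
if proc.returncode == 2:
    print("authentication failed: check OPENAI_API_KEY", file=sys.stderr)
elif proc.returncode != 0:
    print(f"msgmodel exited with code {proc.returncode}", file=sys.stderr)
else:
    print(proc.stdout, end="")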
msgmodel/config.py
ADDED
@@ -0,0 +1,159 @@
"""
msgmodel.config
~~~~~~~~~~~~~~~

Configuration dataclasses for LLM providers.

These classes provide type-safe, runtime-configurable settings
for each supported provider. Defaults match the original script's
hardcoded values.
"""

from dataclasses import dataclass, field
from typing import Optional, List
from enum import Enum


class Provider(str, Enum):
    """Supported LLM providers."""
    OPENAI = "openai"
    GEMINI = "gemini"

    @classmethod
    def from_string(cls, value: str) -> "Provider":
        """
        Convert string to Provider enum.

        Args:
            value: Provider name or shorthand ('o', 'g', 'openai', 'gemini')

        Returns:
            Provider enum member

        Raises:
            ValueError: If the value is not a valid provider
        """
        value = value.lower().strip()

        # Support shorthand codes
        shortcuts = {
            'o': cls.OPENAI,
            'g': cls.GEMINI,
        }

        if value in shortcuts:
            return shortcuts[value]

        # Support full names
        for provider in cls:
            if provider.value == value:
                return provider

        valid = ", ".join([f"'{p.value}'" for p in cls] + ["'o'", "'g'"])
        raise ValueError(f"Invalid provider '{value}'. Valid options: {valid}")


# ============================================================================
# API URLs (constants, not configurable per-request)
# ============================================================================
OPENAI_URL = "https://api.openai.com/v1/chat/completions"
GEMINI_URL = "https://generativelanguage.googleapis.com"

# Environment variable names for API keys
OPENAI_API_KEY_ENV = "OPENAI_API_KEY"
GEMINI_API_KEY_ENV = "GEMINI_API_KEY"

# Default API key file names (for backward compatibility)
OPENAI_API_KEY_FILE = "openai-api.key"
GEMINI_API_KEY_FILE = "gemini-api.key"


@dataclass
class OpenAIConfig:
    """
    Configuration for OpenAI API calls.

    **PRIVACY ENFORCEMENT**: Zero Data Retention (ZDR) is REQUIRED and non-negotiable.
    The X-OpenAI-No-Store header is automatically added to all requests.

    OpenAI will NOT use your prompts or responses for model training or improvements.
    See: https://platform.openai.com/docs/guides/zero-data-retention

    Attributes:
        model: Model identifier (e.g., 'gpt-4o', 'gpt-4o-mini')
        temperature: Sampling temperature (0.0 to 2.0)
        top_p: Nucleus sampling parameter
        max_tokens: Maximum tokens to generate
        n: Number of completions to generate

    Note: File uploads are only supported via inline base64-encoding in prompts.
    Files are limited to practical API size constraints (~15-20MB).
    """
    model: str = "gpt-4o"
    temperature: float = 1.0
    top_p: float = 1.0
    max_tokens: int = 1000
    n: int = 1


@dataclass
class GeminiConfig:
    """
    Configuration for Google Gemini API calls.

    **PRIVACY ENFORCEMENT**: PAID API TIER IS REQUIRED AND ENFORCED.

    Gemini paid services (with active Google Cloud Billing and paid quota):
    - Do NOT use your prompts or responses for model training
    - Retain data temporarily for abuse detection only (24-72 hours)
    - Provide near-stateless operation for sensitive materials

    UNPAID TIER IS NOT SUPPORTED. Google retains data indefinitely for training
    on unpaid services. This library will verify paid access and raise an error
    if you attempt to use unpaid quota.

    See: https://ai.google.dev/gemini-api/terms

    Attributes:
        model: Model identifier (e.g., 'gemini-2.5-flash', 'gemini-1.5-pro')
        temperature: Sampling temperature (0.0 to 2.0)
        top_p: Nucleus sampling parameter
        top_k: Top-k sampling parameter
        max_tokens: Maximum tokens to generate
        candidate_count: Number of response candidates
        safety_threshold: Content safety filtering level
        api_version: API version to use
        cache_control: Whether to enable caching

    Note: File uploads are only supported via inline base64-encoding in prompts.
    Files are limited to practical API size constraints (~22MB).
    """
    model: str = "gemini-2.5-flash"
    temperature: float = 1.0
    top_p: float = 0.95
    top_k: int = 40
    max_tokens: int = 1000
    candidate_count: int = 1
    safety_threshold: str = "BLOCK_NONE"
    api_version: str = "v1beta"
    cache_control: bool = False


# Type alias for supported configs
ProviderConfig = OpenAIConfig | GeminiConfig


def get_default_config(provider: Provider) -> ProviderConfig:
    """
    Get the default configuration for a provider.

    Args:
        provider: The LLM provider

    Returns:
        Default configuration dataclass for the provider
    """
    configs = {
        Provider.OPENAI: OpenAIConfig,
        Provider.GEMINI: GeminiConfig,
    }
    return configs[provider]()
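Provider.from_string and get_default_config together turn a user-supplied string into a ready-to-use config object. A brief sketch using only the names defined above; dataclasses.replace is the standard way to derive a tweaked copy without mutating the defaults:

# Sketch based on the definitions above.
from dataclasses import replace

from msgmodel import Provider, get_default_config

provider = Provider.from_string("g")      # shorthand for Provider.GEMINI
default = get_default_config(provider)    # -> GeminiConfig()
tuned = replace(default, temperature=0.2, max_tokens=2048)
assert tuned.model == "gemini-2.5-flash"  # other fields keep their defaults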