msgmodel-3.2.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- msgmodel/__init__.py +81 -0
- msgmodel/__main__.py +224 -0
- msgmodel/config.py +159 -0
- msgmodel/core.py +506 -0
- msgmodel/exceptions.py +93 -0
- msgmodel/providers/__init__.py +11 -0
- msgmodel/providers/gemini.py +325 -0
- msgmodel/providers/openai.py +350 -0
- msgmodel/py.typed +0 -0
- msgmodel/security.py +165 -0
- msgmodel-3.2.1.dist-info/METADATA +416 -0
- msgmodel-3.2.1.dist-info/RECORD +16 -0
- msgmodel-3.2.1.dist-info/WHEEL +5 -0
- msgmodel-3.2.1.dist-info/entry_points.txt +2 -0
- msgmodel-3.2.1.dist-info/licenses/LICENSE +21 -0
- msgmodel-3.2.1.dist-info/top_level.txt +1 -0
msgmodel/security.py
ADDED
@@ -0,0 +1,165 @@

```python
"""
msgmodel.security
~~~~~~~~~~~~~~~~~

Security utilities for msgmodel, including request signing and verification.

v3.2.1 Enhancement: Request signing for multi-tenant deployments.
"""

import hmac
import hashlib
import json
import logging
from typing import Optional, Any, Dict

logger = logging.getLogger(__name__)


class RequestSigner:
    """
    Stateless request signer for verifying request authenticity in multi-user deployments.

    v3.2.1 Enhancement: Provides optional request signing to prevent unauthorized API calls
    in shared environments. Signing is deterministic and does not require server state.

    Example:
        >>> signer = RequestSigner(secret_key="my-secret-key")
        >>> signature = signer.sign_request(
        ...     provider="openai",
        ...     message="Hello, world!",
        ...     model="gpt-4o"
        ... )
        >>>
        >>> # Verify signature on receiving end
        >>> is_valid = signer.verify_signature(
        ...     signature=signature,
        ...     provider="openai",
        ...     message="Hello, world!",
        ...     model="gpt-4o"
        ... )
    """

    def __init__(self, secret_key: str):
        """
        Initialize the request signer.

        Args:
            secret_key: Secret key for HMAC signing (must be kept confidential)
        """
        if not secret_key or not isinstance(secret_key, str):
            raise ValueError("secret_key must be a non-empty string")
        self.secret = secret_key

    def _canonicalize(self, provider: str, message: str, **kwargs) -> str:
        """
        Create a canonical string representation of a request for signing.

        The canonical format is deterministic and order-independent for kwargs.

        Args:
            provider: LLM provider name
            message: User message/prompt
            **kwargs: Additional request parameters

        Returns:
            Canonical request string
        """
        # Sort kwargs by key for deterministic ordering
        sorted_kwargs = sorted(kwargs.items())
        canonical = f"{provider}|{message}|{json.dumps(sorted_kwargs, sort_keys=True)}"
        return canonical

    def sign_request(self, provider: str, message: str, **kwargs) -> str:
        """
        Generate HMAC-SHA256 signature for a request.

        The signature covers the provider, message, and all additional parameters.
        Signatures are deterministic—the same request always produces the same signature.

        Args:
            provider: LLM provider name ('openai', 'gemini', etc.)
            message: User message/prompt
            **kwargs: Additional request parameters (model, temperature, file_hash, etc.)

        Returns:
            Hex-encoded HMAC-SHA256 signature
        """
        canonical = self._canonicalize(provider, message, **kwargs)
        signature = hmac.new(
            self.secret.encode("utf-8"),
            canonical.encode("utf-8"),
            hashlib.sha256
        ).hexdigest()
        return signature

    def verify_signature(
        self,
        signature: str,
        provider: str,
        message: str,
        **kwargs
    ) -> bool:
        """
        Verify a request signature.

        Uses constant-time comparison to prevent timing attacks.

        Args:
            signature: Hex-encoded signature to verify
            provider: LLM provider name
            message: User message/prompt
            **kwargs: Additional request parameters (must match original request)

        Returns:
            True if signature is valid, False otherwise
        """
        try:
            expected_signature = self.sign_request(provider, message, **kwargs)
            # Use constant-time comparison to prevent timing attacks
            return hmac.compare_digest(signature, expected_signature)
        except Exception as e:
            logger.warning(f"Signature verification failed: {e}")
            return False

    def sign_dict(self, request_dict: Dict[str, Any]) -> str:
        """
        Sign a request dictionary containing 'provider', 'message', and optional other fields.

        Args:
            request_dict: Dictionary with at least 'provider' and 'message' keys

        Returns:
            Hex-encoded HMAC-SHA256 signature

        Raises:
            ValueError: If required keys are missing
        """
        if "provider" not in request_dict or "message" not in request_dict:
            raise ValueError("request_dict must contain 'provider' and 'message' keys")

        provider = request_dict["provider"]
        message = request_dict["message"]
        kwargs = {k: v for k, v in request_dict.items() if k not in ("provider", "message")}

        return self.sign_request(provider, message, **kwargs)

    def verify_dict(self, signature: str, request_dict: Dict[str, Any]) -> bool:
        """
        Verify a signature for a request dictionary.

        Args:
            signature: Hex-encoded signature to verify
            request_dict: Dictionary with at least 'provider' and 'message' keys

        Returns:
            True if signature is valid, False otherwise
        """
        if "provider" not in request_dict or "message" not in request_dict:
            return False

        provider = request_dict["provider"]
        message = request_dict["message"]
        kwargs = {k: v for k, v in request_dict.items() if k not in ("provider", "message")}

        return self.verify_signature(signature, provider, message, **kwargs)
```
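A minimal round-trip sketch of the class above, using the `sign_dict`/`verify_dict` helpers; the shared secret and the `temperature` field are illustrative placeholders:

```python
from msgmodel.security import RequestSigner

# Both sides must hold the same secret; how it is distributed
# (env var, secret manager, ...) is outside this sketch.
signer = RequestSigner(secret_key="shared-secret")

request = {"provider": "openai", "message": "Hello!", "temperature": 0.7}

# Client side: sign the request dict.
signature = signer.sign_dict(request)

# Gateway side: verification passes only if every field matches exactly.
assert signer.verify_dict(signature, request)
assert not signer.verify_dict(signature, {**request, "temperature": 0.9})
```

Because `_canonicalize()` sorts keyword arguments before hashing, the signature is stable no matter what order the fields were added in.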
msgmodel-3.2.1.dist-info/METADATA
ADDED
@@ -0,0 +1,416 @@

```
Metadata-Version: 2.4
Name: msgmodel
Version: 3.2.1
Summary: Privacy-focused Python library for interacting with LLM providers (OpenAI, Gemini). Zero-retention middleware with stateless BytesIO-only file uploads.
Author-email: Leo Dias <leoodiass@outlook.com>
Maintainer-email: Leo Dias <leoodiass@outlook.com>
License-Expression: MIT
Project-URL: Homepage, https://github.com/LeoooDias/msgModel
Project-URL: Repository, https://github.com/LeoooDias/msgModel
Project-URL: Documentation, https://github.com/LeoooDias/msgModel#readme
Project-URL: Issues, https://github.com/LeoooDias/msgModel/issues
Project-URL: Changelog, https://github.com/LeoooDias/msgModel/releases
Keywords: llm,openai,gemini,gpt,ai,machine-learning,chatbot,api,unified,wrapper
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Typing :: Typed
Classifier: Environment :: Console
Requires-Python: >=3.10
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: requests>=2.31.0
Provides-Extra: claude
Requires-Dist: anthropic>=0.18.0; extra == "claude"
Provides-Extra: all
Requires-Dist: anthropic>=0.18.0; extra == "all"
Provides-Extra: dev
Requires-Dist: pytest>=7.0.0; extra == "dev"
Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
Requires-Dist: mypy>=1.0.0; extra == "dev"
Requires-Dist: ruff>=0.1.0; extra == "dev"
Requires-Dist: build>=1.0.0; extra == "dev"
Requires-Dist: twine>=4.0.0; extra == "dev"
Dynamic: license-file
```
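For reference, the extras declared above follow standard pip extra syntax, so installation might look like this (generic pip usage, not commands from the package's own docs):

```bash
pip install msgmodel              # core install (requests only)
pip install "msgmodel[claude]"    # adds anthropic>=0.18.0
pip install "msgmodel[dev]"       # test/lint/build tooling
```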
# msgmodel

[PyPI version](https://badge.fury.io/py/msgmodel)
[Python 3.10+](https://www.python.org/downloads/)
[License: MIT](https://opensource.org/licenses/MIT)

A unified Python library and CLI for interacting with multiple Large Language Model (LLM) providers with a **privacy-first, zero-retention design**.

## Overview

`msgmodel` provides both a **Python library** and a **command-line interface** to interact with two major LLM providers:

- **OpenAI** (GPT models) — Zero Data Retention enforced
- **Google Gemini** (Paid tier) — Abuse-monitoring retention only

**Privacy Guarantee**: All files are processed via in-memory BytesIO objects with base64 inline encoding. No persistent file storage, no server-side uploads, no training data retention (when configured correctly).

## Features

- **Unified API**: Single `query()` and `stream()` functions work with all providers
- **Library & CLI**: Use as a Python module or command-line tool
- **Streaming support**: Stream responses in real-time
- **File attachments**: Process images, PDFs, and text files with in-memory BytesIO only
- **Flexible configuration**: Dataclass-based configs with sensible defaults
- **Multiple API key sources**: Direct parameter, environment variable, or key file
- **Exception-based error handling**: Clean errors, no `sys.exit()` in library code
- **Type-safe**: Full type hints throughout
- **Privacy-first**: Mandatory zero-retention enforcement, stateless design, BytesIO-only file transfers

## Installation

### From PyPI (Recommended)

```bash
pip install msgmodel
```

### From Source

```bash
# Clone the repository
git clone https://github.com/LeoooDias/msgmodel.git
cd msgmodel

# Install the package
pip install -e .

# Or with development dependencies
pip install -e ".[dev]"
```

### Prerequisites

- Python 3.10 or higher
- API keys from the providers you wish to use

## Quick Start

### As a Library

```python
from msgmodel import query, stream

# Simple query (uses OPENAI_API_KEY env var)
response = query("openai", "What is Python?")
print(response.text)

# With explicit API key
response = query("gemini", "Hello!", api_key="your-api-key")

# Streaming
for chunk in stream("openai", "Tell me a story"):
    print(chunk, end="", flush=True)

# With file attachment (in-memory BytesIO only)
import io
file_obj = io.BytesIO(your_binary_data)
response = query("gemini", "Describe this image", file_like=file_obj, filename="photo.jpg")

# With custom configuration
from msgmodel import OpenAIConfig

config = OpenAIConfig(model="gpt-4o-mini", temperature=0.7, max_tokens=2000)
response = query("openai", "Write a poem", config=config)
```

### As a CLI

```bash
# Basic usage
python -m msgmodel -p openai "What is Python?"

# Using shorthand provider codes
python -m msgmodel -p g "Hello, Gemini!"   # g = gemini
python -m msgmodel -p o "Hello, OpenAI!"   # o = openai

# With streaming
python -m msgmodel -p openai "Tell me a story" --stream

# From a file
python -m msgmodel -p gemini -f prompt.txt

# With system instruction
python -m msgmodel -p openai "Analyze this" -i "You are a data analyst"

# With file attachment (base64 inline)
python -m msgmodel -p gemini "Describe this" -b image.jpg

# Custom parameters
python -m msgmodel -p openai "Hello" -m gpt-4o-mini -t 500 --temperature 0.7

# Get full JSON response instead of just text
python -m msgmodel -p openai "Hello" --json

# Verbose output (shows model, provider, token usage)
python -m msgmodel -p openai "Hello" -v
```

## API Key Configuration

API keys can be provided in three ways (in order of priority):

1. **Direct parameter**: `query("openai", "Hello", api_key="sk-...")`
2. **Environment variable**:
   - `OPENAI_API_KEY` for OpenAI
   - `GEMINI_API_KEY` for Gemini
3. **Key file** in the current directory:
   - `openai-api.key`
   - `gemini-api.key`
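For example, the environment-variable and key-file options could be set up as follows in a POSIX shell (the key values are placeholders):

```bash
# Option 2: environment variables, read automatically by query()/stream()
export OPENAI_API_KEY="sk-..."
export GEMINI_API_KEY="..."

# Option 3: key file in the current working directory
echo "sk-..." > openai-api.key
```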
## Configuration

Each provider has its own configuration dataclass with sensible defaults:

```python
from msgmodel import OpenAIConfig, GeminiConfig

# OpenAI configuration
openai_config = OpenAIConfig(
    model="gpt-4o",        # Model to use
    temperature=1.0,       # Sampling temperature
    top_p=1.0,             # Nucleus sampling
    max_tokens=1000,       # Max output tokens
)

# Gemini configuration
gemini_config = GeminiConfig(
    model="gemini-2.5-flash",
    temperature=1.0,
    top_p=0.95,
    top_k=40,
    safety_threshold="BLOCK_NONE",
)
```

## Data Retention & Privacy

`msgmodel` is designed with **statelessness** as a core principle. Here's what you need to know:

### OpenAI (Zero Data Retention)

When using OpenAI:

- **What's protected**: Input prompts, system instructions, and model responses
- **How**: The `X-OpenAI-No-Store` header is automatically added to all Chat Completions requests
- **Result**: OpenAI does **not** use these interactions for service improvements or model training
- **Persistence**: Inputs/outputs are **not stored** beyond the immediate request-response cycle
- **File handling**: All files are base64-encoded and embedded inline in prompts — no server-side uploads

**Important limitations**:
- OpenAI's **API logs** may retain minimal metadata (timestamps, API version, token counts) for ~30 days for debugging purposes, but not the actual content
- **Billing records** will still show API usage but not interaction content

Example (ZDR enforced):
```python
from msgmodel import query

response = query("openai", "Sensitive prompt")
# Zero Data Retention is enforced automatically by the X-OpenAI-No-Store header
```

### Google Gemini (Paid Tier Required)

Google Gemini's data retention policy **depends on which service tier you use**. No API parameter controls this; it's determined by your Google Cloud account configuration.

**Paid Services (Google Cloud Billing + Paid Quota)**

- **What's protected**: Data is NOT used for model training or product improvement
- **What IS retained**: Prompts and responses are retained temporarily for abuse detection only (typically 24-72 hours)
- **Human review**: NO (unless abuse is detected)
- **Statelessness**: ✅ ACHIEVABLE — within abuse-monitoring requirements
- **File handling**: Base64-encoded inline embedding — no persistent storage

```python
from msgmodel import query

# Paid tier: data protected from training, used only for abuse monitoring
response = query("gemini", "Sensitive prompt", api_key="your-api-key")
```

**Important**: Using Gemini assumes your Google Cloud project has:
1. A Cloud Billing account linked
2. Paid API quota enabled (not the free quota tier)

If these are not enabled, Google applies its unpaid service terms regardless of your code.

**Learn more**: [Google Gemini API Terms — How Google Uses Your Data](https://ai.google.dev/gemini-api/terms)

### Summary Comparison

| Provider | Statelessness Achievable | How | Caveat |
|----------|--------------------------|-----|--------|
| **OpenAI** | ✅ YES | Automatic ZDR header | Metadata ~30 days |
| **Gemini (Paid)** | ✅ MOSTLY | Cloud Billing + paid quota | Abuse monitoring ~24-72 hours |
| **Gemini (Unpaid)** | ❌ NO | No configuration possible | Data retained for training indefinitely |

For maximum privacy, use **OpenAI with zero retention** (the default) or **Gemini with paid Cloud Billing**.

## File Uploads

### The BytesIO-Only Approach

All file uploads in msgmodel v3.2.0+ use **in-memory BytesIO objects** with base64 inline encoding:

```python
import io
from msgmodel import query

# Read file into memory
with open("document.pdf", "rb") as f:
    file_data = f.read()

# Create BytesIO object
file_obj = io.BytesIO(file_data)

# Query with file
response = query(
    "openai",
    "Summarize this document",
    file_like=file_obj,
    filename="document.pdf"  # Enables MIME type detection
)
```

**Why BytesIO only?**
- No disk persistence between requests
- No server-side file uploads (the Files API is not used)
- Better privacy — files are never stored on provider servers
- Stateless operation — each request is completely independent

**File Size Limits**
- **OpenAI**: ~15-20 MB practical limit (base64 overhead + token limits)
- **Gemini**: ~22 MB practical limit (base64 overhead + token limits)

If the API returns a size-related error, the file exceeds the practical limit for that provider.
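A pre-flight size check can fail fast before the API does. This is a sketch only: the byte ceilings below simply restate the approximate limits quoted above and are not exact API constants, and `open_checked` is a hypothetical helper, not part of msgmodel:

```python
import io

# Approximate raw-byte ceilings from the limits quoted above (assumptions,
# not API constants; base64 inline encoding adds roughly 33% on top).
PRACTICAL_LIMIT_BYTES = {
    "openai": 15 * 1024 * 1024,
    "gemini": 22 * 1024 * 1024,
}

def open_checked(path: str, provider: str) -> io.BytesIO:
    """Load a file into memory, rejecting it if it likely exceeds provider limits."""
    with open(path, "rb") as f:
        data = f.read()
    limit = PRACTICAL_LIMIT_BYTES[provider]
    if len(data) > limit:
        raise ValueError(
            f"{path} is {len(data)} bytes; ~{limit // 2**20} MB is the practical {provider} limit"
        )
    return io.BytesIO(data)
```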
## Error Handling

The library uses exceptions instead of `sys.exit()`:

```python
from msgmodel import query, MsgModelError, AuthenticationError, APIError

try:
    response = query("openai", "Hello")
except AuthenticationError as e:
    print(f"API key issue: {e}")
except APIError as e:
    print(f"API call failed: {e}")
    print(f"Status code: {e.status_code}")
except MsgModelError as e:
    print(f"General error: {e}")
```

## Response Object

The `query()` function returns an `LLMResponse` object:

```python
response = query("openai", "Hello")

print(response.text)          # The generated text
print(response.model)         # Model used (e.g., "gpt-4o")
print(response.provider)      # Provider name (e.g., "openai")
print(response.usage)         # Token usage dict (if available)
print(response.raw_response)  # Complete API response
```

## Project Structure

```
msgmodel/
├── msgmodel/                # Python package
│   ├── __init__.py          # Public API exports
│   ├── __main__.py          # CLI entry point
│   ├── core.py              # Core query/stream functions
│   ├── config.py            # Configuration dataclasses
│   ├── exceptions.py        # Custom exceptions
│   ├── security.py          # Request signing utilities
│   ├── py.typed             # PEP 561 marker for typed package
│   └── providers/           # Provider implementations
│       ├── __init__.py
│       ├── openai.py
│       └── gemini.py
├── tests/                   # Test suite
│   ├── test_config.py
│   ├── test_core.py
│   └── test_exceptions.py
├── pyproject.toml           # Package configuration
├── LICENSE                  # MIT License
├── MANIFEST.in              # Distribution manifest
├── requirements.txt         # Dependencies
└── README.md
```

## CLI Usage

After installation, the `msgmodel` command is available:

```bash
# Basic usage
msgmodel -p openai "What is Python?"

# Or using python -m
python -m msgmodel -p openai "What is Python?"

# Provider shortcuts: o=openai, g=gemini
msgmodel -p g "Hello, Gemini!"

# With streaming
msgmodel -p openai "Tell me a story" --stream

# From a file
msgmodel -p gemini -f prompt.txt
```

## Running Tests

```bash
# Install dev dependencies
pip install -e ".[dev]"

# Run tests
pytest

# Run with coverage
pytest --cov=msgmodel
```

## Building & Publishing

```bash
# Install build tools
pip install build twine

# Build the package
python -m build

# Check the distribution
twine check dist/*

# Upload to TestPyPI first (recommended)
twine upload --repository testpypi dist/*

# Upload to PyPI (requires a PyPI account)
twine upload dist/*
```

## License

MIT License - see [LICENSE](LICENSE) for details.

## Author

Leo Dias
msgmodel-3.2.1.dist-info/RECORD
ADDED
@@ -0,0 +1,16 @@

```
msgmodel/__init__.py,sha256=HCivZ1dIjhiMtZN9j4hcIlSY6mLY2hbuzrZtlQT_NDI,1663
msgmodel/__main__.py,sha256=n-A86IWf8A4X3lAWjPBR_bilewNxZJrdZbv7pB5b24Y,5755
msgmodel/config.py,sha256=Gz4BHLmD3LMv3x8XXSv-kxaNc3X357VkjKO04YkdE_Y,5005
msgmodel/core.py,sha256=kPlykiCxicmKBKZfy8fccc-6YGfKNRiPlSTCdjsyqOY,17329
msgmodel/exceptions.py,sha256=DdtQs8UfaL_KBoAwtLN-t6tYsyh10s6fMxsGbJDmr3o,1973
msgmodel/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
msgmodel/security.py,sha256=46_BHRwXN9GUeLpBen9_siBwPiq7kxSuT7WjqvryMIM,5739
msgmodel/providers/__init__.py,sha256=tH7tCRE97XlVDyUEgjpztMj148v2TTH2cda_galYPvQ,219
msgmodel/providers/gemini.py,sha256=jK7uAQ4714tkn17aVeReXdaiKKuoLNPHnmxbruCI1Ag,12280
msgmodel/providers/openai.py,sha256=sEz8CpILV5d0nZE62fkC37mkPZ2H_xcCmeDx4RuXW08,13003
msgmodel-3.2.1.dist-info/licenses/LICENSE,sha256=yrOPs41SkWzJmldAS0ggL7o6Prx2VcdKp5cYceHznW8,1065
msgmodel-3.2.1.dist-info/METADATA,sha256=dKgvkIktNDDDTjrmwu5nCGfU1t8DQiMOMjlreNtRshk,13163
msgmodel-3.2.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
msgmodel-3.2.1.dist-info/entry_points.txt,sha256=TmynPfL2ClRVbo5lQjijdNuNxFSQrGFmn3BGFRteb6I,52
msgmodel-3.2.1.dist-info/top_level.txt,sha256=IGJ3DqDqBj44i1BReNf1BxtpT86Wx9HGC4aURDsfFS8,9
msgmodel-3.2.1.dist-info/RECORD,,
```
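The `sha256=` values above use the standard wheel RECORD encoding (unpadded URL-safe base64 of the raw digest, per PEP 376/427). A sketch of how one might audit these entries against an installed copy; `verify_record` is illustrative, not part of the package:

```python
import base64
import csv
import hashlib

def verify_record(record_path: str) -> bool:
    """Check each hashed RECORD entry against the file on disk.

    RECORD paths are relative to the install root, so run this from
    site-packages (or adjust the paths accordingly).
    """
    with open(record_path, newline="") as f:
        for row in csv.reader(f):
            if len(row) != 3 or not row[1]:
                continue  # skip blank rows and the unhashed RECORD entry
            path, digest, _size = row
            algo, _, expected = digest.partition("=")
            with open(path, "rb") as target:
                raw = hashlib.new(algo, target.read()).digest()
            actual = base64.urlsafe_b64encode(raw).rstrip(b"=").decode()
            if actual != expected:
                return False
    return True
```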
msgmodel-3.2.1.dist-info/licenses/LICENSE
ADDED
@@ -0,0 +1,21 @@

MIT License

Copyright (c) 2025 Leo Dias

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
msgmodel-3.2.1.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@

msgmodel