fenra-0.1.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
fenra-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,90 @@
+ Metadata-Version: 2.3
+ Name: fenra
+ Version: 0.1.0
+ Summary: Privacy-first AI cost tracking library
+ Author: Ajdin Ahmetovic
+ Author-email: Ajdin Ahmetovic <ahmetovicajdin@gmail.com>
+ Requires-Dist: requests>=2.28.0
+ Requires-Dist: fenra[openai,anthropic,gemini] ; extra == 'all'
+ Requires-Dist: anthropic>=0.18.0 ; extra == 'anthropic'
+ Requires-Dist: pytest>=7.0.0 ; extra == 'dev'
+ Requires-Dist: pytest-asyncio>=0.21.0 ; extra == 'dev'
+ Requires-Dist: mypy>=1.0.0 ; extra == 'dev'
+ Requires-Dist: ruff>=0.1.0 ; extra == 'dev'
+ Requires-Dist: google-genai>=1.0.0 ; extra == 'gemini'
+ Requires-Dist: openai>=1.0.0 ; extra == 'openai'
+ Requires-Python: >=3.11
+ Provides-Extra: all
+ Provides-Extra: anthropic
+ Provides-Extra: dev
+ Provides-Extra: gemini
+ Provides-Extra: openai
+ Description-Content-Type: text/markdown
+
+ <p align="center">
+   <img src="https://fenra.io/fenra-text.svg" alt="Fenra" width="200">
+ </p>
+
+ # Fenra Python SDK
+
+ [![PyPI](https://img.shields.io/pypi/v/fenra.svg)](https://pypi.python.org/pypi/fenra)
+ [![Python](https://img.shields.io/pypi/pyversions/fenra.svg)](https://pypi.python.org/pypi/fenra)
+ [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
+
+ FinOps platform for AI cost visibility
+
+ ## Usage
+
+ Install the package:
+
+ ```bash
+ pip install fenra
+ ```
+
+ Initialize and start tracking:
+
+ ```python
+ import fenra
+ from openai import OpenAI
+
+ fenra.init(api_key="your-fenra-api-key")
+
+ # Use as normal - costs are tracked automatically
+ client = OpenAI()
+ response = client.chat.completions.create(
+     model="gpt-4o",
+     messages=[{"role": "user", "content": "Hello!"}]
+ )
+ ```
+
+ Works with Anthropic and Gemini the same way.
+
+ For custom providers or manual tracking:
+
+ ```python
+ fenra.track(
+     provider="custom",
+     model="my-model",
+     input_tokens=100,
+     output_tokens=50,
+ )
+ ```
+
+ To disable auto-tracking:
+
+ ```bash
+ export FENRA_DISABLE_AUTO_TRACK=1
+ ```
+
+ ## Support
+
+ - [Documentation](https://docs.fenra.io)
+ - [Contact](mailto:hello@fenra.io)
+
+ ## Privacy
+
+ Fenra is privacy-first by default. No prompts or responses are stored unless explicitly enabled.
+
+ ## License
+
+ The Fenra Python SDK is licensed under the [Apache 2.0](http://apache.org/licenses/LICENSE-2.0.txt) License.
fenra-0.1.0/README.md ADDED
@@ -0,0 +1,67 @@
+ <p align="center">
+   <img src="https://fenra.io/fenra-text.svg" alt="Fenra" width="200">
+ </p>
+
+ # Fenra Python SDK
+
+ [![PyPI](https://img.shields.io/pypi/v/fenra.svg)](https://pypi.python.org/pypi/fenra)
+ [![Python](https://img.shields.io/pypi/pyversions/fenra.svg)](https://pypi.python.org/pypi/fenra)
+ [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
+
+ FinOps platform for AI cost visibility
+
+ ## Usage
+
+ Install the package:
+
+ ```bash
+ pip install fenra
+ ```
+
+ Initialize and start tracking:
+
+ ```python
+ import fenra
+ from openai import OpenAI
+
+ fenra.init(api_key="your-fenra-api-key")
+
+ # Use as normal - costs are tracked automatically
+ client = OpenAI()
+ response = client.chat.completions.create(
+     model="gpt-4o",
+     messages=[{"role": "user", "content": "Hello!"}]
+ )
+ ```
+
+ Works with Anthropic and Gemini the same way.
+
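For instance, a minimal Anthropic call under the same auto-tracking might look like the sketch below (assuming the `anthropic` SDK is installed, for example via the `anthropic` extra declared in the package metadata above, and that `ANTHROPIC_API_KEY` is set in the environment):

```python
import fenra
from anthropic import Anthropic

fenra.init(api_key="your-fenra-api-key")

# Same pattern as the OpenAI example - the call is tracked automatically
client = Anthropic()
message = client.messages.create(
    model="claude-3-opus-20240229",
    max_tokens=256,
    messages=[{"role": "user", "content": "Hello!"}],
)
```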
+ For custom providers or manual tracking:
+
+ ```python
+ fenra.track(
+     provider="custom",
+     model="my-model",
+     input_tokens=100,
+     output_tokens=50,
+ )
+ ```
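Beyond the token counts, `fenra.track()` accepts extra keyword arguments that are merged with the context set via `fenra.set_context()`, as the package source further down shows. A sketch, reusing the field names from the `set_context()` docstring:

```python
import fenra

fenra.init(api_key="your-fenra-api-key")

# Attach metadata once; it is merged into every tracked call in this context
fenra.set_context(billable_customer_id="acme-corp", environment="production")

# Per-call keyword arguments are merged on top of the current context
fenra.track(
    provider="custom",
    model="my-model",
    input_tokens=100,
    output_tokens=50,
    feature="chat-assistant",
)
```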
+
+ To disable auto-tracking:
+
+ ```bash
+ export FENRA_DISABLE_AUTO_TRACK=1
+ ```
+
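The same switch can be flipped from Python, as long as it happens before `fenra` is imported, since provider SDKs are patched at import time (see the package source further down). A minimal sketch:

```python
import os

# Must run before `import fenra`: the env var is read and patching happens at import time
os.environ["FENRA_DISABLE_AUTO_TRACK"] = "1"

import fenra

fenra.init(api_key="your-fenra-api-key")
fenra.track(provider="custom", model="my-model", input_tokens=100, output_tokens=50)
```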
+ ## Support
+
+ - [Documentation](https://docs.fenra.io)
+ - [Contact](mailto:hello@fenra.io)
+
+ ## Privacy
+
+ Fenra is privacy-first by default. No prompts or responses are stored unless explicitly enabled.
+
+ ## License
+
+ The Fenra Python SDK is licensed under the [Apache 2.0](http://apache.org/licenses/LICENSE-2.0.txt) License.
@@ -0,0 +1,23 @@
+ [project]
+ name = "fenra"
+ version = "0.1.0"
+ description = "Privacy-first AI cost tracking library"
+ readme = "README.md"
+ authors = [
+     { name = "Ajdin Ahmetovic", email = "ahmetovicajdin@gmail.com" }
+ ]
+ requires-python = ">=3.11"
+ dependencies = [
+     "requests>=2.28.0",
+ ]
+
+ [project.optional-dependencies]
+ openai = ["openai>=1.0.0"]
+ anthropic = ["anthropic>=0.18.0"]
+ gemini = ["google-genai>=1.0.0"]
+ all = ["fenra[openai,anthropic,gemini]"]
+ dev = ["pytest>=7.0.0", "pytest-asyncio>=0.21.0", "mypy>=1.0.0", "ruff>=0.1.0"]
+
+ [build-system]
+ requires = ["uv_build>=0.9.18,<0.10.0"]
+ build-backend = "uv_build"
@@ -0,0 +1,187 @@
+ """Fenra: Privacy-first AI cost tracking library."""
+
+ import os
+ from typing import Any
+
+ from fenra._context import clear_context, get_context, set_context
+ from fenra._core import enqueue_transaction, flush, init, shutdown
+
+ __version__ = "0.1.0"
+
+ # Check environment variable for disabling auto-tracking
+ _auto_track_disabled = os.environ.get("FENRA_DISABLE_AUTO_TRACK", "").lower() in (
+     "1",
+     "true",
+     "yes",
+ )
+
+
+ def disable_auto_track() -> None:
+     """
+     Disable auto-tracking/patching of AI provider SDKs.
+
+     Provider SDKs are patched when fenra is imported, so to reliably prevent
+     automatic patching set the environment variable FENRA_DISABLE_AUTO_TRACK=1
+     before importing fenra; this function only sets the same flag at runtime.
+
+     When auto-tracking is disabled, you can still use fenra.track() for manual
+     tracking.
+     """
+     global _auto_track_disabled
+     _auto_track_disabled = True
+
+
+ def track(
+     provider: str,
+     model: str,
+     input_tokens: int,
+     output_tokens: int,
+     *,
+     total_tokens: int | None = None,
+     reasoning_tokens: int | None = None,
+     cached_tokens: int | None = None,
+     **context_overrides: Any,
+ ) -> None:
+     """
+     Manually track an AI call.
+
+     Merges context_overrides with the current context from set_context().
+
+     Args:
+         provider: Provider name (e.g., "openai", "anthropic", "custom")
+         model: Model identifier (e.g., "gpt-4o", "claude-3-opus")
+         input_tokens: Number of input/prompt tokens
+         output_tokens: Number of output/completion tokens
+         total_tokens: Total tokens (calculated if not provided)
+         reasoning_tokens: Reasoning tokens (for o1/o3 models)
+         cached_tokens: Cached tokens (if applicable)
+         **context_overrides: Additional context to merge with current context
+     """
+     from fenra._context import get_context
+
+     # Merge context_overrides with current context
+     context = get_context()
+     context.update(context_overrides)
+
+     # Calculate total_tokens if not provided
+     if total_tokens is None:
+         total_tokens = input_tokens + output_tokens
+
+     # Build metrics
+     metrics: dict[str, Any] = {
+         "input_tokens": input_tokens,
+         "output_tokens": output_tokens,
+         "total_tokens": total_tokens,
+     }
+
+     if reasoning_tokens is not None:
+         metrics["reasoning_tokens"] = reasoning_tokens
+
+     if cached_tokens is not None:
+         metrics["cached_tokens"] = cached_tokens
+
+     # Build transaction
+     transaction = {
+         "provider": provider,
+         "model": model,
+         "usage": [
+             {
+                 "type": "tokens",
+                 "metrics": metrics,
+             }
+         ],
+         "context": context,
+     }
+
+     enqueue_transaction(transaction)
+
+
+ # Auto-patch OpenAI when imported (if OpenAI is installed)
+ def _auto_patch_openai() -> None:
+     """Automatically patch OpenAI if it's installed."""
+     if _auto_track_disabled:
+         return
+     try:
+         from fenra.integrations.openai import (
+             patch_openai,
+             patch_openai_async,
+             patch_openai_beta_parse,
+             patch_openai_beta_parse_async,
+             patch_openai_images,
+             patch_openai_images_async,
+             patch_openai_responses,
+             patch_openai_responses_async,
+         )
+
+         patch_openai()
+         patch_openai_async()
+         patch_openai_responses()
+         patch_openai_responses_async()
+         patch_openai_images()
+         patch_openai_images_async()
+         patch_openai_beta_parse()
+         patch_openai_beta_parse_async()
+     except ImportError:
+         # OpenAI not installed, that's fine
+         pass
+
+
+ # Auto-patch Gemini when imported (if Gemini is installed)
+ def _auto_patch_gemini() -> None:
+     """Automatically patch Gemini if it's installed."""
+     if _auto_track_disabled:
+         return
+     try:
+         from fenra.integrations.gemini import (
+             patch_gemini,
+             patch_gemini_async,
+             patch_gemini_stream,
+             patch_gemini_stream_async,
+         )
+
+         patch_gemini()
+         patch_gemini_async()
+         patch_gemini_stream()
+         patch_gemini_stream_async()
+     except ImportError:
+         # Gemini not installed, that's fine
+         pass
+
+
+ # Auto-patch Anthropic when imported (if Anthropic is installed)
+ def _auto_patch_anthropic() -> None:
+     """Automatically patch Anthropic if it's installed."""
+     if _auto_track_disabled:
+         return
+     try:
+         from fenra.integrations.anthropic import (
+             patch_anthropic,
+             patch_anthropic_async,
+             patch_anthropic_stream,
+             patch_anthropic_stream_async,
+         )
+
+         patch_anthropic()
+         patch_anthropic_async()
+         patch_anthropic_stream()
+         patch_anthropic_stream_async()
+     except ImportError:
+         # Anthropic not installed, that's fine
+         pass
+
+
+ # Auto-patch on import
+ _auto_patch_openai()
+ _auto_patch_gemini()
+ _auto_patch_anthropic()
+
+ __all__ = [
+     "init",
+     "shutdown",
+     "flush",
+     "track",
+     "set_context",  # Primary context API - merges into existing
+     "get_context",
+     "clear_context",
+     "disable_auto_track",
+ ]
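From the `track()` implementation above, a call such as `fenra.track(provider="custom", model="my-model", input_tokens=100, output_tokens=50, user_id="u_123")` hands roughly the following dict to `enqueue_transaction()` (a sketch derived from the construction in the code; anything the backend adds later is not shown):

```python
# Transaction built by track() for the call above, assuming an empty prior context
transaction = {
    "provider": "custom",
    "model": "my-model",
    "usage": [
        {
            "type": "tokens",
            "metrics": {
                "input_tokens": 100,
                "output_tokens": 50,
                "total_tokens": 150,  # computed as input_tokens + output_tokens
            },
        }
    ],
    "context": {"user_id": "u_123"},  # set_context() values merged with overrides
}
```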
@@ -0,0 +1,42 @@
+ """Context management using contextvars for async-safe context propagation."""
+
+ from contextvars import ContextVar
+ from typing import Any
+
+ _fenra_context: ContextVar[dict[str, Any]] = ContextVar("fenra_context", default={})
+
+
+ def set_context(**kwargs: Any) -> None:
+     """
+     Merge new metadata into the current context.
+
+     This function is additive. Each call merges new keys into the existing
+     context dictionary. Later calls with the same key will overwrite.
+
+     Example middleware stacking:
+         # Auth middleware
+         fenra.set_context(billable_customer_id="acme-corp", user_id="u_123")
+
+         # Feature middleware (adds to existing context)
+         fenra.set_context(feature="chat-assistant", environment="production")
+
+         # Final context: {billable_customer_id, user_id, feature, environment}
+     """
+     current = _fenra_context.get().copy()
+     current.update(kwargs)
+     _fenra_context.set(current)
+
+
+ def get_context() -> dict[str, Any]:
+     """
+     Get the full merged context dictionary.
+
+     Returns a copy to prevent external mutation.
+     Called by auto-instrumentation to include in logging payload.
+     """
+     return _fenra_context.get().copy()
+
+
+ def clear_context() -> None:
+     """Reset context to empty dict. Useful at request boundaries."""
+     _fenra_context.set({})
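The `clear_context()` docstring mentions request boundaries; one way to wire that up is a small wrapper around a request handler. This is an illustrative sketch only — the handler signature and the `request` attributes are made up, not part of fenra:

```python
import fenra

def with_fenra_context(handler):
    """Wrap a request handler so per-request metadata is attached, then discarded."""
    def wrapped(request):
        # Hypothetical request attributes; use whatever your framework provides
        fenra.set_context(user_id=request.user_id, billable_customer_id=request.tenant)
        try:
            return handler(request)  # AI calls in here pick up the context automatically
        finally:
            fenra.clear_context()  # reset at the request boundary
    return wrapped
```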
@@ -0,0 +1,229 @@
+ """Core Fenra SDK functionality: background worker, queue, and configuration."""
+
+ import logging
+ import queue
+ import threading
+ import time
+ from dataclasses import dataclass, field
+ from typing import Any
+
+ import requests
+
+ logger = logging.getLogger(__name__)
+
+
+ @dataclass
+ class FenraConfig:
+     """Configuration for Fenra SDK."""
+
+     api_key: str
+     base_url: str = "https://ingest.fenra.io"
+     content_logging: bool = False  # Privacy-first default
+     enabled: bool = True
+     flush_interval: float = 1.0
+     batch_size: int = 10
+     queue_size: int = 10000
+
+     def __post_init__(self) -> None:
+         """Validate configuration."""
+         if not self.api_key:
+             raise ValueError("api_key is required")
+
+
+ # Global configuration singleton
+ _config: FenraConfig | None = None
+ _transaction_queue: queue.Queue[dict[str, Any]] | None = None
+ _worker_thread: threading.Thread | None = None
+ _worker_stop_event: threading.Event | None = None
+
+
+ def _get_config() -> FenraConfig:
+     """Get the global configuration, raising if not initialized."""
+     if _config is None:
+         raise RuntimeError("Fenra not initialized. Call fenra.init() first.")
+     return _config
+
+
+ def _get_queue() -> queue.Queue[dict[str, Any]]:
+     """Get the global transaction queue, raising if not initialized."""
+     if _transaction_queue is None:
+         raise RuntimeError("Fenra not initialized. Call fenra.init() first.")
+     return _transaction_queue
+
+
+ def _background_worker() -> None:
+     """Background worker thread that processes transactions from the queue."""
+     batch: list[dict[str, Any]] = []
+     last_flush = time.time()
+
+     while True:
+         try:
+             # Check if we should stop
+             if _worker_stop_event and _worker_stop_event.is_set():
+                 # Flush any remaining items before stopping
+                 if batch:
+                     _flush_batch(batch)
+                 break
+
+             config = _get_config()
+             if not config.enabled:
+                 time.sleep(0.1)
+                 continue
+
+             # Try to get an item from the queue with timeout
+             try:
+                 transaction = _get_queue().get(timeout=config.flush_interval)
+                 batch.append(transaction)
+             except queue.Empty:
+                 # Timeout - flush if we have items and enough time has passed
+                 if batch and (time.time() - last_flush) >= config.flush_interval:
+                     _flush_batch(batch)
+                     batch = []
+                     last_flush = time.time()
+                 continue
+
+             # Check if we should flush due to batch size
+             if len(batch) >= config.batch_size:
+                 _flush_batch(batch)
+                 batch = []
+                 last_flush = time.time()
+
+         except Exception as e:
+             # Never crash the host application
+             logger.error(f"Error in Fenra background worker: {e}", exc_info=True)
+             time.sleep(0.1)
+
+
+ def _flush_batch(batch: list[dict[str, Any]]) -> None:
+     """Flush a batch of transactions to the Fenra API."""
+     if not batch:
+         return
+
+     try:
+         config = _get_config()
+
+         # Prepare payload - single transaction or bulk
+         if len(batch) == 1:
+             payload = batch[0]
+         else:
+             payload = {"transactions": batch}
+
+         # Send to API
+         response = requests.post(
+             f"{config.base_url}/usage/transactions",
+             json=payload,
+             headers={"X-Api-Key": config.api_key},
+             timeout=10.0,
+         )
+
+         # Log errors but don't raise
+         if response.status_code >= 400:
+             logger.warning(
+                 f"Fenra API error: {response.status_code} - {response.text}"
+             )
+         else:
+             logger.debug(f"Fenra: Successfully sent {len(batch)} transaction(s)")
+
+     except Exception as e:
+         # Never crash the host application
+         logger.error(f"Error sending transactions to Fenra API: {e}", exc_info=True)
+
+
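From `_flush_batch()` above, a batch of more than one transaction is wrapped in a `transactions` list and POSTed to `{base_url}/usage/transactions` with the `X-Api-Key` header. A sketch of that wire shape, with the per-transaction bodies abbreviated (see the `track()` example earlier for the full structure):

```python
# Request body _flush_batch() builds for a two-item batch (fields abbreviated)
payload = {
    "transactions": [
        {"provider": "openai", "model": "gpt-4o", "usage": [], "context": {}},
        {"provider": "custom", "model": "my-model", "usage": [], "context": {}},
    ]
}
# Sent as: requests.post(f"{base_url}/usage/transactions", json=payload,
#                        headers={"X-Api-Key": api_key}, timeout=10.0)
```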
+ def init(
+     api_key: str,
+     *,
+     base_url: str = "https://ingest.fenra.io",
+     content_logging: bool = False,
+     enabled: bool = True,
+     flush_interval: float = 1.0,
+     batch_size: int = 10,
+     queue_size: int = 10000,
+ ) -> None:
+     """
+     Initialize Fenra SDK.
+
+     Args:
+         api_key: Your Fenra API key
+         base_url: Base URL for Fenra API (default: https://ingest.fenra.io)
+         content_logging: Whether to log prompt content (default: False, privacy-first)
+         enabled: Whether tracking is enabled (default: True)
+         flush_interval: Seconds between automatic flushes (default: 1.0)
+         batch_size: Maximum transactions per batch (default: 10)
+         queue_size: Maximum queue size before dropping transactions (default: 10000)
+     """
+     global _config, _transaction_queue, _worker_thread, _worker_stop_event
+
+     # Stop existing worker if any
+     shutdown()
+
+     # Create configuration
+     _config = FenraConfig(
+         api_key=api_key,
+         base_url=base_url,
+         content_logging=content_logging,
+         enabled=enabled,
+         flush_interval=flush_interval,
+         batch_size=batch_size,
+         queue_size=queue_size,
+     )
+
+     _transaction_queue = queue.Queue(maxsize=_config.queue_size)
+
+     # Create and start worker thread
+     _worker_stop_event = threading.Event()
+     _worker_thread = threading.Thread(target=_background_worker, daemon=True)
+     _worker_thread.start()
+
+     logger.info("Fenra SDK initialized")
+
+
+ def shutdown() -> None:
+     """Shutdown Fenra SDK and flush remaining transactions."""
+     global _worker_thread, _worker_stop_event
+
+     if _worker_thread is not None and _worker_thread.is_alive():
+         if _worker_stop_event:
+             _worker_stop_event.set()
+         _worker_thread.join(timeout=5.0)
+     _worker_thread = None
+     _worker_stop_event = None
+
+     logger.info("Fenra SDK shut down")
+
+
+ def flush() -> None:
+     """Manually flush all pending transactions."""
+     if _transaction_queue is None:
+         return
+
+     batch: list[dict[str, Any]] = []
+     # Drain the queue
+     while True:
+         try:
+             transaction = _transaction_queue.get_nowait()
+             batch.append(transaction)
+         except queue.Empty:
+             break
+
+     if batch:
+         _flush_batch(batch)
+
+
+ def enqueue_transaction(transaction: dict[str, Any]) -> None:
+     """
+     Enqueue a transaction for background processing.
+
+     This function is safe to call from any thread. Failures are logged
+     but never raise exceptions.
+     """
+     try:
+         config = _get_config()
+         if not config.enabled:
+             return
+
+         _get_queue().put_nowait(transaction)
+     except queue.Full:
+         logger.warning("Fenra transaction queue is full, dropping transaction")
+     except Exception as e:
+         # Never crash the host application
+         logger.error(f"Error enqueueing Fenra transaction: {e}", exc_info=True)
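Putting the core module together, a short-lived script might tune the worker and shut down explicitly so nothing queued is lost on exit. The parameter values below are illustrative; they map directly onto the `init()` arguments above:

```python
import fenra

# Smaller batches and a faster flush cadence for a short-lived job
fenra.init(
    api_key="your-fenra-api-key",
    flush_interval=0.5,  # seconds between background flushes
    batch_size=5,        # max transactions per POST
)

for _ in range(3):
    fenra.track(provider="custom", model="my-model", input_tokens=10, output_tokens=5)

fenra.flush()     # drain anything still sitting in the queue
fenra.shutdown()  # stop the worker thread; it flushes its in-progress batch first
```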
@@ -0,0 +1 @@
+ """Fenra integrations for various AI providers."""