axonflow-0.4.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- axonflow/__init__.py +140 -0
- axonflow/client.py +1612 -0
- axonflow/exceptions.py +103 -0
- axonflow/interceptors/__init__.py +20 -0
- axonflow/interceptors/anthropic.py +184 -0
- axonflow/interceptors/base.py +58 -0
- axonflow/interceptors/bedrock.py +231 -0
- axonflow/interceptors/gemini.py +281 -0
- axonflow/interceptors/ollama.py +253 -0
- axonflow/interceptors/openai.py +160 -0
- axonflow/policies.py +289 -0
- axonflow/py.typed +0 -0
- axonflow/types.py +214 -0
- axonflow/utils/__init__.py +12 -0
- axonflow/utils/cache.py +102 -0
- axonflow/utils/logging.py +89 -0
- axonflow/utils/retry.py +111 -0
- axonflow-0.4.0.dist-info/METADATA +316 -0
- axonflow-0.4.0.dist-info/RECORD +22 -0
- axonflow-0.4.0.dist-info/WHEEL +5 -0
- axonflow-0.4.0.dist-info/licenses/LICENSE +21 -0
- axonflow-0.4.0.dist-info/top_level.txt +1 -0
axonflow/utils/retry.py
ADDED
@@ -0,0 +1,111 @@
"""Retry utilities for AxonFlow SDK."""

from __future__ import annotations

from typing import Any, Callable, TypeVar

from tenacity import (
    RetryCallState,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

from axonflow.types import RetryConfig

T = TypeVar("T")


class RetryHandler:
    """Handles retry logic with exponential backoff."""

    def __init__(self, config: RetryConfig) -> None:
        """Initialize retry handler.

        Args:
            config: Retry configuration
        """
        self.config = config

    def create_decorator(
        self,
        retry_on: tuple[type[Exception], ...],
    ) -> Callable[[Callable[..., T]], Callable[..., T]]:
        """Create retry decorator based on config.

        Args:
            retry_on: Exception types to retry on

        Returns:
            Decorator function
        """
        if not self.config.enabled:
            return lambda f: f

        return retry(
            stop=stop_after_attempt(self.config.max_attempts),
            wait=wait_exponential(
                multiplier=self.config.initial_delay,
                max=self.config.max_delay,
                exp_base=self.config.exponential_base,
            ),
            retry=retry_if_exception_type(retry_on),
            reraise=True,
        )

    @staticmethod
    def log_retry(retry_state: RetryCallState) -> None:
        """Log retry attempt.

        Args:
            retry_state: Current retry state
        """
        if retry_state.outcome and retry_state.outcome.failed:
            exception = retry_state.outcome.exception()
            attempt = retry_state.attempt_number
            print(f"Retry attempt {attempt} failed: {exception}")


def with_retry(
    max_attempts: int = 3,
    initial_delay: float = 1.0,
    max_delay: float = 30.0,
    exponential_base: float = 2.0,
    retry_on: tuple[type[Exception], ...] = (Exception,),
) -> Callable[[Callable[..., T]], Callable[..., T]]:
    """Decorator for adding retry logic to a function.

    Args:
        max_attempts: Maximum retry attempts
        initial_delay: Initial delay between retries
        max_delay: Maximum delay between retries
        exponential_base: Exponential backoff base
        retry_on: Exception types to retry on

    Returns:
        Decorator function
    """
    config = RetryConfig(
        enabled=True,
        max_attempts=max_attempts,
        initial_delay=initial_delay,
        max_delay=max_delay,
        exponential_base=exponential_base,
    )
    handler = RetryHandler(config)
    return handler.create_decorator(retry_on)


def create_retry_decorator(config: RetryConfig, retry_on: tuple[type[Exception], ...]) -> Any:
    """Create a retry decorator from config.

    Args:
        config: Retry configuration
        retry_on: Exception types to retry on

    Returns:
        Retry decorator
    """
    handler = RetryHandler(config)
    return handler.create_decorator(retry_on)
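For orientation (not part of the wheel), here is a minimal usage sketch of the `with_retry` decorator added above. The `fetch_status` helper and the choice of `httpx.TransportError` as the retryable exception are illustrative assumptions; `httpx` is a declared dependency of the package.

```python
import httpx

from axonflow.utils.retry import with_retry


# Hypothetical caller: retry transient transport failures (connection errors,
# timeouts) up to 3 times with exponential backoff starting at 1s, capped at
# 30s; the last exception is re-raised once attempts are exhausted.
@with_retry(max_attempts=3, initial_delay=1.0, retry_on=(httpx.TransportError,))
def fetch_status(url: str) -> int:
    response = httpx.get(url, timeout=5.0)
    response.raise_for_status()
    return response.status_code
```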
axonflow-0.4.0.dist-info/METADATA
ADDED
@@ -0,0 +1,316 @@
Metadata-Version: 2.4
Name: axonflow
Version: 0.4.0
Summary: AxonFlow Python SDK - Enterprise AI Governance in 3 Lines of Code
Author-email: AxonFlow <dev@getaxonflow.com>
Maintainer-email: AxonFlow <dev@getaxonflow.com>
License: MIT
Project-URL: Homepage, https://getaxonflow.com
Project-URL: Documentation, https://docs.getaxonflow.com/sdk/python
Project-URL: Repository, https://github.com/getaxonflow/axonflow-sdk-python
Project-URL: Changelog, https://github.com/getaxonflow/axonflow-sdk-python/blob/main/CHANGELOG.md
Project-URL: Issues, https://github.com/getaxonflow/axonflow-sdk-python/issues
Keywords: ai,governance,llm,openai,anthropic,bedrock,policy,compliance,enterprise,mcp,multi-agent
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Classifier: Typing :: Typed
Requires-Python: >=3.9
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: httpx>=0.25.0
Requires-Dist: pydantic>=2.0.0
Requires-Dist: tenacity>=8.0.0
Requires-Dist: structlog>=23.0.0
Requires-Dist: cachetools>=5.0.0
Requires-Dist: eval_type_backport>=0.2.0; python_version < "3.10"
Provides-Extra: dev
Requires-Dist: pytest>=7.0.0; extra == "dev"
Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
Requires-Dist: pytest-httpx>=0.22.0; extra == "dev"
Requires-Dist: mypy>=1.5.0; extra == "dev"
Requires-Dist: types-cachetools>=5.0.0; extra == "dev"
Requires-Dist: ruff>=0.1.0; extra == "dev"
Requires-Dist: black>=23.0.0; extra == "dev"
Requires-Dist: isort>=5.12.0; extra == "dev"
Requires-Dist: pre-commit>=3.0.0; extra == "dev"
Provides-Extra: docs
Requires-Dist: sphinx>=7.0.0; extra == "docs"
Requires-Dist: sphinx-rtd-theme>=1.3.0; extra == "docs"
Requires-Dist: sphinx-autodoc-typehints>=1.24.0; extra == "docs"
Requires-Dist: myst-parser>=2.0.0; extra == "docs"
Provides-Extra: openai
Requires-Dist: openai>=1.0.0; extra == "openai"
Provides-Extra: anthropic
Requires-Dist: anthropic>=0.18.0; extra == "anthropic"
Provides-Extra: all
Requires-Dist: openai>=1.0.0; extra == "all"
Requires-Dist: anthropic>=0.18.0; extra == "all"
Dynamic: license-file

# AxonFlow Python SDK

Enterprise AI Governance in 3 Lines of Code.

[PyPI version](https://badge.fury.io/py/axonflow)
[Python 3.9+](https://www.python.org/downloads/)
[License: MIT](https://opensource.org/licenses/MIT)
[Checked with mypy](http://mypy-lang.org/)

## Installation

```bash
pip install axonflow
```

With LLM provider support:

```bash
pip install axonflow[openai]     # OpenAI integration
pip install axonflow[anthropic]  # Anthropic integration
pip install axonflow[all]        # All integrations
```

## Quick Start

### Async Usage (Recommended)

```python
import asyncio
from axonflow import AxonFlow

async def main():
    async with AxonFlow(
        agent_url="https://your-agent.axonflow.com",
        client_id="your-client-id",
        client_secret="your-client-secret"
    ) as client:
        # Execute a governed query
        response = await client.execute_query(
            user_token="user-jwt-token",
            query="What is AI governance?",
            request_type="chat"
        )
        print(response.data)

asyncio.run(main())
```

### Sync Usage

```python
from axonflow import AxonFlow

with AxonFlow.sync(
    agent_url="https://your-agent.axonflow.com",
    client_id="your-client-id",
    client_secret="your-client-secret"
) as client:
    response = client.execute_query(
        user_token="user-jwt-token",
        query="What is AI governance?",
        request_type="chat"
    )
    print(response.data)
```

## Features

### Gateway Mode

For lowest-latency LLM calls with full governance and audit compliance:

```python
from axonflow import AxonFlow, TokenUsage

async with AxonFlow(...) as client:
    # 1. Pre-check: Get policy approval
    ctx = await client.get_policy_approved_context(
        user_token="user-jwt",
        query="Find patient records",
        data_sources=["postgres"]
    )

    if not ctx.approved:
        raise Exception(f"Blocked: {ctx.block_reason}")

    # 2. Make LLM call directly (your code)
    llm_response = await openai.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": str(ctx.approved_data)}]
    )

    # 3. Audit the call
    await client.audit_llm_call(
        context_id=ctx.context_id,
        response_summary=llm_response.choices[0].message.content[:100],
        provider="openai",
        model="gpt-4",
        token_usage=TokenUsage(
            prompt_tokens=llm_response.usage.prompt_tokens,
            completion_tokens=llm_response.usage.completion_tokens,
            total_tokens=llm_response.usage.total_tokens
        ),
        latency_ms=250
    )
```

### OpenAI Integration

Transparent governance for existing OpenAI code:

```python
from openai import OpenAI
from axonflow import AxonFlow
from axonflow.interceptors.openai import wrap_openai_client

openai = OpenAI()
axonflow = AxonFlow(...)

# Wrap client - governance is now automatic
wrapped = wrap_openai_client(openai, axonflow, user_token="user-123")

# Use as normal
response = wrapped.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Hello!"}]
)
```

### MCP Connectors

Query data through MCP connectors:

```python
# List available connectors
connectors = await client.list_connectors()

# Query a connector
result = await client.query_connector(
    user_token="user-jwt",
    connector_name="postgres",
    operation="query",
    params={"sql": "SELECT * FROM users LIMIT 10"}
)
```

### Multi-Agent Planning

Generate and execute multi-agent plans:

```python
# Generate a plan
plan = await client.generate_plan(
    query="Book a flight and hotel for my trip to Paris",
    domain="travel"
)

print(f"Plan has {len(plan.steps)} steps")

# Execute the plan
result = await client.execute_plan(plan.plan_id)
print(f"Result: {result.result}")
```

## Configuration

```python
from axonflow import AxonFlow, Mode, RetryConfig

client = AxonFlow(
    agent_url="https://your-agent.axonflow.com",
    client_id="your-client-id",
    client_secret="your-client-secret",
    license_key="optional-license-key",  # For enterprise features
    mode=Mode.PRODUCTION,                # or Mode.SANDBOX
    debug=True,                          # Enable debug logging
    timeout=60.0,                        # Request timeout in seconds
    retry_config=RetryConfig(            # Retry configuration
        enabled=True,
        max_attempts=3,
        initial_delay=1.0,
        max_delay=30.0,
    ),
    cache_enabled=True,                  # Enable response caching
    cache_ttl=60.0,                      # Cache TTL in seconds
)
```

## Error Handling

```python
from axonflow.exceptions import (
    AxonFlowError,
    PolicyViolationError,
    AuthenticationError,
    RateLimitError,
    TimeoutError,
)

try:
    response = await client.execute_query(...)
except PolicyViolationError as e:
    print(f"Blocked by policy: {e.block_reason}")
except RateLimitError as e:
    print(f"Rate limited: {e.limit}/{e.remaining}, resets at {e.reset_at}")
except AuthenticationError:
    print("Invalid credentials")
except TimeoutError:
    print("Request timed out")
except AxonFlowError as e:
    print(f"AxonFlow error: {e.message}")
```

## Response Types

All responses are Pydantic models with full type hints:

```python
from axonflow import (
    ClientResponse,
    PolicyApprovalResult,
    PlanResponse,
    ConnectorResponse,
)

# Full autocomplete and type checking support
response: ClientResponse = await client.execute_query(...)
print(response.success)
print(response.data)
print(response.policy_info.policies_evaluated)
```

## Development

```bash
# Install dev dependencies
pip install -e ".[dev]"

# Run tests
pytest

# Run linting
ruff check .
ruff format .

# Run type checking
mypy axonflow
```

## Documentation

- [API Reference](https://docs.getaxonflow.com/sdk/python/api)
- [Gateway Mode Guide](https://docs.getaxonflow.com/sdk/python/gateway-mode)
- [Examples](https://github.com/getaxonflow/axonflow/tree/main/sdk/python/examples)

## License

MIT - See [LICENSE](LICENSE) for details.
axonflow-0.4.0.dist-info/RECORD
ADDED
@@ -0,0 +1,22 @@
axonflow/__init__.py,sha256=nBRiCru0KqddQ24e9kwXGDiSRvPfkZZc8zoCsQvzOsw,3523
axonflow/client.py,sha256=qL91qxJOQcux6cESqmjTf7oWw-cWPQQoKJMxSF8QLag,54078
axonflow/exceptions.py,sha256=TYxBlqORwQTVJuaS5yQlghUO13w0KoYo6W5HvpypRUI,2415
axonflow/policies.py,sha256=f_Hc-qV5cKtxwDVH_rO7GRskebkLCBLexgIj4yUW3Ok,8417
axonflow/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
axonflow/types.py,sha256=nDhE9CwClSgE3oC5lNfDS8707G7PZuwHDhECtGFtcRM,6946
axonflow/interceptors/__init__.py,sha256=Yynmu2HMItrZzbN8AnRNgCEQDr9IfUYCdRc-BZUkCLk,660
axonflow/interceptors/anthropic.py,sha256=ZJW-Tbl2mIzWKckvDoHrDbV0Q0_x2H1sAhgiHjKRzAk,6086
axonflow/interceptors/base.py,sha256=aNfN6WESl2c19tkIJ6qTe-uhNoFRdMAxg9YgJb7To1c,1469
axonflow/interceptors/bedrock.py,sha256=FIhlQjoo9nxjCu3Ke9V8I0ZzHcOXpz1aVOWpL73JX1U,7691
axonflow/interceptors/gemini.py,sha256=eFQv3dmg0oM1eNAIg1ywubpbdYx_Cfk9Q6X0XWfrZKI,9131
axonflow/interceptors/ollama.py,sha256=hOBaR5LTSBrKOXFjoA3wPQwL15B8WD01a_pcPIF7Txg,7824
axonflow/interceptors/openai.py,sha256=_brrpE2PBoZzQxq_zkz2gbcfbx4saeIbxB8gcaswa2A,5057
axonflow/utils/__init__.py,sha256=_a6AC8YB9zkCsxVnS3jekrBnK9Vr81mqpkeW07j06s8,286
axonflow/utils/cache.py,sha256=OI7jXj3wL6vhA9WtZiJyTP4wumMUke1p6nFR9PCayBk,2381
axonflow/utils/logging.py,sha256=dfBz1rrm3LAAGN6f6VUSYp--bEPk7Wtum2gZhfdW0Jc,2509
axonflow/utils/retry.py,sha256=JVIFKWzao2HOrjfJ7ZPZl7ztUMzhl1LJyX8hX7Rm4zM,3003
axonflow-0.4.0.dist-info/licenses/LICENSE,sha256=kpfaU6d8Kw06TmnV5V2B8kTl5X73i9r0bgR86SWkFzU,1068
axonflow-0.4.0.dist-info/METADATA,sha256=YUIKPfX_-1o13LLRy7sH67-Jp96MyrYcjlK36u4HeWI,8977
axonflow-0.4.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
axonflow-0.4.0.dist-info/top_level.txt,sha256=PTnp0kT26lEgaU4UsWCJG_sPgxHh4mrw8ZTb6jH4OV4,9
axonflow-0.4.0.dist-info/RECORD,,
axonflow-0.4.0.dist-info/licenses/LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 getaxonflow

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
axonflow-0.4.0.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
axonflow