acl-layer-py 1.2.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,24 @@
1
+ # Environment files
2
+ .env
3
+ .env.development
4
+ .env.production
5
+
6
+ # Python
7
+ __pycache__/
8
+ *.pyc
9
+ *.pyo
10
+ *.pyd
11
+ venv/
12
+ .venv/
13
+
14
+ # Logs & telemetry
15
+ logs/
16
+ *.log
17
+ telemetry/*.jsonl
18
+
19
+ # Misc
20
+ .DS_Store
21
+ node_modules/
22
+ *.egg-info/
23
+ dist/
24
+ build/
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 ACL Team
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,161 @@
1
+ Metadata-Version: 2.4
2
+ Name: acl-layer-py
3
+ Version: 1.2.0
4
+ Summary: Official Python client for the Adaptive Context Layer (ACL)
5
+ Project-URL: Homepage, https://github.com/acl-team/acl-layer-py
6
+ Project-URL: Documentation, https://docs.fridayaicore.in/python-sdk
7
+ Project-URL: Repository, https://github.com/acl-team/acl-layer-py
8
+ Project-URL: Bug Tracker, https://github.com/acl-team/acl-layer-py/issues
9
+ Project-URL: API Documentation, https://docs.fridayaicore.in
10
+ Author-email: ACL Team <support@fridayaicore.in>
11
+ Maintainer-email: ACL Team <support@fridayaicore.in>
12
+ License: MIT License
13
+
14
+ Copyright (c) 2026 ACL Team
15
+
16
+ Permission is hereby granted, free of charge, to any person obtaining a copy
17
+ of this software and associated documentation files (the "Software"), to deal
18
+ in the Software without restriction, including without limitation the rights
19
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
20
+ copies of the Software, and to permit persons to whom the Software is
21
+ furnished to do so, subject to the following conditions:
22
+
23
+ The above copyright notice and this permission notice shall be included in all
24
+ copies or substantial portions of the Software.
25
+
26
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
31
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32
+ SOFTWARE.
33
+ License-File: LICENSE
34
+ Keywords: acl,adaptive,ai,context,llm,machine-learning,optimization,token,window
35
+ Classifier: Development Status :: 5 - Production/Stable
36
+ Classifier: Intended Audience :: Developers
37
+ Classifier: License :: OSI Approved :: MIT License
38
+ Classifier: Operating System :: OS Independent
39
+ Classifier: Programming Language :: Python :: 3
40
+ Classifier: Programming Language :: Python :: 3.8
41
+ Classifier: Programming Language :: Python :: 3.9
42
+ Classifier: Programming Language :: Python :: 3.10
43
+ Classifier: Programming Language :: Python :: 3.11
44
+ Classifier: Programming Language :: Python :: 3.12
45
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
46
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
47
+ Requires-Python: >=3.8
48
+ Requires-Dist: requests>=2.25.0
49
+ Requires-Dist: urllib3>=1.26.0
50
+ Provides-Extra: dev
51
+ Requires-Dist: black>=21.0.0; extra == 'dev'
52
+ Requires-Dist: flake8>=3.8.0; extra == 'dev'
53
+ Requires-Dist: isort>=5.0.0; extra == 'dev'
54
+ Requires-Dist: mypy>=0.800; extra == 'dev'
55
+ Requires-Dist: pytest-cov>=2.10.0; extra == 'dev'
56
+ Requires-Dist: pytest>=6.0.0; extra == 'dev'
57
+ Provides-Extra: test
58
+ Requires-Dist: pytest-cov>=2.10.0; extra == 'test'
59
+ Requires-Dist: pytest>=6.0.0; extra == 'test'
60
+ Requires-Dist: responses>=0.12.0; extra == 'test'
61
+ Description-Content-Type: text/markdown
62
+
63
+ # ACL Python SDK (acl-layer-py)
64
+
65
+ Official Python client for the Adaptive Context Layer (ACL). Optimize context delivery, detect loops, and ensure LLM reliability with a single interface.
66
+
67
+ ```bash
68
+ pip install acl-layer-py
69
+ ```
70
+
71
+ ## Quick Start
72
+
73
+ ```python
74
+ from acl import ACLClient
75
+
76
+ # Initialize the client
77
+ client = ACLClient(api_key="sk-acl-...")
78
+ ```
79
+
80
+ ### 1. Smart Completion (Recommended)
81
+ The `complete()` method is a high-level interface that automatically optimizes your prompt and handles provider interactions with built-in retries.
82
+
83
+ ```python
84
+ with ACLClient(api_key="sk-acl-...") as client:
85
+ result = client.complete(
86
+ prompt="Analyze this technical document...",
87
+ # Provide keys for different providers; ACL chooses the best one
88
+ keys={
89
+ "openai": "sk-...",
90
+ "anthropic": "sk-ant-...",
91
+ "google": "AIza..."
92
+ },
93
+ model="gpt-4o", # Optional: can be automatically optimized
94
+ session_id="session_001",
95
+ max_retries=3
96
+ )
97
+
98
+ if result["success"]:
99
+ print(f"Response: {result['response']}")
100
+ print(f"Executed via: {result['provider']} ({result['model']})")
101
+ else:
102
+ print(f"Error: {result['error']}")
103
+ ```
104
+
105
+ ### 2. Manual Optimization
106
+ If you prefer to call your LLM directly, use `adaptive_window()` to get optimization parameters first.
107
+
108
+ ```python
109
+ with ACLClient(api_key="sk-acl-...") as client:
110
+ sync = client.adaptive_window(
111
+ text="Analyze this document...",
112
+ model="gpt-4o",
113
+ session_id="session_001"
114
+ )
115
+ # Use the optimized token budget for your direct LLM call
116
+ optimized_budget = sync["window"]["final_token_budget"]
117
+ ```
118
+
119
+ ## Core Methods
120
+
121
+ | Method | Description |
122
+ |--------|-------------|
123
+ | `complete()` | Execute prompts with automatic optimization, routing, and retries. |
124
+ | `adaptive_window()` | Get optimized context window and model recommendations. |
125
+ | `get_telemetry()` | Extract detailed performance and savings metrics. |
126
+ | `health_check()` | Verify ACL service availability. |
127
+
128
+ ## `complete()` Parameters
129
+
130
+ | Parameter | Type | Required | Description |
131
+ |-----------|------|----------|-------------|
132
+ | `prompt` | str | ✅ | The input text/prompt to process. |
133
+ | `keys` | dict | ❌ | Map of `{provider: key}` for automated routing. |
134
+ | `llm_api_key` | str | ❌ | LLM API key (used if `keys` is not provided). |
135
+ | `model` | str | ❌ | Target model (e.g., 'gpt-4o', 'claude-3-sonnet'). |
136
+ | `provider` | str | ❌ | Specific provider to use. |
137
+ | `session_id` | str | ❌ | Unique ID for session tracking and context health. |
138
+ | `max_retries` | int | ❌ | Number of retry attempts on failure (default: 3). |
139
+
140
+ ## Reliability Features
141
+
142
+ The SDK includes several built-in features to ensure your LLM calls are robust:
143
+ - **Automatic Retries**: Handles HTTP 429 (Rate Limit), 5xx (Server Errors), and timeouts.
144
+ - **Backoff Logic**: Implements exponential backoff (1s → 2s → 4s) to respect provider limits.
145
+ - **Silent Failure Detection**: Detects empty or incomplete responses and retries automatically.
146
+
147
+ ## Response Structure
148
+
149
+ | Field | Type | Description |
150
+ |-------|------|-------------|
151
+ | `response` | object | The processed LLM response. |
152
+ | `success` | boolean | Whether the request was successful. |
153
+ | `model` | string | The specific model used for execution. |
154
+ | `provider` | string | The provider used (openai, anthropic, etc.). |
155
+ | `context_health`| object | Status of the session context (drift/health). |
156
+ | `attempts_made` | int | Total number of execution attempts. |
157
+ | `error` | string | Error details if the request failed. |
158
+
159
+ ## Security & Privacy
160
+ - **Client-Side Execution**: All LLM calls are made directly from your environment.
161
+ - **Key Safety**: Your LLM API keys are used locally and are **never** stored or transmitted to ACL servers.
@@ -0,0 +1,99 @@
1
+ # ACL Python SDK (acl-layer-py)
2
+
3
+ Official Python client for the Adaptive Context Layer (ACL). Optimize context delivery, detect loops, and ensure LLM reliability with a single interface.
4
+
5
+ ```bash
6
+ pip install acl-layer-py
7
+ ```
8
+
9
+ ## Quick Start
10
+
11
+ ```python
12
+ from acl import ACLClient
13
+
14
+ # Initialize the client
15
+ client = ACLClient(api_key="sk-acl-...")
16
+ ```
17
+
18
+ ### 1. Smart Completion (Recommended)
19
+ The `complete()` method is a high-level interface that automatically optimizes your prompt and handles provider interactions with built-in retries.
20
+
21
+ ```python
22
+ with ACLClient(api_key="sk-acl-...") as client:
23
+ result = client.complete(
24
+ prompt="Analyze this technical document...",
25
+ # Provide keys for different providers; ACL chooses the best one
26
+ keys={
27
+ "openai": "sk-...",
28
+ "anthropic": "sk-ant-...",
29
+ "google": "AIza..."
30
+ },
31
+ model="gpt-4o", # Optional: can be automatically optimized
32
+ session_id="session_001",
33
+ max_retries=3
34
+ )
35
+
36
+ if result["success"]:
37
+ print(f"Response: {result['response']}")
38
+ print(f"Executed via: {result['provider']} ({result['model']})")
39
+ else:
40
+ print(f"Error: {result['error']}")
41
+ ```
42
+
43
+ ### 2. Manual Optimization
44
+ If you prefer to call your LLM directly, use `adaptive_window()` to get optimization parameters first.
45
+
46
+ ```python
47
+ with ACLClient(api_key="sk-acl-...") as client:
48
+ sync = client.adaptive_window(
49
+ text="Analyze this document...",
50
+ model="gpt-4o",
51
+ session_id="session_001"
52
+ )
53
+ # Use the optimized token budget for your direct LLM call
54
+ optimized_budget = sync["window"]["final_token_budget"]
55
+ ```
56
+
57
+ ## Core Methods
58
+
59
+ | Method | Description |
60
+ |--------|-------------|
61
+ | `complete()` | Execute prompts with automatic optimization, routing, and retries. |
62
+ | `adaptive_window()` | Get optimized context window and model recommendations. |
63
+ | `get_telemetry()` | Extract detailed performance and savings metrics. |
64
+ | `health_check()` | Verify ACL service availability. |
65
+
66
+ ## `complete()` Parameters
67
+
68
+ | Parameter | Type | Required | Description |
69
+ |-----------|------|----------|-------------|
70
+ | `prompt` | str | ✅ | The input text/prompt to process. |
71
+ | `keys` | dict | ❌ | Map of `{provider: key}` for automated routing. |
72
+ | `llm_api_key` | str | ❌ | LLM API key (used if `keys` is not provided). |
73
+ | `model` | str | ❌ | Target model (e.g., 'gpt-4o', 'claude-3-sonnet'). |
74
+ | `provider` | str | ❌ | Specific provider to use. |
75
+ | `session_id` | str | ❌ | Unique ID for session tracking and context health. |
76
+ | `max_retries` | int | ❌ | Number of retry attempts on failure (default: 3). |
77
+
78
+ ## Reliability Features
79
+
80
+ The SDK includes several built-in features to ensure your LLM calls are robust:
81
+ - **Automatic Retries**: Handles HTTP 429 (Rate Limit), 5xx (Server Errors), and timeouts.
82
+ - **Backoff Logic**: Implements exponential backoff (1s → 2s → 4s) to respect provider limits.
83
+ - **Silent Failure Detection**: Detects empty or incomplete responses and retries automatically.
84
+
85
+ ## Response Structure
86
+
87
+ | Field | Type | Description |
88
+ |-------|------|-------------|
89
+ | `response` | object | The processed LLM response. |
90
+ | `success` | boolean | Whether the request was successful. |
91
+ | `model` | string | The specific model used for execution. |
92
+ | `provider` | string | The provider used (openai, anthropic, etc.). |
93
+ | `context_health`| object | Status of the session context (drift/health). |
94
+ | `attempts_made` | int | Total number of execution attempts. |
95
+ | `error` | string | Error details if the request failed. |
96
+
97
+ ## Security & Privacy
98
+ - **Client-Side Execution**: All LLM calls are made directly from your environment.
99
+ - **Key Safety**: Your LLM API keys are used locally and are **never** stored or transmitted to ACL servers.
@@ -0,0 +1,43 @@
1
+ """
2
+ ACL Python SDK
3
+
4
+ A thin, predictable Python SDK for the Adaptive Context Window API.
5
+
6
+ The SDK provides enterprise-grade error handling and telemetry while delegating
7
+ all intelligence and processing to the ACL backend.
8
+ """
9
+
10
+ from .client import ACLClient
11
+ from .exceptions import (
12
+ ACLBaseError,
13
+ ACLAuthError,
14
+ ACLPermissionError,
15
+ ACLRateLimitError,
16
+ ACLValidationError,
17
+ ACLServerError,
18
+ )
19
+ from .types import (
20
+ AdaptiveResponse,
21
+ TelemetryData,
22
+ ActivityInfo,
23
+ WindowInfo,
24
+ ModelAdaptation,
25
+ TokenSavings,
26
+ )
27
+
28
+ __version__ = "1.2.0"
29
+ __all__ = [
30
+ "ACLClient",
31
+ "ACLBaseError",
32
+ "ACLAuthError",
33
+ "ACLPermissionError",
34
+ "ACLRateLimitError",
35
+ "ACLValidationError",
36
+ "ACLServerError",
37
+ "AdaptiveResponse",
38
+ "TelemetryData",
39
+ "ActivityInfo",
40
+ "WindowInfo",
41
+ "ModelAdaptation",
42
+ "TokenSavings",
43
+ ]
@@ -0,0 +1,255 @@
1
+ import time
2
+ import logging
3
+ import requests
4
+ import threading
5
+ from typing import Any, Dict, List, Optional, Tuple
6
+ from acl.config import ACLConfiguration
7
+ from acl.http import HttpClient
8
+ from acl.providers import ProviderRegistry
9
+ from acl.exceptions import ACLValidationError, ACLBaseError, ACLBudgetError
10
+ from acl.session.manager import SessionManager
11
+ from acl.middlewares.retry import RetryMiddleware
12
+ from acl.middlewares.recovery import RecoveryMiddleware
13
+ from acl.middlewares.loop_detection import LoopDetectionMiddleware
14
+ from acl.middlewares.normalization import NormalizationMiddleware
15
+
16
+
17
class ACLClient:
    """
    Phase 3 & 7 — Execution Middleware & Modular SDK.

    Orchestration layer: resolves a token budget and model identity from the
    ACL Core, then runs the provider call through a middleware pipeline
    (Retry -> Recovery -> LoopDetection -> Normalization -> Provider).

    Supports both the legacy options-dict call style and the keyword style
    shown in the README (``ACLClient(api_key="sk-acl-...")``), and can be
    used as a context manager (``with ACLClient(...) as client:``).
    """

    def __init__(self, options: Optional[Dict[str, Any]] = None, **kwargs: Any):
        """Create a client.

        Args:
            options: Legacy-style configuration dict.
            **kwargs: Keyword-style configuration; overrides ``options`` keys.
                Recognized keys: ``api_key`` (required), ``base_url``,
                ``timeout_ms``, ``session_policy``, ``session_limit``,
                ``environment``.

        Raises:
            ACLValidationError: If no ``api_key`` is supplied.
        """
        # Merge both call styles without mutating the caller's dict.
        opts: Dict[str, Any] = {**(options or {}), **kwargs}
        api_key = opts.get("api_key")
        if not api_key:
            raise ACLValidationError("API key is required")
        self._config = ACLConfiguration(
            api_key=api_key,
            base_url=opts.get("base_url"),
            timeout_ms=opts.get("timeout_ms"),
        )
        self._http_client = HttpClient(
            api_key=self._config.api_key,
            base_url=self._config.base_url,
            # HttpClient expects seconds; the config stores milliseconds.
            timeout=self._config.timeout_ms // 1000,
        )

        # Phase 5 — Session Policy Flexibility
        self._session_manager = SessionManager(
            policy=opts.get("session_policy", "FIFO"),
            limit=opts.get("session_limit", 200),
        )

        # Phase 3 — Middleware Pipeline Assembly
        # Chain: Retry -> Recovery -> LoopDetection -> Normalization -> Provider
        self._middlewares = [
            RetryMiddleware(),
            RecoveryMiddleware(self._session_manager),
            LoopDetectionMiddleware(self._session_manager),
            NormalizationMiddleware(),
        ]

        self._lock = threading.Lock()
        self._sdk_version = "1.2.0"
        self._environment = opts.get("environment", "production")

    def __enter__(self) -> "ACLClient":
        """Support ``with ACLClient(...) as client:`` as documented in the README."""
        return self

    def __exit__(self, exc_type, exc, tb) -> None:
        """Release sessions and the HTTP client on context exit."""
        self.close()

    def adaptive_window(
        self, params: Optional[Dict[str, Any]] = None, **kwargs: Any
    ) -> Dict[str, Any]:
        """Calculates budget from the ACL Core.

        Accepts a params dict and/or keyword arguments (keywords win).
        The caller's dict is copied, never mutated.
        """
        url = "/api/v1/adaptive/window"
        payload: Dict[str, Any] = {**(params or {}), **kwargs}
        # Default integration mode for complete calls
        payload.setdefault("integration_mode", "complete_mode")
        return self._http_client.post(url, json=payload)

    def complete(
        self, params: Optional[Dict[str, Any]] = None, **kwargs: Any
    ) -> Dict[str, Any]:
        """
        Orchestrates an LLM completion request using the Universal Adaptation strategy.
        Phase 7: Modular Model-Agnostic SDK.

        Accepts a params dict (legacy) and/or keyword arguments as shown in
        the README (keywords take precedence).

        Returns:
            A dict with at least ``success``; on failure an ``error`` message.
        """
        params = {**(params or {}), **kwargs}
        prompt = params.get("prompt")
        session_id = params.get("session_id", "default")

        # 1. Automatic Detection from Metadata
        detected_model, detected_provider = self._detect_client_model(params)

        # 2. Pipeline Execution Context (Reactive mode)
        execution_context = {
            **params,
            "session_id": session_id,
            "model": params.get("model") or detected_model,
            "provider": params.get("provider") or detected_provider,
            "_context_health": {},
        }

        # 3. Get Universal Budget & Identity from ACL Core
        try:
            # We send whatever model info we detected/received to the server
            acl_resp = self.adaptive_window(
                {
                    "text": prompt,
                    "model": execution_context.get("model"),
                    "session_id": session_id,
                    "metadata": params.get("metadata", {}),
                }
            )

            # Universal Adaptation: Use the server's resolved identity
            resolved = acl_resp.get("resolved_model", {})
            execution_context["model"] = (
                resolved.get("model_id") or execution_context["model"]
            )
            execution_context["provider"] = (
                resolved.get("provider") or execution_context["provider"]
            )

            # Phase 2 — Budget Validation
            budget = acl_resp.get("window", {}).get("final_token_budget", 1000)
            is_fast_path = (
                acl_resp.get("is_fast_path", False)
                or acl_resp.get("window", {}).get("is_fast_path", False)
                or acl_resp.get("metadata", {}).get("is_fast_path", False)
                or params.get("is_fast_path", False)
            )

            if is_fast_path:
                # Skip max_tokens entirely for fast path
                execution_context.pop("max_tokens", None)
            else:
                # Finalize optimization strategy
                self._sync_context_security(execution_context, budget)

            execution_context["_context_health"] = acl_resp.get("context_health", {})

        except Exception:
            # Safe fallback: continue with defaults instead of failing the
            # call, but leave a debug trace rather than swallowing silently.
            logging.getLogger(__name__).debug(
                "adaptive_window failed; using safe fallback", exc_info=True
            )
            self._sync_context_security(execution_context, 1000)
            # Note: the keys always exist (possibly None), so setdefault would
            # be a no-op here — replace falsy values explicitly instead.
            execution_context["model"] = execution_context.get("model") or "gpt-4o"
            execution_context["provider"] = (
                execution_context.get("provider") or "openai"
            )

        # 4. Smart Key Selection
        # If 'keys' dict is provided, pick the key based on the provider
        api_key = params.get("llm_api_key")
        if not api_key and "keys" in params:
            # Guard: the provider may still be unresolved (None).
            provider_key = (execution_context.get("provider") or "").lower()
            api_key = params["keys"].get(provider_key) or params["keys"].get("default")

        if not api_key:
            return {
                "success": False,
                "error": "No LLM API key found for resolved provider",
            }

        # 5. Create Execution Chain
        try:
            provider_instance = ProviderRegistry.get(execution_context["provider"])
        except Exception:
            # Try 'universal' as fallback for OpenAI-compatible models
            provider_instance = ProviderRegistry.get("universal")

        # Standard execution function
        def _execute_provider(ctx: Dict[str, Any]) -> Dict[str, Any]:
            # Filter out internal ACL parameters that providers don't understand
            internal_keys = {
                "prompt",
                "model",
                "llm_api_key",
                "max_tokens",
                "temperature",
                "stream",
                "session_id",
                "provider",
                "_attempt",
                "_context_health",
                "keys",
                "metadata",
                "is_fast_path",
                "_recovery_mode",
                "_recovery_attempt",
            }

            return provider_instance.call(
                prompt=ctx["prompt"],
                model=ctx["model"],
                api_key=api_key,
                max_tokens=ctx.get("max_tokens"),
                temperature=ctx.get("temperature", 0.7),
                stream=ctx.get("stream", False),
                # Only pass provider-specific arguments (e.g. base_url)
                **{k: v for k, v in ctx.items() if k not in internal_keys},
            )

        # 6. Wrap with Middlewares (outermost middleware runs first).
        def _bind(mw, nxt):
            # Bind loop variables eagerly to avoid late-binding closures.
            return lambda ctx: mw(ctx, nxt)

        pipeline = _execute_provider
        for middleware in reversed(self._middlewares):
            pipeline = _bind(middleware, pipeline)

        return pipeline(execution_context)

    def _detect_client_model(
        self, params: Dict[str, Any]
    ) -> Tuple[Optional[str], Optional[str]]:
        """Extract (model, provider) from request metadata automatically.

        Sources, in order: ``metadata`` dict, common ``X-Model`` headers,
        then provider inference from the LLM API key prefix. Either element
        may be None when nothing is detected.
        """
        metadata = params.get("metadata", {})
        headers = params.get("headers", {})

        # 1. Try metadata first
        model = metadata.get("model") or metadata.get("model_name")
        provider = metadata.get("provider")

        # 2. Try common header patterns
        if not model:
            model = headers.get("X-Model") or headers.get("x-model")

        # 3. Try to infer from API key prefix if present
        llm_key = params.get("llm_api_key", "")
        if not provider and llm_key:
            if llm_key.startswith("sk-ant-"):
                provider = "anthropic"
            elif llm_key.startswith("AIza"):
                provider = "google"
            elif llm_key.startswith("sk-"):
                provider = "openai"

        return model, provider

    def get_provider_metadata(self) -> Dict[str, Any]:
        """
        Phase 6 — Provider Metadata Sync.
        Optionally fetch dynamic capabilities from ACL Core.

        Falls back to a local registry summary when the Core is unreachable.
        """
        url = "/api/v1/providers/metadata"
        try:
            return self._http_client.get(url)
        except Exception:
            # Fallback — local registry summary. Materialize as a list: a
            # dict-keys view is not JSON-serializable.
            # NOTE(review): reaches into ProviderRegistry._registry (private);
            # a public listing API on the registry would be cleaner.
            return {"providers": list(ProviderRegistry._registry.keys())}

    def close_session(self, session_id: str):
        """Drop a single session from the session manager."""
        self._session_manager.pop_session(session_id)

    def close(self):
        """Release all sessions and the underlying HTTP resources."""
        self.close_all_sessions()
        self._http_client.close()

    def close_all_sessions(self):
        """Close every tracked session.

        Snapshot the ids first: close_session mutates the manager's state,
        and mutating a collection while iterating it is unsafe.
        """
        for sid in list(self._session_manager.list_sessions()):
            self.close_session(sid)

    def _sync_context_security(self, ctx: Dict[str, Any], budget: int):
        """
        Synchronizes internal provider security context.
        This ensures that the execution environment is isolated and valid.

        A non-positive budget is replaced by a safe floor of 500 tokens and
        flagged via ``_security_audit_fallback``.
        """
        if budget <= 0:
            budget = 500
            ctx["_security_audit_fallback"] = True

        # Apply the resolved budget to the underlying provider config
        ctx["max_tokens"] = budget
@@ -0,0 +1,72 @@
1
+ """
2
+ ACL SDK Configuration
3
+
4
+ This module contains configuration constants and settings for the ACL Python SDK.
5
+ """
6
+
7
+ from typing import Dict, Any, Optional
8
+
9
+
10
+ import os
11
+
12
# Default ACL Core endpoint; override with the ACL_BASE_URL env var.
# NOTE(review): the default scheme is plain HTTP, yet every request carries
# the API key in an Authorization header — confirm the backend forces HTTPS,
# or switch this default to an https:// URL.
DEFAULT_BASE_URL = os.getenv("ACL_BASE_URL", "http://backend.fridayaicore.in")
# Request timeout in seconds (ACLConfiguration converts to milliseconds).
DEFAULT_TIMEOUT = 30
# Default retry behaviour for transient failures.
DEFAULT_MAX_RETRIES = 3
DEFAULT_RETRY_DELAY = 1.0  # seconds between attempts (base delay)

API_VERSION = "v1"
USER_AGENT = "acl-layer-py/1.2.0"  # keep in sync with acl.__version__

# API Endpoints
ENDPOINTS = {
    # NOTE(review): "analyze" lacks the "/api" prefix used by every other
    # route — confirm this is intentional on the server side.
    "analyze": "/v1/analyze",
    "adaptive_window": f"/api/{API_VERSION}/adaptive/window",
    "health": f"/api/{API_VERSION}/health",
    "batch": f"/api/{API_VERSION}/adaptive/batch",
    "events_log": f"/api/{API_VERSION}/events/log",
}

# Headers
REQUEST_ID_HEADER = "X-ACL-Request-ID"  # per-request correlation id
API_KEY_HEADER = "Authorization"  # header that carries the bearer token

# Error mapping from HTTP status codes to SDK exceptions
# (values are exception class names; statuses not listed here presumably fall
# back to generic validation/server errors — verify against the HTTP layer).
HTTP_ERROR_MAP = {
    401: "ACLAuthError",
    403: "ACLPermissionError",
    429: "ACLRateLimitError",
}
39
+
40
+
41
class ACLConfiguration:
    """Per-client settings for the ACL SDK: API key, base URL, and timeout."""

    def __init__(
        self,
        api_key: str,
        base_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
    ):
        self.api_key = api_key
        # Fall back to the module-level default when no URL is supplied;
        # normalize away any trailing slash so paths can be appended safely.
        chosen_url = base_url or DEFAULT_BASE_URL
        self.base_url = chosen_url.rstrip("/")
        # Stored in milliseconds; DEFAULT_TIMEOUT is expressed in seconds.
        self.timeout_ms = timeout_ms if timeout_ms else DEFAULT_TIMEOUT * 1000

    def get_base_url(self) -> str:
        """Return the normalized base URL for the ACL API."""
        return self.base_url

    def get_headers(self) -> Dict[str, str]:
        """Return the default headers sent with every ACL API request."""
        # The key is sent both as a bearer token and as X-API-Key so either
        # server-side auth scheme is satisfied.
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "X-API-Key": self.api_key,
        }
        headers["Content-Type"] = "application/json"
        headers["User-Agent"] = USER_AGENT
        return headers
67
+
68
+
69
# Default request parameters
# NOTE(review): 0.23 is an unexplained tuned constant — presumably a value in
# [0, 1] accepted by the ACL Core; document its provenance and valid range.
DEFAULT_REQUEST_PARAMS = {
    "semantic_strictness": 0.23,
}