agentready-sdk 0.3.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,214 @@
1
+ Metadata-Version: 2.4
2
+ Name: agentready-sdk
3
+ Version: 0.3.0
4
+ Summary: Cut your AI token costs by 40-60%. Drop-in proxy for OpenAI, LangChain, LlamaIndex, CrewAI.
5
+ Author-email: AgentReady <christalingx@gmail.com>
6
+ License: MIT
7
+ Project-URL: Homepage, https://agentready.cloud
8
+ Project-URL: Documentation, https://agentready.cloud/docs/quickstart
9
+ Project-URL: Repository, https://github.com/christianbragliasitibt/agentready
10
+ Keywords: llm,tokens,openai,compression,langchain,llamaindex,crewai,ai,cost-reduction,mcp,proxy
11
+ Classifier: Development Status :: 4 - Beta
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: License :: OSI Approved :: MIT License
14
+ Classifier: Programming Language :: Python :: 3
15
+ Classifier: Programming Language :: Python :: 3.8
16
+ Classifier: Programming Language :: Python :: 3.9
17
+ Classifier: Programming Language :: Python :: 3.10
18
+ Classifier: Programming Language :: Python :: 3.11
19
+ Classifier: Programming Language :: Python :: 3.12
20
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
21
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
22
+ Requires-Python: >=3.8
23
+ Description-Content-Type: text/markdown
24
+ Requires-Dist: httpx>=0.24.0
25
+ Provides-Extra: openai
26
+ Requires-Dist: openai>=1.0.0; extra == "openai"
27
+ Provides-Extra: langchain
28
+ Requires-Dist: langchain-core>=0.1.0; extra == "langchain"
29
+ Provides-Extra: llamaindex
30
+ Requires-Dist: llama-index-core>=0.10.0; extra == "llamaindex"
31
+ Provides-Extra: crewai
32
+ Requires-Dist: crewai>=0.28.0; extra == "crewai"
33
+ Provides-Extra: all
34
+ Requires-Dist: openai>=1.0.0; extra == "all"
35
+ Requires-Dist: langchain-core>=0.1.0; extra == "all"
36
+ Requires-Dist: llama-index-core>=0.10.0; extra == "all"
37
+ Requires-Dist: crewai>=0.28.0; extra == "all"
38
+
39
+ # AgentReady — Python SDK
40
+
41
+ **Cut AI token costs by 40-60%.** Drop-in proxy for OpenAI, LangChain, LlamaIndex, CrewAI.
42
+
43
+ [![PyPI](https://img.shields.io/pypi/v/agentready-sdk.svg)](https://pypi.org/project/agentready-sdk/)
44
+ [![Python](https://img.shields.io/pypi/pyversions/agentready-sdk.svg)](https://pypi.org/project/agentready-sdk/)
45
+ [![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](https://opensource.org/licenses/MIT)
46
+
47
+ ## Install
48
+
49
+ ```bash
50
+ pip install agentready-sdk
51
+ ```
52
+
53
+ ## Quick Start — Drop-in Proxy (Recommended)
54
+
55
+ Just swap your `base_url`. **Zero code changes** to your existing OpenAI calls:
56
+
57
+ ```python
58
+ from openai import OpenAI
59
+
60
+ client = OpenAI(
61
+ base_url="https://agentready.cloud/v1", # ← only change needed
62
+ api_key="ak_...", # your AgentReady key
63
+ default_headers={
64
+ "X-Upstream-API-Key": "sk-...", # your OpenAI key
65
+ },
66
+ )
67
+
68
+ # Everything works exactly like before — but 40-60% cheaper
69
+ response = client.chat.completions.create(
70
+ model="gpt-4o",
71
+ messages=[{"role": "user", "content": your_long_prompt}],
72
+ )
73
+ ```
74
+
75
+ ### One-liner Helper
76
+
77
+ ```python
78
+ import agentready
79
+
80
+ client = agentready.openai("ak_...", upstream_key="sk-...")
81
+ response = client.chat.completions.create(
82
+ model="gpt-4o",
83
+ messages=[{"role": "user", "content": "Hello!"}],
84
+ )
85
+ ```
86
+
87
+ ### Async Client
88
+
89
+ ```python
90
+ client = agentready.create_client("ak_...", upstream_key="sk-...", async_client=True)
91
+ response = await client.chat.completions.create(...)
92
+ ```
93
+
94
+ ## Method 2 — Monkey-Patch
95
+
96
+ Patch all OpenAI/Anthropic calls globally with two lines:
97
+
98
+ ```python
99
+ from agentready import patch_openai
100
+ patch_openai(api_key="ak_...")
101
+
102
+ # All existing OpenAI code is now compressed automatically
103
+ from openai import OpenAI
104
+ client = OpenAI()
105
+ response = client.chat.completions.create(
106
+ model="gpt-4o",
107
+ messages=[{"role": "user", "content": your_long_prompt}],
108
+ )
109
+ ```
110
+
111
+ Or patch everything at once:
112
+
113
+ ```python
114
+ import agentready
115
+ agentready.api_key = "ak_..."
116
+ agentready.auto() # patches OpenAI + Anthropic
117
+ ```
118
+
119
+ ## Method 3 — Manual Compression
120
+
121
+ For fine-grained control:
122
+
123
+ ```python
124
+ import agentready
125
+ agentready.api_key = "ak_..."
126
+
127
+ result = agentready.compress("Your long prompt text here...")
128
+ print(result.text) # compressed text
129
+ print(result.tokens_saved) # 1,247
130
+ print(result.reduction_percent) # 52.3
131
+ print(result.savings_usd) # 0.0374
132
+ ```
133
+
134
+ ## Framework Integrations
135
+
136
+ ### LangChain
137
+
138
+ ```python
139
+ from agentready.integrations.langchain import TokenCutCallbackHandler
140
+ from langchain_openai import ChatOpenAI
141
+
142
+ handler = TokenCutCallbackHandler(api_key="ak_...")
143
+ llm = ChatOpenAI(model="gpt-4o", callbacks=[handler])
144
+ response = llm.invoke("Your very long prompt here...")
145
+ ```
146
+
147
+ ### LlamaIndex
148
+
149
+ ```python
150
+ from agentready.integrations.llamaindex import TokenCutPostprocessor
151
+
152
+ postprocessor = TokenCutPostprocessor(api_key="ak_...")
153
+ query_engine = index.as_query_engine(
154
+ node_postprocessors=[postprocessor]
155
+ )
156
+ ```
157
+
158
+ ### CrewAI
159
+
160
+ ```python
161
+ from agentready.integrations.crewai import create_crewai_llm
162
+ from crewai import Agent, Task, Crew
163
+
164
+ llm = create_crewai_llm(
165
+ agentready_key="ak_...",
166
+ upstream_key="sk-...",
167
+ model="gpt-4o",
168
+ )
169
+
170
+ agent = Agent(
171
+ role="Researcher",
172
+ goal="Research AI trends",
173
+ backstory="Expert AI researcher.",
174
+ llm=llm,
175
+ )
176
+ ```
177
+
178
+ ## How It Works
179
+
180
+ AgentReady's proxy sits between your code and OpenAI. Every request is:
181
+
182
+ 1. **Compressed** — redundant phrasing removed, verbose text condensed
183
+ 2. **Forwarded** — sent to OpenAI with your upstream key
184
+ 3. **Returned** — response comes back unchanged
185
+
186
+ Code blocks, URLs, numbers, and key terms are always preserved.
187
+
188
+ ## Configuration
189
+
190
+ ```python
191
+ # Proxy mode — compression level via header
192
+ client = agentready.openai(
193
+ "ak_...",
194
+ upstream_key="sk-...",
195
+ compression_level="aggressive", # "light", "standard", "aggressive"
196
+ )
197
+
198
+ # Patch mode — configuration via arguments
199
+ agentready.auto(
200
+ level="medium",
201
+ preserve_code=True,
202
+ min_tokens=100,
203
+ )
204
+ ```
205
+
206
+ ## Pricing
207
+
208
+ **Beta — Free unlimited usage.** After beta: pay-per-token, ~60% less than direct API costs.
209
+
210
+ Get your API key at [agentready.cloud](https://agentready.cloud)
211
+
212
+ ## License
213
+
214
+ MIT — [AgentReady](https://agentready.cloud)
@@ -0,0 +1,176 @@
1
+ # AgentReady — Python SDK
2
+
3
+ **Cut AI token costs by 40-60%.** Drop-in proxy for OpenAI, LangChain, LlamaIndex, CrewAI.
4
+
5
+ [![PyPI](https://img.shields.io/pypi/v/agentready-sdk.svg)](https://pypi.org/project/agentready-sdk/)
6
+ [![Python](https://img.shields.io/pypi/pyversions/agentready-sdk.svg)](https://pypi.org/project/agentready-sdk/)
7
+ [![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](https://opensource.org/licenses/MIT)
8
+
9
+ ## Install
10
+
11
+ ```bash
12
+ pip install agentready-sdk
13
+ ```
14
+
15
+ ## Quick Start — Drop-in Proxy (Recommended)
16
+
17
+ Just swap your `base_url`. **Zero code changes** to your existing OpenAI calls:
18
+
19
+ ```python
20
+ from openai import OpenAI
21
+
22
+ client = OpenAI(
23
+ base_url="https://agentready.cloud/v1", # ← only change needed
24
+ api_key="ak_...", # your AgentReady key
25
+ default_headers={
26
+ "X-Upstream-API-Key": "sk-...", # your OpenAI key
27
+ },
28
+ )
29
+
30
+ # Everything works exactly like before — but 40-60% cheaper
31
+ response = client.chat.completions.create(
32
+ model="gpt-4o",
33
+ messages=[{"role": "user", "content": your_long_prompt}],
34
+ )
35
+ ```
36
+
37
+ ### One-liner Helper
38
+
39
+ ```python
40
+ import agentready
41
+
42
+ client = agentready.openai("ak_...", upstream_key="sk-...")
43
+ response = client.chat.completions.create(
44
+ model="gpt-4o",
45
+ messages=[{"role": "user", "content": "Hello!"}],
46
+ )
47
+ ```
48
+
49
+ ### Async Client
50
+
51
+ ```python
52
+ client = agentready.create_client("ak_...", upstream_key="sk-...", async_client=True)
53
+ response = await client.chat.completions.create(...)
54
+ ```
55
+
56
+ ## Method 2 — Monkey-Patch
57
+
58
+ Patch all OpenAI/Anthropic calls globally with two lines:
59
+
60
+ ```python
61
+ from agentready import patch_openai
62
+ patch_openai(api_key="ak_...")
63
+
64
+ # All existing OpenAI code is now compressed automatically
65
+ from openai import OpenAI
66
+ client = OpenAI()
67
+ response = client.chat.completions.create(
68
+ model="gpt-4o",
69
+ messages=[{"role": "user", "content": your_long_prompt}],
70
+ )
71
+ ```
72
+
73
+ Or patch everything at once:
74
+
75
+ ```python
76
+ import agentready
77
+ agentready.api_key = "ak_..."
78
+ agentready.auto() # patches OpenAI + Anthropic
79
+ ```
80
+
81
+ ## Method 3 — Manual Compression
82
+
83
+ For fine-grained control:
84
+
85
+ ```python
86
+ import agentready
87
+ agentready.api_key = "ak_..."
88
+
89
+ result = agentready.compress("Your long prompt text here...")
90
+ print(result.text) # compressed text
91
+ print(result.tokens_saved) # 1,247
92
+ print(result.reduction_percent) # 52.3
93
+ print(result.savings_usd) # 0.0374
94
+ ```
95
+
96
+ ## Framework Integrations
97
+
98
+ ### LangChain
99
+
100
+ ```python
101
+ from agentready.integrations.langchain import TokenCutCallbackHandler
102
+ from langchain_openai import ChatOpenAI
103
+
104
+ handler = TokenCutCallbackHandler(api_key="ak_...")
105
+ llm = ChatOpenAI(model="gpt-4o", callbacks=[handler])
106
+ response = llm.invoke("Your very long prompt here...")
107
+ ```
108
+
109
+ ### LlamaIndex
110
+
111
+ ```python
112
+ from agentready.integrations.llamaindex import TokenCutPostprocessor
113
+
114
+ postprocessor = TokenCutPostprocessor(api_key="ak_...")
115
+ query_engine = index.as_query_engine(
116
+ node_postprocessors=[postprocessor]
117
+ )
118
+ ```
119
+
120
+ ### CrewAI
121
+
122
+ ```python
123
+ from agentready.integrations.crewai import create_crewai_llm
124
+ from crewai import Agent, Task, Crew
125
+
126
+ llm = create_crewai_llm(
127
+ agentready_key="ak_...",
128
+ upstream_key="sk-...",
129
+ model="gpt-4o",
130
+ )
131
+
132
+ agent = Agent(
133
+ role="Researcher",
134
+ goal="Research AI trends",
135
+ backstory="Expert AI researcher.",
136
+ llm=llm,
137
+ )
138
+ ```
139
+
140
+ ## How It Works
141
+
142
+ AgentReady's proxy sits between your code and OpenAI. Every request is:
143
+
144
+ 1. **Compressed** — redundant phrasing removed, verbose text condensed
145
+ 2. **Forwarded** — sent to OpenAI with your upstream key
146
+ 3. **Returned** — response comes back unchanged
147
+
148
+ Code blocks, URLs, numbers, and key terms are always preserved.
149
+
150
+ ## Configuration
151
+
152
+ ```python
153
+ # Proxy mode — compression level via header
154
+ client = agentready.openai(
155
+ "ak_...",
156
+ upstream_key="sk-...",
157
+ compression_level="aggressive", # "light", "standard", "aggressive"
158
+ )
159
+
160
+ # Patch mode — configuration via arguments
161
+ agentready.auto(
162
+ level="medium",
163
+ preserve_code=True,
164
+ min_tokens=100,
165
+ )
166
+ ```
167
+
168
+ ## Pricing
169
+
170
+ **Beta — Free unlimited usage.** After beta: pay-per-token, ~60% less than direct API costs.
171
+
172
+ Get your API key at [agentready.cloud](https://agentready.cloud)
173
+
174
+ ## License
175
+
176
+ MIT — [AgentReady](https://agentready.cloud)
@@ -0,0 +1,44 @@
1
+ [build-system]
2
+ requires = ["setuptools>=68.0", "wheel"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "agentready-sdk"
7
+ version = "0.3.0"
8
+ description = "Cut your AI token costs by 40-60%. Drop-in proxy for OpenAI, LangChain, LlamaIndex, CrewAI."
9
+ readme = "README.md"
10
+ license = {text = "MIT"}
11
+ requires-python = ">=3.8"
12
+ authors = [{name = "AgentReady", email = "christalingx@gmail.com"}]
13
+ keywords = ["llm", "tokens", "openai", "compression", "langchain", "llamaindex", "crewai", "ai", "cost-reduction", "mcp", "proxy"]
14
+ classifiers = [
15
+ "Development Status :: 4 - Beta",
16
+ "Intended Audience :: Developers",
17
+ "License :: OSI Approved :: MIT License",
18
+ "Programming Language :: Python :: 3",
19
+ "Programming Language :: Python :: 3.8",
20
+ "Programming Language :: Python :: 3.9",
21
+ "Programming Language :: Python :: 3.10",
22
+ "Programming Language :: Python :: 3.11",
23
+ "Programming Language :: Python :: 3.12",
24
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
25
+ "Topic :: Software Development :: Libraries :: Python Modules",
26
+ ]
27
+ dependencies = [
28
+ "httpx>=0.24.0",
29
+ ]
30
+
31
+ [project.optional-dependencies]
32
+ openai = ["openai>=1.0.0"]
33
+ langchain = ["langchain-core>=0.1.0"]
34
+ llamaindex = ["llama-index-core>=0.10.0"]
35
+ crewai = ["crewai>=0.28.0"]
36
+ all = ["openai>=1.0.0", "langchain-core>=0.1.0", "llama-index-core>=0.10.0", "crewai>=0.28.0"]
37
+
38
+ [project.urls]
39
+ Homepage = "https://agentready.cloud"
40
+ Documentation = "https://agentready.cloud/docs/quickstart"
41
+ Repository = "https://github.com/christianbragliasitibt/agentready"
42
+
43
+ [tool.setuptools.packages.find]
44
+ where = ["src"]
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,109 @@
1
+ """
2
+ AgentReady — Cut your AI token costs by 40-60%.
3
+
4
+ Drop-in proxy for OpenAI, Anthropic, LangChain, LlamaIndex, CrewAI.
5
+
6
+ Quick Start (recommended — just swap base_url):
7
+ from openai import OpenAI
8
+ client = OpenAI(
9
+ base_url="https://agentready.cloud/v1",
10
+ api_key="ak_...", # your AgentReady API key
11
+ default_headers={"X-Upstream-API-Key": "sk-..."}, # your OpenAI key
12
+ )
13
+ # That's it! All calls are now compressed automatically.
14
+
15
+ One-liner:
16
+ import agentready
17
+ client = agentready.openai("ak_...", upstream_key="sk-...")
18
+
19
+ Patch mode (monkey-patch existing code):
20
+ import agentready
21
+ agentready.api_key = "ak_..."
22
+ agentready.auto() # patches OpenAI & Anthropic globally
23
+
24
+ Manual compression:
25
+ import agentready
26
+ agentready.api_key = "ak_..."
27
+ result = agentready.compress("Your long prompt here...")
28
+ """
29
+
30
from typing import Optional

from agentready.client import TokenCutClient, CompressResult
from agentready.patch import auto, unpatch, patch_openai, patch_anthropic
from agentready.proxy import openai, create_client
33
+
34
# Package version; keep in sync with pyproject.toml and the User-Agent
# string in agentready/client.py.
__version__ = "0.3.0"

# Public names re-exported by ``from agentready import *``.
# NOTE(review): listing "api_key" here means a star-import copies its
# *value* (None at import time); configuration only works by assigning
# ``agentready.api_key = "..."`` on the module object itself — confirm
# the docs show only the module-attribute form (they currently do).
__all__ = [
    "api_key",
    "auto",
    "unpatch",
    "patch_openai",
    "patch_anthropic",
    "openai",
    "create_client",
    "compress",
    "compress_async",
    "CompressResult",
]
47
+
48
# ── Module-level API key ─────────────────────────────────────────────
# Set via ``agentready.api_key = "ak_..."`` before calling compress().
# Uses typing.Optional instead of ``str | None``: module-level variable
# annotations are evaluated at import time, and the PEP 604 union syntax
# raises TypeError on Python 3.8/3.9 — versions this package declares
# support for (requires-python >= 3.8). This module has no
# ``from __future__ import annotations``, so the annotation must be
# valid on 3.8.
api_key: Optional[str] = None

# ── Module-level client (lazy) ───────────────────────────────────────
# Created on first use by _get_client(); rebuilt when ``api_key``
# changes. The forward-reference string keeps the annotation from being
# evaluated at import time.
_client: Optional["TokenCutClient"] = None
53
+
54
+
55
def _get_client() -> TokenCutClient:
    """Return the shared TokenCutClient, building it lazily.

    The cached client is reused until the module-level ``api_key``
    changes; if no key has been configured a ValueError is raised.
    """
    global _client
    cached = _client
    if cached is not None and cached.api_key == api_key:
        return cached
    if not api_key:
        raise ValueError(
            "agentready.api_key is not set. "
            "Get your API key at https://agentready.cloud/dashboard/api-keys"
        )
    _client = TokenCutClient(api_key=api_key)
    return _client
65
+
66
+
67
def compress(
    text: str,
    level: str = "medium",
    preserve_code: bool = True,
    preserve_urls: bool = True,
    target_model: str = "gpt-4",
) -> CompressResult:
    """Compress text using TokenCut.

    Thin module-level convenience wrapper: forwards every argument to
    the lazily created shared client (see ``_get_client``).

    Args:
        text: The text to compress.
        level: Compression level — "light", "medium", or "aggressive".
        preserve_code: Keep code blocks intact.
        preserve_urls: Keep URLs intact.
        target_model: Target LLM model for cost estimation.

    Returns:
        CompressResult with compressed text and stats.

    Raises:
        ValueError: if ``agentready.api_key`` has not been set.
    """
    client = _get_client()
    return client.compress(
        text=text,
        level=level,
        preserve_code=preserve_code,
        preserve_urls=preserve_urls,
        target_model=target_model,
    )
93
+
94
+
95
async def compress_async(
    text: str,
    level: str = "medium",
    preserve_code: bool = True,
    preserve_urls: bool = True,
    target_model: str = "gpt-4",
) -> CompressResult:
    """Async version of compress.

    Same contract as :func:`compress`, awaiting the shared client's
    async request path instead.
    """
    client = _get_client()
    return await client.compress_async(
        text=text,
        level=level,
        preserve_code=preserve_code,
        preserve_urls=preserve_urls,
        target_model=target_model,
    )
@@ -0,0 +1,123 @@
1
+ """TokenCut API client."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass
6
+ from typing import Optional
7
+
8
+ import httpx
9
+
10
API_BASE = "https://agentready.cloud/api/v1"


@dataclass
class CompressResult:
    """Result of a TokenCut compression.

    Token counts and reduction stats come straight from the API's
    ``stats`` object; the cost/credit fields default to 0 when the
    server omits them.
    """

    text: str
    original_tokens: int
    compressed_tokens: int
    tokens_saved: int
    reduction_percent: float
    original_cost_usd: float
    compressed_cost_usd: float
    savings_usd: float
    processing_time_ms: float
    credits_consumed: int
    credits_remaining: int

    @property
    def ratio(self) -> float:
        """Compression ratio (0-1); 0.0 when the input had no tokens."""
        if self.original_tokens == 0:
            return 0.0
        return self.tokens_saved / self.original_tokens


class TokenCutClient:
    """Low-level TokenCut API client.

    Holds only credentials and configuration; a fresh httpx client is
    opened per request, so instances keep no connections open between
    calls.
    """

    def __init__(self, api_key: str, base_url: str = API_BASE, timeout: float = 30.0):
        """Store credentials and request settings.

        Args:
            api_key: AgentReady API key ("ak_...").
            base_url: API root; any trailing slash is stripped so path
                joins below don't produce "//".
            timeout: Per-request timeout in seconds.
        """
        self.api_key = api_key
        self.base_url = base_url.rstrip("/")
        self.timeout = timeout

    def _headers(self) -> dict:
        """Headers sent with every API request."""
        return {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
            # Keep in sync with __version__ in agentready/__init__.py.
            "User-Agent": "agentready-python/0.3.0",
        }

    @staticmethod
    def _build_payload(
        text: str,
        level: str,
        preserve_code: bool,
        preserve_urls: bool,
        target_model: str,
    ) -> dict:
        """Request body shared by the sync and async compress paths.

        Extracted so the two paths cannot drift apart (they previously
        duplicated this dict literally).
        """
        return {
            "text": text,
            "level": level,
            "preserve_code": preserve_code,
            "preserve_urls": preserve_urls,
            "target_model": target_model,
        }

    def _parse_response(self, data: dict) -> CompressResult:
        """Convert an API response envelope into a CompressResult.

        Raises:
            TokenCutError: if the envelope's "success" flag is falsy.
        """
        if not data.get("success"):
            raise TokenCutError(data.get("error", "Unknown error"))

        stats = data["data"]["stats"]
        return CompressResult(
            text=data["data"]["compressed_text"],
            original_tokens=stats["original_tokens"],
            compressed_tokens=stats["compressed_tokens"],
            tokens_saved=stats["tokens_saved"],
            reduction_percent=stats["reduction_percent"],
            original_cost_usd=stats.get("original_cost_usd", 0),
            compressed_cost_usd=stats.get("compressed_cost_usd", 0),
            savings_usd=stats.get("savings_usd", 0),
            processing_time_ms=stats.get("processing_time_ms", 0),
            credits_consumed=data.get("credits_consumed", 0),
            credits_remaining=data.get("credits_remaining", 0),
        )

    def compress(
        self,
        text: str,
        level: str = "medium",
        preserve_code: bool = True,
        preserve_urls: bool = True,
        target_model: str = "gpt-4",
    ) -> CompressResult:
        """Compress text synchronously.

        Raises:
            httpx.HTTPStatusError: on non-2xx responses.
            TokenCutError: when the API reports a failure envelope.
        """
        payload = self._build_payload(
            text, level, preserve_code, preserve_urls, target_model
        )
        with httpx.Client(timeout=self.timeout) as client:
            response = client.post(
                f"{self.base_url}/tools/tokencut",
                headers=self._headers(),
                json=payload,
            )
            response.raise_for_status()
            return self._parse_response(response.json())

    async def compress_async(
        self,
        text: str,
        level: str = "medium",
        preserve_code: bool = True,
        preserve_urls: bool = True,
        target_model: str = "gpt-4",
    ) -> CompressResult:
        """Compress text asynchronously (same contract as compress)."""
        payload = self._build_payload(
            text, level, preserve_code, preserve_urls, target_model
        )
        async with httpx.AsyncClient(timeout=self.timeout) as client:
            response = await client.post(
                f"{self.base_url}/tools/tokencut",
                headers=self._headers(),
                json=payload,
            )
            response.raise_for_status()
            return self._parse_response(response.json())
118
+
119
+
120
class TokenCutError(Exception):
    """Error reported by the TokenCut API (failure envelope).

    Raised by ``TokenCutClient._parse_response`` when the response's
    "success" flag is falsy; the message is the API's "error" field.
    """
@@ -0,0 +1 @@
1
+ """AgentReady integrations for popular LLM frameworks."""