agentic-programming 0.4.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentic_programming-0.4.0/LICENSE +21 -0
- agentic_programming-0.4.0/PKG-INFO +373 -0
- agentic_programming-0.4.0/README.md +332 -0
- agentic_programming-0.4.0/agentic/__init__.py +48 -0
- agentic_programming-0.4.0/agentic/apps/__init__.py +1 -0
- agentic_programming-0.4.0/agentic/apps/mini_lesson.py +47 -0
- agentic_programming-0.4.0/agentic/cli.py +319 -0
- agentic_programming-0.4.0/agentic/context.py +574 -0
- agentic_programming-0.4.0/agentic/function.py +232 -0
- agentic_programming-0.4.0/agentic/functions/__init__.py +2 -0
- agentic_programming-0.4.0/agentic/functions/extract_domain.py +19 -0
- agentic_programming-0.4.0/agentic/functions/sentiment.py +17 -0
- agentic_programming-0.4.0/agentic/functions/word_count.py +14 -0
- agentic_programming-0.4.0/agentic/mcp/__init__.py +1 -0
- agentic_programming-0.4.0/agentic/mcp/__main__.py +4 -0
- agentic_programming-0.4.0/agentic/mcp/server.py +189 -0
- agentic_programming-0.4.0/agentic/meta_functions/__init__.py +17 -0
- agentic_programming-0.4.0/agentic/meta_functions/_helpers.py +265 -0
- agentic_programming-0.4.0/agentic/meta_functions/create.py +108 -0
- agentic_programming-0.4.0/agentic/meta_functions/create_app.py +136 -0
- agentic_programming-0.4.0/agentic/meta_functions/create_skill.py +62 -0
- agentic_programming-0.4.0/agentic/meta_functions/fix.py +109 -0
- agentic_programming-0.4.0/agentic/providers/__init__.py +169 -0
- agentic_programming-0.4.0/agentic/providers/anthropic.py +234 -0
- agentic_programming-0.4.0/agentic/providers/claude_code.py +327 -0
- agentic_programming-0.4.0/agentic/providers/codex.py +275 -0
- agentic_programming-0.4.0/agentic/providers/gemini.py +211 -0
- agentic_programming-0.4.0/agentic/providers/gemini_cli.py +165 -0
- agentic_programming-0.4.0/agentic/providers/openai.py +249 -0
- agentic_programming-0.4.0/agentic/runtime.py +232 -0
- agentic_programming-0.4.0/agentic_programming.egg-info/PKG-INFO +373 -0
- agentic_programming-0.4.0/agentic_programming.egg-info/SOURCES.txt +45 -0
- agentic_programming-0.4.0/agentic_programming.egg-info/dependency_links.txt +1 -0
- agentic_programming-0.4.0/agentic_programming.egg-info/entry_points.txt +2 -0
- agentic_programming-0.4.0/agentic_programming.egg-info/requires.txt +21 -0
- agentic_programming-0.4.0/agentic_programming.egg-info/top_level.txt +1 -0
- agentic_programming-0.4.0/pyproject.toml +67 -0
- agentic_programming-0.4.0/setup.cfg +4 -0
- agentic_programming-0.4.0/tests/test_async.py +491 -0
- agentic_programming-0.4.0/tests/test_context.py +225 -0
- agentic_programming-0.4.0/tests/test_error_recovery.py +158 -0
- agentic_programming-0.4.0/tests/test_function.py +192 -0
- agentic_programming-0.4.0/tests/test_meta.py +278 -0
- agentic_programming-0.4.0/tests/test_providers.py +1086 -0
- agentic_programming-0.4.0/tests/test_recovery.py +358 -0
- agentic_programming-0.4.0/tests/test_render.py +644 -0
- agentic_programming-0.4.0/tests/test_runtime.py +330 -0
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Fzkuji
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,373 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: agentic-programming
|
|
3
|
+
Version: 0.4.0
|
|
4
|
+
Summary: Agentic Programming — Python functions that call LLMs with automatic context tracking.
|
|
5
|
+
Author: Fzkuji
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/Fzkuji/Agentic-Programming
|
|
8
|
+
Project-URL: Repository, https://github.com/Fzkuji/Agentic-Programming
|
|
9
|
+
Project-URL: Documentation, https://github.com/Fzkuji/Agentic-Programming/tree/main/docs
|
|
10
|
+
Keywords: llm,agentic,ai,function,decorator,context
|
|
11
|
+
Classifier: Development Status :: 3 - Alpha
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: Intended Audience :: Science/Research
|
|
14
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
15
|
+
Classifier: Operating System :: OS Independent
|
|
16
|
+
Classifier: Programming Language :: Python :: 3
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
20
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
21
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
22
|
+
Classifier: Typing :: Typed
|
|
23
|
+
Requires-Python: >=3.11
|
|
24
|
+
Description-Content-Type: text/markdown
|
|
25
|
+
License-File: LICENSE
|
|
26
|
+
Provides-Extra: dev
|
|
27
|
+
Requires-Dist: pytest>=8.0.0; extra == "dev"
|
|
28
|
+
Provides-Extra: anthropic
|
|
29
|
+
Requires-Dist: anthropic>=0.30.0; extra == "anthropic"
|
|
30
|
+
Provides-Extra: openai
|
|
31
|
+
Requires-Dist: openai>=1.30.0; extra == "openai"
|
|
32
|
+
Provides-Extra: gemini
|
|
33
|
+
Requires-Dist: google-genai>=1.0.0; extra == "gemini"
|
|
34
|
+
Provides-Extra: mcp
|
|
35
|
+
Requires-Dist: mcp[cli]>=1.0.0; extra == "mcp"
|
|
36
|
+
Provides-Extra: all
|
|
37
|
+
Requires-Dist: anthropic>=0.30.0; extra == "all"
|
|
38
|
+
Requires-Dist: openai>=1.30.0; extra == "all"
|
|
39
|
+
Requires-Dist: google-genai>=1.0.0; extra == "all"
|
|
40
|
+
Requires-Dist: mcp[cli]>=1.0.0; extra == "all"
|
|
41
|
+
|
|
42
|
+
<p align="center">
|
|
43
|
+
<img src="docs/images/banner.png" alt="Agentic Programming: Redefining Agent Flow Control" width="900">
|
|
44
|
+
</p>
|
|
45
|
+
|
|
46
|
+
<p align="center">
|
|
47
|
+
<h1 align="center">🧬 Agentic Programming</h1>
|
|
48
|
+
<p align="center">
|
|
49
|
+
<strong>Python functions that think.</strong><br>
|
|
50
|
+
A programming paradigm where Python and LLM co-execute functions.
|
|
51
|
+
</p>
|
|
52
|
+
<p align="center">
|
|
53
|
+
<a href="docs/README_CN.md">🇨🇳 中文</a>
|
|
54
|
+
</p>
|
|
55
|
+
</p>
|
|
56
|
+
|
|
57
|
+
## Table of Contents
|
|
58
|
+
|
|
59
|
+
- [Motivation](#motivation)
|
|
60
|
+
- [Core Idea](#core-idea)
|
|
61
|
+
- [Quick Start](#quick-start)
|
|
62
|
+
- [Usage](#usage)
|
|
63
|
+
- [Python](#1-python--write-agentic-code)
|
|
64
|
+
- [Skills](#2-skills--agent-integration)
|
|
65
|
+
- [MCP](#3-mcp--any-mcp-client)
|
|
66
|
+
- [Core Concepts](#core-concepts)
|
|
67
|
+
- [Agentic Functions](#agentic-functions)
|
|
68
|
+
- [Automatic Context](#automatic-context)
|
|
69
|
+
- [Self-Evolving Code](#self-evolving-code)
|
|
70
|
+
- [Error Recovery](#error-recovery)
|
|
71
|
+
- [API Reference](#api-reference)
|
|
72
|
+
- [Comparison](#comparison)
|
|
73
|
+
- [Project Structure](#project-structure)
|
|
74
|
+
- [Contributing](#contributing)
|
|
75
|
+
|
|
76
|
+
---
|
|
77
|
+
|
|
78
|
+
> 🚀 **This is a paradigm proposal.** We're sharing a new way to think about LLM-powered programming. The code here is a reference implementation — we'd love to see you take these ideas and build your own version, in any language, for any use case.
|
|
79
|
+
|
|
80
|
+
**Projects built with Agentic Programming:**
|
|
81
|
+
|
|
82
|
+
| Project | Description |
|
|
83
|
+
|---------|-------------|
|
|
84
|
+
| [🖥️ GUI Agent Harness](https://github.com/Fzkuji/GUI-Agent-Harness) | Autonomous GUI agent that operates desktop apps via vision + agentic functions. Python controls observe→plan→act→verify loops; the LLM only reasons when asked. |
|
|
85
|
+
|
|
86
|
+
---
|
|
87
|
+
|
|
88
|
+
## Motivation
|
|
89
|
+
|
|
90
|
+
Current LLM agent frameworks place the LLM as the central scheduler — it decides what to do, when, and how. This creates three fundamental problems:
|
|
91
|
+
|
|
92
|
+
- **Unpredictable execution** — the LLM may skip, repeat, or invent steps regardless of defined workflows
|
|
93
|
+
- **Context explosion** — each tool-call round-trip accumulates history
|
|
94
|
+
- **No output guarantees** — the LLM interprets instructions rather than executing them
|
|
95
|
+
|
|
96
|
+
<p align="center">
|
|
97
|
+
<img src="docs/images/the_problem.png" alt="The Problem: LLM as Scheduler" width="800">
|
|
98
|
+
</p>
|
|
99
|
+
|
|
100
|
+
The core issue: **the LLM controls the flow, but nothing enforces it.** Skills, prompts, and system messages are suggestions, not guarantees.
|
|
101
|
+
|
|
102
|
+
---
|
|
103
|
+
|
|
104
|
+
## Core Idea
|
|
105
|
+
|
|
106
|
+
<p align="center">
|
|
107
|
+
<img src="docs/images/the_idea.png" alt="The Paradigm: Python controls flow, LLM reasons" width="800">
|
|
108
|
+
</p>
|
|
109
|
+
|
|
110
|
+
**Give the flow back to Python. Let the LLM focus on reasoning.**
|
|
111
|
+
|
|
112
|
+
| Principle | How |
|
|
113
|
+
|-----------|-----|
|
|
114
|
+
| **Deterministic flow** | Python controls `if/else/for/while`. The execution path is guaranteed, not suggested. |
|
|
115
|
+
| **Minimal LLM calls** | The LLM is called only when reasoning is needed. 2 calls instead of 10. |
|
|
116
|
+
| **Docstring = Prompt** | Change the function's docstring, change the LLM's behavior. No separate prompt files. |
|
|
117
|
+
| **Self-evolving** | Functions generate, fix, and improve themselves at runtime via meta functions. |
|
|
118
|
+
|
|
119
|
+
```python
|
|
120
|
+
@agentic_function
|
|
121
|
+
def observe(task):
|
|
122
|
+
"""Look at the screen and describe what you see."""
|
|
123
|
+
|
|
124
|
+
img = take_screenshot() # Python: deterministic
|
|
125
|
+
ocr = run_ocr(img) # Python: deterministic
|
|
126
|
+
|
|
127
|
+
return runtime.exec(content=[ # LLM: reasoning
|
|
128
|
+
{"type": "text", "text": f"Task: {task}\nOCR: {ocr}"},
|
|
129
|
+
{"type": "image", "path": img},
|
|
130
|
+
])
|
|
131
|
+
```
|
|
132
|
+
|
|
133
|
+
---
|
|
134
|
+
|
|
135
|
+
## Quick Start
|
|
136
|
+
|
|
137
|
+
```bash
|
|
138
|
+
git clone https://github.com/Fzkuji/Agentic-Programming.git
|
|
139
|
+
cd Agentic-Programming
|
|
140
|
+
pip install -e .
|
|
141
|
+
```
|
|
142
|
+
|
|
143
|
+
Set up at least one LLM provider:
|
|
144
|
+
|
|
145
|
+
| Provider | Setup |
|
|
146
|
+
|----------|-------|
|
|
147
|
+
| Claude Code CLI | `npm i -g @anthropic-ai/claude-code && claude login` |
|
|
148
|
+
| Codex CLI | `npm i -g @openai/codex && codex auth` |
|
|
149
|
+
| Gemini CLI | `npm i -g @google/gemini-cli` |
|
|
150
|
+
| Anthropic API | `pip install -e ".[anthropic]"` then `export ANTHROPIC_API_KEY=...` |
|
|
151
|
+
| OpenAI API | `pip install -e ".[openai]"` then `export OPENAI_API_KEY=...` |
|
|
152
|
+
| Gemini API | `pip install -e ".[gemini]"` then `export GOOGLE_API_KEY=...` |
|
|
153
|
+
|
|
154
|
+
Verify with `agentic providers`.
|
|
155
|
+
|
|
156
|
+
---
|
|
157
|
+
|
|
158
|
+
## Usage
|
|
159
|
+
|
|
160
|
+
### 1. Python — write agentic code
|
|
161
|
+
|
|
162
|
+
```python
|
|
163
|
+
from agentic import agentic_function, create_runtime
|
|
164
|
+
|
|
165
|
+
runtime = create_runtime() # auto-detects best available provider
|
|
166
|
+
|
|
167
|
+
@agentic_function
|
|
168
|
+
def summarize(text: str) -> str:
|
|
169
|
+
"""Summarize the given text into 3 bullet points."""
|
|
170
|
+
return runtime.exec(content=[
|
|
171
|
+
{"type": "text", "text": text},
|
|
172
|
+
])
|
|
173
|
+
|
|
174
|
+
result = summarize(text="Your long article here...")
|
|
175
|
+
```
|
|
176
|
+
|
|
177
|
+
Override the provider when needed:
|
|
178
|
+
|
|
179
|
+
```python
|
|
180
|
+
runtime = create_runtime(provider="openai", model="gpt-4o")
|
|
181
|
+
```
|
|
182
|
+
|
|
183
|
+
### 2. Skills — agent integration
|
|
184
|
+
|
|
185
|
+
Install skills so your LLM agent can use agentic functions through natural language:
|
|
186
|
+
|
|
187
|
+
```bash
|
|
188
|
+
cp -r skills/* ~/.claude/skills/ # Claude Code
|
|
189
|
+
cp -r skills/* ~/.gemini/skills/ # Gemini CLI
|
|
190
|
+
```
|
|
191
|
+
|
|
192
|
+
Then talk to your agent:
|
|
193
|
+
|
|
194
|
+
> "Create a function that extracts emails from text"
|
|
195
|
+
|
|
196
|
+
The agent picks up the skill, calls `agentic create`, and the generated function handles everything from there. Once created:
|
|
197
|
+
|
|
198
|
+
> "Run sentiment on 'This is amazing'"
|
|
199
|
+
|
|
200
|
+
### 3. MCP — any MCP client
|
|
201
|
+
|
|
202
|
+
Run the built-in MCP server so any MCP-compatible client (Claude Desktop, Cursor, etc.) can use agentic functions:
|
|
203
|
+
|
|
204
|
+
```bash
|
|
205
|
+
pip install -e ".[mcp]"
|
|
206
|
+
```
|
|
207
|
+
|
|
208
|
+
Add to your MCP client config:
|
|
209
|
+
|
|
210
|
+
```json
|
|
211
|
+
{
|
|
212
|
+
"mcpServers": {
|
|
213
|
+
"agentic": {
|
|
214
|
+
"command": "python",
|
|
215
|
+
"args": ["-m", "agentic.mcp"]
|
|
216
|
+
}
|
|
217
|
+
}
|
|
218
|
+
}
|
|
219
|
+
```
|
|
220
|
+
|
|
221
|
+
Exposes five tools: `list_functions`, `run_function`, `create_function`, `create_application`, `fix_function`.
|
|
222
|
+
|
|
223
|
+
---
|
|
224
|
+
|
|
225
|
+
## Core Concepts
|
|
226
|
+
|
|
227
|
+
### Agentic Functions
|
|
228
|
+
|
|
229
|
+
Every `@agentic_function` can call `runtime.exec()` to invoke an LLM. The framework auto-injects execution context into the prompt. Python controls the flow — the LLM only reasons when explicitly asked.
|
|
230
|
+
|
|
231
|
+
```python
|
|
232
|
+
@agentic_function
|
|
233
|
+
def login_flow(username, password):
|
|
234
|
+
"""Complete login flow."""
|
|
235
|
+
observe(task="find login form") # Python decides what to do
|
|
236
|
+
click(element="login button") # Python decides the order
|
|
237
|
+
return verify(expected="dashboard") # Python decides when to stop
|
|
238
|
+
```
|
|
239
|
+
|
|
240
|
+
### Automatic Context
|
|
241
|
+
|
|
242
|
+
Every call creates a **Context** node. Nodes form a tree that is automatically injected into LLM calls:
|
|
243
|
+
|
|
244
|
+
```
|
|
245
|
+
login_flow ✓ 8.8s
|
|
246
|
+
├── observe ✓ 3.1s → "found login form at (200, 300)"
|
|
247
|
+
├── click ✓ 2.5s → "clicked login button"
|
|
248
|
+
└── verify ✓ 3.2s → "dashboard confirmed"
|
|
249
|
+
```
|
|
250
|
+
|
|
251
|
+
When `verify` calls the LLM, it automatically sees what `observe` and `click` returned. No manual context management.
|
|
252
|
+
|
|
253
|
+
### Self-Evolving Code
|
|
254
|
+
|
|
255
|
+
Functions can generate new functions, fix broken ones, and scaffold complete apps — all at runtime:
|
|
256
|
+
|
|
257
|
+
```python
|
|
258
|
+
from agentic.meta_functions import create, create_app, fix
|
|
259
|
+
|
|
260
|
+
# Generate a function from description
|
|
261
|
+
sentiment = create("Analyze text sentiment", runtime=runtime, name="sentiment")
|
|
262
|
+
sentiment(text="I love this!") # → "positive"
|
|
263
|
+
|
|
264
|
+
# Generate a complete app (runtime + argparse + main)
|
|
265
|
+
create_app("Summarize articles from URLs", runtime=runtime, name="summarizer")
|
|
266
|
+
# → agentic/apps/summarizer.py — runnable with: python agentic/apps/summarizer.py <url>
|
|
267
|
+
|
|
268
|
+
# Fix a broken function — auto-reads source & error history
|
|
269
|
+
fixed = fix(fn=broken_fn, runtime=runtime, instruction="return JSON, not plain text")
|
|
270
|
+
```
|
|
271
|
+
|
|
272
|
+
The `create → run → fail → fix → run` cycle means programs improve themselves through use.
|
|
273
|
+
|
|
274
|
+
### Error Recovery
|
|
275
|
+
|
|
276
|
+
`Runtime` retries transient failures automatically. For deeper issues, `fix()` rewrites the function:
|
|
277
|
+
|
|
278
|
+
```python
|
|
279
|
+
runtime = create_runtime(max_retries=3)
|
|
280
|
+
|
|
281
|
+
try:
|
|
282
|
+
result = extract(text="Acme closed at $42.50")
|
|
283
|
+
except Exception:
|
|
284
|
+
extract = fix(fn=extract, runtime=runtime) # LLM analyzes errors and rewrites
|
|
285
|
+
result = extract(text="Acme closed at $42.50")
|
|
286
|
+
```
|
|
287
|
+
|
|
288
|
+
Every attempt is recorded in the Context tree — `fix()` reads the full error history to diagnose the root cause, not just the symptom.
|
|
289
|
+
|
|
290
|
+
---
|
|
291
|
+
|
|
292
|
+
## API Reference
|
|
293
|
+
|
|
294
|
+
### Core
|
|
295
|
+
|
|
296
|
+
| Import | What it does |
|
|
297
|
+
|--------|-------------|
|
|
298
|
+
| `from agentic import agentic_function` | Decorator. Records execution into Context tree |
|
|
299
|
+
| `from agentic import Runtime` | LLM runtime. `exec()` calls the LLM with auto-context |
|
|
300
|
+
| `from agentic import Context` | Execution tree. `tree()`, `save()`, `traceback()` |
|
|
301
|
+
| `from agentic import create_runtime` | Create a Runtime with auto-detection or explicit provider |
|
|
302
|
+
|
|
303
|
+
### Meta Functions
|
|
304
|
+
|
|
305
|
+
| Import | What it does |
|
|
306
|
+
|--------|-------------|
|
|
307
|
+
| `from agentic.meta_functions import create` | Generate a new `@agentic_function` from description |
|
|
308
|
+
| `from agentic.meta_functions import create_app` | Generate a complete runnable app with `main()` |
|
|
309
|
+
| `from agentic.meta_functions import fix` | Fix broken functions via LLM analysis |
|
|
310
|
+
| `from agentic.meta_functions import create_skill` | Generate a SKILL.md for agent discovery |
|
|
311
|
+
|
|
312
|
+
### Providers
|
|
313
|
+
|
|
314
|
+
Six built-in providers: Anthropic, OpenAI, Gemini (API), Claude Code, Codex, Gemini (CLI). All CLI providers maintain **session continuity** across calls. See [Provider docs](docs/api/providers.md) for details.
|
|
315
|
+
|
|
316
|
+
---
|
|
317
|
+
|
|
318
|
+
## Comparison
|
|
319
|
+
|
|
320
|
+
| | Tool-Calling / MCP | Agentic Programming |
|
|
321
|
+
|--|---------------------|---------------------|
|
|
322
|
+
| **Who schedules?** | LLM decides | Python decides |
|
|
323
|
+
| **Functions contain** | Code only | Code + LLM reasoning |
|
|
324
|
+
| **Context** | Flat conversation | Structured tree |
|
|
325
|
+
| **Prompt** | Hidden in agent config | Docstring = prompt |
|
|
326
|
+
| **Self-improvement** | Not built-in | `create` → `fix` → evolve |
|
|
327
|
+
|
|
328
|
+
MCP is the *transport*. Agentic Programming is the *execution model*. They're orthogonal.
|
|
329
|
+
|
|
330
|
+
---
|
|
331
|
+
|
|
332
|
+
## Project Structure
|
|
333
|
+
|
|
334
|
+
```
|
|
335
|
+
agentic/
|
|
336
|
+
├── __init__.py # agentic_function, Runtime, Context, create_runtime
|
|
337
|
+
├── function.py # @agentic_function decorator
|
|
338
|
+
├── runtime.py # Runtime (exec + retry + context injection)
|
|
339
|
+
├── context.py # Context tree
|
|
340
|
+
├── meta_functions/ # Self-evolving code generation
|
|
341
|
+
│ ├── create.py # create() — generate a function
|
|
342
|
+
│ ├── create_app.py # create_app() — generate a complete app
|
|
343
|
+
│ ├── fix.py # fix() — rewrite broken functions
|
|
344
|
+
│ └── create_skill.py # create_skill() — generate SKILL.md
|
|
345
|
+
├── providers/ # Anthropic, OpenAI, Gemini, Claude Code, Codex, Gemini CLI
|
|
346
|
+
├── mcp/ # MCP server (python -m agentic.mcp)
|
|
347
|
+
├── functions/ # saved generated functions
|
|
348
|
+
└── apps/ # generated apps (from create_app)
|
|
349
|
+
skills/ # SKILL.md files for agent integration
|
|
350
|
+
examples/ # runnable demos
|
|
351
|
+
tests/ # pytest suite
|
|
352
|
+
```
|
|
353
|
+
|
|
354
|
+
## Integration
|
|
355
|
+
|
|
356
|
+
| Guide | Description |
|
|
357
|
+
|-------|-------------|
|
|
358
|
+
| [Getting Started](docs/GETTING_STARTED.md) | 3-minute setup and runnable examples |
|
|
359
|
+
| [Claude Code](docs/INTEGRATION_CLAUDE_CODE.md) | Use without API key via Claude Code CLI |
|
|
360
|
+
| [OpenClaw](docs/INTEGRATION_OPENCLAW.md) | Use as OpenClaw skill |
|
|
361
|
+
| [API Reference](docs/API.md) | Full API documentation |
|
|
362
|
+
|
|
363
|
+
---
|
|
364
|
+
|
|
365
|
+
## Contributing
|
|
366
|
+
|
|
367
|
+
This is a **paradigm proposal** with a reference implementation. We welcome discussions, alternative implementations in other languages, use cases that validate or challenge the approach, and bug reports.
|
|
368
|
+
|
|
369
|
+
See [CONTRIBUTING.md](CONTRIBUTING.md) for details.
|
|
370
|
+
|
|
371
|
+
## License
|
|
372
|
+
|
|
373
|
+
MIT
|