openclay-agent 2.0.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openclay_agent-2.0.0/LICENSE +21 -0
- openclay_agent-2.0.0/PKG-INFO +133 -0
- openclay_agent-2.0.0/README.md +99 -0
- openclay_agent-2.0.0/agent_backend.py +300 -0
- openclay_agent-2.0.0/app.py +106 -0
- openclay_agent-2.0.0/credential_store.py +183 -0
- openclay_agent-2.0.0/installer.py +298 -0
- openclay_agent-2.0.0/introspect.py +280 -0
- openclay_agent-2.0.0/memory.py +208 -0
- openclay_agent-2.0.0/openclay_agent.egg-info/PKG-INFO +133 -0
- openclay_agent-2.0.0/openclay_agent.egg-info/SOURCES.txt +20 -0
- openclay_agent-2.0.0/openclay_agent.egg-info/dependency_links.txt +1 -0
- openclay_agent-2.0.0/openclay_agent.egg-info/entry_points.txt +2 -0
- openclay_agent-2.0.0/openclay_agent.egg-info/requires.txt +13 -0
- openclay_agent-2.0.0/openclay_agent.egg-info/top_level.txt +11 -0
- openclay_agent-2.0.0/panel.py +287 -0
- openclay_agent-2.0.0/post_flows.py +171 -0
- openclay_agent-2.0.0/pyproject.toml +54 -0
- openclay_agent-2.0.0/setup.cfg +4 -0
- openclay_agent-2.0.0/twitter_post.py +60 -0
- openclay_agent-2.0.0/vision_caption.py +299 -0
- openclay_agent-2.0.0/wiki_engine.py +213 -0
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Anomalia
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,133 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: openclay-agent
|
|
3
|
+
Version: 2.0.0
|
|
4
|
+
Summary: Turn intention into infrastructure. Local-first AI agent bootstrapper.
|
|
5
|
+
Author-email: Anomalia <openclay@proton.me>
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/openclay1/OpenClay
|
|
8
|
+
Project-URL: Repository, https://github.com/openclay1/OpenClay
|
|
9
|
+
Keywords: ai,agent,local,wiki,llm,ollama,infrastructure
|
|
10
|
+
Classifier: Development Status :: 4 - Beta
|
|
11
|
+
Classifier: Intended Audience :: Developers
|
|
12
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
13
|
+
Classifier: Programming Language :: Python :: 3
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
18
|
+
Classifier: Topic :: Software Development :: Libraries
|
|
19
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
20
|
+
Requires-Python: >=3.9
|
|
21
|
+
Description-Content-Type: text/markdown
|
|
22
|
+
License-File: LICENSE
|
|
23
|
+
Requires-Dist: gradio>=4.0.0
|
|
24
|
+
Requires-Dist: requests>=2.31.0
|
|
25
|
+
Requires-Dist: tweepy>=4.14.0
|
|
26
|
+
Provides-Extra: anthropic
|
|
27
|
+
Requires-Dist: anthropic>=0.40.0; extra == "anthropic"
|
|
28
|
+
Provides-Extra: openai
|
|
29
|
+
Requires-Dist: openai>=1.0.0; extra == "openai"
|
|
30
|
+
Provides-Extra: all
|
|
31
|
+
Requires-Dist: anthropic>=0.40.0; extra == "all"
|
|
32
|
+
Requires-Dist: openai>=1.0.0; extra == "all"
|
|
33
|
+
Dynamic: license-file
|
|
34
|
+
|
|
35
|
+
# 🏺 OpenClay — turn intention into infrastructure.
|
|
36
|
+
|
|
37
|
+
You describe what you want. OpenClay reads your machine, builds a local AI stack, and starts working. No config files. No copy-paste. Everything it learns about you stays on your computer, in files you own.
|
|
38
|
+
|
|
39
|
+
<!-- TODO: replace with actual GIF -->
|
|
40
|
+

|
|
41
|
+
|
|
42
|
+
```bash
|
|
43
|
+
pip install openclay-agent
|
|
44
|
+
```
|
|
45
|
+
|
|
46
|
+
Or from source:
|
|
47
|
+
|
|
48
|
+
```bash
|
|
49
|
+
git clone https://github.com/openclay1/OpenClay.git && cd OpenClay && pip3 install -r requirements.txt && python3 app.py
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
| Your hardware | What OpenClay runs |
|
|
53
|
+
|---|---|
|
|
54
|
+
| 32 GB+ RAM, Apple Silicon / 6 GB+ VRAM | `llama3:8b` — full reasoning |
|
|
55
|
+
| 16 GB+ RAM, Apple Silicon / CUDA | `qwen2.5:7b` — fast + capable |
|
|
56
|
+
| 16 GB+ RAM, Intel, no discrete GPU | `qwen2.5:3b-instruct-q4_K_M` — lean workhorse |
|
|
57
|
+
| 8 GB+ RAM | `qwen2.5:1.5b` — lightweight |
|
|
58
|
+
| Under 8 GB | Template-only mode — no model needed |
|
|
59
|
+
| Any machine + API key | Claude, GPT-4, etc. via config |
|
|
60
|
+
|
|
61
|
+
---
|
|
62
|
+
|
|
63
|
+
## What it does today
|
|
64
|
+
|
|
65
|
+
- **Single input, single button.** Type an intention, hit Go.
|
|
66
|
+
- **Hardware detection → model selection → silent install.** You don't pick a model. It does.
|
|
67
|
+
- **LLM Wiki.** Karpathy-pattern knowledge base. Ingest files, query your wiki, lint for health. Local markdown, yours forever.
|
|
68
|
+
- **Tweet drafting and posting.** Draft from intention, review, post — or post directly.
|
|
69
|
+
- **Persistent memory.** AGENTS.md tracks what works, what fails, your preferences. The agent reads it before every action.
|
|
70
|
+
|
|
71
|
+
## How it works
|
|
72
|
+
|
|
73
|
+
```
|
|
74
|
+
intention → hardware scan → model install → execution → wiki memory
|
|
75
|
+
```
|
|
76
|
+
|
|
77
|
+
Everything runs locally through Ollama. No data leaves your machine.
|
|
78
|
+
|
|
79
|
+
## Wiki
|
|
80
|
+
|
|
81
|
+
OpenClay maintains a local wiki that compounds over time. Drop a file in `raw/`, type `ingest filename`, and the LLM breaks it into concept pages, entity pages, and source summaries. Query it later. Lint it for contradictions.
|
|
82
|
+
|
|
83
|
+
```
|
|
84
|
+
raw/ ← your files, immutable, LLM never writes here
|
|
85
|
+
wiki/
|
|
86
|
+
concepts/ ← idea pages
|
|
87
|
+
entities/ ← people, orgs, tools
|
|
88
|
+
sources/ ← one summary per ingested file
|
|
89
|
+
comparisons/ ← cross-source analysis
|
|
90
|
+
index.md ← auto-generated catalog
|
|
91
|
+
log.md ← append-only activity record
|
|
92
|
+
```
|
|
93
|
+
|
|
94
|
+
The wiki is not for you. It's for the agent — so it can act with consistency.
|
|
95
|
+
|
|
96
|
+
## Configuration
|
|
97
|
+
|
|
98
|
+
```bash
|
|
99
|
+
cp .env.example .env
|
|
100
|
+
```
|
|
101
|
+
|
|
102
|
+
Most things work with zero config. Add Twitter keys only if you want to post.
|
|
103
|
+
|
|
104
|
+
## Project structure
|
|
105
|
+
|
|
106
|
+
```
|
|
107
|
+
app.py — entry point
|
|
108
|
+
panel.py — browser UI (Gradio)
|
|
109
|
+
agent_backend.py — switchable LLM backend
|
|
110
|
+
wiki_engine.py — wiki operations: ingest, query, lint
|
|
111
|
+
memory.py — AGENTS.md persistent memory
|
|
112
|
+
twitter_post.py — tweet posting (Tweepy)
|
|
113
|
+
introspect.py — hardware detection
|
|
114
|
+
theme.css — design system
|
|
115
|
+
openclay.md — wiki schema
|
|
116
|
+
wiki/ — agent's compounding memory
|
|
117
|
+
raw/ — immutable source documents
|
|
118
|
+
```
|
|
119
|
+
|
|
120
|
+
Every module stays under 300 lines.
|
|
121
|
+
|
|
122
|
+
## Principles
|
|
123
|
+
|
|
124
|
+
1. **Act, don't ask.** If the agent can do it, it does it.
|
|
125
|
+
2. **Local-first.** No cloud dependency for core function.
|
|
126
|
+
3. **Files over apps.** Markdown you own, not data locked in a platform.
|
|
127
|
+
4. **Install what's missing.** Silent. No questions.
|
|
128
|
+
|
|
129
|
+
## License
|
|
130
|
+
|
|
131
|
+
MIT — it's yours.
|
|
132
|
+
|
|
133
|
+
[github.com/openclay1/OpenClay](https://github.com/openclay1/OpenClay)
|
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
# 🏺 OpenClay — turn intention into infrastructure.
|
|
2
|
+
|
|
3
|
+
You describe what you want. OpenClay reads your machine, builds a local AI stack, and starts working. No config files. No copy-paste. Everything it learns about you stays on your computer, in files you own.
|
|
4
|
+
|
|
5
|
+
<!-- TODO: replace with actual GIF -->
|
|
6
|
+

|
|
7
|
+
|
|
8
|
+
```bash
|
|
9
|
+
pip install openclay-agent
|
|
10
|
+
```
|
|
11
|
+
|
|
12
|
+
Or from source:
|
|
13
|
+
|
|
14
|
+
```bash
|
|
15
|
+
git clone https://github.com/openclay1/OpenClay.git && cd OpenClay && pip3 install -r requirements.txt && python3 app.py
|
|
16
|
+
```
|
|
17
|
+
|
|
18
|
+
| Your hardware | What OpenClay runs |
|
|
19
|
+
|---|---|
|
|
20
|
+
| 32 GB+ RAM, Apple Silicon / 6 GB+ VRAM | `llama3:8b` — full reasoning |
|
|
21
|
+
| 16 GB+ RAM, Apple Silicon / CUDA | `qwen2.5:7b` — fast + capable |
|
|
22
|
+
| 16 GB+ RAM, Intel, no discrete GPU | `qwen2.5:3b-instruct-q4_K_M` — lean workhorse |
|
|
23
|
+
| 8 GB+ RAM | `qwen2.5:1.5b` — lightweight |
|
|
24
|
+
| Under 8 GB | Template-only mode — no model needed |
|
|
25
|
+
| Any machine + API key | Claude, GPT-4, etc. via config |
|
|
26
|
+
|
|
27
|
+
---
|
|
28
|
+
|
|
29
|
+
## What it does today
|
|
30
|
+
|
|
31
|
+
- **Single input, single button.** Type an intention, hit Go.
|
|
32
|
+
- **Hardware detection → model selection → silent install.** You don't pick a model. It does.
|
|
33
|
+
- **LLM Wiki.** Karpathy-pattern knowledge base. Ingest files, query your wiki, lint for health. Local markdown, yours forever.
|
|
34
|
+
- **Tweet drafting and posting.** Draft from intention, review, post — or post directly.
|
|
35
|
+
- **Persistent memory.** AGENTS.md tracks what works, what fails, your preferences. The agent reads it before every action.
|
|
36
|
+
|
|
37
|
+
## How it works
|
|
38
|
+
|
|
39
|
+
```
|
|
40
|
+
intention → hardware scan → model install → execution → wiki memory
|
|
41
|
+
```
|
|
42
|
+
|
|
43
|
+
Everything runs locally through Ollama. No data leaves your machine.
|
|
44
|
+
|
|
45
|
+
## Wiki
|
|
46
|
+
|
|
47
|
+
OpenClay maintains a local wiki that compounds over time. Drop a file in `raw/`, type `ingest filename`, and the LLM breaks it into concept pages, entity pages, and source summaries. Query it later. Lint it for contradictions.
|
|
48
|
+
|
|
49
|
+
```
|
|
50
|
+
raw/ ← your files, immutable, LLM never writes here
|
|
51
|
+
wiki/
|
|
52
|
+
concepts/ ← idea pages
|
|
53
|
+
entities/ ← people, orgs, tools
|
|
54
|
+
sources/ ← one summary per ingested file
|
|
55
|
+
comparisons/ ← cross-source analysis
|
|
56
|
+
index.md ← auto-generated catalog
|
|
57
|
+
log.md ← append-only activity record
|
|
58
|
+
```
|
|
59
|
+
|
|
60
|
+
The wiki is not for you. It's for the agent — so it can act with consistency.
|
|
61
|
+
|
|
62
|
+
## Configuration
|
|
63
|
+
|
|
64
|
+
```bash
|
|
65
|
+
cp .env.example .env
|
|
66
|
+
```
|
|
67
|
+
|
|
68
|
+
Most things work with zero config. Add Twitter keys only if you want to post.
|
|
69
|
+
|
|
70
|
+
## Project structure
|
|
71
|
+
|
|
72
|
+
```
|
|
73
|
+
app.py — entry point
|
|
74
|
+
panel.py — browser UI (Gradio)
|
|
75
|
+
agent_backend.py — switchable LLM backend
|
|
76
|
+
wiki_engine.py — wiki operations: ingest, query, lint
|
|
77
|
+
memory.py — AGENTS.md persistent memory
|
|
78
|
+
twitter_post.py — tweet posting (Tweepy)
|
|
79
|
+
introspect.py — hardware detection
|
|
80
|
+
theme.css — design system
|
|
81
|
+
openclay.md — wiki schema
|
|
82
|
+
wiki/ — agent's compounding memory
|
|
83
|
+
raw/ — immutable source documents
|
|
84
|
+
```
|
|
85
|
+
|
|
86
|
+
Every module stays under 300 lines.
|
|
87
|
+
|
|
88
|
+
## Principles
|
|
89
|
+
|
|
90
|
+
1. **Act, don't ask.** If the agent can do it, it does it.
|
|
91
|
+
2. **Local-first.** No cloud dependency for core function.
|
|
92
|
+
3. **Files over apps.** Markdown you own, not data locked in a platform.
|
|
93
|
+
4. **Install what's missing.** Silent. No questions.
|
|
94
|
+
|
|
95
|
+
## License
|
|
96
|
+
|
|
97
|
+
MIT — it's yours.
|
|
98
|
+
|
|
99
|
+
[github.com/openclay1/OpenClay](https://github.com/openclay1/OpenClay)
|
|
@@ -0,0 +1,300 @@
|
|
|
1
|
+
"""agent_backend.py — Switchable agent backend (clawcode / claudecode via Ollama)."""
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
|
|
4
|
+
import json
|
|
5
|
+
import os
|
|
6
|
+
import subprocess
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
|
|
9
|
+
import urllib.request
|
|
10
|
+
|
|
11
|
+
try:
|
|
12
|
+
import requests
|
|
13
|
+
except ImportError:
|
|
14
|
+
requests = None
|
|
15
|
+
|
|
16
|
+
BASE_DIR = Path(__file__).parent       # project root (directory of this file)
DATA_DIR = BASE_DIR / "data"           # runtime state, e.g. stack.json written by the selector
ENV_PATH = BASE_DIR / ".env"           # optional dotenv-style config read by _read_env_key
OLLAMA_URL = "http://localhost:11434"  # local Ollama HTTP endpoint
|
|
20
|
+
|
|
21
|
+
# ── Tools available to the Claw Code agent loop ──
|
|
22
|
+
AGENT_TOOLS = [
    # write_file(path, content): executed by _exec_tool, which confines
    # writes to the project directory.
    {
        "type": "function",
        "function": {
            "name": "write_file",
            "description": "Write content to a file at the given path.",
            "parameters": {
                "type": "object",
                "properties": {
                    "path": {"type": "string", "description": "File path"},
                    "content": {"type": "string", "description": "File content"},
                },
                "required": ["path", "content"],
            },
        },
    },
    # read_file(path): _exec_tool caps the returned text at 4000 chars.
    {
        "type": "function",
        "function": {
            "name": "read_file",
            "description": "Read a file and return its contents.",
            "parameters": {
                "type": "object",
                "properties": {
                    "path": {"type": "string", "description": "File path"},
                },
                "required": ["path"],
            },
        },
    },
    # run_command(command): shell command run by _exec_tool with a 30s
    # timeout, cwd pinned to the project directory.
    {
        "type": "function",
        "function": {
            "name": "run_command",
            "description": "Run a shell command and return stdout.",
            "parameters": {
                "type": "object",
                "properties": {
                    "command": {"type": "string", "description": "Shell command"},
                },
                "required": ["command"],
            },
        },
    },
    # list_files(path): directory listing, truncated to 50 entries.
    {
        "type": "function",
        "function": {
            "name": "list_files",
            "description": "List files in a directory.",
            "parameters": {
                "type": "object",
                "properties": {
                    "path": {"type": "string", "description": "Directory path"},
                },
                "required": ["path"],
            },
        },
    },
]
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
def _read_env_key(key: str) -> str:
    """Look up *key* in the project .env file, falling back to os.environ.

    Returns an empty string when the key is defined in neither place.
    """
    if ENV_PATH.exists():
        for raw in ENV_PATH.read_text().splitlines():
            entry = raw.strip()
            # Skip blanks, comment lines, and lines without an assignment.
            if not entry or entry.startswith("#") or "=" not in entry:
                continue
            name, _, value = entry.partition("=")
            if name.strip() == key:
                return value.strip()
    return os.environ.get(key, "")
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
def get_backend() -> str:
    """Return the configured backend: 'clawcode' or 'claudecode'."""
    claw_aliases = {"clawcode", "claw", "claw-code", "claw_code"}
    configured = _read_env_key("AGENT_BACKEND").lower().strip()
    # Anything unrecognised (including empty) falls back to claudecode.
    return "clawcode" if configured in claw_aliases else "claudecode"
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def get_model() -> str:
    """Return the configured Ollama model name.

    Reads data/stack.json (written by the stack selector) and falls back
    to a lean default when the file is missing, unreadable, or malformed.
    The original crashed on a corrupt stack.json; model selection should
    never take the app down, so JSON errors now degrade to the default.
    """
    default = "qwen2.5:3b-instruct-q4_K_M"
    stack_path = DATA_DIR / "stack.json"
    try:
        stack = json.loads(stack_path.read_text())
    except (OSError, ValueError):
        # Missing or corrupt stack file: use the default silently.
        return default
    if not isinstance(stack, dict):
        return default
    model_info = stack.get("model", {})
    if not isinstance(model_info, dict):
        return default
    return model_info.get("model", default)
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
# ── Tool executors (sandboxed to project directory) ──
|
|
114
|
+
|
|
115
|
+
def _exec_tool(name: str, args: dict) -> str:
    """Execute a tool call and return the result string.

    Dispatches on *name*: write_file, read_file, run_command, list_files.
    Unknown tools and expected failures produce an "Error: ..." string
    rather than raising, so the agent loop can feed the result back to
    the model. Note: only write_file is confined to the project
    directory; the other tools intentionally keep their original, wider
    access.
    """
    if name == "write_file":
        p = Path(args["path"])
        # Containment check. is_relative_to (3.9+) replaces the old
        # str.startswith prefix test, which had a traversal bug: a
        # sibling like "/project-evil" passes startswith("/project").
        try:
            inside = p.resolve().is_relative_to(BASE_DIR.resolve())
        except OSError:
            inside = False
        if not inside:
            return "Error: path outside project directory"
        p.parent.mkdir(parents=True, exist_ok=True)
        p.write_text(args["content"])
        return f"Written {len(args['content'])} chars to {p}"
    elif name == "read_file":
        p = Path(args["path"])
        if not p.exists():
            return f"Error: {p} not found"
        # Cap output so a huge file can't blow up the chat context.
        return p.read_text()[:4000]
    elif name == "run_command":
        try:
            r = subprocess.run(
                args["command"], shell=True, capture_output=True,
                text=True, timeout=30, cwd=str(BASE_DIR))
            return (r.stdout + r.stderr)[:4000]
        except subprocess.TimeoutExpired:
            return "Error: command timed out (30s)"
    elif name == "list_files":
        p = Path(args.get("path", str(BASE_DIR)))
        if p.is_dir():
            # Truncate to 50 entries; names only, sorted for stability.
            return "\n".join(f.name for f in sorted(p.iterdir())[:50])
        return f"Error: {p} is not a directory"
    return f"Error: unknown tool {name}"
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
# ── Shared: urllib fallback for when requests is unavailable ──
|
|
146
|
+
|
|
147
|
+
def _ollama_chat_urllib(prompt: str, model: str) -> str:
|
|
148
|
+
"""Simple /api/chat call using only urllib (no tool use)."""
|
|
149
|
+
payload = json.dumps({
|
|
150
|
+
"model": model,
|
|
151
|
+
"messages": [{"role": "user", "content": prompt}],
|
|
152
|
+
"stream": False,
|
|
153
|
+
}).encode()
|
|
154
|
+
try:
|
|
155
|
+
req = urllib.request.Request(
|
|
156
|
+
f"{OLLAMA_URL}/api/chat", data=payload,
|
|
157
|
+
headers={"Content-Type": "application/json"})
|
|
158
|
+
resp = urllib.request.urlopen(req, timeout=120)
|
|
159
|
+
data = json.loads(resp.read())
|
|
160
|
+
return data.get("message", {}).get("content", "")
|
|
161
|
+
except Exception:
|
|
162
|
+
return ""
|
|
163
|
+
|
|
164
|
+
|
|
165
|
+
def _ollama_post(url: str, body: dict, timeout: int = 120) -> dict | None:
    """POST *body* as JSON to Ollama; return the decoded reply or None.

    Uses requests when it imported successfully at module load; the
    urllib fallback only runs when requests is unavailable. Any network
    or decode failure, and any non-200 status, yields None.
    """
    if requests:
        try:
            r = requests.post(url, json=body, timeout=timeout)
            return r.json() if r.status_code == 200 else None
        except Exception:
            return None
    # urllib fallback
    payload = json.dumps(body).encode()
    try:
        req = urllib.request.Request(
            url, data=payload, headers={"Content-Type": "application/json"})
        # Context manager ensures the HTTP response is closed; the
        # original never closed it.
        with urllib.request.urlopen(req, timeout=timeout) as resp:
            return json.loads(resp.read())
    except Exception:
        return None
|
|
182
|
+
|
|
183
|
+
|
|
184
|
+
# ── Claw Code backend: Ollama /api/chat with tool-use loop ──
|
|
185
|
+
|
|
186
|
+
def _clawcode_generate(prompt: str, model: str, max_turns: int = 5) -> str:
    """Claw Code style agent loop: chat + tool calls over Ollama.

    Runs up to *max_turns* chat rounds; when the model requests tools,
    they are executed locally and their results fed back. Falls back to
    the plain claudecode path when Ollama cannot be reached.
    """
    conversation = [{"role": "user", "content": prompt}]
    last_text = ""

    for _turn in range(max_turns):
        reply = _ollama_post(f"{OLLAMA_URL}/api/chat", {
            "model": model, "messages": conversation,
            "tools": AGENT_TOOLS, "stream": False,
        })
        # Ollama unreachable or errored: degrade to the simple backend.
        if not reply:
            return _claudecode_generate(prompt, model)

        message = reply.get("message", {})
        last_text = message.get("content", "")
        calls = message.get("tool_calls", [])

        # No tool requests means the model is done talking.
        if not calls:
            return last_text

        conversation.append(message)
        for call in calls:
            fn = call.get("function", {})
            raw_args = fn.get("arguments", {})
            # Some models return arguments as a JSON string.
            if isinstance(raw_args, str):
                try:
                    raw_args = json.loads(raw_args)
                except json.JSONDecodeError:
                    raw_args = {}
            outcome = _exec_tool(fn.get("name", ""), raw_args)
            conversation.append({"role": "tool", "content": outcome})

    return last_text if last_text else "Agent completed (max turns reached)."
|
|
220
|
+
|
|
221
|
+
|
|
222
|
+
# ── Claude Code backend: simple Ollama CLI generate ──
|
|
223
|
+
|
|
224
|
+
def _find_ollama() -> str:
|
|
225
|
+
"""Find the ollama binary path."""
|
|
226
|
+
for p in ["/usr/local/bin/ollama", "/opt/homebrew/bin/ollama", "ollama"]:
|
|
227
|
+
if Path(p).exists() or p == "ollama":
|
|
228
|
+
return p
|
|
229
|
+
return "ollama"
|
|
230
|
+
|
|
231
|
+
|
|
232
|
+
def _claudecode_generate(prompt: str, model: str) -> str:
    """Original simple path: `ollama run` via subprocess.

    Falls back to a direct HTTP call over urllib when the subprocess
    fails or exits non-zero.
    """
    completed = None
    try:
        completed = subprocess.run(
            [_find_ollama(), "run", model, prompt],
            capture_output=True, text=True, timeout=120,
        )
    except Exception:
        # Binary missing, timeout, etc. — fall through to HTTP.
        pass
    if completed is not None and completed.returncode == 0:
        return completed.stdout.strip()
    return _ollama_chat_urllib(prompt, model)
|
|
246
|
+
|
|
247
|
+
|
|
248
|
+
# ── Public API ──
|
|
249
|
+
|
|
250
|
+
def _inject_memory(prompt: str) -> str:
|
|
251
|
+
"""Prepend AGENTS.md context to the prompt if available."""
|
|
252
|
+
try:
|
|
253
|
+
from memory import load_memory_context
|
|
254
|
+
ctx = load_memory_context()
|
|
255
|
+
if ctx:
|
|
256
|
+
return f"AGENT MEMORY:\n{ctx}\n\n---\n\n{prompt}"
|
|
257
|
+
except Exception:
|
|
258
|
+
pass
|
|
259
|
+
return prompt
|
|
260
|
+
|
|
261
|
+
|
|
262
|
+
def generate(prompt: str, model: str | None = None) -> str:
    """Generate text using the configured backend.

    Reads AGENTS.md before every call and logs the outcome after. This
    is the single entry point all modules should use.
    """
    chosen = model if model is not None else get_model()
    backend = get_backend()
    augmented = _inject_memory(prompt)
    try:
        if backend == "clawcode":
            output = _clawcode_generate(augmented, chosen)
        else:
            output = _claudecode_generate(augmented, chosen)
    except Exception as exc:
        # Silent failure logging — memory is best-effort, never fatal.
        try:
            from memory import record_failure
            record_failure(f"generate({backend})", str(exc)[:200])
        except Exception:
            pass
        raise
    # Silent success logging, same best-effort contract.
    try:
        from memory import record_success
        summary = prompt[:80].replace("\n", " ")
        record_success(f"generate({backend})", chosen, summary)
    except Exception:
        pass
    return output
|
|
293
|
+
|
|
294
|
+
|
|
295
|
+
def generate_with_tools(prompt: str, model: str | None = None,
                        max_turns: int = 5) -> str:
    """Force the tool-use agent loop regardless of backend setting."""
    resolved = get_model() if model is None else model
    return _clawcode_generate(prompt, resolved, max_turns=max_turns)
|
|
@@ -0,0 +1,106 @@
|
|
|
1
|
+
"""
|
|
2
|
+
app.py — OpenClay entry point.
|
|
3
|
+
Detects hardware, installs what's needed, opens the browser.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import json
|
|
7
|
+
import os
|
|
8
|
+
import sys
|
|
9
|
+
import threading
|
|
10
|
+
import time
|
|
11
|
+
from pathlib import Path
|
|
12
|
+
|
|
13
|
+
BASE_DIR = Path(__file__).parent        # project root (directory of this file)
DATA_DIR = BASE_DIR / "data"            # runtime state, e.g. intent.json
QUEUE_DIR = BASE_DIR / "queue"          # work queue directory, created in demo mode
CONFIG_PATH = BASE_DIR / "config.json"  # optional app configuration
|
|
17
|
+
|
|
18
|
+
os.chdir(str(BASE_DIR))
|
|
19
|
+
sys.path.insert(0, str(BASE_DIR))
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def load_config() -> dict:
    """Load config.json from the project root; empty dict when absent."""
    if not CONFIG_PATH.exists():
        return {}
    with open(CONFIG_PATH) as fh:
        return json.load(fh)
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def setup():
    """Detect hardware, write to memory, install silently.

    Every step is best-effort: a failed import or a crashing helper is
    swallowed so startup never blocks on optional machinery. Returns
    the selected stack dict ({} when selection failed).
    """
    def _attempt(action, fallback=None):
        # Run one optional setup step; any failure yields the fallback.
        try:
            return action()
        except Exception:
            return fallback

    def _introspect():
        from introspect import run as introspect_run
        return introspect_run()

    def _record(profile):
        from memory import record_machine_profile
        record_machine_profile(profile)

    def _select():
        from selector import run as selector_run
        return selector_run()

    def _install():
        from installer import run as installer_run
        installer_run()

    # Hardware detection → AGENTS.md profile (first run populates,
    # later runs update) → stack selection → silent installation.
    hw_profile = _attempt(_introspect, fallback={})
    _attempt(lambda: _record(hw_profile))
    chosen_stack = _attempt(_select, fallback={})
    _attempt(_install)
    return chosen_stack
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def main():
    """Boot OpenClay: load config, run setup, start the agent, launch the UI.

    Ordering matters: demo intent must exist before setup/agent start,
    and the Gradio panel launch blocks until shutdown.
    """
    print("OpenClay is starting... ⚡")
    config = load_config()
    demo = config.get("demo_mode", False)

    # Demo mode: copy the sample intent into data/ so the agent loop
    # has something to pick up on first run.
    if demo:
        demo_path = BASE_DIR / config.get("demo_asset", "demo_assets/sample_intake.json")
        if demo_path.exists():
            DATA_DIR.mkdir(parents=True, exist_ok=True)
            QUEUE_DIR.mkdir(parents=True, exist_ok=True)
            with open(demo_path) as f:
                intent = json.load(f)
            with open(DATA_DIR / "intent.json", "w") as f:
                json.dump(intent, f, indent=2)

    # Setup: hardware detection + silent install (all best-effort).
    stack = setup()

    # Start agent loop in background; daemon thread dies with the process.
    # Missing agent module is tolerated — the UI still comes up.
    try:
        from agent import run_loop
        agent_thread = threading.Thread(target=run_loop, daemon=True)
        agent_thread.start()
    except Exception:
        pass

    # One clean line, then open the browser
    print("\n ✓ OpenClay ready → http://127.0.0.1:7861\n")

    # Launch panel (blocks until the server stops).
    try:
        from panel import launch
        launch()
    except ImportError:
        print(" Gradio not installed. Run: pip3 install gradio")
    except KeyboardInterrupt:
        print("\n Stopped.")
    except Exception as e:
        print(f" Error: {e}")
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
# Script entry point: run the full startup sequence.
if __name__ == "__main__":
    main()
|