explainable-agent 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. explainable_agent-0.1.0/LICENSE +21 -0
  2. explainable_agent-0.1.0/PKG-INFO +113 -0
  3. explainable_agent-0.1.0/README.md +89 -0
  4. explainable_agent-0.1.0/explainable_agent/__init__.py +8 -0
  5. explainable_agent-0.1.0/explainable_agent/agent.py +632 -0
  6. explainable_agent-0.1.0/explainable_agent/cli.py +139 -0
  7. explainable_agent-0.1.0/explainable_agent/config.py +74 -0
  8. explainable_agent-0.1.0/explainable_agent/dataset_adapters.py +105 -0
  9. explainable_agent-0.1.0/explainable_agent/eval_tool_calls.py +651 -0
  10. explainable_agent-0.1.0/explainable_agent/json_utils.py +65 -0
  11. explainable_agent-0.1.0/explainable_agent/openai_client.py +293 -0
  12. explainable_agent-0.1.0/explainable_agent/report.py +204 -0
  13. explainable_agent-0.1.0/explainable_agent/schemas.py +91 -0
  14. explainable_agent-0.1.0/explainable_agent/tools.py +493 -0
  15. explainable_agent-0.1.0/explainable_agent.egg-info/PKG-INFO +113 -0
  16. explainable_agent-0.1.0/explainable_agent.egg-info/SOURCES.txt +28 -0
  17. explainable_agent-0.1.0/explainable_agent.egg-info/dependency_links.txt +1 -0
  18. explainable_agent-0.1.0/explainable_agent.egg-info/entry_points.txt +2 -0
  19. explainable_agent-0.1.0/explainable_agent.egg-info/requires.txt +11 -0
  20. explainable_agent-0.1.0/explainable_agent.egg-info/top_level.txt +1 -0
  21. explainable_agent-0.1.0/pyproject.toml +45 -0
  22. explainable_agent-0.1.0/setup.cfg +4 -0
  23. explainable_agent-0.1.0/tests/test_agent.py +134 -0
  24. explainable_agent-0.1.0/tests/test_dataset_adapters.py +39 -0
  25. explainable_agent-0.1.0/tests/test_eval_actionable_plan.py +28 -0
  26. explainable_agent-0.1.0/tests/test_eval_tool_calls.py +173 -0
  27. explainable_agent-0.1.0/tests/test_json_utils.py +15 -0
  28. explainable_agent-0.1.0/tests/test_report.py +51 -0
  29. explainable_agent-0.1.0/tests/test_sqlite_tools.py +51 -0
  30. explainable_agent-0.1.0/tests/test_tools.py +52 -0
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2026 Emre
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
@@ -0,0 +1,113 @@
+ Metadata-Version: 2.4
+ Name: explainable-agent
+ Version: 0.1.0
+ Summary: A local-first, explainable AI agent framework with self-healing, detailed error diagnostics, and interactive tool-calling traces.
+ Author: Emre
+ License: MIT
+ Project-URL: Repository, https://github.com/emredeveloper/explainable-agent-lab
+ Project-URL: Issues, https://github.com/emredeveloper/explainable-agent-lab/issues
+ Keywords: agent,llm,tool-calling,evaluation,xai,self-healing
+ Requires-Python: >=3.10
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: openai>=1.54.0
+ Requires-Dist: json-repair>=0.30.3
+ Requires-Dist: tenacity>=8.2.3
+ Requires-Dist: pydantic>=2.9.0
+ Requires-Dist: ddgs>=0.1.0
+ Requires-Dist: rich>=13.0.0
+ Provides-Extra: dev
+ Requires-Dist: pytest>=8.3.0; extra == "dev"
+ Requires-Dist: twine>=5.1.1; extra == "dev"
+ Requires-Dist: build>=1.2.1; extra == "dev"
+ Dynamic: license-file
+
+ # 🔬 Explainable Agent Lab
+
+ > A local-first, explainable agent framework designed to guide developers in building robust AI agents.
+
+ Building reliable agents is hard. LLMs hallucinate, get stuck in infinite loops, or fail to parse tools correctly. **Explainable Agent Lab** is built to solve this by focusing on **explainability and guidance**.
+
+ ✨ **Key Features:**
+ - **Show the Hidden Errors:** Reveal exactly where and why an agent fails (e.g., low confidence, schema violations).
+ - **Self-Healing:** The agent automatically analyzes its own errors and proposes alternative tool-based solutions.
+ - **Visual Terminal Tracking:** Step-by-step interactive and colorful tracking using the `rich` library (`--verbose`).
+ - **Detailed Diagnostic Reports:** Actionable suggestions on hallucination risks, loop patterns, and prompt improvements.
+
+ ---
+
+ ## 🚀 Quick Start
+
+ ### 1. Install
+ ```bash
+ # Recommended: Editable install
+ python -m venv .venv
+ # On Windows:
+ .venv\Scripts\activate
+ # On Mac/Linux:
+ # source .venv/bin/activate
+
+ pip install -e .[dev]
+ ```
+
+ ### 2. Connect Your Local LLM
+ You can use any OpenAI-compatible local server like **Ollama** or **LM Studio**.
+
+ - **Ollama:** `http://localhost:11434/v1` (e.g., model: `ministral-3:14b`)
+ - **LM Studio:** `http://localhost:1234/v1` (e.g., model: `gpt-oss-20b`)
+
+ *Tip: Copy `.env.example` to `.env` to set your defaults.*
+
+ ### 3. Run the Agent
+ **Example using Ollama:**
+ ```bash
+ python -m explainable_agent.cli \
+     --base-url http://localhost:11434/v1 \
+     --model ministral-3:14b \
+     --task "calculate_math: (215*4)-12" \
+     --verbose
+ ```
+
+ ---
+
+ ## 💻 Using the Python API
+
+ Easily integrate the agent into your codebase or create custom tools using the `@define_tool` decorator.
+
+ Check out the `examples/` directory:
+ - [`examples/basic_usage.py`](examples/basic_usage.py) - Initialize and run the agent programmatically.
+ - [`examples/custom_tool_usage.py`](examples/custom_tool_usage.py) - Learn how to build custom tools and watch the agent self-heal from errors.
+
+ Run an example:
+ ```bash
+ python examples/custom_tool_usage.py
+ ```
+
+ ---
+
+ ## 📊 Evaluation & Custom Datasets
+
+ Evaluate your fine-tuned models or custom datasets easily. The pipeline parses messy outputs, repairs broken JSON, and generates actionable Markdown reports.
+
+ **1. Create a `.jsonl` dataset** (See `examples/custom_eval_sample.jsonl`)
+
+ **2. Run the evaluation:**
+ ```bash
+ python scripts/eval_hf_tool_calls.py \
+     --dataset examples/custom_eval_sample.jsonl \
+     --model ministral-3:14b
+ ```
+
+ We also support standard benchmarks out of the box:
+ - **HF Tool Calls:** `data/evals/hf_xlam_fc_sample.jsonl`
+ - **BFCL SQL:** `data/evals/bfcl_sql/BFCL_v3_sql.json`
+ - **SWE-bench Lite:** `data/evals/swebench_lite_test.jsonl`
+
+ ---
+
+ ## 🛠️ Built-in Tools
+ The agent comes with out-of-the-box tools ready to use:
+ `duckduckgo_search`, `calculate_math`, `read_text_file`, `list_workspace_files`, `now_utc`, `sqlite_init_demo`, `sqlite_list_tables`, `sqlite_describe_table`, `sqlite_query`, `sqlite_execute`.
+
+ ---
+ *License: MIT | Current Release: v0.1.0*
@@ -0,0 +1,89 @@
+ # 🔬 Explainable Agent Lab
+
+ > A local-first, explainable agent framework designed to guide developers in building robust AI agents.
+
+ Building reliable agents is hard. LLMs hallucinate, get stuck in infinite loops, or fail to parse tools correctly. **Explainable Agent Lab** is built to solve this by focusing on **explainability and guidance**.
+
+ ✨ **Key Features:**
+ - **Show the Hidden Errors:** Reveal exactly where and why an agent fails (e.g., low confidence, schema violations).
+ - **Self-Healing:** The agent automatically analyzes its own errors and proposes alternative tool-based solutions.
+ - **Visual Terminal Tracking:** Step-by-step interactive and colorful tracking using the `rich` library (`--verbose`).
+ - **Detailed Diagnostic Reports:** Actionable suggestions on hallucination risks, loop patterns, and prompt improvements.
+
+ ---
+
+ ## 🚀 Quick Start
+
+ ### 1. Install
+ ```bash
+ # Recommended: Editable install
+ python -m venv .venv
+ # On Windows:
+ .venv\Scripts\activate
+ # On Mac/Linux:
+ # source .venv/bin/activate
+
+ pip install -e .[dev]
+ ```
+
+ ### 2. Connect Your Local LLM
+ You can use any OpenAI-compatible local server like **Ollama** or **LM Studio**.
+
+ - **Ollama:** `http://localhost:11434/v1` (e.g., model: `ministral-3:14b`)
+ - **LM Studio:** `http://localhost:1234/v1` (e.g., model: `gpt-oss-20b`)
+
+ *Tip: Copy `.env.example` to `.env` to set your defaults.*
+
+ ### 3. Run the Agent
+ **Example using Ollama:**
+ ```bash
+ python -m explainable_agent.cli \
+     --base-url http://localhost:11434/v1 \
+     --model ministral-3:14b \
+     --task "calculate_math: (215*4)-12" \
+     --verbose
+ ```
+
+ ---
+
+ ## 💻 Using the Python API
+
+ Easily integrate the agent into your codebase or create custom tools using the `@define_tool` decorator.
+
+ Check out the `examples/` directory:
+ - [`examples/basic_usage.py`](examples/basic_usage.py) - Initialize and run the agent programmatically.
+ - [`examples/custom_tool_usage.py`](examples/custom_tool_usage.py) - Learn how to build custom tools and watch the agent self-heal from errors.
+
+ Run an example:
+ ```bash
+ python examples/custom_tool_usage.py
+ ```
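The bundled examples are the authoritative reference for the programmatic API. As a rough sketch of what usage could look like — the import path of `define_tool`, the `Settings` fields, and the `ExplainableAgent` constructor and `run` signature below are all assumptions, not taken from the package source:

```python
# Hypothetical sketch only; see examples/basic_usage.py and
# examples/custom_tool_usage.py in the repository for the real API.
from explainable_agent import ExplainableAgent, Settings
from explainable_agent.tools import define_tool  # assumed import location


@define_tool  # assumed usage: registers the function as a tool the agent can call
def word_count(text: str) -> int:
    """Count whitespace-separated words in a piece of text."""
    return len(text.split())


# Point the agent at a local OpenAI-compatible server (field names are assumptions).
settings = Settings(base_url="http://localhost:11434/v1", model="ministral-3:14b")
agent = ExplainableAgent(settings)

result = agent.run("word_count: How many words are in this sentence?")
print(result)
```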
+
+ ---
+
+ ## 📊 Evaluation & Custom Datasets
+
+ Evaluate your fine-tuned models or custom datasets easily. The pipeline parses messy outputs, repairs broken JSON, and generates actionable Markdown reports.
+
+ **1. Create a `.jsonl` dataset** (See `examples/custom_eval_sample.jsonl`)
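The record schema is defined by `examples/custom_eval_sample.jsonl` and the evaluation code in `eval_tool_calls.py`; the snippet below is only a hypothetical illustration of building a tool-calling eval set in JSONL form, and the field names (`prompt`, `tools`, `expected_tool_calls`) are assumptions rather than the package's actual schema:

```python
# Hypothetical record layout; copy the real schema from examples/custom_eval_sample.jsonl.
import json

records = [
    {
        "prompt": "What is (215 * 4) - 12?",
        "tools": ["calculate_math"],
        "expected_tool_calls": [
            {"name": "calculate_math", "arguments": {"expression": "(215*4)-12"}}
        ],
    }
]

# JSONL: one JSON object per line.
with open("my_eval.jsonl", "w", encoding="utf-8") as f:
    for record in records:
        f.write(json.dumps(record, ensure_ascii=False) + "\n")
```

The resulting file would then be passed via `--dataset` in step 2.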
+
+ **2. Run the evaluation:**
+ ```bash
+ python scripts/eval_hf_tool_calls.py \
+     --dataset examples/custom_eval_sample.jsonl \
+     --model ministral-3:14b
+ ```
+
+ We also support standard benchmarks out of the box:
+ - **HF Tool Calls:** `data/evals/hf_xlam_fc_sample.jsonl`
+ - **BFCL SQL:** `data/evals/bfcl_sql/BFCL_v3_sql.json`
+ - **SWE-bench Lite:** `data/evals/swebench_lite_test.jsonl`
+
+ ---
+
+ ## 🛠️ Built-in Tools
+ The agent comes with out-of-the-box tools ready to use:
+ `duckduckgo_search`, `calculate_math`, `read_text_file`, `list_workspace_files`, `now_utc`, `sqlite_init_demo`, `sqlite_list_tables`, `sqlite_describe_table`, `sqlite_query`, `sqlite_execute`.
+
+ ---
+ *License: MIT | Current Release: v0.1.0*
@@ -0,0 +1,8 @@
+ """Explainable local agent MVP."""
+
+ from .agent import ExplainableAgent
+ from .config import Settings
+
+ __version__ = "0.1.0"
+
+ __all__ = ["ExplainableAgent", "Settings", "__version__"]
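A minimal usage note on the public surface shown above: only the names listed in `__all__` are guaranteed importable from the package root, so a quick smoke test could be as small as the following (constructor arguments are deliberately omitted, since they are not visible in this diff):

```python
# Exercises only the names exported by explainable_agent/__init__.py above.
import explainable_agent
from explainable_agent import ExplainableAgent, Settings

print(explainable_agent.__version__)          # -> "0.1.0"
print(ExplainableAgent.__name__, Settings.__name__)
```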