langchain-skillkit 0.2.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29) hide show
  1. langchain_skillkit-0.2.0/.github/workflows/ci.yml +50 -0
  2. langchain_skillkit-0.2.0/.github/workflows/publish.yml +31 -0
  3. langchain_skillkit-0.2.0/.gitignore +11 -0
  4. langchain_skillkit-0.2.0/LICENSE +21 -0
  5. langchain_skillkit-0.2.0/PKG-INFO +235 -0
  6. langchain_skillkit-0.2.0/README.md +216 -0
  7. langchain_skillkit-0.2.0/examples/manual_wiring.py +60 -0
  8. langchain_skillkit-0.2.0/examples/multi_agent.py +69 -0
  9. langchain_skillkit-0.2.0/examples/standalone_node.py +35 -0
  10. langchain_skillkit-0.2.0/pyproject.toml +47 -0
  11. langchain_skillkit-0.2.0/src/langchain_skillkit/__init__.py +36 -0
  12. langchain_skillkit-0.2.0/src/langchain_skillkit/frontmatter.py +38 -0
  13. langchain_skillkit-0.2.0/src/langchain_skillkit/node.py +289 -0
  14. langchain_skillkit-0.2.0/src/langchain_skillkit/skill_kit.py +221 -0
  15. langchain_skillkit-0.2.0/src/langchain_skillkit/state.py +24 -0
  16. langchain_skillkit-0.2.0/src/langchain_skillkit/types.py +49 -0
  17. langchain_skillkit-0.2.0/src/langchain_skillkit/validate.py +46 -0
  18. langchain_skillkit-0.2.0/tests/__init__.py +0 -0
  19. langchain_skillkit-0.2.0/tests/fixtures/prompts/nodes/analyst.md +8 -0
  20. langchain_skillkit-0.2.0/tests/fixtures/prompts/nodes/researcher.md +9 -0
  21. langchain_skillkit-0.2.0/tests/fixtures/skills/market-sizing/SKILL.md +17 -0
  22. langchain_skillkit-0.2.0/tests/fixtures/skills/market-sizing/calculator.py +16 -0
  23. langchain_skillkit-0.2.0/tests/fixtures/skills_extra/competitive-analysis/SKILL.md +9 -0
  24. langchain_skillkit-0.2.0/tests/test_frontmatter.py +66 -0
  25. langchain_skillkit-0.2.0/tests/test_node.py +200 -0
  26. langchain_skillkit-0.2.0/tests/test_skill_kit.py +160 -0
  27. langchain_skillkit-0.2.0/tests/test_types.py +67 -0
  28. langchain_skillkit-0.2.0/tests/test_validate.py +122 -0
  29. langchain_skillkit-0.2.0/uv.lock +1402 -0
@@ -0,0 +1,50 @@
1
# GitHub Actions CI: test matrix plus lint/type checks, run on pushes to main
# and on every pull request.
name: CI

on:
  push:
    branches: [main]
  pull_request:

jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # Oldest and newest supported interpreters (package requires >=3.11).
        python: ["3.11", "3.13"]
    name: Test (Python ${{ matrix.python }})
    steps:
      - uses: actions/checkout@v4

      - uses: astral-sh/setup-uv@v5

      - name: Install Python
        run: uv python install ${{ matrix.python }}

      - name: Install dependencies
        run: uv sync --extra dev

      - name: Run tests
        run: uv run pytest --tb=short -q

  lint:
    runs-on: ubuntu-latest
    name: Lint & Types
    steps:
      - uses: actions/checkout@v4

      - uses: astral-sh/setup-uv@v5

      - name: Install Python
        # Lint/type-check against a single pinned interpreter.
        run: uv python install 3.13

      - name: Install dependencies
        run: uv sync --extra dev

      - name: Ruff check
        run: uv run ruff check src/ tests/

      - name: Ruff format
        run: uv run ruff format --check src/ tests/

      - name: Mypy
        run: uv run mypy src/
@@ -0,0 +1,31 @@
1
# Release workflow: on any v* tag push, build the distribution, publish it to
# PyPI via trusted publishing (OIDC — no stored API token), and create a
# matching GitHub Release with auto-generated notes.
name: Publish to PyPI

on:
  push:
    tags: ["v*"]

jobs:
  publish:
    runs-on: ubuntu-latest
    name: Build & Publish
    permissions:
      id-token: write  # required for PyPI trusted publishing (OIDC)
      contents: write  # required to create the GitHub Release
    steps:
      - uses: actions/checkout@v4

      - uses: astral-sh/setup-uv@v5

      - name: Install Python
        run: uv python install 3.13

      - name: Build package
        run: uv build

      - name: Publish to PyPI
        run: uv publish --trusted-publishing always

      - name: Create GitHub Release
        run: gh release create ${{ github.ref_name }} dist/* --generate-notes
        env:
          GH_TOKEN: ${{ github.token }}
@@ -0,0 +1,11 @@
1
+ __pycache__/
2
+ *.py[cod]
3
+ *.egg-info/
4
+ dist/
5
+ build/
6
+ .venv/
7
+ .pytest_cache/
8
+ .ruff_cache/
9
+ *.egg
10
+ .env
11
+ .start/
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 rsmdt
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,235 @@
1
+ Metadata-Version: 2.4
2
+ Name: langchain-skillkit
3
+ Version: 0.2.0
4
+ Summary: Skill-driven agent toolkit for LangGraph with semantic skill discovery
5
+ License-Expression: MIT
6
+ License-File: LICENSE
7
+ Requires-Python: >=3.11
8
+ Requires-Dist: langchain-core>=0.3
9
+ Requires-Dist: langgraph>=0.4
10
+ Requires-Dist: pyyaml>=6.0
11
+ Provides-Extra: dev
12
+ Requires-Dist: langchain-openai>=0.3; extra == 'dev'
13
+ Requires-Dist: mypy>=1.10; extra == 'dev'
14
+ Requires-Dist: pytest-asyncio>=0.24; extra == 'dev'
15
+ Requires-Dist: pytest>=8.0; extra == 'dev'
16
+ Requires-Dist: ruff>=0.8; extra == 'dev'
17
+ Requires-Dist: types-pyyaml>=6.0; extra == 'dev'
18
+ Description-Content-Type: text/markdown
19
+
20
+ # langchain-skillkit
21
+
22
+ Skill-driven agent toolkit for LangGraph with semantic skill discovery.
23
+
24
+ [![PyPI version](https://img.shields.io/pypi/v/langchain-skillkit.svg)](https://pypi.org/project/langchain-skillkit/)
25
+ [![Python](https://img.shields.io/pypi/pyversions/langchain-skillkit.svg)](https://pypi.org/project/langchain-skillkit/)
26
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
27
+
28
+ Give your LangGraph agents reusable, discoverable skills defined as markdown files. There are two ways to use it: `SkillKit` as a standalone toolkit you wire yourself, or the `node` metaclass that gives you a complete ReAct subgraph with dependency injection.
29
+
30
+ ## Table of Contents
31
+
32
+ - [Installation & Quick Start](#installation--quick-start)
33
+ - [Examples](#examples)
34
+ - [API Reference](#api-reference)
35
+ - [Security](#security)
36
+ - [Why This Toolkit?](#why-this-toolkit)
37
+ - [Contributing](#contributing)
38
+
39
+ ## Installation & Quick Start
40
+
41
+ Requires **Python 3.11+**, `langchain-core>=0.3`, `langgraph>=0.4`.
42
+
43
+ ```bash
44
+ pip install langchain-skillkit
45
+ ```
46
+
47
+ Skills follow the [AgentSkills.io specification](https://agentskills.io/specification) — each skill is a directory with a `SKILL.md` and optional reference files:
48
+
49
+ ```
50
+ skills/
51
+ market-sizing/
52
+ SKILL.md # Instructions + frontmatter (name, description)
53
+ calculator.py # Template — loaded on demand via SkillRead
54
+ competitive-analysis/
55
+ SKILL.md
56
+ swot-template.md # Reference doc — loaded on demand via SkillRead
57
+ examples/
58
+ output.json # Example output
59
+ ```
60
+
61
+ ```python
62
+ from langchain_core.tools import tool
63
+ from langchain_core.messages import HumanMessage
64
+ from langchain_openai import ChatOpenAI
65
+ from langgraph.graph import StateGraph, START, END
66
+ from langchain_skillkit import node, AgentState
67
+
68
+ # --- Define tools ---
69
+
70
+ @tool
71
+ def web_search(query: str) -> str:
72
+ """Search the web for information."""
73
+ return f"Results for: {query}"
74
+
75
+ # --- Declare an agent ---
76
+ # Subclassing `node` produces a CompiledStateGraph, not a class.
77
+ # The agent gets Skill and SkillRead tools automatically from the skills directory.
78
+
79
+ class researcher(node):
80
+ llm = ChatOpenAI(model="gpt-4o")
81
+ tools = [web_search]
82
+ skills = "skills/"
83
+
84
+ async def handler(state, *, llm):
85
+ response = await llm.ainvoke(state["messages"])
86
+ return {"messages": [response], "sender": "researcher"}
87
+
88
+ # --- Use standalone ---
89
+
90
+ result = researcher.invoke({"messages": [HumanMessage("Size the B2B SaaS market")]})
91
+
92
+ # --- Or compose into a parent graph ---
93
+
94
+ workflow = StateGraph(AgentState)
95
+ workflow.add_node("researcher", researcher)
96
+ workflow.add_edge(START, "researcher")
97
+ workflow.add_edge("researcher", END)
98
+ graph = workflow.compile()
99
+ ```
100
+
101
+ ## Examples
102
+
103
+ See [`examples/`](examples/) for complete working code:
104
+
105
+ - **[`standalone_node.py`](examples/standalone_node.py)** — Simplest usage: declare a node class, invoke it
106
+ - **[`manual_wiring.py`](examples/manual_wiring.py)** — Use `SkillKit` as a standalone toolkit with full graph control
107
+ - **[`multi_agent.py`](examples/multi_agent.py)** — Compose multiple agents in a parent graph
108
+
109
+ ## API Reference
110
+
111
+ ### `SkillKit(skills_dirs)`
112
+
113
+ Toolkit that provides `Skill` and `SkillRead` tools.
114
+
115
+ ```python
116
+ from langchain_skillkit import SkillKit
117
+
118
+ kit = SkillKit("skills/")
119
+ all_tools = [web_search] + kit.tools # [web_search, Skill, SkillRead]
120
+ ```
121
+
122
+ **Parameters:**
123
+ - `skills_dirs` (str | list[str]): Directory or list of directories containing skill subdirectories
124
+
125
+ **Properties:**
126
+
127
+ | Property | Type | Description |
128
+ |----------|------|-------------|
129
+ | `tools` | `list[BaseTool]` | `[Skill, SkillRead]` — built once, cached |
130
+
131
+ ### `node`
132
+
133
+ Declarative agent builder. Subclassing produces a `CompiledStateGraph`.
134
+
135
+ ```python
136
+ from langchain_skillkit import node
137
+
138
+ class my_agent(node):
139
+ llm = ChatOpenAI(model="gpt-4o") # Required
140
+ tools = [web_search] # Optional
141
+ skills = "skills/" # Optional
142
+
143
+ async def handler(state, *, llm):
144
+ response = await llm.ainvoke(state["messages"])
145
+ return {"messages": [response], "sender": "my_agent"}
146
+
147
+ my_agent.invoke({"messages": [HumanMessage("...")]})
148
+ ```
149
+
150
+ **Class attributes:**
151
+
152
+ | Attribute | Required | Description |
153
+ |-----------|----------|-------------|
154
+ | `llm` | Yes | Language model instance |
155
+ | `tools` | No | List of LangChain tools |
156
+ | `skills` | No | Path(s) to skill directories, or a `SkillKit` instance |
157
+
158
+ **Handler signature:**
159
+
160
+ ```python
161
+ async def handler(state, *, llm, tools, runtime): ...
162
+ ```
163
+
164
+ `state` is positional. Everything after `*` is keyword-only and injected by name — declare only what you need:
165
+
166
+ | Parameter | Type | Description |
167
+ |-----------|------|-------------|
168
+ | `state` | `dict` | LangGraph state (positional, required) |
169
+ | `llm` | `BaseChatModel` | LLM pre-bound with all tools via `bind_tools()` |
170
+ | `tools` | `list[BaseTool]` | All tools available to the agent |
171
+ | `runtime` | `Any` | LangGraph runtime context (passed through from config) |
172
+
173
+ ### `AgentState`
174
+
175
+ Minimal LangGraph state type for composing nodes in a parent graph:
176
+
177
+ ```python
178
+ from langchain_skillkit import AgentState
179
+ from langgraph.graph import StateGraph
180
+
181
+ workflow = StateGraph(AgentState)
182
+ workflow.add_node("researcher", researcher)
183
+ ```
184
+
185
+ Extend it with your own fields:
186
+
187
+ ```python
188
+ class MyState(AgentState):
189
+ current_project: str
190
+ iteration_count: int
191
+ ```
192
+
193
+ | Field | Type | Description |
194
+ |-------|------|-------------|
195
+ | `messages` | `Annotated[list, add_messages]` | Conversation history with LangGraph message reducer |
196
+ | `sender` | `str` | Name of the last node that produced output |
197
+
198
+ ## Security
199
+
200
+ - **Path traversal prevention**: File paths resolved to absolute and checked against skill directories.
201
+ - **Name validation**: Skill names validated per [AgentSkills.io spec](https://agentskills.io/specification) — lowercase alphanumeric + hyphens, 1-64 chars, must match directory name.
202
+ - **Tool scoping**: Each `node` subclass only has access to the tools declared in its `tools` attribute.
203
+
204
+ ## Why This Toolkit?
205
+
206
+ Developers building multi-agent LangGraph systems face these problems:
207
+
208
+ 1. **Prompt reuse is manual.** The same domain instructions get copy-pasted across agents with no versioning or structure.
209
+ 2. **Agents lack discoverability.** There's no standard way for an LLM to find and select relevant instructions at runtime.
210
+ 3. **Agent wiring is repetitive.** Every ReAct agent needs the same graph boilerplate: handler node, tool node, conditional edges.
211
+ 4. **Reference files are inaccessible.** Templates, scripts, and examples referenced in prompts can't be loaded on demand.
212
+
213
+ This toolkit solves all four with:
214
+
215
+ - Skill-as-markdown: reusable instructions with structured frontmatter
216
+ - Semantic discovery: the LLM matches user intent to skill descriptions at runtime
217
+ - Declarative agents: `class my_agent(node)` gives you a complete ReAct subgraph
218
+ - On-demand file loading: `SkillRead` lets the LLM pull reference files when needed
219
+ - AgentSkills.io spec compliance: portable skills that work across toolkits
220
+ - Full type safety: mypy strict mode support
221
+
222
+ ## Contributing
223
+
224
+ This toolkit is extracted from a production codebase and is actively maintained. Issues, feature requests, and pull requests are welcome.
225
+
226
+ ```bash
227
+ git clone https://github.com/rsmdt/langchain-skillkit.git
228
+ cd langchain-skillkit
229
+ uv sync --extra dev
230
+ uv run pytest --tb=short -q
231
+ uv run ruff check src/ tests/
232
+ uv run mypy src/
233
+ ```
234
+
235
+ GitHub: https://github.com/rsmdt/langchain-skillkit
@@ -0,0 +1,216 @@
1
+ # langchain-skillkit
2
+
3
+ Skill-driven agent toolkit for LangGraph with semantic skill discovery.
4
+
5
+ [![PyPI version](https://img.shields.io/pypi/v/langchain-skillkit.svg)](https://pypi.org/project/langchain-skillkit/)
6
+ [![Python](https://img.shields.io/pypi/pyversions/langchain-skillkit.svg)](https://pypi.org/project/langchain-skillkit/)
7
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
8
+
9
+ Give your LangGraph agents reusable, discoverable skills defined as markdown files. There are two ways to use it: `SkillKit` as a standalone toolkit you wire yourself, or the `node` metaclass that gives you a complete ReAct subgraph with dependency injection.
10
+
11
+ ## Table of Contents
12
+
13
+ - [Installation & Quick Start](#installation--quick-start)
14
+ - [Examples](#examples)
15
+ - [API Reference](#api-reference)
16
+ - [Security](#security)
17
+ - [Why This Toolkit?](#why-this-toolkit)
18
+ - [Contributing](#contributing)
19
+
20
+ ## Installation & Quick Start
21
+
22
+ Requires **Python 3.11+**, `langchain-core>=0.3`, `langgraph>=0.4`.
23
+
24
+ ```bash
25
+ pip install langchain-skillkit
26
+ ```
27
+
28
+ Skills follow the [AgentSkills.io specification](https://agentskills.io/specification) — each skill is a directory with a `SKILL.md` and optional reference files:
29
+
30
+ ```
31
+ skills/
32
+ market-sizing/
33
+ SKILL.md # Instructions + frontmatter (name, description)
34
+ calculator.py # Template — loaded on demand via SkillRead
35
+ competitive-analysis/
36
+ SKILL.md
37
+ swot-template.md # Reference doc — loaded on demand via SkillRead
38
+ examples/
39
+ output.json # Example output
40
+ ```
41
+
42
+ ```python
43
+ from langchain_core.tools import tool
44
+ from langchain_core.messages import HumanMessage
45
+ from langchain_openai import ChatOpenAI
46
+ from langgraph.graph import StateGraph, START, END
47
+ from langchain_skillkit import node, AgentState
48
+
49
+ # --- Define tools ---
50
+
51
+ @tool
52
+ def web_search(query: str) -> str:
53
+ """Search the web for information."""
54
+ return f"Results for: {query}"
55
+
56
+ # --- Declare an agent ---
57
+ # Subclassing `node` produces a CompiledStateGraph, not a class.
58
+ # The agent gets Skill and SkillRead tools automatically from the skills directory.
59
+
60
+ class researcher(node):
61
+ llm = ChatOpenAI(model="gpt-4o")
62
+ tools = [web_search]
63
+ skills = "skills/"
64
+
65
+ async def handler(state, *, llm):
66
+ response = await llm.ainvoke(state["messages"])
67
+ return {"messages": [response], "sender": "researcher"}
68
+
69
+ # --- Use standalone ---
70
+
71
+ result = researcher.invoke({"messages": [HumanMessage("Size the B2B SaaS market")]})
72
+
73
+ # --- Or compose into a parent graph ---
74
+
75
+ workflow = StateGraph(AgentState)
76
+ workflow.add_node("researcher", researcher)
77
+ workflow.add_edge(START, "researcher")
78
+ workflow.add_edge("researcher", END)
79
+ graph = workflow.compile()
80
+ ```
81
+
82
+ ## Examples
83
+
84
+ See [`examples/`](examples/) for complete working code:
85
+
86
+ - **[`standalone_node.py`](examples/standalone_node.py)** — Simplest usage: declare a node class, invoke it
87
+ - **[`manual_wiring.py`](examples/manual_wiring.py)** — Use `SkillKit` as a standalone toolkit with full graph control
88
+ - **[`multi_agent.py`](examples/multi_agent.py)** — Compose multiple agents in a parent graph
89
+
90
+ ## API Reference
91
+
92
+ ### `SkillKit(skills_dirs)`
93
+
94
+ Toolkit that provides `Skill` and `SkillRead` tools.
95
+
96
+ ```python
97
+ from langchain_skillkit import SkillKit
98
+
99
+ kit = SkillKit("skills/")
100
+ all_tools = [web_search] + kit.tools # [web_search, Skill, SkillRead]
101
+ ```
102
+
103
+ **Parameters:**
104
+ - `skills_dirs` (str | list[str]): Directory or list of directories containing skill subdirectories
105
+
106
+ **Properties:**
107
+
108
+ | Property | Type | Description |
109
+ |----------|------|-------------|
110
+ | `tools` | `list[BaseTool]` | `[Skill, SkillRead]` — built once, cached |
111
+
112
+ ### `node`
113
+
114
+ Declarative agent builder. Subclassing produces a `CompiledStateGraph`.
115
+
116
+ ```python
117
+ from langchain_skillkit import node
118
+
119
+ class my_agent(node):
120
+ llm = ChatOpenAI(model="gpt-4o") # Required
121
+ tools = [web_search] # Optional
122
+ skills = "skills/" # Optional
123
+
124
+ async def handler(state, *, llm):
125
+ response = await llm.ainvoke(state["messages"])
126
+ return {"messages": [response], "sender": "my_agent"}
127
+
128
+ my_agent.invoke({"messages": [HumanMessage("...")]})
129
+ ```
130
+
131
+ **Class attributes:**
132
+
133
+ | Attribute | Required | Description |
134
+ |-----------|----------|-------------|
135
+ | `llm` | Yes | Language model instance |
136
+ | `tools` | No | List of LangChain tools |
137
+ | `skills` | No | Path(s) to skill directories, or a `SkillKit` instance |
138
+
139
+ **Handler signature:**
140
+
141
+ ```python
142
+ async def handler(state, *, llm, tools, runtime): ...
143
+ ```
144
+
145
+ `state` is positional. Everything after `*` is keyword-only and injected by name — declare only what you need:
146
+
147
+ | Parameter | Type | Description |
148
+ |-----------|------|-------------|
149
+ | `state` | `dict` | LangGraph state (positional, required) |
150
+ | `llm` | `BaseChatModel` | LLM pre-bound with all tools via `bind_tools()` |
151
+ | `tools` | `list[BaseTool]` | All tools available to the agent |
152
+ | `runtime` | `Any` | LangGraph runtime context (passed through from config) |
153
+
154
+ ### `AgentState`
155
+
156
+ Minimal LangGraph state type for composing nodes in a parent graph:
157
+
158
+ ```python
159
+ from langchain_skillkit import AgentState
160
+ from langgraph.graph import StateGraph
161
+
162
+ workflow = StateGraph(AgentState)
163
+ workflow.add_node("researcher", researcher)
164
+ ```
165
+
166
+ Extend it with your own fields:
167
+
168
+ ```python
169
+ class MyState(AgentState):
170
+ current_project: str
171
+ iteration_count: int
172
+ ```
173
+
174
+ | Field | Type | Description |
175
+ |-------|------|-------------|
176
+ | `messages` | `Annotated[list, add_messages]` | Conversation history with LangGraph message reducer |
177
+ | `sender` | `str` | Name of the last node that produced output |
178
+
179
+ ## Security
180
+
181
+ - **Path traversal prevention**: File paths resolved to absolute and checked against skill directories.
182
+ - **Name validation**: Skill names validated per [AgentSkills.io spec](https://agentskills.io/specification) — lowercase alphanumeric + hyphens, 1-64 chars, must match directory name.
183
+ - **Tool scoping**: Each `node` subclass only has access to the tools declared in its `tools` attribute.
184
+
185
+ ## Why This Toolkit?
186
+
187
+ Developers building multi-agent LangGraph systems face these problems:
188
+
189
+ 1. **Prompt reuse is manual.** The same domain instructions get copy-pasted across agents with no versioning or structure.
190
+ 2. **Agents lack discoverability.** There's no standard way for an LLM to find and select relevant instructions at runtime.
191
+ 3. **Agent wiring is repetitive.** Every ReAct agent needs the same graph boilerplate: handler node, tool node, conditional edges.
192
+ 4. **Reference files are inaccessible.** Templates, scripts, and examples referenced in prompts can't be loaded on demand.
193
+
194
+ This toolkit solves all four with:
195
+
196
+ - Skill-as-markdown: reusable instructions with structured frontmatter
197
+ - Semantic discovery: the LLM matches user intent to skill descriptions at runtime
198
+ - Declarative agents: `class my_agent(node)` gives you a complete ReAct subgraph
199
+ - On-demand file loading: `SkillRead` lets the LLM pull reference files when needed
200
+ - AgentSkills.io spec compliance: portable skills that work across toolkits
201
+ - Full type safety: mypy strict mode support
202
+
203
+ ## Contributing
204
+
205
+ This toolkit is extracted from a production codebase and is actively maintained. Issues, feature requests, and pull requests are welcome.
206
+
207
+ ```bash
208
+ git clone https://github.com/rsmdt/langchain-skillkit.git
209
+ cd langchain-skillkit
210
+ uv sync --extra dev
211
+ uv run pytest --tb=short -q
212
+ uv run ruff check src/ tests/
213
+ uv run mypy src/
214
+ ```
215
+
216
+ GitHub: https://github.com/rsmdt/langchain-skillkit
@@ -0,0 +1,60 @@
1
+ """Manual wiring — use SkillKit as a standalone toolkit.
2
+
3
+ Use this approach when you want full control over your LangGraph graph
4
+ and just need the Skill + SkillRead tools added to your tool list.
5
+ """
6
+
7
+ from langchain_core.messages import AIMessage, HumanMessage
8
+ from langchain_core.tools import tool
9
+ from langchain_openai import ChatOpenAI
10
+ from langgraph.graph import END, START, StateGraph
11
+ from langgraph.prebuilt import ToolNode
12
+
13
+ from langchain_skillkit import AgentState, SkillKit
14
+
15
+
16
@tool
def web_search(query: str) -> str:
    """Search the web for information."""
    # Stub implementation: echoes the query back so the example runs offline.
    # NOTE: the docstring above is the tool description shown to the LLM.
    return "Results for: {}".format(query)
20
+
21
+
22
llm = ChatOpenAI(model="gpt-4o")
kit = SkillKit("skills/")

# Combine your tools with skill tools
all_tools = [web_search] + kit.tools  # yields [web_search, Skill, SkillRead]
# Bind every tool up front so the model can emit tool calls for any of them.
bound_llm = llm.bind_tools(all_tools)
28
+
29
+
30
async def researcher(state: AgentState) -> dict:
    """Research node that uses skills for methodology.

    Invokes the tool-bound LLM on the accumulated conversation and returns
    the partial state update that LangGraph merges back into the graph state.
    """
    response = await bound_llm.ainvoke(state["messages"])
    return {"messages": [response], "sender": "researcher"}
34
+
35
+
36
def should_continue(state: AgentState) -> str:
    """Route to the tool node while the last message still requests tool calls."""
    newest = state["messages"][-1]
    # Missing attribute and empty list are both treated as "no tool calls".
    return "tools" if getattr(newest, "tool_calls", None) else END
41
+
42
+
43
# Build the graph manually
workflow = StateGraph(AgentState)
workflow.add_node("researcher", researcher)
workflow.add_node("tools", ToolNode(all_tools))

workflow.add_edge(START, "researcher")
# ReAct loop: bounce between the agent and the tool executor until the agent
# stops emitting tool calls, then should_continue routes to END.
workflow.add_conditional_edges("researcher", should_continue, ["tools", END])
workflow.add_edge("tools", "researcher")

graph = workflow.compile()
53
+
54
if __name__ == "__main__":
    import asyncio

    # Run one full agent cycle and print the final assistant reply.
    result = asyncio.run(
        graph.ainvoke({"messages": [HumanMessage("Size the B2B SaaS market")]})
    )
    print(result["messages"][-1].content)
@@ -0,0 +1,69 @@
1
+ """Multi-agent graph — compose multiple node subclasses.
2
+
3
+ Each node metaclass produces a self-contained ReAct subgraph with its own
4
+ tools and skill access. Compose them in a parent graph for multi-agent workflows.
5
+ """
6
+
7
+ from langchain_core.messages import HumanMessage
8
+ from langchain_core.tools import tool
9
+ from langchain_openai import ChatOpenAI
10
+ from langgraph.graph import END, START, StateGraph
11
+
12
+ from langchain_skillkit import AgentState, node
13
+
14
+
15
@tool
def web_search(query: str) -> str:
    """Search the web for information."""
    # Stub implementation: echoes the query back so the example runs offline.
    # NOTE: the docstring above is the tool description shown to the LLM.
    return "Results for: {}".format(query)
19
+
20
+
21
@tool
def sql_query(query: str) -> str:
    """Run a SQL query against the database."""
    # Stub implementation: echoes the query so the example needs no database.
    # NOTE: the docstring above is the tool description shown to the LLM.
    return "SQL results for: {}".format(query)
25
+
26
+
27
@tool
def calculate(expression: str) -> str:
    """Evaluate a mathematical expression."""
    # SECURITY: `expression` is authored by the LLM (and transitively by the
    # user), so it must never reach eval() — that allows arbitrary code
    # execution. Parse it with `ast` and evaluate only whitelisted
    # arithmetic node types instead. Raises ValueError (and SyntaxError from
    # ast.parse) on anything that is not plain arithmetic.
    import ast
    import operator

    binary = {
        ast.Add: operator.add,
        ast.Sub: operator.sub,
        ast.Mult: operator.mul,
        ast.Div: operator.truediv,
        ast.FloorDiv: operator.floordiv,
        ast.Mod: operator.mod,
        ast.Pow: operator.pow,
    }
    unary = {ast.UAdd: operator.pos, ast.USub: operator.neg}

    def evaluate(node: ast.expr) -> int | float:
        # Numeric literal.
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        # Whitelisted binary operator, e.g. 2 + 3 * 4.
        if isinstance(node, ast.BinOp) and type(node.op) in binary:
            return binary[type(node.op)](evaluate(node.left), evaluate(node.right))
        # Whitelisted unary operator, e.g. -5.
        if isinstance(node, ast.UnaryOp) and type(node.op) in unary:
            return unary[type(node.op)](evaluate(node.operand))
        raise ValueError("unsupported expression")

    return str(evaluate(ast.parse(expression, mode="eval").body))
31
+
32
+
33
# Web-research agent. Subclassing `node` produces a compiled ReAct subgraph,
# so the name `researcher` below refers to a CompiledStateGraph, not a class.
class researcher(node):
    llm = ChatOpenAI(model="gpt-4o")
    tools = [web_search]
    skills = "skills/"  # Skill/SkillRead tools are added from this directory

    # `state` is positional; keyword-only params after `*` are injected by
    # name — here only the tool-bound `llm` is requested.
    async def handler(state, *, llm):
        response = await llm.ainvoke(state["messages"])
        return {"messages": [response], "sender": "researcher"}
41
+
42
+
43
# Data-analysis agent: same pattern as `researcher`, but scoped to the SQL
# and calculator tools. Also becomes a CompiledStateGraph via the metaclass.
class analyst(node):
    llm = ChatOpenAI(model="gpt-4o")
    tools = [sql_query, calculate]
    skills = "skills/"  # Skill/SkillRead tools are added from this directory

    async def handler(state, *, llm):
        response = await llm.ainvoke(state["messages"])
        return {"messages": [response], "sender": "analyst"}
51
+
52
+
53
# Compose in a parent graph
workflow = StateGraph(AgentState)
workflow.add_node("researcher", researcher)
workflow.add_node("analyst", analyst)

# Sequential pipeline: research first, then analysis of the results.
workflow.add_edge(START, "researcher")
workflow.add_edge("researcher", "analyst")
workflow.add_edge("analyst", END)

graph = workflow.compile()
63
+
64
if __name__ == "__main__":
    result = graph.invoke(
        {"messages": [HumanMessage("Analyze the European SaaS market")]}
    )
    # Print a truncated transcript of the whole multi-agent conversation.
    for msg in result["messages"]:
        print(f"[{msg.type}] {msg.content[:100]}")