copex 0.8.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of copex might be problematic. Click here for more details.
- copex/__init__.py +69 -0
- copex/checkpoint.py +445 -0
- copex/cli.py +1106 -0
- copex/client.py +725 -0
- copex/config.py +311 -0
- copex/mcp.py +561 -0
- copex/metrics.py +383 -0
- copex/models.py +50 -0
- copex/persistence.py +324 -0
- copex/plan.py +358 -0
- copex/ralph.py +247 -0
- copex/tools.py +404 -0
- copex/ui.py +971 -0
- copex-0.8.4.dist-info/METADATA +511 -0
- copex-0.8.4.dist-info/RECORD +18 -0
- copex-0.8.4.dist-info/WHEEL +4 -0
- copex-0.8.4.dist-info/entry_points.txt +2 -0
- copex-0.8.4.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,511 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: copex
|
|
3
|
+
Version: 0.8.4
|
|
4
|
+
Summary: Copilot Extended - Resilient wrapper for GitHub Copilot SDK with auto-retry, Ralph Wiggum loops, and more
|
|
5
|
+
Project-URL: Homepage, https://github.com/Arthur742Ramos/copex
|
|
6
|
+
Project-URL: Repository, https://github.com/Arthur742Ramos/copex
|
|
7
|
+
Project-URL: Issues, https://github.com/Arthur742Ramos/copex/issues
|
|
8
|
+
Author-email: Arthur Ramos <arthur742ramos@users.noreply.github.com>
|
|
9
|
+
License-Expression: MIT
|
|
10
|
+
License-File: LICENSE
|
|
11
|
+
Keywords: ai,copex,copilot,github,ralph-wiggum,retry,sdk
|
|
12
|
+
Classifier: Development Status :: 3 - Alpha
|
|
13
|
+
Classifier: Intended Audience :: Developers
|
|
14
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
15
|
+
Classifier: Programming Language :: Python :: 3
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
19
|
+
Requires-Python: >=3.10
|
|
20
|
+
Requires-Dist: github-copilot-sdk>=0.1.0
|
|
21
|
+
Requires-Dist: prompt-toolkit>=3.0.0
|
|
22
|
+
Requires-Dist: pydantic>=2.0.0
|
|
23
|
+
Requires-Dist: rich>=13.0.0
|
|
24
|
+
Requires-Dist: tomli-w>=1.0.0
|
|
25
|
+
Requires-Dist: typer>=0.9.0
|
|
26
|
+
Provides-Extra: dev
|
|
27
|
+
Requires-Dist: pytest; extra == 'dev'
|
|
28
|
+
Requires-Dist: pytest-asyncio; extra == 'dev'
|
|
29
|
+
Requires-Dist: ruff; extra == 'dev'
|
|
30
|
+
Description-Content-Type: text/markdown
|
|
31
|
+
|
|
32
|
+
# Copex - Copilot Extended
|
|
33
|
+
|
|
34
|
+
[![PyPI version](https://badge.fury.io/py/copex.svg)](https://badge.fury.io/py/copex)
|
|
35
|
+
[![Python 3.10+](https://img.shields.io/badge/python-3.10%2B-blue.svg)](https://www.python.org/downloads/)
|
|
36
|
+
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
|
|
37
|
+
[![Tests](https://github.com/Arthur742Ramos/copex/actions/workflows/test.yml/badge.svg)](https://github.com/Arthur742Ramos/copex/actions/workflows/test.yml)
|
|
38
|
+
|
|
39
|
+
A resilient Python wrapper for the GitHub Copilot SDK with automatic retry, Ralph Wiggum loops, session persistence, metrics, parallel tools, and MCP integration.
|
|
40
|
+
|
|
41
|
+
## Features
|
|
42
|
+
|
|
43
|
+
- 🔄 **Automatic Retry** - Handles 500 errors, rate limits, and transient failures with exponential backoff
|
|
44
|
+
- 🚀 **Auto-Continue** - Automatically sends "Keep going" on any error
|
|
45
|
+
- 🔁 **Ralph Wiggum Loops** - Iterative AI development with completion promises
|
|
46
|
+
- 💾 **Session Persistence** - Save/restore conversation history to disk
|
|
47
|
+
- 📍 **Checkpointing** - Resume interrupted Ralph loops after crashes
|
|
48
|
+
- 📊 **Metrics & Logging** - Track token usage, timing, and costs
|
|
49
|
+
- ⚡ **Parallel Tools** - Execute multiple tool calls concurrently
|
|
50
|
+
- 🔌 **MCP Integration** - Connect to external MCP servers for extended capabilities
|
|
51
|
+
- 🎯 **Model Selection** - Easy switching between GPT-5.2-codex, Claude, Gemini, and more
|
|
52
|
+
- 🧠 **Reasoning Effort** - Configure reasoning depth from `none` to `xhigh`
|
|
53
|
+
- 💻 **Beautiful CLI** - Rich terminal output with markdown rendering
|
|
54
|
+
|
|
55
|
+
## Installation
|
|
56
|
+
|
|
57
|
+
```bash
|
|
58
|
+
pip install copex
|
|
59
|
+
```
|
|
60
|
+
|
|
61
|
+
Or install from source:
|
|
62
|
+
|
|
63
|
+
```bash
|
|
64
|
+
git clone https://github.com/Arthur742Ramos/copex
|
|
65
|
+
cd copex
|
|
66
|
+
pip install -e .
|
|
67
|
+
```
|
|
68
|
+
|
|
69
|
+
## Prerequisites
|
|
70
|
+
|
|
71
|
+
- Python 3.10+
|
|
72
|
+
- [GitHub Copilot CLI](https://docs.github.com/en/copilot/how-tos/set-up/install-copilot-cli) installed
|
|
73
|
+
- Active Copilot subscription
|
|
74
|
+
|
|
75
|
+
**Note:** Copex automatically detects the Copilot CLI path on Windows, macOS, and Linux. If auto-detection fails, you can specify the path manually:
|
|
76
|
+
|
|
77
|
+
```python
|
|
78
|
+
config = CopexConfig(cli_path="/path/to/copilot")
|
|
79
|
+
```
|
|
80
|
+
|
|
81
|
+
Or check detection:
|
|
82
|
+
|
|
83
|
+
```python
|
|
84
|
+
from copex import find_copilot_cli
|
|
85
|
+
print(f"Found CLI at: {find_copilot_cli()}")
|
|
86
|
+
```
|
|
87
|
+
|
|
88
|
+
## Quick Start
|
|
89
|
+
|
|
90
|
+
### Python API
|
|
91
|
+
|
|
92
|
+
```python
|
|
93
|
+
import asyncio
|
|
94
|
+
from copex import Copex, CopexConfig, Model, ReasoningEffort
|
|
95
|
+
|
|
96
|
+
async def main():
|
|
97
|
+
# Simple usage with defaults (gpt-5.2-codex, xhigh reasoning)
|
|
98
|
+
async with Copex() as copex:
|
|
99
|
+
response = await copex.chat("Explain async/await in Python")
|
|
100
|
+
print(response)
|
|
101
|
+
|
|
102
|
+
# Custom configuration
|
|
103
|
+
config = CopexConfig(
|
|
104
|
+
model=Model.GPT_5_2_CODEX,
|
|
105
|
+
reasoning_effort=ReasoningEffort.XHIGH,
|
|
106
|
+
retry={"max_retries": 10, "base_delay": 2.0},
|
|
107
|
+
auto_continue=True,
|
|
108
|
+
)
|
|
109
|
+
|
|
110
|
+
async with Copex(config) as copex:
|
|
111
|
+
# Get full response object with metadata
|
|
112
|
+
response = await copex.send("Write a binary search function")
|
|
113
|
+
print(f"Content: {response.content}")
|
|
114
|
+
print(f"Reasoning: {response.reasoning}")
|
|
115
|
+
print(f"Retries needed: {response.retries}")
|
|
116
|
+
|
|
117
|
+
asyncio.run(main())
|
|
118
|
+
```
|
|
119
|
+
|
|
120
|
+
### Ralph Wiggum Loops
|
|
121
|
+
|
|
122
|
+
The [Ralph Wiggum technique](https://ghuntley.com/ralph/) enables iterative AI development:
|
|
123
|
+
|
|
124
|
+
```python
|
|
125
|
+
from copex import Copex, RalphWiggum
|
|
126
|
+
|
|
127
|
+
async def main():
|
|
128
|
+
async with Copex() as copex:
|
|
129
|
+
ralph = RalphWiggum(copex)
|
|
130
|
+
|
|
131
|
+
result = await ralph.loop(
|
|
132
|
+
prompt="Build a REST API with CRUD operations and tests",
|
|
133
|
+
completion_promise="ALL TESTS PASSING",
|
|
134
|
+
max_iterations=30,
|
|
135
|
+
)
|
|
136
|
+
|
|
137
|
+
print(f"Completed in {result.iteration} iterations")
|
|
138
|
+
print(f"Reason: {result.completion_reason}")
|
|
139
|
+
```
|
|
140
|
+
|
|
141
|
+
**How it works:**
|
|
142
|
+
1. The same prompt is fed to the AI repeatedly
|
|
143
|
+
2. The AI sees its previous work in conversation history
|
|
144
|
+
3. It iteratively improves until outputting `<promise>COMPLETION TEXT</promise>`
|
|
145
|
+
4. Loop ends when promise matches or max iterations reached
|
|
146
|
+
|
|
147
|
+
### Skills, Instructions & MCP
|
|
148
|
+
|
|
149
|
+
Copex is fully compatible with Copilot SDK features:
|
|
150
|
+
|
|
151
|
+
```python
|
|
152
|
+
from copex import Copex, CopexConfig, Model, ReasoningEffort
|
|
153
|
+
|
|
154
|
+
config = CopexConfig(
|
|
155
|
+
model=Model.GPT_5_2_CODEX,
|
|
156
|
+
reasoning_effort=ReasoningEffort.XHIGH,
|
|
157
|
+
|
|
158
|
+
# Enable skills
|
|
159
|
+
skills=["code-review", "api-design", "security"],
|
|
160
|
+
|
|
161
|
+
# Custom instructions
|
|
162
|
+
instructions="Follow PEP 8. Use type hints. Prefer dataclasses.",
|
|
163
|
+
# Or load from file:
|
|
164
|
+
# instructions_file=".copilot/instructions.md",
|
|
165
|
+
|
|
166
|
+
# MCP servers (inline or from file)
|
|
167
|
+
mcp_servers=[
|
|
168
|
+
{"name": "github", "url": "https://api.github.com/mcp/"},
|
|
169
|
+
],
|
|
170
|
+
# mcp_config_file=".copex/mcp.json",
|
|
171
|
+
|
|
172
|
+
# Tool filtering
|
|
173
|
+
available_tools=["repos", "issues", "code_security"],
|
|
174
|
+
excluded_tools=["delete_repo"],
|
|
175
|
+
)
|
|
176
|
+
|
|
177
|
+
async with Copex(config) as copex:
|
|
178
|
+
response = await copex.chat("Review this code for security issues")
|
|
179
|
+
```
|
|
180
|
+
|
|
181
|
+
### Streaming
|
|
182
|
+
|
|
183
|
+
```python
|
|
184
|
+
async def stream_example():
|
|
185
|
+
async with Copex() as copex:
|
|
186
|
+
async for chunk in copex.stream("Write a REST API"):
|
|
187
|
+
if chunk.type == "message":
|
|
188
|
+
print(chunk.delta, end="", flush=True)
|
|
189
|
+
elif chunk.type == "reasoning":
|
|
190
|
+
print(f"[thinking: {chunk.delta}]", end="")
|
|
191
|
+
```
|
|
192
|
+
|
|
193
|
+
## CLI Usage
|
|
194
|
+
|
|
195
|
+
### Single prompt
|
|
196
|
+
|
|
197
|
+
```bash
|
|
198
|
+
# Basic usage
|
|
199
|
+
copex chat "Explain Docker containers"
|
|
200
|
+
|
|
201
|
+
# With options
|
|
202
|
+
copex chat "Write a Python web scraper" \
|
|
203
|
+
--model gpt-5.2-codex \
|
|
204
|
+
--reasoning xhigh \
|
|
205
|
+
--max-retries 10
|
|
206
|
+
|
|
207
|
+
# From stdin (for long prompts)
|
|
208
|
+
cat prompt.txt | copex chat
|
|
209
|
+
|
|
210
|
+
# Show reasoning output
|
|
211
|
+
copex chat "Solve this algorithm" --show-reasoning
|
|
212
|
+
|
|
213
|
+
# Raw output (for piping)
|
|
214
|
+
copex chat "Write a bash script" --raw > script.sh
|
|
215
|
+
```
|
|
216
|
+
|
|
217
|
+
### Ralph Wiggum loop
|
|
218
|
+
|
|
219
|
+
```bash
|
|
220
|
+
# Run iterative development loop
|
|
221
|
+
copex ralph "Build a calculator with tests" --promise "ALL TESTS PASSING" -n 20
|
|
222
|
+
|
|
223
|
+
# Without completion promise (runs until max iterations)
|
|
224
|
+
copex ralph "Improve code coverage" --max-iterations 10
|
|
225
|
+
```
|
|
226
|
+
|
|
227
|
+
### Interactive mode
|
|
228
|
+
|
|
229
|
+
```bash
|
|
230
|
+
copex interactive
|
|
231
|
+
|
|
232
|
+
# With specific model
|
|
233
|
+
copex interactive --model claude-sonnet-4.5 --reasoning high
|
|
234
|
+
```
|
|
235
|
+
|
|
236
|
+
Interactive slash commands:
|
|
237
|
+
- `/model <name>` - Change model
|
|
238
|
+
- `/reasoning <level>` - Change reasoning effort
|
|
239
|
+
- `/models` - List available models
|
|
240
|
+
- `/new` - Start a new session
|
|
241
|
+
- `/status` - Show current settings
|
|
242
|
+
- `/tools` - Toggle full tool call list
|
|
243
|
+
- `/help` - Show commands
|
|
244
|
+
|
|
245
|
+
### Other commands
|
|
246
|
+
|
|
247
|
+
```bash
|
|
248
|
+
# List available models
|
|
249
|
+
copex models
|
|
250
|
+
|
|
251
|
+
# Create default config file
|
|
252
|
+
copex init
|
|
253
|
+
```
|
|
254
|
+
|
|
255
|
+
## Configuration
|
|
256
|
+
|
|
257
|
+
Create a config file at `~/.config/copex/config.toml`:
|
|
258
|
+
|
|
259
|
+
```toml
|
|
260
|
+
model = "gpt-5.2-codex"
|
|
261
|
+
reasoning_effort = "xhigh"
|
|
262
|
+
streaming = true
|
|
263
|
+
timeout = 300.0
|
|
264
|
+
auto_continue = true
|
|
265
|
+
continue_prompt = "Keep going"
|
|
266
|
+
|
|
267
|
+
# Skills to enable
|
|
268
|
+
skills = ["code-review", "api-design", "test-writer"]
|
|
269
|
+
|
|
270
|
+
# Custom instructions (inline or file path)
|
|
271
|
+
instructions = "Follow our team coding standards. Prefer functional programming."
|
|
272
|
+
# instructions_file = ".copilot/instructions.md"
|
|
273
|
+
|
|
274
|
+
# MCP server config file
|
|
275
|
+
# mcp_config_file = ".copex/mcp.json"
|
|
276
|
+
|
|
277
|
+
# Tool filtering
|
|
278
|
+
# available_tools = ["repos", "issues", "code_security"]
|
|
279
|
+
excluded_tools = []
|
|
280
|
+
|
|
281
|
+
[retry]
|
|
282
|
+
max_retries = 5
|
|
283
|
+
retry_on_any_error = true
|
|
284
|
+
base_delay = 1.0
|
|
285
|
+
max_delay = 30.0
|
|
286
|
+
exponential_base = 2.0
|
|
287
|
+
```
|
|
288
|
+
|
|
289
|
+
## Available Models
|
|
290
|
+
|
|
291
|
+
| Model | Description |
|
|
292
|
+
|-------|-------------|
|
|
293
|
+
| `gpt-5.2-codex` | Latest Codex model (default) |
|
|
294
|
+
| `gpt-5.1-codex` | Previous Codex version |
|
|
295
|
+
| `gpt-5.1-codex-max` | High-capacity Codex |
|
|
296
|
+
| `gpt-5.1-codex-mini` | Fast, lightweight Codex |
|
|
297
|
+
| `claude-sonnet-4.5` | Claude Sonnet 4.5 |
|
|
298
|
+
| `claude-sonnet-4` | Claude Sonnet 4 |
|
|
299
|
+
| `claude-opus-4.5` | Claude Opus (premium) |
|
|
300
|
+
| `gemini-3-pro-preview` | Gemini 3 Pro |
|
|
301
|
+
|
|
302
|
+
## Reasoning Effort Levels
|
|
303
|
+
|
|
304
|
+
| Level | Description |
|
|
305
|
+
|-------|-------------|
|
|
306
|
+
| `none` | No extended reasoning |
|
|
307
|
+
| `low` | Minimal reasoning |
|
|
308
|
+
| `medium` | Balanced reasoning |
|
|
309
|
+
| `high` | Deep reasoning |
|
|
310
|
+
| `xhigh` | Maximum reasoning (best for complex tasks) |
|
|
311
|
+
|
|
312
|
+
## Error Handling
|
|
313
|
+
|
|
314
|
+
By default, Copex retries on **any error** (`retry_on_any_error=True`).
|
|
315
|
+
|
|
316
|
+
You can also be specific:
|
|
317
|
+
|
|
318
|
+
```python
|
|
319
|
+
config = CopexConfig(
|
|
320
|
+
retry={
|
|
321
|
+
"retry_on_any_error": False,
|
|
322
|
+
"max_retries": 10,
|
|
323
|
+
"retry_on_errors": ["500", "timeout", "rate limit"],
|
|
324
|
+
}
|
|
325
|
+
)
|
|
326
|
+
```
|
|
327
|
+
|
|
328
|
+
## Credits
|
|
329
|
+
|
|
330
|
+
- **Ralph Wiggum technique**: [Geoffrey Huntley](https://ghuntley.com/ralph/)
|
|
331
|
+
- **GitHub Copilot SDK**: [github/copilot-sdk](https://github.com/github/copilot-sdk)
|
|
332
|
+
|
|
333
|
+
## Contributing
|
|
334
|
+
|
|
335
|
+
Contributions welcome! Please open an issue or PR at [github.com/Arthur742Ramos/copex](https://github.com/Arthur742Ramos/copex).
|
|
336
|
+
|
|
337
|
+
## License
|
|
338
|
+
|
|
339
|
+
MIT
|
|
340
|
+
|
|
341
|
+
---
|
|
342
|
+
|
|
343
|
+
## Advanced Features
|
|
344
|
+
|
|
345
|
+
### Session Persistence
|
|
346
|
+
|
|
347
|
+
Save and restore conversation history:
|
|
348
|
+
|
|
349
|
+
```python
|
|
350
|
+
from copex import Copex, SessionStore, PersistentSession
|
|
351
|
+
|
|
352
|
+
store = SessionStore() # Saves to ~/.copex/sessions/
|
|
353
|
+
|
|
354
|
+
# Create a persistent session
|
|
355
|
+
session = PersistentSession("my-project", store)
|
|
356
|
+
|
|
357
|
+
async with Copex() as copex:
|
|
358
|
+
response = await copex.chat("Hello!")
|
|
359
|
+
session.add_user_message("Hello!")
|
|
360
|
+
session.add_assistant_message(response)
|
|
361
|
+
# Auto-saved to disk
|
|
362
|
+
|
|
363
|
+
# Later, restore it
|
|
364
|
+
session = PersistentSession("my-project", store)
|
|
365
|
+
print(session.messages) # Previous messages loaded
|
|
366
|
+
```
|
|
367
|
+
|
|
368
|
+
### Checkpointing (Crash Recovery)
|
|
369
|
+
|
|
370
|
+
Resume Ralph loops after interruption:
|
|
371
|
+
|
|
372
|
+
```python
|
|
373
|
+
from copex import Copex, CheckpointStore, CheckpointedRalph
|
|
374
|
+
|
|
375
|
+
store = CheckpointStore() # Saves to ~/.copex/checkpoints/
|
|
376
|
+
|
|
377
|
+
async with Copex() as copex:
|
|
378
|
+
ralph = CheckpointedRalph(copex, store, loop_id="my-api-project")
|
|
379
|
+
|
|
380
|
+
# Automatically resumes from last checkpoint if interrupted
|
|
381
|
+
result = await ralph.loop(
|
|
382
|
+
prompt="Build a REST API with tests",
|
|
383
|
+
completion_promise="ALL TESTS PASSING",
|
|
384
|
+
max_iterations=30,
|
|
385
|
+
resume=True, # Resume from checkpoint
|
|
386
|
+
)
|
|
387
|
+
```
|
|
388
|
+
|
|
389
|
+
### Metrics & Cost Tracking
|
|
390
|
+
|
|
391
|
+
Track token usage and estimate costs:
|
|
392
|
+
|
|
393
|
+
```python
|
|
394
|
+
from copex import Copex, MetricsCollector
|
|
395
|
+
|
|
396
|
+
collector = MetricsCollector()
|
|
397
|
+
|
|
398
|
+
async with Copex() as copex:
|
|
399
|
+
# Track a request
|
|
400
|
+
req = collector.start_request(
|
|
401
|
+
model="gpt-5.2-codex",
|
|
402
|
+
prompt="Write a function..."
|
|
403
|
+
)
|
|
404
|
+
|
|
405
|
+
response = await copex.chat("Write a function...")
|
|
406
|
+
|
|
407
|
+
collector.complete_request(
|
|
408
|
+
req.request_id,
|
|
409
|
+
success=True,
|
|
410
|
+
response=response,
|
|
411
|
+
)
|
|
412
|
+
|
|
413
|
+
# Get summary
|
|
414
|
+
print(collector.print_summary())
|
|
415
|
+
# Session: 20260117_170000
|
|
416
|
+
# Requests: 5 (5 ok, 0 failed)
|
|
417
|
+
# Success Rate: 100.0%
|
|
418
|
+
# Total Tokens: 12,450
|
|
419
|
+
# Estimated Cost: $0.0234
|
|
420
|
+
|
|
421
|
+
# Export metrics
|
|
422
|
+
collector.export_json("metrics.json")
|
|
423
|
+
collector.export_csv("metrics.csv")
|
|
424
|
+
```
|
|
425
|
+
|
|
426
|
+
### Parallel Tools
|
|
427
|
+
|
|
428
|
+
Execute multiple tools concurrently:
|
|
429
|
+
|
|
430
|
+
```python
|
|
431
|
+
from copex import Copex, ParallelToolExecutor
|
|
432
|
+
|
|
433
|
+
executor = ParallelToolExecutor()
|
|
434
|
+
|
|
435
|
+
@executor.tool("get_weather", "Get weather for a city")
|
|
436
|
+
async def get_weather(city: str) -> str:
|
|
437
|
+
return f"Weather in {city}: Sunny, 72°F"
|
|
438
|
+
|
|
439
|
+
@executor.tool("get_time", "Get time in timezone")
|
|
440
|
+
async def get_time(timezone: str) -> str:
|
|
441
|
+
return f"Time in {timezone}: 2:30 PM"
|
|
442
|
+
|
|
443
|
+
# Tools execute in parallel when AI calls multiple at once
|
|
444
|
+
async with Copex() as copex:
|
|
445
|
+
response = await copex.send(
|
|
446
|
+
"What's the weather in Seattle and the time in PST?",
|
|
447
|
+
tools=executor.get_tool_definitions(),
|
|
448
|
+
)
|
|
449
|
+
```
|
|
450
|
+
|
|
451
|
+
### MCP Server Integration
|
|
452
|
+
|
|
453
|
+
Connect to external MCP servers:
|
|
454
|
+
|
|
455
|
+
```python
|
|
456
|
+
from copex import Copex, MCPManager, MCPServerConfig
|
|
457
|
+
|
|
458
|
+
manager = MCPManager()
|
|
459
|
+
|
|
460
|
+
# Add MCP servers
|
|
461
|
+
manager.add_server(MCPServerConfig(
|
|
462
|
+
name="github",
|
|
463
|
+
command="npx",
|
|
464
|
+
args=["-y", "@github/mcp-server"],
|
|
465
|
+
env={"GITHUB_TOKEN": "..."},
|
|
466
|
+
))
|
|
467
|
+
|
|
468
|
+
manager.add_server(MCPServerConfig(
|
|
469
|
+
name="filesystem",
|
|
470
|
+
command="npx",
|
|
471
|
+
args=["-y", "@anthropic/mcp-server-filesystem", "/path/to/dir"],
|
|
472
|
+
))
|
|
473
|
+
|
|
474
|
+
await manager.connect_all()
|
|
475
|
+
|
|
476
|
+
# Get all tools from all servers
|
|
477
|
+
all_tools = manager.get_all_tools()
|
|
478
|
+
|
|
479
|
+
# Call a tool
|
|
480
|
+
result = await manager.call_tool("github:search_repos", {"query": "copex"})
|
|
481
|
+
|
|
482
|
+
await manager.disconnect_all()
|
|
483
|
+
```
|
|
484
|
+
|
|
485
|
+
**MCP Config File** (`~/.copex/mcp.json`):
|
|
486
|
+
|
|
487
|
+
```json
|
|
488
|
+
{
|
|
489
|
+
"servers": {
|
|
490
|
+
"github": {
|
|
491
|
+
"command": "npx",
|
|
492
|
+
"args": ["-y", "@github/mcp-server"],
|
|
493
|
+
"env": {"GITHUB_TOKEN": "your-token"}
|
|
494
|
+
},
|
|
495
|
+
"browser": {
|
|
496
|
+
"command": "npx",
|
|
497
|
+
"args": ["-y", "@anthropic/mcp-server-puppeteer"]
|
|
498
|
+
}
|
|
499
|
+
}
|
|
500
|
+
}
|
|
501
|
+
```
|
|
502
|
+
|
|
503
|
+
```python
|
|
504
|
+
from copex import load_mcp_config, MCPManager
|
|
505
|
+
|
|
506
|
+
configs = load_mcp_config() # Loads from ~/.copex/mcp.json
|
|
507
|
+
manager = MCPManager()
|
|
508
|
+
for config in configs:
|
|
509
|
+
manager.add_server(config)
|
|
510
|
+
await manager.connect_all()
|
|
511
|
+
```
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
copex/__init__.py,sha256=tFeYHcqAswuLVmjBPUqG9HOJTrr6QBH0vPGqDD1ijgM,1643
|
|
2
|
+
copex/checkpoint.py,sha256=KRhadt4J9UVSqVPGlYfdrhP3cKZc8UsISOcZm_g8slo,13438
|
|
3
|
+
copex/cli.py,sha256=BoMzlutwFTmxLPbavDNqMq1fSVGpCCeYiFlkvMZbGJ4,38286
|
|
4
|
+
copex/client.py,sha256=SIZnnaCvqcaYx7X95ntA1-mE5awQv_cw9KNU2ko8Dss,27907
|
|
5
|
+
copex/config.py,sha256=yeBMO-_D9hKFHASCzZm7Odq9Ko2T5BBipM9Ddy6_GgU,10885
|
|
6
|
+
copex/mcp.py,sha256=16eLKWhk6_7DqdEsrLZi9TMuYoKcbrYosFXCAGNzMEs,16308
|
|
7
|
+
copex/metrics.py,sha256=dS4cuauTY9fKT3eGUuLrhnaLqMh6GGFycD0InkIXXAE,11979
|
|
8
|
+
copex/models.py,sha256=1O3eZNvcQBMSsOGMsq4kS28-l9BbhqeHov-J33lwmj0,1452
|
|
9
|
+
copex/persistence.py,sha256=UFA30bI1rZB-X9HW-v7RolxQ6QL7yAcO1C-V-br7d9U,10022
|
|
10
|
+
copex/plan.py,sha256=punyDcCB6KdeChX7DuiU5-Pl2r2SgGTD4u2oUGDk1uk,11814
|
|
11
|
+
copex/ralph.py,sha256=UZHl5xuivGHaIqQKlIp5xB7WWoc9LCtHweOgzh-OuPU,8350
|
|
12
|
+
copex/tools.py,sha256=RehqIYekvyL2b4bwQlftWjkEadu9teQwtPDcEop0QxA,12072
|
|
13
|
+
copex/ui.py,sha256=oJ_ZAZOnRspvlpKeAcl8YgsOeMXEp-1kVSTZb1nOa1s,36592
|
|
14
|
+
copex-0.8.4.dist-info/METADATA,sha256=G8jkuuXnAuBTPTpYNsYxCRHTB1AXCBGKdieqgNyXFbM,13643
|
|
15
|
+
copex-0.8.4.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
|
|
16
|
+
copex-0.8.4.dist-info/entry_points.txt,sha256=r0gJK7Vq1SoE-j5jdtC51qO1IoIipMWGTR-I2NFhnRk,40
|
|
17
|
+
copex-0.8.4.dist-info/licenses/LICENSE,sha256=eGBwBmQ3sxCsuKPsoB1X4wnizlxrPadsO9yWotKpeQs,1069
|
|
18
|
+
copex-0.8.4.dist-info/RECORD,,
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Arthur Ramos
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|