scope-optimizer 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- scope_optimizer-0.1.0/LICENSE +22 -0
- scope_optimizer-0.1.0/PKG-INFO +380 -0
- scope_optimizer-0.1.0/README.md +340 -0
- scope_optimizer-0.1.0/pyproject.toml +91 -0
- scope_optimizer-0.1.0/scope/__init__.py +119 -0
- scope_optimizer-0.1.0/scope/history_store.py +256 -0
- scope_optimizer-0.1.0/scope/memory_optimizer.py +657 -0
- scope_optimizer-0.1.0/scope/models/__init__.py +77 -0
- scope_optimizer-0.1.0/scope/models/anthropic_adapter.py +198 -0
- scope_optimizer-0.1.0/scope/models/base.py +253 -0
- scope_optimizer-0.1.0/scope/models/litellm_adapter.py +191 -0
- scope_optimizer-0.1.0/scope/models/openai_adapter.py +182 -0
- scope_optimizer-0.1.0/scope/optimizer.py +660 -0
- scope_optimizer-0.1.0/scope/prompts.py +378 -0
- scope_optimizer-0.1.0/scope/strategic_store.py +386 -0
- scope_optimizer-0.1.0/scope/synthesizer.py +542 -0
- scope_optimizer-0.1.0/scope/utils.py +58 -0
- scope_optimizer-0.1.0/scope_optimizer.egg-info/PKG-INFO +380 -0
- scope_optimizer-0.1.0/scope_optimizer.egg-info/SOURCES.txt +39 -0
- scope_optimizer-0.1.0/scope_optimizer.egg-info/dependency_links.txt +1 -0
- scope_optimizer-0.1.0/scope_optimizer.egg-info/requires.txt +13 -0
- scope_optimizer-0.1.0/scope_optimizer.egg-info/top_level.txt +2 -0
- scope_optimizer-0.1.0/scope_saved/__init__.py +119 -0
- scope_optimizer-0.1.0/scope_saved/history_store.py +256 -0
- scope_optimizer-0.1.0/scope_saved/memory_optimizer.py +657 -0
- scope_optimizer-0.1.0/scope_saved/models/__init__.py +77 -0
- scope_optimizer-0.1.0/scope_saved/models/anthropic_adapter.py +198 -0
- scope_optimizer-0.1.0/scope_saved/models/base.py +253 -0
- scope_optimizer-0.1.0/scope_saved/models/litellm_adapter.py +191 -0
- scope_optimizer-0.1.0/scope_saved/models/openai_adapter.py +182 -0
- scope_optimizer-0.1.0/scope_saved/optimizer.py +660 -0
- scope_optimizer-0.1.0/scope_saved/prompts.py +378 -0
- scope_optimizer-0.1.0/scope_saved/strategic_store.py +386 -0
- scope_optimizer-0.1.0/scope_saved/synthesizer.py +542 -0
- scope_optimizer-0.1.0/scope_saved/utils.py +58 -0
- scope_optimizer-0.1.0/setup.cfg +4 -0
- scope_optimizer-0.1.0/tests/test_basic.py +224 -0
- scope_optimizer-0.1.0/tests/test_integration.py +319 -0
- scope_optimizer-0.1.0/tests/test_models.py +189 -0
- scope_optimizer-0.1.0/tests/test_strategic_store.py +211 -0
- scope_optimizer-0.1.0/tests/test_synthesizer.py +193 -0
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2024 SCOPE Team
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
22
|
+
|
|
@@ -0,0 +1,380 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: scope-optimizer
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: SCOPE: Self-evolving Context Optimization via Prompt Evolution - A framework for automatic prompt optimization
|
|
5
|
+
Author: SCOPE Team
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/JarvisPei/SCOPE
|
|
8
|
+
Project-URL: Documentation, https://github.com/JarvisPei/SCOPE#readme
|
|
9
|
+
Project-URL: Repository, https://github.com/JarvisPei/SCOPE
|
|
10
|
+
Project-URL: Issues, https://github.com/JarvisPei/SCOPE/issues
|
|
11
|
+
Keywords: llm,prompt-optimization,agents,machine-learning,nlp,prompt-engineering
|
|
12
|
+
Classifier: Development Status :: 4 - Beta
|
|
13
|
+
Classifier: Intended Audience :: Developers
|
|
14
|
+
Classifier: Intended Audience :: Science/Research
|
|
15
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
16
|
+
Classifier: Operating System :: OS Independent
|
|
17
|
+
Classifier: Programming Language :: Python :: 3
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
20
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
21
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
22
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
23
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
24
|
+
Requires-Python: >=3.8
|
|
25
|
+
Description-Content-Type: text/markdown
|
|
26
|
+
License-File: LICENSE
|
|
27
|
+
Requires-Dist: openai>=1.0.0
|
|
28
|
+
Requires-Dist: anthropic>=0.18.0
|
|
29
|
+
Requires-Dist: litellm>=1.0.0
|
|
30
|
+
Requires-Dist: python-dotenv>=1.0.0
|
|
31
|
+
Provides-Extra: dev
|
|
32
|
+
Requires-Dist: pytest>=7.0.0; extra == "dev"
|
|
33
|
+
Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
|
|
34
|
+
Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
|
|
35
|
+
Requires-Dist: black>=23.0.0; extra == "dev"
|
|
36
|
+
Requires-Dist: ruff>=0.1.0; extra == "dev"
|
|
37
|
+
Requires-Dist: mypy>=1.0.0; extra == "dev"
|
|
38
|
+
Requires-Dist: pre-commit>=3.0.0; extra == "dev"
|
|
39
|
+
Dynamic: license-file
|
|
40
|
+
|
|
41
|
+
<p align="center">
|
|
42
|
+
<img src="assets/SCOPE_logo.png" alt="SCOPE Logo" width="400">
|
|
43
|
+
</p>
|
|
44
|
+
|
|
45
|
+
<p align="center">
|
|
46
|
+
<strong>Self-evolving Context Optimization via Prompt Evolution</strong>
|
|
47
|
+
</p>
|
|
48
|
+
|
|
49
|
+
<p align="center">
|
|
50
|
+
A framework for automatic prompt optimization that learns from agent execution traces
|
|
51
|
+
</p>
|
|
52
|
+
|
|
53
|
+
<p align="center">
|
|
54
|
+
<a href="#installation">Installation</a> •
|
|
55
|
+
<a href="#quick-start">Quick Start</a> •
|
|
56
|
+
<a href="#how-it-works">How It Works</a> •
|
|
57
|
+
<a href="#api-reference">API</a> •
|
|
58
|
+
<a href="#configuration">Configuration</a>
|
|
59
|
+
</p>
|
|
60
|
+
|
|
61
|
+
<p align="center">
|
|
62
|
+
<a href="https://arxiv.org/abs/2512.15374"><img src="https://img.shields.io/badge/arXiv-2512.15374-b31b1b.svg" alt="arXiv"></a>
|
|
63
|
+
<a href="https://pypi.org/project/scope-optimizer/"><img src="https://img.shields.io/pypi/v/scope-optimizer.svg" alt="PyPI version"></a>
|
|
64
|
+
<img src="https://img.shields.io/badge/python-3.8+-blue.svg" alt="Python 3.8+">
|
|
65
|
+
<a href="https://github.com/JarvisPei/SCOPE/actions"><img src="https://github.com/JarvisPei/SCOPE/workflows/Tests/badge.svg" alt="Tests"></a>
|
|
66
|
+
<img src="https://img.shields.io/badge/license-MIT-green.svg" alt="License: MIT">
|
|
67
|
+
<a href="CONTRIBUTING.md"><img src="https://img.shields.io/badge/PRs-welcome-brightgreen.svg" alt="PRs Welcome"></a>
|
|
68
|
+
</p>
|
|
69
|
+
|
|
70
|
+
---
|
|
71
|
+
|
|
72
|
+
## Overview
|
|
73
|
+
|
|
74
|
+
**SCOPE** transforms static agent prompts into self-evolving systems that learn from their own execution. Instead of manually crafting prompts, SCOPE automatically synthesizes guidelines from execution traces and continuously improves agent performance.
|
|
75
|
+
|
|
76
|
+
📄 **Paper:** [SCOPE: Prompt Evolution for Enhancing Agent Effectiveness](https://arxiv.org/abs/2512.15374)
|
|
77
|
+
|
|
78
|
+
**Key Features:**
|
|
79
|
+
- 🔄 **Automatic Learning** — Synthesizes guidelines from errors and successful patterns
|
|
80
|
+
- 📊 **Dual-Stream Memory** — Tactical (task-specific) + Strategic (cross-task) learning
|
|
81
|
+
- 🎯 **Best-of-N Selection** — Generates multiple candidates and selects the best
|
|
82
|
+
- 🧠 **Memory Optimization** — Automatically consolidates and deduplicates rules
|
|
83
|
+
- 🔌 **Universal Model Support** — Works with OpenAI, Anthropic, and 100+ providers via LiteLLM
|
|
84
|
+
|
|
85
|
+
## Installation
|
|
86
|
+
|
|
87
|
+
```bash
|
|
88
|
+
pip install scope-optimizer
|
|
89
|
+
```
|
|
90
|
+
|
|
91
|
+
**From source:**
|
|
92
|
+
|
|
93
|
+
```bash
|
|
94
|
+
git clone https://github.com/JarvisPei/SCOPE.git
|
|
95
|
+
cd SCOPE
|
|
96
|
+
pip install -e .
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
## Quick Start
|
|
100
|
+
|
|
101
|
+
```python
|
|
102
|
+
import asyncio
|
|
103
|
+
from dotenv import load_dotenv
|
|
104
|
+
from scope import SCOPEOptimizer
|
|
105
|
+
from scope.models import create_openai_model
|
|
106
|
+
|
|
107
|
+
load_dotenv() # Load API keys from .env
|
|
108
|
+
|
|
109
|
+
async def main():
|
|
110
|
+
model = create_openai_model("gpt-4o-mini")
|
|
111
|
+
optimizer = SCOPEOptimizer(
|
|
112
|
+
synthesizer_model=model,
|
|
113
|
+
exp_path="./scope_data", # Strategic rules persist here
|
|
114
|
+
)
|
|
115
|
+
|
|
116
|
+
# Initialize prompt with previously learned strategic rules
|
|
117
|
+
base_prompt = "You are a helpful assistant."
|
|
118
|
+
strategic_rules = optimizer.get_strategic_rules_for_agent("my_agent")
|
|
119
|
+
current_prompt = base_prompt + strategic_rules # Applies cross-task knowledge
|
|
120
|
+
|
|
121
|
+
while not task_complete:
|
|
122
|
+
# ... your agent logic ...
|
|
123
|
+
|
|
124
|
+
# Call SCOPE after each step
|
|
125
|
+
result = await optimizer.on_step_complete(
|
|
126
|
+
agent_name="my_agent",
|
|
127
|
+
agent_role="AI Assistant",
|
|
128
|
+
task="Answer user questions",
|
|
129
|
+
model_output="...",
|
|
130
|
+
error=error_if_any, # Pass errors when they occur
|
|
131
|
+
current_system_prompt=current_prompt,
|
|
132
|
+
task_id="task_001",
|
|
133
|
+
)
|
|
134
|
+
|
|
135
|
+
# Apply generated guideline
|
|
136
|
+
if result:
|
|
137
|
+
guideline, guideline_type = result # guideline_type: "tactical" or "strategic"
|
|
138
|
+
current_prompt += f"\n\n## Learned Guideline:\n{guideline}"
|
|
139
|
+
|
|
140
|
+
asyncio.run(main())
|
|
141
|
+
```
|
|
142
|
+
|
|
143
|
+
## How It Works
|
|
144
|
+
|
|
145
|
+
SCOPE operates through four key mechanisms:
|
|
146
|
+
|
|
147
|
+
### 1. Guideline Synthesis (π_φ, π_σ)
|
|
148
|
+
|
|
149
|
+
When errors occur or quality issues are detected, SCOPE **generates** multiple candidate guidelines using the Generator (π_φ) and **selects** the best candidate using the Selector (π_σ).
|
|
150
|
+
|
|
151
|
+
### 2. Dual-Stream Routing (π_γ)
|
|
152
|
+
|
|
153
|
+
Guidelines are classified and routed to appropriate memory:
|
|
154
|
+
|
|
155
|
+
| Stream | Scope | Persistence | Example |
|
|
156
|
+
|--------|-------|-------------|---------|
|
|
157
|
+
| **Tactical** | Task-specific | In-memory only | "This API has rate limit of 10/min" |
|
|
158
|
+
| **Strategic** | Cross-task | Saved to disk | "Always validate JSON before parsing" |
|
|
159
|
+
|
|
160
|
+
### 3. Memory Optimization (π_ω)
|
|
161
|
+
|
|
162
|
+
Strategic memory is automatically optimized via conflict resolution, subsumption pruning, and consolidation.
|
|
163
|
+
|
|
164
|
+
### 4. Prompt Evolution
|
|
165
|
+
|
|
166
|
+
```
|
|
167
|
+
θ_new = θ_base ⊕ M_strategic ⊕ M_tactical
|
|
168
|
+
```
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
## API Reference
|
|
172
|
+
|
|
173
|
+
### SCOPEOptimizer
|
|
174
|
+
|
|
175
|
+
```python
|
|
176
|
+
optimizer = SCOPEOptimizer(
|
|
177
|
+
# Required parameters
|
|
178
|
+
synthesizer_model, # Model instance for guideline synthesis (e.g., gpt-4o-mini)
|
|
179
|
+
exp_path="./scope_data", # Path for storing strategic rules and history
|
|
180
|
+
|
|
181
|
+
# Analysis settings
|
|
182
|
+
enable_quality_analysis=True, # Whether to analyze successful steps for improvements (default: True)
|
|
183
|
+
quality_analysis_frequency=1, # Analyze quality every N successful steps (default: 1)
|
|
184
|
+
auto_accept_threshold="medium", # Confidence threshold: "all", "low", "medium", "high" (default: "medium")
|
|
185
|
+
|
|
186
|
+
# Memory settings
|
|
187
|
+
max_rules_per_task=20, # Max tactical rules to apply per task (default: 20)
|
|
188
|
+
strategic_confidence_threshold=0.85, # Min confidence for strategic promotion (default: 0.85)
|
|
189
|
+
max_strategic_rules_per_domain=10, # Max strategic rules per domain per agent (default: 10)
|
|
190
|
+
|
|
191
|
+
# Synthesis settings
|
|
192
|
+
synthesis_mode="thoroughness", # "efficiency" (fast) or "thoroughness" (comprehensive, default)
|
|
193
|
+
use_best_of_n=False, # Enable Best-of-N candidate selection (default: False)
|
|
194
|
+
candidate_models=None, # Additional models for Best-of-N (default: None)
|
|
195
|
+
|
|
196
|
+
# Advanced settings
|
|
197
|
+
optimizer_model=None, # Separate model for rule optimization (default: synthesizer_model)
|
|
198
|
+
enable_rule_optimization=True, # Auto-optimize strategic memory when full (default: True)
|
|
199
|
+
store_history=False, # Store guideline generation history to disk (default: False)
|
|
200
|
+
)
|
|
201
|
+
```
|
|
202
|
+
|
|
203
|
+
### on_step_complete
|
|
204
|
+
|
|
205
|
+
```python
|
|
206
|
+
# Call after each agent step
|
|
207
|
+
result = await optimizer.on_step_complete(
|
|
208
|
+
# Required parameters
|
|
209
|
+
agent_name="my_agent", # Unique identifier for the agent
|
|
210
|
+
agent_role="AI Assistant", # Role/description of the agent
|
|
211
|
+
task="Complete user request", # Current task description
|
|
212
|
+
|
|
213
|
+
# Step context (at least one of error/model_output/observations required)
|
|
214
|
+
model_output="Agent's response...", # Model's output text (default: None)
|
|
215
|
+
tool_calls="[{...}]", # Tool calls attempted as string (default: None)
|
|
216
|
+
observations="Tool results...", # Observations/tool results received (default: None)
|
|
217
|
+
error=exception_if_any, # Exception if step failed (default: None)
|
|
218
|
+
|
|
219
|
+
# Prompt context
|
|
220
|
+
current_system_prompt=prompt, # Current system prompt including strategic rules
|
|
221
|
+
|
|
222
|
+
# Optional settings
|
|
223
|
+
task_id="task_001", # Task identifier for tracking (default: None)
|
|
224
|
+
truncate_context=True, # Truncate long context for efficiency (default: True)
|
|
225
|
+
)
|
|
226
|
+
|
|
227
|
+
# Returns: Tuple[str, str] or None
|
|
228
|
+
# - On success: (guideline_text, guideline_type) where guideline_type is "tactical" or "strategic"
|
|
229
|
+
# - On skip/failure: None
|
|
230
|
+
```
|
|
231
|
+
|
|
232
|
+
### Loading Strategic Rules
|
|
233
|
+
|
|
234
|
+
```python
|
|
235
|
+
# Load strategic rules at agent initialization (critical for cross-task learning!)
|
|
236
|
+
strategic_rules = optimizer.get_strategic_rules_for_agent("my_agent")
|
|
237
|
+
initial_prompt = base_prompt + strategic_rules # Apply learned knowledge
|
|
238
|
+
```
|
|
239
|
+
|
|
240
|
+
Strategic rules are stored in `{exp_path}/strategic_memory/global_rules.json` and automatically loaded when you call `get_strategic_rules_for_agent()`.
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
### Model Adapters
|
|
244
|
+
|
|
245
|
+
```python
|
|
246
|
+
from scope.models import create_openai_model, create_anthropic_model, create_litellm_model
|
|
247
|
+
|
|
248
|
+
# OpenAI
|
|
249
|
+
model = create_openai_model("gpt-4o-mini")
|
|
250
|
+
|
|
251
|
+
# Anthropic
|
|
252
|
+
model = create_anthropic_model("claude-3-5-sonnet-20241022")
|
|
253
|
+
|
|
254
|
+
# LiteLLM (100+ providers)
|
|
255
|
+
model = create_litellm_model("gpt-4o-mini") # OpenAI
|
|
256
|
+
model = create_litellm_model("gemini/gemini-1.5-pro") # Google
|
|
257
|
+
model = create_litellm_model("ollama/llama2") # Local
|
|
258
|
+
```
|
|
259
|
+
|
|
260
|
+
### Custom Model Adapter
|
|
261
|
+
|
|
262
|
+
```python
|
|
263
|
+
# Async adapter (default)
|
|
264
|
+
from scope.models import BaseModelAdapter, Message, ModelResponse
|
|
265
|
+
|
|
266
|
+
class MyAsyncAdapter(BaseModelAdapter):
|
|
267
|
+
async def generate(self, messages: List[Message]) -> ModelResponse:
|
|
268
|
+
result = await my_api_call(messages)
|
|
269
|
+
return ModelResponse(content=result) # Return raw text
|
|
270
|
+
|
|
271
|
+
# Sync adapter (for non-async code)
|
|
272
|
+
from scope.models import SyncModelAdapter
|
|
273
|
+
|
|
274
|
+
class MySyncAdapter(SyncModelAdapter):
|
|
275
|
+
def generate_sync(self, messages: List[Message]) -> ModelResponse:
|
|
276
|
+
result = requests.post(api_url, json={"messages": ...})
|
|
277
|
+
return ModelResponse(content=result.json()["text"])
|
|
278
|
+
|
|
279
|
+
# Or wrap any function (sync or async)
|
|
280
|
+
from scope.models import CallableModelAdapter
|
|
281
|
+
|
|
282
|
+
def my_model(messages):
|
|
283
|
+
return "response"
|
|
284
|
+
|
|
285
|
+
model = CallableModelAdapter(my_model)
|
|
286
|
+
```
|
|
287
|
+
|
|
288
|
+
> **Note:** Your adapter just returns the raw model output. SCOPE's prompts ask the model to return JSON, and SCOPE handles parsing internally.
|
|
289
|
+
|
|
290
|
+
## Configuration
|
|
291
|
+
|
|
292
|
+
### Environment Variables
|
|
293
|
+
|
|
294
|
+
Set API keys via environment variables or `.env` file:
|
|
295
|
+
|
|
296
|
+
```bash
|
|
297
|
+
# Copy template and edit
|
|
298
|
+
cp .env.template .env
|
|
299
|
+
```
|
|
300
|
+
|
|
301
|
+
```python
|
|
302
|
+
from dotenv import load_dotenv
|
|
303
|
+
load_dotenv() # API keys automatically loaded
|
|
304
|
+
```
|
|
305
|
+
|
|
306
|
+
See [`.env.template`](.env.template) for all supported providers.
|
|
307
|
+
|
|
308
|
+
### Confidence Thresholds
|
|
309
|
+
|
|
310
|
+
| Threshold | Accepts | Use Case |
|
|
311
|
+
|-----------|---------|----------|
|
|
312
|
+
| `"all"` | Everything | Aggressive learning |
|
|
313
|
+
| `"low"` | Low + Medium + High | Balanced |
|
|
314
|
+
| `"medium"` | Medium + High | Conservative (default) |
|
|
315
|
+
| `"high"` | High only | Very conservative |
|
|
316
|
+
|
|
317
|
+
### Synthesis Modes
|
|
318
|
+
|
|
319
|
+
| Mode | Description |
|
|
320
|
+
|------|-------------|
|
|
321
|
+
| `"thoroughness"` | Comprehensive 7-dimension analysis (default) |
|
|
322
|
+
| `"efficiency"` | Lightweight, faster analysis |
|
|
323
|
+
|
|
324
|
+
### Logging
|
|
325
|
+
|
|
326
|
+
```python
|
|
327
|
+
import logging
|
|
328
|
+
|
|
329
|
+
logging.getLogger("scope").setLevel(logging.INFO)
|
|
330
|
+
logging.getLogger("scope").addHandler(logging.StreamHandler())
|
|
331
|
+
```
|
|
332
|
+
|
|
333
|
+
## Testing
|
|
334
|
+
|
|
335
|
+
Verify your setup with the included test scripts:
|
|
336
|
+
|
|
337
|
+
```bash
|
|
338
|
+
# Quick connectivity test
|
|
339
|
+
python examples/test_simple.py
|
|
340
|
+
|
|
341
|
+
# Deep functionality test
|
|
342
|
+
python examples/test_scope_deep.py
|
|
343
|
+
|
|
344
|
+
# With custom model/provider
|
|
345
|
+
python examples/test_simple.py --model gpt-4o --provider openai
|
|
346
|
+
python examples/test_scope_deep.py --model claude-3-5-sonnet-20241022 --provider anthropic
|
|
347
|
+
```
|
|
348
|
+
|
|
349
|
+
Run `--help` for all options.
|
|
350
|
+
|
|
351
|
+
## Development
|
|
352
|
+
|
|
353
|
+
```bash
|
|
354
|
+
# Install with dev dependencies
|
|
355
|
+
pip install -e ".[dev]"
|
|
356
|
+
|
|
357
|
+
# Run tests
|
|
358
|
+
pytest tests/
|
|
359
|
+
|
|
360
|
+
# Format code
|
|
361
|
+
black scope/
|
|
362
|
+
ruff check scope/
|
|
363
|
+
```
|
|
364
|
+
|
|
365
|
+
## Citation
|
|
366
|
+
|
|
367
|
+
If you find SCOPE useful for your research, please cite our paper:
|
|
368
|
+
|
|
369
|
+
```bibtex
|
|
370
|
+
@article{pei2025scope,
|
|
371
|
+
title={SCOPE: Prompt Evolution for Enhancing Agent Effectiveness},
|
|
372
|
+
author={Pei, Zehua and Zhen, Hui-Ling and Kai, Shixiong and Pan, Sinno Jialin and Wang, Yunhe and Yuan, Mingxuan and Yu, Bei},
|
|
373
|
+
journal={arXiv preprint arXiv:2512.15374},
|
|
374
|
+
year={2025}
|
|
375
|
+
}
|
|
376
|
+
```
|
|
377
|
+
|
|
378
|
+
## License
|
|
379
|
+
|
|
380
|
+
MIT License - see [LICENSE](LICENSE) for details.
|