EvoScientist 0.1.0rc1__py3-none-any.whl → 0.1.0rc2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- EvoScientist/EvoScientist.py +1 -1
- EvoScientist/cli.py +450 -178
- EvoScientist/middleware.py +5 -1
- EvoScientist/skills/accelerate/SKILL.md +332 -0
- EvoScientist/skills/accelerate/references/custom-plugins.md +453 -0
- EvoScientist/skills/accelerate/references/megatron-integration.md +489 -0
- EvoScientist/skills/accelerate/references/performance.md +525 -0
- EvoScientist/skills/bitsandbytes/SKILL.md +411 -0
- EvoScientist/skills/bitsandbytes/references/memory-optimization.md +521 -0
- EvoScientist/skills/bitsandbytes/references/qlora-training.md +521 -0
- EvoScientist/skills/bitsandbytes/references/quantization-formats.md +447 -0
- EvoScientist/skills/clip/SKILL.md +253 -0
- EvoScientist/skills/clip/references/applications.md +207 -0
- EvoScientist/skills/find-skills/SKILL.md +133 -0
- EvoScientist/skills/find-skills/scripts/install_skill.py +211 -0
- EvoScientist/skills/flash-attention/SKILL.md +367 -0
- EvoScientist/skills/flash-attention/references/benchmarks.md +215 -0
- EvoScientist/skills/flash-attention/references/transformers-integration.md +293 -0
- EvoScientist/skills/langgraph-docs/SKILL.md +36 -0
- EvoScientist/skills/llama-cpp/SKILL.md +258 -0
- EvoScientist/skills/llama-cpp/references/optimization.md +89 -0
- EvoScientist/skills/llama-cpp/references/quantization.md +213 -0
- EvoScientist/skills/llama-cpp/references/server.md +125 -0
- EvoScientist/skills/lm-evaluation-harness/SKILL.md +490 -0
- EvoScientist/skills/lm-evaluation-harness/references/api-evaluation.md +490 -0
- EvoScientist/skills/lm-evaluation-harness/references/benchmark-guide.md +488 -0
- EvoScientist/skills/lm-evaluation-harness/references/custom-tasks.md +602 -0
- EvoScientist/skills/lm-evaluation-harness/references/distributed-eval.md +519 -0
- EvoScientist/skills/ml-paper-writing/SKILL.md +937 -0
- EvoScientist/skills/ml-paper-writing/references/checklists.md +361 -0
- EvoScientist/skills/ml-paper-writing/references/citation-workflow.md +562 -0
- EvoScientist/skills/ml-paper-writing/references/reviewer-guidelines.md +367 -0
- EvoScientist/skills/ml-paper-writing/references/sources.md +159 -0
- EvoScientist/skills/ml-paper-writing/references/writing-guide.md +476 -0
- EvoScientist/skills/ml-paper-writing/templates/README.md +251 -0
- EvoScientist/skills/ml-paper-writing/templates/aaai2026/README.md +534 -0
- EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026-unified-supp.tex +144 -0
- EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026-unified-template.tex +952 -0
- EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026.bib +111 -0
- EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026.bst +1493 -0
- EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026.sty +315 -0
- EvoScientist/skills/ml-paper-writing/templates/acl/README.md +50 -0
- EvoScientist/skills/ml-paper-writing/templates/acl/acl.sty +312 -0
- EvoScientist/skills/ml-paper-writing/templates/acl/acl_latex.tex +377 -0
- EvoScientist/skills/ml-paper-writing/templates/acl/acl_lualatex.tex +101 -0
- EvoScientist/skills/ml-paper-writing/templates/acl/acl_natbib.bst +1940 -0
- EvoScientist/skills/ml-paper-writing/templates/acl/anthology.bib.txt +26 -0
- EvoScientist/skills/ml-paper-writing/templates/acl/custom.bib +70 -0
- EvoScientist/skills/ml-paper-writing/templates/acl/formatting.md +326 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/README.md +3 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.bib +11 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.bst +1440 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.pdf +0 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.sty +218 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.tex +305 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/fancyhdr.sty +485 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/math_commands.tex +508 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/natbib.sty +1246 -0
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/fancyhdr.sty +485 -0
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.bib +24 -0
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.bst +1440 -0
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.pdf +0 -0
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.sty +246 -0
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.tex +414 -0
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/math_commands.tex +508 -0
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/natbib.sty +1246 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/algorithm.sty +79 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/algorithmic.sty +201 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/example_paper.bib +75 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/example_paper.pdf +0 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/example_paper.tex +662 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/fancyhdr.sty +864 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/icml2026.bst +1443 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/icml2026.sty +767 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/icml_numpapers.pdf +0 -0
- EvoScientist/skills/ml-paper-writing/templates/neurips2025/Makefile +36 -0
- EvoScientist/skills/ml-paper-writing/templates/neurips2025/extra_pkgs.tex +53 -0
- EvoScientist/skills/ml-paper-writing/templates/neurips2025/main.tex +38 -0
- EvoScientist/skills/ml-paper-writing/templates/neurips2025/neurips.sty +382 -0
- EvoScientist/skills/peft/SKILL.md +431 -0
- EvoScientist/skills/peft/references/advanced-usage.md +514 -0
- EvoScientist/skills/peft/references/troubleshooting.md +480 -0
- EvoScientist/skills/ray-data/SKILL.md +326 -0
- EvoScientist/skills/ray-data/references/integration.md +82 -0
- EvoScientist/skills/ray-data/references/transformations.md +83 -0
- EvoScientist/skills/skill-creator/LICENSE.txt +202 -0
- EvoScientist/skills/skill-creator/SKILL.md +356 -0
- EvoScientist/skills/skill-creator/references/output-patterns.md +82 -0
- EvoScientist/skills/skill-creator/references/workflows.md +28 -0
- EvoScientist/skills/skill-creator/scripts/init_skill.py +303 -0
- EvoScientist/skills/skill-creator/scripts/package_skill.py +110 -0
- EvoScientist/skills/skill-creator/scripts/quick_validate.py +95 -0
- EvoScientist/skills/tensorboard/SKILL.md +629 -0
- EvoScientist/skills/tensorboard/references/integrations.md +638 -0
- EvoScientist/skills/tensorboard/references/profiling.md +545 -0
- EvoScientist/skills/tensorboard/references/visualization.md +620 -0
- EvoScientist/skills/vllm/SKILL.md +364 -0
- EvoScientist/skills/vllm/references/optimization.md +226 -0
- EvoScientist/skills/vllm/references/quantization.md +284 -0
- EvoScientist/skills/vllm/references/server-deployment.md +255 -0
- EvoScientist/skills/vllm/references/troubleshooting.md +447 -0
- {evoscientist-0.1.0rc1.dist-info → evoscientist-0.1.0rc2.dist-info}/METADATA +26 -3
- evoscientist-0.1.0rc2.dist-info/RECORD +119 -0
- evoscientist-0.1.0rc1.dist-info/RECORD +0 -21
- {evoscientist-0.1.0rc1.dist-info → evoscientist-0.1.0rc2.dist-info}/WHEEL +0 -0
- {evoscientist-0.1.0rc1.dist-info → evoscientist-0.1.0rc2.dist-info}/entry_points.txt +0 -0
- {evoscientist-0.1.0rc1.dist-info → evoscientist-0.1.0rc2.dist-info}/licenses/LICENSE +0 -0
- {evoscientist-0.1.0rc1.dist-info → evoscientist-0.1.0rc2.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,447 @@
# Troubleshooting Guide

## Contents
- Out of memory (OOM) errors
- Performance issues
- Model loading errors
- Network and connection issues
- Quantization problems
- Distributed serving issues
- Debugging tools and commands

## Out of memory (OOM) errors

### Symptom: `torch.cuda.OutOfMemoryError` during model loading

**Cause**: Model weights plus KV cache exceed available VRAM

**Solutions (try in order; an offline-inference equivalent follows this list)**:

1. **Reduce GPU memory utilization**:
   ```bash
   vllm serve MODEL --gpu-memory-utilization 0.7  # Try 0.7, 0.75, 0.8
   ```

2. **Reduce max sequence length**:
   ```bash
   vllm serve MODEL --max-model-len 4096  # Instead of 8192
   ```

3. **Enable quantization**:
   ```bash
   vllm serve MODEL --quantization awq  # 4x memory reduction
   ```

4. **Use tensor parallelism** (multiple GPUs):
   ```bash
   vllm serve MODEL --tensor-parallel-size 2  # Split across 2 GPUs
   ```

5. **Reduce max concurrent sequences**:
   ```bash
   vllm serve MODEL --max-num-seqs 128  # Default is 256
   ```
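
The same levers are exposed as constructor arguments in vLLM's offline Python API; a minimal sketch, assuming the `vllm` package is installed and `MODEL` is replaced with a real model id:

```python
from vllm import LLM, SamplingParams

# Offline-inference equivalent of the server flags above (values are illustrative)
llm = LLM(
    model="MODEL",                # replace with your model id
    gpu_memory_utilization=0.7,   # --gpu-memory-utilization 0.7
    max_model_len=4096,           # --max-model-len 4096
    max_num_seqs=128,             # --max-num-seqs 128
)
print(llm.generate(["Hello"], SamplingParams(max_tokens=32))[0].outputs[0].text)
```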

### Symptom: OOM during inference (not model loading)

**Cause**: KV cache fills up during generation

**Solutions**:

```bash
# Reduce KV cache allocation
vllm serve MODEL --gpu-memory-utilization 0.85

# Reduce batch size
vllm serve MODEL --max-num-seqs 64

# Reduce max tokens per request
# Set in client request: max_tokens=512
```
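
To apply the per-request cap from the last comment, set `max_tokens` on the client side; a short sketch against the OpenAI-compatible endpoint (model name and prompt are placeholders):

```python
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
resp = client.completions.create(
    model="MODEL",    # the name the server was launched with
    prompt="Hello",
    max_tokens=512,   # cap generation length per request
)
print(resp.choices[0].text)
```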

### Symptom: OOM with quantized model

**Cause**: Quantization overhead or incorrect configuration

**Solution**:
```bash
# Ensure quantization flag matches model
vllm serve TheBloke/Llama-2-70B-AWQ --quantization awq  # Must specify

# Try different dtype
vllm serve MODEL --quantization awq --dtype float16
```

## Performance issues

### Symptom: Low throughput (<50 req/sec when >100 is expected)

**Diagnostic steps**:

1. **Check GPU utilization**:
   ```bash
   watch -n 1 nvidia-smi
   # GPU utilization should be >80%
   ```

   If <80%, increase concurrent requests:
   ```bash
   vllm serve MODEL --max-num-seqs 512  # Increase from 256
   ```

2. **Check if memory-bound**:
   ```bash
   # If memory is at 100% but GPU utilization is <80%, reduce sequence length
   vllm serve MODEL --max-model-len 4096
   ```

3. **Enable optimizations**:
   ```bash
   vllm serve MODEL \
     --enable-prefix-caching \
     --enable-chunked-prefill \
     --max-num-seqs 512
   ```

4. **Check tensor parallelism settings**:
   ```bash
   # Must use a power-of-2 number of GPUs
   vllm serve MODEL --tensor-parallel-size 4  # Not 3 or 5
   ```

### Symptom: High TTFT (time to first token >1 second)

**Causes and solutions**:

**Long prompts**:
```bash
vllm serve MODEL --enable-chunked-prefill
```

**No prefix caching**:
```bash
vllm serve MODEL --enable-prefix-caching  # For repeated prompts
```

**Too many concurrent requests**:
```bash
vllm serve MODEL --max-num-seqs 64  # Reduce to prioritize latency
```

**Model too large for single GPU**:
```bash
vllm serve MODEL --tensor-parallel-size 2  # Parallelize prefill
```
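
To check whether any of these changes actually moves TTFT, it can be measured from the client by timing the first streamed chunk; a rough sketch, assuming the OpenAI-compatible server above (model name is a placeholder):

```python
import time
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

start = time.perf_counter()
stream = client.completions.create(model="MODEL", prompt="Hello", max_tokens=64, stream=True)
for i, chunk in enumerate(stream):
    if i == 0:
        # time until the first streamed token arrives
        print(f"TTFT: {time.perf_counter() - start:.2f}s")
```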

### Symptom: Slow token generation (low tokens/sec)

**Diagnostic**:
```bash
# Check if model is correct size
vllm serve MODEL  # Should see model size in logs

# Check speculative decoding
vllm serve MODEL --speculative-model DRAFT_MODEL
```

**For H100 GPUs**, enable FP8:
```bash
vllm serve MODEL --quantization fp8
```

## Model loading errors

### Symptom: `OSError: MODEL not found`

**Causes**:

1. **Model name typo**:
   ```bash
   # Check exact model name on HuggingFace
   vllm serve meta-llama/Llama-3-8B-Instruct  # Correct capitalization
   ```

2. **Private/gated model**:
   ```bash
   # Login to HuggingFace first
   huggingface-cli login
   # Then run vLLM
   vllm serve meta-llama/Llama-3-70B-Instruct
   ```

3. **Custom model needs trust flag**:
   ```bash
   vllm serve MODEL --trust-remote-code
   ```

### Symptom: `ValueError: Tokenizer not found`

**Solution**:
```bash
# Download model manually first
python -c "from transformers import AutoTokenizer; AutoTokenizer.from_pretrained('MODEL')"

# Then launch vLLM
vllm serve MODEL
```

### Symptom: `ImportError: No module named 'flash_attn'`

**Solution**:
```bash
# Install flash attention
pip install flash-attn --no-build-isolation

# Or disable flash attention
vllm serve MODEL --disable-flash-attn
```

## Network and connection issues

### Symptom: `Connection refused` when querying server

**Diagnostic**:

1. **Check server is running**:
   ```bash
   curl http://localhost:8000/health
   ```

2. **Check port binding**:
   ```bash
   # Bind to all interfaces for remote access
   vllm serve MODEL --host 0.0.0.0 --port 8000

   # Check if port is in use
   lsof -i :8000
   ```

3. **Check firewall**:
   ```bash
   # Allow port through firewall
   sudo ufw allow 8000
   ```

### Symptom: Slow response times over network

**Solutions**:

1. **Increase timeout**:
   ```python
   from openai import OpenAI

   client = OpenAI(
       base_url="http://localhost:8000/v1",
       api_key="EMPTY",
       timeout=300.0  # 5 minute timeout
   )
   ```

2. **Check network latency**:
   ```bash
   ping SERVER_IP  # Should be <10ms for local network
   ```

3. **Use connection pooling**:
   ```python
   import requests
   from requests.adapters import HTTPAdapter
   from urllib3.util.retry import Retry

   session = requests.Session()
   retries = Retry(total=3, backoff_factor=1)
   session.mount('http://', HTTPAdapter(max_retries=retries))

   # Reuse the pooled session for every call to the server
   print(session.get("http://localhost:8000/health").status_code)
   ```

## Quantization problems

### Symptom: `RuntimeError: Quantization format not supported`

**Solution**:
```bash
# Ensure correct quantization method
vllm serve MODEL --quantization awq   # For AWQ models
vllm serve MODEL --quantization gptq  # For GPTQ models

# Check model card for quantization type
```

### Symptom: Poor quality outputs after quantization

**Diagnostic**:

1. **Verify model is correctly quantized**:
   ```bash
   # Check model config.json for quantization_config
   cat ~/.cache/huggingface/hub/models--MODEL/config.json
   ```

2. **Try different quantization method**:
   ```bash
   # If AWQ quality issues, try FP8 (H100 only)
   vllm serve MODEL --quantization fp8

   # Or use less aggressive quantization
   vllm serve MODEL  # No quantization
   ```

3. **Increase temperature for better diversity**:
   ```python
   sampling_params = SamplingParams(temperature=0.8, top_p=0.95)
   ```
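
For context, `SamplingParams` is passed to `LLM.generate`; a minimal offline sketch (model id and prompt are placeholders), which also makes it easy to compare quantized and unquantized outputs side by side:

```python
from vllm import LLM, SamplingParams

llm = LLM(model="MODEL", quantization="awq")  # drop quantization= for the unquantized baseline
sampling_params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=128)
for output in llm.generate(["Summarize what a KV cache does."], sampling_params):
    print(output.outputs[0].text)
```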

## Distributed serving issues

### Symptom: `RuntimeError: Distributed init failed`

**Diagnostic**:

1. **Check environment variables** (a Python sanity check follows these steps):
   ```bash
   # On all nodes
   echo $MASTER_ADDR  # Should be same
   echo $MASTER_PORT  # Should be same
   echo $RANK         # Should be unique per node (0, 1, 2, ...)
   echo $WORLD_SIZE   # Should be same (total nodes)
   ```

2. **Check network connectivity**:
   ```bash
   # From node 1 to node 2
   ping NODE2_IP
   nc -zv NODE2_IP 29500  # Check port accessibility
   ```

3. **Check NCCL settings**:
   ```bash
   export NCCL_DEBUG=INFO
   export NCCL_SOCKET_IFNAME=eth0  # Or your network interface
   vllm serve MODEL --tensor-parallel-size 8
   ```
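
A quick Python sanity check for the same prerequisites, assuming PyTorch with CUDA is installed on every node:

```python
import os
import torch

# Confirm the rendezvous variables are set consistently on this node
for var in ("MASTER_ADDR", "MASTER_PORT", "RANK", "WORLD_SIZE"):
    print(f"{var}={os.environ.get(var, '<unset>')}")

print("NCCL available:", torch.distributed.is_nccl_available())
print("NCCL version:  ", torch.cuda.nccl.version())
print("Visible GPUs:  ", torch.cuda.device_count())
```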

### Symptom: `NCCL error: unhandled cuda error`

**Solutions**:

```bash
# Set NCCL to use correct network interface
export NCCL_SOCKET_IFNAME=eth0  # Replace with your interface

# Increase timeout
export NCCL_TIMEOUT=1800  # 30 minutes

# Disable P2P transfers while debugging
export NCCL_P2P_DISABLE=1
```

## Debugging tools and commands

### Enable debug logging

```bash
export VLLM_LOGGING_LEVEL=DEBUG
vllm serve MODEL
```

### Monitor GPU usage

```bash
# Real-time GPU monitoring
watch -n 1 nvidia-smi

# Memory breakdown
nvidia-smi --query-gpu=memory.used,memory.free --format=csv -l 1
```

### Profile performance

```bash
# Built-in benchmarking
vllm bench throughput \
  --model MODEL \
  --input-tokens 128 \
  --output-tokens 256 \
  --num-prompts 100

vllm bench latency \
  --model MODEL \
  --input-tokens 128 \
  --output-tokens 256 \
  --batch-size 8
```

### Check metrics

```bash
# Prometheus metrics
curl http://localhost:9090/metrics

# Filter for specific metrics
curl http://localhost:9090/metrics | grep vllm_time_to_first_token

# Key metrics to monitor:
# - vllm_time_to_first_token_seconds
# - vllm_time_per_output_token_seconds
# - vllm_num_requests_running
# - vllm_gpu_cache_usage_perc
# - vllm_request_success_total
```
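
The same scrape can be done from Python when you want to post-process the numbers; a small sketch against the metrics URL used above:

```python
import requests

# Print only vLLM metric samples, skipping Prometheus comment lines
text = requests.get("http://localhost:9090/metrics", timeout=5).text
for line in text.splitlines():
    if "vllm" in line and not line.startswith("#"):
        print(line)
```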

### Test server health

```bash
# Health check
curl http://localhost:8000/health

# Model info
curl http://localhost:8000/v1/models

# Test completion
curl http://localhost:8000/v1/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "MODEL",
    "prompt": "Hello",
    "max_tokens": 10
  }'
```
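
The same checks from Python, assuming the server runs on port 8000 and `MODEL` matches the served model name:

```python
import requests
from openai import OpenAI

print(requests.get("http://localhost:8000/health", timeout=5).status_code)  # expect 200

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
print([m.id for m in client.models.list().data])  # served model ids
print(client.completions.create(model="MODEL", prompt="Hello", max_tokens=10).choices[0].text)
```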

### Common environment variables

```bash
# CUDA settings
export CUDA_VISIBLE_DEVICES=0,1,2,3  # Limit to specific GPUs

# vLLM settings
export VLLM_LOGGING_LEVEL=DEBUG
export VLLM_TRACE_FUNCTION=1  # Profile functions
export VLLM_USE_V1=1  # Use the V1 engine (faster)

# NCCL settings (distributed)
export NCCL_DEBUG=INFO
export NCCL_SOCKET_IFNAME=eth0
export NCCL_IB_DISABLE=0  # Enable InfiniBand
```

### Collect diagnostic info for bug reports

```bash
# System info
nvidia-smi
python --version
pip show vllm

# vLLM version and config
vllm --version
python -c "import vllm; print(vllm.__version__)"

# Run with debug logging
export VLLM_LOGGING_LEVEL=DEBUG
vllm serve MODEL 2>&1 | tee vllm_debug.log

# Include in bug report:
# - vllm_debug.log
# - nvidia-smi output
# - Full command used
# - Expected vs actual behavior
```
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: EvoScientist
-Version: 0.1.0rc1
+Version: 0.1.0rc2
 Summary: EvoScientist: Towards Self-Evolving AI Scientists for End-to-End Scientific Discovery
 Author: Xi Zhang
 Maintainer: Xi Zhang
@@ -121,14 +121,15 @@ python -m EvoScientist
 
 ### Script Inference
 ```python
-from EvoScientist import
+from EvoScientist import EvoScientist_agent
 from langchain_core.messages import HumanMessage
 from EvoScientist.utils import format_messages
+
 thread = {"configurable": {"thread_id": "1"}}
 question = "Hi?"
 last_len = 0
 
-for state in
+for state in EvoScientist_agent.stream(
 {"messages": [HumanMessage(content=question)]},
 config=thread,
 stream_mode="values",
@@ -139,6 +140,28 @@ for state in agent.stream(
 last_len = len(msgs)
 ```
 
+<details>
+<summary> Output </summary>
+
+```json
+
+╭──────────────────────────── 🧑 Human ────────────────────────────╮
+│ Hi?                                                              │
+╰──────────────────────────────────────────────────────────────────╯
+╭───────────────────────────── 📝 AI ──────────────────────────────╮
+│ Hi! I'm here to help you with experimental research tasks. I can assist with:           │
+│                                                                                          │
+│ - **Planning experiments** - designing stages, success criteria, and workflows          │
+│ - **Running experiments** - implementing baselines, training models, analyzing results  │
+│ - **Research** - finding papers, methods, datasets, and baselines                       │
+│ - **Analysis** - computing metrics, creating visualizations, interpreting results       │
+│ - **Writing** - drafting experimental reports and documentation                         │
+│                                                                                          │
+│ What would you like to work on today?                                                    │
+╰──────────────────────────────────────────────────────────────────╯
+```
+
+</details>
 
 ### Web Interface
 
@@ -0,0 +1,119 @@
EvoScientist/EvoScientist.py,sha256=j0J-bIIxAvG1acsDCcEYmb4FIYoyibdRSQSkzoQUcg4,4627
EvoScientist/__init__.py,sha256=yj4YJUPldtq4xx8geadIoZ1n-BmgsM2Z_S2W8ld9S1k,702
EvoScientist/__main__.py,sha256=P5USQ_BiiY0TBhtajGMuX5AMXlQ5joz1kvljHeacqmA,91
EvoScientist/backends.py,sha256=yKI5fofVd3sDxJp5BEaqPsXjIOrKrpge1KnFG-X7gzU,12321
EvoScientist/cli.py,sha256=-7iL6NUjW5CmLKx1BLd0d-Y2DtcXPcoMOCdOgA6X-_E,59946
EvoScientist/middleware.py,sha256=ai2SZjSNtGCMQdFiMELPJ1t0uKPDZ199Lp5dEr5EnTo,931
EvoScientist/prompts.py,sha256=ni-qHoEoM703xtfD4J3IlvwAWPcW8gcNiCBVjRJoy3g,10692
EvoScientist/subagent.yaml,sha256=yoFlMJWitLlYuOe4_44EPG6vjpNP_txHBlqr-kNX2GM,6130
EvoScientist/tools.py,sha256=oYaJN8tl_NCw6PLFFNRZbjPB9-VriVcpVb6Zp-d_xVQ,4223
EvoScientist/utils.py,sha256=GuWMRMkPHYgUOtoPZA0ErIt4FUBE4NMkz_OZluxYyRo,6801
EvoScientist/skills/accelerate/SKILL.md,sha256=Ju_xLlGW3bZBWQO_8FWThkP155jE2atq9Ww0Zn8XwEs,8328
EvoScientist/skills/accelerate/references/custom-plugins.md,sha256=Gy8c9dcKFV93ksrRKkZr5DFbeH_iHgGzbTK_N0aTwCA,11781
EvoScientist/skills/accelerate/references/megatron-integration.md,sha256=ad7cw8eMGX3vqGUxQSVBuWpiQJDAr9eHqWfPhlEXJz0,11252
EvoScientist/skills/accelerate/references/performance.md,sha256=VZ6gUcI3c9OnInYQAYQpoOEuSXlZSabprfqRswUYKjc,12569
EvoScientist/skills/bitsandbytes/SKILL.md,sha256=3VM2RakwcfyTM6Yul6MwgDnUqHPV-3FbTPa8-LpGo74,10108
EvoScientist/skills/bitsandbytes/references/memory-optimization.md,sha256=a8y1Mt5qkEeFylaRRPuGSKGnBXIL2JT6xFDdW-qBBWc,12642
EvoScientist/skills/bitsandbytes/references/qlora-training.md,sha256=E2Nrxy-_gLz6nEgA30MQ_RQ4mdRX3w-aJO7f16RJriQ,12015
EvoScientist/skills/bitsandbytes/references/quantization-formats.md,sha256=U1nvDKjlg305DaYvlyJW2SS5NMNVI2XHD__m1YEErH4,10258
EvoScientist/skills/clip/SKILL.md,sha256=n8rPHAQdce8eYcfKwrt6LELD0Z1sKg_C5fWjyWQbej4,6884
EvoScientist/skills/clip/references/applications.md,sha256=aC_DE-Y8775GbtDv30gEiUajq7KS5fMs9AWPz1lpKuk,5360
EvoScientist/skills/find-skills/SKILL.md,sha256=fvjNEIEVsOauW_LuEKshYkbvd19pRdVV-rXcQqfFeKo,4169
EvoScientist/skills/find-skills/scripts/install_skill.py,sha256=sbLrZeY887LeiKcvA7f3AuxJI98PwGeAVyttVXxs_eA,6332
EvoScientist/skills/flash-attention/SKILL.md,sha256=3GL1xP6jFkjZhs9ZcE-61FbkB5d4p7NvVXm9MvxTG58,10190
EvoScientist/skills/flash-attention/references/benchmarks.md,sha256=Jk_zBgr4JMMvem48DgIIw4ZGxyiyKS-zXt6SOjjWC2s,7129
EvoScientist/skills/flash-attention/references/transformers-integration.md,sha256=ievo4gdW7iGBA6EUTUaC6WCg-qB4cMJYhTC8jSbEQaE,7427
EvoScientist/skills/langgraph-docs/SKILL.md,sha256=8Whxox7qKnNti7ijSJlDtscsk2dPXeNFFdr8dKWHAfY,1054
EvoScientist/skills/llama-cpp/SKILL.md,sha256=10yqRUg_Wzxr1iZ-ObREYnmJ_5D9uEtsLnw9pN3xQ-8,5912
EvoScientist/skills/llama-cpp/references/optimization.md,sha256=TomBHAgn9eud65_GCc1EHrGX-w-oXnndjB8nKxSmaf0,1659
EvoScientist/skills/llama-cpp/references/quantization.md,sha256=x2hqhok9Tg4FLMczti6hvA5dro8bF0Q2zCk6RqSGiT0,4956
EvoScientist/skills/llama-cpp/references/server.md,sha256=GBqErhd_MFIFzDDQUwX8QjeqJV8__ykpk2LLHgy222Q,2259
EvoScientist/skills/lm-evaluation-harness/SKILL.md,sha256=PO6Ky_6Pkd0I8MAK0WeV4YTtWdWCD0rRSbCFbEhB63Q,11893
EvoScientist/skills/lm-evaluation-harness/references/api-evaluation.md,sha256=QzNy2EuLvWwo5blxwarnxdLQMqrp_yPdfXpHpRJ33Hg,11114
EvoScientist/skills/lm-evaluation-harness/references/benchmark-guide.md,sha256=bz5tr3jO4hRTzrwcRojuEm-_DUVmeCSW3VhB3Waa4pU,10769
EvoScientist/skills/lm-evaluation-harness/references/custom-tasks.md,sha256=xrVZHHfBZXC0P6X0l0BaZfNe9z0XAqOHHMDIolfHaus,13125
EvoScientist/skills/lm-evaluation-harness/references/distributed-eval.md,sha256=PdxFUfJc4NPLCflx8nGRMx-2W1tMiC79Spvee9-4I1M,11426
EvoScientist/skills/ml-paper-writing/SKILL.md,sha256=f6mlckj8UCXM9AjZ7Cgyn7r3FPF6S-K5n-3rV8RxvNk,35573
EvoScientist/skills/ml-paper-writing/references/checklists.md,sha256=Azqosw47finMHIK-GFdkNRSNVzC8KArPWRrU7_-yXqA,10774
EvoScientist/skills/ml-paper-writing/references/citation-workflow.md,sha256=Bno7AtqotNDzI45k009HG7NZuW513c6PqWKq9eQx5-0,15167
EvoScientist/skills/ml-paper-writing/references/reviewer-guidelines.md,sha256=PHC-X7hWi2SZGqeLYc9H02EQgnxILiSSvn_fu4OKEOA,10451
EvoScientist/skills/ml-paper-writing/references/sources.md,sha256=e68aVk_r_SRnezFs9HPs3w3IbSjgsc23e3ogvdDqCqY,7310
EvoScientist/skills/ml-paper-writing/references/writing-guide.md,sha256=7V_2lgoEvThbaihXeiZCgFbjWSAAGxkWJApOtWP1Rwk,16297
EvoScientist/skills/ml-paper-writing/templates/README.md,sha256=5l3BoiyxUNoSTb1N-wjsOktol4Q2kYqIDoWB1vdOOeU,6707
EvoScientist/skills/ml-paper-writing/templates/aaai2026/README.md,sha256=7ixhqLBuFHHA33x3QHTS_xvoJOY3ep449q3KXGjdu2A,17987
EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026-unified-supp.tex,sha256=Na3P9BnvwloDIUDFINQFINDQ_GkSPp2OOK0WHECtB7c,4548
EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026-unified-template.tex,sha256=vtEsULXgJ_2ICX0jjsTn6_Gue0UMsG1t98iuEmpiCuo,63140
EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026.bib,sha256=2Pzz3Am51Imu_xNjEXTFmYpIyw0IAKyKAKL3vbxAKVs,4766
EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026.bst,sha256=rCbixmBHQ1wO0l8hrjatQtcxzz15TEqLXwWmIUGicpQ,30207
EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026.sty,sha256=o58YZqBN0eJgNhPIdvpEaq6iwvPjTQK-ho_SFSC_reU,11802
EvoScientist/skills/ml-paper-writing/templates/acl/README.md,sha256=5VYI-bwLVk9Y2GTUMZdUd4XvAhw-wcXwptIFeqYVaB4,2126
EvoScientist/skills/ml-paper-writing/templates/acl/acl.sty,sha256=Gd_t3CwORI85JqC-8Eip2z82EbRiZbdgyqvXraTzYd4,11615
EvoScientist/skills/ml-paper-writing/templates/acl/acl_latex.tex,sha256=M5ye6XBcF2fUTvJLhTZcC7hhnsw_z2YXK_E1YpI4lhQ,14533
EvoScientist/skills/ml-paper-writing/templates/acl/acl_lualatex.tex,sha256=DZmHuoMzMamW-avNGgPuu6fXrjMRRXk-fdtrWujbJ7M,3050
EvoScientist/skills/ml-paper-writing/templates/acl/acl_natbib.bst,sha256=4zL9UdzqSOKoqJdUiSw8uZZ0oc1wtSe2Yemq_8I16Dw,45186
EvoScientist/skills/ml-paper-writing/templates/acl/anthology.bib.txt,sha256=K3jS2a7aYuFMTkYJnoIltfwRY4fY4KVKrXdkheJJzv8,1169
EvoScientist/skills/ml-paper-writing/templates/acl/custom.bib,sha256=12zLMN3OtwyeGtC-P0Pf4wGtV2Wqsslg_IMO9nv4Iyo,2071
EvoScientist/skills/ml-paper-writing/templates/acl/formatting.md,sha256=ngBKE2xd1DMA2X5b716zp30OKi01AvDyQ27AEdHEnpw,17923
EvoScientist/skills/ml-paper-writing/templates/colm2025/README.md,sha256=fCxYXVZUgjV9_99Jm1Z5KeB6nwlhlgjBnOnrKYRpfqE,51
EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.bib,sha256=xfq_Rsx9em5Se4KGC5qdZY6wf2Orqvqbe-tjHYq7kb0,496
EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.bst,sha256=LWdVLbftOMz8y1lXtS-VZW4lwklyR2HTz195Iq0YRMU,26973
EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.pdf,sha256=2LPXS8ga7Jup1rc5tJIq3JOuAC18glIrnGBkAUvsHRY,122635
EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.sty,sha256=ed-LKhsULfsySgp-h_q82dXcKjRwDhxsCDqbu55dqo4,7727
EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.tex,sha256=uvAc0FbM0bNRZdGQAZQUlGIX3haD-K493GaxHjxlPbw,12830
EvoScientist/skills/ml-paper-writing/templates/colm2025/fancyhdr.sty,sha256=tW7EQ0ufRgdSmksj3GitjUuU8fYxyM3a99p4FA1Tpeo,20521
EvoScientist/skills/ml-paper-writing/templates/colm2025/math_commands.tex,sha256=kEc8TQVCBw2yRM6nPvli1s3cWyp0Z1fmpA3fX9-5C6k,12284
EvoScientist/skills/ml-paper-writing/templates/colm2025/natbib.sty,sha256=iLxwwOSEYZNMq1sqzO8Gt0qLOsRa0DzNPyprfg1tUw0,45154
EvoScientist/skills/ml-paper-writing/templates/iclr2026/fancyhdr.sty,sha256=tW7EQ0ufRgdSmksj3GitjUuU8fYxyM3a99p4FA1Tpeo,20521
EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.bib,sha256=zdhufUwxhU3PIUWHFlfJRFiKbUTDty4WD_S6qN8aUvs,629
EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.bst,sha256=LWdVLbftOMz8y1lXtS-VZW4lwklyR2HTz195Iq0YRMU,26973
EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.pdf,sha256=yz1BTPpHAtUt6U3hyBI8NLLPRrelUQ30ncwQFKbCZs8,200508
EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.sty,sha256=pIUvaOCA1sUkUFfKIDkQC0CeMXJ4mKqTwD143bhDdKM,9025
EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.tex,sha256=lBtY3m5S9VON4Ov-LSBCWnnhdoyycdRTt0NpBLUVmFk,16899
EvoScientist/skills/ml-paper-writing/templates/iclr2026/math_commands.tex,sha256=kEc8TQVCBw2yRM6nPvli1s3cWyp0Z1fmpA3fX9-5C6k,12284
EvoScientist/skills/ml-paper-writing/templates/iclr2026/natbib.sty,sha256=iLxwwOSEYZNMq1sqzO8Gt0qLOsRa0DzNPyprfg1tUw0,45154
EvoScientist/skills/ml-paper-writing/templates/icml2026/algorithm.sty,sha256=k_0OsxwRLrQFgz248df10jjH5pGxwFaA1yduaPNtVko,2223
EvoScientist/skills/ml-paper-writing/templates/icml2026/algorithmic.sty,sha256=SNGHlKXZfAR5pYjMLqwJF5kv652oOsxGMbj1V1fYD5s,7414
EvoScientist/skills/ml-paper-writing/templates/icml2026/example_paper.bib,sha256=35UBA9OPnPyBsfQNhMm-KjUl0EbSmRppc6REaSLAa9E,2051
EvoScientist/skills/ml-paper-writing/templates/icml2026/example_paper.pdf,sha256=Po_g6VLehwLKRpfboJylLoO9ZJdnuSqDQNNKWRyqTRs,193509
EvoScientist/skills/ml-paper-writing/templates/icml2026/example_paper.tex,sha256=wsqBQL8lXR_3fRJ46z7v7UAYtLHnEGW44cy8aLdMis8,29714
EvoScientist/skills/ml-paper-writing/templates/icml2026/fancyhdr.sty,sha256=kTDFL5EIerxtIjFk_6WH4gfjJX_LzQae8J7LU5EEPxQ,31715
EvoScientist/skills/ml-paper-writing/templates/icml2026/icml2026.bst,sha256=DsPV65sC77fgtEoy83dYgvQqdD0L3GGPNOaTYwm5h2Q,27147
EvoScientist/skills/ml-paper-writing/templates/icml2026/icml2026.sty,sha256=fNz5D2pZxSGefxXIj37Qn8r1mNrZHmzd3E3Dyw45epU,27344
EvoScientist/skills/ml-paper-writing/templates/icml2026/icml_numpapers.pdf,sha256=006NqYIpY2NieZbm4YhQwR-9YW5pRgYSNwUeeo9wgLs,2823
EvoScientist/skills/ml-paper-writing/templates/neurips2025/Makefile,sha256=CYNCXXTnafJFf2_wZUz3b8DJX40Vj8jq7GbFaXugWU8,1054
EvoScientist/skills/ml-paper-writing/templates/neurips2025/extra_pkgs.tex,sha256=_NawkVb6GTw0f2rNgYjUMPoo92n7biwlFMu9IVw5AUo,2837
EvoScientist/skills/ml-paper-writing/templates/neurips2025/main.tex,sha256=NmwJP7wwGMRt4o8_4OywY7SroQoImv4DeDe79nOpxNY,574
EvoScientist/skills/ml-paper-writing/templates/neurips2025/neurips.sty,sha256=73pV8KfJ2hL-o5r3mQpqMUpyG3SiOxVE8lLlX8upQM0,11625
EvoScientist/skills/peft/SKILL.md,sha256=5QUOUtNXQaGPRP3bIMEJ1FZ6mWslSPD59-wrxiE0pnI,12198
EvoScientist/skills/peft/references/advanced-usage.md,sha256=hmPPda-i4QSIMvaAyFlLJwnAIFYDEzq16BVhaXyEdlk,12541
EvoScientist/skills/peft/references/troubleshooting.md,sha256=BmRb5Zua0EYeg3_OWhsKcSGxmhNTY1wE_fnfWhNXhzg,10344
EvoScientist/skills/ray-data/SKILL.md,sha256=vMc7M0HcoYxZytCSy23tt6wL8L3oBGegOZIawMAiVsM,7328
EvoScientist/skills/ray-data/references/integration.md,sha256=0YLgJbA5Z3lELxa2RhwL8UCjN2NQUc-4RyaYoY9Amco,1851
EvoScientist/skills/ray-data/references/transformations.md,sha256=r8Aa3lO8ld5t7G3mFLUsb5LrSgsUF0DZN7XdGFXeOA0,1664
EvoScientist/skills/skill-creator/LICENSE.txt,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
EvoScientist/skills/skill-creator/SKILL.md,sha256=suPYP2BCXC0LnUFi77j5qjIrRYQ0lzQPIon0K-QigBw,17837
EvoScientist/skills/skill-creator/references/output-patterns.md,sha256=1gJ4ALnYwmWJ7bqFtCkBpBrDbXU7Tpzrx_TLsDzc8KQ,1813
EvoScientist/skills/skill-creator/references/workflows.md,sha256=70hGh31dq0dRGgGmzzFHatZPW8WUVjVFkpV5RXUziYA,818
EvoScientist/skills/skill-creator/scripts/init_skill.py,sha256=C7olC5TKpMsrKLFdrSb9zzca7aTJeXuBIOVcLjPgxzw,10863
EvoScientist/skills/skill-creator/scripts/package_skill.py,sha256=sx-8s-Ni1cUwjpklX_joPN46UT4GNpU5ZKZISUyhCTE,3288
EvoScientist/skills/skill-creator/scripts/quick_validate.py,sha256=OBqy09H9XsMjcQFkEwY-_5nQ1IxBV0cIXGFRfpZ9u4c,3523
EvoScientist/skills/tensorboard/SKILL.md,sha256=cXxLBfiLvIiNVMxR-jkK4iXWh7O4b7YW42zH_CYQgCA,15276
EvoScientist/skills/tensorboard/references/integrations.md,sha256=pkfUfpWJ63q7vLSU_CyPIcK4yrH-46WeMMq0uBAUzUo,16205
EvoScientist/skills/tensorboard/references/profiling.md,sha256=FHEwLVicAMO9nQJHAI1zLk9EaEJpKy3EWH7IJ4rNM9g,13337
EvoScientist/skills/tensorboard/references/visualization.md,sha256=Dl72GAjcC7HTggoGlaqGpK9mWfG6IyQgSVO4TdpvOBU,14897
EvoScientist/skills/vllm/SKILL.md,sha256=j4Yt6lbZa9jQvBlUa92hYWYwZCVQeWFBxI17_4zs4DI,9013
EvoScientist/skills/vllm/references/optimization.md,sha256=D4YNM3rbHQcj1AcNizFEUX8RGtf2U2BpJ7xnp6AZsaw,5769
EvoScientist/skills/vllm/references/quantization.md,sha256=X7MOqej77Ep_MEfzfRDAkA0QzS32ZVWBj_1CWjspsnA,6746
EvoScientist/skills/vllm/references/server-deployment.md,sha256=fbYY7EC4Zf4u-s9GwEw9XcIz_GSjxj0mRRr7l7eVET4,5252
EvoScientist/skills/vllm/references/troubleshooting.md,sha256=MHrNkI5gZ0AXvSNjcm5rm-GUEAYuPbBkUpCmmAKXYoM,9037
EvoScientist/stream/__init__.py,sha256=cO30Ujs7mVB1pZjI0AXgaggmPeV6jXgBjt6AKYH42kQ,1219
EvoScientist/stream/emitter.py,sha256=zmdn8vIXNgkgoWRtanrMtPN2GlsHmZtdFmoqCry3o_o,2978
EvoScientist/stream/formatter.py,sha256=vyv89NGoTzxjsWt0teBXX38MZzFLhzMDBUcIH6-slDo,5532
EvoScientist/stream/tracker.py,sha256=cVWmiTCiwzOh84I7sAXXfgr_HXxOCCCzTvli-XJhthQ,3670
EvoScientist/stream/utils.py,sha256=36e5aaEZVpwTZnqO1qlVs2QTr04JbmWuvHMSbor49xM,7278
evoscientist-0.1.0rc2.dist-info/licenses/LICENSE,sha256=NsYFy5JSm90lmXcPNQuthVTCVaVcbT4XZ9h5HpAoGIo,1069
evoscientist-0.1.0rc2.dist-info/METADATA,sha256=2a_6L4t6tuldTQEvLYZFyoNgOeOFxFq5Pr0hdO0YxOY,8581
evoscientist-0.1.0rc2.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
evoscientist-0.1.0rc2.dist-info/entry_points.txt,sha256=AQTfRSD-9-fgD1ViOdbGcvhsNeaUNgvmNqLkNY7o7Zw,55
evoscientist-0.1.0rc2.dist-info/top_level.txt,sha256=XBZouSd9lQfNn0Fus6jQb9nqdXzAbKyzh987Jt3A2-A,13
evoscientist-0.1.0rc2.dist-info/RECORD,,
@@ -1,21 +0,0 @@
EvoScientist/EvoScientist.py,sha256=9q0iSru5zrwXMvK1sUtExGF4YAkIq2HZiJINuMRBz7o,4601
EvoScientist/__init__.py,sha256=yj4YJUPldtq4xx8geadIoZ1n-BmgsM2Z_S2W8ld9S1k,702
EvoScientist/__main__.py,sha256=P5USQ_BiiY0TBhtajGMuX5AMXlQ5joz1kvljHeacqmA,91
EvoScientist/backends.py,sha256=yKI5fofVd3sDxJp5BEaqPsXjIOrKrpge1KnFG-X7gzU,12321
EvoScientist/cli.py,sha256=TsLj8Dgkr0gRgVSpnazx1qRvF4X-yw6qRSqqK7c94VE,49399
EvoScientist/middleware.py,sha256=3ibUbv0jFyzRNcKBy7V2oZqIHQu-NSPWt1C3EHgj0lY,836
EvoScientist/prompts.py,sha256=ni-qHoEoM703xtfD4J3IlvwAWPcW8gcNiCBVjRJoy3g,10692
EvoScientist/subagent.yaml,sha256=yoFlMJWitLlYuOe4_44EPG6vjpNP_txHBlqr-kNX2GM,6130
EvoScientist/tools.py,sha256=oYaJN8tl_NCw6PLFFNRZbjPB9-VriVcpVb6Zp-d_xVQ,4223
EvoScientist/utils.py,sha256=GuWMRMkPHYgUOtoPZA0ErIt4FUBE4NMkz_OZluxYyRo,6801
EvoScientist/stream/__init__.py,sha256=cO30Ujs7mVB1pZjI0AXgaggmPeV6jXgBjt6AKYH42kQ,1219
EvoScientist/stream/emitter.py,sha256=zmdn8vIXNgkgoWRtanrMtPN2GlsHmZtdFmoqCry3o_o,2978
EvoScientist/stream/formatter.py,sha256=vyv89NGoTzxjsWt0teBXX38MZzFLhzMDBUcIH6-slDo,5532
EvoScientist/stream/tracker.py,sha256=cVWmiTCiwzOh84I7sAXXfgr_HXxOCCCzTvli-XJhthQ,3670
EvoScientist/stream/utils.py,sha256=36e5aaEZVpwTZnqO1qlVs2QTr04JbmWuvHMSbor49xM,7278
evoscientist-0.1.0rc1.dist-info/licenses/LICENSE,sha256=NsYFy5JSm90lmXcPNQuthVTCVaVcbT4XZ9h5HpAoGIo,1069
evoscientist-0.1.0rc1.dist-info/METADATA,sha256=FHHeFCQVotPc5FG1Z-cmz1blHtf_gyFGxyxrG5rclCg,5936
evoscientist-0.1.0rc1.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
evoscientist-0.1.0rc1.dist-info/entry_points.txt,sha256=AQTfRSD-9-fgD1ViOdbGcvhsNeaUNgvmNqLkNY7o7Zw,55
evoscientist-0.1.0rc1.dist-info/top_level.txt,sha256=XBZouSd9lQfNn0Fus6jQb9nqdXzAbKyzh987Jt3A2-A,13
evoscientist-0.1.0rc1.dist-info/RECORD,,
File without changes
File without changes
File without changes
File without changes