evoscientist-0.0.1.dev3-py3-none-any.whl → evoscientist-0.1.0rc1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (108)
  1. EvoScientist/EvoScientist.py +17 -49
  2. EvoScientist/backends.py +0 -26
  3. EvoScientist/cli.py +1109 -255
  4. EvoScientist/middleware.py +8 -61
  5. EvoScientist/stream/__init__.py +0 -25
  6. EvoScientist/stream/utils.py +16 -23
  7. EvoScientist/tools.py +0 -64
  8. evoscientist-0.1.0rc1.dist-info/METADATA +199 -0
  9. evoscientist-0.1.0rc1.dist-info/RECORD +21 -0
  10. evoscientist-0.1.0rc1.dist-info/entry_points.txt +2 -0
  11. EvoScientist/memory.py +0 -715
  12. EvoScientist/paths.py +0 -45
  13. EvoScientist/skills/accelerate/SKILL.md +0 -332
  14. EvoScientist/skills/accelerate/references/custom-plugins.md +0 -453
  15. EvoScientist/skills/accelerate/references/megatron-integration.md +0 -489
  16. EvoScientist/skills/accelerate/references/performance.md +0 -525
  17. EvoScientist/skills/bitsandbytes/SKILL.md +0 -411
  18. EvoScientist/skills/bitsandbytes/references/memory-optimization.md +0 -521
  19. EvoScientist/skills/bitsandbytes/references/qlora-training.md +0 -521
  20. EvoScientist/skills/bitsandbytes/references/quantization-formats.md +0 -447
  21. EvoScientist/skills/find-skills/SKILL.md +0 -133
  22. EvoScientist/skills/find-skills/scripts/install_skill.py +0 -211
  23. EvoScientist/skills/flash-attention/SKILL.md +0 -367
  24. EvoScientist/skills/flash-attention/references/benchmarks.md +0 -215
  25. EvoScientist/skills/flash-attention/references/transformers-integration.md +0 -293
  26. EvoScientist/skills/llama-cpp/SKILL.md +0 -258
  27. EvoScientist/skills/llama-cpp/references/optimization.md +0 -89
  28. EvoScientist/skills/llama-cpp/references/quantization.md +0 -213
  29. EvoScientist/skills/llama-cpp/references/server.md +0 -125
  30. EvoScientist/skills/lm-evaluation-harness/SKILL.md +0 -490
  31. EvoScientist/skills/lm-evaluation-harness/references/api-evaluation.md +0 -490
  32. EvoScientist/skills/lm-evaluation-harness/references/benchmark-guide.md +0 -488
  33. EvoScientist/skills/lm-evaluation-harness/references/custom-tasks.md +0 -602
  34. EvoScientist/skills/lm-evaluation-harness/references/distributed-eval.md +0 -519
  35. EvoScientist/skills/ml-paper-writing/SKILL.md +0 -937
  36. EvoScientist/skills/ml-paper-writing/references/checklists.md +0 -361
  37. EvoScientist/skills/ml-paper-writing/references/citation-workflow.md +0 -562
  38. EvoScientist/skills/ml-paper-writing/references/reviewer-guidelines.md +0 -367
  39. EvoScientist/skills/ml-paper-writing/references/sources.md +0 -159
  40. EvoScientist/skills/ml-paper-writing/references/writing-guide.md +0 -476
  41. EvoScientist/skills/ml-paper-writing/templates/README.md +0 -251
  42. EvoScientist/skills/ml-paper-writing/templates/aaai2026/README.md +0 -534
  43. EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026-unified-supp.tex +0 -144
  44. EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026-unified-template.tex +0 -952
  45. EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026.bib +0 -111
  46. EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026.bst +0 -1493
  47. EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026.sty +0 -315
  48. EvoScientist/skills/ml-paper-writing/templates/acl/README.md +0 -50
  49. EvoScientist/skills/ml-paper-writing/templates/acl/acl.sty +0 -312
  50. EvoScientist/skills/ml-paper-writing/templates/acl/acl_latex.tex +0 -377
  51. EvoScientist/skills/ml-paper-writing/templates/acl/acl_lualatex.tex +0 -101
  52. EvoScientist/skills/ml-paper-writing/templates/acl/acl_natbib.bst +0 -1940
  53. EvoScientist/skills/ml-paper-writing/templates/acl/anthology.bib.txt +0 -26
  54. EvoScientist/skills/ml-paper-writing/templates/acl/custom.bib +0 -70
  55. EvoScientist/skills/ml-paper-writing/templates/acl/formatting.md +0 -326
  56. EvoScientist/skills/ml-paper-writing/templates/colm2025/README.md +0 -3
  57. EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.bib +0 -11
  58. EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.bst +0 -1440
  59. EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.pdf +0 -0
  60. EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.sty +0 -218
  61. EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.tex +0 -305
  62. EvoScientist/skills/ml-paper-writing/templates/colm2025/fancyhdr.sty +0 -485
  63. EvoScientist/skills/ml-paper-writing/templates/colm2025/math_commands.tex +0 -508
  64. EvoScientist/skills/ml-paper-writing/templates/colm2025/natbib.sty +0 -1246
  65. EvoScientist/skills/ml-paper-writing/templates/iclr2026/fancyhdr.sty +0 -485
  66. EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.bib +0 -24
  67. EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.bst +0 -1440
  68. EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.pdf +0 -0
  69. EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.sty +0 -246
  70. EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.tex +0 -414
  71. EvoScientist/skills/ml-paper-writing/templates/iclr2026/math_commands.tex +0 -508
  72. EvoScientist/skills/ml-paper-writing/templates/iclr2026/natbib.sty +0 -1246
  73. EvoScientist/skills/ml-paper-writing/templates/icml2026/algorithm.sty +0 -79
  74. EvoScientist/skills/ml-paper-writing/templates/icml2026/algorithmic.sty +0 -201
  75. EvoScientist/skills/ml-paper-writing/templates/icml2026/example_paper.bib +0 -75
  76. EvoScientist/skills/ml-paper-writing/templates/icml2026/example_paper.pdf +0 -0
  77. EvoScientist/skills/ml-paper-writing/templates/icml2026/example_paper.tex +0 -662
  78. EvoScientist/skills/ml-paper-writing/templates/icml2026/fancyhdr.sty +0 -864
  79. EvoScientist/skills/ml-paper-writing/templates/icml2026/icml2026.bst +0 -1443
  80. EvoScientist/skills/ml-paper-writing/templates/icml2026/icml2026.sty +0 -767
  81. EvoScientist/skills/ml-paper-writing/templates/icml2026/icml_numpapers.pdf +0 -0
  82. EvoScientist/skills/ml-paper-writing/templates/neurips2025/Makefile +0 -36
  83. EvoScientist/skills/ml-paper-writing/templates/neurips2025/extra_pkgs.tex +0 -53
  84. EvoScientist/skills/ml-paper-writing/templates/neurips2025/main.tex +0 -38
  85. EvoScientist/skills/ml-paper-writing/templates/neurips2025/neurips.sty +0 -382
  86. EvoScientist/skills/peft/SKILL.md +0 -431
  87. EvoScientist/skills/peft/references/advanced-usage.md +0 -514
  88. EvoScientist/skills/peft/references/troubleshooting.md +0 -480
  89. EvoScientist/skills/ray-data/SKILL.md +0 -326
  90. EvoScientist/skills/ray-data/references/integration.md +0 -82
  91. EvoScientist/skills/ray-data/references/transformations.md +0 -83
  92. EvoScientist/skills/skill-creator/LICENSE.txt +0 -202
  93. EvoScientist/skills/skill-creator/SKILL.md +0 -356
  94. EvoScientist/skills/skill-creator/references/output-patterns.md +0 -82
  95. EvoScientist/skills/skill-creator/references/workflows.md +0 -28
  96. EvoScientist/skills/skill-creator/scripts/init_skill.py +0 -303
  97. EvoScientist/skills/skill-creator/scripts/package_skill.py +0 -110
  98. EvoScientist/skills/skill-creator/scripts/quick_validate.py +0 -95
  99. EvoScientist/skills_manager.py +0 -392
  100. EvoScientist/stream/display.py +0 -604
  101. EvoScientist/stream/events.py +0 -415
  102. EvoScientist/stream/state.py +0 -343
  103. evoscientist-0.0.1.dev3.dist-info/METADATA +0 -321
  104. evoscientist-0.0.1.dev3.dist-info/RECORD +0 -113
  105. evoscientist-0.0.1.dev3.dist-info/entry_points.txt +0 -5
  106. {evoscientist-0.0.1.dev3.dist-info → evoscientist-0.1.0rc1.dist-info}/WHEEL +0 -0
  107. {evoscientist-0.0.1.dev3.dist-info → evoscientist-0.1.0rc1.dist-info}/licenses/LICENSE +0 -0
  108. {evoscientist-0.0.1.dev3.dist-info → evoscientist-0.1.0rc1.dist-info}/top_level.txt +0 -0
EvoScientist/skills/peft/references/troubleshooting.md (deleted)
@@ -1,480 +0,0 @@
# PEFT Troubleshooting Guide

## Installation Issues

### bitsandbytes CUDA Error

**Error**: `CUDA Setup failed despite GPU being available`

**Fix**:
```bash
# Check CUDA version
nvcc --version

# Install matching bitsandbytes
pip uninstall bitsandbytes
pip install bitsandbytes --no-cache-dir

# Or compile from source for a specific CUDA version
git clone https://github.com/TimDettmers/bitsandbytes.git
cd bitsandbytes
CUDA_VERSION=118 make cuda11x  # Adjust for your CUDA version
pip install .
```

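To confirm the reinstall worked, a minimal sanity check (a sketch, assuming a CUDA build of PyTorch) verifies that the GPU is visible and that bitsandbytes imports cleanly:

```python
# Sketch: both imports should succeed and CUDA should be visible.
import torch
import bitsandbytes as bnb

print(f"CUDA available: {torch.cuda.is_available()}")
print(f"bitsandbytes version: {bnb.__version__}")
```
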
### Triton Import Error

**Error**: `ModuleNotFoundError: No module named 'triton'`

**Fix**:
```bash
# Install triton (Linux only)
pip install triton

# Windows: Triton is not supported; the standard CUDA kernels are
# used instead, so no extra configuration is needed
```

### PEFT Version Conflicts

**Error**: `AttributeError: 'LoraConfig' object has no attribute 'use_dora'`

**Fix**:
```bash
# Upgrade to latest PEFT (quotes keep the shell from treating >= as redirection)
pip install "peft>=0.13.0" --upgrade

# Check version
python -c "import peft; print(peft.__version__)"
```

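When the same code must run on older PEFT releases, gate version-dependent options rather than passing them unconditionally. A minimal sketch, assuming `packaging` is installed and using the `use_dora` flag from the error above:

```python
# Sketch: only pass options the installed PEFT release understands.
import peft
from packaging import version
from peft import LoraConfig

kwargs = {"r": 16, "lora_alpha": 32}
if version.parse(peft.__version__) >= version.parse("0.13.0"):
    kwargs["use_dora"] = True  # version-gated option
config = LoraConfig(**kwargs)
```
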
## Training Issues

### CUDA Out of Memory

**Error**: `torch.cuda.OutOfMemoryError: CUDA out of memory`

**Solutions** (a combined sketch follows the list):

1. **Enable gradient checkpointing**:
   ```python
   from peft import prepare_model_for_kbit_training
   model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=True)
   ```

2. **Reduce batch size**:
   ```python
   TrainingArguments(
       per_device_train_batch_size=1,
       gradient_accumulation_steps=16  # Maintain effective batch size
   )
   ```

3. **Use QLoRA**:
   ```python
   from transformers import AutoModelForCausalLM, BitsAndBytesConfig

   bnb_config = BitsAndBytesConfig(
       load_in_4bit=True,
       bnb_4bit_quant_type="nf4",
       bnb_4bit_use_double_quant=True
   )
   model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=bnb_config)
   ```

4. **Lower LoRA rank**:
   ```python
   LoraConfig(r=8)  # Instead of r=16 or higher
   ```

5. **Target fewer modules**:
   ```python
   target_modules=["q_proj", "v_proj"]  # Instead of all-linear
   ```

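These levers combine naturally. A sketch of a memory-lean setup, assuming a causal-LM fine-tune; the model name, rank, and output path are illustrative:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig, TrainingArguments
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

# 4-bit load (tip 3) with double quantization
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
model = AutoModelForCausalLM.from_pretrained(
    "your-base-model",  # illustrative placeholder
    quantization_config=bnb_config,
)

# Gradient checkpointing (tip 1)
model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=True)

# Low rank, few target modules (tips 4 and 5)
model = get_peft_model(
    model,
    LoraConfig(r=8, target_modules=["q_proj", "v_proj"], task_type="CAUSAL_LM"),
)

# Small per-device batch with accumulation (tip 2)
args = TrainingArguments(
    output_dir="out",
    per_device_train_batch_size=1,
    gradient_accumulation_steps=16,
    bf16=True,
)
model.print_trainable_parameters()
```
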
### Loss Not Decreasing

**Problem**: Training loss stays flat or increases.

**Solutions** (a label-masking check follows the list):

1. **Check learning rate**:
   ```python
   # Start lower
   TrainingArguments(learning_rate=1e-4)  # Not 2e-4 or higher
   ```

2. **Verify adapter is active**:
   ```python
   model.print_trainable_parameters()
   # Should show >0 trainable params

   # Check adapter applied
   print(model.peft_config)
   ```

3. **Check data formatting**:
   ```python
   # Verify tokenization
   sample = dataset[0]
   decoded = tokenizer.decode(sample["input_ids"])
   print(decoded)  # Should look correct
   ```

4. **Increase rank**:
   ```python
   LoraConfig(r=32, lora_alpha=64)  # More capacity
   ```

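Related to data formatting: if every label in a batch is the ignore index, the loss has nothing to learn from and stays flat. A quick check, assuming a collated batch with a `labels` key:

```python
# Sketch: count supervised tokens; -100 is the ignore index used by
# transformers loss functions, so a batch of all -100 labels is wasted.
batch = next(iter(dataloader))
supervised = (batch["labels"] != -100).sum().item()
print(f"Supervised tokens in batch: {supervised}")  # should be > 0
```
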
### NaN Loss

**Error**: `Loss is NaN`

**Fix**:
```python
# Use bf16 instead of fp16
TrainingArguments(bf16=True, fp16=False)

# Or stay in fp16 (the Trainer applies dynamic loss scaling)
# and run evaluation in fp16 as well
TrainingArguments(fp16=True, fp16_full_eval=True)

# Lower learning rate
TrainingArguments(learning_rate=5e-5)

# Check for data issues (input_ids are integers and can never be NaN;
# inspect the floating-point tensors instead)
for batch in dataloader:
    for key, value in batch.items():
        if value.is_floating_point() and torch.isnan(value).any():
            print(f"NaN in {key}!")
```

### Adapter Not Training

**Problem**: `trainable params: 0` or model not updating.

**Fix**:
```python
# Verify LoRA applied to correct modules
for name, module in model.named_modules():
    if "lora" in name.lower():
        print(f"Found LoRA: {name}")

# Check target_modules match model architecture
from peft.utils import TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING
print(TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING.get(model.config.model_type))

# Ensure model in training mode
model.train()

# Check requires_grad
for name, param in model.named_parameters():
    if param.requires_grad:
        print(f"Trainable: {name}")
```

## Loading Issues

### Adapter Loading Fails

**Error**: `ValueError: Can't find adapter weights`

**Fix**:
```python
# Check adapter files exist
import os
print(os.listdir("./adapter-path"))
# Should contain: adapter_config.json, adapter_model.safetensors

# Load with correct structure
from peft import PeftModel, PeftConfig

# Check config
config = PeftConfig.from_pretrained("./adapter-path")
print(config)

# Load base model first
base_model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(base_model, "./adapter-path")
```

### Base Model Mismatch

**Error**: `RuntimeError: size mismatch`

**Fix**:
```python
# Ensure base model matches adapter
from peft import PeftConfig

config = PeftConfig.from_pretrained("./adapter-path")
print(f"Base model: {config.base_model_name_or_path}")

# Load exact same base model
base_model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
```

### Safetensors vs PyTorch Format

**Error**: `ValueError: We couldn't connect to 'https://huggingface.co'`

**Fix**:
```python
# Force local loading
model = PeftModel.from_pretrained(
    base_model,
    "./adapter-path",
    local_files_only=True
)

# Or specify format when saving
model.save_pretrained("./adapter", safe_serialization=True)   # safetensors
model.save_pretrained("./adapter", safe_serialization=False)  # pytorch
```

## Inference Issues

### Slow Generation

**Problem**: Inference much slower than expected.

**Solutions** (a deployment sketch follows the list):

1. **Merge adapter for deployment**:
   ```python
   merged_model = model.merge_and_unload()
   # No adapter overhead during inference
   ```

2. **Use optimized inference engine**:
   ```python
   from vllm import LLM
   llm = LLM(model="./merged-model", dtype="half")
   ```

3. **Enable Flash Attention**:
   ```python
   model = AutoModelForCausalLM.from_pretrained(
       model_name,
       attn_implementation="flash_attention_2"
   )
   ```

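Merging only helps deployment if the result is saved as a plain checkpoint. A short sketch; the output path is illustrative:

```python
# Sketch: persist the merged model so serving loads a standard
# transformers checkpoint with no PEFT dependency at runtime.
merged_model = model.merge_and_unload()
merged_model.save_pretrained("./merged-model")
tokenizer.save_pretrained("./merged-model")
```
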
### Output Quality Issues

**Problem**: Fine-tuned model produces worse outputs.

**Solutions** (a side-by-side comparison sketch follows the list):

1. **Check evaluation without adapter**:
   ```python
   with model.disable_adapter():
       base_output = model.generate(**inputs)
   # Compare with adapter output
   ```

2. **Use deterministic decoding during eval**:
   ```python
   # Greedy decoding; temperature only applies when do_sample=True
   model.generate(**inputs, do_sample=False)
   ```

3. **Retrain with more data**:
   ```python
   # Increase training samples
   # Use higher quality data
   # Train for more epochs
   ```

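A small comparison harness makes regressions easy to spot. A sketch, assuming `inputs` and `tokenizer` from the surrounding examples:

```python
# Generate the same prompt with and without the adapter.
import torch

model.eval()
with torch.no_grad():
    tuned = model.generate(**inputs, max_new_tokens=100, do_sample=False)
    with model.disable_adapter():
        base = model.generate(**inputs, max_new_tokens=100, do_sample=False)

print("tuned:", tokenizer.decode(tuned[0], skip_special_tokens=True))
print("base: ", tokenizer.decode(base[0], skip_special_tokens=True))
```
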
### Wrong Adapter Active

**Problem**: Model using wrong adapter or no adapter.

**Fix**:
```python
# Check active adapters
print(model.active_adapters)

# Explicitly set adapter
model.set_adapter("your-adapter-name")

# List all adapters
print(model.peft_config.keys())
```

## QLoRA Specific Issues

### Quantization Errors

**Error**: `RuntimeError: mat1 and mat2 shapes cannot be multiplied`

**Fix**:
```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Ensure compute dtype matches
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,  # Match model dtype
    bnb_4bit_quant_type="nf4"
)

# Load with correct dtype
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    torch_dtype=torch.bfloat16
)
```

### QLoRA OOM

**Error**: OOM even with 4-bit quantization.

**Fix**:
```python
# Enable double quantization
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True  # Further memory reduction
)

# Use offloading
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="auto",
    max_memory={0: "20GB", "cpu": "100GB"}
)
```

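To confirm the quantized weights actually fit the budget before training starts, check the parameter footprint; `get_memory_footprint()` is a standard transformers model method:

```python
# Reports memory taken by the model's parameters and buffers, in bytes.
print(f"Model footprint: {model.get_memory_footprint() / 1e9:.2f} GB")
```
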
### QLoRA Merge Fails

**Error**: `RuntimeError: expected scalar type BFloat16 but found Float`

**Fix**:
```python
# Merge against an unquantized base model instead of the 4-bit one
from peft import PeftModel

# Load in higher precision for merging
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_name,
    torch_dtype=torch.float16,  # Not quantized
    device_map="auto"
)

# Load adapter
model = PeftModel.from_pretrained(base_model, "./qlora-adapter")

# Now merge
merged = model.merge_and_unload()
```

## Multi-Adapter Issues

### Adapter Conflict

**Error**: `ValueError: Adapter with name 'default' already exists`

**Fix**:
```python
# Use unique names
model.load_adapter("./adapter1", adapter_name="task1")
model.load_adapter("./adapter2", adapter_name="task2")

# Or delete existing
model.delete_adapter("default")
```

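Once the adapters have unique names, switching between them at inference is a one-liner each way. A usage sketch with the names above:

```python
# Route requests through different adapters without reloading the model.
model.set_adapter("task1")
out_task1 = model.generate(**inputs)

model.set_adapter("task2")
out_task2 = model.generate(**inputs)
```
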
### Mixed Precision Adapters

**Error**: Adapters trained with different dtypes.

**Fix**:
```python
# Convert adapter precision
model = PeftModel.from_pretrained(base_model, "./adapter")
model = model.to(torch.bfloat16)

# Or load with specific dtype
model = PeftModel.from_pretrained(
    base_model,
    "./adapter",
    torch_dtype=torch.bfloat16
)
```

## Performance Optimization

### Memory Profiling

```python
import torch

def print_memory():
    if torch.cuda.is_available():
        allocated = torch.cuda.memory_allocated() / 1e9
        reserved = torch.cuda.memory_reserved() / 1e9
        print(f"Allocated: {allocated:.2f}GB, Reserved: {reserved:.2f}GB")

# Profile during training
print_memory()  # Before
model.train()
loss = model(**batch).loss
loss.backward()
print_memory()  # After
```

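Point-in-time readings can miss transient spikes; the peak statistic is usually more useful when hunting an OOM. A sketch using PyTorch's built-in peak counters:

```python
# Reset the peak counter, run one step, then read the high-water mark.
torch.cuda.reset_peak_memory_stats()
loss = model(**batch).loss
loss.backward()
print(f"Peak allocated: {torch.cuda.max_memory_allocated() / 1e9:.2f}GB")
```
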
### Speed Profiling

```python
import time
import torch

def benchmark_generation(model, tokenizer, prompt, n_runs=5):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    # Warmup
    model.generate(**inputs, max_new_tokens=10)
    torch.cuda.synchronize()

    # Benchmark
    times = []
    for _ in range(n_runs):
        start = time.perf_counter()
        outputs = model.generate(**inputs, max_new_tokens=100)
        torch.cuda.synchronize()
        times.append(time.perf_counter() - start)

    tokens = outputs.shape[1] - inputs.input_ids.shape[1]
    avg_time = sum(times) / len(times)
    print(f"Speed: {tokens/avg_time:.2f} tokens/sec")

# Compare adapter vs merged
benchmark_generation(adapter_model, tokenizer, "Hello")
benchmark_generation(merged_model, tokenizer, "Hello")
```

## Getting Help

1. **Check PEFT GitHub Issues**: https://github.com/huggingface/peft/issues
2. **HuggingFace Forums**: https://discuss.huggingface.co/
3. **PEFT Documentation**: https://huggingface.co/docs/peft

### Debugging Template

When reporting issues, include:

```python
# System info
import peft
import transformers
import torch

print(f"PEFT: {peft.__version__}")
print(f"Transformers: {transformers.__version__}")
print(f"PyTorch: {torch.__version__}")
print(f"CUDA: {torch.version.cuda}")
print(f"GPU: {torch.cuda.get_device_name(0) if torch.cuda.is_available() else 'N/A'}")

# Config
print(model.peft_config)
model.print_trainable_parameters()
```