opik-optimizer 0.7.0 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,656 @@
+import opik
+from typing import Literal, List, Dict, Any
+from .. import utils
+from datasets import load_dataset
+import traceback
+
+
+class HaltError(Exception):
+    """Exception raised when we need to halt the process due to a critical error."""
+
+    pass
+
+
+def get_or_create_dataset(
+    name: Literal[
+        "hotpot-300",
+        "hotpot-500",
+        "halu-eval-300",
+        "tiny-test",
+        "gsm8k",
+        "hotpot_qa",
+        "ai2_arc",
+        "truthful_qa",
+        "cnn_dailymail",
+        "ragbench_sentence_relevance",
+        "election_questions",
+        "medhallu",
+        "rag_hallucinations",
+        "math-50",  # also handled by the dispatcher below, so it belongs in the Literal
+    ],
+    test_mode: bool = False,
+) -> opik.Dataset:
+    """Get or create a dataset from HuggingFace."""
+    try:
+        # Try to get existing dataset first
+        opik_client = opik.Opik()
+        dataset_name = f"{name}_test" if test_mode else name
+
+        try:
+            dataset = opik_client.get_dataset(dataset_name)
+            if dataset:  # Check if dataset exists
+                items = dataset.get_items()
+                if items and len(items) > 0:  # Check if dataset has data
+                    return dataset
+                # If dataset exists but is empty, delete it
+                print(f"Dataset {dataset_name} exists but is empty - deleting it...")
+                opik_client.delete_dataset(dataset_name)
+        except Exception:
+            # If dataset doesn't exist, we'll create it
+            pass
+
+        # Load data based on dataset name
+        if name == "hotpot-300":
+            data = _load_hotpot_300(test_mode)
+        elif name == "hotpot-500":
+            data = _load_hotpot_500(test_mode)
+        elif name == "halu-eval-300":
+            data = _load_halu_eval_300(test_mode)
+        elif name == "tiny-test":
+            data = _load_tiny_test()
+        elif name == "gsm8k":
+            data = _load_gsm8k(test_mode)
+        elif name == "hotpot_qa":
+            data = _load_hotpot_qa(test_mode)
+        elif name == "ai2_arc":
+            data = _load_ai2_arc(test_mode)
+        elif name == "truthful_qa":
+            data = _load_truthful_qa(test_mode)
+        elif name == "cnn_dailymail":
+            data = _load_cnn_dailymail(test_mode)
+        elif name == "ragbench_sentence_relevance":
+            data = _load_ragbench_sentence_relevance(test_mode)
+        elif name == "election_questions":
+            data = _load_election_questions(test_mode)
+        elif name == "medhallu":
+            data = _load_medhallu(test_mode)
+        elif name == "rag_hallucinations":
+            data = _load_rag_hallucinations(test_mode)
+        elif name == "math-50":
+            data = _load_math_50()
+        else:
+            raise HaltError(f"Unknown dataset: {name}")
+
+        if not data:
+            raise HaltError(f"No data loaded for dataset: {name}")
+
+        # Create dataset in Opik
+        try:
+            dataset = opik_client.create_dataset(dataset_name)  # Use dataset_name with test mode suffix
+        except opik.rest_api.core.api_error.ApiError as e:
+            if e.status_code == 409:  # Dataset already exists
+                # Try to get the dataset again
+                dataset = opik_client.get_dataset(dataset_name)
+                if not dataset:
+                    raise HaltError(f"Dataset {dataset_name} exists but is empty")
+                return dataset
+            raise HaltError(f"Failed to create dataset {dataset_name}: {e}")
+
+        # Insert data into the dataset
+        try:
+            dataset.insert(data)
+        except Exception as e:
+            raise HaltError(f"Failed to insert data into dataset {dataset_name}: {e}")
+
+        # Verify data was added
+        items = dataset.get_items()
+        if not items or len(items) == 0:
+            raise HaltError(f"Failed to add data to dataset {dataset_name}")
+
+        return dataset
+    except HaltError:
+        raise  # Re-raise HaltError to stop the process
+    except Exception as e:
+        print(f"Error loading dataset {name}: {e}")
+        print(traceback.format_exc())
+        raise HaltError(f"Critical error loading dataset {name}: {e}")
+
+
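For orientation, a minimal usage sketch of the loader above, assuming an Opik API key and workspace are already configured; the import path is an assumption (only get_or_create_dataset and get_items come from the code shown here):

    # Sketch: build (or fetch) the small smoke-test dataset and peek at one record.
    from opik_optimizer.demo import get_or_create_dataset  # import path is an assumption

    dataset = get_or_create_dataset("tiny-test", test_mode=True)  # stored as "tiny-test_test"
    items = dataset.get_items()
    print(len(items), items[0]["text"], "->", items[0]["label"])  # item keys follow the records inserted above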
+def _load_hotpot_500(test_mode: bool = False) -> List[Dict[str, Any]]:
+    from dspy.datasets import HotPotQA
+
+    seed = 2024
+    size = 500 if not test_mode else 5
+
+    try:
+        trainset = [
+            x.with_inputs("question")
+            for x in HotPotQA(train_seed=seed, train_size=size).train
+        ]
+    except Exception:
+        raise Exception("Unable to download HotPotQA; please try again") from None
+
+    data = []
+    for row in reversed(trainset):
+        d = row.toDict()
+        del d["dspy_uuid"]
+        del d["dspy_split"]
+        data.append(d)
+
+    return data
+
+
+def _load_hotpot_300(test_mode: bool = False) -> List[Dict[str, Any]]:
+    from dspy.datasets import HotPotQA
+
+    seed = 42
+    size = 300 if not test_mode else 3
+
+    try:
+        trainset = [
+            x.with_inputs("question")
+            for x in HotPotQA(train_seed=seed, train_size=size).train
+        ]
+    except Exception:
+        raise Exception("Unable to download HotPotQA; please try again") from None
+
+    data = []
+    for row in trainset:
+        d = row.toDict()
+        del d["dspy_uuid"]
+        del d["dspy_split"]
+        data.append(d)
+
+    return data
+
+
+def _load_halu_eval_300(test_mode: bool = False) -> List[Dict[str, Any]]:
+    import pandas as pd
+
+    try:
+        df = pd.read_parquet(
+            "hf://datasets/pminervini/HaluEval/general/data-00000-of-00001.parquet"
+        )
+    except Exception:
+        raise Exception("Unable to download HaluEval; please try again") from None
+
+    # Honor test_mode like the other loaders (5 rows instead of 300)
+    df = df.sample(n=5 if test_mode else 300, random_state=42)
+
+    dataset_records = [
+        {
+            "input": x["user_query"],
+            "llm_output": x["chatgpt_response"],
+            "expected_hallucination_label": x["hallucination"],
+        }
+        for x in df.to_dict(orient="records")
+    ]
+
+    return dataset_records
+
+
+def _load_tiny_test() -> List[Dict[str, Any]]:
+    return [
+        {
+            "text": "What is the capital of France?",
+            "label": "Paris",
+            "metadata": {
+                "context": "France is a country in Europe. Its capital is Paris."
+            },
+        },
+        {
+            "text": "Who wrote Romeo and Juliet?",
+            "label": "William Shakespeare",
+            "metadata": {
+                "context": "Romeo and Juliet is a famous play written by William Shakespeare."
+            },
+        },
+        {
+            "text": "What is 2 + 2?",
+            "label": "4",
+            "metadata": {"context": "Basic arithmetic: 2 + 2 equals 4."},
+        },
+        {
+            "text": "What is the largest planet in our solar system?",
+            "label": "Jupiter",
+            "metadata": {
+                "context": "Jupiter is the largest planet in our solar system."
+            },
+        },
+        {
+            "text": "Who painted the Mona Lisa?",
+            "label": "Leonardo da Vinci",
+            "metadata": {"context": "The Mona Lisa was painted by Leonardo da Vinci."},
+        },
+    ]
+
+
+def _load_gsm8k(test_mode: bool = False) -> List[Dict[str, Any]]:
+    """Load GSM8K dataset with 300 examples."""
+    try:
+        # Use streaming to avoid downloading the entire dataset
+        dataset = load_dataset("gsm8k", "main", streaming=True)
+        n_samples = 5 if test_mode else 300
+
+        # Convert streaming dataset to list
+        data = []
+        for i, item in enumerate(dataset["train"]):
+            if i >= n_samples:
+                break
+            data.append({
+                "question": item["question"],
+                "answer": item["answer"],
+            })
+        return data
+    except Exception as e:
+        print(f"Error loading GSM8K dataset: {e}")
+        raise Exception("Unable to download gsm8k; please try again") from None
+
+
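The streaming loaders in this file all take the first n_samples records by enumerating the split and breaking out of the loop. As a side note, an equivalent way to express that take-first-n step (a sketch, not the package's code) is itertools.islice over the streaming split:

    # Sketch: take the first n records of a streaming HuggingFace split without downloading it all.
    from itertools import islice
    from datasets import load_dataset

    stream = load_dataset("gsm8k", "main", streaming=True)["train"]
    data = [
        {"question": item["question"], "answer": item["answer"]}
        for item in islice(stream, 5)  # 5 mirrors test_mode; the loaders use 300 otherwise
    ]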
+def _load_hotpot_qa(test_mode: bool = False) -> List[Dict[str, Any]]:
+    """Load HotpotQA dataset with 300 examples."""
+    try:
+        # Use streaming to avoid downloading the entire dataset
+        dataset = load_dataset("hotpot_qa", "distractor", streaming=True)
+        n_samples = 5 if test_mode else 300
+
+        # Convert streaming dataset to list
+        data = []
+        for i, item in enumerate(dataset["train"]):
+            if i >= n_samples:
+                break
+            data.append({
+                "question": item["question"],
+                "answer": item["answer"],
+                "context": item["context"],
+            })
+        return data
+    except Exception as e:
+        print(f"Error loading HotpotQA dataset: {e}")
+        raise Exception("Unable to download HotPotQA; please try again") from None
+
+
+def _load_ai2_arc(test_mode: bool = False) -> List[Dict[str, Any]]:
+    """Load AI2 ARC dataset with 300 examples."""
+    try:
+        # Use streaming to avoid downloading the entire dataset
+        dataset = load_dataset("ai2_arc", "ARC-Challenge", streaming=True)
+        n_samples = 5 if test_mode else 300
+
+        # Convert streaming dataset to list
+        data = []
+        for i, item in enumerate(dataset["train"]):
+            if i >= n_samples:
+                break
+            data.append({
+                "question": item["question"],
+                "answer": item["answerKey"],
+                "choices": item["choices"],
+            })
+        return data
+    except Exception as e:
+        print(f"Error loading AI2 ARC dataset: {e}")
+        raise Exception("Unable to download ai2_arc; please try again") from None
+
+
+def _load_truthful_qa(test_mode: bool = False) -> List[Dict]:
+    """Load TruthfulQA dataset."""
+    try:
+        # Load both configurations
+        try:
+            gen_dataset = load_dataset("truthful_qa", "generation")
+            mc_dataset = load_dataset("truthful_qa", "multiple_choice")
+        except Exception:
+            raise Exception(
+                "Unable to download truthful_qa; please try again"
+            ) from None
+
+        # Combine data from both configurations
+        data = []
+        n_samples = 5 if test_mode else 300
+        for gen_item, mc_item in zip(
+            gen_dataset["validation"], mc_dataset["validation"]
+        ):
+            if len(data) >= n_samples:
+                break
+
+            # Get correct answers from both configurations
+            correct_answers = set(gen_item["correct_answers"])
+            if "mc1_targets" in mc_item:
+                correct_answers.update(
+                    [
+                        choice
+                        for choice, label in zip(
+                            mc_item["mc1_targets"]["choices"],
+                            mc_item["mc1_targets"]["labels"],
+                        )
+                        if label == 1
+                    ]
+                )
+            if "mc2_targets" in mc_item:
+                correct_answers.update(
+                    [
+                        choice
+                        for choice, label in zip(
+                            mc_item["mc2_targets"]["choices"],
+                            mc_item["mc2_targets"]["labels"],
+                        )
+                        if label == 1
+                    ]
+                )
+
+            # Get all possible answers
+            all_answers = set(
+                gen_item["correct_answers"] + gen_item["incorrect_answers"]
+            )
+            if "mc1_targets" in mc_item:
+                all_answers.update(mc_item["mc1_targets"]["choices"])
+            if "mc2_targets" in mc_item:
+                all_answers.update(mc_item["mc2_targets"]["choices"])
+
+            # Create a single example with all necessary fields
+            example = {
+                "question": gen_item["question"],
+                "answer": gen_item["best_answer"],
+                "choices": list(all_answers),
+                "correct_answer": gen_item["best_answer"],
+                "input": gen_item["question"],  # For AnswerRelevance metric
+                "output": gen_item["best_answer"],  # For output_key requirement
+                "context": gen_item.get("source", ""),  # Use source as context
+                "type": "TEXT",  # Set type to TEXT as required by Opik
+                "category": gen_item["category"],
+                "source": "MANUAL",  # Set source to MANUAL as required by Opik
+                "correct_answers": list(
+                    correct_answers
+                ),  # Keep track of all correct answers
+                "incorrect_answers": gen_item[
+                    "incorrect_answers"
+                ],  # Keep track of incorrect answers
+            }
+
+            # Ensure all required fields are present
+            required_fields = [
+                "question",
+                "answer",
+                "choices",
+                "correct_answer",
+                "input",
+                "output",
+                "context",
+            ]
+            if all(field in example and example[field] for field in required_fields):
+                data.append(example)
+
+        if not data:
+            raise ValueError("No valid examples found in TruthfulQA dataset")
+
+        return data
+    except Exception as e:
+        print(f"Error loading TruthfulQA dataset: {e}")
+        print(traceback.format_exc())
+        raise
+
+
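A note on the label filtering in _load_truthful_qa above: each mc1_targets/mc2_targets entry pairs a choices list with a parallel 0/1 labels list, and the comprehension keeps the choices whose label is 1. The values below are purely illustrative:

    # Hypothetical mc1_targets entry; only the shape matters here.
    mc1_targets = {
        "choices": ["Answer A", "Answer B", "Answer C"],
        "labels": [1, 0, 0],  # 1 marks a correct choice
    }
    correct = [
        choice
        for choice, label in zip(mc1_targets["choices"], mc1_targets["labels"])
        if label == 1
    ]
    # correct == ["Answer A"]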
+def _load_cnn_dailymail(test_mode: bool = False) -> List[Dict]:
+    """Load CNN Daily Mail dataset with 100 examples."""
+    try:
+        dataset = load_dataset("cnn_dailymail", "3.0.0", streaming=True)
+        n_samples = 5 if test_mode else 100
+
+        # Convert streaming dataset to list
+        data = []
+        for i, item in enumerate(dataset["validation"]):
+            if i >= n_samples:
+                break
+            data.append({
+                "article": item["article"],
+                "highlights": item["highlights"],
+            })
+        return data
+    except Exception as e:
+        print(f"Error loading CNN Daily Mail dataset: {e}")
+        raise Exception("Unable to download cnn_dailymail; please try again") from None
+
+
+def _load_math_50():
+    return [
+        {"question": "What is (5 + 3) * 2 - 4?", "expected answer": "12"},
+        {
+            "question": "If you divide 20 by 4 and then add 7, what do you get?",
+            "expected answer": "12",
+        },
+        {
+            "question": "Start with 10, subtract 2, multiply the result by 3, then add 5.",
+            "expected answer": "29",
+        },
+        {
+            "question": "Add 6 and 4, then divide by 2, and finally multiply by 5.",
+            "expected answer": "25",
+        },
+        {
+            "question": "Take 15, subtract 3, add 2, then divide the result by 2.",
+            "expected answer": "7",
+        },
+        {"question": "What is 7 * (6 - 2) + 1?", "expected answer": "29"},
+        {
+            "question": "If you multiply 8 by 3 and subtract 5, what is the result?",
+            "expected answer": "19",
+        },
+        {
+            "question": "Begin with 25, divide by 5, then multiply by 4.",
+            "expected answer": "20",
+        },
+        {
+            "question": "Subtract 9 from 17, then multiply the difference by 3.",
+            "expected answer": "24",
+        },
+        {"question": "What is 10 + 5 * 3 - 8?", "expected answer": "17"},
+        {"question": "Divide 36 by 6, then add 11.", "expected answer": "17"},
+        {
+            "question": "Start with 2, multiply by 9, subtract 7, and add 4.",
+            "expected answer": "15",
+        },
+        {
+            "question": "Add 12 and 8, divide by 4, and then subtract 1.",
+            "expected answer": "4",
+        },
+        {
+            "question": "Take 30, subtract 10, divide by 2, and add 7.",
+            "expected answer": "17",
+        },
+        {"question": "What is (15 - 5) / 2 * 3?", "expected answer": "15"},
+        {
+            "question": "If you add 14 and 6, and then divide by 5, what do you get?",
+            "expected answer": "4",
+        },
+        {
+            "question": "Start with 50, divide by 10, multiply by 2, and subtract 3.",
+            "expected answer": "7",
+        },
+        {
+            "question": "Subtract 4 from 11, multiply by 5, and then add 2.",
+            "expected answer": "37",
+        },
+        {"question": "What is 9 * 4 - 12 / 3?", "expected answer": "32"},
+        {
+            "question": "Divide 42 by 7, and then multiply by 3.",
+            "expected answer": "18",
+        },
+        {
+            "question": "Begin with 1, add 19, divide by 4, and multiply by 6.",
+            "expected answer": "30",
+        },
+        {
+            "question": "Subtract 6 from 21, then divide the result by 5.",
+            "expected answer": "3",
+        },
+        {"question": "What is (8 + 7) * 2 - 9?", "expected answer": "21"},
+        {
+            "question": "If you multiply 7 by 5 and then subtract 11, what is the answer?",
+            "expected answer": "24",
+        },
+        {
+            "question": "Start with 3, multiply by 8, add 6, and then divide by 2.",
+            "expected answer": "15",
+        },
+        {"question": "What is 3 * (10 - 4) + 5?", "expected answer": "23"},
+        {
+            "question": "If you multiply 12 by 2 and subtract 7, what is the result?",
+            "expected answer": "17",
+        },
+        {
+            "question": "Begin with 35, divide by 7, then multiply by 6.",
+            "expected answer": "30",
+        },
+        {
+            "question": "Subtract 11 from 20, then multiply the difference by 4.",
+            "expected answer": "36",
+        },
+        {"question": "What is 15 + 3 * 7 - 9?", "expected answer": "27"},
+        {"question": "Divide 63 by 9, then add 13.", "expected answer": "20"},
+        {
+            "question": "Start with 6, multiply by 5, subtract 8, and add 11.",
+            "expected answer": "33",
+        },
+        {
+            "question": "Add 18 and 6, divide by 3, and then subtract 4.",
+            "expected answer": "4",
+        },
+        {
+            "question": "Take 50, subtract 20, divide by 5, and add 9.",
+            "expected answer": "15",
+        },
+        {"question": "What is (25 - 10) / 3 * 4?", "expected answer": "20"},
+        {
+            "question": "If you add 9 and 15, and then divide by 8, what do you get?",
+            "expected answer": "3",
+        },
+        {
+            "question": "Start with 40, divide by 5, multiply by 3, and subtract 7.",
+            "expected answer": "17",
+        },
+        {
+            "question": "Subtract 5 from 22, multiply by 2, and then divide by 6.",
+            "expected answer": "5.666666666666667",
+        },
+        {"question": "What is 7 * 6 + 8 - 11?", "expected answer": "39"},
+        {
+            "question": "Divide 72 by 8, and then multiply by 5.",
+            "expected answer": "45",
+        },
+        {
+            "question": "Begin with 3, add 17, divide by 5, and multiply by 7.",
+            "expected answer": "28",
+        },
+        {
+            "question": "Subtract 9 from 31, then divide the result by 4.",
+            "expected answer": "5.5",
+        },
+        {"question": "What is (11 + 9) * 3 - 15?", "expected answer": "45"},
+        {
+            "question": "If you multiply 8 by 7 and then subtract 19, what is the answer?",
+            "expected answer": "37",
+        },
+        {
+            "question": "Start with 2, multiply by 12, add 16, and then divide by 4.",
+            "expected answer": "10",
+        },
+        {
+            "question": "Add 13 and 19, then subtract 6, and finally divide by 2.",
+            "expected answer": "13",
+        },
+        {
+            "question": "Take 45, divide by 9, add 11, and then subtract 3.",
+            "expected answer": "13",
+        },
+        {"question": "What is 18 - 4 * 3 + 7?", "expected answer": "13"},
+        {
+            "question": "If you divide 56 by 7 and then add 9, what do you get?",
+            "expected answer": "17",
+        },
+        {
+            "question": "Begin with 4, multiply by 9, subtract 12, and then divide by 6.",
+            "expected answer": "4",
+        },
+    ]
+
+
+def _load_ragbench_sentence_relevance(test_mode: bool = False) -> List[Dict]:
+    """Load RAGBench sentence relevance dataset."""
+    try:
+        dataset = load_dataset("wandb/ragbench-sentence-relevance-balanced")
+    except Exception:
+        raise Exception("Unable to download ragbench-sentence-relevance; please try again") from None
+
+    n_samples = 5 if test_mode else 300
+    train_data = dataset["train"].select(range(n_samples))
+
+    return [
+        {
+            "question": item["question"],
+            "sentence": item["sentence"],
+            "label": item["label"],
+        }
+        for item in train_data
+    ]
+
+
+def _load_election_questions(test_mode: bool = False) -> List[Dict]:
+    """Load Anthropic election questions dataset."""
+    try:
+        dataset = load_dataset("Anthropic/election_questions")
+    except Exception:
+        raise Exception("Unable to download election_questions; please try again") from None
+
+    n_samples = 5 if test_mode else 300
+    train_data = dataset["test"].select(range(n_samples))
+
+    return [
+        {
+            "question": item["question"],
+            "label": item["label"],  # "Harmless" or "Harmful"
+        }
+        for item in train_data
+    ]
+
+
+def _load_medhallu(test_mode: bool = False) -> List[Dict]:
+    """Load MedHallu medical hallucinations dataset."""
+    try:
+        dataset = load_dataset("UTAustin-AIHealth/MedHallu", "pqa_labeled")
+    except Exception:
+        raise Exception("Unable to download medhallu; please try again") from None
+
+    n_samples = 5 if test_mode else 300
+    train_data = dataset["train"].select(range(n_samples))
+
+    return [
+        {
+            "question": item["Question"],
+            "knowledge": item["Knowledge"],
+            "ground_truth": item["Ground Truth"],
+            "hallucinated_answer": item["Hallucinated Answer"],
+            "difficulty_level": item["Difficulty Level"],
+            "hallucination_category": item["Category of Hallucination"],
+        }
+        for item in train_data
+    ]
+
+
+def _load_rag_hallucinations(test_mode: bool = False) -> List[Dict]:
+    """Load Aporia RAG hallucinations dataset."""
+    try:
+        dataset = load_dataset("aporia-ai/rag_hallucinations")
+    except Exception:
+        raise Exception("Unable to download rag_hallucinations; please try again") from None
+
+    n_samples = 5 if test_mode else 300
+    train_data = dataset["train"].select(range(n_samples))
+
+    return [
+        {
+            "context": item["context"],
+            "question": item["question"],
+            "answer": item["answer"],
+            "is_hallucination": item["is_hallucination"],
+        }
+        for item in train_data
+    ]
@@ -0,0 +1,5 @@
+from .few_shot_bayesian_optimizer import FewShotBayesianOptimizer
+
+__all__ = [
+    "FewShotBayesianOptimizer",
+]
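Given this re-export, the optimizer class is importable from the subpackage itself; the top-level package path below is an assumption:

    # Sketch: import through the __init__ re-export shown above.
    from opik_optimizer.few_shot_bayesian_optimizer import FewShotBayesianOptimizer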