opik-optimizer 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,408 @@
+ import random
+ from typing import Any, Dict, List, Tuple, Union, Optional, Callable, Literal
+ import openai
+ import opik
+ import optuna
+ import logging
+ import json
+
+ from opik import Dataset
+ from opik_optimizer.optimization_config import mappers
+
+ from opik_optimizer.optimization_config.configs import TaskConfig, MetricConfig
+ from opik_optimizer import base_optimizer
+
+ from . import prompt_parameter
+ from . import prompt_templates
+ from .._throttle import RateLimiter, rate_limited
+ from .. import optimization_result, task_evaluator
+
+ import litellm
+
+ from opik.evaluation.models.litellm import opik_monitor as opik_litellm_monitor
+
+ limiter = RateLimiter(max_calls_per_second=15)
+
+ logger = logging.getLogger(__name__)
+
+ @rate_limited(limiter)
+ def _call_model(model, messages, seed, model_kwargs):
+     model_kwargs = opik_litellm_monitor.try_add_opik_monitoring_to_params(model_kwargs)
+
+     response = litellm.completion(
+         model=model,
+         messages=messages,
+         seed=seed,
+         **model_kwargs,
+     )
+
+     return response
+
+
+ class FewShotBayesianOptimizer(base_optimizer.BaseOptimizer):
+     def __init__(
+         self,
+         model: str,
+         project_name: Optional[str] = None,
+         min_examples: int = 2,
+         max_examples: int = 8,
+         seed: int = 42,
+         n_threads: int = 8,
+         n_initial_prompts: int = 5,
+         n_iterations: int = 10,
+         **model_kwargs,
+     ) -> None:
+         super().__init__(model, project_name, **model_kwargs)
+         self.min_examples = min_examples
+         self.max_examples = max_examples
+         self.seed = seed
+         self.n_threads = n_threads
+         self.n_initial_prompts = n_initial_prompts
+         self.n_iterations = n_iterations
+
+         self._opik_client = opik.Opik()
+         logger.debug(f"Initialized FewShotBayesianOptimizer with model: {model}")
+
+     def _split_dataset(
+         self, dataset: List[Dict[str, Any]], train_ratio: float
+     ) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
+         """Split the dataset into training and validation sets.
+
+         Args:
+             dataset: List of dataset items
+             train_ratio: Ratio of items to use for training
+
+         Returns:
+             Tuple of (train_set, validation_set)
+         """
+         if not dataset:
+             return [], []
+
+         random.seed(self.seed)
+         dataset = dataset.copy()
+         random.shuffle(dataset)
+
+         split_idx = int(len(dataset) * train_ratio)
+         return dataset[:split_idx], dataset[split_idx:]
+
+     def _optimize_prompt(
+         self,
+         dataset: Union[str, Dataset],
+         metric_config: MetricConfig,
+         task_config: TaskConfig,
+         n_trials: int = 10,
+         optimization_id: Optional[str] = None,
+         experiment_config: Optional[Dict] = None,
+         n_samples: Optional[int] = None,
+     ) -> optimization_result.OptimizationResult:
+         random.seed(self.seed)
+
+         if not task_config.use_chat_prompt:
+             raise ValueError(
+                 "Few-shot Bayesian optimization is only supported for chat prompts."
+             )
+
+         opik_dataset: opik.Dataset = dataset
+
+         # Load the dataset
+         if isinstance(dataset, str):
+             opik_dataset = self._opik_client.get_dataset(dataset)
+             dataset_items = opik_dataset.get_items()
+         else:
+             opik_dataset = dataset
+             dataset_items = opik_dataset.get_items()
+
+         experiment_config = experiment_config or {}
+         base_experiment_config = {  # Base config for reuse
+             **experiment_config,
+             **{
+                 "optimizer": self.__class__.__name__,
+                 "metric": metric_config.metric.name,
+                 "dataset": opik_dataset.name,
+                 "configuration": {},
+             },
+         }
+
+         # Evaluate Initial (Zero-Shot) Prompt
+         logger.info("Evaluating initial (zero-shot) prompt...")
+         initial_instruction = task_config.instruction_prompt
+         zero_shot_param = prompt_parameter.ChatPromptParameter(
+             name="zero_shot_prompt",
+             instruction=initial_instruction,
+             task_input_parameters=task_config.input_dataset_fields,
+             task_output_parameter=task_config.output_dataset_field,
+             demo_examples=[],  # No examples
+         )
+         zero_shot_llm_task = self._build_task_from_prompt_template(
+             zero_shot_param.as_template()
+         )
+
+         initial_eval_config = base_experiment_config.copy()
+         initial_eval_config["configuration"]["prompt"] = initial_instruction
+         initial_eval_config["configuration"]["n_examples"] = 0
+
+         # Determine dataset item IDs for evaluation (initial and trials)
+         all_dataset_item_ids = [item["id"] for item in dataset_items]
+         eval_dataset_item_ids = all_dataset_item_ids
+         if n_samples is not None and n_samples < len(all_dataset_item_ids):
+             eval_dataset_item_ids = random.sample(all_dataset_item_ids, n_samples)
+             logger.info(f"Using {n_samples} samples for evaluations.")
+         else:
+             logger.info(
+                 f"Using all {len(all_dataset_item_ids)} samples for evaluations."
+             )
+
+         initial_score = task_evaluator.evaluate(
+             dataset=opik_dataset,
+             dataset_item_ids=eval_dataset_item_ids,
+             metric_config=metric_config,
+             evaluated_task=zero_shot_llm_task,
+             num_threads=self.n_threads,
+             project_name=self.project_name,
+             experiment_config=initial_eval_config,
+             optimization_id=optimization_id,
+         )
+         logger.info(f"Initial (zero-shot) score: {initial_score:.4f}")
+
+         # Start Optuna Study
+         logger.info("Starting Optuna study for Few-Shot Bayesian Optimization...")
+
+         def optimization_objective(trial: optuna.Trial) -> float:
+             n_examples = trial.suggest_int(
+                 "n_examples", self.min_examples, self.max_examples
+             )
+             available_indices = list(range(len(dataset_items)))
+             example_indices = random.sample(available_indices, n_examples)
+             trial.set_user_attr("example_indices", example_indices)
+
+             instruction = task_config.instruction_prompt
+             demo_examples = [dataset_items[idx] for idx in example_indices]
+
+             processed_demo_examples = []
+             for example in demo_examples:
+                 processed_example = {}
+                 for key, value in example.items():
+                     processed_example[key] = str(value)
+                 processed_demo_examples.append(processed_example)
+
+             param = prompt_parameter.ChatPromptParameter(
+                 name=f"trial_{trial.number}_prompt",
+                 instruction=instruction,
+                 task_input_parameters=task_config.input_dataset_fields,
+                 task_output_parameter=task_config.output_dataset_field,
+                 demo_examples=processed_demo_examples,
+             )
+
+             llm_task = self._build_task_from_prompt_template(param.as_template())
+
+             # Log trial config
+             trial_config = base_experiment_config.copy()
+             trial_config["configuration"]["prompt"] = instruction  # Base instruction
+             trial_config["configuration"][
+                 "examples"
+             ] = processed_demo_examples  # Log stringified examples
+             trial_config["configuration"]["n_examples"] = n_examples
+             trial_config["configuration"]["example_indices"] = example_indices
+
+             logger.debug(
+                 f"Trial {trial.number}: n_examples={n_examples}, indices={example_indices}"
+             )
+             logger.debug(f"Evaluating trial {trial.number}...")
+
+             score = task_evaluator.evaluate(
+                 dataset=opik_dataset,
+                 dataset_item_ids=eval_dataset_item_ids,
+                 metric_config=metric_config,
+                 evaluated_task=llm_task,
+                 num_threads=self.n_threads,
+                 project_name=self.project_name,
+                 experiment_config=trial_config,
+                 optimization_id=optimization_id,
+             )
+             logger.debug(f"Trial {trial.number} score: {score:.4f}")
+
+             trial.set_user_attr("score", score)
+             trial.set_user_attr("param", param)
+             return score
+
+         # Configure Optuna Logging
+         try:
+             optuna.logging.disable_default_handler()
+             optuna_logger = logging.getLogger("optuna")
+             package_level = logging.getLogger("opik_optimizer").getEffectiveLevel()
+             optuna_logger.setLevel(package_level)
+             optuna_logger.propagate = False
+             logger.debug(
+                 f"Optuna logger configured to level {logging.getLevelName(package_level)} and set to not propagate."
+             )
+         except Exception as e:
+             logger.warning(f"Could not configure Optuna logging within optimizer: {e}")
+
+         study = optuna.create_study(direction="maximize")
+         study.optimize(optimization_objective, n_trials=n_trials)
+         logger.info("Optuna study finished.")
+
+         best_trial = study.best_trial
+         best_score = best_trial.value
+         best_n_examples = best_trial.params["n_examples"]
+         best_example_indices = best_trial.user_attrs.get("example_indices", [])
+         best_param: prompt_parameter.ChatPromptParameter = best_trial.user_attrs[
+             "param"
+         ]
+
+         chat_messages_list = best_param.as_template().format()
+         main_prompt_string = best_param.instruction
+
+         return optimization_result.OptimizationResult(
+             prompt=main_prompt_string,
+             score=best_score,
+             metric_name=metric_config.metric.name,
+             details={
+                 "prompt_type": "chat" if task_config.use_chat_prompt else "non-chat",
+                 "chat_messages": chat_messages_list,
+                 "prompt_parameter": best_param,
+                 "n_examples": best_n_examples,
+                 "example_indices": best_example_indices,
+                 "trial_number": best_trial.number,
+                 "initial_score": initial_score,
+                 "total_trials": n_trials,
+                 "rounds": [],
+                 "stopped_early": False,
+                 "metric_config": metric_config.dict(),
+                 "task_config": task_config.dict(),
+                 "model": self.model,
+                 "temperature": self.model_kwargs.get("temperature"),
+             },
+         )
+
+     def optimize_prompt(
+         self,
+         dataset: Union[str, Dataset],
+         metric_config: MetricConfig,
+         task_config: TaskConfig,
+         n_trials: int = 10,
+         experiment_config: Optional[Dict] = None,
+         n_samples: Optional[int] = None,
+     ) -> optimization_result.OptimizationResult:
+         optimization = None
+         try:
+             optimization = self._opik_client.create_optimization(
+                 dataset_name=dataset if isinstance(dataset, str) else dataset.name,
+                 objective_name=metric_config.metric.name,
+             )
+         except Exception:
+             logger.warning(
+                 "Opik server does not support optimizations. Please upgrade opik."
+             )
+             optimization = None
+
+         try:
+             result = self._optimize_prompt(
+                 optimization_id=optimization.id if optimization is not None else None,
+                 dataset=dataset,
+                 metric_config=metric_config,
+                 task_config=task_config,
+                 n_trials=n_trials,
+                 experiment_config=experiment_config,
+                 n_samples=n_samples,
+             )
+             if optimization:
+                 self.update_optimization(optimization, status="completed")
+             return result
+         except Exception as e:
+             if optimization:
+                 self.update_optimization(optimization, status="cancelled")
+             logger.error(f"FewShotBayesian optimization failed: {e}", exc_info=True)
+             raise e
+
+     def evaluate_prompt(
+         self,
+         prompt: Union[str, List[Dict[Literal["role", "content"], str]]],
+         dataset: opik.Dataset,
+         metric_config: MetricConfig,
+         task_config: Optional[TaskConfig] = None,
+         dataset_item_ids: Optional[List[str]] = None,
+         experiment_config: Optional[Dict] = None,
+         n_samples: Optional[int] = None,
+     ) -> float:
+
+         if isinstance(prompt, str):
+             if task_config is None:
+                 raise ValueError(
+                     "To use a string prompt, please pass in task_config to evaluate_prompt()"
+                 )
+
+             questions = {
+                 field: ("{{%s}}" % field) for field in task_config.input_dataset_fields
+             }
+             prompt = [
+                 {"role": "system", "content": prompt},
+                 {"role": "user", "content": json.dumps(questions)},
+             ]
+
+         # Ensure prompt is correctly formatted
+         if not all(
+             isinstance(item, dict) and "role" in item and "content" in item
+             for item in prompt
+         ):
+             raise ValueError(
+                 "A ChatPrompt must be a list of dictionaries with 'role' and 'content' keys."
+             )
+
+         template = prompt_templates.ChatPromptTemplate(
+             prompt, validate_placeholders=False
+         )
+         llm_task = self._build_task_from_prompt_template(template)
+
+         experiment_config = experiment_config or {}
+         experiment_config = {
+             **experiment_config,
+             **{
+                 "optimizer": self.__class__.__name__,
+                 "metric": metric_config.metric.name,
+                 "dataset": dataset.name,
+                 "configuration": {
+                     "examples": prompt,
+                 },
+             },
+         }
+
+         if n_samples is not None:
+             if dataset_item_ids is not None:
+                 raise Exception("Can't use n_samples and dataset_item_ids")
+
+             all_ids = [dataset_item["id"] for dataset_item in dataset.get_items()]
+             dataset_item_ids = random.sample(all_ids, n_samples)
+
+         logger.debug("Starting FewShotBayesian evaluation...")
+         score = task_evaluator.evaluate(
+             dataset=dataset,
+             dataset_item_ids=dataset_item_ids,
+             metric_config=metric_config,
+             evaluated_task=llm_task,
+             num_threads=self.n_threads,
+             project_name=self.project_name,
+             experiment_config=experiment_config,
+         )
+         logger.debug(f"Evaluation score: {score:.4f}")
+
+         return score
+
+     def _build_task_from_prompt_template(
+         self, template: prompt_templates.ChatPromptTemplate
+     ):
+         def llm_task(dataset_item: Dict[str, Any]) -> Dict[str, Any]:
+             prompt_ = template.format(**dataset_item)
+
+             response = _call_model(
+                 model=self.model,
+                 messages=prompt_,
+                 seed=self.seed,
+                 model_kwargs=self.model_kwargs,
+             )
+
+             return {
+                 mappers.EVALUATED_LLM_TASK_OUTPUT: response.choices[0].message.content
+             }
+
+         return llm_task
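
For orientation, the sketch below shows how this optimizer is typically driven end to end. It is illustrative only: the top-level import path, the LevenshteinRatio metric, the mapper helpers, and the exact MetricConfig/TaskConfig constructor arguments are assumptions inferred from the field names used above, not something this diff confirms.

# Illustrative sketch only -- import paths and config constructor shapes are assumptions.
import logging
from opik.evaluation.metrics import LevenshteinRatio

from opik_optimizer import FewShotBayesianOptimizer
from opik_optimizer.optimization_config import mappers
from opik_optimizer.optimization_config.configs import MetricConfig, TaskConfig

optimizer = FewShotBayesianOptimizer(
    model="openai/gpt-4o-mini",   # any LiteLLM model identifier
    project_name="my-project",
    min_examples=2,
    max_examples=8,
    n_threads=4,
)

metric_config = MetricConfig(
    metric=LevenshteinRatio(),
    inputs={                                              # assumed mapper helpers
        "output": mappers.from_llm_response_text(),
        "reference": mappers.from_dataset_field(name="answer"),
    },
)
task_config = TaskConfig(
    instruction_prompt="Answer the question concisely.",
    input_dataset_fields=["question"],
    output_dataset_field="answer",
    use_chat_prompt=True,   # required: the optimizer rejects non-chat prompts
)

result = optimizer.optimize_prompt(
    dataset="my-dataset",   # an Opik dataset name or an opik.Dataset object
    metric_config=metric_config,
    task_config=task_config,
    n_trials=10,
    n_samples=50,
)
print(result.score, result.details["n_examples"])
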
@@ -0,0 +1,91 @@
+ import pydantic
+ from typing import List, Dict, Literal
+ import json
+
+ from . import prompt_templates
+
+ ChatItem = Dict[Literal["role", "content"], str]
+
+
+ class ChatPromptParameter(pydantic.BaseModel):
+     name: str
+     instruction: str
+     task_input_parameters: List[str]
+     task_output_parameter: str
+     demo_examples: List[Dict[str, str]] = pydantic.Field(default_factory=list)
+
+     _few_shot_system_prompt_intro: str = "You are an intelligent assistant that learns from few-shot examples provided earlier in the conversation. Whenever you respond, carefully follow the structure, tone, and format of previous assistant replies, using them as a guide"
+
+     def as_template(self) -> prompt_templates.ChatPromptTemplate:
+         if not self.demo_examples:
+             return prompt_templates.ChatPromptTemplate(
+                 chat_template=[
+                     {
+                         "role": "system",
+                         "content": self.instruction
+                     },
+                     {
+                         "role": "user",
+                         "content": json.dumps({param: f"{{{{{param}}}}}" for param in self.task_input_parameters})
+                     }
+                 ]
+             )
+
+         return prompt_templates.ChatPromptTemplate(
+             chat_template=[
+                 {
+                     "role": "system",
+                     "content": self.instruction + f"\n\n{self._few_shot_system_prompt_intro}"
+                 },
+                 *self._render_demos(),
+                 {
+                     "role": "user",
+                     "content": json.dumps({param: f"{{{{{param}}}}}" for param in self.task_input_parameters})
+                 }
+             ]
+         )
+
+     def _render_demos(self) -> List[ChatItem]:
+         """
+         Renders demo examples in the following format:
+
+         [
+             {
+                 "role": "user",
+                 "content": "\n{\n\"input_field1\": \"value1\",\n\"input_field2\": \"value2\"\n}\n"
+             },
+             {
+                 "role": "assistant",
+                 "content": "expected_response_1"
+             },
+             {
+                 "role": "user",
+                 "content": "\n{\n\"input_field1\": \"value3\",\n\"input_field2\": \"value4\"\n}\n"
+             },
+             {
+                 "role": "assistant",
+                 "content": "expected_response_2"
+             }
+         ]
+         """
+         chat_items: List[ChatItem] = []
+
+         for example in self.demo_examples:
+             inputs = {param: example[param] for param in self.task_input_parameters}
+
+             formatted_input = json.dumps(inputs, indent=2)
+
+             user_message: ChatItem = {
+                 "role": "user",
+                 "content": f"\n{formatted_input}\n"
+             }
+
+             assistant_message: ChatItem = {
+                 "role": "assistant",
+                 "content": example[self.task_output_parameter]
+             }
+
+             chat_items.append(user_message)
+             chat_items.append(assistant_message)
+
+         return chat_items
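
To make the rendering behaviour concrete, here is a small sketch of ChatPromptParameter in use. The field values are invented, and it assumes the class is importable from wherever this module lives in the package (the file path is not shown in the diff).

# Sketch: rendering a two-example few-shot chat template.
# Assumes ChatPromptParameter is imported from the module defined above.
param = ChatPromptParameter(
    name="demo_prompt",
    instruction="Answer the question.",
    task_input_parameters=["question"],
    task_output_parameter="answer",
    demo_examples=[
        {"question": "What is 2 + 2?", "answer": "4"},
        {"question": "What colour is the sky?", "answer": "Blue"},
    ],
)

# The template contains: a system message (instruction + few-shot intro),
# one user/assistant pair per demo example, and a final user message whose
# content is a JSON object of mustache placeholders, e.g. {"question": "{{question}}"}.
messages = param.as_template().format(question="What is the capital of France?")
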
@@ -0,0 +1,80 @@
+ import abc
+ from typing import Dict, List, Literal, Any
+ from typing_extensions import override
+ import opik
+ from opik.api_objects.prompt import prompt_template as opik_prompt_template
+
+ ChatItem = Dict[Literal["role", "content"], str]
+
+
+ class BaseTemplate(abc.ABC):
+     @abc.abstractmethod
+     def format(self, **kwargs: Any) -> Any:
+         raise NotImplementedError
+
+ class PromptTemplate(BaseTemplate):
+     """Wrapper around opik's PromptTemplate, exposed as a BaseTemplate subclass."""
+     def __init__(
+         self,
+         template: str,
+         validate_placeholders: bool = False,
+         type: opik.PromptType = opik.PromptType.MUSTACHE
+     ) -> None:
+         self._opik_prompt_template = opik_prompt_template.PromptTemplate(
+             template=template,
+             validate_placeholders=validate_placeholders,
+             type=type
+         )
+
+     @override
+     def format(self, **kwargs: Any) -> str:
+         return self._opik_prompt_template.format(**kwargs)
+
+
+ class ChatItemTemplate(BaseTemplate):
+     def __init__(
+         self,
+         role: str,
+         prompt_template: PromptTemplate
+     ) -> None:
+         self._role = role
+         self._prompt_template = prompt_template
+
+     @override
+     def format(self, **kwargs: Any) -> ChatItem:
+         return {
+             "role": self._role,
+             "content": self._prompt_template.format(**kwargs)
+         }
+
+ class ChatPromptTemplate(BaseTemplate):
+     def __init__(
+         self,
+         chat_template: List[Dict[str, str]],
+         type: opik.PromptType = opik.PromptType.MUSTACHE,
+         validate_placeholders: bool = False,
+     ) -> None:
+         self._raw_chat_template = chat_template
+         self._type = type
+         self._validate_placeholders = validate_placeholders
+         self._init_chat_template_items()
+
+     def _init_chat_template_items(self) -> None:
+         self._chat_template_items: List[ChatItemTemplate] = [
+             ChatItemTemplate(
+                 role=item["role"],
+                 prompt_template=PromptTemplate(
+                     item["content"],
+                     type=self._type,
+                     validate_placeholders=self._validate_placeholders,
+                 )
+             )
+             for item in self._raw_chat_template
+         ]
+
+     @override
+     def format(self, **kwargs: Any) -> List[ChatItem]:
+         return [
+             item.format(**kwargs)
+             for item in self._chat_template_items
+         ]
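
A brief sketch of how these templates compose; placeholders use mustache syntax because PromptTemplate defaults to opik.PromptType.MUSTACHE. It assumes the classes above are importable from this module.

# Sketch: each message's content is rendered through opik's mustache PromptTemplate.
template = ChatPromptTemplate(
    chat_template=[
        {"role": "system", "content": "You answer questions about {{topic}}."},
        {"role": "user", "content": "{{question}}"},
    ]
)

messages = template.format(topic="geography", question="Name the longest river.")
# -> [{"role": "system", "content": "You answer questions about geography."},
#     {"role": "user", "content": "Name the longest river."}]
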
@@ -0,0 +1,69 @@
+ import logging
+ from rich.logging import RichHandler
+
+ DEFAULT_LOG_FORMAT = '%(message)s'
+ DEFAULT_DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
+
+ # Store configured state to prevent reconfiguration
+ _logging_configured = False
+
+ def setup_logging(
+     level=logging.WARNING,
+     format_string=DEFAULT_LOG_FORMAT,
+     date_format=DEFAULT_DATE_FORMAT,
+     force=False,
+ ):
+     """
+     Configures logging for the opik_optimizer package using rich.
+
+     Args:
+         level: The desired logging level (e.g., logging.DEBUG, logging.INFO, logging.WARNING).
+         format_string: The format string for log messages.
+         date_format: The format string for the date/time in log messages.
+         force: If True, reconfigure logging even if already configured.
+     """
+     global _logging_configured
+     if _logging_configured and not force:
+         # Already configured and not forcing a reconfiguration; nothing to do
+         return
+
+     # Configure opik_optimizer package logger
+     package_logger = logging.getLogger('opik_optimizer')
+
+     # Avoid adding handlers repeatedly if force=True replaces them
+     if not package_logger.handlers or force:
+         # Remove existing handlers if forcing re-configuration
+         if force and package_logger.handlers:
+             for handler in package_logger.handlers[:]:
+                 package_logger.removeHandler(handler)
+
+         console_handler = RichHandler(
+             rich_tracebacks=True,
+             markup=True,  # Enable rich markup in log messages
+             log_time_format=f"[{date_format}]"  # Apply date format
+         )
+         # RichHandler manages formatting, so we don't need a separate formatter
+         # formatter = logging.Formatter(format_string, datefmt=date_format)
+         # console_handler.setFormatter(formatter)
+         package_logger.addHandler(console_handler)
+
+     package_logger.setLevel(level)
+     package_logger.propagate = False  # Don't duplicate messages in root logger
+
+     # Set levels for noisy libraries like LiteLLM and httpx
+     logging.getLogger("LiteLLM").setLevel(logging.WARNING)
+     logging.getLogger("urllib3").setLevel(logging.WARNING)
+     logging.getLogger("requests").setLevel(logging.WARNING)
+     logging.getLogger("httpx").setLevel(logging.WARNING)
+     logging.getLogger("dspy").setLevel(logging.WARNING)
+     logging.getLogger("datasets").setLevel(logging.WARNING)
+     logging.getLogger("optuna").setLevel(logging.WARNING)
+     logging.getLogger("filelock").setLevel(logging.WARNING)
+
+     _logging_configured = True
+
+     # Announce the configured level (the level name is rendered by the RichHandler)
+     package_logger.info(f"Opik Optimizer logging configured to level: [bold cyan]{logging.getLevelName(level)}[/bold cyan]")
+
+ # Module-level logger for this configuration module
+ logger = logging.getLogger(__name__)
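
As a usage note, a caller would typically invoke setup_logging once at application start. The sketch below assumes the module's import path, which is not shown in this diff.

import logging

# Assumed import path for the module above; adjust to the actual file name.
from opik_optimizer.logging_config import setup_logging

setup_logging(level=logging.DEBUG)              # installs a RichHandler on "opik_optimizer"
setup_logging(level=logging.INFO)               # no-op: already configured
setup_logging(level=logging.INFO, force=True)   # replaces the handler and applies the new level
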