dao-ai 0.1.1__py3-none-any.whl → 0.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. dao_ai/agent_as_code.py +2 -5
  2. dao_ai/cli.py +65 -15
  3. dao_ai/config.py +672 -218
  4. dao_ai/genie/cache/core.py +6 -2
  5. dao_ai/genie/cache/lru.py +29 -11
  6. dao_ai/genie/cache/semantic.py +95 -44
  7. dao_ai/hooks/core.py +5 -5
  8. dao_ai/logging.py +56 -0
  9. dao_ai/memory/core.py +61 -44
  10. dao_ai/memory/databricks.py +54 -41
  11. dao_ai/memory/postgres.py +77 -36
  12. dao_ai/middleware/assertions.py +45 -17
  13. dao_ai/middleware/core.py +13 -7
  14. dao_ai/middleware/guardrails.py +30 -25
  15. dao_ai/middleware/human_in_the_loop.py +9 -5
  16. dao_ai/middleware/message_validation.py +61 -29
  17. dao_ai/middleware/summarization.py +16 -11
  18. dao_ai/models.py +172 -69
  19. dao_ai/nodes.py +148 -19
  20. dao_ai/optimization.py +26 -16
  21. dao_ai/orchestration/core.py +15 -8
  22. dao_ai/orchestration/supervisor.py +22 -8
  23. dao_ai/orchestration/swarm.py +57 -12
  24. dao_ai/prompts.py +17 -17
  25. dao_ai/providers/databricks.py +365 -155
  26. dao_ai/state.py +24 -6
  27. dao_ai/tools/__init__.py +2 -0
  28. dao_ai/tools/agent.py +1 -3
  29. dao_ai/tools/core.py +7 -7
  30. dao_ai/tools/email.py +29 -77
  31. dao_ai/tools/genie.py +18 -13
  32. dao_ai/tools/mcp.py +223 -156
  33. dao_ai/tools/python.py +5 -2
  34. dao_ai/tools/search.py +1 -1
  35. dao_ai/tools/slack.py +21 -9
  36. dao_ai/tools/sql.py +202 -0
  37. dao_ai/tools/time.py +30 -7
  38. dao_ai/tools/unity_catalog.py +129 -86
  39. dao_ai/tools/vector_search.py +318 -244
  40. dao_ai/utils.py +15 -10
  41. dao_ai-0.1.3.dist-info/METADATA +455 -0
  42. dao_ai-0.1.3.dist-info/RECORD +64 -0
  43. dao_ai-0.1.1.dist-info/METADATA +0 -1878
  44. dao_ai-0.1.1.dist-info/RECORD +0 -62
  45. {dao_ai-0.1.1.dist-info → dao_ai-0.1.3.dist-info}/WHEEL +0 -0
  46. {dao_ai-0.1.1.dist-info → dao_ai-0.1.3.dist-info}/entry_points.txt +0 -0
  47. {dao_ai-0.1.1.dist-info → dao_ai-0.1.3.dist-info}/licenses/LICENSE +0 -0
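A note before the file-by-file diff: the dominant change in dao_ai/providers/databricks.py below (alongside the new dao_ai/logging.py module) is a migration from f-string log messages to structured key-value logging, with more deliberate level usage (trace for chatty internals, success for milestones). A minimal sketch of the pattern, assuming a loguru-style logger — the trace/success levels and bare keyword arguments suggest loguru, which captures extra kwargs into record["extra"], though dao_ai/logging.py itself is not shown in this section:

import sys

from loguru import logger

# Hypothetical sink configuration: render captured fields next to the message.
# level="TRACE" is needed for logger.trace() output to appear at all.
logger.remove()
logger.add(sys.stderr, level="TRACE", format="{time} | {level} | {message} | {extra}")

experiment_name, experiment_id = "dao-ai-demo", "1234"  # illustrative values

# Before: values interpolated into the message, awkward to parse or alert on.
logger.info(f"Created new experiment: {experiment_name} (ID: {experiment_id})")

# After: a constant message plus machine-readable fields; loguru's default
# capture=True stores the kwargs in record["extra"].
logger.success(
    "Created new MLflow experiment",
    experiment_name=experiment_name,
    experiment_id=experiment_id,
)

A constant message string is also easier to grep, count, and alert on, independent of the values attached to each event.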
@@ -181,15 +181,17 @@ class DatabricksProvider(ServiceProvider):
  experiment: Experiment | None = mlflow.get_experiment_by_name(experiment_name)
  if experiment is None:
  experiment_id: str = mlflow.create_experiment(name=experiment_name)
- logger.info(
- f"Created new experiment: {experiment_name} (ID: {experiment_id})"
+ logger.success(
+ "Created new MLflow experiment",
+ experiment_name=experiment_name,
+ experiment_id=experiment_id,
  )
  experiment = mlflow.get_experiment(experiment_id)
  return experiment

  def create_token(self) -> str:
  current_user: User = self.w.current_user.me()
- logger.debug(f"Authenticated to Databricks as {current_user}")
+ logger.debug("Authenticated to Databricks", user=str(current_user))
  headers: dict[str, str] = self.w.config.authenticate()
  token: str = headers["Authorization"].replace("Bearer ", "")
  return token
@@ -201,17 +203,24 @@ class DatabricksProvider(ServiceProvider):
  secret_response: GetSecretResponse = self.w.secrets.get_secret(
  secret_scope, secret_key
  )
- logger.debug(f"Retrieved secret {secret_key} from scope {secret_scope}")
+ logger.trace(
+ "Retrieved secret", secret_key=secret_key, secret_scope=secret_scope
+ )
  encoded_secret: str = secret_response.value
  decoded_secret: str = base64.b64decode(encoded_secret).decode("utf-8")
  return decoded_secret
  except NotFound:
  logger.warning(
- f"Secret {secret_key} not found in scope {secret_scope}, using default value"
+ "Secret not found, using default value",
+ secret_key=secret_key,
+ secret_scope=secret_scope,
  )
  except Exception as e:
  logger.error(
- f"Error retrieving secret {secret_key} from scope {secret_scope}: {e}"
+ "Error retrieving secret",
+ secret_key=secret_key,
+ secret_scope=secret_scope,
+ error=str(e),
  )

  return default_value
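For context on the hunk above: the Databricks secrets API returns payloads base64-encoded, which is why the code decodes secret_response.value before returning it, and a missing secret falls back to default_value instead of raising. A minimal standalone sketch of the same flow using the Databricks SDK directly (scope and key names are illustrative):

import base64

from databricks.sdk import WorkspaceClient
from databricks.sdk.errors import NotFound

w = WorkspaceClient()
try:
    # get_secret returns the value base64-encoded, as in the hunk above.
    resp = w.secrets.get_secret("agents", "slack-token")  # illustrative scope/key
    secret = base64.b64decode(resp.value).decode("utf-8")
except NotFound:
    secret = "fallback-value"  # mirrors the default_value path in the diff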
@@ -220,14 +229,16 @@ class DatabricksProvider(ServiceProvider):
  self,
  config: AppConfig,
  ) -> ModelInfo:
- logger.debug("Creating agent...")
+ logger.info("Creating agent")
  mlflow.set_registry_uri("databricks-uc")

  # Set up experiment for proper tracking
  experiment: Experiment = self.get_or_create_experiment(config)
  mlflow.set_experiment(experiment_id=experiment.experiment_id)
  logger.debug(
- f"Using experiment: {experiment.name} (ID: {experiment.experiment_id})"
+ "Using MLflow experiment",
+ experiment_name=experiment.name,
+ experiment_id=experiment.experiment_id,
  )

  llms: Sequence[LLMModel] = list(config.resources.llms.values())
@@ -273,12 +284,16 @@ class DatabricksProvider(ServiceProvider):
  for resource in r.as_resources()
  if not r.on_behalf_of_user
  ]
- logger.debug(f"system_resources: {[r.name for r in system_resources]}")
+ logger.trace(
+ "System resources identified",
+ count=len(system_resources),
+ resources=[r.name for r in system_resources],
+ )

  system_auth_policy: SystemAuthPolicy = SystemAuthPolicy(
  resources=system_resources
  )
- logger.debug(f"system_auth_policy: {system_auth_policy}")
+ logger.trace("System auth policy created", policy=str(system_auth_policy))

  api_scopes: Sequence[str] = list(
  set(
@@ -290,15 +305,19 @@ class DatabricksProvider(ServiceProvider):
  ]
  )
  )
- logger.debug(f"api_scopes: {api_scopes}")
+ logger.trace("API scopes identified", scopes=api_scopes)

  user_auth_policy: UserAuthPolicy = UserAuthPolicy(api_scopes=api_scopes)
- logger.debug(f"user_auth_policy: {user_auth_policy}")
+ logger.trace("User auth policy created", policy=str(user_auth_policy))

  auth_policy: AuthPolicy = AuthPolicy(
  system_auth_policy=system_auth_policy, user_auth_policy=user_auth_policy
  )
- logger.debug(f"auth_policy: {auth_policy}")
+ logger.debug(
+ "Auth policy created",
+ has_system_auth=system_auth_policy is not None,
+ has_user_auth=user_auth_policy is not None,
+ )

  code_paths: list[str] = config.app.code_paths
  for path in code_paths:
@@ -325,24 +344,27 @@ class DatabricksProvider(ServiceProvider):

  pip_requirements += get_installed_packages()

- logger.debug(f"pip_requirements: {pip_requirements}")
- logger.debug(f"code_paths: {code_paths}")
+ logger.trace("Pip requirements prepared", count=len(pip_requirements))
+ logger.trace("Code paths prepared", count=len(code_paths))

  run_name: str = normalize_name(config.app.name)
- logger.debug(f"run_name: {run_name}")
- logger.debug(f"model_path: {model_path.as_posix()}")
+ logger.debug(
+ "Agent run configuration",
+ run_name=run_name,
+ model_path=model_path.as_posix(),
+ )

  input_example: dict[str, Any] = None
  if config.app.input_example:
  input_example = config.app.input_example.model_dump()

- logger.debug(f"input_example: {input_example}")
+ logger.trace("Input example configured", has_example=input_example is not None)

  # Create conda environment with configured Python version
  # This allows deploying from environments with different Python versions
  # (e.g., Databricks Apps with Python 3.11 can deploy to Model Serving with 3.12)
  target_python_version: str = config.app.python_version
- logger.debug(f"target_python_version: {target_python_version}")
+ logger.debug("Target Python version configured", version=target_python_version)

  conda_env: dict[str, Any] = {
  "name": "mlflow-env",
@@ -353,7 +375,11 @@ class DatabricksProvider(ServiceProvider):
  {"pip": list(pip_requirements)},
  ],
  }
- logger.debug(f"conda_env: {conda_env}")
+ logger.trace(
+ "Conda environment configured",
+ python_version=target_python_version,
+ pip_packages_count=len(pip_requirements),
+ )

  with mlflow.start_run(run_name=run_name):
  mlflow.set_tag("type", "agent")
@@ -374,8 +400,10 @@ class DatabricksProvider(ServiceProvider):
  model_version: ModelVersion = mlflow.register_model(
  name=registered_model_name, model_uri=logged_agent_info.model_uri
  )
- logger.debug(
- f"Registered model: {registered_model_name} with version: {model_version.version}"
+ logger.success(
+ "Model registered",
+ model_name=registered_model_name,
+ version=model_version.version,
  )

  client: MlflowClient = MlflowClient()
@@ -387,7 +415,7 @@ class DatabricksProvider(ServiceProvider):
  key="dao_ai",
  value=dao_ai_version(),
  )
- logger.debug(f"Set dao_ai tag on model version {model_version.version}")
+ logger.trace("Set dao_ai tag on model version", version=model_version.version)

  client.set_registered_model_alias(
  name=registered_model_name,
@@ -404,12 +432,15 @@ class DatabricksProvider(ServiceProvider):
  aliased_model: ModelVersion = client.get_model_version_by_alias(
  registered_model_name, config.app.alias
  )
- logger.debug(
- f"Model {registered_model_name} aliased to {config.app.alias} with version: {aliased_model.version}"
+ logger.info(
+ "Model aliased",
+ model_name=registered_model_name,
+ alias=config.app.alias,
+ version=aliased_model.version,
  )

  def deploy_agent(self, config: AppConfig) -> None:
- logger.debug("Deploying agent...")
+ logger.info("Deploying agent", endpoint_name=config.app.endpoint_name)
  mlflow.set_registry_uri("databricks-uc")

  endpoint_name: str = config.app.endpoint_name
@@ -430,12 +461,10 @@ class DatabricksProvider(ServiceProvider):
  agents.get_deployments(endpoint_name)
  endpoint_exists = True
  logger.debug(
- f"Endpoint {endpoint_name} already exists, updating without tags to avoid conflicts..."
+ "Endpoint already exists, updating", endpoint_name=endpoint_name
  )
  except Exception:
- logger.debug(
- f"Endpoint {endpoint_name} doesn't exist, creating new with tags..."
- )
+ logger.debug("Creating new endpoint", endpoint_name=endpoint_name)

  # Deploy - skip tags for existing endpoints to avoid conflicts
  agents.deploy(
@@ -451,8 +480,11 @@ class DatabricksProvider(ServiceProvider):
  registered_model_name: str = config.app.registered_model.full_name
  permissions: Sequence[dict[str, Any]] = config.app.permissions

- logger.debug(registered_model_name)
- logger.debug(permissions)
+ logger.debug(
+ "Configuring model permissions",
+ model_name=registered_model_name,
+ permissions_count=len(permissions),
+ )

  for permission in permissions:
  principals: Sequence[str] = permission.principals
@@ -472,7 +504,7 @@ class DatabricksProvider(ServiceProvider):
  try:
  catalog_info = self.w.catalogs.get(name=schema.catalog_name)
  except NotFound:
- logger.debug(f"Creating catalog: {schema.catalog_name}")
+ logger.info("Creating catalog", catalog_name=schema.catalog_name)
  catalog_info = self.w.catalogs.create(name=schema.catalog_name)
  return catalog_info

@@ -482,7 +514,7 @@ class DatabricksProvider(ServiceProvider):
  try:
  schema_info = self.w.schemas.get(full_name=schema.full_name)
  except NotFound:
- logger.debug(f"Creating schema: {schema.full_name}")
+ logger.info("Creating schema", schema_name=schema.full_name)
  schema_info = self.w.schemas.create(
  name=schema.schema_name, catalog_name=catalog_info.name
  )
@@ -494,7 +526,7 @@ class DatabricksProvider(ServiceProvider):
  try:
  volume_info = self.w.volumes.read(name=volume.full_name)
  except NotFound:
- logger.debug(f"Creating volume: {volume.full_name}")
+ logger.info("Creating volume", volume_name=volume.full_name)
  volume_info = self.w.volumes.create(
  catalog_name=schema_info.catalog_name,
  schema_name=schema_info.name,
@@ -505,7 +537,7 @@ class DatabricksProvider(ServiceProvider):

  def create_path(self, volume_path: VolumePathModel) -> Path:
  path: Path = volume_path.full_name
- logger.info(f"Creating volume path: {path}")
+ logger.info("Creating volume path", path=str(path))
  self.w.files.create_directory(path)
  return path
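The three hunks above share one get-or-create shape: read the resource, and create it only when the lookup raises NotFound, so repeated provisioning runs are idempotent. A condensed sketch of the pattern (the catalog case shown; schema and volume are analogous, and the logger is assumed loguru-style as sketched earlier):

from databricks.sdk import WorkspaceClient
from databricks.sdk.errors import NotFound
from loguru import logger  # assumption: loguru-style structured logger

w = WorkspaceClient()

def get_or_create_catalog(catalog_name: str):
    # Read first; create only on NotFound, so reruns change nothing.
    try:
        return w.catalogs.get(name=catalog_name)
    except NotFound:
        logger.info("Creating catalog", catalog_name=catalog_name)
        return w.catalogs.create(name=catalog_name)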
@@ -546,11 +578,12 @@ class DatabricksProvider(ServiceProvider):

  if ddl:
  ddl_path: Path = Path(ddl)
- logger.debug(f"Executing DDL from: {ddl_path}")
+ logger.debug("Executing DDL", ddl_path=str(ddl_path))
  statements: Sequence[str] = sqlparse.parse(ddl_path.read_text())
  for statement in statements:
- logger.debug(statement)
- logger.debug(f"args: {args}")
+ logger.trace(
+ "Executing DDL statement", statement=str(statement)[:100], args=args
+ )
  spark.sql(
  str(statement),
  args=args,
@@ -559,20 +592,23 @@ class DatabricksProvider(ServiceProvider):
  if data:
  data_path: Path = Path(data)
  if format == "sql":
- logger.debug(f"Executing SQL from: {data_path}")
+ logger.debug("Executing SQL from file", data_path=str(data_path))
  data_statements: Sequence[str] = sqlparse.parse(data_path.read_text())
  for statement in data_statements:
- logger.debug(statement)
- logger.debug(f"args: {args}")
+ logger.trace(
+ "Executing SQL statement",
+ statement=str(statement)[:100],
+ args=args,
+ )
  spark.sql(
  str(statement),
  args=args,
  )
  else:
- logger.debug(f"Writing to: {table}")
+ logger.debug("Writing dataset to table", table=table)
  if not data_path.is_absolute():
  data_path = current_dir / data_path
- logger.debug(f"Data path: {data_path.as_posix()}")
+ logger.trace("Data path resolved", path=data_path.as_posix())
  if format == "excel":
  pdf = pd.read_excel(data_path.as_posix())
  df = spark.createDataFrame(pdf, schema=dataset.table_schema)
@@ -589,6 +625,22 @@ class DatabricksProvider(ServiceProvider):
  df.write.mode("overwrite").saveAsTable(table)

  def create_vector_store(self, vector_store: VectorStoreModel) -> None:
+ # Validate that this is a provisioning-mode config
+ if vector_store.source_table is None:
+ raise ValueError(
+ "Cannot create vector store: source_table is required for provisioning. "
+ "This VectorStoreModel appears to be configured for 'use existing index' mode. "
+ "To provision a new vector store, provide source_table and embedding_source_column."
+ )
+ if vector_store.embedding_source_column is None:
+ raise ValueError(
+ "Cannot create vector store: embedding_source_column is required for provisioning."
+ )
+ if vector_store.endpoint is None:
+ raise ValueError(
+ "Cannot create vector store: endpoint is required for provisioning."
+ )
+
  if not endpoint_exists(self.vsc, vector_store.endpoint.name):
  self.vsc.create_endpoint_and_wait(
  name=vector_store.endpoint.name,
@@ -596,13 +648,17 @@ class DatabricksProvider(ServiceProvider):
  verbose=True,
  )

- logger.debug(f"Endpoint named {vector_store.endpoint.name} is ready.")
+ logger.success(
+ "Vector search endpoint ready", endpoint_name=vector_store.endpoint.name
+ )

  if not index_exists(
  self.vsc, vector_store.endpoint.name, vector_store.index.full_name
  ):
- logger.debug(
- f"Creating index {vector_store.index.full_name} on endpoint {vector_store.endpoint.name}..."
+ logger.info(
+ "Creating vector search index",
+ index_name=vector_store.index.full_name,
+ endpoint_name=vector_store.endpoint.name,
  )
  self.vsc.create_delta_sync_index_and_wait(
  endpoint_name=vector_store.endpoint.name,
@@ -616,7 +672,8 @@ class DatabricksProvider(ServiceProvider):
  )
  else:
  logger.debug(
- f"Index {vector_store.index.full_name} already exists, checking status and syncing..."
+ "Vector search index already exists, checking status",
+ index_name=vector_store.index.full_name,
  )
  index = self.vsc.get_index(
  vector_store.endpoint.name, vector_store.index.full_name
@@ -639,54 +696,61 @@ class DatabricksProvider(ServiceProvider):

  if pipeline_status in [
  "COMPLETED",
+ "ONLINE",
  "FAILED",
  "CANCELED",
  "ONLINE_PIPELINE_FAILED",
  ]:
- logger.debug(
- f"Index is ready to sync (status: {pipeline_status})"
- )
+ logger.debug("Index ready to sync", status=pipeline_status)
  break
  elif pipeline_status in [
  "WAITING_FOR_RESOURCES",
  "PROVISIONING",
  "INITIALIZING",
  "INDEXING",
- "ONLINE",
  ]:
- logger.debug(
- f"Index not ready yet (status: {pipeline_status}), waiting {wait_interval} seconds..."
+ logger.trace(
+ "Index not ready, waiting",
+ status=pipeline_status,
+ wait_seconds=wait_interval,
  )
  time.sleep(wait_interval)
  elapsed += wait_interval
  else:
  logger.warning(
- f"Unknown pipeline status: {pipeline_status}, attempting sync anyway"
+ "Unknown pipeline status, attempting sync",
+ status=pipeline_status,
  )
  break
  except Exception as status_error:
  logger.warning(
- f"Could not check index status: {status_error}, attempting sync anyway"
+ "Could not check index status, attempting sync",
+ error=str(status_error),
  )
  break

  if elapsed >= max_wait_time:
  logger.warning(
- f"Timed out waiting for index to be ready after {max_wait_time} seconds"
+ "Timed out waiting for index to be ready",
+ max_wait_seconds=max_wait_time,
  )

  # Now attempt to sync
  try:
  index.sync()
- logger.debug("Index sync completed successfully")
+ logger.success("Index sync completed")
  except Exception as sync_error:
  if "not ready to sync yet" in str(sync_error).lower():
- logger.warning(f"Index still not ready to sync: {sync_error}")
+ logger.warning(
+ "Index still not ready to sync", error=str(sync_error)
+ )
  else:
  raise sync_error

- logger.debug(
- f"index {vector_store.index.full_name} on table {vector_store.source_table.full_name} is ready"
+ logger.success(
+ "Vector search index ready",
+ index_name=vector_store.index.full_name,
+ source_table=vector_store.source_table.full_name,
  )

  def get_vector_index(self, vector_store: VectorStoreModel) -> None:
@@ -722,12 +786,16 @@ class DatabricksProvider(ServiceProvider):
  # sql = sql.replace("{catalog_name}", schema.catalog_name)
  # sql = sql.replace("{schema_name}", schema.schema_name)

- logger.info(function.name)
- logger.info(sql)
+ logger.info("Creating SQL function", function_name=function.name)
+ logger.trace("SQL function body", sql=sql[:200])
  _: FunctionInfo = self.dfs.create_function(sql_function_body=sql)

  if unity_catalog_function.test:
- logger.info(unity_catalog_function.test.parameters)
+ logger.debug(
+ "Testing function",
+ function_name=function.full_name,
+ parameters=unity_catalog_function.test.parameters,
+ )

  result: FunctionExecutionResult = self.dfs.execute_function(
  function_name=function.full_name,
@@ -735,37 +803,50 @@ class DatabricksProvider(ServiceProvider):
  )

  if result.error:
- logger.error(result.error)
+ logger.error(
+ "Function test failed",
+ function_name=function.full_name,
+ error=result.error,
+ )
  else:
- logger.info(f"Function {function.full_name} executed successfully.")
- logger.info(f"Result: {result}")
+ logger.success(
+ "Function test passed", function_name=function.full_name
+ )
+ logger.debug("Function test result", result=str(result))

  def find_columns(self, table_model: TableModel) -> Sequence[str]:
- logger.debug(f"Finding columns for table: {table_model.full_name}")
+ logger.trace("Finding columns for table", table=table_model.full_name)
  table_info: TableInfo = self.w.tables.get(full_name=table_model.full_name)
  columns: Sequence[ColumnInfo] = table_info.columns
  column_names: Sequence[str] = [c.name for c in columns]
- logger.debug(f"Columns found: {column_names}")
+ logger.debug(
+ "Columns found",
+ table=table_model.full_name,
+ columns_count=len(column_names),
+ )
  return column_names

  def find_primary_key(self, table_model: TableModel) -> Sequence[str] | None:
- logger.debug(f"Finding primary key for table: {table_model.full_name}")
+ logger.trace("Finding primary key for table", table=table_model.full_name)
  primary_keys: Sequence[str] | None = None
  table_info: TableInfo = self.w.tables.get(full_name=table_model.full_name)
  constraints: Sequence[TableConstraint] = table_info.table_constraints
  primary_key_constraint: PrimaryKeyConstraint | None = next(
- c.primary_key_constraint for c in constraints if c.primary_key_constraint
+ (c.primary_key_constraint for c in constraints if c.primary_key_constraint),
+ None,
  )
  if primary_key_constraint:
  primary_keys = primary_key_constraint.child_columns

- logger.debug(f"Primary key for table {table_model.full_name}: {primary_keys}")
+ logger.debug(
+ "Primary key found", table=table_model.full_name, primary_keys=primary_keys
+ )
  return primary_keys

  def find_vector_search_endpoint(
  self, predicate: Callable[[dict[str, Any]], bool]
  ) -> str | None:
- logger.debug("Finding vector search endpoint...")
+ logger.trace("Finding vector search endpoint")
  endpoint_name: str | None = None
  vector_search_endpoints: Sequence[dict[str, Any]] = (
  self.vsc.list_endpoints().get("endpoints", [])
@@ -774,11 +855,13 @@ class DatabricksProvider(ServiceProvider):
  if predicate(endpoint):
  endpoint_name = endpoint["name"]
  break
- logger.debug(f"Vector search endpoint found: {endpoint_name}")
+ logger.debug("Vector search endpoint found", endpoint_name=endpoint_name)
  return endpoint_name

  def find_endpoint_for_index(self, index_model: IndexModel) -> str | None:
- logger.debug(f"Finding vector search index: {index_model.full_name}")
+ logger.trace(
+ "Finding endpoint for vector search index", index_name=index_model.full_name
+ )
  all_endpoints: Sequence[dict[str, Any]] = self.vsc.list_endpoints().get(
  "endpoints", []
  )
@@ -788,12 +871,20 @@ class DatabricksProvider(ServiceProvider):
  endpoint_name: str = endpoint["name"]
  indexes = self.vsc.list_indexes(name=endpoint_name)
  vector_indexes: Sequence[dict[str, Any]] = indexes.get("vector_indexes", [])
- logger.trace(f"Endpoint: {endpoint_name}, vector_indexes: {vector_indexes}")
+ logger.trace(
+ "Checking endpoint for indexes",
+ endpoint_name=endpoint_name,
+ indexes_count=len(vector_indexes),
+ )
  index_names = [vector_index["name"] for vector_index in vector_indexes]
  if index_name in index_names:
  found_endpoint_name = endpoint_name
  break
- logger.debug(f"Vector search index found: {found_endpoint_name}")
+ logger.debug(
+ "Vector search index endpoint found",
+ index_name=index_model.full_name,
+ endpoint_name=found_endpoint_name,
+ )
  return found_endpoint_name

  def _wait_for_database_available(
@@ -820,7 +911,8 @@ class DatabricksProvider(ServiceProvider):
  from typing import Any

  logger.info(
- f"Waiting for database instance {instance_name} to become AVAILABLE..."
+ "Waiting for database instance to become AVAILABLE",
+ instance_name=instance_name,
  )
  elapsed: int = 0

@@ -830,16 +922,24 @@ class DatabricksProvider(ServiceProvider):
  name=instance_name
  )
  current_state: str = current_instance.state
- logger.debug(
- f"Database instance {instance_name} state: {current_state}"
+ logger.trace(
+ "Database instance state checked",
+ instance_name=instance_name,
+ state=current_state,
  )

  if current_state == "AVAILABLE":
- logger.info(f"Database instance {instance_name} is now AVAILABLE")
+ logger.success(
+ "Database instance is now AVAILABLE",
+ instance_name=instance_name,
+ )
  return
  elif current_state in ["STARTING", "UPDATING", "PROVISIONING"]:
- logger.debug(
- f"Database instance still in {current_state} state, waiting {wait_interval} seconds..."
+ logger.trace(
+ "Database instance not ready, waiting",
+ instance_name=instance_name,
+ state=current_state,
+ wait_seconds=wait_interval,
  )
  time.sleep(wait_interval)
  elapsed += wait_interval
@@ -849,7 +949,9 @@ class DatabricksProvider(ServiceProvider):
  )
  else:
  logger.warning(
- f"Unknown database state: {current_state}, continuing to wait..."
+ "Unknown database state, continuing to wait",
+ instance_name=instance_name,
+ state=current_state,
  )
  time.sleep(wait_interval)
  elapsed += wait_interval
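The index-sync hunks earlier and the database hunks here all use the same bounded polling loop: check state, sleep for wait_interval while the state is transient, stop on terminal states, and give up after max_wait_time. A generic sketch of that loop (names are illustrative, not from the package):

import time
from typing import Callable

def wait_until_available(
    get_state: Callable[[], str],          # returns the current state string
    terminal: tuple[str, ...] = ("STOPPED", "DELETING"),
    wait_interval: int = 10,
    max_wait_time: int = 600,
) -> bool:
    elapsed = 0
    while elapsed < max_wait_time:
        state = get_state()
        if state == "AVAILABLE":
            return True
        if state in terminal:
            return False                   # no point waiting further
        time.sleep(wait_interval)          # transient or unknown: keep waiting
        elapsed += wait_interval
    return False                           # timed out, mirroring the warning path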
@@ -892,13 +994,17 @@ class DatabricksProvider(ServiceProvider):

  if existing_instance:
  logger.debug(
- f"Database instance {database.instance_name} already exists with state: {existing_instance.state}"
+ "Database instance already exists",
+ instance_name=database.instance_name,
+ state=existing_instance.state,
  )

  # Check if database is in an intermediate state
  if existing_instance.state in ["STARTING", "UPDATING"]:
  logger.info(
- f"Database instance {database.instance_name} is in {existing_instance.state} state, waiting for it to become AVAILABLE..."
+ "Database instance in intermediate state, waiting",
+ instance_name=database.instance_name,
+ state=existing_instance.state,
  )

  # Wait for database to reach a stable state
@@ -914,65 +1020,87 @@ class DatabricksProvider(ServiceProvider):
  )
  )
  current_state: str = current_instance.state
- logger.debug(f"Database instance state: {current_state}")
+ logger.trace(
+ "Checking database instance state",
+ instance_name=database.instance_name,
+ state=current_state,
+ )

  if current_state == "AVAILABLE":
- logger.info(
- f"Database instance {database.instance_name} is now AVAILABLE"
+ logger.success(
+ "Database instance is now AVAILABLE",
+ instance_name=database.instance_name,
  )
  break
  elif current_state in ["STARTING", "UPDATING"]:
- logger.debug(
- f"Database instance still in {current_state} state, waiting {wait_interval} seconds..."
+ logger.trace(
+ "Database instance not ready, waiting",
+ instance_name=database.instance_name,
+ state=current_state,
+ wait_seconds=wait_interval,
  )
  time.sleep(wait_interval)
  elapsed += wait_interval
  elif current_state in ["STOPPED", "DELETING"]:
  logger.warning(
- f"Database instance {database.instance_name} is in unexpected state: {current_state}"
+ "Database instance in unexpected state",
+ instance_name=database.instance_name,
+ state=current_state,
  )
  break
  else:
  logger.warning(
- f"Unknown database state: {current_state}, proceeding anyway"
+ "Unknown database state, proceeding",
+ instance_name=database.instance_name,
+ state=current_state,
  )
  break
  except NotFound:
  logger.warning(
- f"Database instance {database.instance_name} no longer exists, will attempt to recreate"
+ "Database instance no longer exists, will recreate",
+ instance_name=database.instance_name,
  )
  break
  except Exception as state_error:
  logger.warning(
- f"Could not check database state: {state_error}, proceeding anyway"
+ "Could not check database state, proceeding",
+ instance_name=database.instance_name,
+ error=str(state_error),
  )
  break

  if elapsed >= max_wait_time:
  logger.warning(
- f"Timed out waiting for database instance {database.instance_name} to become AVAILABLE after {max_wait_time} seconds"
+ "Timed out waiting for database to become AVAILABLE",
+ instance_name=database.instance_name,
+ max_wait_seconds=max_wait_time,
  )

  elif existing_instance.state == "AVAILABLE":
  logger.info(
- f"Database instance {database.instance_name} already exists and is AVAILABLE"
+ "Database instance already exists and is AVAILABLE",
+ instance_name=database.instance_name,
  )
  return
  elif existing_instance.state in ["STOPPED", "DELETING"]:
  logger.warning(
- f"Database instance {database.instance_name} is in {existing_instance.state} state"
+ "Database instance in terminal state",
+ instance_name=database.instance_name,
+ state=existing_instance.state,
  )
  return
  else:
  logger.info(
- f"Database instance {database.instance_name} already exists with state: {existing_instance.state}"
+ "Database instance already exists",
+ instance_name=database.instance_name,
+ state=existing_instance.state,
  )
  return

  except NotFound:
  # Database doesn't exist, proceed with creation
- logger.debug(
- f"Database instance {database.instance_name} not found, creating new instance..."
+ logger.info(
+ "Creating new database instance", instance_name=database.instance_name
  )

  try:
@@ -992,8 +1120,9 @@ class DatabricksProvider(ServiceProvider):
  workspace_client.database.create_database_instance(
  database_instance=database_instance
  )
- logger.info(
- f"Successfully created database instance: {database.instance_name}"
+ logger.success(
+ "Database instance created successfully",
+ instance_name=database.instance_name,
  )

  # Wait for the newly created database to become AVAILABLE
@@ -1011,7 +1140,8 @@ class DatabricksProvider(ServiceProvider):
  or "RESOURCE_ALREADY_EXISTS" in error_msg
  ):
  logger.info(
- f"Database instance {database.instance_name} was created concurrently by another process"
+ "Database instance was created concurrently",
+ instance_name=database.instance_name,
  )
  # Still need to wait for the database to become AVAILABLE
  self._wait_for_database_available(
@@ -1021,7 +1151,9 @@ class DatabricksProvider(ServiceProvider):
  else:
  # Re-raise unexpected errors
  logger.error(
- f"Error creating database instance {database.instance_name}: {create_error}"
+ "Error creating database instance",
+ instance_name=database.instance_name,
+ error=str(create_error),
  )
  raise

@@ -1035,12 +1167,15 @@ class DatabricksProvider(ServiceProvider):
  or "RESOURCE_ALREADY_EXISTS" in error_msg
  ):
  logger.info(
- f"Database instance {database.instance_name} already exists (detected via exception)"
+ "Database instance already exists (detected via exception)",
+ instance_name=database.instance_name,
  )
  return
  else:
  logger.error(
- f"Unexpected error while handling database {database.instance_name}: {e}"
+ "Unexpected error while handling database",
+ instance_name=database.instance_name,
+ error=str(e),
  )
  raise
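The hunks above harden the create path against a race the earlier NotFound check cannot close: another process may create the instance between the existence check and the create call. The fix is to treat "already exists" errors from create as success and fall through to the availability wait. A condensed sketch (error substrings follow the ones matched in the diff; logger assumed loguru-style as before):

from typing import Callable

from loguru import logger  # assumption: loguru-style structured logger

def create_if_absent(
    create: Callable[[], None],
    wait_available: Callable[[], None],
    instance_name: str,
) -> None:
    try:
        create()
    except Exception as e:
        msg = str(e)
        if "already exists" in msg or "RESOURCE_ALREADY_EXISTS" in msg:
            # Lost the race: another process created it first; that is fine.
            logger.info(
                "Database instance was created concurrently",
                instance_name=instance_name,
            )
        else:
            raise
    wait_available()  # either way, wait for the instance to become AVAILABLE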
@@ -1048,7 +1183,9 @@ class DatabricksProvider(ServiceProvider):
  """
  Ask Databricks to mint a fresh DB credential for this instance.
  """
- logger.debug(f"Generating password for lakebase instance: {instance_name}")
+ logger.trace(
+ "Generating password for lakebase instance", instance_name=instance_name
+ )
  w: WorkspaceClient = self.w
  cred: DatabaseCredential = w.database.generate_database_credential(
  request_id=str(uuid.uuid4()),
@@ -1084,7 +1221,8 @@ class DatabricksProvider(ServiceProvider):
  # Validate that client_id is provided
  if not database.client_id:
  logger.warning(
- f"client_id is required to create instance role for database {database.instance_name}"
+ "client_id required to create instance role",
+ instance_name=database.instance_name,
  )
  return

@@ -1094,7 +1232,10 @@ class DatabricksProvider(ServiceProvider):
  instance_name: str = database.instance_name

  logger.debug(
- f"Creating instance role '{role_name}' for database {instance_name} with principal {client_id}"
+ "Creating instance role",
+ role_name=role_name,
+ instance_name=instance_name,
+ principal=client_id,
  )

  try:
@@ -1105,13 +1246,15 @@ class DatabricksProvider(ServiceProvider):
  name=role_name,
  )
  logger.info(
- f"Instance role '{role_name}' already exists for database {instance_name}"
+ "Instance role already exists",
+ role_name=role_name,
+ instance_name=instance_name,
  )
  return
  except NotFound:
  # Role doesn't exist, proceed with creation
  logger.debug(
- f"Instance role '{role_name}' not found, creating new role..."
+ "Instance role not found, creating new role", role_name=role_name
  )

  # Create the database instance role
@@ -1127,8 +1270,10 @@ class DatabricksProvider(ServiceProvider):
  database_instance_role=role,
  )

- logger.info(
- f"Successfully created instance role '{role_name}' for database {instance_name}"
+ logger.success(
+ "Instance role created successfully",
+ role_name=role_name,
+ instance_name=instance_name,
  )

  except Exception as e:
@@ -1140,13 +1285,18 @@ class DatabricksProvider(ServiceProvider):
  or "RESOURCE_ALREADY_EXISTS" in error_msg
  ):
  logger.info(
- f"Instance role '{role_name}' was created concurrently for database {instance_name}"
+ "Instance role was created concurrently",
+ role_name=role_name,
+ instance_name=instance_name,
  )
  return

  # Re-raise unexpected errors
  logger.error(
- f"Error creating instance role '{role_name}' for database {instance_name}: {e}"
+ "Error creating instance role",
+ role_name=role_name,
+ instance_name=instance_name,
+ error=str(e),
  )
  raise
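Stepping back to the generate_database_credential hunk further up: Lakebase instances have no static password; each connection mints a short-lived credential whose token is used as the Postgres password. A hedged usage sketch (host, user, database, and instance names are illustrative, and any Postgres driver would work the same way):

import uuid

import psycopg  # assumption: psycopg3 used purely for illustration
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()
cred = w.database.generate_database_credential(
    request_id=str(uuid.uuid4()),
    instance_names=["my-lakebase-instance"],  # illustrative instance name
)
conn = psycopg.connect(
    host="instance-host.database.cloud.databricks.com",  # illustrative host
    dbname="databricks_postgres",  # illustrative database name
    user="me@example.com",         # illustrative user
    password=cred.token,           # the minted credential acts as the password
    sslmode="require",
)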
@@ -1159,7 +1309,14 @@ class DatabricksProvider(ServiceProvider):
  1. champion alias
  2. latest alias
  3. default alias
- 4. Register default_template if provided
+ 4. Register default_template if provided (only if register_to_registry=True)
+ 5. Use default_template directly (fallback)
+
+ The auto_register field controls whether the default_template is automatically
+ synced to the prompt registry:
+ - If True (default): Auto-registers/updates the default_template in the registry
+ - If False: Never registers, but can still load existing prompts from registry
+ or use default_template directly as a local-only prompt

  Args:
  prompt_model: The prompt model configuration
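The reworked docstring above describes the prompt resolution chain; note that the hunks below actually try champion → default → latest before falling back to the inline default_template, and the new auto_register flag only gates the registration side step, never loading. A condensed sketch of the chain as the surrounding hunks implement it (assuming load_prompt is mlflow.genai.load_prompt, per the prompts:/ URIs in the diff; error handling collapsed):

from mlflow.genai import load_prompt  # assumption: the load_prompt used in the diff

def resolve_prompt(prompt_name: str, prompt_model):
    # Registry aliases in priority order, mirroring the hunks below.
    for alias in ("champion", "default", "latest"):
        try:
            return load_prompt(f"prompts:/{prompt_name}@{alias}")
        except Exception:
            continue
    # Final fallback: the inline template; with auto_register=False this is
    # the local-only path described in the docstring above.
    if prompt_model.default_template:
        return prompt_model.default_template
    raise LookupError(f"No prompt registered or inlined for {prompt_name}")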
@@ -1177,25 +1334,41 @@ class DatabricksProvider(ServiceProvider):
  if prompt_model.version or prompt_model.alias:
  try:
  prompt_version: PromptVersion = prompt_model.as_prompt()
+ version_or_alias = (
+ f"version {prompt_model.version}"
+ if prompt_model.version
+ else f"alias {prompt_model.alias}"
+ )
  logger.debug(
- f"Loaded prompt '{prompt_name}' with explicit "
- f"{'version ' + str(prompt_model.version) if prompt_model.version else 'alias ' + prompt_model.alias}"
+ "Loaded prompt with explicit version/alias",
+ prompt_name=prompt_name,
+ version_or_alias=version_or_alias,
  )
  return prompt_version
  except Exception as e:
+ version_or_alias = (
+ f"version {prompt_model.version}"
+ if prompt_model.version
+ else f"alias {prompt_model.alias}"
+ )
  logger.warning(
- f"Failed to load prompt '{prompt_name}' with explicit "
- f"{'version ' + str(prompt_model.version) if prompt_model.version else 'alias ' + prompt_model.alias}: {e}"
+ "Failed to load prompt with explicit version/alias",
+ prompt_name=prompt_name,
+ version_or_alias=version_or_alias,
+ error=str(e),
  )
  # Fall through to try other methods

  # Try to load in priority order: champion → default (with sync check)
- logger.debug(
- f"Trying fallback order for '{prompt_name}': champion → default (with auto-sync)"
+ logger.trace(
+ "Trying prompt fallback order",
+ prompt_name=prompt_name,
+ order="champion → default",
  )

  # First, sync default alias if template has changed (even if champion exists)
- if prompt_model.default_template:
+ # Only do this if auto_register is True
+ if prompt_model.default_template and prompt_model.auto_register:
  try:
  # Try to load existing default
  existing_default = load_prompt(f"prompts:/{prompt_name}@default")
@@ -1207,13 +1380,19 @@ class DatabricksProvider(ServiceProvider):
  champion_matches_default = (
  existing_champion.version == existing_default.version
  )
- logger.debug(
- f"Champion v{existing_champion.version} vs Default v{existing_default.version}: "
- f"{'tracking' if champion_matches_default else 'pinned separately'}"
+ status = (
+ "tracking" if champion_matches_default else "pinned separately"
+ )
+ logger.trace(
+ "Champion vs default version",
+ prompt_name=prompt_name,
+ champion_version=existing_champion.version,
+ default_version=existing_default.version,
+ status=status,
  )
  except Exception:
  # No champion exists
- logger.debug(f"No champion alias found for '{prompt_name}'")
+ logger.trace("No champion alias found", prompt_name=prompt_name)

  # Check if default_template differs from existing default
  if (
@@ -1221,19 +1400,23 @@ class DatabricksProvider(ServiceProvider):
  != prompt_model.default_template.strip()
  ):
  logger.info(
- f"Default template for '{prompt_name}' has changed, "
- "registering new version with default alias"
+ "Default template changed, registering new version",
+ prompt_name=prompt_name,
  )

  # Only update champion if it was pointing to the old default
  if champion_matches_default:
  logger.info(
- f"Champion was tracking default (v{existing_default.version}), "
- "will update champion to new default version"
+ "Champion was tracking default, will update to new version",
+ prompt_name=prompt_name,
+ old_version=existing_default.version,
  )
  set_champion = True
  else:
- logger.info("Champion is pinned separately, preserving it")
+ logger.info(
+ "Champion is pinned separately, preserving it",
+ prompt_name=prompt_name,
+ )
  set_champion = False

  self._register_default_template(
@@ -1244,9 +1427,12 @@ class DatabricksProvider(ServiceProvider):
  )
  except Exception as e:
  # No default exists yet, register it
- logger.debug(f"No default alias found for '{prompt_name}': {e}")
+ logger.trace(
+ "No default alias found", prompt_name=prompt_name, error=str(e)
+ )
  logger.info(
- f"Registering default_template for '{prompt_name}' as default alias"
+ "Registering default template as default alias",
+ prompt_name=prompt_name,
  )
  # First registration - set both default and champion
  self._register_default_template(
@@ -1255,40 +1441,49 @@ class DatabricksProvider(ServiceProvider):
  prompt_model.description,
  set_champion=True,
  )
+ elif prompt_model.default_template and not prompt_model.auto_register:
+ logger.trace(
+ "Prompt has auto_register=False, skipping registration",
+ prompt_name=prompt_name,
+ )

  # 1. Try champion alias (highest priority for execution)
  try:
  prompt_version = load_prompt(f"prompts:/{prompt_name}@champion")
- logger.info(
- f"Loaded prompt '{prompt_name}' from champion alias (default was synced separately)"
- )
+ logger.info("Loaded prompt from champion alias", prompt_name=prompt_name)
  return prompt_version
  except Exception as e:
- logger.debug(f"Champion alias not found for '{prompt_name}': {e}")
+ logger.trace(
+ "Champion alias not found", prompt_name=prompt_name, error=str(e)
+ )

  # 2. Try default alias (already synced above)
  if prompt_model.default_template:
  try:
  prompt_version = load_prompt(f"prompts:/{prompt_name}@default")
- logger.info(f"Loaded prompt '{prompt_name}' from default alias")
+ logger.info("Loaded prompt from default alias", prompt_name=prompt_name)
  return prompt_version
  except Exception as e:
  # Should not happen since we just registered it above, but handle anyway
- logger.debug(f"Default alias not found for '{prompt_name}': {e}")
+ logger.trace(
+ "Default alias not found", prompt_name=prompt_name, error=str(e)
+ )

  # 3. Try latest alias as final fallback
  try:
  prompt_version = load_prompt(f"prompts:/{prompt_name}@latest")
- logger.info(f"Loaded prompt '{prompt_name}' from latest alias")
+ logger.info("Loaded prompt from latest alias", prompt_name=prompt_name)
  return prompt_version
  except Exception as e:
- logger.debug(f"Latest alias not found for '{prompt_name}': {e}")
+ logger.trace(
+ "Latest alias not found", prompt_name=prompt_name, error=str(e)
+ )

  # 4. Final fallback: use default_template directly if available
  if prompt_model.default_template:
  logger.warning(
- f"Could not load prompt '{prompt_name}' from registry. "
- "Using default_template directly (likely in test environment)"
+ "Could not load prompt from registry, using default_template directly",
+ prompt_name=prompt_name,
  )
  return PromptVersion(
  name=prompt_name,
@@ -1325,8 +1520,9 @@ class DatabricksProvider(ServiceProvider):
  logs the error and raises.
  """
  logger.info(
- f"Registering default_template for '{prompt_name}' "
- f"(set_champion={set_champion})"
+ "Registering default template",
+ prompt_name=prompt_name,
+ set_champion=set_champion,
  )

  try:
@@ -1340,15 +1536,24 @@ class DatabricksProvider(ServiceProvider):

  # Always set default alias
  try:
+ logger.debug(
+ "Setting default alias",
+ prompt_name=prompt_name,
+ version=prompt_version.version,
+ )
  mlflow.genai.set_prompt_alias(
  name=prompt_name, alias="default", version=prompt_version.version
  )
- logger.info(
- f"Set default alias for '{prompt_name}' v{prompt_version.version}"
+ logger.success(
+ "Set default alias for prompt",
+ prompt_name=prompt_name,
+ version=prompt_version.version,
  )
  except Exception as alias_error:
  logger.warning(
- f"Could not set default alias for '{prompt_name}': {alias_error}"
+ "Could not set default alias",
+ prompt_name=prompt_name,
+ error=str(alias_error),
  )

  # Optionally set champion alias (only if no champion exists or explicitly requested)
@@ -1359,20 +1564,25 @@ class DatabricksProvider(ServiceProvider):
  alias="champion",
  version=prompt_version.version,
  )
- logger.info(
- f"Set champion alias for '{prompt_name}' v{prompt_version.version}"
+ logger.success(
+ "Set champion alias for prompt",
+ prompt_name=prompt_name,
+ version=prompt_version.version,
  )
  except Exception as alias_error:
  logger.warning(
- f"Could not set champion alias for '{prompt_name}': {alias_error}"
+ "Could not set champion alias",
+ prompt_name=prompt_name,
+ error=str(alias_error),
  )

  return prompt_version

  except Exception as reg_error:
  logger.error(
- f"Failed to register prompt '{prompt_name}': {reg_error}. "
- f"Please register the prompt from a notebook with write permissions before deployment."
+ "Failed to register prompt - please register from notebook with write permissions",
+ prompt_name=prompt_name,
+ error=str(reg_error),
  )
  return PromptVersion(
  name=prompt_name,