dao-ai 0.1.20__tar.gz → 0.1.21__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {dao_ai-0.1.20 → dao_ai-0.1.21}/PKG-INFO +1 -1
- dao_ai-0.1.21/config/examples/04_genie/cache_threshold_optimization.yaml +168 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/04_genie/genie_basic.yaml +7 -2
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/04_genie/genie_in_memory_semantic_cache.yaml +6 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/04_genie/genie_lru_cache.yaml +7 -1
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/04_genie/genie_semantic_cache.yaml +7 -1
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/04_genie/genie_with_conversation_id.yaml +7 -0
- dao_ai-0.1.21/config/examples/17_parallel_tools/README.md +253 -0
- dao_ai-0.1.21/config/examples/17_parallel_tools/parallel_tool_calls.yaml +181 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/README.md +40 -6
- {dao_ai-0.1.20 → dao_ai-0.1.21}/docs/genie_context_aware_cache_prompt_history.md +71 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/docs/key-capabilities.md +13 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/functions/quick_serve_restaurant/match_item_by_description_and_price.sql +2 -2
- dao_ai-0.1.20/notebooks/11_optimize_cache_thresholds.py → dao_ai-0.1.21/notebooks/11_optimize_context_aware_genie_cache.py +88 -79
- dao_ai-0.1.21/notebooks/12_genie_cache_service.py +332 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/pyproject.toml +1 -1
- {dao_ai-0.1.20 → dao_ai-0.1.21}/requirements.txt +1 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/config.py +114 -33
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/genie/cache/__init__.py +11 -9
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/genie/cache/context_aware/__init__.py +21 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/genie/cache/context_aware/base.py +54 -1
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/genie/cache/context_aware/in_memory.py +112 -0
- {dao_ai-0.1.20/src/dao_ai/genie/cache → dao_ai-0.1.21/src/dao_ai/genie/cache/context_aware}/optimization.py +83 -43
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/genie/cache/context_aware/postgres.py +177 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/middleware/__init__.py +8 -1
- dao_ai-0.1.21/src/dao_ai/middleware/tool_call_observability.py +227 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/utils.py +7 -3
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/genie/test_cache_optimization.py +46 -34
- dao_ai-0.1.21/tests/dao_ai/middleware/test_tool_call_observability.py +271 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_context_aware_cache.py +218 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_in_memory_context_aware_cache.py +283 -0
- dao_ai-0.1.21/tests/dao_ai/test_inline_function.py +290 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_prompt_optimizations.py +13 -7
- dao_ai-0.1.20/config/examples/04_genie/cache_threshold_optimization.yaml +0 -180
- {dao_ai-0.1.20 → dao_ai-0.1.21}/.gitignore +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/.python-version +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/CHANGELOG.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/CONTRIBUTING.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/CONTRIBUTORS.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/LICENSE +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/Makefile +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/README.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/app.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/01_getting_started/README.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/01_getting_started/minimal.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/02_mcp/README.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/02_mcp/custom_mcp.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/02_mcp/external_mcp.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/02_mcp/filtered_mcp.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/02_mcp/managed_mcp.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/02_mcp/slack_integration.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/03_reranking/README.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/03_reranking/instruction_aware_reranking.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/03_reranking/vector_search_with_reranking.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/04_genie/README.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/05_memory/README.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/05_memory/conversation_summarization.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/05_memory/in_memory_basic.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/05_memory/lakebase_persistence.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/05_memory/postgres_persistence.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/06_on_behalf_of_user/README.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/06_on_behalf_of_user/obo_basic.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/07_human_in_the_loop/README.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/07_human_in_the_loop/human_in_the_loop.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/08_guardrails/README.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/08_guardrails/guardrails_basic.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/09_structured_output/README.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/09_structured_output/structured_output.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/10_agent_integrations/README.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/10_agent_integrations/agent_bricks.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/10_agent_integrations/kasal.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/11_prompt_engineering/README.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/11_prompt_engineering/prompt_optimization.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/11_prompt_engineering/prompt_registry.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/12_middleware/README.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/12_middleware/combined_middleware.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/12_middleware/context_management.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/12_middleware/custom_field_validation.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/12_middleware/limit_middleware.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/12_middleware/logging_middleware.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/12_middleware/pii_middleware.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/12_middleware/retry_middleware.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/12_middleware/tool_selector_middleware.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/13_orchestration/README.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/13_orchestration/supervisor_pattern.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/13_orchestration/swarm_pattern.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/14_basic_tools/README.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/14_basic_tools/sql_tool_example.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/15_complete_applications/README.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/15_complete_applications/brick_store.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/15_complete_applications/deep_research.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/15_complete_applications/executive_assistant.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/15_complete_applications/genie_and_genie_mcp.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/15_complete_applications/genie_vector_search_hybrid.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/15_complete_applications/hardware_store.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/15_complete_applications/hardware_store_instructed.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/15_complete_applications/hardware_store_lakebase.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/15_complete_applications/hardware_store_swarm.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/15_complete_applications/quick_serve_restaurant.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/15_complete_applications/reservations_system.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/16_instructed_retriever/README.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/16_instructed_retriever/full_pipeline.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/16_instructed_retriever/instructed_retriever.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/dais2025/appointments.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/dais2025/appointments_data.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/dais2025/brand_rep_demo_data.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/dais2025/brand_rep_demo_queries.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/dais2025/brand_rep_demo_tables.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/dais2025/brand_rep_demo_validation.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/dais2025/customers.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/dais2025/customers_data.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/dais2025/dim_stores.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/dais2025/dim_stores_data.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/dais2025/employee_performance.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/dais2025/employee_performance_data.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/dais2025/employee_tasks.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/dais2025/employee_tasks_data.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/dais2025/inventory.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/dais2025/inventory_data.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/dais2025/managers.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/dais2025/managers_data.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/dais2025/product_data.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/dais2025/products.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/dais2025/task_assignments.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/hardware_store/inventory.snappy.parquet +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/hardware_store/inventory.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/hardware_store/products.snappy.parquet +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/hardware_store/products.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/quick_serve_restaurant/.gitkeep +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/quick_serve_restaurant/fulfil_item_orders.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/quick_serve_restaurant/items_description.csv +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/quick_serve_restaurant/items_description.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/quick_serve_restaurant/items_raw.csv +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/quick_serve_restaurant/items_raw.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/quick_serve_restaurant/orders_raw.csv +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/data/quick_serve_restaurant/orders_raw.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/databricks.yaml.template +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/docs/architecture.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/docs/cli-reference.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/docs/configuration-reference.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/docs/contributing.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/docs/examples.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/docs/faq.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/docs/hardware_store/README.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/docs/hardware_store/retail_supervisor.png +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/docs/hardware_store/retail_swarm.png +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/docs/images/genie.png +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/docs/python-api.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/docs/quick_serve_restaurant/.gitkeep +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/docs/quick_serve_restaurant/quick-serve-restaurant.png +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/docs/why-dao.md +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/environment.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/examples/dais2025/examples.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/examples/deep_research/examples.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/examples/executive_assistant/examples.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/examples/hardware_store/examples.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/examples/quick_serve_restaurant/.gitkeep +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/examples/quick_serve_restaurant/examples.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/functions/dais2025/extract_store_numbers.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/functions/dais2025/find_inventory_by_sku.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/functions/dais2025/find_inventory_by_upc.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/functions/dais2025/find_product_by_sku.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/functions/dais2025/find_product_by_upc.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/functions/dais2025/find_store_by_number.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/functions/dais2025/find_store_inventory_by_sku.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/functions/dais2025/find_store_inventory_by_upc.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/functions/hardware_store/find_inventory_by_sku.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/functions/hardware_store/find_inventory_by_upc.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/functions/hardware_store/find_product_by_sku.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/functions/hardware_store/find_product_by_upc.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/functions/hardware_store/find_store_inventory_by_sku.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/functions/hardware_store/find_store_inventory_by_upc.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/functions/quick_serve_restaurant/.gitkeep +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/functions/quick_serve_restaurant/insert_coffee_order.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/functions/quick_serve_restaurant/lookup_items_by_descriptions.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/functions/quick_serve_restaurant/match_historical_item_order_by_date.sql +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/notebooks/01_ingest_and_transform.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/notebooks/02_provision_vector_search.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/notebooks/03_provision_lakebase.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/notebooks/04_unity_catalog_tools.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/notebooks/05_deploy_agent.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/notebooks/06_generate_evaluation_data.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/notebooks/07_run_evaluation.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/notebooks/08_run_examples.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/notebooks/09_evaluate_inferences.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/notebooks/10_optimize_prompts.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/notebooks/99_scratchpad.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/schemas/bundle_config_schema.json +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/schemas/model_config_schema.json +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/__init__.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/apps/__init__.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/apps/handlers.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/apps/model_serving.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/apps/resources.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/apps/server.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/catalog.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/cli.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/evaluation.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/genie/__init__.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/genie/cache/base.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/genie/cache/context_aware/persistent.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/genie/cache/core.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/genie/cache/lru.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/genie/core.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/graph.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/hooks/__init__.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/hooks/core.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/logging.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/memory/__init__.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/memory/base.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/memory/core.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/memory/databricks.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/memory/postgres.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/messages.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/middleware/assertions.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/middleware/base.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/middleware/context_editing.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/middleware/core.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/middleware/guardrails.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/middleware/human_in_the_loop.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/middleware/message_validation.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/middleware/model_call_limit.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/middleware/model_retry.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/middleware/pii.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/middleware/summarization.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/middleware/tool_call_limit.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/middleware/tool_retry.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/middleware/tool_selector.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/models.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/nodes.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/optimization.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/orchestration/__init__.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/orchestration/core.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/orchestration/supervisor.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/orchestration/swarm.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/prompts/__init__.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/prompts/instructed_retriever_decomposition.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/prompts/instruction_reranker.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/prompts/router.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/prompts/verifier.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/providers/__init__.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/providers/base.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/providers/databricks.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/state.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/tools/__init__.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/tools/agent.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/tools/core.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/tools/email.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/tools/genie.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/tools/instructed_retriever.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/tools/instruction_reranker.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/tools/mcp.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/tools/memory.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/tools/python.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/tools/router.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/tools/search.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/tools/slack.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/tools/sql.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/tools/time.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/tools/unity_catalog.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/tools/vector_search.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/tools/verifier.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/types.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/src/dao_ai/vector_search.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/config/test_model_config.yaml +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/conftest.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/genie/cleanup_test_tables.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/genie/test_from_space_integration.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/genie/test_prompt_history.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/middleware/test_context_editing.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/middleware/test_model_call_limit.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/middleware/test_model_retry.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/middleware/test_pii.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/middleware/test_tool_call_limit.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/middleware/test_tool_retry.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/middleware/test_tool_selector.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_agent_response_format.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_assertions_middleware.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_cache_fallback.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_catalog.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_chat_history.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_config.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_databricks.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_evaluation.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_function_parsing.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_genie.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_genie_conversation_ids_in_outputs.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_genie_databricks_integration.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_genie_feedback.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_genie_room_model.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_guardrail_retry.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_hitl_config_model.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_hitl_responses_agent.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_hooks.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_human_in_the_loop.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_inference.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_inference_integration.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_input_output_structure.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_instructed_retriever.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_instruction_reranker.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_instruction_reranker_integration.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_interrupt_type.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_llm_interrupt_handling.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_mcp.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_mcp_filtering.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_mcp_filtering_integration.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_mcp_function_model.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_message_validation_middleware.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_messages.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_models.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_optimization.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_postgres_integration.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_prompts.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_reranking.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_reranking_integration.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_resources_model_genie_integration.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_response_format.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_responses_agent_structured_output_unit.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_router.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_sql_tool.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_sql_tool_integration.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_state.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_summarization_inference.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_swarm_middleware.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_tools.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_types.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_unity_catalog.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_utils.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_utils_type_from_fqn.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_vector_search.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_verifier.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/test_warehouse_model.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/dao_ai/weather_server_mcp.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/hardware_store/.gitkeep +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/hardware_store/test_graph.py +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/images/doritos_upc.png +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/images/lays_upc.png +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/quick_serve_restaurant/.gitkeep +0 -0
- {dao_ai-0.1.20 → dao_ai-0.1.21}/tests/test_mcp_app_auth.py +0 -0

{dao_ai-0.1.20 → dao_ai-0.1.21}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dao-ai
-Version: 0.1.20
+Version: 0.1.21
 Summary: DAO AI: A modular, multi-agent orchestration framework for complex AI workflows. Supports agent handoff, tool integration, and dynamic configuration via YAML.
 Project-URL: Homepage, https://github.com/natefleming/dao-ai
 Project-URL: Documentation, https://natefleming.github.io/dao-ai

dao_ai-0.1.21/config/examples/04_genie/cache_threshold_optimization.yaml
@@ -0,0 +1,168 @@
+# yaml-language-server: $schema=../../../schemas/model_config_schema.json
+#
+# Example configuration for context-aware cache threshold optimization.
+#
+# This configuration demonstrates how to:
+# 1. Define an evaluation dataset with question pairs
+# 2. Configure threshold optimization parameters
+# 3. Run Optuna Bayesian optimization to find optimal thresholds
+#
+# The optimizer tunes these parameters:
+# - similarity_threshold: Minimum similarity for question matching (0.5-0.99)
+# - context_similarity_threshold: Minimum similarity for context matching (0.5-0.99)
+# - question_weight: Weight for question vs context in combined score (0.1-0.9)
+#
+# Usage:
+# 1. Update the evaluation dataset with your domain-specific question pairs
+# 2. Run: config.optimizations.optimize() or use the notebook
+# 3. Apply the optimized thresholds to your cache configuration
+
+schemas:
+  quick_serve_restaurant_schema: &quick_serve_restaurant_schema
+    catalog_name: retail_consumer_goods
+    schema_name: quick_serve_restaurant
+
+resources:
+  llms:
+    # Judge model for semantic equivalence evaluation
+    # Used when expected_match is not provided for an entry
+    judge_model: &judge_model
+      name: databricks-meta-llama-3-3-70b-instruct
+      temperature: 0.0  # Low temperature for consistent judgments
+      max_tokens: 10  # Only need "MATCH" or "NO_MATCH"
+
+    # Embedding model for generating embeddings
+    embedding_model: &embedding_model
+      name: databricks-gte-large-en
+
+  warehouses:
+    shared_endpoint_warehouse: &shared_endpoint_warehouse
+      name: "Shared Endpoint Warehouse"
+      warehouse_id: 148ccb90800933a1
+
+  databases:
+    semantic_cache_db: &semantic_cache_db
+      name: "Retail and Consumer Goods Database"
+      instance_name: "retail-consumer-goods"
+
+
+# =============================================================================
+# OPTIMIZATIONS CONFIGURATION
+# =============================================================================
+# Configure cache threshold optimizations using Optuna Bayesian optimization.
+
+optimizations:
+  cache_threshold_optimizations:
+    # ---------------------------------------------------------------------------
+    # Retail Cache Threshold Optimization
+    # ---------------------------------------------------------------------------
+    optimize_retail_cache_thresholds:
+      name: optimize_retail_cache_thresholds
+
+      # Current thresholds to improve (optional - uses defaults if not provided)
+      cache_parameters:
+        database: *semantic_cache_db
+        warehouse: *shared_endpoint_warehouse
+        embedding_model: *embedding_model
+        similarity_threshold: 0.85  # Question matching threshold
+        context_similarity_threshold: 0.80  # Context matching threshold
+        question_weight: 0.6  # Weight for question (context = 1 - question)
+        time_to_live_seconds: 86400
+
+      # Evaluation dataset with question pairs
+      # Each entry contains:
+      # - question/context: The incoming query
+      # - cached_question/cached_context: The cached entry to compare against
+      # - expected_match: true (should match), false (should not), or omit for LLM judge
+      #
+      # Note: Embeddings are normally pre-computed. Use the notebook or
+      # generate_eval_dataset_from_cache() to create a dataset from cache entries.
+      dataset:
+        name: retail_cache_eval_dataset
+        description: "Evaluation dataset for retail domain semantic cache tuning"
+        entries: []
+        # In practice, populate with real entries like:
+        #
+        # entries:
+        #   # Positive pair - paraphrases that should match
+        #   - question: "What are total sales for Q1?"
+        #     question_embedding: [0.1, 0.2, ...]  # Pre-computed embeddings
+        #     context: "Previous: Show me revenue breakdown"
+        #     context_embedding: [0.1, 0.2, ...]
+        #     cached_question: "Show me Q1 total sales"
+        #     cached_question_embedding: [0.1, 0.2, ...]
+        #     cached_context: "Previous: Show me revenue breakdown"
+        #     cached_context_embedding: [0.1, 0.2, ...]
+        #     expected_match: true
+        #
+        #   # Negative pair - different questions that should NOT match
+        #   - question: "What is inventory count by store?"
+        #     question_embedding: [0.3, 0.1, ...]
+        #     context: ""
+        #     context_embedding: [0.0, 0.0, ...]
+        #     cached_question: "Show revenue by region"
+        #     cached_question_embedding: [0.5, 0.6, ...]
+        #     cached_context: ""
+        #     cached_context_embedding: [0.0, 0.0, ...]
+        #     expected_match: false
+        #
+        #   # Unlabeled entry - LLM judge will determine
+        #   - question: "How many items sold last week?"
+        #     question_embedding: [0.2, 0.3, ...]
+        #     context: "Previous: Filter by electronics"
+        #     context_embedding: [0.1, 0.4, ...]
+        #     cached_question: "Total items sold in past 7 days"
+        #     cached_question_embedding: [0.2, 0.35, ...]
+        #     cached_context: "Previous: Filter by electronics"
+        #     cached_context_embedding: [0.1, 0.4, ...]
+        #     # expected_match omitted - will use LLM judge
+
+      # LLM for judging unlabeled entries
+      judge_model: *judge_model
+
+      # Optimization parameters
+      n_trials: 50  # Number of Optuna trials (more = better results)
+      metric: f1  # Metric to optimize: f1, precision, recall, fbeta
+      beta: 1.0  # Beta for fbeta metric (higher = favor recall)
+      seed: 42  # Random seed for reproducibility
+
+
+# =============================================================================
+# USAGE INSTRUCTIONS
+# =============================================================================
+#
+# 1. PREPARE EVALUATION DATA:
+#    Generate embeddings for your question pairs using the embedding model.
+#    You can use the notebook or the generate_eval_dataset_from_cache() function
+#    to create a dataset from existing cache entries.
+#
+# 2. RUN OPTIMIZATION:
+#    Use the notebook notebooks/11_optimize_context_aware_genie_cache.py,
+#    or run programmatically:
+#
+#    ```python
+#    from dao_ai.config import AppConfig
+#
+#    config = AppConfig.from_file("cache_threshold_optimization.yaml")
+#
+#    # Run all optimizations (prompts and cache thresholds)
+#    results = config.optimizations.optimize()
+#
+#    # Or run a specific cache threshold optimization
+#    optimization = config.optimizations.cache_threshold_optimizations["optimize_retail_cache_thresholds"]
+#    result = optimization.optimize()
+#
+#    print(f"Optimized thresholds: {result.optimized_thresholds}")
+#    print(f"Improvement: {result.improvement:.1%}")
+#    ```
+#
+# 3. APPLY RESULTS:
+#    Update your semantic cache configuration with the optimized values:
+#
+#    semantic_cache_parameters:
+#      similarity_threshold: <optimized_value>
+#      context_similarity_threshold: <optimized_value>
+#      question_weight: <optimized_value>
+#
+# 4. MONITOR:
+#    Track cache hit rates and accuracy in production to validate improvements.
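
The usage instructions in this new file cover running the optimizer, but step 1 ("PREPARE EVALUATION DATA") has no concrete snippet. Below is a minimal sketch of what that step might look like; `build_entry()` and the `embed` callable are hypothetical helpers (not part of dao_ai), and the entry keys simply mirror the commented examples in the file above.

```python
# Sketch: assemble one evaluation entry with pre-computed embeddings.
# `embed` is a placeholder for whatever client you use to call the
# databricks-gte-large-en endpoint; the keys mirror the YAML's commented examples.
from typing import Callable, Optional

import yaml  # PyYAML, for dumping entries you can paste under dataset.entries


def build_entry(
    embed: Callable[[str], list],
    question: str,
    context: str,
    cached_question: str,
    cached_context: str,
    expected_match: Optional[bool] = None,
) -> dict:
    entry = {
        "question": question,
        "question_embedding": embed(question),
        "context": context,
        "context_embedding": embed(context),
        "cached_question": cached_question,
        "cached_question_embedding": embed(cached_question),
        "cached_context": cached_context,
        "cached_context_embedding": embed(cached_context),
    }
    if expected_match is not None:  # omit to fall back to the LLM judge
        entry["expected_match"] = expected_match
    return entry


# Example with a fake embedder; replace with a real call to your embedding endpoint.
fake_embed = lambda text: [0.0] * 1024
entries = [
    build_entry(
        fake_embed,
        question="What are total sales for Q1?",
        context="Previous: Show me revenue breakdown",
        cached_question="Show me Q1 total sales",
        cached_context="Previous: Show me revenue breakdown",
        expected_match=True,
    )
]
print(yaml.safe_dump({"entries": entries}, sort_keys=False))
```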

{dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/04_genie/genie_basic.yaml
@@ -16,8 +16,6 @@ resources:
       max_tokens: 8192  # Maximum tokens per response
       on_behalf_of_user: False
 
-
-
   genie_rooms:
     # Genie space for retail data queries
     retail_genie_room: &retail_genie_room
@@ -27,6 +25,13 @@ resources:
         env: RETAIL_AI_GENIE_SPACE_ID
         default_value: 01f01c91f1f414d59daaefd2b7ec82ea
 
+      tables:
+        # The `retail_consumer_goods`.`quick_serve_restaurant`.`lookup_items_by_descriptions` function is used to look up the items description vector index.
+        # The function will be implicitly granted permission to the genie agent, however, we need to grant permission to the index for the genie agent to use it.
+        items_description_vs_index:
+          schema: *quick_serve_restaurant_schema
+          name: items_description_vs_index
+
 tools:
   genie_tool: &genie_tool
     name: genie

{dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/04_genie/genie_in_memory_semantic_cache.yaml RENAMED
@@ -51,6 +51,12 @@ resources:
         env: RETAIL_AI_GENIE_SPACE_ID
         default_value: 01f01c91f1f414d59daaefd2b7ec82ea
 
+      tables:
+        # The `retail_consumer_goods`.`quick_serve_restaurant`.`lookup_items_by_descriptions` function is used to look up the items description vector index.
+        # The function will be implicitly granted permission to the genie agent, however, we need to grant permission to the index for the genie agent to use it.
+        items_description_vs_index:
+          schema: *quick_serve_restaurant_schema
+          name: items_description_vs_index
 
 # =============================================================================
 # MEMORY CONFIGURATION

{dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/04_genie/genie_lru_cache.yaml
@@ -33,7 +33,13 @@ resources:
         env: RETAIL_AI_GENIE_SPACE_ID
         default_value: 01f01c91f1f414d59daaefd2b7ec82ea
 
-
+      tables:
+        # The `retail_consumer_goods`.`quick_serve_restaurant`.`lookup_items_by_descriptions` function is used to look up the items description vector index.
+        # The function will be implicitly granted permission to the genie agent, however, we need to grant permission to the index for the genie agent to use it.
+        items_description_vs_index:
+          schema: *quick_serve_restaurant_schema
+          name: items_description_vs_index
+
 # =============================================================================
 # MEMORY CONFIGURATION
 # =============================================================================

{dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/04_genie/genie_semantic_cache.yaml
@@ -53,7 +53,13 @@ resources:
         env: RETAIL_AI_GENIE_SPACE_ID
         default_value: 01f01c91f1f414d59daaefd2b7ec82ea
 
-
+      tables:
+        # The `retail_consumer_goods`.`quick_serve_restaurant`.`lookup_items_by_descriptions` function is used to look up the items description vector index.
+        # The function will be implicitly granted permission to the genie agent, however, we need to grant permission to the index for the genie agent to use it.
+        items_description_vs_index:
+          schema: *quick_serve_restaurant_schema
+          name: items_description_vs_index
+
 # =============================================================================
 # MEMORY CONFIGURATION
 # =============================================================================

{dao_ai-0.1.20 → dao_ai-0.1.21}/config/examples/04_genie/genie_with_conversation_id.yaml
@@ -75,6 +75,13 @@ resources:
         default_value: 01f01c91f1f414d59daaefd2b7ec82ea
       on_behalf_of_user: false
 
+      tables:
+        # The `retail_consumer_goods`.`quick_serve_restaurant`.`lookup_items_by_descriptions` function is used to look up the items description vector index.
+        # The function will be implicitly granted permission to the genie agent, however, we need to grant permission to the index for the genie agent to use it.
+        items_description_vs_index:
+          schema: *quick_serve_restaurant_schema
+          name: items_description_vs_index
+
 memory: &memory
   # Conversation checkpointing for state persistence
   checkpointer:

dao_ai-0.1.21/config/examples/17_parallel_tools/README.md
@@ -0,0 +1,253 @@
+# 17. Parallel Tool Calls
+
+**Maximize agent performance with concurrent tool execution**
+
+Learn how to enable and observe parallel tool calling, where the LLM requests multiple tools in a single response and they execute concurrently.
+
+## Architecture Overview
+
+```mermaid
+%%{init: {'theme': 'base', 'themeVariables': { 'primaryColor': '#7b1fa2'}}}%%
+flowchart TB
+    subgraph Sequential["Sequential Execution (Slow)"]
+        direction TB
+        S1["Tool A"] --> S2["Tool B"] --> S3["Tool C"]
+        ST["Total: 3 round trips"]
+    end
+
+    subgraph Parallel["Parallel Execution (Fast)"]
+        direction TB
+        P1["Tool A"]
+        P2["Tool B"]
+        P3["Tool C"]
+        PT["Total: 1 round trip"]
+    end
+
+    User["User Query"] --> LLM["LLM Decision"]
+    LLM -->|"One at a time"| Sequential
+    LLM -->|"All at once"| Parallel
+
+    style Sequential fill:#ffebee,stroke:#c62828
+    style Parallel fill:#e8f5e9,stroke:#2e7d32
+```
+
+## Examples
+
+| File | Description |
+|------|-------------|
+| [`parallel_tool_calls.yaml`](./parallel_tool_calls.yaml) | Complete example with inline tools and observability middleware |
+
+## Key Concepts
+
+### 1. Parallel Tool Calling
+
+When an LLM needs multiple pieces of independent information, it can request all tools in a single response:
+
+```mermaid
+%%{init: {'theme': 'base'}}%%
+sequenceDiagram
+    autonumber
+    participant User
+    participant LLM
+    participant ToolNode
+    participant Tools
+
+    User->>LLM: "Calculate 5+3, 10*2, and 100/4"
+    LLM->>ToolNode: [calc(5+3), calc(10*2), calc(100/4)]
+    Note over ToolNode: Single response with 3 tool calls
+
+    par Parallel Execution
+        ToolNode->>Tools: calc(5+3)
+        ToolNode->>Tools: calc(10*2)
+        ToolNode->>Tools: calc(100/4)
+    end
+
+    Tools-->>ToolNode: [8, 20, 25]
+    ToolNode-->>LLM: All results
+    LLM-->>User: "5+3=8, 10*2=20, 100/4=25"
+```
+
+### 2. Inline Tool Definitions
+
+Define simple tools directly in YAML without separate Python files:
+
+```yaml
+tools:
+  calculator:
+    name: calculator
+    function:
+      type: inline
+      code: |
+        from langchain.tools import tool
+
+        @tool
+        def calculator(expression: str) -> str:
+            """Evaluate a mathematical expression."""
+            return str(eval(expression))
+```
+
+### 3. Tool Call Observability
+
+Monitor parallel vs sequential tool calling patterns:
+
+```yaml
+middleware:
+  - name: dao_ai.middleware.tool_call_observability.create_tool_call_observability_middleware
+    args:
+      log_level: INFO
+      include_args: true
+```
+
+## Prompt Engineering for Parallel Calls
+
+The key to enabling parallel tool calls is **explicit instruction** in the system prompt:
+
+```yaml
+prompt: |
+  You are a helpful assistant with access to various tools.
+
+  ## CRITICAL: Parallel Tool Execution
+
+  **ALWAYS call multiple tools simultaneously when they are independent.**
+
+  When you need to perform multiple independent operations, you MUST call ALL
+  relevant tools in a SINGLE response. Do NOT call them one at a time.
+
+  Examples of CORRECT parallel behavior:
+  - User asks for time in 3 cities -> Call get_time 3 times IN ONE RESPONSE
+  - User asks for 3 calculations -> Call calculator 3 times IN ONE RESPONSE
+  - User asks to look up items 101, 102, 103 -> Call lookup 3 times IN ONE RESPONSE
+
+  Only call tools sequentially when one tool's output is needed as INPUT for another.
+```
+
+## Observability Output
+
+The observability middleware provides detailed logging:
+
+### Parallel Calls Detected
+```
+INFO | PARALLEL tool calls detected | num_tools=3 | tool_names=calculator,calculator,calculator
+INFO | Tool: calculator | args={'expression': '5 + 3'}
+INFO | Tool: calculator | args={'expression': '10 * 2'}
+INFO | Tool: calculator | args={'expression': '100 / 4'}
+```
+
+### Summary Statistics
+```
+INFO | Tool Call Observability Summary
+     | total_model_calls=2
+     | total_tool_calls=3
+     | parallel_batches=1
+     | sequential_calls=0
+     | parallelism_ratio=100.0%
+
+SUCCESS | Parallel tool calling IS happening: 1 batches with multiple tools
+```
+
+### Sequential Calls Warning
+```
+WARNING | All tool calls are SEQUENTIAL: 5 single-tool responses.
+        | Consider prompt engineering to encourage parallel calls.
+```
+
+## Quick Start
+
+```bash
+# Run the parallel tool calls example
+dao-ai chat -c config/examples/17_parallel_tools/parallel_tool_calls.yaml
+
+# Test queries that should trigger parallel calls:
+> What is 5+3, 10*2, and 100/4?
+> Look up items 101, 102, and 201
+> Roll three dice for me
+```
+
+## Performance Benefits
+
+```mermaid
+%%{init: {'theme': 'base'}}%%
+graph LR
+    subgraph Before["Before: Sequential"]
+        B1["3 tools x 500ms each = 1500ms"]
+    end
+
+    subgraph After["After: Parallel"]
+        A1["3 tools concurrent = 500ms"]
+    end
+
+    Before -->|"3x faster"| After
+
+    style Before fill:#ffebee,stroke:#c62828
+    style After fill:#e8f5e9,stroke:#2e7d32
+```
+
+| Scenario | Sequential | Parallel | Speedup |
+|----------|------------|----------|---------|
+| 3 independent lookups | 1.5s | 0.5s | 3x |
+| 5 API calls | 2.5s | 0.5s | 5x |
+| 10 database queries | 5.0s | 0.5s | 10x |
+
+## Inline Tools Reference
+
+The `inline` function type allows defining tools directly in YAML:
+
+```yaml
+tools:
+  my_tool:
+    name: my_tool
+    function:
+      type: inline
+      code: |
+        from langchain.tools import tool
+
+        @tool
+        def my_tool(param: str) -> str:
+            """Tool description shown to the LLM."""
+            # Your tool logic here
+            return f"Result: {param}"
+```
+
+### Requirements
+
+- Must import `@tool` decorator from `langchain.tools`
+- Must define at least one function decorated with `@tool`
+- The function docstring becomes the tool description
+- Return type should be `str` for best compatibility
+
+### Use Cases
+
+- Prototyping and testing
+- Simple utility tools
+- Demo configurations
+- Learning and experimentation
+
+For production tools, consider using `type: python` or `type: factory` with proper module organization.
+
+## Troubleshooting
+
+| Issue | Solution |
+|-------|----------|
+| Tools called sequentially | Add explicit parallel instructions to prompt |
+| Model ignores parallel prompt | Try more emphatic wording, use examples |
+| Observability not logging | Ensure middleware is first in list |
+| Inline tool errors | Check imports and `@tool` decorator |
+
+## Best Practices
+
+1. **Prompt Engineering**: Explicitly instruct the model to batch independent operations
+2. **Observability**: Always add the observability middleware during development
+3. **Test Queries**: Use queries that naturally require multiple independent operations
+4. **Monitor Parallelism Ratio**: Aim for high parallelism ratio in your use cases
+
+## Next Steps
+
+- **12_middleware/** - Learn about other middleware options
+- **14_basic_tools/** - Explore tool definition patterns
+- **15_complete_applications/** - See parallel tools in production configs
+
+## Related Documentation
+
+- [Tool Configuration](../../../docs/key-capabilities.md#tools)
+- [Middleware Configuration](../../../docs/key-capabilities.md#middleware)
+- [Performance Optimization](../../../docs/architecture.md#performance)
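
The performance table in this new README is easy to sanity-check without any agent framework. The sketch below uses only the Python standard library (no dao_ai or LangChain APIs) to time three simulated 500 ms tools run sequentially versus in a single concurrent batch, reproducing the roughly 3x difference claimed above.

```python
# Framework-agnostic illustration of the latency math in the README's table:
# three independent 500 ms "tools" run one at a time vs. in one concurrent batch.
import time
from concurrent.futures import ThreadPoolExecutor


def fake_tool(expression: str) -> str:
    time.sleep(0.5)  # simulated tool latency, as in the example config
    return f"{expression} = {eval(expression)}"


calls = ["5 + 3", "10 * 2", "100 / 4"]

start = time.perf_counter()
sequential = [fake_tool(c) for c in calls]           # one round trip per tool
print(f"sequential: {time.perf_counter() - start:.2f}s -> {sequential}")

start = time.perf_counter()
with ThreadPoolExecutor() as pool:                   # all tool calls in one batch
    parallel = list(pool.map(fake_tool, calls))
print(f"parallel:   {time.perf_counter() - start:.2f}s -> {parallel}")
```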

dao_ai-0.1.21/config/examples/17_parallel_tools/parallel_tool_calls.yaml
@@ -0,0 +1,181 @@
+# yaml-language-server: $schema=../../schemas/model_config_schema.json
+#
+# Test configuration for parallel tool calling observability
+# Run with: dao chat --config config/examples/parallel_tool_test.yaml
+#
+# Test queries to try:
+# - "Get the current time in New York, London, and Tokyo"
+# - "What is 5+3, 10*2, and 100/4?"
+# - "Roll three dice for me"
+
+resources:
+  llms:
+    default_llm: &default_llm
+      name: databricks-claude-sonnet-4
+      temperature: 0.1
+
+tools:
+  # Simple calculator tool
+  calculator: &calculator_tool
+    name: calculator
+    function:
+      type: inline
+      code: |
+        from langchain.tools import tool
+
+        @tool
+        def calculator(expression: str) -> str:
+            """
+            Evaluate a mathematical expression.
+
+            Args:
+                expression: A mathematical expression like "5 + 3" or "10 * 2"
+
+            Returns:
+                The result of the calculation
+            """
+            import time
+            time.sleep(0.5)  # Simulate some latency
+            try:
+                result = eval(expression)
+                return f"Result: {expression} = {result}"
+            except Exception as e:
+                return f"Error evaluating '{expression}': {e}"
+
+  # Get current time tool
+  get_time: &get_time_tool
+    name: get_time
+    function:
+      type: inline
+      code: |
+        from langchain.tools import tool
+
+        @tool
+        def get_time(timezone: str = "UTC") -> str:
+            """
+            Get the current time in a specified timezone.
+
+            Args:
+                timezone: The timezone name (e.g., "UTC", "US/Eastern", "Europe/London", "Asia/Tokyo")
+
+            Returns:
+                The current time in that timezone
+            """
+            import time
+            time.sleep(0.5)  # Simulate some latency
+            from datetime import datetime
+            try:
+                import pytz
+                tz = pytz.timezone(timezone)
+                current_time = datetime.now(tz)
+                return f"Current time in {timezone}: {current_time.strftime('%Y-%m-%d %H:%M:%S %Z')}"
+            except Exception:
+                from datetime import datetime
+                return f"Current time (UTC): {datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')} (timezone '{timezone}' not available)"
+
+  # Random number generator tool
+  random_number: &random_number_tool
+    name: random_number
+    function:
+      type: inline
+      code: |
+        from langchain.tools import tool
+
+        @tool
+        def random_number(min_val: int = 1, max_val: int = 100, label: str = "random") -> str:
+            """
+            Generate a random number between min and max values.
+
+            Args:
+                min_val: Minimum value (inclusive)
+                max_val: Maximum value (inclusive)
+                label: A label for this random number (e.g., "dice1", "lottery")
+
+            Returns:
+                A random number with its label
+            """
+            import time
+            import random
+            time.sleep(0.5)  # Simulate some latency
+            result = random.randint(min_val, max_val)
+            return f"{label}: {result}"
+
+  # Lookup tool (simulates database lookup)
+  lookup: &lookup_tool
+    name: lookup
+    function:
+      type: inline
+      code: |
+        from langchain.tools import tool
+
+        @tool
+        def lookup(item_id: str) -> str:
+            """
+            Look up information about an item by its ID.
+
+            Args:
+                item_id: The ID of the item to look up
+
+            Returns:
+                Information about the item
+            """
+            import time
+            time.sleep(0.5)  # Simulate database latency
+            # Simulated data
+            items = {
+                "101": {"name": "Widget A", "price": 9.99, "stock": 150},
+                "102": {"name": "Widget B", "price": 14.99, "stock": 75},
+                "103": {"name": "Widget C", "price": 24.99, "stock": 200},
+                "201": {"name": "Gadget X", "price": 49.99, "stock": 50},
+                "202": {"name": "Gadget Y", "price": 79.99, "stock": 25},
+            }
+            if item_id in items:
+                item = items[item_id]
+                return f"Item {item_id}: {item['name']} - ${item['price']:.2f} ({item['stock']} in stock)"
+            return f"Item {item_id} not found"
+
+agents:
+  parallel_test_agent: &parallel_test_agent
+    name: parallel_test_agent
+    model: *default_llm
+    tools:
+      - *calculator_tool
+      - *get_time_tool
+      - *random_number_tool
+      - *lookup_tool
+    middleware:
+      # Add observability middleware to track parallel vs sequential calls
+      - name: dao_ai.middleware.tool_call_observability.create_tool_call_observability_middleware
+        args:
+          log_level: INFO
+          include_args: true
+          track_timing: true
+    prompt: |
+      You are a helpful assistant with access to various tools.
+
+      ## CRITICAL: Parallel Tool Execution
+
+      **ALWAYS call multiple tools simultaneously when they are independent.**
+
+      When you need to perform multiple independent operations, you MUST call ALL
+      relevant tools in a SINGLE response. Do NOT call them one at a time.
+
+      Examples of CORRECT parallel behavior:
+      - User asks for time in 3 cities → Call get_time 3 times IN ONE RESPONSE
+      - User asks for 3 calculations → Call calculator 3 times IN ONE RESPONSE
+      - User asks to look up items 101, 102, 103 → Call lookup 3 times IN ONE RESPONSE
+
+      The system will execute parallel calls concurrently, which is much faster.
+
+      Only call tools sequentially when one tool's output is needed as INPUT for another.
+
+app:
+  name: parallel_tool_test
+  log_level: INFO
+  registered_model:
+    schema:
+      catalog_name: main
+      schema_name: default
+    name: parallel_tool_test
+  agents:
+    - *parallel_test_agent
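
One note on the inline tools in this config: the `calculator` tool evaluates its input with `eval()`, which is fine for a demo but executes arbitrary Python. A safer drop-in for non-demo use is a whitelisted arithmetic evaluator; the sketch below is one possible replacement (an illustration, not part of this package), using only the standard library.

```python
# Alternative to eval(): restrict input to arithmetic by walking the AST with a
# small operator whitelist. Anything outside numbers and basic operators is rejected.
import ast
import operator

_OPS = {
    ast.Add: operator.add, ast.Sub: operator.sub,
    ast.Mult: operator.mul, ast.Div: operator.truediv,
    ast.Pow: operator.pow, ast.Mod: operator.mod,
    ast.USub: operator.neg, ast.UAdd: operator.pos,
}


def safe_eval(expression: str) -> float:
    """Evaluate a purely arithmetic expression such as '5 + 3' or '100 / 4'."""
    def _eval(node: ast.AST) -> float:
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in _OPS:
            return _OPS[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in _OPS:
            return _OPS[type(node.op)](_eval(node.operand))
        raise ValueError(f"unsupported expression: {expression!r}")

    return _eval(ast.parse(expression, mode="eval"))


print(safe_eval("5 + 3"), safe_eval("10 * 2"), safe_eval("100 / 4"))  # 8 20 25.0
```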