holmesgpt 0.14.0a0__tar.gz → 0.14.1__tar.gz
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Note: this version of holmesgpt has been flagged as a potentially problematic release.
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/PKG-INFO +10 -14
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/README.md +8 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/__init__.py +1 -1
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/clients/robusta_client.py +15 -4
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/common/env_vars.py +8 -1
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/config.py +66 -139
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/core/investigation.py +1 -2
- holmesgpt-0.14.1/holmes/core/llm.py +570 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/core/models.py +2 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/core/safeguards.py +4 -4
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/core/supabase_dal.py +14 -8
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/core/tool_calling_llm.py +110 -102
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/core/tools.py +260 -25
- holmesgpt-0.14.1/holmes/core/tools_utils/data_types.py +81 -0
- holmesgpt-0.14.1/holmes/core/tools_utils/tool_context_window_limiter.py +33 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/core/tools_utils/tool_executor.py +2 -2
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/core/toolset_manager.py +150 -3
- holmesgpt-0.14.1/holmes/core/transformers/__init__.py +23 -0
- holmesgpt-0.14.1/holmes/core/transformers/base.py +62 -0
- holmesgpt-0.14.1/holmes/core/transformers/llm_summarize.py +174 -0
- holmesgpt-0.14.1/holmes/core/transformers/registry.py +122 -0
- holmesgpt-0.14.1/holmes/core/transformers/transformer.py +31 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/main.py +5 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/prompts/_fetch_logs.jinja2 +10 -1
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/aks-node-health.yaml +46 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/aks.yaml +64 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/atlas_mongodb/mongodb_atlas.py +17 -15
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/azure_sql/tools/analyze_connection_failures.py +8 -4
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/azure_sql/tools/analyze_database_connections.py +7 -3
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/azure_sql/tools/analyze_database_health_status.py +3 -3
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/azure_sql/tools/analyze_database_performance.py +3 -3
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/azure_sql/tools/analyze_database_storage.py +7 -3
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/azure_sql/tools/get_active_alerts.py +4 -4
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/azure_sql/tools/get_slow_queries.py +7 -3
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/azure_sql/tools/get_top_cpu_queries.py +7 -3
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/azure_sql/tools/get_top_data_io_queries.py +7 -3
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/azure_sql/tools/get_top_log_io_queries.py +7 -3
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/bash_toolset.py +6 -6
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/common/bash.py +7 -7
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/coralogix/toolset_coralogix_logs.py +5 -3
- holmesgpt-0.14.1/holmes/plugins/toolsets/datadog/datadog_api.py +682 -0
- holmesgpt-0.14.1/holmes/plugins/toolsets/datadog/datadog_logs_instructions.jinja2 +54 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/datadog/toolset_datadog_general.py +344 -205
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/datadog/toolset_datadog_logs.py +189 -17
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/datadog/toolset_datadog_metrics.py +95 -30
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/datadog/toolset_datadog_rds.py +10 -10
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/datadog/toolset_datadog_traces.py +20 -20
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/git.py +21 -21
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/grafana/common.py +2 -2
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/grafana/toolset_grafana.py +4 -4
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/grafana/toolset_grafana_loki.py +5 -4
- holmesgpt-0.14.1/holmes/plugins/toolsets/grafana/toolset_grafana_tempo.jinja2 +247 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/grafana/toolset_grafana_tempo.py +165 -307
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/internet/internet.py +3 -3
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/internet/notion.py +3 -3
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/investigator/core_investigation.py +3 -3
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/kafka.py +18 -18
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/kubernetes.yaml +58 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/kubernetes_logs.py +6 -6
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/kubernetes_logs.yaml +32 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/logging_utils/logging_api.py +1 -1
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/mcp/toolset_mcp.py +4 -4
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/newrelic.py +5 -5
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/opensearch/opensearch.py +5 -5
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/opensearch/opensearch_logs.py +7 -7
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/opensearch/opensearch_traces.py +10 -10
- holmesgpt-0.14.1/holmes/plugins/toolsets/prometheus/prometheus.py +1569 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/prometheus/prometheus_instructions.jinja2 +39 -2
- holmesgpt-0.14.1/holmes/plugins/toolsets/prometheus/utils.py +28 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/rabbitmq/toolset_rabbitmq.py +6 -4
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/robusta/robusta.py +10 -10
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/runbook/runbook_fetcher.py +4 -4
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/servicenow/servicenow.py +6 -6
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/utils.py +88 -0
- holmesgpt-0.14.1/holmes/utils/config_utils.py +91 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/utils/env.py +7 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/utils/holmes_status.py +2 -1
- holmesgpt-0.14.1/holmes/utils/sentry_helper.py +41 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/utils/stream.py +9 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/pyproject.toml +9 -16
- holmesgpt-0.14.0a0/holmes/core/llm.py +0 -327
- holmesgpt-0.14.0a0/holmes/plugins/toolsets/datadog/datadog_api.py +0 -216
- holmesgpt-0.14.0a0/holmes/plugins/toolsets/datadog/datadog_logs_instructions.jinja2 +0 -43
- holmesgpt-0.14.0a0/holmes/plugins/toolsets/grafana/toolset_grafana_tempo.jinja2 +0 -147
- holmesgpt-0.14.0a0/holmes/plugins/toolsets/prometheus/prometheus.py +0 -1079
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/LICENSE.txt +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/.git_archival.json +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/common/openshift.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/core/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/core/config.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/core/conversations.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/core/investigation_structured_output.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/core/issue.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/core/openai_formatting.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/core/performance_timing.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/core/prompt.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/core/resource_instruction.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/core/runbooks.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/core/todo_tasks_formatter.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/core/tools_utils/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/core/tools_utils/toolset_utils.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/core/tracing.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/interactive.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/destinations/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/destinations/slack/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/destinations/slack/plugin.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/interfaces.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/prompts/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/prompts/_ai_safety.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/prompts/_current_date_time.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/prompts/_default_log_prompt.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/prompts/_general_instructions.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/prompts/_global_instructions.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/prompts/_permission_errors.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/prompts/_runbook_instructions.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/prompts/_toolsets_instructions.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/prompts/generic_ask.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/prompts/generic_ask_conversation.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/prompts/generic_ask_for_issue_conversation.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/prompts/generic_investigation.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/prompts/generic_post_processing.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/prompts/generic_ticket.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/prompts/investigation_output_format.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/prompts/investigation_procedure.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/prompts/kubernetes_workload_ask.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/prompts/kubernetes_workload_chat.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/runbooks/CLAUDE.md +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/runbooks/README.md +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/runbooks/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/runbooks/catalog.json +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/runbooks/jira.yaml +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/runbooks/kube-prometheus-stack.yaml +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/runbooks/networking/dns_troubleshooting_instructions.md +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/runbooks/upgrade/upgrade_troubleshooting_instructions.md +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/sources/github/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/sources/jira/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/sources/opsgenie/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/sources/pagerduty/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/sources/prometheus/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/sources/prometheus/models.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/sources/prometheus/plugin.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/argocd.yaml +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/atlas_mongodb/instructions.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/aws.yaml +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/azure_sql/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/azure_sql/apis/alert_monitoring_api.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/azure_sql/apis/azure_sql_api.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/azure_sql/apis/connection_failure_api.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/azure_sql/apis/connection_monitoring_api.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/azure_sql/apis/storage_analysis_api.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/azure_sql/azure_base_toolset.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/azure_sql/azure_sql_instructions.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/azure_sql/azure_sql_toolset.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/azure_sql/install.md +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/azure_sql/tools/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/azure_sql/utils.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/argocd/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/argocd/constants.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/aws/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/aws/constants.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/azure/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/azure/constants.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/bash_instructions.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/common/bash_command.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/common/config.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/common/stringify.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/common/validators.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/docker/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/docker/constants.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/helm/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/helm/constants.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/kubectl/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/kubectl/constants.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/kubectl/kubectl_describe.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/kubectl/kubectl_events.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/kubectl/kubectl_get.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/kubectl/kubectl_logs.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/kubectl/kubectl_run.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/kubectl/kubectl_top.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/parse_command.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/utilities/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/utilities/base64_util.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/utilities/cut.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/utilities/grep/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/utilities/head.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/utilities/jq.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/utilities/sed.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/utilities/sort.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/utilities/tail.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/utilities/tr.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/utilities/uniq.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/bash/utilities/wc.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/confluence.yaml +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/consts.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/coralogix/api.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/coralogix/utils.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/datadog/datadog_general_instructions.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/datadog/datadog_metrics_instructions.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/datadog/datadog_rds_instructions.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/datadog/datadog_traces_formatter.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/datadog/instructions_datadog_traces.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/docker.yaml +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/grafana/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/grafana/base_grafana_toolset.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/grafana/grafana_api.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/grafana/grafana_tempo_api.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/grafana/loki_api.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/grafana/trace_parser.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/helm.yaml +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/investigator/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/investigator/investigator_instructions.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/investigator/model.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/logging_utils/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/logging_utils/types.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/opensearch/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/opensearch/opensearch_traces_instructions.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/opensearch/opensearch_utils.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/rabbitmq/api.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/rabbitmq/rabbitmq_instructions.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/robusta/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/robusta/robusta_instructions.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/runbook/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/service_discovery.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/servicenow/install.md +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/servicenow/instructions.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/toolsets/slab.yaml +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/plugins/utils.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/utils/__init__.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/utils/cache.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/utils/cert_utils.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/utils/colors.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/utils/console/consts.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/utils/console/logging.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/utils/console/result.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/utils/default_toolset_installation_guide.jinja2 +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/utils/definitions.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/utils/file_utils.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/utils/global_instructions.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/utils/holmes_sync_toolsets.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/utils/keygen_utils.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/utils/llms.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/utils/markdown_utils.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/utils/pydantic_utils.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/utils/tags.py +0 -0
- {holmesgpt-0.14.0a0 → holmesgpt-0.14.1}/holmes/version.py +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: holmesgpt
-Version: 0.14.0a0
+Version: 0.14.1
 Summary:
 Author: Natan Yellin
 Author-email: natan@robusta.dev
@@ -8,7 +8,6 @@ Requires-Python: >=3.10,<4.0
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
-Requires-Dist: aiohttp (>=3.10.2,<4.0.0)
 Requires-Dist: azure-core (>=1.34.0,<2.0.0)
 Requires-Dist: azure-identity (>=1.23.0,<2.0.0)
 Requires-Dist: azure-mgmt-alertsmanagement (>=1.0.0,<2.0.0)
@@ -24,11 +23,10 @@ Requires-Dist: certifi (>=2024.7.4,<2025.0.0)
 Requires-Dist: colorlog (>=6.8.2,<7.0.0)
 Requires-Dist: confluent-kafka (>=2.6.1,<3.0.0)
 Requires-Dist: fastapi (>=0.116,<0.117)
-Requires-Dist: google-api-python-client (>=2.156.0,<3.0.0)
 Requires-Dist: humanize (>=4.9.0,<5.0.0)
 Requires-Dist: jinja2 (>=3.1.2,<4.0.0)
 Requires-Dist: kubernetes (>=32.0.1,<33.0.0)
-Requires-Dist: litellm (
+Requires-Dist: litellm (==1.77.1)
 Requires-Dist: markdown (>=3.6,<4.0)
 Requires-Dist: markdownify (>=1.1.0,<2.0.0)
 Requires-Dist: mcp (==v1.12.2)
@@ -37,28 +35,18 @@ Requires-Dist: opensearch-py (>=2.8.0,<3.0.0)
 Requires-Dist: postgrest (==0.16.8)
 Requires-Dist: prometrix (==0.2.5)
 Requires-Dist: prompt-toolkit (>=3.0.51,<4.0.0)
-Requires-Dist: protobuf (>=6.31.1)
 Requires-Dist: pydantic (>=2.7,<3.0)
-Requires-Dist: pydantic-settings (>=2.1.0,<3.0.0)
-Requires-Dist: pydash (>=8.0.1,<9.0.0)
 Requires-Dist: pygments (>=2.18.0,<3.0.0)
 Requires-Dist: pyodbc (>=5.0.1,<6.0.0)
-Requires-Dist: pytest-shared-session-scope (>=0.4.0,<0.5.0)
 Requires-Dist: python-benedict (>=0.33.1,<0.34.0)
-Requires-Dist: python_multipart (>=0.0.18,<0.0.19)
-Requires-Dist: pyyaml (>=6.0.1,<7.0.0)
 Requires-Dist: requests (>=2.32.4,<3.0.0)
 Requires-Dist: requests-aws4auth (>=1.3.1,<2.0.0)
 Requires-Dist: rich (>=13.7.1,<14.0.0)
 Requires-Dist: sentry-sdk[fastapi] (>=2.20.0,<3.0.0)
-Requires-Dist: setuptools (>=80.9.0,<81.0.0)
-Requires-Dist: slack-bolt (>=1.18.1,<2.0.0)
-Requires-Dist: starlette (==0.47.2)
 Requires-Dist: strenum (>=0.4.15,<0.5.0)
 Requires-Dist: supabase (>=2.5,<3.0)
 Requires-Dist: tenacity (>=9.1.2,<10.0.0)
 Requires-Dist: typer (>=0.15.4,<0.16.0)
-Requires-Dist: urllib3 (>=1.26.19,<2.0.0)
 Requires-Dist: uvicorn (>=0.30,<0.31)
 Description-Content-Type: text/markdown

@@ -223,6 +211,14 @@ You can save common settings and API Keys in a config file to avoid passing them
 You can save common settings and API keys in config file for re-use. Place the config file in <code>~/.holmes/config.yaml`</code> or pass it using the <code> --config</code>

 You can view an example config file with all available settings [here](config.example.yaml).
+
+### Tool Output Transformers
+
+HolmesGPT supports **transformers** to process large tool outputs before sending them to your primary LLM. This feature helps manage context window limits while preserving essential information.
+
+The most common transformer is `llm_summarize`, which uses a fast secondary model to summarize lengthy outputs from tools like `kubectl describe`, log queries, or metrics collection.
+
+📖 **Learn more**: [Tool Output Transformers Documentation](docs/transformers.md)
 </details>

 ## 🔐 Data Privacy

README.md

@@ -159,6 +159,14 @@ You can save common settings and API Keys in a config file to avoid passing them
 You can save common settings and API keys in config file for re-use. Place the config file in <code>~/.holmes/config.yaml`</code> or pass it using the <code> --config</code>

 You can view an example config file with all available settings [here](config.example.yaml).
+
+### Tool Output Transformers
+
+HolmesGPT supports **transformers** to process large tool outputs before sending them to your primary LLM. This feature helps manage context window limits while preserving essential information.
+
+The most common transformer is `llm_summarize`, which uses a fast secondary model to summarize lengthy outputs from tools like `kubectl describe`, log queries, or metrics collection.
+
+📖 **Learn more**: [Tool Output Transformers Documentation](docs/transformers.md)
 </details>

 ## 🔐 Data Privacy

holmes/clients/robusta_client.py

@@ -1,8 +1,8 @@
 import logging
-from typing import List, Optional
+from typing import List, Optional, Dict, Any
 import requests  # type: ignore
 from functools import cache
-from pydantic import BaseModel, ConfigDict
+from pydantic import BaseModel, ConfigDict, Field
 from holmes.common.env_vars import ROBUSTA_API_ENDPOINT

 HOLMES_GET_INFO_URL = f"{ROBUSTA_API_ENDPOINT}/api/holmes/get_info"
@@ -14,8 +14,19 @@ class HolmesInfo(BaseModel):
     latest_version: Optional[str] = None


+class RobustaModelsResponse(BaseModel):
+    model_config = ConfigDict(extra="ignore")
+    models: List[str]
+    models_args: Dict[str, Any] = Field(
+        default_factory=dict, alias="models_holmes_args"
+    )
+    default_model: Optional[str] = None
+
+
 @cache
-def fetch_robusta_models(account_id, token) -> Optional[List[str]]:
+def fetch_robusta_models(
+    account_id: str, token: str
+) -> Optional[RobustaModelsResponse]:
     try:
         session_request = {"session_token": token, "account_id": account_id}
         resp = requests.post(
@@ -25,7 +36,7 @@ def fetch_robusta_models(account_id, token) -> Optional[List[str]]:
         )
         resp.raise_for_status()
         response_json = resp.json()
-        return response_json
+        return RobustaModelsResponse(**response_json)
     except Exception:
         logging.exception("Failed to fetch robusta models")
         return None

holmes/common/env_vars.py

@@ -73,4 +73,11 @@ LOG_LLM_USAGE_RESPONSE = load_bool("LOG_LLM_USAGE_RESPONSE", False)
 # For CLI only, enable user approval for potentially sensitive commands that would otherwise be rejected
 ENABLE_CLI_TOOL_APPROVAL = load_bool("ENABLE_CLI_TOOL_APPROVAL", True)

-MAX_GRAPH_POINTS = float(os.environ.get("MAX_GRAPH_POINTS,
+MAX_GRAPH_POINTS = float(os.environ.get("MAX_GRAPH_POINTS", 200))
+
+# Limit each tool response to N% of the total context window.
+# Number between 0 and 100
+# Setting to either 0 or any number above 100 disables the logic that limits tool response size
+TOOL_MAX_ALLOCATED_CONTEXT_WINDOW_PCT = float(
+    os.environ.get("TOOL_MAX_ALLOCATED_CONTEXT_WINDOW_PCT", 10)
+)

holmes/config.py

@@ -1,4 +1,3 @@
-import json
 import logging
 import os
 import os.path
@@ -6,16 +5,13 @@ from enum import Enum
 from pathlib import Path
 from typing import TYPE_CHECKING, Any, List, Optional, Union

+import sentry_sdk
 import yaml  # type: ignore
-from pydantic import BaseModel, ConfigDict, FilePath, SecretStr
+from pydantic import BaseModel, ConfigDict, FilePath, PrivateAttr, SecretStr


-from holmes.
-from holmes.core.llm import DefaultLLM
+from holmes.core.llm import DefaultLLM, LLMModelRegistry
 from holmes.common.env_vars import (
-    ROBUSTA_AI,
-    LOAD_ALL_ROBUSTA_MODELS,
-    ROBUSTA_API_ENDPOINT,
     ROBUSTA_CONFIG_PATH,
 )
 from holmes.core.tools_utils.tool_executor import ToolExecutor
@@ -29,7 +25,6 @@ from holmes.plugins.runbooks (

 # Source plugin imports moved to their respective create methods to speed up startup
 if TYPE_CHECKING:
-    from holmes.core.llm import LLM
     from holmes.core.tool_calling_llm import IssueInvestigator, ToolCallingLLM
     from holmes.plugins.destinations.slack import SlackDestination
     from holmes.plugins.sources.github import GitHubSource
@@ -41,15 +36,9 @@ if TYPE_CHECKING:
 from holmes.core.supabase_dal import SupabaseDal
 from holmes.core.config import config_path_dir
 from holmes.utils.definitions import RobustaConfig
-from holmes.utils.env import replace_env_vars_values
-from holmes.utils.file_utils import load_yaml_file
 from holmes.utils.pydantic_utils import RobustaBaseConfig, load_model_from_file

 DEFAULT_CONFIG_LOCATION = os.path.join(config_path_dir, "config.yaml")
-MODEL_LIST_FILE_LOCATION = os.environ.get(
-    "MODEL_LIST_FILE_LOCATION", "/etc/holmes/config/model_list.yaml"
-)
-ROBUSTA_AI_MODEL_NAME = "Robusta"


 class SupportedTicketSources(str, Enum):
@@ -57,32 +46,11 @@ class SupportedTicketSources(str, Enum):
     PAGERDUTY = "pagerduty"


-def is_old_toolset_config(
-    toolsets: Union[dict[str, dict[str, Any]], List[dict[str, Any]]],
-) -> bool:
-    # old config is a list of toolsets
-    if isinstance(toolsets, list):
-        return True
-    return False
-
-
-def parse_models_file(path: str):
-    models = load_yaml_file(path, raise_error=False, warn_not_found=False)
-
-    for _, params in models.items():
-        params = replace_env_vars_values(params)
-
-    return models
-
-
 class Config(RobustaBaseConfig):
-
-
-
-
-    session_token: Optional[SecretStr] = None
-
-    model: Optional[str] = "gpt-4o"
+    model: Optional[str] = None
+    api_base: Optional[str] = None
+    api_version: Optional[str] = None
+    fast_model: Optional[str] = None
     max_steps: int = 40
     cluster_name: Optional[str] = None

@@ -123,14 +91,18 @@ class Config(RobustaBaseConfig):
     # custom_toolsets_from_cli is passed from CLI option `--custom-toolsets` as 'experimental' custom toolsets.
     # The status of toolset here won't be cached, so the toolset from cli will always be loaded when specified in the CLI.
     custom_toolsets_from_cli: Optional[List[FilePath]] = None
-
+    # if True, we will try to load the Robusta AI model, in cli we aren't trying to load it.
+    should_try_robusta_ai: bool = False

     toolsets: Optional[dict[str, dict[str, Any]]] = None
     mcp_servers: Optional[dict[str, dict[str, Any]]] = None

     _server_tool_executor: Optional[ToolExecutor] = None

-
+    # TODO: Separate those fields to facade class, this shouldn't be part of the config.
+    _toolset_manager: Optional[ToolsetManager] = PrivateAttr(None)
+    _llm_model_registry: Optional[LLMModelRegistry] = PrivateAttr(None)
+    _dal: Optional[SupabaseDal] = PrivateAttr(None)

     @property
     def toolset_manager(self) -> ToolsetManager:
@@ -140,80 +112,29 @@ class Config(RobustaBaseConfig):
             mcp_servers=self.mcp_servers,
             custom_toolsets=self.custom_toolsets,
             custom_toolsets_from_cli=self.custom_toolsets_from_cli,
+            global_fast_model=self.fast_model,
         )
         return self._toolset_manager

-
-
-
-
-
-
-        self.configure_robusta_ai_model()
-
-    def configure_robusta_ai_model(self) -> None:
-        try:
-            if not self.cluster_name or not LOAD_ALL_ROBUSTA_MODELS:
-                self._load_default_robusta_config()
-                return
-
-            if not self.api_key:
-                dal = SupabaseDal(self.cluster_name)
-                self.load_robusta_api_key(dal)
-
-            if not self.account_id or not self.session_token:
-                self._load_default_robusta_config()
-                return
+    @property
+    def dal(self) -> SupabaseDal:
+        if not self._dal:
+            self._dal = SupabaseDal(self.cluster_name)  # type: ignore
+        return self._dal

-
-
-
-
-
-                return
-
-            for model in models:
-                logging.info(f"Loading Robusta AI model: {model}")
-                self._model_list[model] = {
-                    "base_url": f"{ROBUSTA_API_ENDPOINT}/llm/{model}",
-                    "is_robusta_model": True,
-                }
-
-        except Exception:
-            logging.exception("Failed to get all robusta models")
-            # fallback to default behavior
-            self._load_default_robusta_config()
-
-    def _load_default_robusta_config(self):
-        if self._should_load_robusta_ai() and self.api_key:
-            logging.info("Loading default Robusta AI model")
-            self._model_list[ROBUSTA_AI_MODEL_NAME] = {
-                "base_url": ROBUSTA_API_ENDPOINT,
-                "is_robusta_model": True,
-            }
-
-    def _should_load_robusta_ai(self) -> bool:
-        if not self.should_try_robusta_ai:
-            return False
-
-        # ROBUSTA_AI were set in the env vars, so we can use it directly
-        if ROBUSTA_AI is not None:
-            return ROBUSTA_AI
-
-        # MODEL is set in the env vars, e.g. the user is using a custom model
-        # so we don't need to load the robusta AI model and keep the behavior backward compatible
-        if "MODEL" in os.environ:
-            return False
-
-        # if the user has provided a model list, we don't need to load the robusta AI model
-        if self._model_list:
-            return False
-
-        return True
+    @property
+    def llm_model_registry(self) -> LLMModelRegistry:
+        if not self._llm_model_registry:
+            self._llm_model_registry = LLMModelRegistry(self, dal=self.dal)
+        return self._llm_model_registry

     def log_useful_info(self):
-        if self.
-            logging.info(
+        if self.llm_model_registry and self.llm_model_registry.models:
+            logging.info(
+                f"Loaded models: {list(self.llm_model_registry.models.keys())}"
+            )
+        else:
+            logging.warning("No llm models were loaded")

     @classmethod
     def load_from_file(cls, config_file: Optional[Path], **kwargs) -> "Config":
@@ -227,6 +148,7 @@ class Config(RobustaBaseConfig):
         Returns:
             Config instance with merged settings
         """
+
         config_from_file: Optional[Config] = None
         if config_file is not None and config_file.exists():
             logging.debug(f"Loading config from {config_file}")
@@ -250,7 +172,10 @@
         kwargs = {}
         for field_name in [
             "model",
+            "fast_model",
             "api_key",
+            "api_base",
+            "api_version",
             "max_steps",
             "alertmanager_url",
             "alertmanager_username",
@@ -516,39 +441,41 @@
             raise ValueError("--slack-channel must be specified")
         return SlackDestination(self.slack_token.get_secret_value(), self.slack_channel)

-
-
-
-        model_params =
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    # TODO: move this to the llm model registry
+    def _get_llm(self, model_key: Optional[str] = None, tracer=None) -> "DefaultLLM":
+        sentry_sdk.set_tag("requested_model", model_key)
+        model_params = self.llm_model_registry.get_model_params(model_key)
+        api_base = self.api_base
+        api_version = self.api_version
+
+        is_robusta_model = model_params.pop("is_robusta_model", False)
+        sentry_sdk.set_tag("is_robusta_model", is_robusta_model)
+        if is_robusta_model:
+            # we set here the api_key since it is being refresh when exprided and not as part of the model loading.
+            account_id, token = self.dal.get_ai_credentials()
+            api_key = f"{account_id} {token}"
+        else:
+            api_key = model_params.pop("api_key", None)
+
+        model = model_params.pop("model")
+        # It's ok if the model does not have api base and api version, which are defaults to None.
+        # Handle both api_base and base_url - api_base takes precedence
+        model_api_base = model_params.pop("api_base", None)
+        model_base_url = model_params.pop("base_url", None)
+        api_base = model_api_base or model_base_url or api_base
+        api_version = model_params.pop("api_version", api_version)
+        model_name = model_params.pop("name", None) or model_key or model
+        sentry_sdk.set_tag("model_name", model_name)
+        logging.info(f"Creating LLM with model: {model_name}")
+        return DefaultLLM(
+            model, api_key, api_base, api_version, model_params, tracer, model_name
+        )  # type: ignore

     def get_models_list(self) -> List[str]:
-        if self.
-            return
-
-        return json.dumps([self.model])  # type: ignore
+        if self.llm_model_registry and self.llm_model_registry.models:
+            return list(self.llm_model_registry.models.keys())

-
-        if ROBUSTA_AI:
-            account_id, token = dal.get_ai_credentials()
-            self.api_key = SecretStr(f"{account_id} {token}")
-            self.account_id = account_id
-            self.session_token = SecretStr(token)
+        return []


 class TicketSource(BaseModel):

holmes/core/investigation.py

@@ -26,7 +26,6 @@ def investigate_issues(
     model: Optional[str] = None,
     trace_span=DummySpan(),
 ) -> InvestigationResult:
-    config.load_robusta_api_key(dal=dal)
     context = dal.get_issue_data(investigate_request.context.get("robusta_issue_id"))

     resource_instructions = dal.get_resource_instructions(
@@ -71,6 +70,7 @@
         sections=sections,
         tool_calls=investigation.tool_calls or [],
         instructions=investigation.instructions,
+        metadata=investigation.metadata,
     )


@@ -80,7 +80,6 @@ def get_investigation_context(
     config: Config,
     request_structured_output_from_llm: Optional[bool] = None,
 ):
-    config.load_robusta_api_key(dal=dal)
     ai = config.create_issue_investigator(dal=dal, model=investigate_request.model)

     raw_data = investigate_request.model_dump()
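A few illustrative notes on the changes above; the snippets are sketches written for this summary, not code shipped in the package.

The README addition describes the new tool output transformers. A minimal sketch of the `llm_summarize` idea, assuming a character threshold and a `gpt-4o-mini` fast model (the real implementation is the new holmes/core/transformers/llm_summarize.py, which this diff only lists):

```python
# Sketch only: shrink oversized tool output with a fast secondary model before
# the primary model sees it. The threshold and model choice are assumptions.
import litellm


def summarize_if_large(tool_output: str, fast_model: str = "gpt-4o-mini",
                       threshold_chars: int = 20_000) -> str:
    if len(tool_output) <= threshold_chars:
        return tool_output  # small outputs pass through untouched
    response = litellm.completion(
        model=fast_model,
        messages=[
            {"role": "system", "content": "Summarize this tool output. Keep errors, resource names and numbers."},
            {"role": "user", "content": tool_output},
        ],
    )
    return response.choices[0].message.content
```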
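The new `RobustaModelsResponse` in holmes/clients/robusta_client.py ignores unknown keys and fills `models_args` from the API's `models_holmes_args` field. A standalone check of that behaviour (the payload values are invented):

```python
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, ConfigDict, Field


class RobustaModelsResponse(BaseModel):
    model_config = ConfigDict(extra="ignore")
    models: List[str]
    models_args: Dict[str, Any] = Field(default_factory=dict, alias="models_holmes_args")
    default_model: Optional[str] = None


payload = {
    "models": ["Robusta/model-a", "Robusta/model-b"],            # invented names
    "models_holmes_args": {"Robusta/model-b": {"temperature": 0}},
    "default_model": "Robusta/model-a",
    "unknown_field": "dropped",                                   # ignored via extra="ignore"
}
parsed = RobustaModelsResponse(**payload)
print(parsed.models_args)   # populated through the models_holmes_args alias
```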
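The new `TOOL_MAX_ALLOCATED_CONTEXT_WINDOW_PCT` variable caps each tool response at a percentage of the model's context window; the actual enforcement lives in the new holmes/core/tools_utils/tool_context_window_limiter.py, which this diff only lists. A rough sketch of the arithmetic, assuming a 128,000-token context window:

```python
TOOL_MAX_ALLOCATED_CONTEXT_WINDOW_PCT = 10.0  # default from env_vars.py


def max_tool_tokens(context_window_tokens: int,
                    pct: float = TOOL_MAX_ALLOCATED_CONTEXT_WINDOW_PCT) -> int:
    # Values of 0 or above 100 disable the limit, per the comment in env_vars.py.
    if pct <= 0 or pct > 100:
        return context_window_tokens
    return int(context_window_tokens * pct / 100)


print(max_tool_tokens(128_000))  # 12800 tokens per tool response at the 10% default
```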
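Finally, in the new `Config._get_llm`, a model entry's `api_base` takes precedence over its `base_url`, which in turn takes precedence over the global `Config.api_base`. The same resolution order in isolation (the URLs are made up):

```python
from typing import Optional


def resolve_api_base(model_params: dict, global_api_base: Optional[str]) -> Optional[str]:
    # Per-model api_base wins, then base_url, then the global setting.
    return (
        model_params.get("api_base")
        or model_params.get("base_url")
        or global_api_base
    )


print(resolve_api_base({"base_url": "https://llm.example.internal"}, "https://global.example.internal"))
# -> https://llm.example.internal
```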