holmesgpt 0.14.2__tar.gz → 0.14.3a0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (252)
  1. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/PKG-INFO +6 -8
  2. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/README.md +4 -7
  3. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/__init__.py +1 -1
  4. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/common/env_vars.py +6 -0
  5. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/config.py +3 -6
  6. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/conversations.py +12 -2
  7. holmesgpt-0.14.3a0/holmes/core/feedback.py +191 -0
  8. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/llm.py +16 -12
  9. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/models.py +101 -1
  10. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/supabase_dal.py +23 -9
  11. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/tool_calling_llm.py +197 -15
  12. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/tools.py +20 -7
  13. holmesgpt-0.14.3a0/holmes/core/tools_utils/token_counting.py +13 -0
  14. holmesgpt-0.14.3a0/holmes/core/tools_utils/tool_context_window_limiter.py +55 -0
  15. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/tools_utils/tool_executor.py +11 -6
  16. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/toolset_manager.py +5 -1
  17. holmesgpt-0.14.3a0/holmes/core/truncation/dal_truncation_utils.py +23 -0
  18. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/interactive.py +146 -14
  19. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/prompts/_fetch_logs.jinja2 +3 -0
  20. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/runbooks/__init__.py +6 -1
  21. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/__init__.py +11 -4
  22. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/atlas_mongodb/mongodb_atlas.py +9 -20
  23. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/azure_sql/tools/analyze_connection_failures.py +2 -3
  24. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/azure_sql/tools/analyze_database_connections.py +2 -3
  25. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/azure_sql/tools/analyze_database_health_status.py +6 -4
  26. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/azure_sql/tools/analyze_database_performance.py +6 -4
  27. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/azure_sql/tools/analyze_database_storage.py +2 -3
  28. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/azure_sql/tools/get_active_alerts.py +6 -4
  29. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/azure_sql/tools/get_slow_queries.py +2 -3
  30. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/azure_sql/tools/get_top_cpu_queries.py +2 -3
  31. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/azure_sql/tools/get_top_data_io_queries.py +2 -3
  32. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/azure_sql/tools/get_top_log_io_queries.py +2 -3
  33. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/bash_toolset.py +4 -7
  34. holmesgpt-0.14.3a0/holmes/plugins/toolsets/cilium.yaml +284 -0
  35. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/datadog/toolset_datadog_general.py +5 -10
  36. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/datadog/toolset_datadog_logs.py +1 -1
  37. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/datadog/toolset_datadog_metrics.py +6 -13
  38. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/datadog/toolset_datadog_rds.py +3 -6
  39. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/datadog/toolset_datadog_traces.py +4 -9
  40. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/git.py +14 -12
  41. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/grafana/grafana_tempo_api.py +23 -42
  42. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/grafana/toolset_grafana.py +2 -3
  43. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/grafana/toolset_grafana_tempo.py +18 -36
  44. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/internet/internet.py +2 -3
  45. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/internet/notion.py +2 -3
  46. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/investigator/core_investigation.py +7 -9
  47. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/kafka.py +7 -18
  48. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/logging_utils/logging_api.py +79 -3
  49. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/mcp/toolset_mcp.py +2 -3
  50. holmesgpt-0.14.3a0/holmes/plugins/toolsets/newrelic/new_relic_api.py +125 -0
  51. holmesgpt-0.14.3a0/holmes/plugins/toolsets/newrelic/newrelic.jinja2 +41 -0
  52. holmesgpt-0.14.3a0/holmes/plugins/toolsets/newrelic/newrelic.py +211 -0
  53. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/opensearch/opensearch.py +5 -12
  54. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/opensearch/opensearch_traces.py +3 -6
  55. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/prometheus/prometheus.py +131 -97
  56. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/rabbitmq/toolset_rabbitmq.py +3 -6
  57. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/robusta/robusta.py +4 -9
  58. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/runbook/runbook_fetcher.py +93 -13
  59. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/servicenow/servicenow.py +5 -10
  60. holmesgpt-0.14.3a0/holmes/utils/__init__.py +0 -0
  61. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/utils/sentry_helper.py +1 -1
  62. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/utils/stream.py +22 -7
  63. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/version.py +34 -14
  64. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/pyproject.toml +12 -7
  65. holmesgpt-0.14.2/holmes/core/tools_utils/data_types.py +0 -81
  66. holmesgpt-0.14.2/holmes/core/tools_utils/tool_context_window_limiter.py +0 -33
  67. holmesgpt-0.14.2/holmes/plugins/toolsets/newrelic.py +0 -231
  68. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/LICENSE.txt +0 -0
  69. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/.git_archival.json +0 -0
  70. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/clients/robusta_client.py +0 -0
  71. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/common/openshift.py +0 -0
  72. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/__init__.py +0 -0
  73. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/config.py +0 -0
  74. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/investigation.py +0 -0
  75. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/investigation_structured_output.py +0 -0
  76. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/issue.py +0 -0
  77. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/openai_formatting.py +0 -0
  78. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/performance_timing.py +0 -0
  79. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/prompt.py +0 -0
  80. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/resource_instruction.py +0 -0
  81. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/runbooks.py +0 -0
  82. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/safeguards.py +0 -0
  83. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/todo_tasks_formatter.py +0 -0
  84. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/tools_utils/__init__.py +0 -0
  85. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/tools_utils/toolset_utils.py +0 -0
  86. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/tracing.py +0 -0
  87. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/transformers/__init__.py +0 -0
  88. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/transformers/base.py +0 -0
  89. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/transformers/llm_summarize.py +0 -0
  90. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/transformers/registry.py +0 -0
  91. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/transformers/transformer.py +0 -0
  92. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/main.py +0 -0
  93. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/__init__.py +0 -0
  94. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/destinations/__init__.py +0 -0
  95. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/destinations/slack/__init__.py +0 -0
  96. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/destinations/slack/plugin.py +0 -0
  97. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/interfaces.py +0 -0
  98. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/prompts/__init__.py +0 -0
  99. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/prompts/_ai_safety.jinja2 +0 -0
  100. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/prompts/_current_date_time.jinja2 +0 -0
  101. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/prompts/_default_log_prompt.jinja2 +0 -0
  102. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/prompts/_general_instructions.jinja2 +0 -0
  103. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/prompts/_global_instructions.jinja2 +0 -0
  104. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/prompts/_permission_errors.jinja2 +0 -0
  105. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/prompts/_runbook_instructions.jinja2 +0 -0
  106. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/prompts/_toolsets_instructions.jinja2 +0 -0
  107. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/prompts/generic_ask.jinja2 +0 -0
  108. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/prompts/generic_ask_conversation.jinja2 +0 -0
  109. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/prompts/generic_ask_for_issue_conversation.jinja2 +0 -0
  110. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/prompts/generic_investigation.jinja2 +0 -0
  111. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/prompts/generic_post_processing.jinja2 +0 -0
  112. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/prompts/generic_ticket.jinja2 +0 -0
  113. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/prompts/investigation_output_format.jinja2 +0 -0
  114. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/prompts/investigation_procedure.jinja2 +0 -0
  115. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/prompts/kubernetes_workload_ask.jinja2 +0 -0
  116. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/prompts/kubernetes_workload_chat.jinja2 +0 -0
  117. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/runbooks/CLAUDE.md +0 -0
  118. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/runbooks/README.md +0 -0
  119. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/runbooks/catalog.json +0 -0
  120. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/runbooks/jira.yaml +0 -0
  121. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/runbooks/kube-prometheus-stack.yaml +0 -0
  122. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/runbooks/networking/dns_troubleshooting_instructions.md +0 -0
  123. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/runbooks/upgrade/upgrade_troubleshooting_instructions.md +0 -0
  124. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/sources/github/__init__.py +0 -0
  125. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/sources/jira/__init__.py +0 -0
  126. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/sources/opsgenie/__init__.py +0 -0
  127. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/sources/pagerduty/__init__.py +0 -0
  128. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/sources/prometheus/__init__.py +0 -0
  129. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/sources/prometheus/models.py +0 -0
  130. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/sources/prometheus/plugin.py +0 -0
  131. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/aks-node-health.yaml +0 -0
  132. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/aks.yaml +0 -0
  133. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/argocd.yaml +0 -0
  134. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/atlas_mongodb/instructions.jinja2 +0 -0
  135. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/aws.yaml +0 -0
  136. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/azure_sql/__init__.py +0 -0
  137. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/azure_sql/apis/alert_monitoring_api.py +0 -0
  138. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/azure_sql/apis/azure_sql_api.py +0 -0
  139. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/azure_sql/apis/connection_failure_api.py +0 -0
  140. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/azure_sql/apis/connection_monitoring_api.py +0 -0
  141. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/azure_sql/apis/storage_analysis_api.py +0 -0
  142. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/azure_sql/azure_base_toolset.py +0 -0
  143. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/azure_sql/azure_sql_instructions.jinja2 +0 -0
  144. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/azure_sql/azure_sql_toolset.py +0 -0
  145. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/azure_sql/install.md +0 -0
  146. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/azure_sql/tools/__init__.py +0 -0
  147. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/azure_sql/utils.py +0 -0
  148. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/__init__.py +0 -0
  149. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/argocd/__init__.py +0 -0
  150. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/argocd/constants.py +0 -0
  151. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/aws/__init__.py +0 -0
  152. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/aws/constants.py +0 -0
  153. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/azure/__init__.py +0 -0
  154. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/azure/constants.py +0 -0
  155. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/bash_instructions.jinja2 +0 -0
  156. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/common/bash.py +0 -0
  157. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/common/bash_command.py +0 -0
  158. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/common/config.py +0 -0
  159. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/common/stringify.py +0 -0
  160. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/common/validators.py +0 -0
  161. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/docker/__init__.py +0 -0
  162. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/docker/constants.py +0 -0
  163. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/helm/__init__.py +0 -0
  164. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/helm/constants.py +0 -0
  165. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/kubectl/__init__.py +0 -0
  166. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/kubectl/constants.py +0 -0
  167. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/kubectl/kubectl_describe.py +0 -0
  168. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/kubectl/kubectl_events.py +0 -0
  169. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/kubectl/kubectl_get.py +0 -0
  170. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/kubectl/kubectl_logs.py +0 -0
  171. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/kubectl/kubectl_run.py +0 -0
  172. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/kubectl/kubectl_top.py +0 -0
  173. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/parse_command.py +0 -0
  174. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/utilities/__init__.py +0 -0
  175. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/utilities/base64_util.py +0 -0
  176. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/utilities/cut.py +0 -0
  177. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/utilities/grep/__init__.py +0 -0
  178. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/utilities/head.py +0 -0
  179. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/utilities/jq.py +0 -0
  180. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/utilities/sed.py +0 -0
  181. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/utilities/sort.py +0 -0
  182. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/utilities/tail.py +0 -0
  183. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/utilities/tr.py +0 -0
  184. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/utilities/uniq.py +0 -0
  185. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/bash/utilities/wc.py +0 -0
  186. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/confluence.yaml +0 -0
  187. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/consts.py +0 -0
  188. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/coralogix/api.py +0 -0
  189. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/coralogix/toolset_coralogix_logs.py +0 -0
  190. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/coralogix/utils.py +0 -0
  191. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/datadog/datadog_api.py +0 -0
  192. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/datadog/datadog_general_instructions.jinja2 +0 -0
  193. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/datadog/datadog_logs_instructions.jinja2 +0 -0
  194. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/datadog/datadog_metrics_instructions.jinja2 +0 -0
  195. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/datadog/datadog_rds_instructions.jinja2 +0 -0
  196. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/datadog/datadog_traces_formatter.py +0 -0
  197. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/datadog/instructions_datadog_traces.jinja2 +0 -0
  198. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/docker.yaml +0 -0
  199. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/grafana/__init__.py +0 -0
  200. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/grafana/base_grafana_toolset.py +0 -0
  201. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/grafana/common.py +0 -0
  202. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/grafana/grafana_api.py +0 -0
  203. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/grafana/loki_api.py +0 -0
  204. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/grafana/toolset_grafana_loki.py +0 -0
  205. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/grafana/toolset_grafana_tempo.jinja2 +0 -0
  206. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/grafana/trace_parser.py +0 -0
  207. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/helm.yaml +0 -0
  208. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/investigator/__init__.py +0 -0
  209. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/investigator/investigator_instructions.jinja2 +0 -0
  210. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/investigator/model.py +0 -0
  211. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/kubernetes.yaml +0 -0
  212. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/kubernetes_logs.py +0 -0
  213. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/kubernetes_logs.yaml +0 -0
  214. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/logging_utils/__init__.py +0 -0
  215. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/logging_utils/types.py +0 -0
  216. {holmesgpt-0.14.2/holmes/plugins/toolsets/opensearch → holmesgpt-0.14.3a0/holmes/plugins/toolsets/newrelic}/__init__.py +0 -0
  217. {holmesgpt-0.14.2/holmes/plugins/toolsets/robusta → holmesgpt-0.14.3a0/holmes/plugins/toolsets/opensearch}/__init__.py +0 -0
  218. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/opensearch/opensearch_logs.py +0 -0
  219. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/opensearch/opensearch_traces_instructions.jinja2 +0 -0
  220. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/opensearch/opensearch_utils.py +0 -0
  221. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/prometheus/prometheus_instructions.jinja2 +0 -0
  222. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/prometheus/utils.py +0 -0
  223. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/rabbitmq/api.py +0 -0
  224. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/rabbitmq/rabbitmq_instructions.jinja2 +0 -0
  225. {holmesgpt-0.14.2/holmes/plugins/toolsets/runbook → holmesgpt-0.14.3a0/holmes/plugins/toolsets/robusta}/__init__.py +0 -0
  226. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/robusta/robusta_instructions.jinja2 +0 -0
  227. {holmesgpt-0.14.2/holmes/utils → holmesgpt-0.14.3a0/holmes/plugins/toolsets/runbook}/__init__.py +0 -0
  228. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/service_discovery.py +0 -0
  229. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/servicenow/install.md +0 -0
  230. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/servicenow/instructions.jinja2 +0 -0
  231. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/slab.yaml +0 -0
  232. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/toolsets/utils.py +0 -0
  233. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/plugins/utils.py +0 -0
  234. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/utils/cache.py +0 -0
  235. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/utils/cert_utils.py +0 -0
  236. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/utils/colors.py +0 -0
  237. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/utils/config_utils.py +0 -0
  238. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/utils/console/consts.py +0 -0
  239. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/utils/console/logging.py +0 -0
  240. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/utils/console/result.py +0 -0
  241. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/utils/default_toolset_installation_guide.jinja2 +0 -0
  242. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/utils/definitions.py +0 -0
  243. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/utils/env.py +0 -0
  244. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/utils/file_utils.py +0 -0
  245. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/utils/global_instructions.py +0 -0
  246. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/utils/holmes_status.py +0 -0
  247. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/utils/holmes_sync_toolsets.py +0 -0
  248. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/utils/keygen_utils.py +0 -0
  249. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/utils/llms.py +0 -0
  250. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/utils/markdown_utils.py +0 -0
  251. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/utils/pydantic_utils.py +0 -0
  252. {holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/utils/tags.py +0 -0

{holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: holmesgpt
-Version: 0.14.2
+Version: 0.14.3a0
 Summary:
 Author: Natan Yellin
 Author-email: natan@robusta.dev
@@ -23,6 +23,7 @@ Requires-Dist: certifi (>=2024.7.4,<2025.0.0)
 Requires-Dist: colorlog (>=6.8.2,<7.0.0)
 Requires-Dist: confluent-kafka (>=2.6.1,<3.0.0)
 Requires-Dist: fastapi (>=0.116,<0.117)
+Requires-Dist: httpx[socks] (<0.28)
 Requires-Dist: humanize (>=4.9.0,<5.0.0)
 Requires-Dist: jinja2 (>=3.1.2,<4.0.0)
 Requires-Dist: kubernetes (>=32.0.1,<33.0.0)
@@ -245,14 +246,11 @@ Distributed under the MIT License. See [LICENSE.txt](https://github.com/robusta-
 
 ## Community
 
-Join our community meetings to discuss the HolmesGPT roadmap and share feedback:
+Join our community to discuss the HolmesGPT roadmap and share feedback:
 
-📅 **First Community Meeting:** Thursday, August 21, 2025
-- **Time:** 8:00-9:00 AM PT / 11:00 AM-12:00 PM ET / 8:30-9:30 PM IST
-- **Where:** [Google Meet](https://meet.google.com/jxc-ujyf-xwy)
-- **Agenda:** [Roadmap discussion](https://github.com/orgs/robusta-dev/projects/2), community feedback, and Q&A
-
-[📝 Meeting Notes](https://docs.google.com/document/d/1sIHCcTivyzrF5XNvos7ZT_UcxEOqgwfawsTbb9wMJe4/edit?tab=t.0) | [📋 Full Details](https://holmesgpt.dev/community/)
+📹 **First Community Meetup Recording:** [Watch on YouTube](https://youtu.be/slQRc6nlFQU)
+- **Topics:** Roadmap discussion, community feedback, and Q&A
+- **Resources:** [📝 Meeting Notes](https://docs.google.com/document/d/1sIHCcTivyzrF5XNvos7ZT_UcxEOqgwfawsTbb9wMJe4/edit?tab=t.0) | [📋 Community Page](https://holmesgpt.dev/community/)
 
 ## Support
 

{holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/README.md
@@ -193,14 +193,11 @@ Distributed under the MIT License. See [LICENSE.txt](https://github.com/robusta-
 
 ## Community
 
-Join our community meetings to discuss the HolmesGPT roadmap and share feedback:
+Join our community to discuss the HolmesGPT roadmap and share feedback:
 
-📅 **First Community Meeting:** Thursday, August 21, 2025
-- **Time:** 8:00-9:00 AM PT / 11:00 AM-12:00 PM ET / 8:30-9:30 PM IST
-- **Where:** [Google Meet](https://meet.google.com/jxc-ujyf-xwy)
-- **Agenda:** [Roadmap discussion](https://github.com/orgs/robusta-dev/projects/2), community feedback, and Q&A
-
-[📝 Meeting Notes](https://docs.google.com/document/d/1sIHCcTivyzrF5XNvos7ZT_UcxEOqgwfawsTbb9wMJe4/edit?tab=t.0) | [📋 Full Details](https://holmesgpt.dev/community/)
+📹 **First Community Meetup Recording:** [Watch on YouTube](https://youtu.be/slQRc6nlFQU)
+- **Topics:** Roadmap discussion, community feedback, and Q&A
+- **Resources:** [📝 Meeting Notes](https://docs.google.com/document/d/1sIHCcTivyzrF5XNvos7ZT_UcxEOqgwfawsTbb9wMJe4/edit?tab=t.0) | [📋 Community Page](https://holmesgpt.dev/community/)
 
 ## Support
 

{holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/__init__.py
@@ -1,5 +1,5 @@
 # This is patched by github actions during release
-__version__ = "0.14.2"
+__version__ = "0.14.3-alpha"
 
 # Re-export version functions from version module for backward compatibility
 from .version import (

{holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/common/env_vars.py
@@ -81,3 +81,9 @@ MAX_GRAPH_POINTS = float(os.environ.get("MAX_GRAPH_POINTS", 100))
 TOOL_MAX_ALLOCATED_CONTEXT_WINDOW_PCT = float(
     os.environ.get("TOOL_MAX_ALLOCATED_CONTEXT_WINDOW_PCT", 15)
 )
+
+MAX_EVIDENCE_DATA_CHARACTERS_BEFORE_TRUNCATION = int(
+    os.environ.get("MAX_EVIDENCE_DATA_CHARACTERS_BEFORE_TRUNCATION", 3000)
+)
+
+DISABLE_PROMETHEUS_TOOLSET = load_bool("DISABLE_PROMETHEUS_TOOLSET", False)
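
Two new tuning knobs land in env_vars.py: MAX_EVIDENCE_DATA_CHARACTERS_BEFORE_TRUNCATION (default 3000) caps how much evidence text is kept before truncation, and DISABLE_PROMETHEUS_TOOLSET (default false) turns the Prometheus toolset off. The sketch below is illustrative only (the values are arbitrary, and it assumes holmes' load_bool helper accepts "true"/"false" strings); both settings are read from the environment when the module is imported.

# Illustrative only - not part of the diff. Set the variables before importing holmes.
import os

os.environ["MAX_EVIDENCE_DATA_CHARACTERS_BEFORE_TRUNCATION"] = "5000"
os.environ["DISABLE_PROMETHEUS_TOOLSET"] = "true"

from holmes.common import env_vars  # noqa: E402

print(env_vars.MAX_EVIDENCE_DATA_CHARACTERS_BEFORE_TRUNCATION)  # 5000
print(env_vars.DISABLE_PROMETHEUS_TOOLSET)  # True, assuming load_bool parses "true"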

{holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/config.py
@@ -9,11 +9,8 @@ import sentry_sdk
 import yaml  # type: ignore
 from pydantic import BaseModel, ConfigDict, FilePath, PrivateAttr, SecretStr
 
-
+from holmes.common.env_vars import ROBUSTA_CONFIG_PATH
 from holmes.core.llm import DefaultLLM, LLMModelRegistry
-from holmes.common.env_vars import (
-    ROBUSTA_CONFIG_PATH,
-)
 from holmes.core.tools_utils.tool_executor import ToolExecutor
 from holmes.core.toolset_manager import ToolsetManager
 from holmes.plugins.runbooks import (
@@ -33,8 +30,8 @@ if TYPE_CHECKING:
     from holmes.plugins.sources.pagerduty import PagerDutySource
     from holmes.plugins.sources.prometheus.plugin import AlertManagerSource
 
-from holmes.core.supabase_dal import SupabaseDal
 from holmes.core.config import config_path_dir
+from holmes.core.supabase_dal import SupabaseDal
 from holmes.utils.definitions import RobustaConfig
 from holmes.utils.pydantic_utils import RobustaBaseConfig, load_model_from_file
 
@@ -129,7 +126,7 @@ class Config(RobustaBaseConfig):
         return self._llm_model_registry
 
     def log_useful_info(self):
-        if self.llm_model_registry and self.llm_model_registry.models:
+        if self.llm_model_registry.models:
             logging.info(
                 f"Loaded models: {list(self.llm_model_registry.models.keys())}"
             )

{holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/conversations.py
@@ -262,7 +262,10 @@ def build_issue_chat_messages(
 
 
 def add_or_update_system_prompt(
-    conversation_history: List[Dict[str, str]], ai: ToolCallingLLM, config: Config
+    conversation_history: List[Dict[str, str]],
+    ai: ToolCallingLLM,
+    config: Config,
+    additional_system_prompt: Optional[str] = None,
 ):
     """Either add the system prompt or replace an existing system prompt.
     As a 'defensive' measure, this code will only replace an existing system prompt if it is the
@@ -278,6 +281,9 @@
 
     system_prompt = load_and_render_prompt(template_path, context)
 
+    if additional_system_prompt:
+        system_prompt = system_prompt + "\n" + additional_system_prompt
+
     if not conversation_history or len(conversation_history) == 0:
         conversation_history.append({"role": "system", "content": system_prompt})
     elif conversation_history[0]["role"] == "system":
@@ -303,6 +309,7 @@ def build_chat_messages(
     ai: ToolCallingLLM,
     config: Config,
     global_instructions: Optional[Instructions] = None,
+    additional_system_prompt: Optional[str] = None,
 ) -> List[dict]:
     """
     This function generates a list of messages for general chat conversation and ensures that the message sequence adheres to the model's context window limitations
@@ -358,7 +365,10 @@
     conversation_history = conversation_history.copy()
 
     conversation_history = add_or_update_system_prompt(
-        conversation_history=conversation_history, ai=ai, config=config
+        conversation_history=conversation_history,
+        ai=ai,
+        config=config,
+        additional_system_prompt=additional_system_prompt,
     )
 
     ask = add_global_instructions_to_user_prompt(ask, global_instructions)
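
The practical effect of the new additional_system_prompt parameter threading through build_chat_messages and add_or_update_system_prompt is that the caller-supplied text is appended to the rendered system prompt before that prompt is inserted into, or replaces, the leading system message. Below is a minimal standalone sketch of that behavior; it is illustrative only, since the real functions also take the ToolCallingLLM and Config objects and render the prompt from a Jinja template.

# Illustrative sketch, not holmes code: mirrors how additional_system_prompt is applied.
from typing import Optional

def apply_system_prompt(
    history: list[dict], rendered_prompt: str, additional_system_prompt: Optional[str] = None
) -> list[dict]:
    if additional_system_prompt:
        rendered_prompt = rendered_prompt + "\n" + additional_system_prompt
    if not history:
        return [{"role": "system", "content": rendered_prompt}]
    if history[0]["role"] == "system":  # only replace an existing leading system prompt
        history[0] = {"role": "system", "content": rendered_prompt}
    return history

print(apply_system_prompt([], "You are HolmesGPT.", "Answer in one paragraph."))
# [{'role': 'system', 'content': 'You are HolmesGPT.\nAnswer in one paragraph.'}]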

holmesgpt-0.14.3a0/holmes/core/feedback.py (new file)
@@ -0,0 +1,191 @@
+import os
+from abc import ABC, abstractmethod
+from typing import Callable, Optional
+
+from .llm import LLM
+
+DEFAULT_PRIVACY_NOTICE_BANNER = "Your feedback will be used to improve Holmesgpt's performance. Please avoid sharing sensitive personal information. By continuing, you consent to this data usage."
+PRIVACY_NOTICE_BANNER = os.environ.get(
+    "PRIVACY_NOTICE_BANNER", DEFAULT_PRIVACY_NOTICE_BANNER
+)
+
+
+class FeedbackInfoBase(ABC):
+    """Abstract base class for all feedback-related classes that must implement to_dict()."""
+
+    @abstractmethod
+    def to_dict(self) -> dict:
+        """Convert to dictionary representation. Must be implemented by all subclasses."""
+        pass
+
+
+class FeedbackLLM(FeedbackInfoBase):
+    """Class to represent a LLM in the feedback."""
+
+    def __init__(self, model: str, max_context_size: int):
+        self.model = model
+        self.max_context_size = max_context_size
+
+    def update_from_llm(self, llm: LLM):
+        self.model = llm.model
+        self.max_context_size = llm.get_context_window_size()
+
+    def to_dict(self) -> dict:
+        """Convert to dictionary representation."""
+        return self.__dict__
+
+
+# TODO: extend the FeedbackLLMResponse to include each tool call results details used for evaluate the overall response.
+# Currenlty tool call details in plan:
+# - toolcall parameter and success/failure, toolcall truncation size
+# - Holmes plan (todo list)
+# - Holmes intermediate output
+class FeedbackLLMResponse(FeedbackInfoBase):
+    """Class to represent a LLM response in the feedback"""
+
+    def __init__(self, user_ask: str, response: str):
+        self.user_ask = user_ask
+        self.response = response
+
+    def to_dict(self) -> dict:
+        """Convert to dictionary representation."""
+        return self.__dict__
+
+
+class FeedbackMetadata(FeedbackInfoBase):
+    """Class to store feedback metadata."""
+
+    def __init__(self):
+        # In iteration mode, there can be multiple ask and response pairs.
+        self.llm_responses = []
+        self.llm = FeedbackLLM("", 0)
+
+    def add_llm_response(self, user_ask: str, response: str) -> None:
+        """Add a LLM response to the metadata."""
+        llm_response = FeedbackLLMResponse(user_ask, response)
+        self.llm_responses.append(llm_response)
+
+    def update_llm(self, llm: LLM) -> None:
+        """Update the LLM information in the metadata."""
+        self.llm.update_from_llm(llm)
+
+    def to_dict(self) -> dict:
+        """Convert to dictionary representation."""
+        return {
+            "llm_responses": [resp.to_dict() for resp in self.llm_responses],
+            "llm": self.llm.to_dict(),
+        }
+
+
+class UserFeedback(FeedbackInfoBase):
+    """Class to store user rate and comment to the AI response."""
+
+    def __init__(self, is_positive: bool, comment: Optional[str]):
+        self.is_positive = is_positive
+        self.comment = comment
+
+    @property
+    def rating_text(self) -> str:
+        """Return human-readable rating text."""
+        return "useful" if self.is_positive else "not useful"
+
+    @property
+    def rating_emoji(self) -> str:
+        """Return emoji representation of the rating."""
+        return "👍" if self.is_positive else "👎"
+
+    def __str__(self) -> str:
+        """Return string representation of the feedback."""
+        if self.comment:
+            return f"Rating: {self.rating_text}. Comment: {self.comment}"
+        else:
+            return f"Rating: {self.rating_text}. No additional comment."
+
+    def to_dict(self) -> dict:
+        """Convert to dictionary representation."""
+        return {
+            "is_positive": self.is_positive,
+            "comment": self.comment,
+        }
+
+
+class Feedback(FeedbackInfoBase):
+    """Class to store overall feedback data used to evaluate the AI response."""
+
+    def __init__(self):
+        self.metadata = FeedbackMetadata()
+        self.user_feedback: Optional[UserFeedback] = None
+
+    def set_user_feedback(self, user_feedback: UserFeedback) -> None:
+        """Set the user feedback."""
+        self.user_feedback = user_feedback
+
+    def to_dict(self) -> dict:
+        """Convert to dictionary representation."""
+        return {
+            "metadata": self.metadata.to_dict(),
+            "user_feedback": self.user_feedback.to_dict()
+            if self.user_feedback
+            else None,
+        }
+
+
+FeedbackCallback = Callable[[Feedback], None]
+
+
+def feedback_callback_example(feedback: Feedback) -> None:
+    """
+    Example implementation of a feedback callback function.
+
+    This function demonstrates how to process feedback data using to_dict() methods
+    and could be used for:
+    - Logging feedback to files or databases
+    - Sending feedback to analytics services
+    - Training data collection
+    - User satisfaction monitoring
+
+    Args:
+        feedback: Feedback object containing user feedback and metadata
+    """
+    print("\n=== Feedback Received ===")
+
+    # Convert entire feedback to dict first - this is the main data structure
+    feedback_dict = feedback.to_dict()
+    print(f"Complete feedback dictionary keys: {list(feedback_dict.keys())}")
+
+    # How to check user feedback using to_dict()
+    print("\n1. Checking User Feedback:")
+    user_feedback_dict = (
+        feedback.user_feedback.to_dict() if feedback.user_feedback else None
+    )
+    if user_feedback_dict:
+        print(f" User feedback dict: {user_feedback_dict}")
+        print(f" Is positive: {user_feedback_dict['is_positive']}")
+        print(f" Comment: {user_feedback_dict['comment'] or 'None'}")
+        # You can also access properties through the object:
+        print(f" Rating emoji: {feedback.user_feedback.rating_emoji}")  # type: ignore
+        print(f" Rating text: {feedback.user_feedback.rating_text}")  # type: ignore
+    else:
+        print(" No user feedback provided (user_feedback is None)")
+
+    # How to check LLM information using to_dict()
+    print("\n2. Checking LLM Information:")
+    metadata_dict = feedback.metadata.to_dict()
+    llm_dict = metadata_dict["llm"]
+    print(f" LLM dict: {llm_dict}")
+    print(f" Model: {llm_dict['model']}")
+    print(f" Max context size: {llm_dict['max_context_size']}")
+
+    # How to check ask and response pairs using to_dict()
+    print("\n3. Checking Ask and Response History:")
+    llm_responses_dict = metadata_dict["llm_responses"]
+    print(f" Number of exchanges: {len(llm_responses_dict)}")
+
+    for i, response_dict in enumerate(llm_responses_dict, 1):
+        print(f" Exchange {i} dict: {list(response_dict.keys())}")
+        user_ask = response_dict["user_ask"]
+        ai_response = response_dict["response"]
+        print(f" User ask: {user_ask}")
+        print(f" AI response: {ai_response}")
+
+    print("=== End Feedback ===\n")

{holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/llm.py
@@ -1,6 +1,7 @@
 import json
 import logging
 from abc import abstractmethod
+from math import floor
 from typing import Any, Dict, List, Optional, Type, Union, TYPE_CHECKING
 
 from litellm.types.utils import ModelResponse, TextCompletionResponse
@@ -292,6 +293,8 @@ class DefaultLLM(LLM):
         raise Exception(f"Unexpected type returned by the LLM {type(result)}")
 
     def get_maximum_output_token(self) -> int:
+        max_output_tokens = floor(min(64000, self.get_context_window_size() / 5))
+
         if OVERRIDE_MAX_OUTPUT_TOKEN:
             logging.debug(
                 f"Using OVERRIDE_MAX_OUTPUT_TOKEN {OVERRIDE_MAX_OUTPUT_TOKEN}"
@@ -301,17 +304,22 @@
         # Try each name variant
         for name in self._get_model_name_variants_for_lookup():
             try:
-                return litellm.model_cost[name]["max_output_tokens"]
+                litellm_max_output_tokens = litellm.model_cost[name][
+                    "max_output_tokens"
+                ]
+                if litellm_max_output_tokens < max_output_tokens:
+                    max_output_tokens = litellm_max_output_tokens
+                return max_output_tokens
             except Exception:
                 continue
 
         # Log which lookups we tried
         logging.warning(
             f"Couldn't find model {self.model} in litellm's model list (tried: {', '.join(self._get_model_name_variants_for_lookup())}), "
-            f"using default 4096 tokens for max_output_tokens. "
+            f"using {max_output_tokens} tokens for max_output_tokens. "
             f"To override, set OVERRIDE_MAX_OUTPUT_TOKEN environment variable to the correct value for your model."
         )
-        return 4096
+        return max_output_tokens
 
     def _add_cache_control_to_last_message(
         self, messages: List[Dict[str, Any]]
@@ -349,7 +357,7 @@
         if content is None:
             return
 
-        if isinstance(content, str):
+        if isinstance(content, str) and content:
             # Convert string to structured format with cache_control
             target_msg["content"] = [
                 {
@@ -520,13 +528,14 @@ class LLMModelRegistry:
     def _create_robusta_model_entry(
         self, model_name: str, args: Optional[dict[str, Any]] = None
    ) -> dict[str, Any]:
-        return self._create_model_entry(
+        entry = self._create_model_entry(
             model="gpt-4o",  # Robusta AI model is using openai like API.
             model_name=model_name,
             base_url=f"{ROBUSTA_API_ENDPOINT}/llm/{model_name}",
             is_robusta_model=True,
-            args=args or {},
         )
+        entry["custom_args"] = args or {}  # type: ignore[assignment]
+        return entry
 
     def _create_model_entry(
         self,
@@ -534,18 +543,13 @@
         model_name: str,
         base_url: Optional[str] = None,
         is_robusta_model: Optional[bool] = None,
-        args: Optional[dict[str, Any]] = None,
     ) -> dict[str, Any]:
-        entry = {
+        return {
             "name": model_name,
             "base_url": base_url,
             "is_robusta_model": is_robusta_model,
             "model": model,
         }
-        if args:
-            entry["custom_args"] = args  # type: ignore[assignment]
-
-        return entry
 
 
 def get_llm_usage(
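
The get_maximum_output_token change replaces the flat 4096-token fallback: the ceiling now starts at one fifth of the model's context window (capped at 64,000) and is only lowered if litellm reports a smaller max_output_tokens for that model. A small sketch of the same arithmetic follows; it is illustrative only, and the real method also honors the OVERRIDE_MAX_OUTPUT_TOKEN environment variable.

# Illustrative sketch of the new max-output-token calculation (not holmes code).
from math import floor
from typing import Optional

def max_output_tokens(context_window: int, litellm_max: Optional[int] = None) -> int:
    ceiling = floor(min(64000, context_window / 5))  # 1/5 of context, never above 64k
    if litellm_max is not None and litellm_max < ceiling:
        ceiling = litellm_max  # litellm's per-model limit can only lower the ceiling
    return ceiling

print(max_output_tokens(128_000))                      # 25600
print(max_output_tokens(200_000, litellm_max=32_768))  # 32768
print(max_output_tokens(1_000_000))                    # 64000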

{holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/models.py
@@ -1,9 +1,87 @@
+import json
 from holmes.core.investigation_structured_output import InputSectionsDataType
-from holmes.core.tool_calling_llm import ToolCallResult
 from typing import Optional, List, Dict, Any, Union
 from pydantic import BaseModel, model_validator, Field
 from enum import Enum
 
+from holmes.core.tools import StructuredToolResult, StructuredToolResultStatus
+
+
+class TruncationMetadata(BaseModel):
+    tool_call_id: str
+    start_index: int
+    end_index: int
+    tool_name: str
+    original_token_count: int
+
+
+class TruncationResult(BaseModel):
+    truncated_messages: list[dict]
+    truncations: list[TruncationMetadata]
+
+
+class ToolCallResult(BaseModel):
+    tool_call_id: str
+    tool_name: str
+    description: str
+    result: StructuredToolResult
+    size: Optional[int] = None
+
+    def as_tool_call_message(self):
+        return {
+            "tool_call_id": self.tool_call_id,
+            "role": "tool",
+            "name": self.tool_name,
+            "content": format_tool_result_data(self.result),
+        }
+
+    def as_tool_result_response(self):
+        result_dump = self.result.model_dump()
+        result_dump["data"] = self.result.get_stringified_data()
+
+        return {
+            "tool_call_id": self.tool_call_id,
+            "tool_name": self.tool_name,
+            "description": self.description,
+            "role": "tool",
+            "result": result_dump,
+        }
+
+    def as_streaming_tool_result_response(self):
+        result_dump = self.result.model_dump()
+        result_dump["data"] = self.result.get_stringified_data()
+
+        return {
+            "tool_call_id": self.tool_call_id,
+            "role": "tool",
+            "description": self.description,
+            "name": self.tool_name,
+            "result": result_dump,
+        }
+
+
+def format_tool_result_data(tool_result: StructuredToolResult) -> str:
+    tool_response = tool_result.data
+    if isinstance(tool_result.data, str):
+        tool_response = tool_result.data
+    else:
+        try:
+            if isinstance(tool_result.data, BaseModel):
+                tool_response = tool_result.data.model_dump_json(indent=2)
+            else:
+                tool_response = json.dumps(tool_result.data, indent=2)
+        except Exception:
+            tool_response = str(tool_result.data)
+    if tool_result.status == StructuredToolResultStatus.ERROR:
+        tool_response = f"{tool_result.error or 'Tool execution failed'}:\n\n{tool_result.data or ''}".strip()
+
+    if tool_result.params:
+        tool_response = (
+            f"Params used for the tool call: {json.dumps(tool_result.params)}. The tool call output follows on the next line.\n"
+            + tool_response
+        )
+    return tool_response
+
 
 class InvestigationResult(BaseModel):
     analysis: Optional[str] = None
@@ -87,10 +165,31 @@ class ConversationRequest(BaseModel):
     include_tool_call_results: bool = False
 
 
+class PendingToolApproval(BaseModel):
+    """Represents a tool call that requires user approval."""
+
+    tool_call_id: str
+    tool_name: str
+    description: str
+    params: Dict[str, Any]
+
+
+class ToolApprovalDecision(BaseModel):
+    """Represents a user's decision on a tool approval."""
+
+    tool_call_id: str
+    approved: bool
+
+
 class ChatRequestBaseModel(BaseModel):
     conversation_history: Optional[list[dict]] = None
     model: Optional[str] = None
     stream: bool = Field(default=False)
+    enable_tool_approval: Optional[bool] = (
+        False  # Optional boolean for backwards compatibility
+    )
+    tool_decisions: Optional[List[ToolApprovalDecision]] = None
+    additional_system_prompt: Optional[str] = None
 
     # In our setup with litellm, the first message in conversation_history
     # should follow the structure [{"role": "system", "content": ...}],
@@ -146,6 +245,7 @@ class ChatResponse(BaseModel):
     conversation_history: list[dict]
     tool_calls: Optional[List[ToolCallResult]] = []
     follow_up_actions: Optional[List[FollowUpAction]] = []
+    pending_approvals: Optional[List[PendingToolApproval]] = None
    metadata: Optional[Dict[Any, Any]] = None
 
 
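
Taken together, the new models describe a two-step approval flow: a request sent with enable_tool_approval=True may come back with pending_approvals on the ChatResponse, and the follow-up request carries the user's verdicts as tool_decisions. The sketch below covers only the request side and uses the fields defined above; the HTTP endpoint, the ask field name, and the server-side handling are outside this diff and are assumptions here.

# Illustrative request payloads only; field names beyond those defined above are assumptions.
from holmes.core.models import ToolApprovalDecision

first_request = {
    "ask": "restart the failing deployment",  # hypothetical prompt field on the concrete request model
    "enable_tool_approval": True,
    "additional_system_prompt": "Prefer read-only commands unless explicitly approved.",
}

# If the response lists pending_approvals, answer them in a follow-up request.
follow_up_request = {
    "conversation_history": [],  # the history returned with the previous response
    "tool_decisions": [
        ToolApprovalDecision(tool_call_id="call_123", approved=True).model_dump()
    ],
}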

{holmesgpt-0.14.2 → holmesgpt-0.14.3a0}/holmes/core/supabase_dal.py
@@ -1,5 +1,6 @@
 import base64
 import binascii
+import gzip
 import json
 import logging
 import os
@@ -7,7 +8,6 @@ import threading
 from datetime import datetime, timedelta
 from typing import Dict, List, Optional, Tuple
 from uuid import uuid4
-import gzip
 
 import yaml  # type: ignore
 from cachetools import TTLCache  # type: ignore
@@ -30,6 +30,9 @@ from holmes.core.resource_instruction import (
     ResourceInstructionDocument,
     ResourceInstructions,
 )
+from holmes.core.truncation.dal_truncation_utils import (
+    truncate_evidences_entities_if_necessary,
+)
 from holmes.utils.definitions import RobustaConfig
 from holmes.utils.env import get_env_replacement
 from holmes.utils.global_instructions import Instructions
@@ -46,6 +49,9 @@ HOLMES_TOOLSET = "HolmesToolsStatus"
 SCANS_META_TABLE = "ScansMeta"
 SCANS_RESULTS_TABLE = "ScansResults"
 
+ENRICHMENT_BLACKLIST = ["text_file", "graph", "ai_analysis", "holmes"]
+ENRICHMENT_BLACKLIST_SET = set(ENRICHMENT_BLACKLIST)
+
 
 
 class RobustaToken(BaseModel):
@@ -60,7 +66,7 @@ class SupabaseDal:
         self.enabled = self.__init_config()
         self.cluster = cluster
         if not self.enabled:
-            logging.info(
+            logging.debug(
                 "Not connecting to Robusta platform - robusta token not provided - using ROBUSTA_AI will not be possible"
             )
             return
@@ -118,7 +124,7 @@
         )
 
         if not os.path.exists(config_file_path):
-            logging.info(f"No robusta config in {config_file_path}")
+            logging.debug(f"No robusta config in {config_file_path}")
            return None
 
         logging.info(f"loading config {config_file_path}")
@@ -262,11 +268,14 @@
                 .select("*")
                 .eq("account_id", self.account_id)
                 .in_("issue_id", changes_ids)
+                .not_.in_("enrichment_type", ENRICHMENT_BLACKLIST)
                 .execute()
             )
             if not len(change_data_response.data):
                 return None
 
+            truncate_evidences_entities_if_necessary(change_data_response.data)
+
         except Exception:
             logging.exception("Supabase error while retrieving change content")
             return None
@@ -323,17 +332,17 @@
         return data
 
     def extract_relevant_issues(self, evidence):
-        enrichment_blacklist = {"text_file", "graph", "ai_analysis", "holmes"}
         data = [
             enrich
             for enrich in evidence.data
-            if enrich.get("enrichment_type") not in enrichment_blacklist
+            if enrich.get("enrichment_type") not in ENRICHMENT_BLACKLIST_SET
         ]
 
         unzipped_files = [
             self.unzip_evidence_file(enrich)
             for enrich in evidence.data
             if enrich.get("enrichment_type") == "text_file"
+            or enrich.get("enrichment_type") == "alert_raw_data"
         ]
 
         data.extend(unzipped_files)
@@ -370,12 +379,14 @@
             evidence = (
                 self.client.table(EVIDENCE_TABLE)
                 .select("*")
-                .filter("issue_id", "eq", issue_id)
+                .eq("issue_id", issue_id)
+                .not_.in_("enrichment_type", ENRICHMENT_BLACKLIST)
                 .execute()
             )
-            data = self.extract_relevant_issues(evidence)
+            relevant_evidence = self.extract_relevant_issues(evidence)
+            truncate_evidences_entities_if_necessary(relevant_evidence)
 
-            issue_data["evidence"] = data
+            issue_data["evidence"] = relevant_evidence
 
         # build issue investigation dates
         started_at = issue_data.get("starts_at")
@@ -518,10 +529,13 @@
                 self.client.table(EVIDENCE_TABLE)
                 .select("data, enrichment_type")
                 .in_("issue_id", unique_issues)
+                .not_.in_("enrichment_type", ENRICHMENT_BLACKLIST)
                 .execute()
             )
 
-            return self.extract_relevant_issues(res)
+            relevant_issues = self.extract_relevant_issues(res)
+            truncate_evidences_entities_if_necessary(relevant_issues)
+            return relevant_issues
 
        except Exception:
            logging.exception("failed to fetch workload issues data", exc_info=True)