unique_toolkit 0.8.46__tar.gz → 0.8.47__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (139)
  1. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/CHANGELOG.md +4 -0
  2. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/PKG-INFO +5 -1
  3. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/pyproject.toml +1 -1
  4. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/evals/hallucination/constants.py +3 -2
  5. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/history_manager/history_manager.py +1 -1
  6. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/language_model/functions.py +1 -2
  7. unique_toolkit-0.8.46/unique_toolkit/evaluators/__init__.py +0 -1
  8. unique_toolkit-0.8.46/unique_toolkit/evaluators/config.py +0 -26
  9. unique_toolkit-0.8.46/unique_toolkit/evaluators/constants.py +0 -1
  10. unique_toolkit-0.8.46/unique_toolkit/evaluators/context_relevancy/constants.py +0 -34
  11. unique_toolkit-0.8.46/unique_toolkit/evaluators/context_relevancy/prompts.py +0 -31
  12. unique_toolkit-0.8.46/unique_toolkit/evaluators/context_relevancy/service.py +0 -53
  13. unique_toolkit-0.8.46/unique_toolkit/evaluators/context_relevancy/utils.py +0 -156
  14. unique_toolkit-0.8.46/unique_toolkit/evaluators/exception.py +0 -5
  15. unique_toolkit-0.8.46/unique_toolkit/evaluators/hallucination/constants.py +0 -41
  16. unique_toolkit-0.8.46/unique_toolkit/evaluators/hallucination/prompts.py +0 -79
  17. unique_toolkit-0.8.46/unique_toolkit/evaluators/hallucination/service.py +0 -58
  18. unique_toolkit-0.8.46/unique_toolkit/evaluators/hallucination/utils.py +0 -212
  19. unique_toolkit-0.8.46/unique_toolkit/evaluators/output_parser.py +0 -30
  20. unique_toolkit-0.8.46/unique_toolkit/evaluators/schemas.py +0 -82
  21. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/LICENSE +0 -0
  22. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/README.md +0 -0
  23. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/__init__.py +0 -0
  24. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/_common/_base_service.py +0 -0
  25. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/_common/_time_utils.py +0 -0
  26. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/_common/base_model_type_attribute.py +0 -0
  27. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/_common/chunk_relevancy_sorter/config.py +0 -0
  28. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/_common/chunk_relevancy_sorter/exception.py +0 -0
  29. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/_common/chunk_relevancy_sorter/schemas.py +0 -0
  30. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/_common/chunk_relevancy_sorter/service.py +0 -0
  31. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/_common/chunk_relevancy_sorter/tests/test_service.py +0 -0
  32. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/_common/default_language_model.py +0 -0
  33. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/_common/endpoint_builder.py +0 -0
  34. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/_common/endpoint_requestor.py +0 -0
  35. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/_common/exception.py +0 -0
  36. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/_common/feature_flags/schema.py +0 -0
  37. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/_common/pydantic_helpers.py +0 -0
  38. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/_common/token/image_token_counting.py +0 -0
  39. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/_common/token/token_counting.py +0 -0
  40. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/_common/utils/structured_output/schema.py +0 -0
  41. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/_common/validate_required_values.py +0 -0
  42. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/_common/validators.py +0 -0
  43. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/app/__init__.py +0 -0
  44. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/app/dev_util.py +0 -0
  45. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/app/init_logging.py +0 -0
  46. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/app/init_sdk.py +0 -0
  47. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/app/performance/async_tasks.py +0 -0
  48. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/app/performance/async_wrapper.py +0 -0
  49. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/app/schemas.py +0 -0
  50. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/app/unique_settings.py +0 -0
  51. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/app/verification.py +0 -0
  52. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/chat/__init__.py +0 -0
  53. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/chat/constants.py +0 -0
  54. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/chat/functions.py +0 -0
  55. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/chat/schemas.py +0 -0
  56. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/chat/service.py +0 -0
  57. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/chat/state.py +0 -0
  58. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/chat/utils.py +0 -0
  59. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/content/__init__.py +0 -0
  60. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/content/constants.py +0 -0
  61. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/content/functions.py +0 -0
  62. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/content/schemas.py +0 -0
  63. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/content/service.py +0 -0
  64. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/content/utils.py +0 -0
  65. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/debug_info_manager/debug_info_manager.py +0 -0
  66. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/embedding/__init__.py +0 -0
  67. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/embedding/constants.py +0 -0
  68. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/embedding/functions.py +0 -0
  69. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/embedding/schemas.py +0 -0
  70. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/embedding/service.py +0 -0
  71. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/embedding/utils.py +0 -0
  72. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/evals/config.py +0 -0
  73. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/evals/context_relevancy/prompts.py +0 -0
  74. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/evals/context_relevancy/schema.py +0 -0
  75. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/evals/context_relevancy/service.py +0 -0
  76. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/evals/evaluation_manager.py +0 -0
  77. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/evals/exception.py +0 -0
  78. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/evals/hallucination/hallucination_evaluation.py +0 -0
  79. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/evals/hallucination/prompts.py +0 -0
  80. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/evals/hallucination/service.py +0 -0
  81. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/evals/hallucination/utils.py +0 -0
  82. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/evals/output_parser.py +0 -0
  83. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/evals/schemas.py +0 -0
  84. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/evals/tests/test_context_relevancy_service.py +0 -0
  85. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/evals/tests/test_output_parser.py +0 -0
  86. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/framework_utilities/__init__.py +0 -0
  87. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/framework_utilities/langchain/client.py +0 -0
  88. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/framework_utilities/langchain/history.py +0 -0
  89. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/framework_utilities/openai/__init__.py +0 -0
  90. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/framework_utilities/openai/client.py +0 -0
  91. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/framework_utilities/openai/message_builder.py +0 -0
  92. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/framework_utilities/utils.py +0 -0
  93. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/history_manager/history_construction_with_contents.py +0 -0
  94. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/history_manager/loop_token_reducer.py +0 -0
  95. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/history_manager/utils.py +0 -0
  96. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/language_model/__init__.py +0 -0
  97. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/language_model/builder.py +0 -0
  98. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/language_model/constants.py +0 -0
  99. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/language_model/infos.py +0 -0
  100. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/language_model/prompt.py +0 -0
  101. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/language_model/reference.py +0 -0
  102. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/language_model/schemas.py +0 -0
  103. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/language_model/service.py +0 -0
  104. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/language_model/utils.py +0 -0
  105. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/postprocessor/postprocessor_manager.py +0 -0
  106. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/protocols/support.py +0 -0
  107. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/reference_manager/reference_manager.py +0 -0
  108. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/short_term_memory/__init__.py +0 -0
  109. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/short_term_memory/constants.py +0 -0
  110. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/short_term_memory/functions.py +0 -0
  111. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/short_term_memory/persistent_short_term_memory_manager.py +0 -0
  112. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/short_term_memory/schemas.py +0 -0
  113. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/short_term_memory/service.py +0 -0
  114. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/smart_rules/__init__.py +0 -0
  115. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/smart_rules/compile.py +0 -0
  116. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/thinking_manager/thinking_manager.py +0 -0
  117. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/tools/a2a/__init__.py +0 -0
  118. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/tools/a2a/config.py +0 -0
  119. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/tools/a2a/manager.py +0 -0
  120. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/tools/a2a/memory.py +0 -0
  121. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/tools/a2a/schema.py +0 -0
  122. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/tools/a2a/service.py +0 -0
  123. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/tools/agent_chunks_hanlder.py +0 -0
  124. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/tools/config.py +0 -0
  125. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/tools/factory.py +0 -0
  126. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/tools/mcp/__init__.py +0 -0
  127. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/tools/mcp/manager.py +0 -0
  128. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/tools/mcp/models.py +0 -0
  129. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/tools/mcp/tool_wrapper.py +0 -0
  130. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/tools/schemas.py +0 -0
  131. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/tools/test/test_mcp_manager.py +0 -0
  132. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/tools/test/test_tool_progress_reporter.py +0 -0
  133. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/tools/tool.py +0 -0
  134. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/tools/tool_manager.py +0 -0
  135. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/tools/tool_progress_reporter.py +0 -0
  136. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/tools/utils/execution/execution.py +0 -0
  137. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/tools/utils/source_handling/schema.py +0 -0
  138. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/tools/utils/source_handling/source_formatting.py +0 -0
  139. {unique_toolkit-0.8.46 → unique_toolkit-0.8.47}/unique_toolkit/tools/utils/source_handling/tests/test_source_formatting.py +0 -0
CHANGELOG.md
@@ -5,6 +5,10 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.8.47] - 2025-09-05
+- Removed old code
+- Fixed small bugs in history manager & set the hallucination to use gpt4o as default.
+
 ## [0.8.46] - 2025-09-04
 - Bugfix for hostname identification inside Unique cluster in `unique_settings.py`
 
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: unique_toolkit
-Version: 0.8.46
+Version: 0.8.47
 Summary:
 License: Proprietary
 Author: Cedric Klinkert
@@ -117,6 +117,10 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.8.47] - 2025-09-05
+- Removed old code
+- Fixed small bugs in history manager & set the hallucination to use gpt4o as default.
+
 ## [0.8.46] - 2025-09-04
 - Bugfix for hostname identification inside Unique cluster in `unique_settings.py`
 
pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "unique_toolkit"
-version = "0.8.46"
+version = "0.8.47"
 description = ""
 authors = [
     "Cedric Klinkert <cedric.klinkert@unique.ch>",
unique_toolkit/evals/hallucination/constants.py
@@ -2,6 +2,7 @@ from typing import Any
 
 from pydantic import Field
 
+from unique_toolkit._common.default_language_model import DEFAULT_GPT_4o
 from unique_toolkit._common.validators import LMI
 from unique_toolkit.evals.config import EvaluationMetricConfig
 from unique_toolkit.evals.hallucination.prompts import (
@@ -14,7 +15,7 @@ from unique_toolkit.evals.schemas import (
     EvaluationMetricInputFieldName,
     EvaluationMetricName,
 )
-from unique_toolkit.language_model.infos import LanguageModelInfo, LanguageModelName
+from unique_toolkit.language_model.infos import LanguageModelInfo
 
 SYSTEM_MSG_KEY = "systemPrompt"
 USER_MSG_KEY = "userPrompt"
@@ -26,7 +27,7 @@ class HallucinationConfig(EvaluationMetricConfig):
     enabled: bool = False
     name: EvaluationMetricName = EvaluationMetricName.HALLUCINATION
     language_model: LMI = LanguageModelInfo.from_name(
-        LanguageModelName.AZURE_GPT_35_TURBO_0125,
+        DEFAULT_GPT_4o,
    )
     additional_llm_options: dict[str, Any] = Field(
         default={},
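
Note: the net effect of this hunk is that a freshly constructed HallucinationConfig now defaults to GPT-4o instead of GPT-3.5 Turbo. A minimal usage sketch, assuming only the names visible in the hunk above (not verified against the package docs):

    from unique_toolkit.evals.hallucination.constants import HallucinationConfig

    config = HallucinationConfig()  # evaluation is still disabled by default
    # config.language_model is now built from DEFAULT_GPT_4o rather than
    # LanguageModelName.AZURE_GPT_35_TURBO_0125
    print(config.language_model)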
unique_toolkit/history_manager/history_manager.py
@@ -36,7 +36,7 @@ class UploadedContentConfig(BaseModel):
     percent_for_uploaded_content: float = Field(
         default=0.6,
         ge=0.0,
-        lt=1.0,
+        le=1.0,
         description="The fraction of the max input tokens that will be reserved for the uploaded content.",
     )
 
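
Note: this is the history-manager bugfix mentioned in the changelog. Changing lt=1.0 to le=1.0 makes the upper bound inclusive, so reserving the full input-token budget for uploaded content now validates. A standalone sketch with plain Pydantic (mirroring the field above, not the toolkit's actual class):

    from pydantic import BaseModel, Field, ValidationError

    class UploadedContentConfigSketch(BaseModel):
        # le=1.0: the bound is inclusive, so exactly 1.0 is accepted
        percent_for_uploaded_content: float = Field(default=0.6, ge=0.0, le=1.0)

    UploadedContentConfigSketch(percent_for_uploaded_content=1.0)  # valid as of 0.8.47

    class OldBoundSketch(BaseModel):
        # lt=1.0: the pre-0.8.47 constraint rejected exactly 1.0
        percent_for_uploaded_content: float = Field(default=0.6, ge=0.0, lt=1.0)

    try:
        OldBoundSketch(percent_for_uploaded_content=1.0)
    except ValidationError:
        pass  # raised under the old strict bound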
unique_toolkit/language_model/functions.py
@@ -10,7 +10,6 @@ from pydantic import BaseModel
 
 from unique_toolkit.chat.schemas import ChatMessage, ChatMessageRole
 from unique_toolkit.content.schemas import ContentChunk, ContentReference
-from unique_toolkit.evaluators import DOMAIN_NAME
 from unique_toolkit.language_model import (
     LanguageModelMessageRole,
     LanguageModelMessages,
@@ -34,7 +33,7 @@ from .constants import (
     DEFAULT_COMPLETE_TIMEOUT,
 )
 
-logger = logging.getLogger(f"toolkit.{DOMAIN_NAME}.{__name__}")
+logger = logging.getLogger(f"toolkit.language_model.{__name__}")
 
 
 def complete(
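
Note: the logger previously borrowed DOMAIN_NAME from the evaluators package that this release deletes, which filed these records under the toolkit.evaluators hierarchy; they now live under toolkit.language_model. A small standard-library sketch of re-subscribing after the rename (the handler setup is an assumption; only the logger name comes from the diff):

    import logging

    # Handlers attached to the old "toolkit.evaluators" hierarchy no longer
    # see these records; subscribe to the renamed hierarchy instead.
    logging.basicConfig(level=logging.INFO)
    logging.getLogger("toolkit.language_model").setLevel(logging.DEBUG)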
unique_toolkit/evaluators/__init__.py (removed)
@@ -1 +0,0 @@
-from .constants import DOMAIN_NAME as DOMAIN_NAME
unique_toolkit/evaluators/config.py (removed)
@@ -1,26 +0,0 @@
-from humps import camelize
-from pydantic import BaseModel, ConfigDict
-
-from unique_toolkit._common.validators import LMI, LanguageModelInfo
-from unique_toolkit.evaluators.schemas import (
-    EvaluationMetricName,
-)
-from unique_toolkit.language_model.infos import (
-    LanguageModelName,
-)
-
-
-class EvaluationMetricConfig(BaseModel):
-    model_config = ConfigDict(
-        alias_generator=camelize,
-        populate_by_name=True,
-        validate_default=True,
-    )
-
-    enabled: bool = False
-    name: EvaluationMetricName
-    language_model: LMI = LanguageModelInfo.from_name(
-        LanguageModelName.AZURE_GPT_35_TURBO_0125,
-    )
-    custom_prompts: dict[str, str] = {}
-    score_to_emoji: dict[str, str] = {}
unique_toolkit/evaluators/constants.py (removed)
@@ -1 +0,0 @@
-DOMAIN_NAME = "evaluators"
unique_toolkit/evaluators/context_relevancy/constants.py (removed)
@@ -1,34 +0,0 @@
-from unique_toolkit.evaluators.config import EvaluationMetricConfig
-from unique_toolkit.evaluators.context_relevancy.prompts import (
-    CONTEXT_RELEVANCY_METRIC_SYSTEM_MSG,
-    CONTEXT_RELEVANCY_METRIC_USER_MSG,
-)
-from unique_toolkit.evaluators.schemas import (
-    EvaluationMetricInputFieldName,
-    EvaluationMetricName,
-)
-from unique_toolkit.language_model.infos import LanguageModelInfo
-from unique_toolkit.language_model.service import LanguageModelName
-
-SYSTEM_MSG_KEY = "systemPrompt"
-USER_MSG_KEY = "userPrompt"
-
-# Required input fields for context relevancy evaluation
-context_relevancy_required_input_fields = [
-    EvaluationMetricInputFieldName.INPUT_TEXT,
-    EvaluationMetricInputFieldName.CONTEXT_TEXTS,
-]
-
-
-default_config = EvaluationMetricConfig(
-    enabled=False,
-    name=EvaluationMetricName.CONTEXT_RELEVANCY,
-    language_model=LanguageModelInfo.from_name(
-        LanguageModelName.AZURE_GPT_35_TURBO_0125
-    ),
-    score_to_emoji={"LOW": "🟢", "MEDIUM": "🟡", "HIGH": "🔴"},
-    custom_prompts={
-        SYSTEM_MSG_KEY: CONTEXT_RELEVANCY_METRIC_SYSTEM_MSG,
-        USER_MSG_KEY: CONTEXT_RELEVANCY_METRIC_USER_MSG,
-    },
-)
unique_toolkit/evaluators/context_relevancy/prompts.py (removed)
@@ -1,31 +0,0 @@
-CONTEXT_RELEVANCY_METRIC_SYSTEM_MSG = """
-You will receive an input and a set of contexts.
-Your task is to evaluate how relevant the contexts are to the input text.
-
-Use the following rating scale to generate a score:
-[low] - The contexts are not relevant to the input.
-[medium] - The contexts are somewhat relevant to the input.
-[high] - The contexts are highly relevant to the input.
-
-Your answer must be in JSON format:
-{
-    "reason": Your explanation of your judgement of the evaluation,
-    "value": decision, must be one of the following ["low", "medium", "high"]
-}
-"""
-
-CONTEXT_RELEVANCY_METRIC_USER_MSG = """
-Here is the data:
-
-Input:
-'''
-$input_text
-'''
-
-Contexts:
-'''
-$context_texts
-'''
-
-Answer as JSON:
-"""
unique_toolkit/evaluators/context_relevancy/service.py (removed)
@@ -1,53 +0,0 @@
-from logging import Logger
-
-from unique_toolkit.app.schemas import Event
-from unique_toolkit.evaluators.config import EvaluationMetricConfig
-from unique_toolkit.evaluators.context_relevancy.constants import default_config
-from unique_toolkit.evaluators.context_relevancy.utils import (
-    check_context_relevancy_async,
-)
-from unique_toolkit.evaluators.schemas import (
-    EvaluationMetricInput,
-    EvaluationMetricResult,
-)
-
-
-class ContextRelevancyEvaluator:
-    def __init__(
-        self,
-        event: Event,
-        logger: Logger,
-    ):
-        self.event = event
-        self.logger = logger
-
-    async def run(
-        self,
-        input: EvaluationMetricInput,
-        config: EvaluationMetricConfig = default_config,
-    ) -> EvaluationMetricResult | None:
-        """
-        Analyzes the level of relevancy of a context by comparing
-        it with the input text.
-
-        Args:
-            input (EvaluationMetricInput): The input for the metric.
-            config (EvaluationMetricConfig): The configuration for the metric.
-
-        Returns:
-            EvaluationMetricResult | None: The result of the evaluation, indicating the level of context relevancy.
-            Returns None if the metric is not enabled.
-
-        Raises:
-            EvaluatorException: If required fields are missing or an error occurs during evaluation.
-        """
-        if config.enabled is False:
-            self.logger.info("Context relevancy metric is not enabled.")
-            return None
-
-        return await check_context_relevancy_async(
-            company_id=self.event.company_id,
-            input=input,
-            config=config,
-            logger=self.logger,
-        )
unique_toolkit/evaluators/context_relevancy/utils.py (removed)
@@ -1,156 +0,0 @@
-import logging
-from string import Template
-
-from unique_toolkit.evaluators.config import (
-    EvaluationMetricConfig,
-)
-from unique_toolkit.evaluators.context_relevancy.constants import (
-    SYSTEM_MSG_KEY,
-    USER_MSG_KEY,
-    context_relevancy_required_input_fields,
-)
-from unique_toolkit.evaluators.context_relevancy.prompts import (
-    CONTEXT_RELEVANCY_METRIC_SYSTEM_MSG,
-    CONTEXT_RELEVANCY_METRIC_USER_MSG,
-)
-from unique_toolkit.evaluators.exception import EvaluatorException
-from unique_toolkit.evaluators.output_parser import (
-    parse_eval_metric_result,
-)
-from unique_toolkit.evaluators.schemas import (
-    EvaluationMetricInput,
-    EvaluationMetricName,
-    EvaluationMetricResult,
-)
-from unique_toolkit.language_model import LanguageModelName
-from unique_toolkit.language_model.schemas import (
-    LanguageModelMessages,
-    LanguageModelSystemMessage,
-    LanguageModelUserMessage,
-)
-from unique_toolkit.language_model.service import LanguageModelService
-
-logger = logging.getLogger(__name__)
-
-
-async def check_context_relevancy_async(
-    company_id: str,
-    evaluation_metric_input: EvaluationMetricInput,
-    config: EvaluationMetricConfig,
-    logger: logging.Logger = logger,
-) -> EvaluationMetricResult | None:
-    """Analyzes the relevancy of the context provided for the given evaluation_metric_input and output.
-
-    The analysis classifies the context relevancy level as:
-    - low
-    - medium
-    - high
-
-    This method performs the following steps:
-    1. Logs the start of the analysis using the provided `logger`.
-    2. Validates the required fields in the `evaluation_metric_input` data.
-    3. Retrieves the messages using the `_get_msgs` method.
-    4. Calls `LanguageModelService.complete_async_util` to get a completion result.
-    5. Parses and returns the evaluation metric result based on the content of the completion result.
-
-    Args:
-        company_id (str): The company ID for the analysis.
-        evaluation_metric_input (EvaluationMetricInput): The evaluation_metric_input data used for evaluation, including the generated output and reference information.
-        config (EvaluationMetricConfig): Configuration settings for the evaluation.
-        logger (Optional[logging.Logger], optional): The logger used for logging information and errors. Defaults to the logger for the current module.
-
-    Returns:
-        EvaluationMetricResult | None: The result of the evaluation, indicating the level of context relevancy. Returns `None` if an error occurs.
-
-    Raises:
-        EvaluatorException: If required fields are missing or an error occurs during the evaluation.
-
-    """
-    model_group_name = (
-        config.language_model.name.value
-        if isinstance(config.language_model.name, LanguageModelName)
-        else config.language_model.name
-    )
-    logger.info(f"Analyzing context relevancy with {model_group_name}.")
-
-    evaluation_metric_input.validate_required_fields(
-        context_relevancy_required_input_fields,
-    )
-
-    if (
-        evaluation_metric_input.context_texts
-        and len(evaluation_metric_input.context_texts) == 0
-    ):
-        error_message = "No context texts provided."
-        raise EvaluatorException(
-            user_message=error_message,
-            error_message=error_message,
-        )
-
-    try:
-        msgs = _get_msgs(evaluation_metric_input, config)
-        result = await LanguageModelService.complete_async_util(
-            company_id=company_id,
-            messages=msgs,
-            model_name=model_group_name,
-        )
-        result_content = result.choices[0].message.content
-        if not result_content:
-            error_message = "Context relevancy evaluation did not return a result."
-            raise EvaluatorException(
-                error_message=error_message,
-                user_message=error_message,
-            )
-        return parse_eval_metric_result(
-            result_content,  # type: ignore
-            EvaluationMetricName.CONTEXT_RELEVANCY,
-        )
-    except Exception as e:
-        error_message = "Error occurred during context relevancy metric analysis"
-        raise EvaluatorException(
-            error_message=f"{error_message}: {e}",
-            user_message=error_message,
-            exception=e,
-        )
-
-
-def _get_msgs(
-    evaluation_metric_input: EvaluationMetricInput,
-    config: EvaluationMetricConfig,
-) -> LanguageModelMessages:
-    """Composes the messages for context relevancy analysis.
-
-    The messages are based on the provided evaluation_metric_input and configuration.
-
-    Args:
-        evaluation_metric_input (EvaluationMetricInput): The evaluation_metric_input data that includes context texts for the analysis.
-        config (EvaluationMetricConfig): The configuration settings for composing messages.
-
-    Returns:
-        LanguageModelMessages: The composed messages as per the provided evaluation_metric_input and configuration.
-
-    """
-    system_msg_content = _get_system_prompt(config)
-    system_msg = LanguageModelSystemMessage(content=system_msg_content)
-
-    user_msg_templ = Template(_get_user_prompt(config))
-    user_msg_content = user_msg_templ.substitute(
-        evaluation_metric_input_text=evaluation_metric_input.evaluation_metric_input_text,
-        contexts_text=evaluation_metric_input.get_joined_context_texts(),
-    )
-    user_msg = LanguageModelUserMessage(content=user_msg_content)
-    return LanguageModelMessages([system_msg, user_msg])
-
-
-def _get_system_prompt(config: EvaluationMetricConfig):
-    return config.custom_prompts.setdefault(
-        SYSTEM_MSG_KEY,
-        CONTEXT_RELEVANCY_METRIC_SYSTEM_MSG,
-    )
-
-
-def _get_user_prompt(config: EvaluationMetricConfig):
-    return config.custom_prompts.setdefault(
-        USER_MSG_KEY,
-        CONTEXT_RELEVANCY_METRIC_USER_MSG,
-    )
unique_toolkit/evaluators/exception.py (removed)
@@ -1,5 +0,0 @@
-from unique_toolkit._common.exception import CommonException
-
-
-class EvaluatorException(CommonException):
-    pass
unique_toolkit/evaluators/hallucination/constants.py (removed)
@@ -1,41 +0,0 @@
-from unique_toolkit.evaluators.config import EvaluationMetricConfig
-from unique_toolkit.evaluators.hallucination.prompts import (
-    HALLUCINATION_METRIC_SYSTEM_MSG,
-    HALLUCINATION_METRIC_SYSTEM_MSG_DEFAULT,
-    HALLUCINATION_METRIC_USER_MSG,
-    HALLUCINATION_METRIC_USER_MSG_DEFAULT,
-)
-from unique_toolkit.evaluators.schemas import (
-    EvaluationMetricInputFieldName,
-    EvaluationMetricName,
-)
-from unique_toolkit.language_model.infos import (
-    LanguageModelInfo,
-    LanguageModelName,
-)
-
-SYSTEM_MSG_KEY = "systemPrompt"
-USER_MSG_KEY = "userPrompt"
-SYSTEM_MSG_DEFAULT_KEY = "systemPromptDefault"
-USER_MSG_DEFAULT_KEY = "userPromptDefault"
-
-
-hallucination_metric_default_config = EvaluationMetricConfig(
-    enabled=False,
-    name=EvaluationMetricName.HALLUCINATION,
-    language_model=LanguageModelInfo.from_name(LanguageModelName.AZURE_GPT_4_0613),
-    score_to_emoji={"LOW": "🟢", "MEDIUM": "🟡", "HIGH": "🔴"},
-    custom_prompts={
-        SYSTEM_MSG_KEY: HALLUCINATION_METRIC_SYSTEM_MSG,
-        USER_MSG_KEY: HALLUCINATION_METRIC_USER_MSG,
-        SYSTEM_MSG_DEFAULT_KEY: HALLUCINATION_METRIC_SYSTEM_MSG_DEFAULT,
-        USER_MSG_DEFAULT_KEY: HALLUCINATION_METRIC_USER_MSG_DEFAULT,
-    },
-)
-
-hallucination_required_input_fields = [
-    EvaluationMetricInputFieldName.INPUT_TEXT,
-    EvaluationMetricInputFieldName.CONTEXT_TEXTS,
-    EvaluationMetricInputFieldName.HISTORY_MESSAGES,
-    EvaluationMetricInputFieldName.OUTPUT_TEXT,
-]
unique_toolkit/evaluators/hallucination/prompts.py (removed)
@@ -1,79 +0,0 @@
-HALLUCINATION_METRIC_SYSTEM_MSG = """
-You will receive a question, references, a conversation between a user and an agent, and an output.
-The output is the answer to the question.
-Your task is to evaluate if the output is fully supported by the information provided in the references and conversation, and provide explanations on your judgement in 2 sentences.
-
-Use the following entailment scale to generate a score:
-[low] - All information in output is supported by the references/conversation, or extractions from the references/conversation.
-[medium] - The output is supported by the references/conversation to some extent, but there is at least some information in the output that is not discussed in the references/conversation. For example, if an instruction asks about two concepts and the references/conversation only discusses either of them, it should be considered a [medium] hallucination level.
-[high] - The output contains information that is not part of the references/conversation, is unrelated to the references/conversation, or contradicts the references/conversation.
-
-Make sure to not use any external information/knowledge to judge whether the output is true or not. Only check whether the output is supported by the references/conversation, and not whether the output is correct or not. Also do not evaluate if the references/conversation contain further information that is not part of the output but could be relevant to the qestion.
-
-Your answer must be in JSON format:
-{
-    "reason": Your explanation of your judgement of the evaluation,
-    "value": decision, must be one of the following: ["high", "medium", "low"]
-}
-"""
-
-HALLUCINATION_METRIC_USER_MSG = """
-Here is the data:
-
-Input:
-'''
-$input_text
-'''
-
-References:
-'''
-$contexts_text
-'''
-
-Conversation:
-'''
-$history_messages_text
-'''
-
-Output:
-'''
-$output_text
-'''
-
-Answer as JSON:
-"""
-
-HALLUCINATION_METRIC_SYSTEM_MSG_DEFAULT = """
-You will receive a question and an output.
-The output is the answer to the question.
-The situation is that no references could be found to answer the question. Your task is to evaluate if the output contains any information to answer the question,
-and provide a short explanations of your reasoning in 2 sentences. Also mention in your explanation that no references were provided to answer the question.
-
-Use the following entailment scale to generate a score:
-[low] - The output does not contain any information to answer the question.
-[medium] - The output contains some information to answer the question, but does not answer the question entirely.
-[high] - The output answers the question.
-
-It is not considered an answer when the output relates to the questions subject. Make sure to not use any external information/knowledge to judge whether the output is true or not. Only check that the output does not answer the question, and not whether the output is correct or not.
-Your answer must be in JSON format:
-{
-    "reason": Your explanation of your reasoning of the evaluation,
-    "value": decision, must be one of the following: ["low", "medium", "high"]
-}
-"""
-
-HALLUCINATION_METRIC_USER_MSG_DEFAULT = """
-Here is the data:
-
-Input:
-'''
-$input_text
-'''
-
-Output:
-'''
-$output_text
-'''
-
-Answer as JSON:
-"""
unique_toolkit/evaluators/hallucination/service.py (removed)
@@ -1,58 +0,0 @@
-import logging
-
-from unique_toolkit.app.schemas import Event
-from unique_toolkit.evaluators.config import (
-    EvaluationMetricConfig,
-)
-from unique_toolkit.evaluators.hallucination.constants import (
-    hallucination_metric_default_config,
-)
-from unique_toolkit.evaluators.hallucination.utils import check_hallucination_async
-from unique_toolkit.evaluators.schemas import (
-    EvaluationMetricInput,
-    EvaluationMetricResult,
-)
-
-logger = logging.getLogger(__name__)
-
-
-class HallucinationEvaluator:
-    def __init__(self, event: Event, logger: logging.Logger = logger):
-        self.event = event
-        self.logger = logger
-
-    async def run(
-        self,
-        input: EvaluationMetricInput,
-        config: EvaluationMetricConfig = hallucination_metric_default_config,
-    ) -> EvaluationMetricResult | None:
-        """
-        Analyzes the level of hallucination in the generated output by comparing it with the input
-        and the provided contexts or history. The analysis classifies the hallucination level as:
-        - low
-        - medium
-        - high
-
-        If no contexts or history are referenced in the generated output, the method verifies
-        that the output does not contain any relevant information to answer the question.
-
-        This method calls `check_hallucination_async` to perform the actual analysis. The `check_hallucination_async`
-        function handles the evaluation using the company ID from the event, the provided input, and the configuration.
-
-        Args:
-            input (EvaluationMetricInput): The input data used for evaluation, including the generated output and reference information.
-            config (EvaluationMetricConfig, optional): Configuration settings for the evaluation. Defaults to `hallucination_metric_default_config`.
-
-        Returns:
-            EvaluationMetricResult | None: The result of the evaluation, indicating the level of hallucination. Returns `None` if the analysis cannot be performed.
-
-        Raises:
-            EvaluatorException: If the context texts are empty, required fields are missing, or an error occurs during the evaluation.
-        """
-        if config.enabled is False:
-            self.logger.info("Hallucination metric is not enabled.")
-            return None
-
-        return await check_hallucination_async(
-            company_id=self.event.company_id, input=input, config=config
-        )
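
Note: the deletions above remove the legacy unique_toolkit.evaluators package in its entirety (the "Removed old code" entry in the changelog). The file list at the top shows parallel modules under unique_toolkit/evals/, and the hallucination constants.py hunk already imports from them, so a plausible migration for downstream code is an import rename; a hedged sketch showing only the two imports this diff actually confirms:

    # Before 0.8.47 (package removed in this release):
    # from unique_toolkit.evaluators.config import EvaluationMetricConfig
    # from unique_toolkit.evaluators.schemas import EvaluationMetricName

    # From 0.8.47 on, per the imports visible in the constants.py hunk above:
    from unique_toolkit.evals.config import EvaluationMetricConfig
    from unique_toolkit.evals.schemas import EvaluationMetricName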