cwyodmodules 0.3.32__py3-none-any.whl → 0.3.33__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries; it is provided for informational purposes only.
Files changed (46)
  1. cwyodmodules/api/chat_history.py +14 -7
  2. cwyodmodules/batch/utilities/chat_history/auth_utils.py +7 -3
  3. cwyodmodules/batch/utilities/chat_history/cosmosdb.py +17 -1
  4. cwyodmodules/batch/utilities/chat_history/postgresdbservice.py +239 -254
  5. cwyodmodules/batch/utilities/common/source_document.py +60 -61
  6. cwyodmodules/batch/utilities/document_chunking/fixed_size_overlap.py +8 -3
  7. cwyodmodules/batch/utilities/document_chunking/layout.py +8 -3
  8. cwyodmodules/batch/utilities/document_chunking/page.py +8 -3
  9. cwyodmodules/batch/utilities/document_loading/read.py +30 -34
  10. cwyodmodules/batch/utilities/helpers/azure_computer_vision_client.py +10 -3
  11. cwyodmodules/batch/utilities/helpers/azure_form_recognizer_helper.py +6 -2
  12. cwyodmodules/batch/utilities/helpers/azure_postgres_helper.py +14 -2
  13. cwyodmodules/batch/utilities/helpers/azure_postgres_helper_light_rag.py +14 -2
  14. cwyodmodules/batch/utilities/helpers/azure_search_helper.py +15 -6
  15. cwyodmodules/batch/utilities/helpers/config/config_helper.py +24 -2
  16. cwyodmodules/batch/utilities/helpers/env_helper.py +9 -9
  17. cwyodmodules/batch/utilities/helpers/lightrag_helper.py +9 -2
  18. cwyodmodules/batch/utilities/helpers/llm_helper.py +13 -2
  19. cwyodmodules/batch/utilities/helpers/secret_helper.py +9 -9
  20. cwyodmodules/batch/utilities/integrated_vectorization/azure_search_index.py +8 -2
  21. cwyodmodules/batch/utilities/integrated_vectorization/azure_search_indexer.py +9 -2
  22. cwyodmodules/batch/utilities/integrated_vectorization/azure_search_skillset.py +6 -2
  23. cwyodmodules/batch/utilities/orchestrator/lang_chain_agent.py +8 -2
  24. cwyodmodules/batch/utilities/orchestrator/open_ai_functions.py +6 -2
  25. cwyodmodules/batch/utilities/orchestrator/orchestrator_base.py +9 -3
  26. cwyodmodules/batch/utilities/orchestrator/prompt_flow.py +8 -2
  27. cwyodmodules/batch/utilities/orchestrator/semantic_kernel_orchestrator.py +135 -138
  28. cwyodmodules/batch/utilities/parser/output_parser_tool.py +64 -64
  29. cwyodmodules/batch/utilities/plugins/outlook_calendar_plugin.py +91 -93
  30. cwyodmodules/batch/utilities/search/azure_search_handler.py +16 -3
  31. cwyodmodules/batch/utilities/search/azure_search_handler_light_rag.py +14 -2
  32. cwyodmodules/batch/utilities/search/integrated_vectorization_search_handler.py +36 -24
  33. cwyodmodules/batch/utilities/search/lightrag_search_handler.py +14 -2
  34. cwyodmodules/batch/utilities/search/postgres_search_handler.py +100 -97
  35. cwyodmodules/batch/utilities/search/postgres_search_handler_light_rag.py +103 -104
  36. cwyodmodules/batch/utilities/search/search.py +21 -24
  37. cwyodmodules/batch/utilities/tools/content_safety_checker.py +66 -78
  38. cwyodmodules/batch/utilities/tools/post_prompt_tool.py +48 -60
  39. cwyodmodules/batch/utilities/tools/question_answer_tool.py +196 -206
  40. cwyodmodules/batch/utilities/tools/text_processing_tool.py +36 -39
  41. cwyodmodules/logging_config.py +15 -0
  42. {cwyodmodules-0.3.32.dist-info → cwyodmodules-0.3.33.dist-info}/METADATA +2 -1
  43. {cwyodmodules-0.3.32.dist-info → cwyodmodules-0.3.33.dist-info}/RECORD +46 -45
  44. {cwyodmodules-0.3.32.dist-info → cwyodmodules-0.3.33.dist-info}/WHEEL +0 -0
  45. {cwyodmodules-0.3.32.dist-info → cwyodmodules-0.3.33.dist-info}/licenses/LICENSE +0 -0
  46. {cwyodmodules-0.3.32.dist-info → cwyodmodules-0.3.33.dist-info}/top_level.txt +0 -0
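The recurring change in this release, visible in the two hunks below, replaces per-module OpenTelemetry logger/tracer setup with a shared `logger` imported from the new top-level `logging_config` module (added in this version, +15 -0) and a `@logger.trace_function(log_args=..., log_result=...)` decorator driven by the `LOG_ARGS` / `LOG_RESULT` settings on `EnvHelper`. The contents of `logging_config.py` are not part of this section; a minimal sketch of what such a module could look like, purely as an assumption for orientation:

# Hypothetical sketch only: the real logging_config.py is not shown in this diff.
import functools
import logging


class TracingLogger(logging.Logger):
    """Logger subclass that also provides a trace_function decorator."""

    def trace_function(self, log_args: bool = True, log_result: bool = True):
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                # Log entry (optionally with arguments), call through, log exit.
                self.info(
                    f"Entering {func.__qualname__}"
                    + (f" args={args} kwargs={kwargs}" if log_args else "")
                )
                result = func(*args, **kwargs)
                self.info(
                    f"Exiting {func.__qualname__}"
                    + (f" result={result!r}" if log_result else "")
                )
                return result

            return wrapper

        return decorator


logging.setLoggerClass(TracingLogger)
logger = logging.getLogger("cwyodmodules")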
cwyodmodules/batch/utilities/tools/content_safety_checker.py
@@ -7,97 +7,85 @@ from ..helpers.env_helper import EnvHelper
 from .answer_processing_base import AnswerProcessingBase
 from ..common.answer import Answer
 
-from logging import getLogger
-from opentelemetry import trace, baggage
-from opentelemetry.propagate import extract
-
-# logger = getLogger("__main__" + ".base_package")
-logger = getLogger("__main__")
-# tracer = trace.get_tracer("__main__" + ".base_package")
-tracer = trace.get_tracer("__main__")
+from logging_config import logger
+env_helper: EnvHelper = EnvHelper()
+log_args = env_helper.LOG_ARGS
+log_result = env_helper.LOG_RESULT
 
 
 class ContentSafetyChecker(AnswerProcessingBase):
     def __init__(self):
-        with tracer.start_as_current_span("ContentSafetyChecker.init"):
-            env_helper = EnvHelper()
-            self.azure_identity_helper = AzureIdentityHelper()
-
-            if env_helper.AZURE_AUTH_TYPE == "rbac":
-                credential = self.azure_identity_helper.get_credential()
-                logger.info(
-                    "Initializing ContentSafetyClient with RBAC authentication."
-                )
-                self.content_safety_client = ContentSafetyClient(
-                    env_helper.AZURE_CONTENT_SAFETY_ENDPOINT,
-                    credential,
-                )
-            else:
-                logger.info(
-                    "Initializing ContentSafetyClient with AzureKeyCredential authentication."
-                )
-                self.content_safety_client = ContentSafetyClient(
-                    env_helper.AZURE_CONTENT_SAFETY_ENDPOINT,
-                    AzureKeyCredential(env_helper.AZURE_CONTENT_SAFETY_KEY),
-                )
+        self.azure_identity_helper = AzureIdentityHelper()
+
+        if env_helper.AZURE_AUTH_TYPE == "rbac":
+            credential = self.azure_identity_helper.get_credential()
+            logger.info(
+                "Initializing ContentSafetyClient with RBAC authentication."
+            )
+            self.content_safety_client = ContentSafetyClient(
+                env_helper.AZURE_CONTENT_SAFETY_ENDPOINT,
+                credential,
+            )
+        else:
+            logger.info(
+                "Initializing ContentSafetyClient with AzureKeyCredential authentication."
+            )
+            self.content_safety_client = ContentSafetyClient(
+                env_helper.AZURE_CONTENT_SAFETY_ENDPOINT,
+                AzureKeyCredential(env_helper.AZURE_CONTENT_SAFETY_KEY),
+            )
 
+    @logger.trace_function(log_args=False, log_result=False)
     def process_answer(self, answer: Answer, **kwargs: dict) -> Answer:
-        with tracer.start_as_current_span("ContentSafetyChecker.process_answer"):
-            logger.info("Processing answer.")
-            response_template = kwargs["response_template"]
-            answer.answer = self._filter_text_and_replace(
-                answer.answer, response_template
-            )
-            return answer
+        logger.info("Processing answer.")
+        response_template = kwargs["response_template"]
+        answer.answer = self._filter_text_and_replace(
+            answer.answer, response_template
+        )
+        return answer
 
+    @logger.trace_function(log_args=False, log_result=False)
     def validate_input_and_replace_if_harmful(self, text):
-        with tracer.start_as_current_span(
-            "ContentSafetyChecker.validate_input_and_replace_if_harmful"
-        ):
-            logger.info("Validating input text for harmful content")
-            response_template = f'{"Unfortunately, I am not able to process your question, as I have detected sensitive content that I am not allowed to process. This might be a mistake, so please try rephrasing your question."}'
-            return self.process_answer(
-                Answer(question="", answer=text, source_documents=[]),
-                response_template=response_template,
-            ).answer
+        logger.info("Validating input text for harmful content")
+        response_template = f'{"Unfortunately, I am not able to process your question, as I have detected sensitive content that I am not allowed to process. This might be a mistake, so please try rephrasing your question."}'
+        return self.process_answer(
+            Answer(question="", answer=text, source_documents=[]),
+            response_template=response_template,
+        ).answer
 
+    @logger.trace_function(log_args=False, log_result=False)
     def validate_output_and_replace_if_harmful(self, text):
-        with tracer.start_as_current_span(
-            "ContentSafetyChecker.validate_output_and_replace_if_harmful"
-        ):
-            logger.info("Validating output text for harmful content")
-            response_template = f'{"Unfortunately, I have detected sensitive content in my answer, which I am not allowed to show you. This might be a mistake, so please try again and maybe rephrase your question."}'
-            return self.process_answer(
-                Answer(question="", answer=text, source_documents=[]),
-                response_template=response_template,
-            ).answer
+        logger.info("Validating output text for harmful content")
+        response_template = f'{"Unfortunately, I have detected sensitive content in my answer, which I am not allowed to show you. This might be a mistake, so please try again and maybe rephrase your question."}'
+        return self.process_answer(
+            Answer(question="", answer=text, source_documents=[]),
+            response_template=response_template,
+        ).answer
 
+    @logger.trace_function(log_args=False, log_result=False)
     def _filter_text_and_replace(self, text, response_template):
-        with tracer.start_as_current_span(
-            "ContentSafetyChecker._filter_text_and_replace"
-        ):
-            logger.info("Analyzing text for harmful content")
-            request = AnalyzeTextOptions(text=text)
-            try:
-                response = self.content_safety_client.analyze_text(request)
-            except HttpResponseError as e:
-                if e.error:
-                    logger.error(
-                        f"Analyze text failed. Error code: {e.error.code}. Error message: {e.error.message}."
-                    )
-                    raise
-                logger.exception("Analyze text failed.")
+        logger.info("Analyzing text for harmful content")
+        request = AnalyzeTextOptions(text=text)
+        try:
+            response = self.content_safety_client.analyze_text(request)
+        except HttpResponseError as e:
+            if e.error:
+                logger.error(
+                    f"Analyze text failed. Error code: {e.error.code}. Error message: {e.error.message}."
+                )
                 raise
+            logger.exception("Analyze text failed.")
+            raise
 
-            filtered_text = text
+        filtered_text = text
 
-            # if response.hate_result.severity > 0 or response.self_harm_result.severity > 0 or response.sexual_result.severity > 0 or response.violence_result.severity > 0:
-            # filtered_text = response_template
-            for result in response.categories_analysis:
-                if result.severity > 0:
-                    logger.warning(
-                        f"Harmful content detected: Severity: {result.severity}. Replacing text."
-                    )
-                    filtered_text = response_template
+        # if response.hate_result.severity > 0 or response.self_harm_result.severity > 0 or response.sexual_result.severity > 0 or response.violence_result.severity > 0:
+        # filtered_text = response_template
+        for result in response.categories_analysis:
+            if result.severity > 0:
+                logger.warning(
+                    f"Harmful content detected: Severity: {result.severity}. Replacing text."
+                )
+                filtered_text = response_template
 
-            return filtered_text
+        return filtered_text
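Beyond the tracing change, the hunk keeps an earlier behavioural shift visible in the commented-out lines of `_filter_text_and_replace`: rather than checking the old per-category `hate_result` / `self_harm_result` / `sexual_result` / `violence_result` attributes, the code iterates `response.categories_analysis` and replaces the whole answer with the response template as soon as any category reports a severity above zero. A standalone sketch of that decision, with the analysis results stubbed out instead of fetched from `ContentSafetyClient.analyze_text`:

# Sketch of the severity check only; CategoryResult is a stand-in for the
# entries of response.categories_analysis, not the Azure SDK type.
from dataclasses import dataclass


@dataclass
class CategoryResult:
    category: str
    severity: int


def filter_text(text, response_template, categories_analysis):
    filtered_text = text
    for result in categories_analysis:
        if result.severity > 0:
            # Any flagged category replaces the whole answer with the template.
            filtered_text = response_template
    return filtered_text


analysis = [CategoryResult("Hate", 0), CategoryResult("Violence", 2)]
print(filter_text("original answer", "blocked", analysis))  # -> "blocked"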
cwyodmodules/batch/utilities/tools/post_prompt_tool.py
@@ -2,75 +2,63 @@ from ..common.answer import Answer
 from ..helpers.llm_helper import LLMHelper
 from ..helpers.config.config_helper import ConfigHelper
 
-from logging import getLogger
-from opentelemetry import trace, baggage
-from opentelemetry.propagate import extract
-
-# logger = getLogger("__main__" + ".base_package")
-logger = getLogger("__main__")
-# tracer = trace.get_tracer("__main__" + ".base_package")
-tracer = trace.get_tracer("__main__")
+from ...utilities.helpers.env_helper import EnvHelper
+from logging_config import logger
+env_helper: EnvHelper = EnvHelper()
+log_args = env_helper.LOG_ARGS
+log_result = env_helper.LOG_RESULT
 
 
 class PostPromptTool:
     def __init__(self) -> None:
         pass
 
+    @logger.trace_function(log_args=False, log_result=False)
     def validate_answer(self, answer: Answer) -> Answer:
-        with tracer.start_as_current_span("PostPromptTool.validate_answer") as span:
-            logger.info("Validating answer using post-answering prompt.")
-            config = ConfigHelper.get_active_config_or_default()
-            llm_helper = LLMHelper()
+        logger.info("Validating answer using post-answering prompt.")
+        config = ConfigHelper.get_active_config_or_default()
+        llm_helper = LLMHelper()
 
-            sources = "\n".join(
-                [
-                    f"[doc{i+1}]: {source.content}"
-                    for i, source in enumerate(answer.source_documents)
-                ]
+        sources = "\n".join(
+            [
+                f"[doc{i+1}]: {source.content}"
+                for i, source in enumerate(answer.source_documents)
+            ]
+        )
+        message = config.prompts.post_answering_prompt.format(
+            question=answer.question,
+            answer=answer.answer,
+            sources=sources,
+        )
+        logger.debug(f"Post-answering prompt message: {message}")
+        response = llm_helper.get_chat_completion(
+            [
+                {
+                    "role": "user",
+                    "content": message,
+                }
+            ]
+        )
+        result = response.choices[0].message.content
+        logger.debug(f"LLM response content: {result}")
+        was_message_filtered = result.lower() not in ["true", "yes"]
+        logger.debug(f"Was message filtered: {was_message_filtered}")
+        # Return filtered answer or just the original one
+        if was_message_filtered:
+            logger.info("Message was filtered; returning filtered answer.")
+            return Answer(
+                question=answer.question,
+                answer=config.messages.post_answering_filter,
+                source_documents=[],
+                prompt_tokens=response.usage.prompt_tokens,
+                completion_tokens=response.usage.completion_tokens,
             )
-
-            message = config.prompts.post_answering_prompt.format(
+        else:
+            logger.info("Message was not filtered; returning original answer.")
+            return Answer(
                 question=answer.question,
                 answer=answer.answer,
-                sources=sources,
+                source_documents=answer.source_documents,
+                prompt_tokens=response.usage.prompt_tokens,
+                completion_tokens=response.usage.completion_tokens,
             )
-
-            logger.debug(f"Post-answering prompt message: {message}")
-            span.set_attribute("prompt_message", message)
-
-            response = llm_helper.get_chat_completion(
-                [
-                    {
-                        "role": "user",
-                        "content": message,
-                    }
-                ]
-            )
-
-            result = response.choices[0].message.content
-            logger.debug(f"LLM response content: {result}")
-            span.set_attribute("llm_response", result)
-
-            was_message_filtered = result.lower() not in ["true", "yes"]
-            logger.debug(f"Was message filtered: {was_message_filtered}")
-            span.set_attribute("message_filtered", was_message_filtered)
-
-            # Return filtered answer or just the original one
-            if was_message_filtered:
-                logger.info("Message was filtered; returning filtered answer.")
-                return Answer(
-                    question=answer.question,
-                    answer=config.messages.post_answering_filter,
-                    source_documents=[],
-                    prompt_tokens=response.usage.prompt_tokens,
-                    completion_tokens=response.usage.completion_tokens,
-                )
-            else:
-                logger.info("Message was not filtered; returning original answer.")
-                return Answer(
-                    question=answer.question,
-                    answer=answer.answer,
-                    source_documents=answer.source_documents,
-                    prompt_tokens=response.usage.prompt_tokens,
-                    completion_tokens=response.usage.completion_tokens,
-                )
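In `post_prompt_tool.py` the behaviour is unchanged by the refactor: `validate_answer` formats the post-answering prompt with the question, answer, and numbered sources, asks the LLM whether the answer holds up, and treats any reply other than "true" or "yes" as a failure, in which case the configured `post_answering_filter` message is returned with no source documents. The accept/reject decision reduces to a single predicate; a stubbed illustration with hard-coded replies instead of real chat-completion output:

# Sketch of the post-answering filter decision; the replies below are made-up
# examples, not output from llm_helper.get_chat_completion.
def was_filtered(llm_reply: str) -> bool:
    # Only an affirmative "true"/"yes" keeps the original answer.
    return llm_reply.lower() not in ["true", "yes"]


for reply in ["True", "yes", "no", "The answer is not grounded in the sources."]:
    print(f"{reply!r} -> {'filtered' if was_filtered(reply) else 'kept'}")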