cwyodmodules 0.3.32__py3-none-any.whl → 0.3.33__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cwyodmodules/api/chat_history.py +14 -7
- cwyodmodules/batch/utilities/chat_history/auth_utils.py +7 -3
- cwyodmodules/batch/utilities/chat_history/cosmosdb.py +17 -1
- cwyodmodules/batch/utilities/chat_history/postgresdbservice.py +239 -254
- cwyodmodules/batch/utilities/common/source_document.py +60 -61
- cwyodmodules/batch/utilities/document_chunking/fixed_size_overlap.py +8 -3
- cwyodmodules/batch/utilities/document_chunking/layout.py +8 -3
- cwyodmodules/batch/utilities/document_chunking/page.py +8 -3
- cwyodmodules/batch/utilities/document_loading/read.py +30 -34
- cwyodmodules/batch/utilities/helpers/azure_computer_vision_client.py +10 -3
- cwyodmodules/batch/utilities/helpers/azure_form_recognizer_helper.py +6 -2
- cwyodmodules/batch/utilities/helpers/azure_postgres_helper.py +14 -2
- cwyodmodules/batch/utilities/helpers/azure_postgres_helper_light_rag.py +14 -2
- cwyodmodules/batch/utilities/helpers/azure_search_helper.py +15 -6
- cwyodmodules/batch/utilities/helpers/config/config_helper.py +24 -2
- cwyodmodules/batch/utilities/helpers/env_helper.py +9 -9
- cwyodmodules/batch/utilities/helpers/lightrag_helper.py +9 -2
- cwyodmodules/batch/utilities/helpers/llm_helper.py +13 -2
- cwyodmodules/batch/utilities/helpers/secret_helper.py +9 -9
- cwyodmodules/batch/utilities/integrated_vectorization/azure_search_index.py +8 -2
- cwyodmodules/batch/utilities/integrated_vectorization/azure_search_indexer.py +9 -2
- cwyodmodules/batch/utilities/integrated_vectorization/azure_search_skillset.py +6 -2
- cwyodmodules/batch/utilities/orchestrator/lang_chain_agent.py +8 -2
- cwyodmodules/batch/utilities/orchestrator/open_ai_functions.py +6 -2
- cwyodmodules/batch/utilities/orchestrator/orchestrator_base.py +9 -3
- cwyodmodules/batch/utilities/orchestrator/prompt_flow.py +8 -2
- cwyodmodules/batch/utilities/orchestrator/semantic_kernel_orchestrator.py +135 -138
- cwyodmodules/batch/utilities/parser/output_parser_tool.py +64 -64
- cwyodmodules/batch/utilities/plugins/outlook_calendar_plugin.py +91 -93
- cwyodmodules/batch/utilities/search/azure_search_handler.py +16 -3
- cwyodmodules/batch/utilities/search/azure_search_handler_light_rag.py +14 -2
- cwyodmodules/batch/utilities/search/integrated_vectorization_search_handler.py +36 -24
- cwyodmodules/batch/utilities/search/lightrag_search_handler.py +14 -2
- cwyodmodules/batch/utilities/search/postgres_search_handler.py +100 -97
- cwyodmodules/batch/utilities/search/postgres_search_handler_light_rag.py +103 -104
- cwyodmodules/batch/utilities/search/search.py +21 -24
- cwyodmodules/batch/utilities/tools/content_safety_checker.py +66 -78
- cwyodmodules/batch/utilities/tools/post_prompt_tool.py +48 -60
- cwyodmodules/batch/utilities/tools/question_answer_tool.py +196 -206
- cwyodmodules/batch/utilities/tools/text_processing_tool.py +36 -39
- cwyodmodules/logging_config.py +15 -0
- {cwyodmodules-0.3.32.dist-info → cwyodmodules-0.3.33.dist-info}/METADATA +2 -1
- {cwyodmodules-0.3.32.dist-info → cwyodmodules-0.3.33.dist-info}/RECORD +46 -45
- {cwyodmodules-0.3.32.dist-info → cwyodmodules-0.3.33.dist-info}/WHEEL +0 -0
- {cwyodmodules-0.3.32.dist-info → cwyodmodules-0.3.33.dist-info}/licenses/LICENSE +0 -0
- {cwyodmodules-0.3.32.dist-info → cwyodmodules-0.3.33.dist-info}/top_level.txt +0 -0
@@ -10,14 +10,10 @@ from ..search.search import Search
|
|
10
10
|
from .answering_tool_base import AnsweringToolBase
|
11
11
|
from openai.types.chat import ChatCompletion
|
12
12
|
|
13
|
-
from
|
14
|
-
|
15
|
-
|
16
|
-
|
17
|
-
# logger = getLogger("__main__" + ".base_package")
|
18
|
-
logger = getLogger("__main__")
|
19
|
-
# tracer = trace.get_tracer("__main__" + ".base_package")
|
20
|
-
tracer = trace.get_tracer("__main__")
|
13
|
+
from logging_config import logger
|
14
|
+
env_helper: EnvHelper = EnvHelper()
|
15
|
+
log_args = env_helper.LOG_ARGS
|
16
|
+
log_result = env_helper.LOG_RESULT
|
21
17
|
|
22
18
|
|
23
19
|
class QuestionAnswerTool(AnsweringToolBase):
|
@@ -31,21 +27,20 @@ class QuestionAnswerTool(AnsweringToolBase):
|
|
31
27
|
"""
|
32
28
|
Initialize the QuestionAnswerTool with necessary helpers and configurations.
|
33
29
|
"""
|
34
|
-
|
35
|
-
|
36
|
-
|
37
|
-
|
38
|
-
|
39
|
-
self.search_handler = Search.get_search_handler(env_helper=self.env_helper)
|
40
|
-
self.verbose = True
|
30
|
+
logger.info("Initializing QuestionAnswerTool...")
|
31
|
+
self.name = "QuestionAnswer"
|
32
|
+
self.llm_helper = LLMHelper()
|
33
|
+
self.search_handler = Search.get_search_handler(env_helper=self.env_helper)
|
34
|
+
self.verbose = True
|
41
35
|
|
42
|
-
|
43
|
-
|
36
|
+
self.config = ConfigHelper.get_active_config_or_default()
|
37
|
+
logger.info("QuestionAnswerTool initialized with configuration.")
|
44
38
|
|
45
39
|
def __str__(self):
|
46
40
|
return self.answer
|
47
41
|
|
48
42
|
@staticmethod
|
43
|
+
@logger.trace_function(log_args=False, log_result=False)
|
49
44
|
def json_remove_whitespace(obj: str) -> str:
|
50
45
|
"""
|
51
46
|
Remove whitespace from a JSON string.
|
@@ -56,14 +51,14 @@ class QuestionAnswerTool(AnsweringToolBase):
|
|
56
51
|
Returns:
|
57
52
|
str: The JSON string without whitespace.
|
58
53
|
"""
|
59
|
-
|
60
|
-
|
61
|
-
|
62
|
-
|
63
|
-
|
64
|
-
return obj
|
54
|
+
try:
|
55
|
+
return json.dumps(json.loads(obj), separators=(",", ":"))
|
56
|
+
except json.JSONDecodeError:
|
57
|
+
logger.exception("Failed to parse JSON in json_remove_whitespace.")
|
58
|
+
return obj
|
65
59
|
|
66
60
|
@staticmethod
|
61
|
+
@logger.trace_function(log_args=False, log_result=False)
|
67
62
|
def clean_chat_history(chat_history: list[dict]) -> list[dict]:
|
68
63
|
"""
|
69
64
|
Clean the chat history by retaining only the content and role of each message.
|
@@ -74,20 +69,20 @@ class QuestionAnswerTool(AnsweringToolBase):
|
|
74
69
|
Returns:
|
75
70
|
list[dict]: The cleaned chat history.
|
76
71
|
"""
|
77
|
-
|
78
|
-
|
79
|
-
|
80
|
-
|
81
|
-
|
82
|
-
|
83
|
-
|
84
|
-
|
85
|
-
|
86
|
-
|
87
|
-
|
88
|
-
|
89
|
-
|
90
|
-
|
72
|
+
logger.info("Cleaning chat history...")
|
73
|
+
cleaned_history = [
|
74
|
+
{
|
75
|
+
"content": message["content"],
|
76
|
+
"role": message["role"],
|
77
|
+
}
|
78
|
+
for message in chat_history
|
79
|
+
]
|
80
|
+
logger.info(
|
81
|
+
f"Chat history cleaned. Returning {len(cleaned_history)} messages."
|
82
|
+
)
|
83
|
+
return cleaned_history
|
84
|
+
|
85
|
+
@logger.trace_function(log_args=False, log_result=False)
|
91
86
|
def generate_messages(self, question: str, sources: list[SourceDocument]):
|
92
87
|
"""
|
93
88
|
Generate messages for the language model based on the question and source documents.
|
@@ -99,25 +94,25 @@ class QuestionAnswerTool(AnsweringToolBase):
|
|
99
94
|
Returns:
|
100
95
|
list[dict]: The generated messages.
|
101
96
|
"""
|
102
|
-
|
103
|
-
|
104
|
-
|
105
|
-
|
106
|
-
|
107
|
-
|
108
|
-
|
109
|
-
|
110
|
-
|
111
|
-
|
112
|
-
|
113
|
-
|
114
|
-
|
115
|
-
|
116
|
-
|
117
|
-
|
118
|
-
|
119
|
-
|
120
|
-
|
97
|
+
sources_text = "\n\n".join(
|
98
|
+
[f"[doc{i+1}]: {source.content}" for i, source in enumerate(sources)]
|
99
|
+
)
|
100
|
+
|
101
|
+
logger.info(
|
102
|
+
f"Generating messages for question: {question} with {len(sources)} sources."
|
103
|
+
)
|
104
|
+
messages = [
|
105
|
+
{
|
106
|
+
"content": self.config.prompts.answering_user_prompt.format(
|
107
|
+
question=question, sources=sources_text
|
108
|
+
),
|
109
|
+
"role": "user",
|
110
|
+
},
|
111
|
+
]
|
112
|
+
logger.debug(f"Generated messages: {messages}")
|
113
|
+
return messages
|
114
|
+
|
115
|
+
@logger.trace_function(log_args=False, log_result=False)
|
121
116
|
def generate_on_your_data_messages(
|
122
117
|
self,
|
123
118
|
question: str,
|
@@ -137,93 +132,91 @@ class QuestionAnswerTool(AnsweringToolBase):
|
|
137
132
|
Returns:
|
138
133
|
list[dict]: The generated messages.
|
139
134
|
"""
|
140
|
-
|
141
|
-
|
142
|
-
|
143
|
-
|
144
|
-
|
145
|
-
|
146
|
-
|
147
|
-
|
148
|
-
|
149
|
-
|
150
|
-
|
135
|
+
logger.info(f"Generating On Your Data messages for question: {question}")
|
136
|
+
examples = []
|
137
|
+
|
138
|
+
few_shot_example = {
|
139
|
+
"sources": self.config.example.documents.strip(),
|
140
|
+
"question": self.config.example.user_question.strip(),
|
141
|
+
"answer": self.config.example.answer.strip(),
|
142
|
+
}
|
143
|
+
|
144
|
+
if few_shot_example["sources"]:
|
145
|
+
few_shot_example["sources"] = QuestionAnswerTool.json_remove_whitespace(
|
146
|
+
few_shot_example["sources"]
|
147
|
+
)
|
151
148
|
|
152
|
-
|
153
|
-
|
154
|
-
|
149
|
+
if any(few_shot_example.values()):
|
150
|
+
if all((few_shot_example.values())):
|
151
|
+
examples.append(
|
152
|
+
{
|
153
|
+
"content": self.config.prompts.answering_user_prompt.format(
|
154
|
+
sources=few_shot_example["sources"],
|
155
|
+
question=few_shot_example["question"],
|
156
|
+
),
|
157
|
+
"name": "example_user",
|
158
|
+
"role": "system",
|
159
|
+
}
|
160
|
+
)
|
161
|
+
examples.append(
|
162
|
+
{
|
163
|
+
"content": few_shot_example["answer"],
|
164
|
+
"name": "example_assistant",
|
165
|
+
"role": "system",
|
166
|
+
}
|
167
|
+
)
|
168
|
+
else:
|
169
|
+
warnings.warn(
|
170
|
+
"Not all example fields are set in the config. Skipping few-shot example."
|
155
171
|
)
|
156
172
|
|
157
|
-
|
158
|
-
|
159
|
-
|
160
|
-
|
161
|
-
|
162
|
-
|
163
|
-
|
164
|
-
|
165
|
-
|
166
|
-
|
167
|
-
|
168
|
-
|
169
|
-
|
170
|
-
|
171
|
-
|
172
|
-
|
173
|
-
|
174
|
-
|
175
|
-
|
176
|
-
|
177
|
-
|
178
|
-
|
179
|
-
|
180
|
-
|
181
|
-
|
182
|
-
|
183
|
-
|
184
|
-
|
185
|
-
|
186
|
-
],
|
187
|
-
},
|
188
|
-
separators=(",", ":"),
|
189
|
-
)
|
190
|
-
|
191
|
-
messages = [
|
192
|
-
{
|
193
|
-
"role": "system",
|
194
|
-
"content": self.config.prompts.answering_system_prompt,
|
195
|
-
},
|
196
|
-
*examples,
|
197
|
-
{
|
198
|
-
"role": "system",
|
199
|
-
"content": self.env_helper.AZURE_OPENAI_SYSTEM_MESSAGE,
|
200
|
-
},
|
201
|
-
*QuestionAnswerTool.clean_chat_history(chat_history),
|
202
|
-
{
|
203
|
-
"role": "user",
|
204
|
-
"content": [
|
205
|
-
{
|
206
|
-
"type": "text",
|
207
|
-
"text": self.config.prompts.answering_user_prompt.format(
|
208
|
-
sources=documents,
|
209
|
-
question=question,
|
210
|
-
),
|
211
|
-
},
|
212
|
-
*(
|
213
|
-
[
|
214
|
-
{
|
215
|
-
"type": "image_url",
|
216
|
-
"image_url": {"url": image_url},
|
217
|
-
}
|
218
|
-
for image_url in image_urls
|
219
|
-
]
|
173
|
+
documents = json.dumps(
|
174
|
+
{
|
175
|
+
"retrieved_documents": [
|
176
|
+
{f"[doc{i+1}]": {"content": source.content}}
|
177
|
+
for i, source in enumerate(sources)
|
178
|
+
],
|
179
|
+
},
|
180
|
+
separators=(",", ":"),
|
181
|
+
)
|
182
|
+
|
183
|
+
messages = [
|
184
|
+
{
|
185
|
+
"role": "system",
|
186
|
+
"content": self.config.prompts.answering_system_prompt,
|
187
|
+
},
|
188
|
+
*examples,
|
189
|
+
{
|
190
|
+
"role": "system",
|
191
|
+
"content": self.env_helper.AZURE_OPENAI_SYSTEM_MESSAGE,
|
192
|
+
},
|
193
|
+
*QuestionAnswerTool.clean_chat_history(chat_history),
|
194
|
+
{
|
195
|
+
"role": "user",
|
196
|
+
"content": [
|
197
|
+
{
|
198
|
+
"type": "text",
|
199
|
+
"text": self.config.prompts.answering_user_prompt.format(
|
200
|
+
sources=documents,
|
201
|
+
question=question,
|
220
202
|
),
|
221
|
-
|
222
|
-
|
223
|
-
|
224
|
-
|
225
|
-
|
203
|
+
},
|
204
|
+
*(
|
205
|
+
[
|
206
|
+
{
|
207
|
+
"type": "image_url",
|
208
|
+
"image_url": {"url": image_url},
|
209
|
+
}
|
210
|
+
for image_url in image_urls
|
211
|
+
]
|
212
|
+
),
|
213
|
+
],
|
214
|
+
},
|
215
|
+
]
|
216
|
+
logger.debug(f"Generated On Your Data messages: {messages}")
|
217
|
+
return messages
|
226
218
|
|
219
|
+
@logger.trace_function(log_args=False, log_result=False)
|
227
220
|
def answer_question(self, question: str, chat_history: list[dict], **kwargs):
|
228
221
|
"""
|
229
222
|
Answer the given question using the chat history and additional parameters.
|
@@ -236,44 +229,44 @@ class QuestionAnswerTool(AnsweringToolBase):
|
|
236
229
|
Returns:
|
237
230
|
Answer: The formatted answer.
|
238
231
|
"""
|
239
|
-
|
240
|
-
|
241
|
-
|
242
|
-
|
243
|
-
)
|
244
|
-
|
245
|
-
if self.env_helper.USE_ADVANCED_IMAGE_PROCESSING:
|
246
|
-
image_urls = self.create_image_url_list(source_documents)
|
247
|
-
logger.info(
|
248
|
-
f"Generated {len(image_urls)} image URLs for advanced image processing."
|
249
|
-
)
|
250
|
-
else:
|
251
|
-
image_urls = []
|
252
|
-
|
253
|
-
model = self.env_helper.AZURE_OPENAI_VISION_MODEL if image_urls else None
|
232
|
+
logger.info("Answering question")
|
233
|
+
source_documents = Search.get_source_documents(
|
234
|
+
self.search_handler, question
|
235
|
+
)
|
254
236
|
|
255
|
-
|
256
|
-
|
257
|
-
|
258
|
-
)
|
259
|
-
|
260
|
-
|
261
|
-
|
262
|
-
)
|
263
|
-
messages = self.generate_messages(question, source_documents)
|
237
|
+
if self.env_helper.USE_ADVANCED_IMAGE_PROCESSING:
|
238
|
+
image_urls = self.create_image_url_list(source_documents)
|
239
|
+
logger.info(
|
240
|
+
f"Generated {len(image_urls)} image URLs for advanced image processing."
|
241
|
+
)
|
242
|
+
else:
|
243
|
+
image_urls = []
|
264
244
|
|
265
|
-
|
245
|
+
model = self.env_helper.AZURE_OPENAI_VISION_MODEL if image_urls else None
|
266
246
|
|
267
|
-
|
268
|
-
|
247
|
+
if self.config.prompts.use_on_your_data_format:
|
248
|
+
messages = self.generate_on_your_data_messages(
|
249
|
+
question, chat_history, source_documents, image_urls
|
269
250
|
)
|
270
|
-
|
271
|
-
|
251
|
+
else:
|
252
|
+
warnings.warn(
|
253
|
+
"Azure OpenAI On Your Data prompt format is recommended and should be enabled in the Admin app.",
|
272
254
|
)
|
273
|
-
|
274
|
-
|
275
|
-
|
255
|
+
messages = self.generate_messages(question, source_documents)
|
256
|
+
|
257
|
+
llm_helper = LLMHelper()
|
258
|
+
|
259
|
+
response = llm_helper.get_chat_completion(
|
260
|
+
messages, model=model, temperature=0
|
261
|
+
)
|
262
|
+
clean_answer = self.format_answer_from_response(
|
263
|
+
response, question, source_documents
|
264
|
+
)
|
265
|
+
logger.info("Cleaned answer generated successfully.")
|
266
|
+
logger.debug(f"Answer: {clean_answer.answer}")
|
267
|
+
return clean_answer
|
276
268
|
|
269
|
+
@logger.trace_function(log_args=False, log_result=log_result)
|
277
270
|
def create_image_url_list(self, source_documents):
|
278
271
|
"""
|
279
272
|
Create a list of image URLs from the source documents.
|
@@ -284,23 +277,23 @@ class QuestionAnswerTool(AnsweringToolBase):
|
|
284
277
|
Returns:
|
285
278
|
list[str]: The list of image URLs.
|
286
279
|
"""
|
287
|
-
|
288
|
-
image_types = self.config.get_advanced_image_processing_image_types()
|
280
|
+
image_types = self.config.get_advanced_image_processing_image_types()
|
289
281
|
|
290
|
-
|
291
|
-
|
282
|
+
blob_client = AzureBlobStorageClient()
|
283
|
+
container_sas = blob_client.get_container_sas()
|
292
284
|
|
293
|
-
|
294
|
-
|
295
|
-
|
296
|
-
|
297
|
-
|
285
|
+
image_urls = [
|
286
|
+
doc.source.replace("_SAS_TOKEN_PLACEHOLDER_", container_sas)
|
287
|
+
for doc in source_documents
|
288
|
+
if doc.title is not None and doc.title.split(".")[-1] in image_types
|
289
|
+
][: self.env_helper.ADVANCED_IMAGE_PROCESSING_MAX_IMAGES]
|
298
290
|
|
299
|
-
|
300
|
-
|
301
|
-
|
302
|
-
|
291
|
+
logger.info(
|
292
|
+
f"Generated {len(image_urls)} image URLs for {len(source_documents)} source documents."
|
293
|
+
)
|
294
|
+
return image_urls
|
303
295
|
|
296
|
+
@logger.trace_function(log_args=False, log_result=False)
|
304
297
|
def format_answer_from_response(
|
305
298
|
self,
|
306
299
|
response: ChatCompletion,
|
@@ -318,26 +311,23 @@ class QuestionAnswerTool(AnsweringToolBase):
|
|
318
311
|
Returns:
|
319
312
|
Answer: The formatted answer.
|
320
313
|
"""
|
321
|
-
|
322
|
-
|
323
|
-
|
324
|
-
|
325
|
-
|
326
|
-
|
327
|
-
|
328
|
-
|
329
|
-
|
330
|
-
|
331
|
-
|
332
|
-
|
333
|
-
|
334
|
-
#
|
335
|
-
|
336
|
-
|
337
|
-
|
338
|
-
|
339
|
-
|
340
|
-
|
341
|
-
)
|
342
|
-
|
343
|
-
return clean_answer
|
314
|
+
answer = response.choices[0].message.content
|
315
|
+
logger.debug(f"Answer format_answer_from_response: {answer}")
|
316
|
+
|
317
|
+
# Append document citations to the answer
|
318
|
+
citations = "".join([
|
319
|
+
f"[doc{i+1}]"
|
320
|
+
for i in range(len(source_documents))
|
321
|
+
if f"[doc{i+1}]" not in answer
|
322
|
+
])
|
323
|
+
answer_with_citations = f"{answer} {citations}"
|
324
|
+
# Generate Answer Object
|
325
|
+
clean_answer = Answer(
|
326
|
+
question=question,
|
327
|
+
answer=answer_with_citations, # Use the answer with citations
|
328
|
+
source_documents=source_documents,
|
329
|
+
prompt_tokens=response.usage.prompt_tokens,
|
330
|
+
completion_tokens=response.usage.completion_tokens,
|
331
|
+
)
|
332
|
+
|
333
|
+
return clean_answer
|
@@ -3,51 +3,48 @@ from ..helpers.llm_helper import LLMHelper
|
|
3
3
|
from .answering_tool_base import AnsweringToolBase
|
4
4
|
from ..common.answer import Answer
|
5
5
|
|
6
|
-
from logging import getLogger
|
7
|
-
from opentelemetry import trace
|
8
|
-
|
9
|
-
|
10
|
-
|
11
|
-
logger = getLogger("__main__")
|
12
|
-
# tracer = trace.get_tracer("__main__" + ".base_package")
|
13
|
-
tracer = trace.get_tracer("__main__")
|
6
|
+
from ...utilities.helpers.env_helper import EnvHelper
|
7
|
+
from logging_config import logger
|
8
|
+
env_helper: EnvHelper = EnvHelper()
|
9
|
+
log_args = env_helper.LOG_ARGS
|
10
|
+
log_result = env_helper.LOG_RESULT
|
14
11
|
|
15
12
|
|
16
13
|
class TextProcessingTool(AnsweringToolBase):
|
17
14
|
def __init__(self) -> None:
|
18
15
|
self.name = "TextProcessing"
|
19
16
|
|
17
|
+
@logger.trace_function(log_args=False, log_result=False)
|
20
18
|
def answer_question(self, question: str, chat_history: List[dict] = [], **kwargs):
|
21
|
-
|
22
|
-
|
23
|
-
|
24
|
-
|
25
|
-
|
26
|
-
|
27
|
-
|
28
|
-
|
29
|
-
|
19
|
+
logger.info(f"Answering question: {question}")
|
20
|
+
llm_helper = LLMHelper()
|
21
|
+
text = kwargs.get("text")
|
22
|
+
operation = kwargs.get("operation")
|
23
|
+
user_content = (
|
24
|
+
f"{operation} the following TEXT: {text}"
|
25
|
+
if (text and operation)
|
26
|
+
else question
|
27
|
+
)
|
28
|
+
|
29
|
+
system_message = """You are an AI assistant for the user."""
|
30
|
+
|
31
|
+
try:
|
32
|
+
result = llm_helper.get_chat_completion(
|
33
|
+
[
|
34
|
+
{"role": "system", "content": system_message},
|
35
|
+
{"role": "user", "content": user_content},
|
36
|
+
]
|
30
37
|
)
|
31
38
|
|
32
|
-
|
33
|
-
|
34
|
-
|
35
|
-
|
36
|
-
|
37
|
-
|
38
|
-
|
39
|
-
|
40
|
-
|
41
|
-
|
42
|
-
|
43
|
-
|
44
|
-
answer=result.choices[0].message.content,
|
45
|
-
source_documents=[],
|
46
|
-
prompt_tokens=result.usage.prompt_tokens,
|
47
|
-
completion_tokens=result.usage.completion_tokens,
|
48
|
-
)
|
49
|
-
logger.info(f"Answer generated successfully.")
|
50
|
-
return answer
|
51
|
-
except Exception as e:
|
52
|
-
logger.error(f"Error during get_chat_completion: {e}", exc_info=True)
|
53
|
-
raise
|
39
|
+
answer = Answer(
|
40
|
+
question=question,
|
41
|
+
answer=result.choices[0].message.content,
|
42
|
+
source_documents=[],
|
43
|
+
prompt_tokens=result.usage.prompt_tokens,
|
44
|
+
completion_tokens=result.usage.completion_tokens,
|
45
|
+
)
|
46
|
+
logger.info(f"Answer generated successfully.")
|
47
|
+
return answer
|
48
|
+
except Exception as e:
|
49
|
+
logger.error(f"Error during get_chat_completion: {e}", exc_info=True)
|
50
|
+
raise
|
@@ -0,0 +1,15 @@
|
|
1
|
+
import os
|
2
|
+
from azpaddypy.mgmt.logging import create_app_logger
|
3
|
+
|
4
|
+
application_insights_connection_string=os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING")
|
5
|
+
|
6
|
+
# Create a single instance of the logger
|
7
|
+
logger = create_app_logger(
|
8
|
+
connection_string=application_insights_connection_string,
|
9
|
+
service_name=__name__,
|
10
|
+
service_version="1.0.0",
|
11
|
+
enable_console_logging=True,
|
12
|
+
)
|
13
|
+
|
14
|
+
# Export the logger instance
|
15
|
+
__all__ = ['logger']
|
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.4
|
2
2
|
Name: cwyodmodules
|
3
|
-
Version: 0.3.32
|
3
|
+
Version: 0.3.33
|
4
4
|
Summary: Add your description here
|
5
5
|
Author-email: Patrik <patrikhartl@gmail.com>
|
6
6
|
Classifier: Operating System :: OS Independent
|
@@ -40,6 +40,7 @@ Requires-Dist: azure-search-documents==11.6.0b4
|
|
40
40
|
Requires-Dist: semantic-kernel==1.3.0
|
41
41
|
Requires-Dist: pydantic==2.7.4
|
42
42
|
Requires-Dist: pandas>=2.2.3
|
43
|
+
Requires-Dist: azpaddypy>=0.2.4
|
43
44
|
Dynamic: license-file
|
44
45
|
|
45
46
|
# paddypy
|