typeagent-py 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- typeagent/aitools/auth.py +61 -0
- typeagent/aitools/embeddings.py +232 -0
- typeagent/aitools/utils.py +244 -0
- typeagent/aitools/vectorbase.py +175 -0
- typeagent/knowpro/answer_context_schema.py +49 -0
- typeagent/knowpro/answer_response_schema.py +34 -0
- typeagent/knowpro/answers.py +577 -0
- typeagent/knowpro/collections.py +759 -0
- typeagent/knowpro/common.py +9 -0
- typeagent/knowpro/convknowledge.py +112 -0
- typeagent/knowpro/convsettings.py +94 -0
- typeagent/knowpro/convutils.py +49 -0
- typeagent/knowpro/date_time_schema.py +32 -0
- typeagent/knowpro/field_helpers.py +87 -0
- typeagent/knowpro/fuzzyindex.py +144 -0
- typeagent/knowpro/interfaces.py +818 -0
- typeagent/knowpro/knowledge.py +88 -0
- typeagent/knowpro/kplib.py +125 -0
- typeagent/knowpro/query.py +1128 -0
- typeagent/knowpro/search.py +628 -0
- typeagent/knowpro/search_query_schema.py +165 -0
- typeagent/knowpro/searchlang.py +729 -0
- typeagent/knowpro/searchlib.py +345 -0
- typeagent/knowpro/secindex.py +100 -0
- typeagent/knowpro/serialization.py +390 -0
- typeagent/knowpro/textlocindex.py +179 -0
- typeagent/knowpro/utils.py +17 -0
- typeagent/mcp/server.py +139 -0
- typeagent/podcasts/podcast.py +473 -0
- typeagent/podcasts/podcast_import.py +105 -0
- typeagent/storage/__init__.py +25 -0
- typeagent/storage/memory/__init__.py +13 -0
- typeagent/storage/memory/collections.py +68 -0
- typeagent/storage/memory/convthreads.py +81 -0
- typeagent/storage/memory/messageindex.py +178 -0
- typeagent/storage/memory/propindex.py +289 -0
- typeagent/storage/memory/provider.py +84 -0
- typeagent/storage/memory/reltermsindex.py +318 -0
- typeagent/storage/memory/semrefindex.py +660 -0
- typeagent/storage/memory/timestampindex.py +176 -0
- typeagent/storage/sqlite/__init__.py +31 -0
- typeagent/storage/sqlite/collections.py +362 -0
- typeagent/storage/sqlite/messageindex.py +382 -0
- typeagent/storage/sqlite/propindex.py +119 -0
- typeagent/storage/sqlite/provider.py +293 -0
- typeagent/storage/sqlite/reltermsindex.py +328 -0
- typeagent/storage/sqlite/schema.py +248 -0
- typeagent/storage/sqlite/semrefindex.py +156 -0
- typeagent/storage/sqlite/timestampindex.py +146 -0
- typeagent/storage/utils.py +41 -0
- typeagent_py-0.1.0.dist-info/METADATA +28 -0
- typeagent_py-0.1.0.dist-info/RECORD +55 -0
- typeagent_py-0.1.0.dist-info/WHEEL +5 -0
- typeagent_py-0.1.0.dist-info/licenses/LICENSE +21 -0
- typeagent_py-0.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,577 @@
|
|
1
|
+
# Copyright (c) Microsoft Corporation.
|
2
|
+
# Licensed under the MIT License.
|
3
|
+
|
4
|
+
from collections.abc import Iterable, Iterator
|
5
|
+
from dataclasses import dataclass
|
6
|
+
from typing import Any
|
7
|
+
|
8
|
+
import black
|
9
|
+
import typechat
|
10
|
+
|
11
|
+
from .answer_context_schema import AnswerContext, RelevantKnowledge, RelevantMessage
|
12
|
+
from .answer_response_schema import AnswerResponse
|
13
|
+
from .collections import Scored, get_top_k
|
14
|
+
from .interfaces import (
|
15
|
+
DateRange,
|
16
|
+
Datetime,
|
17
|
+
IConversation,
|
18
|
+
IMessage,
|
19
|
+
IMessageCollection,
|
20
|
+
ISemanticRefCollection,
|
21
|
+
ITermToSemanticRefIndex,
|
22
|
+
Knowledge,
|
23
|
+
KnowledgeType,
|
24
|
+
IMessageMetadata,
|
25
|
+
MessageOrdinal,
|
26
|
+
ScoredMessageOrdinal,
|
27
|
+
ScoredSemanticRefOrdinal,
|
28
|
+
SemanticRef,
|
29
|
+
SemanticRefSearchResult,
|
30
|
+
TextLocation,
|
31
|
+
TextRange,
|
32
|
+
Topic,
|
33
|
+
)
|
34
|
+
from .kplib import ConcreteEntity, Facet
|
35
|
+
from .search import ConversationSearchResult
|
36
|
+
|
37
|
+
|
38
|
+
@dataclass
class AnswerContextOptions:
    """Knobs limiting how much context is packed into the answer prompt."""

    # Max merged entities to include (None = no limit).
    entities_top_k: int | None = None
    # Max merged topics to include (None = no limit).
    topics_top_k: int | None = None
    # Max relevant messages to include (None = no limit).
    messages_top_k: int | None = None
    # NOTE(review): not referenced in this module — presumably consumed by a
    # chunked-answer path elsewhere; confirm before removing.
    chunking: bool | None = None
|
44
|
+
|
45
|
+
|
46
|
+
async def generate_answers(
|
47
|
+
translator: typechat.TypeChatJsonTranslator[AnswerResponse],
|
48
|
+
search_results: list[ConversationSearchResult],
|
49
|
+
conversation: IConversation,
|
50
|
+
orig_query_text: str,
|
51
|
+
options: AnswerContextOptions | None = None,
|
52
|
+
) -> tuple[list[AnswerResponse], AnswerResponse]: # (all answers, combined answer)
|
53
|
+
all_answers: list[AnswerResponse] = []
|
54
|
+
good_answers: list[str] = []
|
55
|
+
for i, search_result in enumerate(search_results):
|
56
|
+
for j, result in enumerate(search_results):
|
57
|
+
answer = await generate_answer(translator, result, conversation, options)
|
58
|
+
all_answers.append(answer)
|
59
|
+
match answer.type:
|
60
|
+
case "Answered":
|
61
|
+
assert answer.answer is not None, "Answered answer must not be None"
|
62
|
+
good = answer.answer.strip()
|
63
|
+
if good:
|
64
|
+
good_answers.append(good)
|
65
|
+
case "NoAnswer":
|
66
|
+
pass
|
67
|
+
case _:
|
68
|
+
assert False, f"Unexpected answer type: {answer.type}"
|
69
|
+
if len(all_answers) == 1:
|
70
|
+
return all_answers, all_answers[0]
|
71
|
+
combined_answer: AnswerResponse | None = None
|
72
|
+
if len(good_answers) >= 2:
|
73
|
+
combined_answer = await combine_answers(
|
74
|
+
translator, good_answers, orig_query_text
|
75
|
+
)
|
76
|
+
elif len(good_answers) == 1:
|
77
|
+
combined_answer = AnswerResponse(type="Answered", answer=good_answers[0])
|
78
|
+
else:
|
79
|
+
combined_answer = AnswerResponse(
|
80
|
+
type="NoAnswer", whyNoAnswer="No good answers found."
|
81
|
+
)
|
82
|
+
return all_answers, combined_answer
|
83
|
+
|
84
|
+
|
85
|
+
async def generate_answer[TMessage: IMessage, TIndex: ITermToSemanticRefIndex](
|
86
|
+
translator: typechat.TypeChatJsonTranslator[AnswerResponse],
|
87
|
+
search_result: ConversationSearchResult,
|
88
|
+
conversation: IConversation[TMessage, TIndex],
|
89
|
+
options: AnswerContextOptions | None = None,
|
90
|
+
) -> AnswerResponse:
|
91
|
+
assert search_result.raw_query_text is not None, "Raw query text must not be None"
|
92
|
+
context = await make_context(search_result, conversation, options)
|
93
|
+
request = f"{create_question_prompt(search_result.raw_query_text)}\n\n{create_context_prompt(context)}"
|
94
|
+
# print("+" * 80)
|
95
|
+
# print(request)
|
96
|
+
# print("+" * 80)
|
97
|
+
result = await translator.translate(request)
|
98
|
+
if isinstance(result, typechat.Failure):
|
99
|
+
return AnswerResponse(
|
100
|
+
type="NoAnswer",
|
101
|
+
answer=None,
|
102
|
+
whyNoAnswer=f"TypeChat failure: {result.message}",
|
103
|
+
)
|
104
|
+
else:
|
105
|
+
return result.value
|
106
|
+
|
107
|
+
|
108
|
+
def create_question_prompt(question: str) -> str:
    """Build the instruction block that precedes the user's question.

    The question is wrapped in `===` fences, followed by answering rules
    that reference the [ANSWER CONTEXT] section appended by the caller.
    """
    prompt = [
        "The following is a user question:",
        "===",
        question,
        "===",
        "- The included [ANSWER CONTEXT] contains information that MAY be relevant to answering the question.",
        "- Answer the user question PRECISELY using ONLY relevant topics, entities, actions, messages and time ranges/timestamps found in [ANSWER CONTEXT].",
        "- Return 'NoAnswer' if unsure or if the topics and entity names/types in the question are not in [ANSWER CONTEXT].",
        "- Use the 'name', 'type' and 'facets' properties of the provided JSON entities to identify those highly relevant to answering the question.",
        # Typo fix: original read "ensure the the list contents".
        "- When asked for lists, ensure the list contents answer the question and nothing else.",
        "E.g. for the question 'List all books': List only the books in [ANSWER CONTEXT].",
        "- Use direct quotes only when needed or asked. Otherwise answer in your own words.",
        "- Your answer is readable and complete, with appropriate formatting: line breaks, numbered lists, bullet points etc.",
    ]
    return "\n".join(prompt)
|
124
|
+
|
125
|
+
|
126
|
+
def create_context_prompt(context: AnswerContext) -> str:
    """Render `context` as a fenced [ANSWER CONTEXT] section of the prompt."""
    # TODO: Use a more compact representation of the context than JSON.
    formatted = black.format_str(
        str(dictify(context)), mode=black.FileMode(line_length=200)
    )
    return "\n".join(["[ANSWER CONTEXT]", "===", formatted, "==="])
|
135
|
+
|
136
|
+
|
137
|
+
def dictify(object: object) -> Any:
    """Recursively convert an object into plain dicts/lists/scalars.

    Classes with (non-empty) ``__annotations__`` are converted via their
    annotated attribute names, plain objects via ``__dict__``; ``None``
    values are dropped throughout.  Whole-valued floats are narrowed to
    ``int``; everything else passes through unchanged.
    """
    # NOTE: Can't use dataclasses.asdict() because not every object is a dataclass.
    annotations = getattr(object.__class__, "__annotations__", None)
    if annotations:
        result = {}
        for key in annotations:
            value = getattr(object, key, None)
            if value is not None:
                result[key] = dictify(value)
        return result
    if isinstance(object, dict):
        return {
            key: dictify(value) for key, value in object.items() if value is not None
        }
    if isinstance(object, list):
        return [dictify(element) for element in object]
    if hasattr(object, "__dict__"):
        return {
            key: dictify(value)
            for key, value in object.__dict__.items()
            if value is not None
        }  # if not key.startswith("_")
    if isinstance(object, float) and object.is_integer():
        return int(object)
    return object
|
157
|
+
|
158
|
+
|
159
|
+
async def make_context[TMessage: IMessage, TIndex: ITermToSemanticRefIndex](
|
160
|
+
search_result: ConversationSearchResult,
|
161
|
+
conversation: IConversation[TMessage, TIndex],
|
162
|
+
options: AnswerContextOptions | None = None,
|
163
|
+
) -> AnswerContext:
|
164
|
+
context = AnswerContext([], [], [])
|
165
|
+
|
166
|
+
if search_result.message_matches:
|
167
|
+
context.messages = await get_relevant_messages_for_answer(
|
168
|
+
conversation,
|
169
|
+
search_result.message_matches,
|
170
|
+
options and options.messages_top_k,
|
171
|
+
)
|
172
|
+
|
173
|
+
for knowledge_type, knowledge in search_result.knowledge_matches.items():
|
174
|
+
match knowledge_type:
|
175
|
+
case "entity":
|
176
|
+
context.entities = await get_relevant_entities_for_answer(
|
177
|
+
conversation,
|
178
|
+
knowledge,
|
179
|
+
options and options.entities_top_k,
|
180
|
+
)
|
181
|
+
case "topic":
|
182
|
+
context.topics = await get_relevant_topics_for_answer(
|
183
|
+
conversation,
|
184
|
+
knowledge,
|
185
|
+
options and options.topics_top_k,
|
186
|
+
)
|
187
|
+
case _:
|
188
|
+
pass # TODO: Actions and tags (once we support them)?
|
189
|
+
|
190
|
+
return context
|
191
|
+
|
192
|
+
|
193
|
+
# Facet values grouped by lowercased facet name (see facets_to_merged_facets).
type MergedFacets = dict[str, list[str]]
|
194
|
+
|
195
|
+
|
196
|
+
# NOT a dataclass -- an optional merge-in attribute for MergedEntity etc.
class MergedKnowledge:
    """Base holding the message ordinals a piece of merged knowledge came from."""

    # None until merge_message_ordinals() first records an ordinal.
    source_message_ordinals: set[MessageOrdinal] | None = None
|
199
|
+
|
200
|
+
|
201
|
+
@dataclass
class MergedTopic(MergedKnowledge):
    """A topic plus (inherited) source message ordinals merged across matches."""

    topic: Topic
|
204
|
+
|
205
|
+
|
206
|
+
@dataclass
class MergedEntity(MergedKnowledge):
    """A concrete entity normalized (lowercased) and merged across matches."""

    # Lowercased entity name (merge key).
    name: str
    # Sorted, lowercased type names, unioned across merges.
    type: list[str]
    # Facet values grouped by name; None when no facets seen.
    facets: MergedFacets | None = None
|
211
|
+
|
212
|
+
|
213
|
+
async def get_relevant_messages_for_answer[
|
214
|
+
TMessage: IMessage, TIndex: ITermToSemanticRefIndex
|
215
|
+
](
|
216
|
+
conversation: IConversation[TMessage, TIndex],
|
217
|
+
message_matches: list[ScoredMessageOrdinal],
|
218
|
+
top_k: int | None = None,
|
219
|
+
) -> list[RelevantMessage]:
|
220
|
+
relevant_messages = []
|
221
|
+
|
222
|
+
for scored_msg_ord in message_matches:
|
223
|
+
msg = await conversation.messages.get_item(scored_msg_ord.message_ordinal)
|
224
|
+
if not msg.text_chunks:
|
225
|
+
continue
|
226
|
+
metadata: IMessageMetadata | None = msg.metadata
|
227
|
+
assert metadata is not None # For type checkers
|
228
|
+
relevant_messages.append(
|
229
|
+
RelevantMessage(
|
230
|
+
from_=metadata.source,
|
231
|
+
to=metadata.dest,
|
232
|
+
timestamp=msg.timestamp,
|
233
|
+
messageText=(
|
234
|
+
msg.text_chunks[0] if len(msg.text_chunks) == 1 else msg.text_chunks
|
235
|
+
),
|
236
|
+
)
|
237
|
+
)
|
238
|
+
if top_k and len(relevant_messages) >= top_k:
|
239
|
+
break
|
240
|
+
|
241
|
+
return relevant_messages
|
242
|
+
|
243
|
+
|
244
|
+
async def get_relevant_topics_for_answer(
    conversation: IConversation,
    search_result: SemanticRefSearchResult,
    top_k: int | None = None,
) -> list[RelevantKnowledge]:
    """Merge matched topic semantic refs and convert them to RelevantKnowledge.

    When `top_k` is given and exceeded, only the best-scoring topics survive.
    """
    assert conversation.semantic_refs is not None, "Semantic refs must not be None"
    scored_topics: Iterable[Scored[SemanticRef]] = (
        await get_scored_semantic_refs_from_ordinals_iter(
            conversation.semantic_refs,
            search_result.semantic_ref_matches,
            "topic",
        )
    )
    merged_topics = merge_scored_topics(scored_topics, True)
    candidates: Iterable[Scored[MergedTopic]] = merged_topics.values()
    if top_k and len(merged_topics) > top_k:
        candidates = get_top_k(candidates, top_k)

    relevant_topics: list[RelevantKnowledge] = []
    for scored in candidates:
        merged_topic = scored.item
        knowledge = await create_relevant_knowledge(
            conversation,
            merged_topic.topic,
            merged_topic.source_message_ordinals,
        )
        relevant_topics.append(knowledge)

    return relevant_topics
|
275
|
+
|
276
|
+
|
277
|
+
def merge_scored_topics(
    scored_topics: Iterable[Scored[SemanticRef]],
    merge_ordinals: bool,
) -> dict[str, Scored[MergedTopic]]:
    """Merge scored topic semantic refs by topic text, keeping the max score.

    When `merge_ordinals` is true, the source message ordinals of every
    contributing semantic ref are accumulated on the merged topic.
    """
    merged_topics: dict[str, Scored[MergedTopic]] = {}

    for scored_topic in scored_topics:
        semantic_ref = scored_topic.item
        assert isinstance(semantic_ref.knowledge, Topic)
        topic = semantic_ref.knowledge
        entry = merged_topics.get(topic.text)
        if entry is None:
            entry = Scored(
                item=MergedTopic(topic=topic),
                score=scored_topic.score,
            )
            merged_topics[topic.text] = entry
        else:
            assert entry.item.topic.text == topic.text
            # Keep the highest score seen for this topic text.
            entry.score = max(entry.score, scored_topic.score)
        if merge_ordinals:
            merge_message_ordinals(entry.item, semantic_ref)

    return merged_topics
|
302
|
+
|
303
|
+
|
304
|
+
async def get_relevant_entities_for_answer(
    conversation: IConversation,
    search_result: SemanticRefSearchResult,
    top_k: int | None = None,
) -> list[RelevantKnowledge]:
    """Merge matched entity semantic refs and convert them to RelevantKnowledge.

    When `top_k` is given and exceeded, only the best-scoring entities survive.
    """
    assert conversation.semantic_refs is not None, "Semantic refs must not be None"
    scored_entities = await get_scored_semantic_refs_from_ordinals_iter(
        conversation.semantic_refs,
        search_result.semantic_ref_matches,
        "entity",
    )
    merged_entities = merge_scored_concrete_entities(
        scored_entities,
        merge_ordinals=True,
    )
    candidates = merged_entities.values()
    if top_k and len(merged_entities) > top_k:
        candidates = get_top_k(candidates, top_k)

    relevant_entities: list[RelevantKnowledge] = []
    for scored in candidates:
        merged_entity = scored.item
        relevant_entity = await create_relevant_knowledge(
            conversation,
            merged_to_concrete_entity(merged_entity),
            merged_entity.source_message_ordinals,
        )
        relevant_entities.append(relevant_entity)

    return relevant_entities
|
334
|
+
|
335
|
+
|
336
|
+
async def create_relevant_knowledge(
    conversation: IConversation,
    knowledge: Knowledge,
    source_message_ordinals: set[MessageOrdinal] | None = None,
) -> RelevantKnowledge:
    """Wrap `knowledge` in a RelevantKnowledge record.

    When source message ordinals are given, the enclosing time range and
    the origin/audience metadata of those messages are attached.
    """
    relevant = RelevantKnowledge(knowledge)
    if not source_message_ordinals:
        return relevant

    relevant.time_range = await get_enclosing_data_range_for_messages(
        conversation.messages, source_message_ordinals
    )
    meta = await get_enclosing_metadata_for_messages(
        conversation.messages, source_message_ordinals
    )
    if meta.source:
        relevant.origin = meta.source
    if meta.dest:
        relevant.audience = meta.dest
    return relevant
|
356
|
+
|
357
|
+
|
358
|
+
async def get_enclosing_data_range_for_messages(
    messages: IMessageCollection,
    message_ordinals: Iterable[MessageOrdinal],
) -> DateRange | None:
    """Return the date range enclosing the given messages, or None if empty.

    NOTE(review): "data_range" reads like a typo for "date_range", but the
    name is public API — renaming would break callers.
    """
    if text_range := get_enclosing_text_range(message_ordinals):
        return await get_enclosing_date_range_for_text_range(messages, text_range)
    return None
|
366
|
+
|
367
|
+
|
368
|
+
def get_enclosing_text_range(
    message_ordinals: Iterable[MessageOrdinal],
) -> TextRange | None:
    """Return the text range from the smallest to largest ordinal, or None if empty."""
    # Materialize once: the iterable may be single-use.
    ordinals = list(message_ordinals)
    if not ordinals:
        return None
    return text_range_from_message_range(min(ordinals), max(ordinals))
|
381
|
+
|
382
|
+
|
383
|
+
def text_range_from_message_range(
    start: MessageOrdinal, end: MessageOrdinal
) -> TextRange | None:
    """Build a TextRange for [start, end]; start == end yields a point location.

    Raises ValueError when start > end.
    """
    if start > end:
        raise ValueError(f"Expect message ordinal range: {start} <= {end}")
    if start == end:
        # Point location
        return TextRange(start=TextLocation(start))
    return TextRange(
        start=TextLocation(start),
        end=TextLocation(end),
    )
|
396
|
+
|
397
|
+
|
398
|
+
async def get_enclosing_date_range_for_text_range(
    messages: IMessageCollection,
    range: TextRange,
) -> DateRange | None:
    """Map a TextRange onto a DateRange using the messages' ISO timestamps.

    Returns None when the start message has no timestamp; the range is
    open-ended when there is no end location or the end message has no
    timestamp.
    """
    start_message = await messages.get_item(range.start.message_ordinal)
    start_timestamp = start_message.timestamp
    if not start_timestamp:
        return None
    end_timestamp = None
    if range.end:
        end_message = await messages.get_item(range.end.message_ordinal)
        end_timestamp = end_message.timestamp
    return DateRange(
        start=Datetime.fromisoformat(start_timestamp),
        end=Datetime.fromisoformat(end_timestamp) if end_timestamp else None,
    )
|
414
|
+
|
415
|
+
|
416
|
+
@dataclass
class MessageMetadata(IMessageMetadata):
    """Aggregate source/dest metadata collected across several messages."""

    # Sender name(s); None when no message contributed a source.
    source: str | list[str] | None = None
    # Recipient name(s); None when no message contributed a dest.
    dest: str | list[str] | None = None
|
420
|
+
|
421
|
+
|
422
|
+
async def get_enclosing_metadata_for_messages(
    messages: IMessageCollection,
    message_ordinals: Iterable[MessageOrdinal],
) -> IMessageMetadata:
    """Union the source/dest metadata of the given messages into one record.

    Duplicate names are collapsed via sets; empty unions become None.
    """
    sources: set[str] = set()
    dests: set[str] = set()

    def collect(bucket: set[str], value: str | list[str] | None) -> None:
        # Accept a single name or a list of names; ignore None.
        if isinstance(value, str):
            bucket.add(value)
        elif isinstance(value, list):
            bucket.update(value)

    for ordinal in message_ordinals:
        metadata = (await messages.get_item(ordinal)).metadata
        if metadata:
            collect(sources, metadata.source)
            collect(dests, metadata.dest)

    return MessageMetadata(
        source=list(sources) if sources else None,
        dest=list(dests) if dests else None,
    )
|
445
|
+
|
446
|
+
|
447
|
+
async def get_scored_semantic_refs_from_ordinals_iter(
    semantic_refs: ISemanticRefCollection,
    semantic_ref_matches: list[ScoredSemanticRefOrdinal],
    knowledge_type: KnowledgeType,
) -> list[Scored[SemanticRef]]:
    """Resolve scored ordinals to Scored[SemanticRef], filtered by knowledge type.

    NOTE(review): despite the `_iter` suffix this returns a list, not an
    iterator.
    """
    scored: list[Scored[SemanticRef]] = []
    for match in semantic_ref_matches:
        semantic_ref = await semantic_refs.get_item(match.semantic_ref_ordinal)
        if semantic_ref.knowledge.knowledge_type != knowledge_type:
            continue
        scored.append(Scored(item=semantic_ref, score=match.score))
    return scored
|
465
|
+
|
466
|
+
|
467
|
+
def merge_scored_concrete_entities(
    scored_entities: Iterable[Scored[SemanticRef]],
    merge_ordinals: bool,
) -> dict[str, Scored[MergedEntity]]:
    """Merge scored entity semantic refs by normalized entity name.

    Types and facets are unioned into the first-seen entry (mutated in
    place), the max score is kept, and — when `merge_ordinals` is true —
    source message ordinals are accumulated on each merged entity.
    """
    merged_entities: dict[str, Scored[MergedEntity]] = {}

    for scored_entity in scored_entities:
        assert isinstance(scored_entity.item.knowledge, ConcreteEntity)
        # Normalize first (lowercased name/types) so the dict key matches
        # regardless of the original casing.
        merged_entity = concrete_to_merged_entity(
            scored_entity.item.knowledge,
        )
        existing = merged_entities.get(merged_entity.name)
        if existing is not None:
            assert existing.item.name == merged_entity.name
            # Merge type list.
            if not existing.item.type:
                existing.item.type = merged_entity.type
            elif merged_entity.type:
                existing.item.type = sorted(
                    set(existing.item.type) | set(merged_entity.type)
                )
            # Merge facet dicts.
            if not existing.item.facets:
                existing.item.facets = merged_entity.facets
            elif merged_entity.facets:
                for name, value in merged_entity.facets.items():
                    existing.item.facets.setdefault(name, []).extend(value)
            # Merge scores.
            if existing.score < scored_entity.score:
                existing.score = scored_entity.score
        else:
            existing = Scored(
                item=merged_entity,
                score=scored_entity.score,
            )
            merged_entities[merged_entity.name] = existing
        # `existing` now points at the surviving entry in either branch.
        if existing and merge_ordinals:
            merge_message_ordinals(existing.item, scored_entity.item)

    return merged_entities
|
507
|
+
|
508
|
+
|
509
|
+
def merge_message_ordinals(merged_entity: MergedKnowledge, sr: SemanticRef) -> None:
    """Record the starting message ordinal of `sr` on `merged_entity` (in place)."""
    ordinals = merged_entity.source_message_ordinals
    if ordinals is None:
        # Lazily created: None means "no ordinals recorded yet".
        ordinals = set()
        merged_entity.source_message_ordinals = ordinals
    ordinals.add(sr.range.start.message_ordinal)
|
513
|
+
|
514
|
+
|
515
|
+
def concrete_to_merged_entity(
    entity: ConcreteEntity,
) -> MergedEntity:
    """Normalize a ConcreteEntity into a MergedEntity (lowercased, sorted types)."""
    merged_facets = None
    if entity.facets:
        merged_facets = facets_to_merged_facets(entity.facets)
    return MergedEntity(
        name=entity.name.lower(),
        type=sorted(kind.lower() for kind in entity.type),
        facets=merged_facets,
    )
|
523
|
+
|
524
|
+
|
525
|
+
def merged_to_concrete_entity(merged_entity: MergedEntity) -> ConcreteEntity:
    """Convert a MergedEntity back to a ConcreteEntity, flattening merged facets."""
    entity = ConcreteEntity(name=merged_entity.name, type=merged_entity.type)
    merged = merged_entity.facets
    if merged:
        entity.facets = merged_facets_to_facets(merged)
    return entity
|
530
|
+
|
531
|
+
|
532
|
+
def facets_to_merged_facets(facets: list[Facet]) -> MergedFacets:
    """Group lowercased facet values by lowercased facet name.

    NOTE(review): the stored value is str(facet) — the string form of the
    whole facet — not str(facet.value); presumably kplib.Facet.__str__
    renders the value, but confirm against kplib before relying on it.
    """
    merged_facets: MergedFacets = {}
    for facet in facets:
        name = facet.name.lower()
        value = str(facet).lower()
        merged_facets.setdefault(name, []).append(value)
    return merged_facets
|
539
|
+
|
540
|
+
|
541
|
+
def merged_facets_to_facets(merged_facets: MergedFacets) -> list[Facet]:
    """Collapse each merged facet's values into one Facet with a '; '-joined value.

    Names with an empty value list are dropped.
    """
    return [
        Facet(name=facet_name, value="; ".join(facet_values))
        for facet_name, facet_values in merged_facets.items()
        if facet_values
    ]
|
547
|
+
|
548
|
+
|
549
|
+
async def combine_answers(
    translator: typechat.TypeChatJsonTranslator[AnswerResponse],
    answers: list[str],
    original_query_text: str,
) -> AnswerResponse:
    """Combine multiple answers into a single answer.

    Zero answers yields NoAnswer, one answer is returned as-is; otherwise
    the partial answers are blended by the model via the translator.
    """
    if not answers:
        return AnswerResponse(type="NoAnswer", whyNoAnswer="No answers provided.")
    if len(answers) == 1:
        # Nothing to blend.
        return AnswerResponse(type="Answered", answer=answers[0])
    request_parts = [
        "The following are multiple partial answers to the same question.",
        "Combine the partial answers into a single answer to the original question.",
        "Don't just concatenate the answers, but blend them into a single accurate and precise answer.",
        "",
        "*** Original Question ***",
        original_query_text,
        "*** Partial answers ***",
        "===",
    ]
    for answer in answers:
        request_parts.extend((answer.strip(), "==="))
    result = await translator.translate("\n".join(request_parts))
    if isinstance(result, typechat.Failure):
        return AnswerResponse(type="NoAnswer", whyNoAnswer=result.message)
    return result.value
|