kolzchut-ragbot 1.4.1__py3-none-any.whl → 1.5.0__py3-none-any.whl

This diff shows the content of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
kolzchut_ragbot/engine.py CHANGED
@@ -1,246 +1,254 @@
- import time
- from collections import defaultdict
- from datetime import datetime
- from .llm_client import LLMClient
- from . import config
- from .model import es_client_factory
- from .Document import factory
- from sentence_transformers import SentenceTransformer
- from transformers import AutoTokenizer, AutoModelForSequenceClassification
-
- import torch
- import os
-
- device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
- definitions = factory()
-
-
- class Engine:
-     """
-     Engine class for handling document search and retrieval using Elasticsearch and LLMs.
-
-     Attributes:
-         llms_client (LLMClient): The LLM client instance.
-         elastic_model (Model): The Elasticsearch model instance.
-         models (dict): A dictionary of SentenceTransformer models.
-         reranker_tokenizer (AutoTokenizer): The tokenizer for the reranker model.
-         reranker_model (AutoModelForSequenceClassification): The reranker model.
-         identifier_field (str): The identifier field for documents.
-
-     Methods:
-         rerank_with_me5(query, documents, k=5):
-             Reranks documents based on the query using the reranker model.
-
-         update_docs(list_of_docs, embed_only_fields=None, delete_existing=False):
-             Updates or creates documents in the Elasticsearch index.
-
-         reciprocal_rank_fusion(ranking_lists, k=60, weights=None):
-             Performs Reciprocal Rank Fusion on a list of ranking lists.
-
-         search_documents(query, top_k):
-             Searches for documents based on the query and returns the top_k results.
-
-         answer_query(query, top_k, model):
-             Answers a query using the top_k documents and the specified model.
-     """
-
-     def __init__(self, llms_client: LLMClient, elastic_model=None, models=None, reranker_tokenizer=None,
-                  reranker_model=None, es_client=None):
-         """
-         Initializes the Engine instance.
-
-         Args:
-             llms_client (LLMClient): The LLM client instance.
-             elastic_model (Model, optional): The Elasticsearch model instance. Default is None.
-             models (dict, optional): A dictionary of SentenceTransformer models. Default is None.
-             reranker_tokenizer (AutoTokenizer, optional): The tokenizer for the reranker model. Default is None.
-             reranker_model (AutoModelForSequenceClassification, optional): The reranker model. Default is None.
-             es_client (optional): The Elasticsearch client instance. Default is None.
-         """
-         if elastic_model is None:
-             self.elastic_model = es_client_factory(es_client)
-         else:
-             self.elastic_model = elastic_model
-
-         self.llms_client = llms_client
-
-         self.identifier_field = factory().identifier
-
-         if models is None:
-             self.models = {f"{model_name}": SentenceTransformer(config.MODELS_LOCATION + "/" + model_name).to(device)
-                            for model_name in definitions.models.keys()}
-         else:
-             self.models = models
-         for model in self.models.values():
-             model.eval()
-
-         if reranker_tokenizer is None:
-             self.reranker_tokenizer = AutoTokenizer.from_pretrained(os.getenv("TOKENIZER_LOCATION"))
-         else:
-             self.reranker_tokenizer = reranker_tokenizer
-
-         if reranker_model is None:
-             self.reranker_model = AutoModelForSequenceClassification.from_pretrained(os.getenv("TOKENIZER_LOCATION"))
-         else:
-             self.reranker_model = reranker_model
-         self.reranker_model.eval()
-
-
-     def rerank_with_me5(self, query, documents, k=5):
-         """
-         Reranks documents based on the query using the reranker model.
-
-         Args:
-             query (str): The query string.
-             documents (list): A list of documents to be reranked.
-             k (int, optional): The number of top documents to return. Default is 5.
-
-         Returns:
-             list: A list of top k reranked documents.
-         """
-         pairs = [(query, doc) for doc in set(documents)]
-         inputs = self.reranker_tokenizer(pairs, return_tensors='pt', padding=True, truncation=True, max_length=512)
-
-         # Make predictions
-         with torch.no_grad():
-             outputs = self.reranker_model(**inputs)
-
-         scores = outputs.logits.squeeze()
-
-         if scores.ndim > 1:
-             scores = scores[:, 1]  # Assuming binary classification and index 1 is the relevance score
-
-         sorted_indices = torch.argsort(scores, descending=True)
-         # If there is only one document, return it to avoid torch error
-         if len(sorted_indices) == 1:
-             return [pairs[0][1]]
-         # Sort documents by their highest score
-         sorted_docs = [pairs[i][1] for i in sorted_indices]
-         return sorted_docs[:k]
-
-     def update_docs(self, list_of_docs: list[dict], embed_only_fields=None, delete_existing=False):
-         """
-         Updates or creates documents in the Elasticsearch index.
-
-         Args:
-             list_of_docs (list[dict]): A list of dictionaries representing the documents to be indexed.
-             embed_only_fields (list, optional): A list of fields to be embedded. Default is None.
-             delete_existing (bool, optional): Whether to delete existing documents. Default is False.
-         """
-         embed_only_fields = embed_only_fields or definitions.models.values()
-         for doc in list_of_docs:
-             for semantic_model, field in definitions.models.items():
-                 if field in doc.keys() and field in embed_only_fields:
-                     content_vectors = self.models[semantic_model].encode(doc[field])
-                     doc[f'{field}_{semantic_model}_vectors'] = content_vectors
-
-             doc['last_update'] = datetime.now()
-         self.elastic_model.create_or_update_documents(list_of_docs, delete_existing)
-
-     def reciprocal_rank_fusion(self, ranking_lists, k=60, weights=None):
-         """
-         Performs Reciprocal Rank Fusion on a list of ranking lists.
-
-         Args:
-             :param ranking_lists: List of ranking lists, where each ranking list is a list of documents returned by a model.
-             :param k: The parameter for the reciprocal rank calculation (default is 60).
-             :param: weights: Optional. Weights for each ranking list.
-
-         Returns:
-             list: A fused ranking list of documents.
-         """
-         scores = defaultdict(float)
-
-         for list_index, rank_list in enumerate(ranking_lists):
-             for rank, identifier in enumerate(rank_list):
-                 # Reciprocal rank score
-                 w = weights[list_index] if weights else 1
-                 scores[identifier] += w / (k + rank + 1)
-
-         # Sort the documents by their cumulative scores in descending order
-         fused_list = sorted(scores, key=scores.get, reverse=True)
-
-         return fused_list
-
-     def search_documents(self, query: str, top_k: int):
-         """
-         Searches for documents based on the query and returns the top_k results.
-
-         Args:
-             query (str): The query string.
-             top_k (int): The number of top documents to return.
-
-         Returns:
-             list: A list of top k documents.
-         """
-         query_embeddings = {f"{semantic_model}": self.models[semantic_model].encode(query) for semantic_model in
-                             definitions.models.keys()}
-         all_docs_by_model = self.elastic_model.search(query_embeddings)
-         all_docs = []
-         ids_for_fusion = []
-         all_docs_and_scores = {}
-
-         for key, values in all_docs_by_model.items():
-             print(f"\nFound {len(values)} documents for model\n")
-             model_ids = []
-             scores_for_model = []
-
-             for doc in values:
-                 model_ids.append(doc["_source"]["page_id"])
-                 all_docs.append(doc)
-                 scores_for_model.append({"doc": doc["_source"]["title"], "score": doc["_score"]})
-             ids_for_fusion.append(model_ids)
-             all_docs_and_scores[f'{key}'] = scores_for_model
-         print(f"\nFusing {len(ids_for_fusion)} results\n")
-         fused_ids = self.reciprocal_rank_fusion(ids_for_fusion, k=top_k)
-         top_k_documents = []
-         top_titles = []
-
-         for fused_id in fused_ids:
-             for doc in all_docs:
-                 if doc["_source"]["page_id"] == fused_id and doc["_source"]["title"] not in top_titles:
-                     top_k_documents.append(doc["_source"])
-                     top_titles.append(doc["_source"]["title"])
-                     break
-             if len(top_titles) >= top_k:
-                 break
-
-         return top_k_documents, all_docs_and_scores
-
-     def answer_query(self, query, top_k: int, model):
-         """
-         Answers a query using the top_k documents and the specified model.
-
-         Args:
-             query (str): The query string.
-             top_k (int): The number of top documents to use for answering the query.
-             model: The model to use for answering the query.
-
-         Returns:
-             tuple: A tuple containing the top k documents, the answer, and the stats.
-         """
-         before_retrieval = time.perf_counter()
-         top_k_documents, all_docs_and_scores = self.search_documents(query, top_k)
-
-
-         retrieval_time = round(time.perf_counter() - before_retrieval, 4)
-         print(f"retrieval time: {retrieval_time}")
-
-         gpt_answer, gpt_elapsed, tokens = self.llms_client.answer(query, top_k_documents)
-         stats = {
-             "retrieval_time": retrieval_time,
-             "gpt_model": model,
-             "gpt_time": gpt_elapsed,
-             "tokens": tokens
-         }
-         return top_k_documents, gpt_answer, stats, all_docs_and_scores
-
-
- engine = None
-
-
- def engine_factory(llms_client: LLMClient, es_client=None):
-     global engine
-     if engine is None:
-         engine = Engine(llms_client=llms_client, es_client=es_client)
-     return engine
+ import time
+ from collections import defaultdict
+ from datetime import datetime
+ from .llm_client import LLMClient
+ from . import config
+ from .model import es_client_factory
+ from .Document import factory
+ from sentence_transformers import SentenceTransformer
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+
+ import torch
+ import os
+
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+ definitions = factory()
+
+
+ class Engine:
+     """
+     Engine class for handling document search and retrieval using Elasticsearch and LLMs.
+
+     Attributes:
+         llms_client (LLMClient): The LLM client instance.
+         elastic_model (Model): The Elasticsearch model instance.
+         models (dict): A dictionary of SentenceTransformer models.
+         reranker_tokenizer (AutoTokenizer): The tokenizer for the reranker model.
+         reranker_model (AutoModelForSequenceClassification): The reranker model.
+         identifier_field (str): The identifier field for documents.
+
+     Methods:
+         rerank_with_me5(query, documents, k=5):
+             Reranks documents based on the query using the reranker model.
+
+         update_docs(list_of_docs, embed_only_fields=None, delete_existing=False):
+             Updates or creates documents in the Elasticsearch index.
+
+         reciprocal_rank_fusion(ranking_lists, k=60, weights=None):
+             Performs Reciprocal Rank Fusion on a list of ranking lists.
+
+         search_documents(query, top_k):
+             Searches for documents based on the query and returns the top_k results.
+
+         answer_query(query, top_k, model):
+             Answers a query using the top_k documents and the specified model.
+     """
+
+     def __init__(self, llms_client: LLMClient, elastic_model=None, models=None, reranker_tokenizer=None,
+                  reranker_model=None, es_client=None):
+         """
+         Initializes the Engine instance.
+
+         Args:
+             llms_client (LLMClient): The LLM client instance.
+             elastic_model (Model, optional): The Elasticsearch model instance. Default is None.
+             models (dict, optional): A dictionary of SentenceTransformer models. Default is None.
+             reranker_tokenizer (AutoTokenizer, optional): The tokenizer for the reranker model. Default is None.
+             reranker_model (AutoModelForSequenceClassification, optional): The reranker model. Default is None.
+             es_client (optional): The Elasticsearch client instance. Default is None.
+         """
+         if elastic_model is None:
+             self.elastic_model = es_client_factory(es_client)
+         else:
+             self.elastic_model = elastic_model
+
+         self.llms_client = llms_client
+
+         self.identifier_field = factory().identifier
+
+         if models is None:
+             self.models = {f"{model_name}": SentenceTransformer(config.MODELS_LOCATION + "/" + model_name).to(device)
+                            for model_name in definitions.models.keys()}
+         else:
+             self.models = models
+         for model in self.models.values():
+             model.eval()
+
+         if reranker_tokenizer is None:
+             self.reranker_tokenizer = AutoTokenizer.from_pretrained(os.getenv("TOKENIZER_LOCATION"))
+         else:
+             self.reranker_tokenizer = reranker_tokenizer
+
+         if reranker_model is None:
+             self.reranker_model = AutoModelForSequenceClassification.from_pretrained(os.getenv("TOKENIZER_LOCATION"))
+         else:
+             self.reranker_model = reranker_model
+         self.reranker_model.eval()
+
+     def change_llm(self, llms_client: LLMClient):
+         """
+         Changes the LLM client for the Engine instance.
+
+         Args:
+             llms_client (LLMClient): The new LLM client instance.
+         """
+         self.llms_client = llms_client
+
+     def rerank_with_me5(self, query, documents, k=5):
+         """
+         Reranks documents based on the query using the reranker model.
+
+         Args:
+             query (str): The query string.
+             documents (list): A list of documents to be reranked.
+             k (int, optional): The number of top documents to return. Default is 5.
+
+         Returns:
+             list: A list of top k reranked documents.
+         """
+         pairs = [(query, doc) for doc in set(documents)]
+         inputs = self.reranker_tokenizer(pairs, return_tensors='pt', padding=True, truncation=True, max_length=512)
+
+         # Make predictions
+         with torch.no_grad():
+             outputs = self.reranker_model(**inputs)
+
+         scores = outputs.logits.squeeze()
+
+         if scores.ndim > 1:
+             scores = scores[:, 1]  # Assuming binary classification and index 1 is the relevance score
+
+         sorted_indices = torch.argsort(scores, descending=True)
+         # If there is only one document, return it to avoid torch error
+         if len(sorted_indices) == 1:
+             return [pairs[0][1]]
+         # Sort documents by their highest score
+         sorted_docs = [pairs[i][1] for i in sorted_indices]
+         return sorted_docs[:k]
+
+     def update_docs(self, list_of_docs: list[dict], embed_only_fields=None, delete_existing=False):
+         """
+         Updates or creates documents in the Elasticsearch index.
+
+         Args:
+             list_of_docs (list[dict]): A list of dictionaries representing the documents to be indexed.
+             embed_only_fields (list, optional): A list of fields to be embedded. Default is None.
+             delete_existing (bool, optional): Whether to delete existing documents. Default is False.
+         """
+         embed_only_fields = embed_only_fields or definitions.models.values()
+         for doc in list_of_docs:
+             for semantic_model, field in definitions.models.items():
+                 if field in doc.keys() and field in embed_only_fields:
+                     content_vectors = self.models[semantic_model].encode(doc[field])
+                     doc[f'{field}_{semantic_model}_vectors'] = content_vectors
+
+             doc['last_update'] = datetime.now()
+         self.elastic_model.create_or_update_documents(list_of_docs, delete_existing)
+
+     def reciprocal_rank_fusion(self, ranking_lists, k=60, weights=None):
+         """
+         Performs Reciprocal Rank Fusion on a list of ranking lists.
+
+         Args:
+             :param ranking_lists: List of ranking lists, where each ranking list is a list of documents returned by a model.
+             :param k: The parameter for the reciprocal rank calculation (default is 60).
+             :param: weights: Optional. Weights for each ranking list.
+
+         Returns:
+             list: A fused ranking list of documents.
+         """
+         scores = defaultdict(float)
+
+         for list_index, rank_list in enumerate(ranking_lists):
+             for rank, identifier in enumerate(rank_list):
+                 # Reciprocal rank score
+                 w = weights[list_index] if weights else 1
+                 scores[identifier] += w / (k + rank + 1)
+
+         # Sort the documents by their cumulative scores in descending order
+         fused_list = sorted(scores, key=scores.get, reverse=True)
+
+         return fused_list
+
+     def search_documents(self, query: str, top_k: int):
+         """
+         Searches for documents based on the query and returns the top_k results.
+
+         Args:
+             query (str): The query string.
+             top_k (int): The number of top documents to return.
+
+         Returns:
+             list: A list of top k documents.
+         """
+         query_embeddings = {f"{semantic_model}": self.models[semantic_model].encode(query) for semantic_model in
+                             definitions.models.keys()}
+         all_docs_by_model = self.elastic_model.search(query_embeddings)
+         all_docs = []
+         ids_for_fusion = []
+         all_docs_and_scores = {}
+
+         for key, values in all_docs_by_model.items():
+             print(f"\nFound {len(values)} documents for model\n")
+             model_ids = []
+             scores_for_model = []
+
+             for doc in values:
+                 model_ids.append(doc["_source"]["page_id"])
+                 all_docs.append(doc)
+                 scores_for_model.append({"doc": doc["_source"]["title"], "score": doc["_score"]})
+             ids_for_fusion.append(model_ids)
+             all_docs_and_scores[f'{key}'] = scores_for_model
+         print(f"\nFusing {len(ids_for_fusion)} results\n")
+         fused_ids = self.reciprocal_rank_fusion(ids_for_fusion, k=top_k)
+         top_k_documents = []
+         top_titles = []
+
+         for fused_id in fused_ids:
+             for doc in all_docs:
+                 if doc["_source"]["page_id"] == fused_id and doc["_source"]["title"] not in top_titles:
+                     top_k_documents.append(doc["_source"])
+                     top_titles.append(doc["_source"]["title"])
+                     break
+             if len(top_titles) >= top_k:
+                 break
+
+         return top_k_documents, all_docs_and_scores
+
+     def answer_query(self, query, top_k: int, model):
+         """
+         Answers a query using the top_k documents and the specified model.
+
+         Args:
+             query (str): The query string.
+             top_k (int): The number of top documents to use for answering the query.
+             model: The model to use for answering the query.
+
+         Returns:
+             tuple: A tuple containing the top k documents, the answer, and the stats.
+         """
+         before_retrieval = time.perf_counter()
+         top_k_documents, all_docs_and_scores = self.search_documents(query, top_k)
+
+
+         retrieval_time = round(time.perf_counter() - before_retrieval, 4)
+         print(f"retrieval time: {retrieval_time}")
+
+         gpt_answer, gpt_elapsed, tokens = self.llms_client.answer(query, top_k_documents)
+         stats = {
+             "retrieval_time": retrieval_time,
+             "gpt_model": model,
+             "gpt_time": gpt_elapsed,
+             "tokens": tokens
+         }
+         return top_k_documents, gpt_answer, stats, all_docs_and_scores
+
+
+ engine = None
+
+
+ def engine_factory(llms_client: LLMClient, es_client=None):
+     global engine
+     if engine is None:
+         engine = Engine(llms_client=llms_client, es_client=es_client)
+     return engine
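
Apart from the wholesale re-add of unchanged lines (apparently whitespace or line-ending churn), the only functional change to engine.py in 1.5.0 is the new Engine.change_llm method. Because engine_factory caches a module-level singleton and silently ignores its arguments once the engine exists, change_llm is the way to swap the answer backend on a running engine while keeping the loaded SentenceTransformer models, the reranker, and the Elasticsearch connection. A minimal usage sketch, assuming an installed and configured package; EchoClient is the hypothetical LLMClient subclass sketched after the llm_client.py hunk at the end of this diff, not part of the package:

from kolzchut_ragbot.engine import engine_factory

# First call builds the singleton Engine (models, reranker, ES connection).
engine = engine_factory(llms_client=EchoClient())

# Later engine_factory calls return the same instance and ignore their
# arguments, so backend swaps go through the new method instead:
engine.change_llm(EchoClient())

One caveat worth noting for reviewers: answer_query actually returns a 4-tuple (documents, answer, stats, all_docs_and_scores), one element more than its docstring advertises.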
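
For reference, reciprocal_rank_fusion (unchanged in this release) is standard Reciprocal Rank Fusion: each identifier accumulates w / (k + rank + 1) over the per-model ranking lists, with rank 0-based and w defaulting to 1 when no weights are given. Note that search_documents calls it with k=top_k rather than the conventional smoothing constant of 60. A dependency-free sketch of the same arithmetic, extracted here purely for illustration:

from collections import defaultdict

def rrf(ranking_lists, k=60, weights=None):
    # Same scoring as Engine.reciprocal_rank_fusion.
    scores = defaultdict(float)
    for list_index, rank_list in enumerate(ranking_lists):
        w = weights[list_index] if weights else 1
        for rank, identifier in enumerate(rank_list):
            scores[identifier] += w / (k + rank + 1)
    return sorted(scores, key=scores.get, reverse=True)

# "b" is ranked 2nd and 1st (1/62 + 1/61), beating "a" at 1st and 3rd
# (1/61 + 1/63) once the reciprocal ranks are summed.
assert rrf([["a", "b", "c"], ["b", "c", "a"]]) == ["b", "a", "c"]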
kolzchut_ragbot/llm_client.py CHANGED
@@ -1,11 +1,11 @@
- from .Document import factory
- from abc import ABC, abstractmethod
- definitions = factory()
-
- class LLMClient(ABC):
-     @abstractmethod
-     def __init__(self):
-         self.field_for_answer = definitions.field_for_llm
-     @abstractmethod
-     def answer(self, _question, _top_k_docs) -> tuple[str, float, int]:
-         raise NotImplementedError
+ from .Document import factory
+ from abc import ABC, abstractmethod
+ definitions = factory()
+
+ class LLMClient(ABC):
+     @abstractmethod
+     def __init__(self):
+         self.field_for_answer = definitions.field_for_llm
+     @abstractmethod
+     def answer(self, _question, _top_k_docs) -> tuple[str, float, int]:
+         raise NotImplementedError
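
This hunk removes and re-adds identical lines, so it too looks like whitespace or line-ending churn; the LLMClient contract is unchanged in 1.5.0. A concrete client must call the base __init__, which records definitions.field_for_llm as field_for_answer, and implement answer so it returns an (answer_text, elapsed_seconds, token_count) tuple, since Engine.answer_query unpacks exactly those three values. A hypothetical minimal stub, assuming the package's document definitions are configured:

import time
from kolzchut_ragbot.llm_client import LLMClient

class EchoClient(LLMClient):
    """Hypothetical stub that echoes the question back; for wiring tests only."""

    def __init__(self):
        super().__init__()  # sets self.field_for_answer from the document definitions

    def answer(self, question, top_k_docs) -> tuple[str, float, int]:
        start = time.perf_counter()
        # top_k_docs are the "_source" dicts collected by search_documents,
        # so fields such as "title" are available here.
        titles = ", ".join(doc.get("title", "?") for doc in top_k_docs)
        text = f"echo: {question} (context: {titles})"
        return text, round(time.perf_counter() - start, 4), 0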