kssrag 0.2.2.tar.gz → 0.2.3.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. {kssrag-0.2.2 → kssrag-0.2.3}/PKG-INFO +2 -2
  2. {kssrag-0.2.2 → kssrag-0.2.3}/README.md +1 -1
  3. {kssrag-0.2.2 → kssrag-0.2.3}/kssrag/core/agents.py +149 -106
  4. {kssrag-0.2.2 → kssrag-0.2.3}/kssrag/server.py +1 -1
  5. {kssrag-0.2.2 → kssrag-0.2.3}/kssrag.egg-info/PKG-INFO +2 -2
  6. {kssrag-0.2.2 → kssrag-0.2.3}/setup.py +1 -1
  7. {kssrag-0.2.2 → kssrag-0.2.3}/kssrag/__init__.py +0 -0
  8. {kssrag-0.2.2 → kssrag-0.2.3}/kssrag/cli.py +0 -0
  9. {kssrag-0.2.2 → kssrag-0.2.3}/kssrag/config.py +0 -0
  10. {kssrag-0.2.2 → kssrag-0.2.3}/kssrag/core/__init__.py +0 -0
  11. {kssrag-0.2.2 → kssrag-0.2.3}/kssrag/core/chunkers.py +0 -0
  12. {kssrag-0.2.2 → kssrag-0.2.3}/kssrag/core/retrievers.py +0 -0
  13. {kssrag-0.2.2 → kssrag-0.2.3}/kssrag/core/vectorstores.py +0 -0
  14. {kssrag-0.2.2 → kssrag-0.2.3}/kssrag/kssrag.py +0 -0
  15. {kssrag-0.2.2 → kssrag-0.2.3}/kssrag/models/__init__.py +0 -0
  16. {kssrag-0.2.2 → kssrag-0.2.3}/kssrag/models/local_llms.py +0 -0
  17. {kssrag-0.2.2 → kssrag-0.2.3}/kssrag/models/openrouter.py +0 -0
  18. {kssrag-0.2.2 → kssrag-0.2.3}/kssrag/utils/__init__.py +0 -0
  19. {kssrag-0.2.2 → kssrag-0.2.3}/kssrag/utils/document_loaders.py +0 -0
  20. {kssrag-0.2.2 → kssrag-0.2.3}/kssrag/utils/helpers.py +0 -0
  21. {kssrag-0.2.2 → kssrag-0.2.3}/kssrag/utils/ocr.py +0 -0
  22. {kssrag-0.2.2 → kssrag-0.2.3}/kssrag/utils/ocr_loader.py +0 -0
  23. {kssrag-0.2.2 → kssrag-0.2.3}/kssrag/utils/preprocessors.py +0 -0
  24. {kssrag-0.2.2 → kssrag-0.2.3}/kssrag.egg-info/SOURCES.txt +0 -0
  25. {kssrag-0.2.2 → kssrag-0.2.3}/kssrag.egg-info/dependency_links.txt +0 -0
  26. {kssrag-0.2.2 → kssrag-0.2.3}/kssrag.egg-info/entry_points.txt +0 -0
  27. {kssrag-0.2.2 → kssrag-0.2.3}/kssrag.egg-info/requires.txt +0 -0
  28. {kssrag-0.2.2 → kssrag-0.2.3}/kssrag.egg-info/top_level.txt +0 -0
  29. {kssrag-0.2.2 → kssrag-0.2.3}/setup.cfg +0 -0
  30. {kssrag-0.2.2 → kssrag-0.2.3}/tests/__init__.py +0 -0
  31. {kssrag-0.2.2 → kssrag-0.2.3}/tests/test_basic.py +0 -0
  32. {kssrag-0.2.2 → kssrag-0.2.3}/tests/test_bm25s.py +0 -0
  33. {kssrag-0.2.2 → kssrag-0.2.3}/tests/test_config.py +0 -0
  34. {kssrag-0.2.2 → kssrag-0.2.3}/tests/test_image_chunker.py +0 -0
  35. {kssrag-0.2.2 → kssrag-0.2.3}/tests/test_integration.py +0 -0
  36. {kssrag-0.2.2 → kssrag-0.2.3}/tests/test_ocr.py +0 -0
  37. {kssrag-0.2.2 → kssrag-0.2.3}/tests/test_streaming.py +0 -0
  38. {kssrag-0.2.2 → kssrag-0.2.3}/tests/test_vectorstores.py +0 -0
{kssrag-0.2.2 → kssrag-0.2.3}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: kssrag
- Version: 0.2.2
+ Version: 0.2.3
  Summary: A flexible Retrieval-Augmented Generation framework by Ksschkw
  Home-page: https://github.com/Ksschkw/kssrag
  Author: Ksschkw
@@ -85,7 +85,7 @@ Dynamic: summary

  ![Python Version](https://img.shields.io/badge/python-3.8%2B-blue)
  ![License](https://img.shields.io/badge/license-MIT-green)
- ![Version](https://img.shields.io/badge/version-0.2.0-brightgreen)
+ ![Version](https://img.shields.io/badge/version-0.2.3-brightgreen)
  ![Framework](https://img.shields.io/badge/framework-RAG-orange)
  ![Documentation](https://img.shields.io/badge/docs-comprehensive-brightgreen)

{kssrag-0.2.2 → kssrag-0.2.3}/README.md
@@ -4,7 +4,7 @@

  ![Python Version](https://img.shields.io/badge/python-3.8%2B-blue)
  ![License](https://img.shields.io/badge/license-MIT-green)
- ![Version](https://img.shields.io/badge/version-0.2.0-brightgreen)
+ ![Version](https://img.shields.io/badge/version-0.2.3-brightgreen)
  ![Framework](https://img.shields.io/badge/framework-RAG-orange)
  ![Documentation](https://img.shields.io/badge/docs-comprehensive-brightgreen)

{kssrag-0.2.2 → kssrag-0.2.3}/kssrag/core/agents.py
@@ -21,13 +21,36 @@ class RAGAgent:
  if not any(msg.get("role") == "system" for msg in self.conversation):
  self.add_message("system", self.system_prompt)

+ # def add_message(self, role: str, content: str):
+ # """Add a message to the conversation history"""
+ # self.conversation.append({"role": role, "content": content})
+
+ # # Keep conversation manageable (last 15 messages)
+ # if len(self.conversation) > 15:
+ # self._smart_trim_conversation()
+
  def add_message(self, role: str, content: str):
- """Add a message to the conversation history"""
+ """Add a message to the conversation history (with simple dedupe for assistant)."""
+ content = content.strip()
+ # Prevent adding empty messages
+ if not content:
+ logger.info("Attempted to add empty message – ignored.")
+ return
+
+ # If last message is identical assistant content, skip to avoid duplicates
+ if self.conversation:
+ last = self.conversation[-1]
+ if role == "assistant" and last.get("role") == "assistant":
+ if last.get("content", "").strip() == content:
+ logger.info("Duplicate assistant message suppressed.")
+ return
+
  self.conversation.append({"role": role, "content": content})
-
+
  # Keep conversation manageable (last 15 messages)
  if len(self.conversation) > 15:
  self._smart_trim_conversation()
+

  def _smart_trim_conversation(self):
  """Trim conversation while preserving system message and recent exchanges"""
@@ -123,136 +146,140 @@ class RAGAgent:
  - Focus on user preferences, names, important context

  The summary will be automatically hidden from the user."""
-
+
  # def _extract_summary_and_response(self, full_response: str) -> tuple[str, Optional[str]]:
  # """Extract summary from response and return clean user response - handles partial markers"""
+ # # Keep original markers for backward compatibility
  # summary_start = "[SUMMARY_START]"
  # summary_end = "[SUMMARY_END]"

- # # Check if we have complete markers
- # if summary_start in full_response and summary_end in full_response:
- # start_idx = full_response.find(summary_start) + len(summary_start)
- # end_idx = full_response.find(summary_end)
+ # # NEW: Normalize the response first (improvement from new version)
+ # normalized = full_response.replace('\n', ' ').replace('\r', ' ').strip()
+
+ # # Check if we have complete markers - KEEP original logic but use normalized
+ # if summary_start in normalized and summary_end in normalized:
+ # start_idx = normalized.find(summary_start) + len(summary_start)
+ # end_idx = normalized.find(summary_end)

- # summary = full_response[start_idx:end_idx].strip()
- # user_response = full_response[:full_response.find(summary_start)].strip()
+ # summary = normalized[start_idx:end_idx].strip()
+ # user_response = normalized[:normalized.find(summary_start)].strip()

  # logger.info(f"✅ SUCCESS: Summary extracted and separated from user response")
  # logger.info(f"User response length: {len(user_response)} chars")
  # logger.info(f"Summary extracted: '{summary}'")
- # return user_response, summary
-
- # # Check if we have partial markers (common in streaming)
- # elif summary_start in full_response:
- # # We have start marker but no end marker - extract what we can
- # start_idx = full_response.find(summary_start) + len(summary_start)
- # potential_summary = full_response[start_idx:].strip()

- # # Clean up any partial end markers or weird formatting
- # if potential_summary:
- # # Remove any trailing partial markers or whitespace
- # cleaned_summary = potential_summary.split('[SUMMARY_')[0].split('[SUMMARY')[0].strip()
- # user_response = full_response[:full_response.find(summary_start)].strip()
+ # # NEW: Add validation from improved version
+ # if not summary or len(summary) < 5:
+ # logger.info("❌ Summary too short, returning full response")
+ # return full_response.strip(), None

- # if cleaned_summary and len(cleaned_summary) > 10: # Only if meaningful content
- # logger.info(f"⚠️ Partial summary extracted (missing end marker): '{cleaned_summary}'")
- # return user_response, cleaned_summary
-
- # logger.info("❌ Incomplete summary markers found")
- # return full_response, None
-
- # logger.info("❌ No summary markers found, returning full response")
- # logger.info(f"Full response length: {len(full_response)} chars")
- # return full_response, None
+ # return user_response, summary

  def _extract_summary_and_response(self, full_response: str) -> tuple[str, Optional[str]]:
- """Extract summary from response and return clean user response - handles partial markers"""
- # Keep original markers for backward compatibility
+ """Extract summary from response and return clean user response."""
+
+ if not full_response:
+ return "", None
+
  summary_start = "[SUMMARY_START]"
  summary_end = "[SUMMARY_END]"
-
- # NEW: Normalize the response first (improvement from new version)
- normalized = full_response.replace('\n', ' ').replace('\r', ' ').strip()
-
- # Check if we have complete markers - KEEP original logic but use normalized
+
+ original = full_response
+ normalized = original.replace('\r\n', '\n').replace('\r', '\n')
+
+ # Case 1: Complete markers
  if summary_start in normalized and summary_end in normalized:
  start_idx = normalized.find(summary_start) + len(summary_start)
  end_idx = normalized.find(summary_end)
-
  summary = normalized[start_idx:end_idx].strip()
- user_response = normalized[:normalized.find(summary_start)].strip()
-
- logger.info(f"✅ SUCCESS: Summary extracted and separated from user response")
- logger.info(f"User response length: {len(user_response)} chars")
- logger.info(f"Summary extracted: '{summary}'")
-
- # NEW: Add validation from improved version
+
+ user_response = original.split(summary_start)[0].strip()
+
  if not summary or len(summary) < 5:
- logger.info("Summary too short, returning full response")
- return full_response.strip(), None
-
+ logger.info("Summary too short or invalid")
+ return original.strip(), None
+
+ logger.info("Summary extracted successfully")
  return user_response, summary
-
- # Check if we have partial markers (common in streaming) - IMPROVED logic
- elif summary_start in normalized:
- # We have start marker but no end marker - extract what we can
+
+ # Case 2: Partial marker (start only)
+ if summary_start in normalized:
  start_idx = normalized.find(summary_start) + len(summary_start)
-
- # NEW: Take reasonable chunk (200 chars) instead of everything
- potential_summary = normalized[start_idx:start_idx+200].strip()
-
- # COMBINED: Clean up from both versions
- if potential_summary:
- # Clean up any partial markers or weird formatting
- cleaned_summary = (potential_summary
- .split('[SUMMARY_')[0]
- .split('[SUMMARY')[0]
- .split('[')[0] # NEW from improved version
- .split('\n')[0] # NEW from improved version
- .strip())
-
- user_response = normalized[:normalized.find(summary_start)].strip()
-
- # COMBINED validation: meaningful content check
- if cleaned_summary and len(cleaned_summary) >= 10: # Original threshold
- logger.info(f"⚠️ Partial summary extracted (missing end marker): '{cleaned_summary}'")
- # NEW: Additional validation
- if len(cleaned_summary) >= 5: # Improved version threshold
- return user_response, cleaned_summary
-
- logger.info("❌ Incomplete summary markers found")
- return full_response.strip(), None # NEW: strip for consistency
-
+ potential = normalized[start_idx:start_idx + 200].strip()
+
+ cleaned_summary = (
+ potential
+ .split('[SUMMARY_')[0]
+ .split('[SUMMARY')[0]
+ .split('[')[0]
+ .strip()
+ )
+
+ user_response = original.split(summary_start)[0].strip()
+
+ if cleaned_summary and len(cleaned_summary) >= 10:
+ logger.info("Partial summary extracted")
+ return user_response, cleaned_summary
+
+ logger.info("Partial summary invalid")
+ return original.strip(), None
+
+ # Case 3: No markers at all
+ logger.info("No summary markers found")
  # No markers found - KEEP original but with normalization
- logger.info(" No summary markers found, returning full response")
- logger.info(f"Full response length: {len(full_response)} chars")
- return full_response.strip(), None # NEW: strip for consistency
+ # logger.info(" No summary markers found, returning full response")
+ logger.info(f"Full response length: {len(original)} chars")
+ return original.strip(), None
+
+
+
+ # return full_response.strip(), None # NEW: strip for consistency

+ # def _add_conversation_summary(self, new_summary: str):
+ # """Add a new discrete conversation summary"""
+ # if not new_summary or new_summary.lower() == "none":
+ # logger.info("🔄 No summary to add (empty or 'none')")
+ # return
+
+ # # Add as a new discrete summary
+ # self.conversation_summaries.append(new_summary)
+ # logger.info(f"📝 ADDED Summary #{len(self.conversation_summaries)}: '{new_summary}'")
+
+ # # Keep only recent summaries (last 7)
+ # if len(self.conversation_summaries) > 7:
+ # self.conversation_summaries = self.conversation_summaries[-7:]
+ # removed = self.conversation_summaries.pop(0)
+ # logger.info(f"🗑️ DROPPED Oldest summary: '{removed}'")
+ # logger.info(f"📊 Summary count maintained at {len(self.conversation_summaries)}")
+ # logger.info(f"Added conversation summary #{len(self.conversation_summaries)}: {new_summary}")
  def _add_conversation_summary(self, new_summary: str):
  """Add a new discrete conversation summary"""
  if not new_summary or new_summary.lower() == "none":
- logger.info("🔄 No summary to add (empty or 'none')")
+ logger.info(" No summary to add (empty or 'none')")
  return
-
- # Add as a new discrete summary
+
+ new_summary = new_summary.strip()
+ if not new_summary:
+ logger.info(" No summary to add after strip")
+ return
+
+ # Append new summary
  self.conversation_summaries.append(new_summary)
- logger.info(f"📝 ADDED Summary #{len(self.conversation_summaries)}: '{new_summary}'")
+ logger.info(f" ADDED Summary #{len(self.conversation_summaries)}: '{new_summary}'")

  # Keep only recent summaries (last 7)
  if len(self.conversation_summaries) > 7:
  self.conversation_summaries = self.conversation_summaries[-7:]
- removed = self.conversation_summaries.pop(0)
- logger.info(f"🗑️ DROPPED Oldest summary: '{removed}'")
- logger.info(f"📊 Summary count maintained at {len(self.conversation_summaries)}")
- logger.info(f"Added conversation summary #{len(self.conversation_summaries)}: {new_summary}")
+ logger.info(f" Summary count trimmed to {len(self.conversation_summaries)}")
+

  def query(self, question: str, top_k: int = 5, include_context: bool = True) -> str:
  """Process a query with stealth conversation summarization"""
  try:
  # Retrieve relevant context
- logger.info(f"🔍 QUERY START: '{question}' (top_k: {top_k})")
+ logger.info(f" QUERY START: '{question}' (top_k: {top_k})")
  context_docs = self.retriever.retrieve(question, top_k)
- logger.info(f"📄 Retrieved {len(context_docs)} context documents")
+ logger.info(f" Retrieved {len(context_docs)} context documents")

  if not context_docs and include_context:
  logger.warning(f"No context found for query: {question}")
@@ -266,7 +293,7 @@ class RAGAgent:

  # Generate response
  full_response = self.llm.predict(messages)
- logger.info(f"🤖 LLM response received: {len(full_response)} chars")
+ logger.info(f" LLM response received: {len(full_response)} chars")

  # Extract summary and clean response
  user_response, conversation_summary = self._extract_summary_and_response(full_response)
@@ -281,12 +308,12 @@ class RAGAgent:
  # Add assistant response to conversation (clean version only)
  self.add_message("assistant", user_response)

- logger.info(f"💬 Final user response: {len(user_response)} chars")
+ logger.info(f" Final user response: {len(user_response)} chars")
  return user_response

  except Exception as e:
  logger.error(f"Error processing query: {str(e)}")
- # logger.error(f"💥 QUERY FAILED: {str(e)}")
+ # logger.error(f" QUERY FAILED: {str(e)}")
  return "I encountered an issue processing your query. Please try again."

  def query_stream(self, question: str, top_k: int = 5) -> Generator[str, None, None]:
@@ -294,7 +321,7 @@ class RAGAgent:
  Professional-grade streaming with multiple fallback strategies
  """
  try:
- logger.info(f"🌊 STREAMING QUERY START: '{question}'")
+ logger.info(f" STREAMING QUERY START: '{question}'")

  # Strategy 1: Try true streaming first
  if hasattr(self.llm, 'predict_stream'):
@@ -305,11 +332,11 @@ class RAGAgent:
  logger.warning(f"Streaming failed, falling back: {stream_error}")

  # Strategy 2: Fallback to simulated streaming
- logger.info("🔄 Falling back to simulated streaming")
+ logger.info(" Falling back to simulated streaming")
  yield from self._simulated_streaming(question, top_k)

  except Exception as e:
- logger.error(f"💥 ALL STREAMING STRATEGIES FAILED: {str(e)}")
+ logger.error(f" ALL STREAMING STRATEGIES FAILED: {str(e)}")
  yield f"Error: {str(e)}"

  def _stream_with_summary_protection(self, question: str, top_k: int) -> Generator[str, None, None]:
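
query_stream above tries the model's native predict_stream first and, if that raises, falls back to _simulated_streaming, so callers always receive a plain chunk generator. A hedged sketch of that two-strategy pattern in isolation (the llm argument stands in for any object exposing predict/predict_stream; this is not the package's implementation):

    from typing import Generator

    def simulated_stream(text: str, chunk_size: int = 2) -> Generator[str, None, None]:
        # Fallback: emit an already-complete response in small chunks.
        for i in range(0, len(text), chunk_size):
            yield text[i:i + chunk_size]

    def stream_with_fallback(llm, messages) -> Generator[str, None, None]:
        # Strategy 1: native token streaming if the model exposes it.
        if hasattr(llm, "predict_stream"):
            try:
                yield from llm.predict_stream(messages)
                return
            except Exception:
                pass  # fall through to the simulated path
        # Strategy 2: one blocking call, then simulated streaming.
        yield from simulated_stream(llm.predict(messages))
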
@@ -328,7 +355,7 @@ class RAGAgent:
  # Check for summary markers
  if any(marker in chunk for marker in ['[SUMMARY', 'SUMMARY_']):
  if not summary_started:
- logger.info("🚨 Summary markers detected - cutting stream")
+ logger.info(" Summary markers detected - cutting stream")
  summary_started = True
  clean_part = self._extract_clean_content(buffer)
  if clean_part:
@@ -346,15 +373,31 @@ class RAGAgent:
  logger.error(f"Streaming error: {e}")
  raise # Re-raise to trigger fallback

+ # def _process_complete_response(self, full_response: str):
+ # """Process complete response and extract summary"""
+ # user_response, conversation_summary = self._extract_summary_and_response(full_response)
+
+ # if conversation_summary:
+ # logger.info(f" Summary extracted: '{conversation_summary}'")
+ # self._add_conversation_summary(conversation_summary)
+
+ # self.add_message("assistant", user_response)
  def _process_complete_response(self, full_response: str):
  """Process complete response and extract summary"""
  user_response, conversation_summary = self._extract_summary_and_response(full_response)
-
+
  if conversation_summary:
- logger.info(f"📝 Summary extracted: '{conversation_summary}'")
+ logger.info(f" Summary extracted: '{conversation_summary}'")
  self._add_conversation_summary(conversation_summary)
-
- self.add_message("assistant", user_response)
+
+ # extra guard: only add assistant message if different from last assistant message
+ if user_response:
+ last = self.conversation[-1] if self.conversation else None
+ if not (last and last.get("role") == "assistant" and last.get("content", "").strip() == user_response.strip()):
+ self.add_message("assistant", user_response)
+ else:
+ logger.info("Skipped adding duplicate assistant message in _process_complete_response.")
+

  def _simulated_streaming(self, question: str, top_k: int) -> Generator[str, None, None]:
  """Simulated streaming that guarantees no summary leakage"""
@@ -369,14 +412,14 @@ class RAGAgent:
  user_response, conversation_summary = self._extract_summary_and_response(complete_response)

  if conversation_summary:
- logger.info(f"📝 Summary extracted: '{conversation_summary}'")
+ logger.info(f" Summary extracted: '{conversation_summary}'")
  self._add_conversation_summary(conversation_summary)

  self.add_message("assistant", user_response)

  # Simulate streaming (smaller chunks for better UX)
  chunk_size = 2 # Even smaller chunks for smoother streaming
- for i in range(0, len(user_response), chunk_size):
+ for i in range(0, len(user_response), chunk_size):
  yield user_response[i:i+chunk_size]
  import time
  time.sleep(0.02) # Slightly longer delay for readability
@@ -403,5 +446,5 @@ class RAGAgent:
  "message_count": len(self.conversation),
  "recent_messages": [f"{msg['role']}: {msg['content'][:50]}..." for msg in self.conversation[-3:]]
  }
- logger.info(f"📊 Context snapshot: {context}")
+ logger.info(f" Context snapshot: {context}")
  return context
{kssrag-0.2.2 → kssrag-0.2.3}/kssrag/server.py
@@ -125,7 +125,7 @@ def create_app(rag_agent: RAGAgent, server_config: Optional[ServerConfig] = None

  return StreamingResponse(
  generate(),
- media_type="text/plain",
+ media_type="text/event-stream",
  headers={
  "Cache-Control": "no-cache",
  "Connection": "keep-alive",
{kssrag-0.2.2 → kssrag-0.2.3}/kssrag.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: kssrag
- Version: 0.2.2
+ Version: 0.2.3
  Summary: A flexible Retrieval-Augmented Generation framework by Ksschkw
  Home-page: https://github.com/Ksschkw/kssrag
  Author: Ksschkw
@@ -85,7 +85,7 @@ Dynamic: summary

  ![Python Version](https://img.shields.io/badge/python-3.8%2B-blue)
  ![License](https://img.shields.io/badge/license-MIT-green)
- ![Version](https://img.shields.io/badge/version-0.2.0-brightgreen)
+ ![Version](https://img.shields.io/badge/version-0.2.3-brightgreen)
  ![Framework](https://img.shields.io/badge/framework-RAG-orange)
  ![Documentation](https://img.shields.io/badge/docs-comprehensive-brightgreen)

{kssrag-0.2.2 → kssrag-0.2.3}/setup.py
@@ -6,7 +6,7 @@ long_description = (here / "README.md").read_text(encoding="utf-8")

  setup(
  name="kssrag",
- version="0.2.2",
+ version="0.2.3",
  description="A flexible Retrieval-Augmented Generation framework by Ksschkw",
  long_description=long_description,
  long_description_content_type="text/markdown",