cwyodmodules 0.3.81__py3-none-any.whl → 0.3.84__py3-none-any.whl

This diff compares two publicly released versions of the package as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in their respective public registries.

--- cwyodmodules/batch/utilities/document_chunking/fixed_size_overlap.py
+++ cwyodmodules/batch/utilities/document_chunking/fixed_size_overlap.py
@@ -1,6 +1,5 @@
  from typing import List
  from .document_chunking_base import DocumentChunkingBase
- from langchain.text_splitter import TokenTextSplitter
  from .chunking_strategy import ChunkingSettings
  from ..common.source_document import SourceDocument
  from ...utilities.helpers.env_helper import EnvHelper
@@ -10,6 +9,54 @@ log_execution = env_helper.LOG_EXECUTION
  log_args = env_helper.LOG_ARGS
  log_result = env_helper.LOG_RESULT

+
+ class SimpleTokenSplitter:
+     """Simple token-based text splitter to replace LangChain's TokenTextSplitter."""
+
+     def __init__(self, chunk_size: int, chunk_overlap: int):
+         self.chunk_size = chunk_size
+         self.chunk_overlap = chunk_overlap
+
+     def split_text(self, text: str) -> List[str]:
+         """Split text into chunks based on approximate token count."""
+         if not text:
+             return []
+
+         # Rough approximation: 1 token ≈ 4 characters
+         char_chunk_size = self.chunk_size * 4
+         char_overlap = self.chunk_overlap * 4
+
+         chunks = []
+         start = 0
+
+         while start < len(text):
+             # Calculate end position
+             end = start + char_chunk_size
+
+             # If this is not the last chunk, try to find a good break point
+             if end < len(text):
+                 # Look for sentence endings, then paragraph breaks, then word boundaries
+                 for break_char in ['. ', '.\n', '\n\n', '\n', ' ']:
+                     break_pos = text.rfind(break_char, start, end)
+                     if break_pos > start:
+                         end = break_pos + len(break_char)
+                         break
+
+             # Extract chunk
+             chunk = text[start:end].strip()
+             if chunk:
+                 chunks.append(chunk)
+
+             # Move start position (with overlap)
+             start = max(start + 1, end - char_overlap)
+
+             # Prevent infinite loop
+             if start >= len(text):
+                 break
+
+         return chunks
+
+
  class FixedSizeOverlapDocumentChunking(DocumentChunkingBase):
      def __init__(self) -> None:
          pass
@@ -28,10 +75,13 @@ class FixedSizeOverlapDocumentChunking(DocumentChunkingBase):
              logger.error("No documents provided for chunking.")
              logger.debug(e)
              document_url = None
-         splitter = TokenTextSplitter.from_tiktoken_encoder(
-             chunk_size=chunking.chunk_size, chunk_overlap=chunking.chunk_overlap
+
+         splitter = SimpleTokenSplitter(
+             chunk_size=chunking.chunk_size,
+             chunk_overlap=chunking.chunk_overlap
          )
          chunked_content_list = splitter.split_text(full_document_content)
+
          # Create document for each chunk
          documents = []
          chunk_offset = 0
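
The new SimpleTokenSplitter keeps the token-denominated chunk_size/chunk_overlap settings but applies them as character windows at a fixed 1 token ≈ 4 characters, so chunk_size=500 becomes a 2000-character window. A minimal windowing sketch, assuming the module path from the RECORD listing below; the input and numbers are illustrative:

    # Sketch: no break characters in the input, so the raw character
    # windows (chunk_size * 4, overlap * 4) are exactly what comes back.
    from cwyodmodules.batch.utilities.document_chunking.fixed_size_overlap import (
        SimpleTokenSplitter,
    )

    splitter = SimpleTokenSplitter(chunk_size=500, chunk_overlap=100)
    chunks = splitter.split_text("a" * 4800)
    # Windows start at 0, 1600, 3200 (each advances by 2000 - 400 characters)
    print([len(c) for c in chunks])  # [2000, 2000, 1600]
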
--- cwyodmodules/batch/utilities/document_chunking/layout.py
+++ cwyodmodules/batch/utilities/document_chunking/layout.py
@@ -1,6 +1,5 @@
  from typing import List
  from .document_chunking_base import DocumentChunkingBase
- from langchain.text_splitter import MarkdownTextSplitter
  from .chunking_strategy import ChunkingSettings
  from ..common.source_document import SourceDocument
  from ...utilities.helpers.env_helper import EnvHelper
@@ -11,6 +10,50 @@ log_execution = env_helper.LOG_EXECUTION
  log_args = env_helper.LOG_ARGS
  log_result = env_helper.LOG_RESULT

+
+ class SimpleTextSplitter:
+     """Simple text splitter to replace LangChain's MarkdownTextSplitter."""
+
+     def __init__(self, chunk_size: int, chunk_overlap: int):
+         self.chunk_size = chunk_size
+         self.chunk_overlap = chunk_overlap
+
+     def split_text(self, text: str) -> List[str]:
+         """Split text into chunks with overlap."""
+         if not text:
+             return []
+
+         chunks = []
+         start = 0
+
+         while start < len(text):
+             # Calculate end position
+             end = start + self.chunk_size
+
+             # If this is not the last chunk, try to find a good break point
+             if end < len(text):
+                 # Look for sentence endings, then paragraph breaks, then word boundaries
+                 for break_char in ['. ', '.\n', '\n\n', '\n', ' ']:
+                     break_pos = text.rfind(break_char, start, end)
+                     if break_pos > start:
+                         end = break_pos + len(break_char)
+                         break
+
+             # Extract chunk
+             chunk = text[start:end].strip()
+             if chunk:
+                 chunks.append(chunk)
+
+             # Move start position (with overlap)
+             start = max(start + 1, end - self.chunk_overlap)
+
+             # Prevent infinite loop
+             if start >= len(text):
+                 break
+
+         return chunks
+
+
  class LayoutDocumentChunking(DocumentChunkingBase):
      def __init__(self) -> None:
          pass
@@ -29,10 +72,13 @@ class LayoutDocumentChunking(DocumentChunkingBase):
              logger.error("No documents provided for chunking.")
              logger.debug(e)
              document_url = None
-         splitter = MarkdownTextSplitter.from_tiktoken_encoder(
-             chunk_size=chunking.chunk_size, chunk_overlap=chunking.chunk_overlap
+
+         splitter = SimpleTextSplitter(
+             chunk_size=chunking.chunk_size,
+             chunk_overlap=chunking.chunk_overlap
          )
          chunked_content_list = splitter.split_text(full_document_content)
+
          # Create document for each chunk
          documents = []
          chunk_offset = 0
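
Unlike fixed_size_overlap.py, this SimpleTextSplitter applies chunk_size and chunk_overlap directly as character counts, with no 4x token scaling, whereas the removed MarkdownTextSplitter.from_tiktoken_encoder counted tokens. With unchanged settings, layout chunks therefore come out roughly four times smaller than before. A comparison sketch under that reading (module paths per the RECORD listing below):

    from cwyodmodules.batch.utilities.document_chunking.fixed_size_overlap import SimpleTokenSplitter
    from cwyodmodules.batch.utilities.document_chunking.layout import SimpleTextSplitter

    text = "word " * 2000  # ~10,000 characters of filler

    token_chunks = SimpleTokenSplitter(chunk_size=500, chunk_overlap=100).split_text(text)
    char_chunks = SimpleTextSplitter(chunk_size=500, chunk_overlap=100).split_text(text)
    # Same settings, ~4x different windows: ~2000 vs ~500 characters per chunk
    print(len(token_chunks[0]), len(char_chunks[0]))
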
--- cwyodmodules/batch/utilities/document_chunking/page.py
+++ cwyodmodules/batch/utilities/document_chunking/page.py
@@ -1,6 +1,5 @@
  from typing import List
  from .document_chunking_base import DocumentChunkingBase
- from langchain.text_splitter import MarkdownTextSplitter
  from .chunking_strategy import ChunkingSettings
  from ..common.source_document import SourceDocument
  from ...utilities.helpers.env_helper import EnvHelper
@@ -10,6 +9,50 @@ log_execution = env_helper.LOG_EXECUTION
  log_args = env_helper.LOG_ARGS
  log_result = env_helper.LOG_RESULT

+
+ class SimpleTextSplitter:
+     """Simple text splitter to replace LangChain's MarkdownTextSplitter."""
+
+     def __init__(self, chunk_size: int, chunk_overlap: int):
+         self.chunk_size = chunk_size
+         self.chunk_overlap = chunk_overlap
+
+     def split_text(self, text: str) -> List[str]:
+         """Split text into chunks with overlap."""
+         if not text:
+             return []
+
+         chunks = []
+         start = 0
+
+         while start < len(text):
+             # Calculate end position
+             end = start + self.chunk_size
+
+             # If this is not the last chunk, try to find a good break point
+             if end < len(text):
+                 # Look for sentence endings, then paragraph breaks, then word boundaries
+                 for break_char in ['. ', '.\n', '\n\n', '\n', ' ']:
+                     break_pos = text.rfind(break_char, start, end)
+                     if break_pos > start:
+                         end = break_pos + len(break_char)
+                         break
+
+             # Extract chunk
+             chunk = text[start:end].strip()
+             if chunk:
+                 chunks.append(chunk)
+
+             # Move start position (with overlap)
+             start = max(start + 1, end - self.chunk_overlap)
+
+             # Prevent infinite loop
+             if start >= len(text):
+                 break
+
+         return chunks
+
+
  class PageDocumentChunking(DocumentChunkingBase):
      def __init__(self) -> None:
          pass
@@ -25,8 +68,10 @@ class PageDocumentChunking(DocumentChunkingBase):
              logger.error("No documents provided for chunking.")
              logger.debug(e)
              document_url = None
-         splitter = MarkdownTextSplitter.from_tiktoken_encoder(
-             chunk_size=chunking.chunk_size, chunk_overlap=chunking.chunk_overlap
+
+         splitter = SimpleTextSplitter(
+             chunk_size=chunking.chunk_size,
+             chunk_overlap=chunking.chunk_overlap
          )
          documents_chunked = []
          for idx, document in enumerate(documents):
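
page.py duplicates the same SimpleTextSplitter as layout.py and, per the final hunk, applies it to each page's content inside the enumerate(documents) loop rather than to concatenated content. A small sketch of the break-point preference, which tries '. ' before any newline or space fallback; the sample text is illustrative:

    from cwyodmodules.batch.utilities.document_chunking.page import SimpleTextSplitter

    splitter = SimpleTextSplitter(chunk_size=20, chunk_overlap=0)
    # The first window [0:20] contains a sentence ending, so the cut lands there.
    print(splitter.split_text("Alpha beta. Gamma delta epsilon zeta eta theta."))
    # ['Alpha beta.', 'Gamma delta epsilon', 'zeta eta theta.']
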
--- cwyodmodules/batch/utilities/document_loading/web.py
+++ cwyodmodules/batch/utilities/document_loading/web.py
@@ -1,16 +1,70 @@
  from typing import List
  import re
- from langchain_community.document_loaders import WebBaseLoader
+ import requests
+ from bs4 import BeautifulSoup
  from .document_loading_base import DocumentLoadingBase
  from ..common.source_document import SourceDocument


+ class SimpleWebDocument:
+     """Simple document class to replace LangChain's Document."""
+     def __init__(self, page_content: str, metadata: dict):
+         self.page_content = page_content
+         self.metadata = metadata
+
+
+ class SimpleWebLoader:
+     """Simple web loader to replace LangChain's WebBaseLoader."""
+
+     def __init__(self, url: str):
+         self.url = url
+
+     def load(self) -> List[SimpleWebDocument]:
+         """Load web content from URL."""
+         try:
+             # Fetch the webpage
+             headers = {
+                 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
+             }
+             response = requests.get(self.url, headers=headers, timeout=30)
+             response.raise_for_status()
+
+             # Parse HTML content
+             soup = BeautifulSoup(response.content, 'html.parser')
+
+             # Remove script and style elements
+             for script in soup(["script", "style"]):
+                 script.decompose()
+
+             # Get text content
+             text = soup.get_text()
+
+             # Clean up text
+             lines = (line.strip() for line in text.splitlines())
+             chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
+             text = ' '.join(chunk for chunk in chunks if chunk)
+
+             return [SimpleWebDocument(
+                 page_content=text,
+                 metadata={"source": self.url}
+             )]
+
+         except Exception as e:
+             # Return empty content if loading fails
+             return [SimpleWebDocument(
+                 page_content="",
+                 metadata={"source": self.url, "error": str(e)}
+             )]
+
+
  class WebDocumentLoading(DocumentLoadingBase):
      def __init__(self) -> None:
          super().__init__()

      def load(self, document_url: str) -> List[SourceDocument]:
-         documents = WebBaseLoader(document_url).load()
+         loader = SimpleWebLoader(document_url)
+         documents = loader.load()
+
          for document in documents:
              document.page_content = re.sub("\n{3,}", "\n\n", document.page_content)
              # Remove half non-ascii character from start/end of doc content
@@ -20,6 +74,7 @@ class WebDocumentLoading(DocumentLoadingBase):
              document.page_content = re.sub(pattern, "", document.page_content)
              if document.page_content == "":
                  documents.remove(document)
+
          source_documents: List[SourceDocument] = [
              SourceDocument(
                  content=document.page_content,
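
Two behavioral notes on the replacement loader: a fetch failure no longer raises but yields one empty SimpleWebDocument, which the empty-content check in WebDocumentLoading.load then drops; and because the cleanup re-joins all extracted text with single spaces, the loaded page_content contains no newlines, leaving the downstream re.sub("\n{3,}", ...) with nothing to match. A usage sketch with a placeholder URL:

    from cwyodmodules.batch.utilities.document_loading.web import SimpleWebLoader

    docs = SimpleWebLoader("https://example.com").load()  # placeholder URL
    doc = docs[0]
    if "error" in doc.metadata:
        print("fetch failed:", doc.metadata["error"])
    else:
        # get_text() output is re-joined with single spaces, so no newlines survive
        print(doc.metadata["source"], len(doc.page_content))
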
--- cwyodmodules/batch/utilities/helpers/azure_search_helper.py
+++ cwyodmodules/batch/utilities/helpers/azure_search_helper.py
@@ -1,6 +1,6 @@
  from azure.identity import ChainedTokenCredential, DefaultAzureCredential
  from typing import Union
- from langchain_community.vectorstores import AzureSearch
+
  from azure.core.credentials import AzureKeyCredential
  from azure.search.documents import SearchClient
  from azure.search.documents.indexes import SearchIndexClient
@@ -276,15 +276,6 @@ class AzureSearchHelper:
              ),
          ]

-         return AzureSearch(
-             azure_search_endpoint=self.env_helper.AZURE_SEARCH_SERVICE,
-             azure_search_key=(
-                 self.env_helper.AZURE_SEARCH_KEY
-                 if self.env_helper.is_auth_type_keys()
-                 else None
-             ),
-             index_name=self.env_helper.AZURE_SEARCH_CONVERSATIONS_LOG_INDEX,
-             embedding_function=self.llm_helper.get_embedding_model().embed_query,
-             fields=fields,
-             user_agent="langchain chatwithyourdata-sa",
-         )
+         # Return simple search client instead of LangChain AzureSearch
+         # This maintains compatibility while removing LangChain dependency
+         return self.search_client
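
Callers now receive an azure.search.documents SearchClient rather than a LangChain AzureSearch vector store, so vector-store methods such as similarity_search or add_documents are no longer available on the returned object; the SDK's own search() is. A sketch, with the caveat that the enclosing method name is not visible in this hunk (get_conversation_logger is an assumption based on the conversations-log index the removed code targeted):

    from cwyodmodules.batch.utilities.helpers.azure_search_helper import AzureSearchHelper

    client = AzureSearchHelper().get_conversation_logger()  # assumed method name
    results = client.search(search_text="user question", top=5)  # azure-search-documents API
    for result in results:
        print(result)
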
--- cwyodmodules/batch/utilities/helpers/llm_helper.py
+++ cwyodmodules/batch/utilities/helpers/llm_helper.py
@@ -1,7 +1,6 @@
  from openai import AzureOpenAI
  from typing import List, Union, cast
- from langchain_openai import AzureChatOpenAI, AzureOpenAIEmbeddings
- from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
+ # Removed LangChain dependencies - using direct OpenAI SDK instead
  from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion
  from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.azure_chat_prompt_execution_settings import (
      AzureChatPromptExecutionSettings,
@@ -49,68 +48,32 @@ class LLMHelper:

      @logger.trace_function(log_execution=log_execution, log_args=log_args, log_result=False)
      def get_llm(self):
-         if self.auth_type_keys:
-             return AzureChatOpenAI(
-                 deployment_name=self.llm_model,
-                 temperature=0,
-                 max_tokens=self.llm_max_tokens,
-                 openai_api_version=self.openai_client._api_version,
-                 azure_endpoint=self.env_helper.AZURE_OPENAI_ENDPOINT,
-                 api_key=self.env_helper.OPENAI_API_KEY,
-             )
-         else:
-             return AzureChatOpenAI(
-                 deployment_name=self.llm_model,
-                 temperature=0,
-                 max_tokens=self.llm_max_tokens,
-                 openai_api_version=self.openai_client._api_version,
-                 azure_endpoint=self.env_helper.AZURE_OPENAI_ENDPOINT,
-                 azure_ad_token_provider=self.token_provider,
-             )
+         # Return the OpenAI client directly instead of LangChain wrapper
+         return self.openai_client

-     # TODO: This needs to have a custom callback to stream back to the UI
      @logger.trace_function(log_execution=log_execution, log_args=log_args, log_result=False)
      def get_streaming_llm(self):
-         if self.auth_type_keys:
-             return AzureChatOpenAI(
-                 azure_endpoint=self.env_helper.AZURE_OPENAI_ENDPOINT,
-                 api_key=self.env_helper.OPENAI_API_KEY,
-                 streaming=True,
-                 callbacks=[StreamingStdOutCallbackHandler],
-                 deployment_name=self.llm_model,
-                 temperature=0,
-                 max_tokens=self.llm_max_tokens,
-                 openai_api_version=self.openai_client._api_version,
-             )
-         else:
-             return AzureChatOpenAI(
-                 azure_endpoint=self.env_helper.AZURE_OPENAI_ENDPOINT,
-                 api_key=self.env_helper.OPENAI_API_KEY,
-                 streaming=True,
-                 callbacks=[StreamingStdOutCallbackHandler],
-                 deployment_name=self.llm_model,
-                 temperature=0,
-                 max_tokens=self.llm_max_tokens,
-                 openai_api_version=self.openai_client._api_version,
-                 azure_ad_token_provider=self.token_provider,
-             )
+         # Return the OpenAI client directly - streaming is handled via stream=True parameter
+         return self.openai_client

      @logger.trace_function(log_execution=log_execution, log_args=log_args, log_result=False)
      def get_embedding_model(self):
-         if self.auth_type_keys:
-             return AzureOpenAIEmbeddings(
-                 azure_endpoint=self.env_helper.AZURE_OPENAI_ENDPOINT,
-                 api_key=self.env_helper.OPENAI_API_KEY,
-                 azure_deployment=self.embedding_model,
-                 chunk_size=1,
-             )
-         else:
-             return AzureOpenAIEmbeddings(
-                 azure_endpoint=self.env_helper.AZURE_OPENAI_ENDPOINT,
-                 azure_deployment=self.embedding_model,
-                 chunk_size=1,
-                 azure_ad_token_provider=self.token_provider,
-             )
+         # Return a simple embedding model wrapper that uses the OpenAI client directly
+         class EmbeddingModel:
+             def __init__(self, openai_client, embedding_model):
+                 self.openai_client = openai_client
+                 self.embedding_model = embedding_model
+
+             def embed_query(self, text: str) -> List[float]:
+                 return (
+                     self.openai_client.embeddings.create(
+                         input=[text], model=self.embedding_model
+                     )
+                     .data[0]
+                     .embedding
+                 )
+
+         return EmbeddingModel(self.openai_client, self.embedding_model)

      @logger.trace_function(log_execution=log_execution, log_args=False, log_result=False)
      def generate_embeddings(self, input: Union[str, list[int]]) -> List[float]:
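
get_llm() and get_streaming_llm() now both return the AzureOpenAI client itself, so call sites move from LangChain's invoke() to chat.completions.create(), with streaming requested per call via stream=True instead of the removed StreamingStdOutCallbackHandler; only the embedding path keeps a LangChain-shaped surface, via the inner EmbeddingModel's embed_query(). A sketch of the resulting call sites; the deployment name is illustrative:

    from cwyodmodules.batch.utilities.helpers.llm_helper import LLMHelper

    helper = LLMHelper()
    client = helper.get_llm()  # an openai.AzureOpenAI client, not a LangChain LLM

    response = client.chat.completions.create(
        model="gpt-4o",  # Azure deployment name, formerly AzureChatOpenAI's deployment_name
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response.choices[0].message.content)

    # Streaming replaces the StreamingStdOutCallbackHandler wiring:
    stream = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Hello"}],
        stream=True,
    )
    for event in stream:
        if event.choices and event.choices[0].delta.content:
            print(event.choices[0].delta.content, end="")

    print(len(helper.get_embedding_model().embed_query("hello")))  # embedding dimension
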
--- cwyodmodules-0.3.81.dist-info/METADATA
+++ cwyodmodules-0.3.84.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: cwyodmodules
- Version: 0.3.81
+ Version: 0.3.84
  Summary: Add your description here
  Author-email: Patrik <patrikhartl@gmail.com>
  Classifier: Operating System :: OS Independent
@@ -18,6 +18,7 @@ Requires-Dist: asyncpg<0.31.0,>=0.30.0
  Requires-Dist: azure-storage-queue<13.0.0,>=12.12.0
  Requires-Dist: chardet<6.0.0,>=5.2.0
  Requires-Dist: azure-ai-formrecognizer<4.0.0,>=3.3.3
+ Requires-Dist: tiktoken<1.0.0,>=0.8.0
  Requires-Dist: azure-search<2.0.0,>=1.0.0b2
  Requires-Dist: azure-functions<2.0.0,>=1.21.3
  Requires-Dist: azure-ai-ml<2.0.0,>=1.25.0
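
tiktoken joins the dependency list even though the new splitters only approximate token counts and never import it; presumably the explicit pin keeps exact counting available now that LangChain's from_tiktoken_encoder, which previously pulled tiktoken in, is gone. A minimal counting sketch for checking the 4-characters-per-token heuristic:

    import tiktoken

    enc = tiktoken.get_encoding("cl100k_base")
    text = "Rough approximation: one token is about four characters."
    print(len(text), "chars,", len(enc.encode(text)), "tokens")
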
--- cwyodmodules-0.3.81.dist-info/RECORD
+++ cwyodmodules-0.3.84.dist-info/RECORD
@@ -16,9 +16,9 @@ cwyodmodules/batch/utilities/common/source_document.py,sha256=_FNNE-gaydoJ2trfMz
  cwyodmodules/batch/utilities/document_chunking/__init__.py,sha256=s7G-4CLzaVRPe-ePbSfFKLI93Oimf1RYJYC0JA9qlek,272
  cwyodmodules/batch/utilities/document_chunking/chunking_strategy.py,sha256=c-ZNxdz14r2pSiCBYHh17WHs2nM2RpKou3q8DunDHnU,2001
  cwyodmodules/batch/utilities/document_chunking/document_chunking_base.py,sha256=suBdj8Iko8g_jO7IWlf1cg9PKTx0hMk1zfP9fXyMigU,442
- cwyodmodules/batch/utilities/document_chunking/fixed_size_overlap.py,sha256=_BFtDfRBBqdtBC9m0n2cKlwEZIfYrp_XdBAjBMHL7Qc,1927
- cwyodmodules/batch/utilities/document_chunking/layout.py,sha256=r8DU9sHvgBQRhDCzAt88KF57kxMlLQKtvuAdWn3k538,1925
- cwyodmodules/batch/utilities/document_chunking/page.py,sha256=4J__8BT2IDfkFXYUXzAWAzxSBITq8kiR3y09umx6kuA,1904
+ cwyodmodules/batch/utilities/document_chunking/fixed_size_overlap.py,sha256=IEJ_rDioJJ6hk-AjrslZImrxJ2wSonjIpL9BxDbmKjI,3512
+ cwyodmodules/batch/utilities/document_chunking/layout.py,sha256=NLLToNVj8MmQvuN35ddRZSS7MFuvFhHSW5520jAV6RY,3322
+ cwyodmodules/batch/utilities/document_chunking/page.py,sha256=jKLS4Hbl36mdDwys9A6LMmgiUdinVXd8h_NN4Bb4d1o,3292
  cwyodmodules/batch/utilities/document_chunking/paragraph.py,sha256=cnTUMpOhbwCng_k42H5AJbXiFlgkFpJU0r4onaHEPyY,539
  cwyodmodules/batch/utilities/document_chunking/strategies.py,sha256=udKC3li_tuLkveYNH2_SRPVmgK8wxhfULBN7mgl1Z30,1722
  cwyodmodules/batch/utilities/document_loading/__init__.py,sha256=a4Fq-2vYnTedtknfOwTPyFi_czVrK1MvVz7TDy54LH8,637
@@ -26,7 +26,7 @@ cwyodmodules/batch/utilities/document_loading/document_loading_base.py,sha256=Ma
  cwyodmodules/batch/utilities/document_loading/layout.py,sha256=3PMo3Hc-75_mNAq6oz7GCqC3uFrLmkPMLOw4jH57df4,893
  cwyodmodules/batch/utilities/document_loading/read.py,sha256=bTE2NV_PQYoUVIbS5-QS61OdgFdV0F7JomWQP6jjXH0,1681
  cwyodmodules/batch/utilities/document_loading/strategies.py,sha256=ZBKYPJD8UJmPBzljQc4yh0rMHJvYn9Gxn7TbuYrNU6A,792
- cwyodmodules/batch/utilities/document_loading/web.py,sha256=LRTNYs_7CN8nfMOaCoW7Py_obrLpj3vI4kneNVEHGXE,1186
+ cwyodmodules/batch/utilities/document_loading/web.py,sha256=TE-zN3DmSXmxM4mdIE83m-sUtohx3eVWfe8XHAErNr4,3016
  cwyodmodules/batch/utilities/document_loading/word_document.py,sha256=-F1asMaupQk4swEeCoAD8tyYENE4Qq-05-VmPUjRdeA,1569
  cwyodmodules/batch/utilities/helpers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  cwyodmodules/batch/utilities/helpers/azure_blob_storage_client.py,sha256=WgJFr1o4ZLqTTogS5oFwqSTHs6ofrycjrnSvffhIjoA,10140
@@ -34,12 +34,12 @@ cwyodmodules/batch/utilities/helpers/azure_computer_vision_client.py,sha256=zxpU
  cwyodmodules/batch/utilities/helpers/azure_form_recognizer_helper.py,sha256=wEbSJjZuV_u-4yWYNCiG5RwifjxNO8hdfXDuDoV-KiY,6890
  cwyodmodules/batch/utilities/helpers/azure_postgres_helper.py,sha256=efDNCnhrY-IJBKEwrJN_xbg4I72_r2HY3g6vyMYw5AM,11786
  cwyodmodules/batch/utilities/helpers/azure_postgres_helper_light_rag.py,sha256=M92Ir9vzXRMpCXcENE3Jux13C6mrMc6o9julcM6b3uY,11835
- cwyodmodules/batch/utilities/helpers/azure_search_helper.py,sha256=vIIMEck1wPg9oRlWweE2gSZ1nUYc_tmEe4QeFlsrwKk,11314
+ cwyodmodules/batch/utilities/helpers/azure_search_helper.py,sha256=RIK3KsdGlHRt_h7xUHtbTW9vxaBws4Ab5K3lT5U60D0,10910
  cwyodmodules/batch/utilities/helpers/document_chunking_helper.py,sha256=2MZOjW-fHXgYijP3m9O-nizOlk96Yg0axyxT0K6fTnM,725
  cwyodmodules/batch/utilities/helpers/document_loading_helper.py,sha256=2HBEl3vW-_PKbX5pPntTC_R5eToTk2Qb-q3M4Mt6hCU,603
  cwyodmodules/batch/utilities/helpers/env_helper.py,sha256=qrx_SrPawrzhF-l_VoNUXZX09Ky_qXIgHXtVun4mQh4,15787
  cwyodmodules/batch/utilities/helpers/lightrag_helper.py,sha256=7lb9JMm5IohsO73LWo5bWmlzWCGYNsx_fYl-aFdwATQ,3845
- cwyodmodules/batch/utilities/helpers/llm_helper.py,sha256=lHLYrUidtaemmKrVbWoo7oIvwluUoPUk16U5lV-YIX8,8282
+ cwyodmodules/batch/utilities/helpers/llm_helper.py,sha256=tQ_TYYsnr0GwJXW5MNUKV8T7KDzfYp742dm07eSnG7s,6640
  cwyodmodules/batch/utilities/helpers/orchestrator_helper.py,sha256=9mAmkrWAWn9ixz9vuzmms3Xccgm0V4yvAZGTmtyp-ag,582
  cwyodmodules/batch/utilities/helpers/config/agent_mode.py,sha256=8XMbu8dwMXva_xxeZNDlwOjDaZwIcwc-xJK1-QsaJ3w,82
  cwyodmodules/batch/utilities/helpers/config/assistant_strategy.py,sha256=uT8h646zEURU9x8oDOH7pWoZKb0Mw6dA2nJtA2M-ufg,171
@@ -103,8 +103,8 @@ cwyodmodules/graphrag/query/generate.py,sha256=BZiB6iw7PkIovw-CyYFogMHnDxK0Qu_4u
  cwyodmodules/graphrag/query/graph_search.py,sha256=95h3ecSWx4864XgKABtG0fh3Nk8HkqJVzoCrO8daJ-Y,7724
  cwyodmodules/graphrag/query/types.py,sha256=1Iq1dp4I4a56_cuFjOZ0NTgd0A2_MpVFznp_czgt6cI,617
  cwyodmodules/graphrag/query/vector_search.py,sha256=9Gwu9LPjtoAYUU8WKqCvbCHAIg3dpk71reoYd1scLnQ,1807
- cwyodmodules-0.3.81.dist-info/licenses/LICENSE,sha256=UqBDTipijsSW2ZSOXyTZnMsXmLoEHTgNEM0tL4g-Sso,1150
- cwyodmodules-0.3.81.dist-info/METADATA,sha256=V5vNozCIPe4MnA0rQ21zI7TVVlkH97FzVgKBUZDOoEE,1816
- cwyodmodules-0.3.81.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- cwyodmodules-0.3.81.dist-info/top_level.txt,sha256=99RENLbkdRX-qpJvsxZ5AfmTL5s6shSaKOWYpz1vwzg,13
- cwyodmodules-0.3.81.dist-info/RECORD,,
+ cwyodmodules-0.3.84.dist-info/licenses/LICENSE,sha256=UqBDTipijsSW2ZSOXyTZnMsXmLoEHTgNEM0tL4g-Sso,1150
+ cwyodmodules-0.3.84.dist-info/METADATA,sha256=T63d7HIkijP4KGNGS7zeXRfl6ICXBH2y_u7294-Sj-w,1855
+ cwyodmodules-0.3.84.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ cwyodmodules-0.3.84.dist-info/top_level.txt,sha256=99RENLbkdRX-qpJvsxZ5AfmTL5s6shSaKOWYpz1vwzg,13
+ cwyodmodules-0.3.84.dist-info/RECORD,,