alita-sdk 0.3.265__py3-none-any.whl → 0.3.267__py3-none-any.whl

This diff shows the published contents of two package versions released to a supported public registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in that registry.
alita_sdk/runtime/langchain/langraph_agent.py
@@ -422,7 +422,7 @@ def create_graph(
             if not pipeline_name:
                 raise ValueError("Subgraph must have a 'tool' node: add required tool to the subgraph node")
             node_fn = SubgraphRunnable(
-                inner=tool,
+                inner=tool.graph,
                 name=pipeline_name,
                 input_mapping=node.get('input_mapping', {}),
                 output_mapping=node.get('output_mapping', {}),
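Note on this one-line fix: it tracks a change later in this diff. SubgraphToolkit.get_toolkit now returns a GraphTool (a BaseTool wrapper defined in the new alita_sdk/runtime/tools/graph.py) instead of a bare CompiledStateGraph, so the subgraph node must unwrap the compiled graph it carries via tool.graph before handing it to SubgraphRunnable.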
@@ -666,7 +666,9 @@ class LangGraphAgentRunnable(CompiledStateGraph):
                config: Optional[RunnableConfig] = None,
                *args, **kwargs):
         logger.info(f"Incomming Input: {input}")
-        if not config.get("configurable", {}).get("thread_id"):
+        if config is None:
+            config = RunnableConfig()
+        if not config.get("configurable", {}).get("thread_id", ""):
             config["configurable"] = {"thread_id": str(uuid4())}
         thread_id = config.get("configurable", {}).get("thread_id")
         # Handle chat history and current input properly
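This guard removes an AttributeError when invoke() is called with config=None: a missing config and a config lacking a thread_id now take the same path. The same pattern in isolation, as a minimal self-contained sketch (the helper name is hypothetical):

    from uuid import uuid4
    from langchain_core.runnables import RunnableConfig

    def ensure_thread_id(config: RunnableConfig | None) -> RunnableConfig:
        # RunnableConfig is a TypedDict, so RunnableConfig() is just an empty dict
        if config is None:
            config = RunnableConfig()
        if not config.get("configurable", {}).get("thread_id", ""):
            config["configurable"] = {"thread_id": str(uuid4())}
        return config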
alita_sdk/runtime/toolkits/subgraph.py
@@ -1,8 +1,11 @@
 from typing import List, Any
 
+from langchain_core.tools import BaseTool
+from langgraph.checkpoint.memory import MemorySaver
 from langgraph.graph.state import CompiledStateGraph
 
 from ..langchain.langraph_agent import create_graph, SUBGRAPH_REGISTRY
+from ..tools.graph import GraphTool
 from ..utils.utils import clean_string
 
 
@@ -16,7 +19,7 @@ class SubgraphToolkit:
             llm,
             app_api_key: str,
             selected_tools: list[str] = []
-    ) -> List[CompiledStateGraph]:
+    ) -> List[BaseTool]:
         from .tools import get_tools
         # from langgraph.checkpoint.memory import MemorySaver
 
@@ -36,18 +39,20 @@ class SubgraphToolkit:
 
         # For backward compatibility, still create a compiled graph stub
         # This is mainly used for identification in the parent graph's tools list
+        # For now the graph toolkit will have its own ephemeral in memory checkpoint memory.
         graph = create_graph(
             client=llm,
             tools=tools,
             yaml_schema=version_details['instructions'],
             debug=False,
             store=None,
-            memory=None,
-            for_subgraph=True,  # compile as raw subgraph
+            memory=MemorySaver(),
+            # for_subgraph=True,  # compile as raw subgraph
         )
-
+
+        cleaned_subgraph_name = clean_string(subgraph_name)
         # Tag the graph stub for parent lookup
-        graph.name = clean_string(subgraph_name)
+        graph.name = cleaned_subgraph_name
 
         # Return the compiled graph stub for backward compatibility
-        return [graph]
+        return [GraphTool(description=app_details['description'], name=subgraph_name, graph=graph)]
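Callers that previously received a bare CompiledStateGraph from get_toolkit now get a GraphTool carrying it, backed by its own ephemeral MemorySaver checkpointer; the cleaned name tags the graph for parent lookup, while the tool keeps the original subgraph_name (GraphTool's own validator cleans it again). A hypothetical call shape, hedged because only the tail of the signature appears in this hunk:

    from alita_sdk.runtime.toolkits.subgraph import SubgraphToolkit

    # tools = SubgraphToolkit.get_toolkit(..., llm=llm, app_api_key=key)  # leading args elided
    # tools[0] is a GraphTool; tools[0].graph is the compiled stub that
    # create_graph unwraps via inner=tool.graph in the first hunk above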
alita_sdk/runtime/tools/artifact.py
@@ -9,6 +9,7 @@ from pydantic import create_model, Field, model_validator
 
 from alita_sdk.tools.non_code_indexer_toolkit import NonCodeIndexerToolkit
 from alita_sdk.tools.utils.available_tools_decorator import extend_with_parent_available_tools
+from ...runtime.utils.utils import IndexerKeywords
 
 
 class ArtifactWrapper(NonCodeIndexerToolkit):
@@ -82,8 +83,8 @@ class ArtifactWrapper(NonCodeIndexerToolkit):
         for document in documents:
             try:
                 page_content = self.artifact.get_content_bytes(artifact_name=document.metadata['name'])
-                document.metadata['loader_content'] = page_content
-                document.metadata['loader_content_type'] = document.metadata['name']
+                document.metadata[IndexerKeywords.CONTENT_IN_BYTES.value] = page_content
+                document.metadata[IndexerKeywords.CONTENT_FILE_NAME.value] = document.metadata['name']
                 yield document
             except Exception as e:
                 logging.error(f"Failed while parsing the file '{document.metadata['name']}': {e}")
alita_sdk/runtime/tools/graph.py (new file)
@@ -0,0 +1,75 @@
+import json
+
+from langgraph.graph.state import CompiledStateGraph
+
+from ..utils.utils import clean_string
+from langchain_core.tools import BaseTool
+from langchain_core.messages import BaseMessage, AIMessage, ToolCall
+from typing import Any, Type, Optional, Union
+from pydantic import create_model, field_validator, BaseModel
+from pydantic.fields import FieldInfo
+from ..langchain.mixedAgentRenderes import convert_message_to_json
+from logging import getLogger
+
+logger = getLogger(__name__)
+
+graphToolSchema = create_model(
+    "graphToolSchema",
+    input=(str, FieldInfo(description="User Input for Graph")),
+    chat_history=(Optional[list[BaseMessage]],
+                  FieldInfo(description="Chat History relevant for Graph in format [{'role': '<user| assistant | etc>', 'content': '<content of the respected message>'}]", default=[]))
+)
+
+
+def formulate_query(kwargs):
+    chat_history = []
+    if kwargs.get('chat_history'):
+        if isinstance(kwargs.get('chat_history')[-1], BaseMessage):
+            chat_history = convert_message_to_json(kwargs.get('chat_history')[:])
+        elif isinstance(kwargs.get('chat_history')[-1], dict):
+            if all([True if message.get('role') and message.get('content') else False for message in
+                    kwargs.get('chat_history')]):
+                chat_history = kwargs.get('chat_history')[:]
+            else:
+                for each in kwargs.get('chat_history')[:]:
+                    chat_history.append(AIMessage(json.dumps(each)))
+        elif isinstance(kwargs.get('chat_history')[-1], str):
+            chat_history = []
+            for each in kwargs.get('chat_history')[:]:
+                chat_history.append(AIMessage(each))
+    elif kwargs.get('messages'):
+        chat_history = convert_message_to_json(kwargs.get('messages')[:])
+    result = {"input": kwargs.get('input'), "chat_history": chat_history}
+    for key, value in kwargs.items():
+        if key not in ("input", "chat_history"):
+            result[key] = value
+    return result
+
+
+class GraphTool(BaseTool):
+    name: str
+    description: str
+    graph: CompiledStateGraph
+    args_schema: Type[BaseModel] = graphToolSchema
+    return_type: str = "str"
+
+    @field_validator('name', mode='before')
+    @classmethod
+    def remove_spaces(cls, v):
+        return clean_string(v)
+
+    def invoke(self, input: Any, config: Optional[dict] = None, **kwargs: Any) -> Any:
+        """Override default invoke to preserve all fields, not just args_schema"""
+        schema_values = self.args_schema(**input).model_dump() if self.args_schema else {}
+        extras = {k: v for k, v in input.items() if k not in schema_values}
+        all_kwargs = {**kwargs, **extras, **schema_values}
+        if config is None:
+            config = {}
+        return self._run(*config, **all_kwargs)
+
+    def _run(self, *args, **kwargs):
+        response = self.graph.invoke(formulate_query(kwargs))
+        if self.return_type == "str":
+            return response["output"]
+        else:
+            return {"messages": [{"role": "assistant", "content": response["output"]}]}
alita_sdk/runtime/utils/utils.py
@@ -9,6 +9,8 @@ class IndexerKeywords(Enum):
     PARENT = 'parent_id'
     # DEPENDENCY_ID = 'dependency_id'
     UPDATED_ON = 'updated_on'
+    CONTENT_IN_BYTES = 'loader_content'
+    CONTENT_FILE_NAME = 'loader_content_type'
 
 # This pattern matches characters that are NOT alphanumeric, underscores, or hyphens
 clean_string_pattern = re.compile(r'[^a-zA-Z0-9_.-]')
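The two new enum members deliberately reuse the old literal strings, so metadata written with the previously hard-coded keys stays readable after this refactor:

    from alita_sdk.runtime.utils.utils import IndexerKeywords

    assert IndexerKeywords.CONTENT_IN_BYTES.value == 'loader_content'
    assert IndexerKeywords.CONTENT_FILE_NAME.value == 'loader_content_type'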
alita_sdk/tools/ado/test_plan/test_plan_wrapper.py
@@ -1,7 +1,7 @@
 import json
 import logging
 import xml.etree.ElementTree as ET
-from typing import Generator, Optional
+from typing import Generator, Literal, Optional
 
 from azure.devops.connection import Connection
 from azure.devops.v7_0.test_plan.models import TestPlanCreateParams, TestSuiteCreateParams, \
@@ -14,7 +14,9 @@ from pydantic import create_model, PrivateAttr, model_validator, SecretStr
 from pydantic.fields import FieldInfo as Field
 
 from ..work_item import AzureDevOpsApiWrapper
-from ...elitea_base import BaseVectorStoreToolApiWrapper, extend_with_vector_tools
+from ...non_code_indexer_toolkit import NonCodeIndexerToolkit
+from ...utils.available_tools_decorator import extend_with_parent_available_tools
+from ....runtime.utils.utils import IndexerKeywords
 
 logger = logging.getLogger(__name__)
 
@@ -158,7 +160,7 @@ TestCasesGetModel = create_model(
     suite_id=(int, Field(description="ID of the test suite for which test cases are requested"))
 )
 
-class TestPlanApiWrapper(BaseVectorStoreToolApiWrapper):
+class TestPlanApiWrapper(NonCodeIndexerToolkit):
     # TODO use ado_configuration instead of organization_url, project and token
     __test__ = False
     organization_url: str
@@ -178,7 +180,7 @@ class TestPlanApiWrapper(BaseVectorStoreToolApiWrapper):
             cls._client = connection.clients.get_test_plan_client()
         except Exception as e:
             raise ImportError(f"Failed to connect to Azure DevOps: {e}")
-        return values
+        return super().validate_toolkit(values)
 
     def create_test_plan(self, test_plan_create_params: str):
         """Create a test plan in Azure DevOps."""
@@ -360,7 +362,7 @@ class TestPlanApiWrapper(BaseVectorStoreToolApiWrapper):
             logger.error(f"Error getting test cases: {e}")
             return ToolException(f"Error getting test cases: {e}")
 
-    def _base_loader(self, plan_id: str, suite_ids: Optional[list[str]] = [], **kwargs) -> Generator[Document, None, None]:
+    def _base_loader(self, plan_id: str, suite_ids: Optional[list[str]] = [], chunking_tool: str = None, **kwargs) -> Generator[Document, None, None]:
         cases = []
         for sid in suite_ids:
             cases.extend(self.get_test_cases(plan_id, sid))
@@ -368,29 +370,39 @@ class TestPlanApiWrapper(BaseVectorStoreToolApiWrapper):
         for case in cases:
             field_dicts = case.get('work_item', {}).get('work_item_fields', [])
             data = {k: v for d in field_dicts for k, v in d.items()}
-            yield Document(
-                page_content=data.get('Microsoft.VSTS.TCM.Steps', ''),
-                metadata={
-                    'id': case.get('work_item', {}).get('id', ''),
-                    'title': case.get('work_item', {}).get('name', ''),
-                    'plan_id': case.get('test_plan', {}).get('id', ''),
-                    'suite_id': case.get('test_suite', {}).get('id', ''),
-                    'description': data.get('System.Description', ''),
-                    'updated_on': data.get('System.Rev', ''),
-                })
-
-    def _process_document(self, document: Document) -> Generator[Document, None, None]:
-        if False:
-            yield  # Unreachable, but keeps the function a generator
+            if chunking_tool:
+                yield Document(
+                    page_content='',
+                    metadata={
+                        'id': case.get('work_item', {}).get('id', ''),
+                        'title': case.get('work_item', {}).get('name', ''),
+                        'plan_id': case.get('test_plan', {}).get('id', ''),
+                        'suite_id': case.get('test_suite', {}).get('id', ''),
+                        'description': data.get('System.Description', ''),
+                        'updated_on': data.get('System.Rev', ''),
+                        IndexerKeywords.CONTENT_IN_BYTES.value: data.get('Microsoft.VSTS.TCM.Steps', '').encode("utf-8")
+                    })
+            else:
+                yield Document(
+                    page_content=data.get('Microsoft.VSTS.TCM.Steps', ''),
+                    metadata={
+                        'id': case.get('work_item', {}).get('id', ''),
+                        'title': case.get('work_item', {}).get('name', ''),
+                        'plan_id': case.get('test_plan', {}).get('id', ''),
+                        'suite_id': case.get('test_suite', {}).get('id', ''),
+                        'description': data.get('System.Description', ''),
+                        'updated_on': data.get('System.Rev', ''),
+                    })
 
     def _index_tool_params(self):
         """Return the parameters for indexing data."""
         return {
             "plan_id": (str, Field(description="ID of the test plan for which test cases are requested")),
-            "suite_ids": (str, Field(description="List of test suite IDs for which test cases are requested (can be empty)"))
+            "suite_ids": (str, Field(description="List of test suite IDs for which test cases are requested (can be empty)")),
+            'chunking_tool': (Literal['html'], Field(description="Name of chunking tool", default='html'))
         }
 
-    @extend_with_vector_tools
+    @extend_with_parent_available_tools
     def get_available_tools(self):
         """Return a list of available tools."""
         return [
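With chunking_tool set (it defaults to 'html' per _index_tool_params), the loader now emits documents with empty page_content and the raw test steps as UTF-8 bytes, deferring parsing to the indexer, which resolves 'html' to '.html' via file_extension_by_chunker (both shown later in this diff). A sketch of the document shape this branch yields, with illustrative values:

    from langchain_core.documents import Document
    from alita_sdk.runtime.utils.utils import IndexerKeywords

    doc = Document(page_content='', metadata={
        'id': 123,  # illustrative value only
        IndexerKeywords.CONTENT_IN_BYTES.value: '<ol><li>Open the app</li></ol>'.encode('utf-8'),
    })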
alita_sdk/tools/ado/wiki/ado_wrapper.py
@@ -1,6 +1,6 @@
 import hashlib
 import logging
-from typing import Any, Dict, Generator, Optional
+from typing import Generator, Literal, Optional
 
 from azure.devops.connection import Connection
 from azure.devops.exceptions import AzureDevOpsServiceError
@@ -15,7 +15,9 @@ from pydantic import create_model, PrivateAttr, SecretStr
 from pydantic import model_validator
 from pydantic.fields import Field
 
-from ...elitea_base import BaseVectorStoreToolApiWrapper, extend_with_vector_tools
+from ...non_code_indexer_toolkit import NonCodeIndexerToolkit
+from ...utils.available_tools_decorator import extend_with_parent_available_tools
+from ....runtime.utils.utils import IndexerKeywords
 
 logger = logging.getLogger(__name__)
 
 
@@ -55,7 +57,7 @@ RenamePageInput = create_model(
55
57
  )
56
58
 
57
59
 
58
- class AzureDevOpsApiWrapper(BaseVectorStoreToolApiWrapper):
60
+ class AzureDevOpsApiWrapper(NonCodeIndexerToolkit):
59
61
  # TODO use ado_configuration instead of organization_url, project and token
60
62
  organization_url: str
61
63
  project: str
@@ -82,7 +84,7 @@ class AzureDevOpsApiWrapper(BaseVectorStoreToolApiWrapper):
         except Exception as e:
             return ImportError(f"Failed to connect to Azure DevOps: {e}")
 
-        return values
+        return super().validate_toolkit(values)
 
     def get_wiki(self, wiki_identified: str):
         """Extract ADO wiki information."""
  """Extract ADO wiki information."""
@@ -219,7 +221,7 @@ class AzureDevOpsApiWrapper(BaseVectorStoreToolApiWrapper):
219
221
  logger.error(f"Unable to modify wiki page: {str(e)}")
220
222
  return ToolException(f"Unable to modify wiki page: {str(e)}")
221
223
 
222
- def _base_loader(self, wiki_identifier: str, title_contains: Optional[str] = None, **kwargs) -> Generator[Document, None, None]:
224
+ def _base_loader(self, wiki_identifier: str, chunking_tool: str = None, title_contains: Optional[str] = None, **kwargs) -> Generator[Document, None, None]:
223
225
  pages = self._client.get_pages_batch(pages_batch_request={}, project=self.project, wiki_identifier=wiki_identifier)
224
226
  #
225
227
  for page in pages:
@@ -227,21 +229,31 @@ class AzureDevOpsApiWrapper(BaseVectorStoreToolApiWrapper):
227
229
  content_hash = hashlib.sha256(content.encode("utf-8")).hexdigest()
228
230
  title = page.path.rsplit("/", 1)[-1]
229
231
  if not title_contains or (title_contains and title_contains.lower() in title.lower()):
230
- yield Document(page_content=content, metadata={
231
- 'id': str(page.id),
232
- 'path': page.path,
233
- 'title': title,
234
- 'updated_on': content_hash
235
- })
232
+ if chunking_tool:
233
+ yield Document(page_content='', metadata={
234
+ 'id': str(page.id),
235
+ 'path': page.path,
236
+ 'title': title,
237
+ 'updated_on': content_hash,
238
+ IndexerKeywords.CONTENT_IN_BYTES.value: content.encode("utf-8")
239
+ })
240
+ else:
241
+ yield Document(page_content=content, metadata={
242
+ 'id': str(page.id),
243
+ 'path': page.path,
244
+ 'title': title,
245
+ 'updated_on': content_hash
246
+ })
236
247
 
237
248
  def _index_tool_params(self):
238
249
  """Return the parameters for indexing data."""
239
250
  return {
240
251
  "wiki_identifier": (str, Field(description="Wiki identifier to index, e.g., 'ABCProject.wiki'")),
241
- 'title_contains': (Optional[str], Field(default=None, description="Optional filter to include only pages with titles containing exact this string"))
252
+ 'title_contains': (Optional[str], Field(default=None, description="Optional filter to include only pages with titles containing exact this string")),
253
+ 'chunking_tool':(Literal['markdown'], Field(description="Name of chunking tool", default='markdown'))
242
254
  }
243
255
 
244
- @extend_with_vector_tools
256
+ @extend_with_parent_available_tools
245
257
  def get_available_tools(self):
246
258
  """Return a list of available tools."""
247
259
  return [
alita_sdk/tools/ado/work_item/ado_wrapper.py
@@ -15,6 +15,7 @@ from pydantic import model_validator
 from pydantic.fields import Field
 
 from alita_sdk.tools.non_code_indexer_toolkit import NonCodeIndexerToolkit
+from ....runtime.utils.utils import IndexerKeywords
 
 logger = logging.getLogger(__name__)
 
@@ -525,7 +526,7 @@ class AzureDevOpsApiWrapper(NonCodeIndexerToolkit):
         for attachment_id, file_name in document.metadata.get('attachment_ids', {}).items():
             content_generator = self._client.get_attachment_content(id=attachment_id, download=True)
             content = b"".join(x for x in content_generator)
-            yield Document(page_content="", metadata={'id': attachment_id, 'loader_content_type': file_name, 'loader_content': content})
+            yield Document(page_content="", metadata={'id': attachment_id, IndexerKeywords.CONTENT_FILE_NAME.value: file_name, IndexerKeywords.CONTENT_IN_BYTES.value: content})
 
     def _index_tool_params(self):
         """Return the parameters for indexing data."""
alita_sdk/tools/base_indexer_toolkit.py
@@ -5,7 +5,7 @@ from typing import Any, Optional, List, Literal, Dict, Generator
 from langchain_core.documents import Document
 from pydantic import create_model, Field, SecretStr
 
-from .utils.content_parser import process_content_by_type
+from .utils.content_parser import file_extension_by_chunker, process_content_by_type
 from .vector_adapters.VectorStoreAdapter import VectorStoreAdapterFactory
 from ..runtime.tools.vectorstore_base import VectorStoreWrapperBase
 from ..runtime.utils.utils import IndexerKeywords
@@ -170,19 +170,26 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
         chunking_config['llm'] = self.llm
 
         for document in documents:
-            if content_type := document.metadata.get('loader_content_type', None):
+            if content_type := document.metadata.get(IndexerKeywords.CONTENT_FILE_NAME.value, None):
                 # apply parsing based on content type and chunk if chunker was applied to parent doc
-                content = document.metadata.pop('loader_content', None)
+                content = document.metadata.pop(IndexerKeywords.CONTENT_IN_BYTES.value, None)
                 yield from process_content_by_type(
                     document=document,
                     content=content,
                     extension_source=content_type, llm=self.llm, chunking_config=chunking_config)
+            elif chunking_tool and (content_in_bytes := document.metadata.pop(IndexerKeywords.CONTENT_IN_BYTES.value, None)):
+                # apply parsing based on content type resolved from chunking_tool
+                content_type = file_extension_by_chunker(chunking_tool)
+                yield from process_content_by_type(
+                    document=document,
+                    content=content_in_bytes,
+                    extension_source=content_type, llm=self.llm, chunking_config=chunking_config)
             elif chunking_tool:
                 # apply default chunker from toolkit config. No parsing.
                 chunker = chunkers.get(chunking_tool)
                 yield from chunker(file_content_generator=iter([document]), config=chunking_config)
             else:
-                # return as is if neither chunker or content typa are specified
+                # return as is if neither chunker nor content type are specified
                 yield document
 
     def _extend_data(self, documents: Generator[Document, None, None]):
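The new elif slots a middle dispatch path between the existing two. Resolution order is now: an explicit file name in metadata drives parsing by its extension; failing that, a chunking tool plus raw bytes derive the extension from the chunker name; a bare chunking tool chunks the document as-is; otherwise the document passes through unchanged. A compressed sketch of just that resolution order (the helper name is hypothetical; the imported names are from this diff):

    from alita_sdk.runtime.utils.utils import IndexerKeywords
    from alita_sdk.tools.utils.content_parser import file_extension_by_chunker

    def pick_extension(metadata: dict, chunking_tool: str | None) -> str | None:
        # 1) an explicit file name attached by a loader wins
        if name := metadata.get(IndexerKeywords.CONTENT_FILE_NAME.value):
            return name
        # 2) raw bytes plus a chunking tool: derive the extension from the chunker
        if chunking_tool and metadata.get(IndexerKeywords.CONTENT_IN_BYTES.value):
            return file_extension_by_chunker(chunking_tool)  # e.g. 'html' -> '.html'
        # 3)/4) no parsing: chunk as-is or pass through
        return None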
alita_sdk/tools/sharepoint/api_wrapper.py
@@ -10,6 +10,7 @@ from pydantic import Field, PrivateAttr, create_model, model_validator, SecretSt
 
 from ..non_code_indexer_toolkit import NonCodeIndexerToolkit
 from ..utils.content_parser import parse_file_content
+from ...runtime.utils.utils import IndexerKeywords
 
 NoInput = create_model(
     "NoInput"
@@ -200,8 +201,8 @@ class SharepointApiWrapper(NonCodeIndexerToolkit):
     def _extend_data(self, documents: Generator[Document, None, None]):
         for document in documents:
             try:
-                document.metadata['loader_content'] = self._load_file_content_in_bytes(document.metadata['Path'])
-                document.metadata['loader_content_type'] = document.metadata['Name']
+                document.metadata[IndexerKeywords.CONTENT_IN_BYTES.value] = self._load_file_content_in_bytes(document.metadata['Path'])
+                document.metadata[IndexerKeywords.CONTENT_FILE_NAME.value] = document.metadata['Name']
                 yield document
             except Exception as e:
                 logging.error(f"Failed while parsing the file '{document.metadata['Path']}': {e}")
alita_sdk/tools/utils/content_parser.py
@@ -8,6 +8,7 @@ from langchain_core.documents import Document
 from langchain_core.tools import ToolException
 
 from alita_sdk.runtime.langchain.document_loaders.constants import loaders_map
+from ...runtime.utils.utils import IndexerKeywords
 
 logger = getLogger(__name__)
 
@@ -175,7 +176,7 @@ def process_content_by_type(document: Document, content, extension_source: str,
     with tempfile.NamedTemporaryFile(mode='w+b', suffix=extension, delete=False) as temp_file:
         temp_file_path = temp_file.name
         if content is None:
-            logger.warning("'loader_content' ie expected but not found in document metadata.")
+            logger.warning(f"'{IndexerKeywords.CONTENT_IN_BYTES.value}' ie expected but not found in document metadata.")
            return
 
        temp_file.write(content)
@@ -218,4 +219,19 @@ def sanitize_for_postgres(text: str, replacement: str = "") -> str:
     >>> sanitize_for_postgres("Hello\\x00world", " ")
     'Hello world'
     """
-    return text.replace("\x00", replacement)
+    return text.replace("\x00", replacement)
+
+
+def file_extension_by_chunker(chunker_name: str) -> str:
+    name = chunker_name.lower()
+    if name == "markdown":
+        return ".md"
+    if name == "json":
+        return ".json"
+    if name == "text" or name == "txt":
+        return ".txt"
+    if name == "html":
+        return ".html"
+    if name == "csv":
+        return ".csv"
+    return None
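file_extension_by_chunker is what lets loaders tag raw bytes with only a chunker name and still get type-aware parsing. An equivalent table-driven form, offered only as an alternative sketch rather than the shipped code:

    _CHUNKER_EXTENSIONS = {
        "markdown": ".md", "json": ".json", "text": ".txt",
        "txt": ".txt", "html": ".html", "csv": ".csv",
    }

    def file_extension_by_chunker(chunker_name: str) -> str | None:
        # returns None for unknown chunkers, matching the original's fall-through
        return _CHUNKER_EXTENSIONS.get(chunker_name.lower())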
alita_sdk-0.3.267.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: alita_sdk
-Version: 0.3.265
+Version: 0.3.267
 Summary: SDK for building langchain agents using resources from Alita
 Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedjik@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
 License-Expression: Apache-2.0
alita_sdk-0.3.267.dist-info/RECORD
@@ -34,7 +34,7 @@ alita_sdk/runtime/langchain/assistant.py,sha256=suBFEt24t9bLyBHMzkR3Mkgd9HIrGBq_
 alita_sdk/runtime/langchain/chat_message_template.py,sha256=kPz8W2BG6IMyITFDA5oeb5BxVRkHEVZhuiGl4MBZKdc,2176
 alita_sdk/runtime/langchain/constants.py,sha256=eHVJ_beJNTf1WJo4yq7KMK64fxsRvs3lKc34QCXSbpk,3319
 alita_sdk/runtime/langchain/indexer.py,sha256=0ENHy5EOhThnAiYFc7QAsaTNp9rr8hDV_hTK8ahbatk,37592
-alita_sdk/runtime/langchain/langraph_agent.py,sha256=m96QQR1zoWQ8cDfwD4mcrlBl41JIrLwxDGPOg6yQzf8,43850
+alita_sdk/runtime/langchain/langraph_agent.py,sha256=3s1QLEPu0U0pz-PVaLBIEZLp7dhqAxy2wNUIjvIt49I,43925
 alita_sdk/runtime/langchain/mixedAgentParser.py,sha256=M256lvtsL3YtYflBCEp-rWKrKtcY1dJIyRGVv7KW9ME,2611
 alita_sdk/runtime/langchain/mixedAgentRenderes.py,sha256=asBtKqm88QhZRILditjYICwFVKF5KfO38hu2O-WrSWE,5964
 alita_sdk/runtime/langchain/store_manager.py,sha256=i8Fl11IXJhrBXq1F1ukEVln57B1IBe-tqSUvfUmBV4A,2218
@@ -87,16 +87,17 @@ alita_sdk/runtime/toolkits/artifact.py,sha256=3AjdKxrsbb0kzQ6NxJla9XqJcgyDJicMAq
 alita_sdk/runtime/toolkits/configurations.py,sha256=kIDAlnryPQfbZyFxV-9SzN2-Vefzx06TX1BBdIIpN90,141
 alita_sdk/runtime/toolkits/datasource.py,sha256=qk78OdPoReYPCWwahfkKLbKc4pfsu-061oXRryFLP6I,2498
 alita_sdk/runtime/toolkits/prompt.py,sha256=WIpTkkVYWqIqOWR_LlSWz3ug8uO9tm5jJ7aZYdiGRn0,1192
-alita_sdk/runtime/toolkits/subgraph.py,sha256=ZYqI4yVLbEPAjCR8dpXbjbL2ipX598Hk3fL6AgaqFD4,1758
+alita_sdk/runtime/toolkits/subgraph.py,sha256=wwUK8JjPXkGzyVZ3tAukmvST6eGbqx_U11rpnmbrvtg,2105
 alita_sdk/runtime/toolkits/tools.py,sha256=jNgWazbCb2t-H0OOErg7oPDxKXM-ezUAc6y9wqlI6GY,7875
 alita_sdk/runtime/toolkits/vectorstore.py,sha256=BGppQADa1ZiLO17fC0uCACTTEvPHlodEDYEzUcBRbAA,2901
 alita_sdk/runtime/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/runtime/tools/agent.py,sha256=m98QxOHwnCRTT9j18Olbb5UPS8-ZGeQaGiUyZJSyFck,3162
 alita_sdk/runtime/tools/application.py,sha256=mC2_ZFx4WLHc98Gzll88Vw6cqyx2cmbig2IeJBtHRdg,2836
-alita_sdk/runtime/tools/artifact.py,sha256=fM_yp8Y2gNXAWvGNrtwQkS92c3Pfme8vZVxUrqi535o,8617
+alita_sdk/runtime/tools/artifact.py,sha256=yIn-kfI9OWoaxbBeqdqF0M1HPeMtNnvZ_pCPoUIwnCk,8708
 alita_sdk/runtime/tools/datasource.py,sha256=pvbaSfI-ThQQnjHG-QhYNSTYRnZB0rYtZFpjCfpzxYI,2443
 alita_sdk/runtime/tools/echo.py,sha256=spw9eCweXzixJqHnZofHE1yWiSUa04L4VKycf3KCEaM,486
 alita_sdk/runtime/tools/function.py,sha256=ZFpd7TGwIawze2e7BHlKwP0NHwNw42wwrmmnXyJQJhk,2600
+alita_sdk/runtime/tools/graph.py,sha256=MbnZYqdmvZY7SGDp43lOVVIjUt5ARHSgj43mdtBjSjQ,3092
 alita_sdk/runtime/tools/indexer_tool.py,sha256=whSLPevB4WD6dhh2JDXEivDmTvbjiMV1MrPl9cz5eLA,4375
 alita_sdk/runtime/tools/llm.py,sha256=NsrsP-SblyxDdzgMCn9_OBUL0sUGDVS5yqer49V7ciE,15069
 alita_sdk/runtime/tools/loop.py,sha256=uds0WhZvwMxDVFI6MZHrcmMle637cQfBNg682iLxoJA,8335
@@ -117,9 +118,9 @@ alita_sdk/runtime/utils/save_dataframe.py,sha256=i-E1wp-t4wb17Zq3nA3xYwgSILjoXNi
 alita_sdk/runtime/utils/streamlit.py,sha256=ZgHpibL2ARHt6qrWj5JhK6HNZv2UjxQ04qTk6gmz1Eo,104928
 alita_sdk/runtime/utils/toolkit_runtime.py,sha256=MU63Fpxj0b5_r1IUUc0Q3-PN9VwL7rUxp2MRR4tmYR8,5136
 alita_sdk/runtime/utils/toolkit_utils.py,sha256=I9QFqnaqfVgN26LUr6s3XlBlG6y0CoHURnCzG7XcwVs,5311
-alita_sdk/runtime/utils/utils.py,sha256=CpEl3LCeLbhzQySz08lkKPm7Auac6IiLF7WB8wmArMI,589
+alita_sdk/runtime/utils/utils.py,sha256=VXNLsdeTmf6snn9EtUyobv4yL-xzLhUcH8P_ORMifYc,675
 alita_sdk/tools/__init__.py,sha256=ko5TToGYZFmBrho26DRAVvrkHWxQ2sfs8gVAASinYp8,10611
-alita_sdk/tools/base_indexer_toolkit.py,sha256=UkCjxQkBudIEjKFwUB2313mx6qQCxtF_rIiDYOAgbIw,17851
+alita_sdk/tools/base_indexer_toolkit.py,sha256=17v9AIMGLO0OBUWS6A_-1xvHMArqTwK84mfkpTrwPwM,18434
 alita_sdk/tools/elitea_base.py,sha256=PfelIUb5YFTjDN_1jNYT9tJbjfYr11PAUrPQHyW2d5I,32830
 alita_sdk/tools/non_code_indexer_toolkit.py,sha256=v9uq1POE1fQKCd152mbqDtF-HSe0qoDj83k4E5LAkMI,1080
 alita_sdk/tools/ado/__init__.py,sha256=u2tdDgufGuDb-7lIgKKQlqgStL9Wd1gzNmRNYems2c0,1267
@@ -127,11 +128,11 @@ alita_sdk/tools/ado/utils.py,sha256=PTCludvaQmPLakF2EbCGy66Mro4-rjDtavVP-xcB2Wc,
 alita_sdk/tools/ado/repos/__init__.py,sha256=n-IhKED05RwQGWT4LfCaxJ85uDyG4S9zTjSjK6A8N4o,5192
 alita_sdk/tools/ado/repos/repos_wrapper.py,sha256=e3bGsM03m0UggSQfoVh5Gg_M1MYt_BTKS-s9G2Unc1k,49739
 alita_sdk/tools/ado/test_plan/__init__.py,sha256=4fEw_3cm4shuZ868HhAU-uMH3xNXPyb3uRjyNWoBKls,5243
-alita_sdk/tools/ado/test_plan/test_plan_wrapper.py,sha256=57nYl-F4OPDwZu-amg9ptG0Z4iAYMBobcKlrbGgSAg8,20079
+alita_sdk/tools/ado/test_plan/test_plan_wrapper.py,sha256=dsHRNSkojvXEC8ItVZy21SdGoKsH_OWd65iUlvxpzQA,20961
 alita_sdk/tools/ado/wiki/__init__.py,sha256=uBKo_Meu2ZxMxcxGsMmvCXyplRE2um1_PIRvdYd37rM,5171
-alita_sdk/tools/ado/wiki/ado_wrapper.py,sha256=YfumbP0lI04hXWY01P9f-q9JBWuQRaeIJEvYDpcZnu0,14238
+alita_sdk/tools/ado/wiki/ado_wrapper.py,sha256=AjavSE3peuQ-uONQmmfT3MS_8mxeSBRv7Q5QHt0Z2KU,14952
 alita_sdk/tools/ado/work_item/__init__.py,sha256=HNcdIMwTSNe-25_Pg-KmVVXTFci3vNa84tkTFkls36c,5373
-alita_sdk/tools/ado/work_item/ado_wrapper.py,sha256=uPhDp2zC8t42FQk7xc7gNyYs2o-hfOxsrw_rw31f7Sw,28223
+alita_sdk/tools/ado/work_item/ado_wrapper.py,sha256=TXl3V46SgGafQaxQKSTD3AN4MoQ3yNuQBwgVZ6-JhSk,28315
 alita_sdk/tools/advanced_jira_mining/__init__.py,sha256=pUTzECqGvYaR5qWY3JPUhrImrZgc7pCXuqSe5eWIE80,4604
 alita_sdk/tools/advanced_jira_mining/data_mining_wrapper.py,sha256=nZPtuwVWp8VeHw1B8q9kdwf-6ZvHnlXTOGdcIMDkKpw,44211
 alita_sdk/tools/aws/__init__.py,sha256=tB6GCOg4XGSpR6qgbgAF4MUQ5-YmQCbWurWgrVKEKQ8,181
@@ -299,7 +300,7 @@ alita_sdk/tools/servicenow/__init__.py,sha256=hReiTp8yv07eR0O_1KJThzUO2xhWhIWcjU
 alita_sdk/tools/servicenow/api_wrapper.py,sha256=WpH-bBLGFdhehs4g-K-WAkNuaD1CSrwsDpdgB3RG53s,6120
 alita_sdk/tools/servicenow/servicenow_client.py,sha256=Rdqfu-ll-qbnclMzChLZBsfXRDzgoX_FdeI2WLApWxc,3269
 alita_sdk/tools/sharepoint/__init__.py,sha256=Mofg_N-7zFf5mKm3_0D0dhC_H0MX-bk3YQ5Sl3oXokg,4114
-alita_sdk/tools/sharepoint/api_wrapper.py,sha256=-k2CPhS-mUjtAXVw6DHhP9c71oDcBjuxAljpK8bUGb0,11347
+alita_sdk/tools/sharepoint/api_wrapper.py,sha256=btyfIAAnxpj-MB5fq264JzMp6Q8svpSkOEUQEhhg9TM,11438
 alita_sdk/tools/sharepoint/authorization_helper.py,sha256=n-nL5dlBoLMK70nHu7P2RYCb8C6c9HMA_gEaw8LxuhE,2007
 alita_sdk/tools/sharepoint/utils.py,sha256=fZ1YzAu5CTjKSZeslowpOPH974902S8vCp1Wu7L44LM,446
 alita_sdk/tools/slack/__init__.py,sha256=o8BnDMWGC5qA8pVIyIiflM6T__dZ6qAE1UdtJcvmaxk,3901
@@ -313,7 +314,7 @@ alita_sdk/tools/testrail/__init__.py,sha256=0kETjWKLU7R6mugBWsjwEUsh10pipbAeNSGJ
 alita_sdk/tools/testrail/api_wrapper.py,sha256=5T-QyTzt-J0rI32xc_E684lCdgyWeHSyeTYiwQwtGyg,32275
 alita_sdk/tools/utils/__init__.py,sha256=155xepXPr4OEzs2Mz5YnjXcBpxSv1X2eznRUVoPtyK0,3268
 alita_sdk/tools/utils/available_tools_decorator.py,sha256=IbrdfeQkswxUFgvvN7-dyLMZMyXLiwvX7kgi3phciCk,273
-alita_sdk/tools/utils/content_parser.py,sha256=a8m5kSpEuI4d3YIJlBqSrHtEc-igAnOUDI_uRyo4Sls,9878
+alita_sdk/tools/utils/content_parser.py,sha256=ZHBHz2-VnDE3Q-YuPMZI4daI436MMNwHGmaVif-Issc,10309
 alita_sdk/tools/vector_adapters/VectorStoreAdapter.py,sha256=a6FAsiix_EvATIKUf5YT6vHh5LDyJ5uSP3LJqoxFo04,17367
 alita_sdk/tools/vector_adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/tools/xray/__init__.py,sha256=BnG2StSfX44CUMtrjHTcSCDWxxse5tCZqwyaZSkBKIc,4230
@@ -335,8 +336,8 @@ alita_sdk/tools/zephyr_scale/api_wrapper.py,sha256=HOt9ShtJI_1tVPcwd3Rwk-VS0SMLq
 alita_sdk/tools/zephyr_squad/__init__.py,sha256=0AI_j27xVO5Gk5HQMFrqPTd4uvuVTpiZUicBrdfEpKg,2796
 alita_sdk/tools/zephyr_squad/api_wrapper.py,sha256=kmw_xol8YIYFplBLWTqP_VKPRhL_1ItDD0_vXTe_UuI,14906
 alita_sdk/tools/zephyr_squad/zephyr_squad_cloud_client.py,sha256=R371waHsms4sllHCbijKYs90C-9Yu0sSR3N4SUfQOgU,5066
-alita_sdk-0.3.265.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-alita_sdk-0.3.265.dist-info/METADATA,sha256=X3vxAP0rhjG5eSPdKzPW5vKsWm7viCAJ0s1AGBbHnmI,18897
-alita_sdk-0.3.265.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-alita_sdk-0.3.265.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
-alita_sdk-0.3.265.dist-info/RECORD,,
+alita_sdk-0.3.267.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+alita_sdk-0.3.267.dist-info/METADATA,sha256=Iew_1rOLkmDqmW7OI53GCnR87M1LMIpVP43W5ELV3cs,18897
+alita_sdk-0.3.267.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+alita_sdk-0.3.267.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
+alita_sdk-0.3.267.dist-info/RECORD,,