alita-sdk 0.3.203__py3-none-any.whl → 0.3.205__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -194,7 +194,9 @@ PostmanUpdateRequestDescription = create_model(
 PostmanUpdateRequestHeaders = create_model(
     "PostmanUpdateRequestHeaders",
     request_path=(str, Field(description="Path to the request (folder/requestName)")),
-    headers=(Optional[List[Dict[str, Any]]], Field(default=None, description="Request headers."))
+    headers=(str, Field(description="String containing HTTP headers, separated by newline characters. "
+                                    "Each header should be in the format: \"Header-Name: value\". "
+                                    "Example: \"Content-Type: application/json\\nAuthorization: Bearer token123\". "))
 )
 
 PostmanUpdateRequestBody = create_model(
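With this change, `headers` is accepted as one newline-separated string instead of a list of header objects. A minimal sketch of the new value shape, purely illustrative (the commented-out old shape is a plausible reading of the removed `List[Dict[str, Any]]` type, not confirmed by this diff):

```python
# New (0.3.205) shape: a single string, one "Header-Name: value" pair per line.
headers = "Content-Type: application/json\nAuthorization: Bearer token123"

# Old (0.3.203) shape, no longer accepted by the schema (assumed dict layout):
# headers = [{"key": "Content-Type", "value": "application/json"}]
```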
@@ -207,8 +209,15 @@ PostmanUpdateRequestAuth = create_model(
     "PostmanUpdateRequestAuth",
     request_path=(str, Field(description="Path to the request (folder/requestName)")),
     auth=(Optional[Dict[str, Any]], Field(default=None,
-        description="Updated authentication settings. Example: {'type': 'bearer',token '': 'your_token'}"
-        ))
+        description=(
+            "An object. "
+            "For API key authentication, use: {\"type\": \"apikey\", \"apikey\": [{\"key\": \"key\", \"value\": \"api-key\"}, {\"key\": \"value\", \"value\": \"XXX\"}]}. "
+            "For bearer authentication, use: {\"type\": \"bearer\", \"bearer\": [{\"key\": \"token\", \"value\": \"XXX\", \"type\": \"string\"}]}. "
+            "For basic authentication, use: {\"type\": \"basic\", \"basic\": [{\"key\": \"username\", \"value\": \"user\"}, {\"key\": \"password\", \"value\": \"pass\"}]}. "
+            "`type`: Authentication type (e.g., \"apikey\", \"bearer\", \"basic\"). "
+            "`apikey`, `bearer`, `basic`: List of key-value pairs for configuration."
+            "Other types can be added as needed, following the same structure."
+        )))
 )
 
 PostmanUpdateRequestTests = create_model(
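The rewritten `auth` description enumerates the Postman auth object shapes the tool now documents. Written out as Python dicts, with all credential values being the placeholders from the field description itself:

```python
# Shapes taken directly from the new field description; values are placeholders.
apikey_auth = {
    "type": "apikey",
    "apikey": [
        {"key": "key", "value": "api-key"},
        {"key": "value", "value": "XXX"},
    ],
}

bearer_auth = {
    "type": "bearer",
    "bearer": [{"key": "token", "value": "XXX", "type": "string"}],
}

basic_auth = {
    "type": "basic",
    "basic": [
        {"key": "username", "value": "user"},
        {"key": "password", "value": "pass"},
    ],
}
```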
@@ -1644,7 +1653,7 @@ class PostmanApiWrapper(BaseToolApiWrapper):
             raise ToolException(
                 f"Unable to update request '{request_path}' description: {str(e)}")
 
-    def update_request_headers(self, request_path: str, headers: Dict[str, Any], **kwargs) -> str:
+    def update_request_headers(self, request_path: str, headers: str, **kwargs) -> str:
         """Update request headers."""
         try:
             # Get request item and ID
@@ -1652,7 +1661,7 @@ class PostmanApiWrapper(BaseToolApiWrapper):
 
             # Create update payload
             request_update = {
-                "header": headers
+                "headers": headers
             }
 
             # Update the headers field
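Taken together with the schema change above, both the public signature and the payload key move to the new string format. A hedged usage sketch, assuming an already-configured `PostmanApiWrapper` instance named `postman` and a hypothetical request path:

```python
# Hypothetical instance and request path; the headers string follows the
# newline-separated format documented in PostmanUpdateRequestHeaders.
result = postman.update_request_headers(
    request_path="MyFolder/Get Users",
    headers="Content-Type: application/json\nAuthorization: Bearer token123",
)
```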
@@ -1672,9 +1681,7 @@ class PostmanApiWrapper(BaseToolApiWrapper):
             request_item, request_id, _ = self._get_request_item_and_id(request_path)
 
             # Create update payload
-            request_update = {
-                "body": body
-            }
+            request_update = body
 
             # Update the body field
             response = self._make_request('PUT', f'/collections/{self.collection_id}/requests/{request_id}',
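After this hunk, the `body` argument is sent to the Postman API as the update payload itself rather than wrapped in a `{"body": ...}` envelope. A sketch of the difference, assuming `body` is already a dict in Postman's request-body format (the exact dict below is illustrative):

```python
body = {"mode": "raw", "raw": '{"name": "example"}'}

request_update_old = {"body": body}  # 0.3.203: wrapped in an envelope
request_update_new = body            # 0.3.205: passed through as-is
```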
@@ -1,5 +1,6 @@
+import json
 import logging
-from typing import Optional
+from typing import Optional, List, Dict, Any
 
 from ..utils.content_parser import parse_file_content
 from langchain_core.tools import ToolException
@@ -7,7 +8,9 @@ from office365.runtime.auth.client_credential import ClientCredential
 from office365.sharepoint.client_context import ClientContext
 from pydantic import Field, PrivateAttr, create_model, model_validator, SecretStr
 
-from ..elitea_base import BaseToolApiWrapper
+from ..elitea_base import BaseToolApiWrapper, BaseIndexParams, BaseVectorStoreToolApiWrapper
+from ...runtime.langchain.interfaces.llm_processor import get_embeddings
+from langchain_core.documents import Document
 
 NoInput = create_model(
     "NoInput"
@@ -32,14 +35,30 @@ ReadDocument = create_model(
     page_number=(Optional[int], Field(description="Specifies which page to read. If it is None, then full document will be read.", default=None))
 )
 
+indexData = create_model(
+    "indexData",
+    __base__=BaseIndexParams,
+    progress_step=(Optional[int], Field(default=None, ge=0, le=100,
+                                        description="Optional step size for progress reporting during indexing")),
+    clean_index=(Optional[bool], Field(default=False,
+                                       description="Optional flag to enforce clean existing index before indexing new data")),
+)
+
 
-class SharepointApiWrapper(BaseToolApiWrapper):
+class SharepointApiWrapper(BaseVectorStoreToolApiWrapper):
     site_url: str
     client_id: str = None
     client_secret: SecretStr = None
     token: SecretStr = None
     _client: Optional[ClientContext] = PrivateAttr() # Private attribute for the office365 client
 
+    llm: Any = None
+    connection_string: Optional[SecretStr] = None
+    collection_name: Optional[str] = None
+    embedding_model: Optional[str] = "HuggingFaceEmbeddings"
+    embedding_model_params: Optional[Dict[str, Any]] = {"model_name": "sentence-transformers/all-MiniLM-L6-v2"}
+    vectorstore_type: Optional[str] = "PGVector"
+
     @model_validator(mode='before')
     @classmethod
     def validate_toolkit(cls, values):
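The SharePoint wrapper now inherits from `BaseVectorStoreToolApiWrapper` and gains vector-store settings that default to HuggingFace sentence-transformers embeddings and PGVector. A hedged construction sketch; every value below is a placeholder, and a reachable PGVector database is assumed:

```python
# Placeholder credentials and connection string; pydantic coerces the plain
# strings into the SecretStr fields declared on the class.
wrapper = SharepointApiWrapper(
    site_url="https://example.sharepoint.com/sites/demo",
    client_id="my-client-id",
    client_secret="my-client-secret",
    connection_string="postgresql+psycopg://user:pass@localhost:5432/vectors",
    collection_name="sharepoint_demo",
    # embedding_model, embedding_model_params and vectorstore_type keep the
    # HuggingFace / PGVector defaults shown in the diff above.
)
```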
@@ -111,7 +130,8 @@ class SharepointApiWrapper(BaseToolApiWrapper):
                 'Path': file.properties['ServerRelativeUrl'],
                 'Created': file.properties['TimeCreated'],
                 'Modified': file.properties['TimeLastModified'],
-                'Link': file.properties['LinkingUrl']
+                'Link': file.properties['LinkingUrl'],
+                'id': file.properties['UniqueId']
             }
             result.append(temp_props)
         return result if result else ToolException("Can not get files or folder is empty. Please, double check folder name and read permissions.")
@@ -132,6 +152,36 @@ class SharepointApiWrapper(BaseToolApiWrapper):
             return ToolException("File not found. Please, check file name and path.")
         return parse_file_content(file.name, file_content, is_capture_image, page_number)
 
+    def _base_loader(self) -> List[Document]:
+        try:
+            all_files = self.get_files_list()
+        except Exception as e:
+            raise ToolException(f"Unable to extract files: {e}")
+
+        docs: List[Document] = []
+        for file in all_files:
+            metadata = {
+                ("updated_at" if k == "Modified" else k): str(v)
+                for k, v in file.items()
+            }
+            docs.append(Document(page_content="", metadata=metadata))
+        return docs
+
+    def index_data(self,
+                   collection_suffix: str = '',
+                   progress_step: int = None,
+                   clean_index: bool = False):
+        docs = self._base_loader()
+        embedding = get_embeddings(self.embedding_model, self.embedding_model_params)
+        vs = self._init_vector_store(collection_suffix, embeddings=embedding)
+        return vs.index_documents(docs, progress_step=progress_step, clean_index=clean_index)
+
+    def _process_document(self, document: Document) -> Document:
+        page_content = self.read_file(document.metadata['Path'], is_capture_image=True)
+
+        document.page_content = json.dumps(str(page_content))
+        return document
+
     def get_available_tools(self):
         return [
             {
@@ -151,5 +201,11 @@ class SharepointApiWrapper(BaseToolApiWrapper):
                 "description": self.read_file.__doc__,
                 "args_schema": ReadDocument,
                 "ref": self.read_file
+            },
+            {
+                "name": "index_data",
+                "ref": self.index_data,
+                "description": self.index_data.__doc__,
+                "args_schema": indexData,
             }
         ]
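With the `index_data` tool registered, indexing can also be invoked directly on the wrapper. Note the two-phase design: `_base_loader` emits Documents with empty page content and file metadata only, and `_process_document` fills in the content by reading the file (with image capture) later in the pipeline, so file reads are deferred until a document actually needs indexing. A sketch, reusing the hypothetical `wrapper` from above:

```python
# clean_index drops the existing index first; progress_step controls how
# often indexing progress is reported.
status = wrapper.index_data(collection_suffix="files",
                            progress_step=10,
                            clean_index=True)
```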
@@ -16,7 +16,15 @@ def get_tools(tool):
         url=tool['settings']['url'],
         password=tool['settings'].get('password', None),
         email=tool['settings'].get('email', None),
-        toolkit_name=tool.get('toolkit_name')
+        toolkit_name=tool.get('toolkit_name'),
+        llm=tool['settings'].get('llm', None),
+
+        # indexer settings
+        connection_string=tool['settings'].get('connection_string', None),
+        collection_name=f"{tool.get('toolkit_name')}_{str(tool['id'])}",
+        embedding_model="HuggingFaceEmbeddings",
+        embedding_model_params={"model_name": "sentence-transformers/all-MiniLM-L6-v2"},
+        vectorstore_type="PGVector"
     ).get_tools()
 
 
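On the toolkit side, the indexer settings are threaded through from the tool configuration, and the collection name is derived from the toolkit name and tool id. A hedged example of a `tool` dict that would exercise the new keyword arguments; only the keys read in this hunk are shown, and all values are placeholders:

```python
tool = {
    "id": 42,  # yields collection_name "testrail_42"
    "toolkit_name": "testrail",
    "settings": {
        "url": "https://example.testrail.io",
        "email": "qa@example.com",
        "password": "secret",
        "llm": None,  # or a configured LLM client
        "connection_string": "postgresql+psycopg://user:pass@localhost:5432/vectors",
    },
}
```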
@@ -1,14 +1,18 @@
 import json
 import logging
-from typing import Dict, List, Optional, Union
+from typing import Dict, List, Optional, Union, Any
 
 import pandas as pd
 from langchain_core.tools import ToolException
 from pydantic import SecretStr, create_model, model_validator
 from pydantic.fields import Field, PrivateAttr
 from testrail_api import StatusCodeError, TestRailAPI
-
-from ..elitea_base import BaseToolApiWrapper
+from ..elitea_base import BaseVectorStoreToolApiWrapper, BaseIndexParams
+from langchain_core.documents import Document
+try:
+    from alita_sdk.runtime.langchain.interfaces.llm_processor import get_embeddings
+except ImportError:
+    from alita_sdk.langchain.interfaces.llm_processor import get_embeddings
 
 logger = logging.getLogger(__name__)
 
@@ -281,6 +285,19 @@ updateCase = create_model(
     ),
 )
 
+# Schema for indexing TestRail data into vector store
+indexData = create_model(
+    "indexData",
+    __base__=BaseIndexParams,
+    project_id=(str, Field(description="TestRail project ID to index data from")),
+    suite_id=(Optional[str], Field(default=None, description="Optional TestRail suite ID to filter test cases")),
+    section_id=(Optional[int], Field(default=None, description="Optional section ID to filter test cases")),
+    title_keyword=(Optional[str], Field(default=None, description="Optional keyword to filter test cases by title")),
+    progress_step=(Optional[int],
+                   Field(default=None, ge=0, le=100, description="Optional step size for progress reporting during indexing")),
+    clean_index=(Optional[bool],
+                 Field(default=False, description="Optional flag to enforce clean existing index before indexing new data")),
+)
 
 SUPPORTED_KEYS = {
     "id", "title", "section_id", "template_id", "type_id", "priority_id", "milestone_id",
@@ -291,11 +308,19 @@ SUPPORTED_KEYS = {
 }
 
 
-class TestrailAPIWrapper(BaseToolApiWrapper):
+class TestrailAPIWrapper(BaseVectorStoreToolApiWrapper):
     url: str
     password: Optional[SecretStr] = None,
     email: Optional[str] = None,
     _client: Optional[TestRailAPI] = PrivateAttr() # Private attribute for the TestRail client
+    llm: Any = None
+
+    connection_string: Optional[SecretStr] = None
+    collection_name: Optional[str] = None
+    embedding_model: Optional[str] = "HuggingFaceEmbeddings"
+    embedding_model_params: Optional[Dict[str, Any]] = {"model_name": "sentence-transformers/all-MiniLM-L6-v2"}
+    vectorstore_type: Optional[str] = "PGVector"
+
 
     @model_validator(mode="before")
     @classmethod
@@ -492,7 +517,7 @@ class TestrailAPIWrapper(BaseToolApiWrapper):
         you can submit and update specific fields only).
 
         :param case_id: T
-        he ID of the test case
+        He ID of the test case
         :param kwargs:
             :key title: str
                 The title of the test case
@@ -522,6 +547,98 @@ class TestrailAPIWrapper(BaseToolApiWrapper):
             f"Test case #{case_id} has been updated at '{updated_case['updated_on']}')"
         )
 
+    def _base_loader(self, project_id: str,
+                     suite_id: Optional[str] = None,
+                     section_id: Optional[int] = None,
+                     title_keyword: Optional[str] = None
+                     ) -> List[Document]:
+        try:
+            if suite_id:
+                resp = self._client.cases.get_cases(project_id=project_id, suite_id=int(suite_id))
+                cases = resp.get('cases', [])
+            else:
+                resp = self._client.cases.get_cases(project_id=project_id)
+                cases = resp.get('cases', [])
+        except StatusCodeError as e:
+            raise ToolException(f"Unable to extract test cases: {e}")
+        # Apply filters
+        if section_id is not None:
+            cases = [case for case in cases if case.get('section_id') == section_id]
+        if title_keyword is not None:
+            cases = [case for case in cases if title_keyword.lower() in case.get('title', '').lower()]
+
+        docs: List[Document] = []
+        for case in cases:
+            docs.append(Document(page_content=json.dumps(case), metadata={
+                'project_id': project_id,
+                'title': case.get('title', ''),
+                'suite_id': suite_id or case.get('suite_id', ''),
+                'id': str(case.get('id', '')),
+                'updated_on': case.get('updated_on', ''),
+            }))
+        return docs
+
+    def index_data(
+        self,
+        project_id: str,
+        suite_id: Optional[str] = None,
+        collection_suffix: str = "",
+        section_id: Optional[int] = None,
+        title_keyword: Optional[str] = None,
+        progress_step: Optional[int] = None,
+        clean_index: Optional[bool] = False
+    ):
+        """Load TestRail test cases into the vector store."""
+        docs = self._base_loader(project_id, suite_id, section_id, title_keyword)
+        embedding = get_embeddings(self.embedding_model, self.embedding_model_params)
+        vs = self._init_vector_store(collection_suffix, embeddings=embedding)
+        return vs.index_documents(docs, progress_step=progress_step, clean_index=clean_index)
+
+    def _process_document(self, document: Document) -> Document:
+        """
+        Process an existing base document to extract relevant metadata for full document preparation.
+        Used for late processing of documents after we ensure that the document has to be indexed to avoid
+        time-consuming operations for documents which might be useless.
+
+        Args:
+            document (Document): The base document to process.
+
+        Returns:
+            Document: The processed document with metadata.
+        """
+        try:
+            # get base data from the document required to extract attachments and other metadata
+            base_data = json.loads(document.page_content)
+            case_id = base_data.get("id")
+
+            # get a list of attachments for the case
+            attachments = self._client.attachments.get_attachments_for_case_bulk(case_id=case_id)
+            attachments_data = {}
+
+            # process each attachment to extract its content
+            for attachment in attachments:
+                attachments_data[attachment['filename']] = self._process_attachment(attachment)
+            base_data['attachments'] = attachments_data
+            document.page_content = json.dumps(base_data)
+            return document
+        except json.JSONDecodeError as e:
+            raise ToolException(f"Failed to decode JSON from document: {e}")
+
+    def _process_attachment(self, attachment: Dict[str, Any]) -> str:
+        """
+        Processes an attachment to extract its content.
+
+        Args:
+            attachment (Dict[str, Any]): The attachment data.
+
+        Returns:
+            str: string description of the attachment.
+        """
+        if attachment['filetype'] == 'txt' :
+            return self._client.get(endpoint=f"get_attachment/{attachment['id']}")
+        # TODO: add support for other file types
+        return "This filetype is not supported."
+
     def _to_markup(self, data: List[Dict], output_format: str) -> str:
         """
         Converts the given data into the specified format: 'json', 'csv', or 'markdown'.
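End to end: `_base_loader` serializes each matching case to JSON with identifying metadata, `index_data` embeds and stores the documents, and `_process_document` later enriches only the cases that actually get indexed with their txt attachments. A hedged invocation sketch, assuming a configured `TestrailAPIWrapper` instance named `testrail`:

```python
# Filters mirror the indexData schema; all values are placeholders.
status = testrail.index_data(
    project_id="1",
    suite_id="3",
    title_keyword="login",
    progress_step=20,
    clean_index=False,
)
```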
@@ -550,7 +667,7 @@ class TestrailAPIWrapper(BaseToolApiWrapper):
             return df.to_markdown(index=False)
 
     def get_available_tools(self):
-        return [
+        tools = [
             {
                 "name": "get_case",
                 "ref": self.get_case,
@@ -587,4 +704,13 @@ class TestrailAPIWrapper(BaseToolApiWrapper):
                 "description": self.update_case.__doc__,
                 "args_schema": updateCase,
             },
+            {
+                "name": "index_data",
+                "ref": self.index_data,
+                "description": self.index_data.__doc__,
+                "args_schema": indexData,
+            }
         ]
+        # Add vector search from base
+        tools.extend(self._get_vector_search_tools())
+        return tools
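Since `get_available_tools` now appends the base class's vector search tools before returning, a quick hedged sanity check on the hypothetical `testrail` instance:

```python
names = [tool["name"] for tool in testrail.get_available_tools()]
assert "index_data" in names  # new in 0.3.205
# ...plus whatever entries _get_vector_search_tools() contributes.
```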