alita-sdk 0.3.209__py3-none-any.whl → 0.3.210__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. alita_sdk/runtime/clients/artifact.py +18 -4
  2. alita_sdk/runtime/langchain/document_loaders/AlitaCSVLoader.py +2 -1
  3. alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +3 -3
  4. alita_sdk/runtime/langchain/document_loaders/AlitaImageLoader.py +8 -4
  5. alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py +1 -1
  6. alita_sdk/runtime/langchain/langraph_agent.py +1 -1
  7. alita_sdk/runtime/toolkits/artifact.py +7 -3
  8. alita_sdk/runtime/toolkits/tools.py +8 -1
  9. alita_sdk/runtime/tools/application.py +2 -0
  10. alita_sdk/runtime/tools/artifact.py +65 -8
  11. alita_sdk/runtime/tools/vectorstore.py +125 -41
  12. alita_sdk/runtime/utils/utils.py +3 -0
  13. alita_sdk/tools/ado/__init__.py +8 -0
  14. alita_sdk/tools/ado/repos/repos_wrapper.py +37 -0
  15. alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +0 -7
  16. alita_sdk/tools/ado/work_item/__init__.py +4 -0
  17. alita_sdk/tools/ado/work_item/ado_wrapper.py +37 -4
  18. alita_sdk/tools/aws/delta_lake/__init__.py +1 -1
  19. alita_sdk/tools/bitbucket/__init__.py +13 -1
  20. alita_sdk/tools/bitbucket/api_wrapper.py +31 -4
  21. alita_sdk/tools/bitbucket/cloud_api_wrapper.py +31 -0
  22. alita_sdk/tools/chunkers/code/codeparser.py +18 -10
  23. alita_sdk/tools/confluence/api_wrapper.py +35 -134
  24. alita_sdk/tools/confluence/loader.py +30 -28
  25. alita_sdk/tools/elitea_base.py +112 -11
  26. alita_sdk/tools/figma/__init__.py +13 -1
  27. alita_sdk/tools/figma/api_wrapper.py +47 -3
  28. alita_sdk/tools/github/api_wrapper.py +8 -0
  29. alita_sdk/tools/github/github_client.py +18 -0
  30. alita_sdk/tools/gitlab/__init__.py +4 -0
  31. alita_sdk/tools/gitlab/api_wrapper.py +10 -0
  32. alita_sdk/tools/google/bigquery/__init__.py +1 -1
  33. alita_sdk/tools/jira/__init__.py +21 -13
  34. alita_sdk/tools/jira/api_wrapper.py +285 -5
  35. alita_sdk/tools/sharepoint/__init__.py +11 -1
  36. alita_sdk/tools/sharepoint/api_wrapper.py +23 -53
  37. alita_sdk/tools/testrail/__init__.py +4 -0
  38. alita_sdk/tools/testrail/api_wrapper.py +21 -54
  39. alita_sdk/tools/utils/content_parser.py +72 -8
  40. alita_sdk/tools/xray/__init__.py +8 -1
  41. alita_sdk/tools/xray/api_wrapper.py +505 -14
  42. alita_sdk/tools/zephyr_scale/api_wrapper.py +5 -5
  43. {alita_sdk-0.3.209.dist-info → alita_sdk-0.3.210.dist-info}/METADATA +1 -1
  44. {alita_sdk-0.3.209.dist-info → alita_sdk-0.3.210.dist-info}/RECORD +47 -47
  45. {alita_sdk-0.3.209.dist-info → alita_sdk-0.3.210.dist-info}/WHEEL +0 -0
  46. {alita_sdk-0.3.209.dist-info → alita_sdk-0.3.210.dist-info}/licenses/LICENSE +0 -0
  47. {alita_sdk-0.3.209.dist-info → alita_sdk-0.3.210.dist-info}/top_level.txt +0 -0
alita_sdk/tools/xray/api_wrapper.py
@@ -1,14 +1,25 @@
+import json
 import logging
-from typing import Optional, Any, List
+import hashlib
+from typing import Any, Dict, Generator, List, Optional
 
 import requests
+from langchain_core.documents import Document
 from langchain_core.tools import ToolException
-from pydantic import create_model, PrivateAttr, SecretStr
-from pydantic import model_validator
-from pydantic.fields import Field
+from pydantic import PrivateAttr, SecretStr, create_model, model_validator, Field
 from python_graphql_client import GraphqlClient
 
-from ..elitea_base import BaseToolApiWrapper
+from ..elitea_base import (
+    BaseVectorStoreToolApiWrapper,
+    extend_with_vector_tools,
+)
+from ...runtime.utils.utils import IndexerKeywords
+from ..utils.content_parser import parse_file_content, load_content_from_bytes
+
+try:
+    from alita_sdk.runtime.langchain.interfaces.llm_processor import get_embeddings
+except ImportError:
+    from alita_sdk.langchain.interfaces.llm_processor import get_embeddings
 
 logger = logging.getLogger(__name__)
 
@@ -20,7 +31,7 @@ _get_tests_query = """query GetTests($jql: String!, $limit:Int!, $start: Int)
         limit
         results {
             issueId
-            jira(fields: ["key"])
+            jira(fields: ["key", "summary", "created", "updated", "assignee.displayName", "reporter.displayName"])
             projectId
             testType {
                 name
@@ -34,6 +45,7 @@ _get_tests_query = """query GetTests($jql: String!, $limit:Int!, $start: Int)
                 attachments {
                     id
                     filename
+                    downloadLink
                 }
             }
             preconditions(limit: $limit) {
@@ -46,6 +58,8 @@ _get_tests_query = """query GetTests($jql: String!, $limit:Int!, $start: Int)
                     projectId
                 }
             }
+            unstructured
+            gherkin
         }
     }
 }
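The enriched GetTests query above is executed with JQL and paging variables (see _get_tests_direct further down in this diff). As a minimal illustration of how such a query runs against the Xray GraphQL endpoint (the endpoint URL and token below are placeholders, not values from this diff):

    from python_graphql_client import GraphqlClient

    # Placeholder endpoint/token; the wrapper builds these from base_url
    # and the token returned by /api/v1/authenticate.
    client = GraphqlClient(
        endpoint="https://xray.cloud.getxray.app/api/v2/graphql",
        headers={"Authorization": "Bearer <token>"},
    )
    response = client.execute(
        query=_get_tests_query,
        variables={"jql": 'project = "CALC"', "start": 0, "limit": 100},
    )
    tests = response["data"]["getTests"]["results"]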
@@ -96,7 +110,6 @@ XrayCreateTests = create_model(
     graphql_mutations=(list[str], Field(description="list of GraphQL mutations:\n" + _graphql_mutation_description))
 )
 
-
 def _parse_tests(test_results) -> List[Any]:
     """Handles tests in order to minimize tests' output"""
 
@@ -107,13 +120,16 @@ def _parse_tests(test_results) -> List[Any]:
     return test_results
 
 
-class XrayApiWrapper(BaseToolApiWrapper):
+class XrayApiWrapper(BaseVectorStoreToolApiWrapper):
     _default_base_url: str = 'https://xray.cloud.getxray.app'
     base_url: str = ""
-    client_id: str = None,
-    client_secret: SecretStr = None,
-    limit: Optional[int] = 100,
+    client_id: str = None
+    client_secret: SecretStr = None
+    limit: Optional[int] = 100
     _client: Optional[GraphqlClient] = PrivateAttr()
+    _auth_token: Optional[str] = PrivateAttr(default=None)
+
+    doctype: str = "xray_test"
 
     class Config:
         arbitrary_types_allowed = True
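The dropped trailing commas here are a genuine bug fix, not cosmetics: in a class body, a trailing comma turns the declared default into a one-element tuple rather than the intended value. A minimal demonstration with a plain class:

    class Demo:
        broken: str = None,   # the comma binds the default to the tuple (None,)
        fixed: str = None     # binds the default to None

    assert Demo.broken == (None,)
    assert Demo.fixed is None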
@@ -139,15 +155,80 @@ class XrayApiWrapper(BaseToolApiWrapper):
         }
         try:
             auth_response = requests.post(auth_url, json=auth_data)
+            auth_response.raise_for_status()
             token = auth_response.json()
-            cls._client = GraphqlClient(endpoint=f"{values['base_url']}/api/v2/graphql",
-                                        headers={'Authorization': f'Bearer {token}'})
+            values['_auth_token'] = token
+
+            values['_client_endpoint'] = f"{values['base_url']}/api/v2/graphql"
+            values['_client_headers'] = {'Authorization': f'Bearer {token}'}
+
         except Exception as e:
             if "invalid or doesn't have the required permissions" in str(e):
                 masked_secret = '*' * (len(client_secret) - 4) + client_secret[-4:] if client_secret is not None else "UNDEFINED"
                 return ToolException(f"Please, check you credentials ({values['client_id']} / {masked_secret}). Unable")
+            else:
+                return ToolException(f"Authentication failed: {str(e)}")
         return values
 
+    def __init__(self, **data):
+        super().__init__(**data)
+
+        from python_graphql_client import GraphqlClient
+
+        if not hasattr(self, '_auth_token') or self._auth_token is None:
+            if hasattr(self, 'client_id') and hasattr(self, 'client_secret'):
+                try:
+                    auth_url = f"{self.base_url}/api/v1/authenticate"
+                    auth_data = {
+                        "client_id": self.client_id,
+                        "client_secret": self.client_secret.get_secret_value() if hasattr(self.client_secret, 'get_secret_value') else str(self.client_secret)
+                    }
+                    auth_response = requests.post(auth_url, json=auth_data, timeout=30)
+                    auth_response.raise_for_status()
+                    self._auth_token = auth_response.json()
+                except Exception as e:
+                    raise ToolException(f"Failed to authenticate in __init__: {str(e)}")
+            else:
+                raise ToolException("No client_id or client_secret available for authentication")
+
+        # Initialize the GraphQL client
+        if self._auth_token and hasattr(self, 'base_url'):
+            endpoint = f"{self.base_url}/api/v2/graphql"
+            headers = {'Authorization': f'Bearer {self._auth_token}'}
+            self._client = GraphqlClient(endpoint=endpoint, headers=headers)
+        else:
+            raise ToolException(f"GraphQL client could not be initialized - missing auth_token: {self._auth_token is not None}, base_url: {hasattr(self, 'base_url')}")
+
+        if '_graphql_endpoint' in data:
+            self._graphql_endpoint = data['_graphql_endpoint']
+
+    def _ensure_auth_token(self) -> str:
+        """
+        Ensure we have a valid auth token, refreshing if necessary.
+
+        Returns:
+            str: The authentication token
+
+        Raises:
+            ToolException: If authentication fails
+        """
+        if self._auth_token is None:
+            logger.warning("Auth token is None, attempting to re-authenticate")
+            try:
+                auth_url = f"{self.base_url}/api/v1/authenticate"
+                auth_data = {
+                    "client_id": self.client_id,
+                    "client_secret": self.client_secret.get_secret_value() if hasattr(self.client_secret, 'get_secret_value') else str(self.client_secret)
+                }
+                auth_response = requests.post(auth_url, json=auth_data, timeout=30)
+                auth_response.raise_for_status()
+                self._auth_token = auth_response.json()
+                logger.info("Successfully re-authenticated and obtained new token")
+            except Exception as e:
+                raise ToolException(f"Failed to authenticate: {str(e)}")
+
+        return self._auth_token
+
     def get_tests(self, jql: str):
         """get all tests"""
 
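Both the validator and the new __init__ follow the same two-step Xray Cloud flow: exchange client credentials for a bearer token, then reuse that token for every GraphQL call and attachment download (with _ensure_auth_token and _process_attachment re-authenticating on failure). Sketched outside the wrapper, with placeholder credentials:

    import requests

    base_url = "https://xray.cloud.getxray.app"  # the wrapper's _default_base_url

    # Step 1: /api/v1/authenticate returns the bearer token as a JSON-encoded string
    resp = requests.post(
        f"{base_url}/api/v1/authenticate",
        json={"client_id": "<client_id>", "client_secret": "<client_secret>"},
        timeout=30,
    )
    resp.raise_for_status()
    token = resp.json()

    # Step 2: the token authorizes /api/v2/graphql queries and downloads
    headers = {"Authorization": f"Bearer {token}"}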
@@ -197,8 +278,415 @@ class XrayApiWrapper(BaseToolApiWrapper):
         except Exception as e:
             return ToolException(f"Unable to execute custom graphql due to error:\n{str(e)}")
 
+    def _base_loader(
+        self, jql: Optional[str] = None, graphql: Optional[str] = None, include_attachments: Optional[bool] = False,
+        skip_attachment_extensions: Optional[List[str]] = None, **kwargs: Any
+    ) -> Generator[Document, None, None]:
+        """
+        Index Xray test cases into vector store using JQL query or custom GraphQL.
+
+        Args:
+            jql: JQL query for searching test cases
+            graphql: Custom GraphQL query for advanced data extraction
+            include_attachments: Whether to include attachment content in indexing
+        Examples:
+            # Using JQL
+            jql = 'project = "CALC" AND testType = "Manual" AND labels in ("Smoke", "Critical")'
+
+            # Using GraphQL
+            graphql = 'query { getTests(jql: "project = \\"CALC\\"") { results { issueId jira(fields: ["key"]) steps { action result } } } }'
+        """
+
+        self._skipped_attachment_extensions = skip_attachment_extensions if skip_attachment_extensions else []
+        self._include_attachments = include_attachments
+
+        if not jql and not graphql:
+            raise ToolException("Either 'jql' or 'graphql' parameter must be provided.")
+
+        if jql and graphql:
+            raise ToolException("Please provide either 'jql' or 'graphql', not both.")
+
+        try:
+            if jql:
+                tests_data = self._get_tests_direct(jql)
+
+            elif graphql:
+                graphql_data = self._execute_graphql_direct(graphql)
+
+                if "data" in graphql_data:
+                    if "getTests" in graphql_data["data"]:
+                        tests_data = graphql_data["data"]["getTests"].get("results", [])
+                    else:
+                        tests_data = []
+                        for key, value in graphql_data["data"].items():
+                            if isinstance(value, list):
+                                tests_data = value
+                                break
+                            elif isinstance(value, dict) and "results" in value:
+                                tests_data = value["results"]
+                                break
+                else:
+                    tests_data = graphql_data if isinstance(graphql_data, list) else []
+
+                if not tests_data:
+                    raise ToolException("No test data found in GraphQL response")
+
+            for test in tests_data:
+                page_content = ""
+                test_type_name = test.get("testType", {}).get("name", "").lower()
+
+                attachment_ids = []
+                if include_attachments and "steps" in test:
+                    for step in test["steps"]:
+                        if "attachments" in step and step["attachments"]:
+                            for attachment in step["attachments"]:
+                                if attachment and "id" in attachment:
+                                    attachment_ids.append(str(attachment["id"]))
+
+                if test_type_name == "manual" and "steps" in test and test["steps"]:
+                    steps_content = []
+                    for step in test["steps"]:
+                        step_obj = {}
+                        if step.get("action"):
+                            step_obj["action"] = step["action"]
+                        if step.get("data"):
+                            step_obj["data"] = step["data"]
+                        if step.get("result"):
+                            step_obj["result"] = step["result"]
+                        if step_obj:
+                            steps_content.append(step_obj)
+
+                    content_structure = {"steps": steps_content}
+                    if attachment_ids:
+                        content_structure["attachment_ids"] = sorted(attachment_ids)
+                    page_content = json.dumps(content_structure, indent=2)
+
+                elif test_type_name == "cucumber" and test.get("gherkin"):
+                    content_structure = {"gherkin": test["gherkin"]}
+                    if attachment_ids:
+                        content_structure["attachment_ids"] = sorted(attachment_ids)
+                    page_content = json.dumps(content_structure, indent=2)
+
+                elif test.get("unstructured"):
+                    content_structure = {"unstructured": test["unstructured"]}
+                    if attachment_ids:
+                        content_structure["attachment_ids"] = sorted(attachment_ids)
+                    page_content = json.dumps(content_structure, indent=2)
+
+                metadata = {"doctype": self.doctype}
+
+                if "jira" in test and test["jira"]:
+                    jira_data = test["jira"]
+                    metadata["key"] = jira_data.get("key", "")
+                    metadata["summary"] = jira_data.get("summary", "")
+
+                    if "created" in jira_data:
+                        metadata["created_on"] = jira_data["created"]
+
+                    content_hash = hashlib.sha256(page_content.encode('utf-8')).hexdigest()[:16]
+                    metadata["updated_on"] = content_hash
+
+                    if "assignee" in jira_data and jira_data["assignee"]:
+                        metadata["assignee"] = str(jira_data["assignee"])
+
+                    if "reporter" in jira_data and jira_data["reporter"]:
+                        metadata["reporter"] = str(jira_data["reporter"])
+
+                if "issueId" in test:
+                    metadata["issueId"] = str(test["issueId"])
+                    metadata["id"] = str(test["issueId"])
+                if "projectId" in test:
+                    metadata["projectId"] = str(test["projectId"])
+                if "testType" in test and test["testType"]:
+                    metadata["testType"] = test["testType"].get("name", "")
+                    metadata["testKind"] = test["testType"].get("kind", "")
+
+                if include_attachments and "steps" in test:
+                    attachments_data = []
+                    for step in test["steps"]:
+                        if "attachments" in step and step["attachments"]:
+                            for attachment in step["attachments"]:
+                                if attachment and "id" in attachment and "filename" in attachment:
+                                    attachments_data.append(attachment)
+                    if attachments_data:
+                        metadata["_attachments_data"] = attachments_data
+
+                yield Document(page_content=page_content, metadata=metadata)
+
+        except Exception as e:
+            logger.error(f"Error processing test data: {e}")
+            raise ToolException(f"Error processing test data: {e}")
+
+    def _process_document(self, document: Document) -> Generator[Document, None, None]:
+        """
+        Process an existing base document to extract relevant metadata for full document preparation.
+        Used for late processing of documents after we ensure that the document has to be indexed to avoid
+        time-consuming operations for documents which might be useless.
+
+        Args:
+            document (Document): The base document to process.
+
+        Returns:
+            Generator[Document, None, None]: A generator yielding processed Document objects with metadata.
+        """
+        try:
+            if not getattr(self, '_include_attachments', False):
+                yield document
+                return
+
+            attachments_data = document.metadata.get("_attachments_data", [])
+            if not attachments_data:
+                yield document
+                return
+
+            issue_id = document.metadata.get("id")
+
+            for attachment in attachments_data:
+                filename = attachment.get('filename', '')
+                if filename:
+                    ext = f".{filename.split('.')[-1].lower()}"
+                else:
+                    ext = ""
+
+                if hasattr(self, '_skipped_attachment_extensions') and ext in self._skipped_attachment_extensions:
+                    logger.info(f"Skipping attachment {filename} due to extension filter: {ext}")
+                    continue
+
+                attachment_id = f"attach_{attachment['id']}"
+                document.metadata.setdefault(
+                    IndexerKeywords.DEPENDENT_DOCS.value, []
+                ).append(attachment_id)
+
+                try:
+                    content = self._process_attachment(attachment)
+                    if not content or content.startswith("Attachment processing failed"):
+                        logger.warning(f"Skipping attachment {filename} due to processing failure")
+                        continue
+                except Exception as e:
+                    logger.error(f"Failed to process attachment {filename}: {str(e)}")
+                    continue
+
+                attachment_metadata = {
+                    'id': str(attachment_id),
+                    'issue_key': document.metadata.get('key', ''),
+                    'issueId': str(issue_id),
+                    'projectId': document.metadata.get('projectId', ''),
+                    'source': f"xray_test_{issue_id}",
+                    'filename': filename,
+                    'download_link': attachment.get('downloadLink', ''),
+                    'entity_type': 'test_case_attachment',
+                    'key': document.metadata.get('key', ''),
+                    IndexerKeywords.PARENT.value: document.metadata.get('id', str(issue_id)),
+                    'type': 'attachment',
+                    'doctype': self.doctype,
+                }
+
+                yield Document(
+                    page_content=content,
+                    metadata=attachment_metadata
+                )
+
+            if "_attachments_data" in document.metadata:
+                del document.metadata["_attachments_data"]
+
+            yield document
+
+        except Exception as e:
+            logger.error(f"Error processing document for attachments: {e}")
+            yield document
+
+    def _process_attachment(self, attachment: Dict[str, Any]) -> str:
+        """
+        Processes an attachment to extract its content.
+
+        Args:
+            attachment (Dict[str, Any]): The attachment data containing id, filename, and downloadLink.
+
+        Returns:
+            str: String description/content of the attachment.
+        """
+        try:
+            download_link = attachment.get('downloadLink')
+            filename = attachment.get('filename', '')
+
+            if not download_link:
+                return f"Attachment: {filename} (no download link available)"
+
+            try:
+                auth_token = self._ensure_auth_token()
+                headers = {'Authorization': f'Bearer {auth_token}'}
+                response = requests.get(download_link, headers=headers, timeout=30)
+                response.raise_for_status()
+
+                ext = f".{filename.split('.')[-1].lower()}" if filename and '.' in filename else ""
+
+                if ext == '.pdf':
+                    content = parse_file_content(
+                        file_content=response.content,
+                        file_name=filename,
+                        llm=self.llm,
+                        is_capture_image=True
+                    )
+                else:
+                    content = load_content_from_bytes(
+                        response.content,
+                        ext,
+                        llm=self.llm
+                    )
+
+                if content:
+                    return f"filename: {filename}\ncontent: {content}"
+                else:
+                    logger.warning(f"No content extracted from attachment {filename}")
+                    return f"filename: {filename}\ncontent: [No extractable content]"
+
+            except requests.RequestException as req_e:
+                logger.error(f"Unable to download attachment {filename} with existing token: {req_e}")
+
+                # If the token fails (401 Unauthorized), try to re-authenticate and retry
+                if "401" in str(req_e) or "Unauthorized" in str(req_e):
+                    try:
+                        logger.info(f"Re-authenticating for attachment download: {filename}")
+                        # Re-authenticate to get a fresh token
+                        auth_url = f"{self.base_url}/api/v1/authenticate"
+                        auth_data = {
+                            "client_id": self.client_id,
+                            "client_secret": self.client_secret.get_secret_value() if hasattr(self.client_secret, 'get_secret_value') else str(self.client_secret)
+                        }
+                        auth_response = requests.post(auth_url, json=auth_data, timeout=30)
+                        auth_response.raise_for_status()
+                        fresh_token = auth_response.json()
+
+                        fresh_headers = {'Authorization': f'Bearer {fresh_token}'}
+                        response = requests.get(download_link, headers=fresh_headers, timeout=60)
+                        response.raise_for_status()
+
+                        ext = f".{filename.split('.')[-1].lower()}" if filename and '.' in filename else ""
+                        content = parse_file_content(
+                            file_content=response.content,
+                            file_name=filename,
+                            llm=self.llm,
+                            is_capture_image=True
+                        ) if ext == '.pdf' else load_content_from_bytes(response.content, ext, llm=self.llm)
+
+                        if content:
+                            return f"filename: {filename}\ncontent: {content}"
+                        else:
+                            return f"filename: {filename}\ncontent: [Content extraction failed after re-auth]"
+
+                    except Exception as reauth_e:
+                        logger.error(f"Re-authentication and retry failed for {filename}: {reauth_e}")
+                        return f"Attachment: {filename} (download failed: {str(req_e)}, re-auth failed: {str(reauth_e)})"
+                else:
+                    try:
+                        auth_token = self._ensure_auth_token()
+                        fallback_headers = {
+                            'Authorization': f'Bearer {auth_token}',
+                            'User-Agent': 'Mozilla/5.0 (compatible; XrayAPI/1.0; Python)',
+                            'Accept': '*/*'
+                        }
+                        response = requests.get(download_link, headers=fallback_headers, timeout=60)
+                        response.raise_for_status()
+
+                        ext = f".{filename.split('.')[-1].lower()}" if filename and '.' in filename else ""
+                        content = parse_file_content(
+                            file_content=response.content,
+                            file_name=filename,
+                            llm=self.llm,
+                            is_capture_image=True
+                        ) if ext == '.pdf' else load_content_from_bytes(response.content, ext, llm=self.llm)
+
+                        if content:
+                            return f"filename: {filename}\ncontent: {content}"
+                        else:
+                            return f"filename: {filename}\ncontent: [Content extraction failed after fallback]"
+
+                    except Exception as fallback_e:
+                        logger.error(f"Fallback download also failed for {filename}: {fallback_e}")
+                        return f"Attachment: {filename} (download failed: {str(req_e)}, fallback failed: {str(fallback_e)})"
+
+            except Exception as parse_e:
+                logger.error(f"Unable to parse attachment {filename}: {parse_e}")
+                return f"Attachment: {filename} (parsing failed: {str(parse_e)})"
+
+        except Exception as e:
+            logger.error(f"Error processing attachment: {e}")
+            return f"Attachment processing failed: {str(e)}"
+
+    def _index_tool_params(self, **kwargs) -> dict[str, tuple[type, Field]]:
+        return {
+            'jql': (Optional[str], Field(description="""JQL query for searching test cases in Xray.
+
+                Standard JQL query syntax for filtering Xray test cases. Examples:
+                - project = "CALC" AND testType = "Manual"
+                - project = "CALC" AND labels in ("Smoke", "Regression")
+                - project = "CALC" AND summary ~ "login"
+                - project = "CALC" AND testType = "Manual" AND labels = "Critical"
+
+                Supported fields:
+                - project: project key filter (e.g., project = "CALC")
+                - testType: filter by test type (e.g., testType = "Manual")
+                - labels: filter by labels (e.g., labels = "Smoke" or labels in ("Smoke", "Regression"))
+                - summary: search in test summary (e.g., summary ~ "login")
+                - description: search in test description
+                - status: filter by test status
+                - priority: filter by test priority
+
+                Example:
+                'project = "CALC" AND testType = "Manual" AND labels in ("Smoke", "Critical")'
+                """, default=None)),
+            'graphql': (Optional[str], Field(description="""Custom GraphQL query for advanced data extraction.
+
+                Use this for custom GraphQL queries that return test data. The query should return test objects
+                with relevant fields like issueId, jira, testType, steps, etc.
+
+                Example:
+                'query { getTests(jql: "project = \\"CALC\\"") { results { issueId jira(fields: ["key"]) testType { name } steps { action result } } } }'
+                """, default=None)),
+            'include_attachments': (Optional[bool],
+                                    Field(description="Whether to include attachment content in indexing",
+                                          default=False)),
+            'skip_attachment_extensions': (Optional[List[str]], Field(
+                description="List of file extensions to skip when processing attachments (e.g., ['.exe', '.zip', '.bin'])",
+                default=None)),
+        }
+
+    def _get_tests_direct(self, jql: str) -> List[Dict]:
+        """Direct method to get test data without string formatting"""
+        start_at = 0
+        all_tests = []
+        logger.info(f"[indexing] jql to get tests: {jql}")
+
+        while True:
+            try:
+                get_tests_response = self._client.execute(query=_get_tests_query,
+                                                          variables={"jql": jql, "start": start_at,
+                                                                     "limit": self.limit})['data']["getTests"]
+            except Exception as e:
+                raise ToolException(f"Unable to get tests due to error: {str(e)}")
+
+            tests = _parse_tests(get_tests_response["results"])
+            total = get_tests_response['total']
+            all_tests.extend(tests)
+
+            if len(all_tests) == total:
+                break
+
+            start_at += self.limit
+
+        return all_tests
+
+    def _execute_graphql_direct(self, graphql: str) -> Any:
+        """Direct method to execute GraphQL and return parsed data"""
+        logger.info(f"[indexing] executing GraphQL query: {graphql}")
+        try:
+            return self._client.execute(query=graphql)
+        except Exception as e:
+            raise ToolException(f"Unable to execute GraphQL due to error: {str(e)}")
+
+    @extend_with_vector_tools
     def get_available_tools(self):
-        return [
+        tools = [
             {
                 "name": "get_tests",
                 "description": self.get_tests.__doc__,
@@ -224,3 +712,6 @@ class XrayApiWrapper(BaseToolApiWrapper):
                 "ref": self.execute_graphql,
             }
         ]
+
+        tools.extend(self._get_vector_search_tools())
+        return tools
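Taken together, these methods give the Xray toolkit the same indexing pipeline as the other vector-store wrappers: _base_loader yields one Document per test (manual steps, gherkin, or unstructured content serialized as JSON), _process_document expands attachments into dependent documents, and @extend_with_vector_tools plus _get_vector_search_tools expose the index/search tools. A hedged usage sketch of the loader alone; constructing the wrapper requires real Xray Cloud credentials:

    wrapper = XrayApiWrapper(
        base_url="https://xray.cloud.getxray.app",
        client_id="<client_id>",
        client_secret="<client_secret>",
    )

    # One Document per test matching the JQL; attachment handling is opt-in.
    for doc in wrapper._base_loader(
        jql='project = "CALC" AND testType = "Manual"',
        include_attachments=False,
    ):
        print(doc.metadata.get("key"), doc.metadata.get("testType"))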
alita_sdk/tools/zephyr_scale/api_wrapper.py
@@ -1,7 +1,7 @@
 import json
 import logging
 import re
-from typing import Any, Optional, List, Dict, Tuple, Union
+from typing import Any, Optional, List, Dict, Tuple, Union, Generator
 
 from pydantic import model_validator, BaseModel, SecretStr
 from langchain_core.tools import ToolException
@@ -1296,16 +1296,16 @@ class ZephyrScaleApiWrapper(BaseVectorStoreToolApiWrapper):
                 if isinstance(v, (str, int, float, bool, list, dict))
             }
             if last_version and isinstance(last_version, dict) and 'createdOn' in last_version:
-                metadata['updated_at'] = last_version['createdOn']
+                metadata['updated_on'] = last_version['createdOn']
             else:
-                metadata['updated_at'] = case['createdOn']
+                metadata['updated_on'] = case['createdOn']
 
             case['type'] = "TEST_CASE"
 
             docs.append(Document(page_content=json.dumps(case), metadata=metadata))
         return docs
 
-    def _process_document(self, document: Document) -> Document:
+    def _process_document(self, document: Document) -> Generator[Document, None, None]:
         try:
             base_data = json.loads(document.page_content)
 
@@ -1314,7 +1314,7 @@ class ZephyrScaleApiWrapper(BaseVectorStoreToolApiWrapper):
             base_data['test_case_content'] = additional_content
 
             document.page_content = json.dumps(base_data)
-            return document
+            yield document
         except json.JSONDecodeError as e:
             raise ToolException(f"Failed to decode JSON from document: {e}")
 
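Note the signature change: _process_document now yields documents instead of returning one, matching the generator contract adopted by the Xray wrapper above. A caller that previously wrote `document = wrapper._process_document(document)` would now iterate; roughly, assuming a `base_documents` list:

    processed = []
    for doc in base_documents:
        # each base document may now expand into several processed documents
        processed.extend(wrapper._process_document(doc))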
{alita_sdk-0.3.209.dist-info → alita_sdk-0.3.210.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: alita_sdk
-Version: 0.3.209
+Version: 0.3.210
 Summary: SDK for building langchain agents using resources from Alita
 Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedjik@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
 License-Expression: Apache-2.0