alita-sdk 0.3.271__py3-none-any.whl → 0.3.273__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59):
  1. alita_sdk/configurations/__init__.py +10 -0
  2. alita_sdk/configurations/ado.py +4 -2
  3. alita_sdk/configurations/azure_search.py +1 -1
  4. alita_sdk/configurations/bigquery.py +1 -1
  5. alita_sdk/configurations/browser.py +18 -0
  6. alita_sdk/configurations/carrier.py +19 -0
  7. alita_sdk/configurations/delta_lake.py +1 -1
  8. alita_sdk/configurations/google_places.py +17 -0
  9. alita_sdk/configurations/postman.py +1 -1
  10. alita_sdk/configurations/qtest.py +1 -3
  11. alita_sdk/configurations/report_portal.py +19 -0
  12. alita_sdk/configurations/salesforce.py +19 -0
  13. alita_sdk/configurations/service_now.py +1 -12
  14. alita_sdk/configurations/sharepoint.py +19 -0
  15. alita_sdk/configurations/sonar.py +18 -0
  16. alita_sdk/configurations/sql.py +20 -0
  17. alita_sdk/configurations/testio.py +18 -0
  18. alita_sdk/configurations/zephyr_essential.py +18 -0
  19. alita_sdk/runtime/langchain/document_loaders/AlitaImageLoader.py +1 -1
  20. alita_sdk/runtime/langchain/document_loaders/AlitaPDFLoader.py +19 -6
  21. alita_sdk/runtime/langchain/document_loaders/ImageParser.py +17 -0
  22. alita_sdk/runtime/tools/vectorstore.py +25 -9
  23. alita_sdk/runtime/tools/vectorstore_base.py +4 -1
  24. alita_sdk/tools/aws/delta_lake/__init__.py +2 -2
  25. alita_sdk/tools/azure_ai/search/__init__.py +1 -1
  26. alita_sdk/tools/base_indexer_toolkit.py +8 -8
  27. alita_sdk/tools/bitbucket/__init__.py +1 -1
  28. alita_sdk/tools/browser/__init__.py +14 -10
  29. alita_sdk/tools/carrier/__init__.py +11 -11
  30. alita_sdk/tools/code/sonar/__init__.py +10 -7
  31. alita_sdk/tools/confluence/__init__.py +1 -1
  32. alita_sdk/tools/elitea_base.py +9 -8
  33. alita_sdk/tools/figma/__init__.py +1 -1
  34. alita_sdk/tools/github/__init__.py +2 -2
  35. alita_sdk/tools/gitlab_org/__init__.py +1 -1
  36. alita_sdk/tools/google/bigquery/__init__.py +1 -1
  37. alita_sdk/tools/google_places/__init__.py +10 -5
  38. alita_sdk/tools/jira/__init__.py +1 -1
  39. alita_sdk/tools/jira/api_wrapper.py +140 -99
  40. alita_sdk/tools/qtest/__init__.py +1 -1
  41. alita_sdk/tools/rally/__init__.py +1 -1
  42. alita_sdk/tools/report_portal/__init__.py +9 -8
  43. alita_sdk/tools/salesforce/__init__.py +9 -8
  44. alita_sdk/tools/servicenow/__init__.py +1 -1
  45. alita_sdk/tools/sharepoint/__init__.py +5 -7
  46. alita_sdk/tools/slack/__init__.py +1 -1
  47. alita_sdk/tools/sql/__init__.py +9 -11
  48. alita_sdk/tools/testio/__init__.py +9 -6
  49. alita_sdk/tools/utils/content_parser.py +59 -24
  50. alita_sdk/tools/xray/api_wrapper.py +60 -101
  51. alita_sdk/tools/zephyr_enterprise/__init__.py +1 -1
  52. alita_sdk/tools/zephyr_essential/__init__.py +5 -4
  53. alita_sdk/tools/zephyr_essential/api_wrapper.py +42 -10
  54. alita_sdk/tools/zephyr_scale/__init__.py +1 -1
  55. {alita_sdk-0.3.271.dist-info → alita_sdk-0.3.273.dist-info}/METADATA +1 -1
  56. {alita_sdk-0.3.271.dist-info → alita_sdk-0.3.273.dist-info}/RECORD +59 -48
  57. {alita_sdk-0.3.271.dist-info → alita_sdk-0.3.273.dist-info}/WHEEL +0 -0
  58. {alita_sdk-0.3.271.dist-info → alita_sdk-0.3.273.dist-info}/licenses/LICENSE +0 -0
  59. {alita_sdk-0.3.271.dist-info → alita_sdk-0.3.273.dist-info}/top_level.txt +0 -0
alita_sdk/tools/xray/api_wrapper.py

@@ -1,7 +1,7 @@
 import json
 import logging
 import hashlib
-from typing import Any, Dict, Generator, List, Optional
+from typing import Any, Dict, Generator, List, Optional, Literal
 
 import requests
 from langchain_core.documents import Document
@@ -13,8 +13,9 @@ from ..elitea_base import (
     BaseVectorStoreToolApiWrapper,
     extend_with_vector_tools,
 )
+from ..non_code_indexer_toolkit import NonCodeIndexerToolkit
 from ...runtime.utils.utils import IndexerKeywords
-from ..utils.content_parser import parse_file_content, load_content_from_bytes
+from ..utils.content_parser import load_file_docs
 
 try:
     from alita_sdk.runtime.langchain.interfaces.llm_processor import get_embeddings
@@ -31,7 +32,7 @@ _get_tests_query = """query GetTests($jql: String!, $limit:Int!, $start: Int)
         limit
         results {
             issueId
-            jira(fields: ["key", "summary", "created", "updated", "assignee.displayName", "reporter.displayName"])
+            jira(fields: ["key", "summary", "description", "created", "updated", "assignee.displayName", "reporter.displayName"])
             projectId
             testType {
                 name
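For orientation, a hedged sketch of how a GetTests query like the one above can be executed against Xray cloud. The /api/v2/graphql path and the bearer-token header are assumptions based on common Xray usage; they are not shown in this diff.

import requests

def fetch_tests(base_url: str, token: str, jql: str, limit: int = 100, start: int = 0) -> dict:
    """POST the GraphQL query with JQL variables and return the parsed JSON body."""
    response = requests.post(
        f"{base_url}/api/v2/graphql",
        headers={"Authorization": f"Bearer {token}"},
        json={"query": _get_tests_query,  # the module-level query string shown above
              "variables": {"jql": jql, "limit": limit, "start": start}},
        timeout=30,
    )
    response.raise_for_status()
    return response.json()

The added "description" field simply rides along in the jira(fields: [...]) projection, so each result's jira dict now carries the issue description for indexing.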
@@ -120,7 +121,7 @@ def _parse_tests(test_results) -> List[Any]:
     return test_results
 
 
-class XrayApiWrapper(BaseVectorStoreToolApiWrapper):
+class XrayApiWrapper(NonCodeIndexerToolkit):
     _default_base_url: str = 'https://xray.cloud.getxray.app'
     base_url: str = ""
     client_id: str = None
@@ -147,7 +148,7 @@ class XrayApiWrapper(BaseVectorStoreToolApiWrapper):
         client_id = values['client_id']
         client_secret = values['client_secret']
         # Authenticate to get the token
-        values['base_url'] = values.get('base_url', '') or cls._default_base_url
+        values['base_url'] = values.get('base_url', '') or cls._default_base_url.default
         auth_url = f"{values['base_url']}/api/v1/authenticate"
         auth_data = {
             "client_id": client_id,
@@ -168,7 +169,7 @@ class XrayApiWrapper(BaseVectorStoreToolApiWrapper):
                 return ToolException(f"Please, check you credentials ({values['client_id']} / {masked_secret}). Unable")
             else:
                 return ToolException(f"Authentication failed: {str(e)}")
-        return values
+        return super().validate_toolkit(values)
 
     def __init__(self, **data):
         super().__init__(**data)
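A cut-down illustration of the validator chaining introduced above: the subclass does its own setup, then defers remaining validation to the parent class instead of returning values directly. Class names mirror the diff; both bodies here are stand-ins.

class NonCodeIndexerToolkitDemo:
    @classmethod
    def validate_toolkit(cls, values: dict) -> dict:
        values.setdefault("doctype", "doc")  # illustrative parent-level default
        return values

class XrayApiWrapperDemo(NonCodeIndexerToolkitDemo):
    @classmethod
    def validate_toolkit(cls, values: dict) -> dict:
        values["base_url"] = values.get("base_url") or "https://xray.cloud.getxray.app"
        return super().validate_toolkit(values)  # parent validation runs last

print(XrayApiWrapperDemo.validate_toolkit({"client_id": "id"}))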
@@ -333,6 +334,7 @@ class XrayApiWrapper(BaseVectorStoreToolApiWrapper):
 
         for test in tests_data:
             page_content = ""
+            content_structure = {}
             test_type_name = test.get("testType", {}).get("name", "").lower()
 
             attachment_ids = []
@@ -359,19 +361,16 @@ class XrayApiWrapper(BaseVectorStoreToolApiWrapper):
                 content_structure = {"steps": steps_content}
                 if attachment_ids:
                     content_structure["attachment_ids"] = sorted(attachment_ids)
-                page_content = json.dumps(content_structure, indent=2)
 
             elif test_type_name == "cucumber" and test.get("gherkin"):
                 content_structure = {"gherkin": test["gherkin"]}
                 if attachment_ids:
                     content_structure["attachment_ids"] = sorted(attachment_ids)
-                page_content = json.dumps(content_structure, indent=2)
 
             elif test.get("unstructured"):
                 content_structure = {"unstructured": test["unstructured"]}
                 if attachment_ids:
                     content_structure["attachment_ids"] = sorted(attachment_ids)
-                page_content = json.dumps(content_structure, indent=2)
 
             metadata = {"doctype": self.doctype}
@@ -382,7 +381,12 @@ class XrayApiWrapper(BaseVectorStoreToolApiWrapper):
 
             if "created" in jira_data:
                 metadata["created_on"] = jira_data["created"]
-
+
+            if jira_data.get("description"):
+                content_structure["description"] = jira_data.get("description")
+
+            page_content = json.dumps(content_structure if content_structure.items() else "", indent=2)
+
             content_hash = hashlib.sha256(page_content.encode('utf-8')).hexdigest()[:16]
             metadata["updated_on"] = content_hash
 
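A minimal sketch of the change-detection idea in this hunk: the indexable content is serialized deterministically and a truncated SHA-256 digest stands in for an "updated_on" marker, so re-indexing can skip tests whose content is unchanged. The helper name is illustrative; the hashing expression mirrors the diff.

import hashlib
import json

def content_fingerprint(content_structure: dict) -> str:
    # empty structures serialize to "" so they all share one stable fingerprint
    page_content = json.dumps(content_structure if content_structure else "", indent=2)
    return hashlib.sha256(page_content.encode("utf-8")).hexdigest()[:16]

# identical content yields an identical fingerprint; any edit changes it
assert content_fingerprint({"steps": []}) == content_fingerprint({"steps": []})
assert content_fingerprint({"steps": []}) != content_fingerprint({"steps": [{"action": "login"}]})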
@@ -407,6 +411,7 @@ class XrayApiWrapper(BaseVectorStoreToolApiWrapper):
             if "attachments" in step and step["attachments"]:
                 for attachment in step["attachments"]:
                     if attachment and "id" in attachment and "filename" in attachment:
+                        attachment['step_id'] = step['id']
                         attachments_data.append(attachment)
         if attachments_data:
             metadata["_attachments_data"] = attachments_data
@@ -430,14 +435,7 @@ class XrayApiWrapper(BaseVectorStoreToolApiWrapper):
             Generator[Document, None, None]: A generator yielding processed Document objects with metadata.
         """
         try:
-            if not getattr(self, '_include_attachments', False):
-                yield document
-                return
-
             attachments_data = document.metadata.get("_attachments_data", [])
-            if not attachments_data:
-                yield document
-                return
 
             issue_id = document.metadata.get("id")
 
@@ -458,44 +456,33 @@ class XrayApiWrapper(BaseVectorStoreToolApiWrapper):
                     ).append(attachment_id)
 
                 try:
-                    content = self._process_attachment(attachment)
-                    if not content or content.startswith("Attachment processing failed"):
-                        logger.warning(f"Skipping attachment {filename} due to processing failure")
-                        continue
+                    attachment_metadata = {
+                        'id': str(attachment_id),
+                        'issue_key': document.metadata.get('key', ''),
+                        'issueId': str(issue_id),
+                        'projectId': document.metadata.get('projectId', ''),
+                        'source': f"xray_test_{issue_id}",
+                        'filename': filename,
+                        'download_link': attachment.get('downloadLink', ''),
+                        'entity_type': 'test_case_attachment',
+                        'step_id': attachment.get('step_id', ''),
+                        'key': document.metadata.get('key', ''),
+                        IndexerKeywords.PARENT.value: document.metadata.get('id', str(issue_id)),
+                        'type': 'attachment',
+                        'doctype': self.doctype,
+                    }
+                    yield from self._process_attachment(attachment, attachment_metadata)
                 except Exception as e:
                     logger.error(f"Failed to process attachment {filename}: {str(e)}")
                     continue
-
-                attachment_metadata = {
-                    'id': str(attachment_id),
-                    'issue_key': document.metadata.get('key', ''),
-                    'issueId': str(issue_id),
-                    'projectId': document.metadata.get('projectId', ''),
-                    'source': f"xray_test_{issue_id}",
-                    'filename': filename,
-                    'download_link': attachment.get('downloadLink', ''),
-                    'entity_type': 'test_case_attachment',
-                    'key': document.metadata.get('key', ''),
-                    IndexerKeywords.PARENT.value: document.metadata.get('id', str(issue_id)),
-                    'type': 'attachment',
-                    'doctype': self.doctype,
-                }
-
-                yield Document(
-                    page_content=content,
-                    metadata=attachment_metadata
-                )
 
             if "_attachments_data" in document.metadata:
                 del document.metadata["_attachments_data"]
 
-            yield document
-
         except Exception as e:
             logger.error(f"Error processing document for attachments: {e}")
-            yield document
 
-    def _process_attachment(self, attachment: Dict[str, Any]) -> str:
+    def _process_attachment(self, attachment: Dict[str, Any], attachment_metadata) -> Generator[Document, None, None]:
         """
         Processes an attachment to extract its content.
 
@@ -508,38 +495,17 @@ class XrayApiWrapper(BaseVectorStoreToolApiWrapper):
         try:
             download_link = attachment.get('downloadLink')
             filename = attachment.get('filename', '')
-
-            if not download_link:
-                return f"Attachment: {filename} (no download link available)"
 
             try:
                 auth_token = self._ensure_auth_token()
                 headers = {'Authorization': f'Bearer {auth_token}'}
                 response = requests.get(download_link, headers=headers, timeout=30)
                 response.raise_for_status()
-
-                ext = f".{filename.split('.')[-1].lower()}" if filename and '.' in filename else ""
-
-                if ext == '.pdf':
-                    content = parse_file_content(
-                        file_content=response.content,
-                        file_name=filename,
-                        llm=self.llm,
-                        is_capture_image=True
-                    )
-                else:
-                    content = load_content_from_bytes(
-                        response.content,
-                        ext,
-                        llm=self.llm
-                    )
-
-                if content:
-                    return f"filename: {filename}\ncontent: {content}"
-                else:
-                    logger.warning(f"No content extracted from attachment {filename}")
-                    return f"filename: {filename}\ncontent: [No extractable content]"
-
+
+                yield from self._load_attachment(content=response.content,
+                                                 file_name=filename,
+                                                 attachment_metadata=attachment_metadata)
+
             except requests.RequestException as req_e:
                 logger.error(f"Unable to download attachment {filename} with existing token: {req_e}")
 
@@ -560,23 +526,13 @@ class XrayApiWrapper(BaseVectorStoreToolApiWrapper):
                     fresh_headers = {'Authorization': f'Bearer {fresh_token}'}
                     response = requests.get(download_link, headers=fresh_headers, timeout=60)
                     response.raise_for_status()
-
-                    ext = f".{filename.split('.')[-1].lower()}" if filename and '.' in filename else ""
-                    content = parse_file_content(
-                        file_content=response.content,
-                        file_name=filename,
-                        llm=self.llm,
-                        is_capture_image=True
-                    ) if ext == '.pdf' else load_content_from_bytes(response.content, ext, llm=self.llm)
-
-                    if content:
-                        return f"filename: {filename}\ncontent: {content}"
-                    else:
-                        return f"filename: {filename}\ncontent: [Content extraction failed after re-auth]"
+
+                    yield from self._load_attachment(content=response.content,
+                                                     file_name=filename,
+                                                     attachment_metadata=attachment_metadata)
 
                 except Exception as reauth_e:
                     logger.error(f"Re-authentication and retry failed for {filename}: {reauth_e}")
-                    return f"Attachment: {filename} (download failed: {str(req_e)}, re-auth failed: {str(reauth_e)})"
             else:
                 try:
                     auth_token = self._ensure_auth_token()
@@ -587,31 +543,32 @@ class XrayApiWrapper(BaseVectorStoreToolApiWrapper):
                     }
                     response = requests.get(download_link, headers=fallback_headers, timeout=60)
                     response.raise_for_status()
-
-                    ext = f".{filename.split('.')[-1].lower()}" if filename and '.' in filename else ""
-                    content = parse_file_content(
-                        file_content=response.content,
-                        file_name=filename,
-                        llm=self.llm,
-                        is_capture_image=True
-                    ) if ext == '.pdf' else load_content_from_bytes(response.content, ext, llm=self.llm)
-
-                    if content:
-                        return f"filename: {filename}\ncontent: {content}"
-                    else:
-                        return f"filename: {filename}\ncontent: [Content extraction failed after fallback]"
+
+                    yield from self._load_attachment(content=response.content,
+                                                     file_name=filename,
+                                                     attachment_metadata=attachment_metadata)
 
                 except Exception as fallback_e:
                     logger.error(f"Fallback download also failed for {filename}: {fallback_e}")
-                    return f"Attachment: {filename} (download failed: {str(req_e)}, fallback failed: {str(fallback_e)})"
 
         except Exception as parse_e:
             logger.error(f"Unable to parse attachment {filename}: {parse_e}")
-            return f"Attachment: {filename} (parsing failed: {str(parse_e)})"
 
         except Exception as e:
             logger.error(f"Error processing attachment: {e}")
-            return f"Attachment processing failed: {str(e)}"
+
+    def _load_attachment(self, content, file_name, attachment_metadata) -> Generator[Document, None, None]:
+        content_docs = load_file_docs(file_content=content, file_name=file_name,
+                                      llm=self.llm, is_capture_image=True, excel_by_sheets=True)
+
+        if not content_docs or isinstance(content_docs, ToolException):
+            return
+        for doc in content_docs:
+            yield Document(page_content=doc.page_content,
+                           metadata={
+                               **doc.metadata,
+                               **attachment_metadata
+                           })
 
     def _index_tool_params(self, **kwargs) -> dict[str, tuple[type, Field]]:
         return {
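A hedged sketch of the generator pipeline these hunks converge on: attachment parsing now yields one Document per parsed chunk (for example, per Excel sheet), each carrying the parser's own metadata merged with the attachment-level metadata. Names below (merge_metadata, fake_docs, the demo metadata keys) are illustrative, not from the package.

from typing import Generator, Iterable
from langchain_core.documents import Document

def merge_metadata(docs: Iterable[Document], attachment_metadata: dict) -> Generator[Document, None, None]:
    for doc in docs:
        # attachment-level keys win on collision, mirroring the **-merge order in the diff
        yield Document(page_content=doc.page_content,
                       metadata={**doc.metadata, **attachment_metadata})

fake_docs = [Document(page_content="sheet 1 rows", metadata={"sheet": "Sheet1"})]
for d in merge_metadata(fake_docs, {"type": "attachment", "filename": "plan.xlsx"}):
    print(d.metadata)  # {'sheet': 'Sheet1', 'type': 'attachment', 'filename': 'plan.xlsx'}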
@@ -649,6 +606,8 @@ class XrayApiWrapper(BaseVectorStoreToolApiWrapper):
             'skip_attachment_extensions': (Optional[List[str]], Field(
                 description="List of file extensions to skip when processing attachments (e.g., ['.exe', '.zip', '.bin'])",
                 default=None)),
+            'chunking_tool': (Literal['json'],
+                              Field(description="Name of chunking tool for base document", default='json')),
         }
 
     def _get_tests_direct(self, jql: str) -> List[Dict]:
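A small sketch of how a Literal-typed parameter like 'chunking_tool' surfaces in a dynamically built pydantic model; the create_model call mirrors the pattern used throughout this package, but the model name here is hypothetical.

from typing import Literal
from pydantic import Field, create_model

IndexParams = create_model(
    "IndexParams",
    chunking_tool=(Literal["json"], Field(description="Name of chunking tool for base document", default="json")),
)

print(IndexParams().chunking_tool)   # "json" — the default applies when omitted
IndexParams(chunking_tool="json")    # accepted
# IndexParams(chunking_tool="text")  # would raise ValidationError: not in the Literal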
alita_sdk/tools/zephyr_enterprise/__init__.py

@@ -36,7 +36,7 @@ class ZephyrEnterpriseToolkit(BaseToolkit):
         ZephyrEnterpriseToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
-            zephyr_configuration=(Optional[ZephyrEnterpriseConfiguration], Field(description="Zephyr Configuration", json_schema_extra={'configuration_types': ['zephyr-enterprise']})),
+            zephyr_configuration=(ZephyrEnterpriseConfiguration, Field(description="Zephyr Configuration", json_schema_extra={'configuration_types': ['zephyr-enterprise']})),
             pgvector_configuration=(Optional[PgVectorConfiguration], Field(description="PgVector Configuration",
                                                                            json_schema_extra={
                                                                                'configuration_types': ['pgvector']},
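Dropping Optional[...] here makes the configuration block mandatory. A hedged illustration with a stand-in model, since ZephyrEnterpriseConfiguration's real fields are not shown in this diff:

from pydantic import BaseModel, ValidationError, create_model

class DemoConfiguration(BaseModel):
    url: str = ""

# (DemoConfiguration, ...) declares a required field, as the diff now does
RequiredModel = create_model("RequiredModel", zephyr_configuration=(DemoConfiguration, ...))

try:
    RequiredModel()  # omitting the configuration now fails validation
except ValidationError as e:
    print(e.errors()[0]["type"])  # "missing"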
alita_sdk/tools/zephyr_essential/__init__.py

@@ -1,19 +1,20 @@
 from typing import List, Literal, Optional
 
 from langchain_core.tools import BaseToolkit, BaseTool
-from pydantic import create_model, BaseModel, Field, SecretStr
+from pydantic import create_model, BaseModel, Field
 
 from .api_wrapper import ZephyrEssentialApiWrapper
 from ..base.tool import BaseAction
 from ..utils import clean_string, TOOLKIT_SPLITTER, get_max_toolkit_length
 from ...configurations.pgvector import PgVectorConfiguration
+from ...configurations.zephyr_essential import ZephyrEssentialConfiguration
 
 name = "zephyr_essential"
 
 def get_tools(tool):
     return ZephyrEssentialToolkit().get_toolkit(
         selected_tools=tool['settings'].get('selected_tools', []),
-        token=tool['settings']["token"],
+        zephyr_essential_configuration=tool['settings']['zephyr_essential_configuration'],
         toolkit_name=tool.get('toolkit_name'),
         llm = tool['settings'].get('llm', None),
         alita=tool['settings'].get('alita', None),
@@ -35,8 +36,7 @@ class ZephyrEssentialToolkit(BaseToolkit):
         ZephyrEssentialToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
-            token=(SecretStr, Field(description="Bearer api token")),
-            base_url=(Optional[str], Field(description="Zephyr Essential base url", default=None)),
+            zephyr_essential_configuration=(ZephyrEssentialConfiguration, Field(description="Zephyr Essential Configuration", json_schema_extra={'configuration_types': ['zephyr-essential']})),
             selected_tools=(List[Literal[tuple(selected_tools)]], Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
             pgvector_configuration=(Optional[PgVectorConfiguration], Field(default=None,
                                                                            description="PgVector Configuration",
@@ -56,6 +56,7 @@ class ZephyrEssentialToolkit(BaseToolkit):
         selected_tools = []
         wrapper_payload = {
             **kwargs,
+            **kwargs.get('zephyr_essential_configuration', {}),
             **(kwargs.get('pgvector_configuration') or {}),
         }
         zephyr_api_wrapper = ZephyrEssentialApiWrapper(**wrapper_payload)
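A sketch of the payload merge above: nested configuration dicts are flattened into the wrapper kwargs, with later ** entries overriding earlier ones on key collision. The keys and values here are illustrative.

settings = {
    "selected_tools": ["list_test_cases"],
    "zephyr_essential_configuration": {"token": "secret-token", "base_url": "https://zephyr.example"},
    "pgvector_configuration": None,
}

wrapper_payload = {
    **settings,
    **settings.get("zephyr_essential_configuration", {}),
    **(settings.get("pgvector_configuration") or {}),  # "or {}" guards against None
}

print(wrapper_payload["token"])  # "secret-token", flattened from the nested configuration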
alita_sdk/tools/zephyr_essential/api_wrapper.py

@@ -657,13 +657,43 @@ CreateTestCaseTestSteps = create_model(
     test_case_key=(str, Field(description="Key of the test case to create test steps for.")),
     json=(str, Field(description=("""
     JSON body to create test steps. Example:
-    [
-        {
-            "step": "Step 1",
-            "data": "Test Data",
-            "result": "Expected Result"
-        }
-    ]
+    {
+        "mode": "APPEND",
+        "items": [
+            {
+                "inline": {
+                    "description": "Attempt to login to the application",
+                    "testData": "Username = SmartBear Password = weLoveAtlassian",
+                    "expectedResult": "Login succeeds, web-app redirects to the dashboard view",
+                    "customFields": {
+                        "Build Number": 20,
+                        "Release Date": "2020-01-01",
+                        "Pre-Condition(s)": "User should have logged in. <br> User should have navigated to the administration panel.",
+                        "Implemented": false,
+                        "Category": [
+                            "Performance",
+                            "Regression"
+                        ],
+                        "Tester": "fa2e582e-5e15-521e-92e3-47e6ca2e7256"
+                    }
+                },
+                "testCase": {
+                    "self": "string",
+                    "testCaseKey": "PROJ-T123",
+                    "parameters": [
+                        {
+                            "name": "username",
+                            "type": "DEFAULT_VALUE",
+                            "value": "admin"
+                        }
+                    ]
+                }
+            }
+        ]
+    }
+    Where:
+    mode: str - required - Valid values: "APPEND", "OVERWRITE"
+    items - The list of test steps. Each step should be an object containing inline or testCase. They should only include one of these fields at a time.
     """
     )))
 )
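A hypothetical helper for assembling the documented test-steps body; the field names come from the example above, while the helper itself and its validation are assumptions for illustration only.

import json

def build_test_steps_body(items: list, mode: str = "APPEND") -> str:
    if mode not in ("APPEND", "OVERWRITE"):
        raise ValueError("mode must be 'APPEND' or 'OVERWRITE'")
    for item in items:
        # the docs say each item carries exactly one of 'inline' or 'testCase'
        if ("inline" in item) == ("testCase" in item):
            raise ValueError("each item must contain exactly one of 'inline' or 'testCase'")
    return json.dumps({"mode": mode, "items": items})

body = build_test_steps_body([
    {"inline": {"description": "Attempt to login", "expectedResult": "Dashboard shown"}},
])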
@@ -848,10 +878,12 @@ CreateFolder = create_model(
     json=(str, Field(description=("""
     JSON body to create a folder. Example:
     {
-        "name": "Folder Name",
-        "description": "Folder Description",
-        "projectKey": "PROJECT_KEY"
+        "parentId": 24389289,
+        "name": "ZephyrEssential_test",
+        "projectKey": "EL",
+        "folderType": "TEST_CASE"
     }
+    Possible folder types: "TEST_CASE", "TEST_PLAN", "TEST_CYCLE"
     """
     )))
 )
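The folder schema above in a quick sketch; the helper is hypothetical, the values illustrative, and folderType is restricted to the three documented kinds.

import json
from typing import Optional

FOLDER_TYPES = ("TEST_CASE", "TEST_PLAN", "TEST_CYCLE")

def build_folder_body(name: str, project_key: str, folder_type: str,
                      parent_id: Optional[int] = None) -> str:
    if folder_type not in FOLDER_TYPES:
        raise ValueError(f"folderType must be one of {FOLDER_TYPES}")
    payload = {"name": name, "projectKey": project_key, "folderType": folder_type}
    if parent_id is not None:
        payload["parentId"] = parent_id  # omit for a top-level folder
    return json.dumps(payload)

print(build_folder_body("ZephyrEssential_test", "EL", "TEST_CASE", parent_id=24389289))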
alita_sdk/tools/zephyr_scale/__init__.py

@@ -40,7 +40,7 @@ class ZephyrScaleToolkit(BaseToolkit):
         return create_model(
             name,
             max_results=(int, Field(default=100, description="Results count to show")),
-            zephyr_configuration=(Optional[ZephyrConfiguration], Field(description="Zephyr Configuration",
+            zephyr_configuration=(ZephyrConfiguration, Field(description="Zephyr Configuration",
                                                                        json_schema_extra={'configuration_types': ['zephyr']})),
             pgvector_configuration=(Optional[PgVectorConfiguration], Field(default=None, description="PgVector Configuration",
                                                                            json_schema_extra={
alita_sdk-0.3.273.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: alita_sdk
-Version: 0.3.271
+Version: 0.3.273
 Summary: SDK for building langchain agents using resources from Alita
 Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedjik@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
 License-Expression: Apache-2.0