alita-sdk 0.3.270__py3-none-any.whl → 0.3.272__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. alita_sdk/configurations/__init__.py +10 -0
  2. alita_sdk/configurations/ado.py +4 -2
  3. alita_sdk/configurations/azure_search.py +1 -1
  4. alita_sdk/configurations/bigquery.py +1 -1
  5. alita_sdk/configurations/browser.py +18 -0
  6. alita_sdk/configurations/carrier.py +19 -0
  7. alita_sdk/configurations/delta_lake.py +1 -1
  8. alita_sdk/configurations/google_places.py +17 -0
  9. alita_sdk/configurations/postman.py +1 -1
  10. alita_sdk/configurations/qtest.py +1 -3
  11. alita_sdk/configurations/report_portal.py +19 -0
  12. alita_sdk/configurations/salesforce.py +19 -0
  13. alita_sdk/configurations/service_now.py +1 -12
  14. alita_sdk/configurations/sharepoint.py +19 -0
  15. alita_sdk/configurations/sonar.py +18 -0
  16. alita_sdk/configurations/sql.py +20 -0
  17. alita_sdk/configurations/testio.py +18 -0
  18. alita_sdk/configurations/zephyr_essential.py +18 -0
  19. alita_sdk/runtime/langchain/document_loaders/AlitaImageLoader.py +1 -1
  20. alita_sdk/runtime/langchain/document_loaders/AlitaPDFLoader.py +19 -6
  21. alita_sdk/runtime/langchain/document_loaders/ImageParser.py +17 -0
  22. alita_sdk/runtime/tools/vectorstore.py +21 -5
  23. alita_sdk/runtime/tools/vectorstore_base.py +4 -1
  24. alita_sdk/tools/aws/delta_lake/__init__.py +2 -2
  25. alita_sdk/tools/azure_ai/search/__init__.py +1 -1
  26. alita_sdk/tools/bitbucket/__init__.py +9 -4
  27. alita_sdk/tools/bitbucket/api_wrapper.py +1 -1
  28. alita_sdk/tools/bitbucket/cloud_api_wrapper.py +5 -5
  29. alita_sdk/tools/browser/__init__.py +14 -10
  30. alita_sdk/tools/carrier/__init__.py +11 -11
  31. alita_sdk/tools/code/sonar/__init__.py +10 -7
  32. alita_sdk/tools/confluence/__init__.py +1 -1
  33. alita_sdk/tools/figma/__init__.py +1 -1
  34. alita_sdk/tools/github/__init__.py +2 -2
  35. alita_sdk/tools/gitlab_org/__init__.py +1 -1
  36. alita_sdk/tools/google/bigquery/__init__.py +1 -1
  37. alita_sdk/tools/google_places/__init__.py +10 -5
  38. alita_sdk/tools/jira/__init__.py +1 -1
  39. alita_sdk/tools/jira/api_wrapper.py +140 -99
  40. alita_sdk/tools/qtest/__init__.py +1 -1
  41. alita_sdk/tools/rally/__init__.py +1 -1
  42. alita_sdk/tools/report_portal/__init__.py +9 -8
  43. alita_sdk/tools/salesforce/__init__.py +9 -8
  44. alita_sdk/tools/servicenow/__init__.py +1 -1
  45. alita_sdk/tools/sharepoint/__init__.py +5 -7
  46. alita_sdk/tools/slack/__init__.py +1 -1
  47. alita_sdk/tools/sql/__init__.py +9 -11
  48. alita_sdk/tools/testio/__init__.py +9 -6
  49. alita_sdk/tools/utils/content_parser.py +59 -24
  50. alita_sdk/tools/zephyr_enterprise/__init__.py +1 -1
  51. alita_sdk/tools/zephyr_essential/__init__.py +4 -3
  52. alita_sdk/tools/zephyr_essential/api_wrapper.py +42 -10
  53. alita_sdk/tools/zephyr_scale/__init__.py +1 -1
  54. {alita_sdk-0.3.270.dist-info → alita_sdk-0.3.272.dist-info}/METADATA +1 -1
  55. {alita_sdk-0.3.270.dist-info → alita_sdk-0.3.272.dist-info}/RECORD +58 -47
  56. {alita_sdk-0.3.270.dist-info → alita_sdk-0.3.272.dist-info}/WHEEL +0 -0
  57. {alita_sdk-0.3.270.dist-info → alita_sdk-0.3.272.dist-info}/licenses/LICENSE +0 -0
  58. {alita_sdk-0.3.270.dist-info → alita_sdk-0.3.272.dist-info}/top_level.txt +0 -0
alita_sdk/tools/report_portal/__init__.py
@@ -2,20 +2,19 @@ from typing import List, Literal, Optional
 
 from langchain_core.tools import BaseToolkit, BaseTool
 
-from pydantic import create_model, BaseModel, ConfigDict, Field, SecretStr
+from pydantic import create_model, BaseModel, ConfigDict, Field
 
 from .api_wrapper import ReportPortalApiWrapper
 from ..base.tool import BaseAction
 from ..utils import clean_string, TOOLKIT_SPLITTER, get_max_toolkit_length
+from ...configurations.report_portal import ReportPortalConfiguration
 
 name = "report_portal"
 
 def get_tools(tool):
     return ReportPortalToolkit().get_toolkit(
         selected_tools=tool['settings'].get('selected_tools', []),
-        endpoint=tool['settings']['endpoint'],
-        api_key=tool['settings']['api_key'],
-        project=tool['settings']['project'],
+        report_portal_configuration=tool['settings']['report_portal_configuration'],
         toolkit_name=tool.get('toolkit_name')
     ).get_tools()
 
@@ -30,9 +29,7 @@ class ReportPortalToolkit(BaseToolkit):
         ReportPortalToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
-            endpoint=(str, Field(description="Report Portal endpoint", json_schema_extra={'toolkit_name': True, 'max_toolkit_length': ReportPortalToolkit.toolkit_max_length})),
-            project=(str, Field(description="Report Portal project")),
-            api_key=(SecretStr, Field(description="User API key", json_schema_extra={'secret': True})),
+            report_portal_configuration=(ReportPortalConfiguration, Field(description="Report Portal Configuration", json_schema_extra={'configuration_types': ['report_portal']})),
             selected_tools=(List[Literal[tuple(selected_tools)]], Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
             __config__=ConfigDict(json_schema_extra={'metadata': {"label": "Report Portal", "icon_url": "reportportal-icon.svg",
                                                                   "categories": ["testing"],
@@ -43,7 +40,11 @@ class ReportPortalToolkit(BaseToolkit):
     def get_toolkit(cls, selected_tools: list[str] | None = None, toolkit_name: Optional[str] = None, **kwargs):
         if selected_tools is None:
             selected_tools = []
-        report_portal_api_wrapper = ReportPortalApiWrapper(**kwargs)
+        wrapper_payload = {
+            **kwargs,
+            **kwargs.get('report_portal_configuration', {}),
+        }
+        report_portal_api_wrapper = ReportPortalApiWrapper(**wrapper_payload)
         prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         available_tools = report_portal_api_wrapper.get_available_tools()
         tools = []
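The same configuration-extraction pattern (merge the toolkit kwargs with the nested *_configuration dict before building the API wrapper) repeats across the toolkits below. A minimal sketch of the settings payload this implies, assuming ReportPortalConfiguration carries the endpoint/project/api_key fields that were previously passed as top-level settings (the nested field names and values are illustrative, not taken from this diff):

    # Hypothetical settings shape after the 0.3.272 change (values are placeholders)
    tool = {
        "toolkit_name": "rp",
        "settings": {
            "selected_tools": [],
            "report_portal_configuration": {
                "endpoint": "https://reportportal.example.com",
                "project": "my_project",
                "api_key": "***",
            },
        },
    }
    # get_tools(tool) now flattens the nested dict into the wrapper kwargs:
    # ReportPortalApiWrapper(**{**kwargs, **kwargs.get('report_portal_configuration', {})})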
alita_sdk/tools/salesforce/__init__.py
@@ -2,17 +2,16 @@ from typing import List, Optional, Literal
 from .api_wrapper import SalesforceApiWrapper
 from langchain_core.tools import BaseTool, BaseToolkit
 from ..base.tool import BaseAction
-from pydantic import create_model, BaseModel, ConfigDict, Field, SecretStr
+from pydantic import create_model, BaseModel, ConfigDict, Field
 from ..utils import clean_string, TOOLKIT_SPLITTER,get_max_toolkit_length
+from ...configurations.salesforce import SalesforceConfiguration
 
 name = "salesforce"
 
 def get_tools(tool):
     return SalesforceToolkit().get_toolkit(
         selected_tools=tool['settings'].get('selected_tools', []),
-        base_url=tool['settings'].get('base_url'),
-        client_id=tool['settings'].get('client_id'),
-        client_secret=tool['settings'].get('client_secret'),
+        salesforce_configuration=tool['settings']['salesforce_configuration'],
         api_version=tool['settings'].get('api_version', 'v59.0')
     ).get_tools()
 
@@ -25,10 +24,8 @@ class SalesforceToolkit(BaseToolkit):
         SalesforceToolkit.toolkit_max_length = get_max_toolkit_length(available_tools)
         return create_model(
             name,
-            base_url=(str, Field(description="Salesforce instance URL", json_schema_extra={'toolkit_name': True})),
-            client_id=(str, Field(description="Salesforce Connected App Client ID")),
-            client_secret=(SecretStr, Field(description="Salesforce Connected App Client Secret", json_schema_extra={'secret': True})),
             api_version=(str, Field(description="Salesforce API Version", default='v59.0')),
+            salesforce_configuration=(SalesforceConfiguration, Field(description="Salesforce Configuration", json_schema_extra={'configuration_types': ['salesforce']})),
             selected_tools=(List[Literal[tuple(available_tools)]], Field(default=[], json_schema_extra={'args_schemas': available_tools})),
             __config__=ConfigDict(json_schema_extra={'metadata': {
                 "label": "Salesforce", "icon_url": "salesforce-icon.svg",
@@ -42,7 +39,11 @@ class SalesforceToolkit(BaseToolkit):
         if selected_tools is None:
             selected_tools = []
 
-        api_wrapper = SalesforceApiWrapper(**kwargs)
+        wrapper_payload = {
+            **kwargs,
+            **kwargs.get('salesforce_configuration', {}),
+        }
+        api_wrapper = SalesforceApiWrapper(**wrapper_payload)
         prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         tools = []
 
alita_sdk/tools/servicenow/__init__.py
@@ -37,7 +37,7 @@ class ServiceNowToolkit(BaseToolkit):
                                      json_schema_extra={
                                          'toolkit_name': True, 'max_toolkit_length': ServiceNowToolkit.toolkit_max_length})),
             response_fields=(Optional[str], Field(description="Response fields", default=None)),
-            servicenow_configuration=(Optional[ServiceNowConfiguration], Field(description="ServiceNow Configuration",
+            servicenow_configuration=(ServiceNowConfiguration, Field(description="ServiceNow Configuration",
                                                                                json_schema_extra={
                                                                                    'configuration_types': [
                                                                                        'service_now']})),
alita_sdk/tools/sharepoint/__init__.py
@@ -1,11 +1,12 @@
 from typing import List, Literal, Optional
 
 from langchain_core.tools import BaseToolkit, BaseTool
-from pydantic import create_model, BaseModel, ConfigDict, Field, SecretStr
+from pydantic import create_model, BaseModel, ConfigDict, Field
 from .api_wrapper import SharepointApiWrapper
 from ..base.tool import BaseAction
 from ..utils import clean_string, TOOLKIT_SPLITTER, get_max_toolkit_length
 from ...configurations.pgvector import PgVectorConfiguration
+from ...configurations.sharepoint import SharepointConfiguration
 
 name = "sharepoint"
 
@@ -13,9 +14,7 @@ def get_tools(tool):
     return (SharepointToolkit()
             .get_toolkit(
                 selected_tools=tool['settings'].get('selected_tools', []),
-                site_url=tool['settings'].get('site_url', None),
-                client_id=tool['settings'].get('client_id', None),
-                client_secret=tool['settings'].get('client_secret', None),
+                sharepoint_configuration=tool['settings']['sharepoint_configuration'],
                 toolkit_name=tool.get('toolkit_name'),
                 llm=tool['settings'].get('llm'),
                 alita=tool['settings'].get('alita', None),
@@ -37,9 +36,7 @@ class SharepointToolkit(BaseToolkit):
         SharepointToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
-            site_url=(str, Field(description="Sharepoint site's URL", json_schema_extra={'toolkit_name': True, 'max_toolkit_length': SharepointToolkit.toolkit_max_length})),
-            client_id=(str, Field(description="Client ID")),
-            client_secret=(SecretStr, Field(description="Client Secret", json_schema_extra={'secret': True})),
+            sharepoint_configuration=(SharepointConfiguration, Field(description="SharePoint Configuration", json_schema_extra={'configuration_types': ['sharepoint']})),
             selected_tools=(List[Literal[tuple(selected_tools)]], Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
             # indexer settings
             pgvector_configuration=(Optional[PgVectorConfiguration], Field(default=None,
@@ -61,6 +58,7 @@ class SharepointToolkit(BaseToolkit):
             selected_tools = []
         wrapper_payload = {
             **kwargs,
+            **kwargs.get('sharepoint_configuration', {}),
             **(kwargs.get('pgvector_configuration') or {}),
         }
         sharepoint_api_wrapper = SharepointApiWrapper(**wrapper_payload)
alita_sdk/tools/slack/__init__.py
@@ -50,7 +50,7 @@ class SlackToolkit(BaseToolkit):
 
         model = create_model(
             name,
-            slack_configuration=(Optional[SlackConfiguration], Field(default=None, description="Slack configuration",
+            slack_configuration=(SlackConfiguration, Field(default=None, description="Slack configuration",
                                                                      json_schema_extra={'configuration_types': ['slack']})),
             selected_tools=(List[Literal[tuple(selected_tools)]],
                             Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
alita_sdk/tools/sql/__init__.py
@@ -1,12 +1,13 @@
 from typing import List, Literal, Optional
 
 from langchain_core.tools import BaseToolkit, BaseTool
-from pydantic import create_model, BaseModel, ConfigDict, Field, SecretStr
+from pydantic import create_model, BaseModel, ConfigDict, Field
 
 from .api_wrapper import SQLApiWrapper
 from ..base.tool import BaseAction
 from .models import SQLDialect
 from ..utils import TOOLKIT_SPLITTER, clean_string, get_max_toolkit_length
+from ...configurations.sql import SqlConfiguration
 
 name = "sql"
 
@@ -14,11 +15,8 @@ def get_tools(tool):
     return SQLToolkit().get_toolkit(
         selected_tools=tool['settings'].get('selected_tools', []),
         dialect=tool['settings']['dialect'],
-        host=tool['settings']['host'],
-        port=tool['settings']['port'],
-        username=tool['settings']['username'],
-        password=tool['settings']['password'],
         database_name=tool['settings']['database_name'],
+        sql_configuration=tool['settings']['sql_configuration'],
         toolkit_name=tool.get('toolkit_name')
     ).get_tools()
 
@@ -35,11 +33,8 @@ class SQLToolkit(BaseToolkit):
         return create_model(
             name,
             dialect=(Literal[tuple(supported_dialects)], Field(description="Database dialect (mysql or postgres)")),
-            host=(str, Field(description="Database server address")),
-            port=(str, Field(description="Database server port")),
-            username=(str, Field(description="Database username")),
-            password=(SecretStr, Field(description="Database password", json_schema_extra={'secret': True})),
             database_name=(str, Field(description="Database name", json_schema_extra={'toolkit_name': True, 'max_toolkit_length': SQLToolkit.toolkit_max_length})),
+            sql_configuration=(SqlConfiguration, Field(description="SQL Configuration", json_schema_extra={'configuration_types': ['sql']})),
             selected_tools=(List[Literal[tuple(selected_tools)]], Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
             __config__=ConfigDict(json_schema_extra=
                                   {
@@ -54,7 +49,11 @@ class SQLToolkit(BaseToolkit):
     def get_toolkit(cls, selected_tools: list[str] | None = None, toolkit_name: Optional[str] = None, **kwargs):
         if selected_tools is None:
             selected_tools = []
-        sql_api_wrapper = SQLApiWrapper(**kwargs)
+        wrapper_payload = {
+            **kwargs,
+            **kwargs.get('sql_configuration', {}),
+        }
+        sql_api_wrapper = SQLApiWrapper(**wrapper_payload)
         prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         available_tools = sql_api_wrapper.get_available_tools()
         tools = []
@@ -71,4 +70,3 @@ class SQLToolkit(BaseToolkit):
 
     def get_tools(self) -> list[BaseTool]:
         return self.tools
-
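Worth noting in the SQL change: dialect and database_name stay top-level, while the connection fields move into sql_configuration. A hedged sketch of the resulting settings; the nested key names are assumed to mirror the removed top-level keys (host, port, username, password), since alita_sdk/configurations/sql.py, which defines SqlConfiguration, is not shown in this excerpt:

    # Illustrative only - key names inside sql_configuration are assumed
    settings = {
        "dialect": "postgres",
        "database_name": "analytics",
        "sql_configuration": {
            "host": "db.example.com",
            "port": "5432",
            "username": "reader",
            "password": "***",
        },
    }
    # SQLToolkit().get_toolkit(**settings) flattens the nested dict before building the wrapper:
    # SQLApiWrapper(**{**settings, **settings.get('sql_configuration', {})})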
alita_sdk/tools/testio/__init__.py
@@ -1,19 +1,19 @@
 from typing import List, Literal, Optional
 
 from langchain_core.tools import BaseToolkit, BaseTool
-from pydantic import create_model, BaseModel, ConfigDict, Field, SecretStr
+from pydantic import create_model, BaseModel, ConfigDict, Field
 
 from .api_wrapper import TestIOApiWrapper
 from ..base.tool import BaseAction
 from ..utils import clean_string, TOOLKIT_SPLITTER, get_max_toolkit_length
+from ...configurations.testio import TestIOConfiguration
 
 name = "testio"
 
 def get_tools(tool):
     return TestIOToolkit().get_toolkit(
         selected_tools=tool['settings'].get('selected_tools', []),
-        endpoint=tool['settings']['endpoint'],
-        api_key=tool['settings']['api_key'],
+        testio_configuration=tool['settings']['testio_configuration'],
         toolkit_name=tool['toolkit_name']
     ).get_tools()
 
@@ -29,8 +29,7 @@ class TestIOToolkit(BaseToolkit):
         selected_tools = {x['name']: x['args_schema'].schema() for x in TestIOApiWrapper.model_construct().get_available_tools()}
         return create_model(
             name,
-            endpoint=(str, Field(description="TestIO endpoint", json_schema_extra={'toolkit_name': True, 'max_toolkit_length': TOOLKIT_MAX_LENGTH})),
-            api_key=(SecretStr, Field(description="API key", json_schema_extra={'secret': True})),
+            testio_configuration=(TestIOConfiguration, Field(description="TestIO Configuration", json_schema_extra={'configuration_types': ['testio']})),
             selected_tools=(List[Literal[tuple(selected_tools)]], Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
             __config__=ConfigDict(json_schema_extra={'metadata': {"label": "TestIO", "icon_url": "testio-icon.svg",
                                                                   "categories": ["testing"],
@@ -41,7 +40,11 @@ class TestIOToolkit(BaseToolkit):
     def get_toolkit(cls, selected_tools: list[str] | None = None, toolkit_name: Optional[str] = None, **kwargs):
         if selected_tools is None:
             selected_tools = []
-        testio_api_wrapper = TestIOApiWrapper(**kwargs)
+        wrapper_payload = {
+            **kwargs,
+            **kwargs.get('testio_configuration', {}),
+        }
+        testio_api_wrapper = TestIOApiWrapper(**wrapper_payload)
         prefix = clean_string(toolkit_name, TOOLKIT_MAX_LENGTH) + TOOLKIT_SPLITTER if toolkit_name else ''
         available_tools = testio_api_wrapper.get_available_tools()
         tools = []
alita_sdk/tools/utils/content_parser.py
@@ -2,7 +2,7 @@ import os
 import tempfile
 from logging import getLogger
 from pathlib import Path
-from typing import Generator
+from typing import Generator, List
 
 from langchain_core.documents import Document
 from langchain_core.tools import ToolException
@@ -73,17 +73,56 @@ def parse_file_content(file_name=None, file_content=None, is_capture_image: bool
     Raises:
         ToolException: If the file type is not supported or if there is an error reading the file.
     """
+    loader = prepare_loader(
+        file_name=file_name,
+        file_content=file_content,
+        is_capture_image=is_capture_image,
+        page_number=page_number,
+        sheet_name=sheet_name,
+        llm=llm,
+        file_path=file_path,
+        excel_by_sheets=excel_by_sheets
+    )
 
-    if (file_path and (file_name or file_content)) or (not file_path and (not file_name or file_content is None)):
-        raise ToolException("Either (file_name and file_content) or file_path must be provided, but not both.")
+    if not loader:
+        return ToolException(
+            "Not supported type of files entered. Supported types are TXT, DOCX, PDF, PPTX, XLSX and XLS only.")
 
-    extension = Path(file_path if file_path else file_name).suffix
+    if hasattr(loader, 'get_content'):
+        return loader.get_content()
+    else:
+        extension = Path(file_path if file_path else file_name).suffix
+        loader_kwargs = get_loader_kwargs(loaders_map.get(extension), file_name, file_content, is_capture_image, page_number, sheet_name, llm, file_path, excel_by_sheets)
+        if file_content:
+            return load_content_from_bytes(file_content=file_content,
+                                           extension=extension,
+                                           loader_extra_config=loader_kwargs,
+                                           llm=llm)
+        else:
+            return load_content(file_path=file_path,
+                                extension=extension,
+                                loader_extra_config=loader_kwargs,
+                                llm=llm)
 
-    loader_object = loaders_map.get(extension)
-    if not loader_object:
-        logger.warning(f"No loader found for file extension: {extension}. File: {file_path if file_path else file_name}")
+def load_file_docs(file_name=None, file_content=None, is_capture_image: bool = False, page_number: int = None,
+                   sheet_name: str = None, llm=None, file_path: str = None, excel_by_sheets: bool = False) -> List[Document] | ToolException:
+    loader = prepare_loader(
+        file_name=file_name,
+        file_content=file_content,
+        is_capture_image=is_capture_image,
+        page_number=page_number,
+        sheet_name=sheet_name,
+        llm=llm,
+        file_path=file_path,
+        excel_by_sheets=excel_by_sheets
+    )
+    if not loader:
         return ToolException(
             "Not supported type of files entered. Supported types are TXT, DOCX, PDF, PPTX, XLSX and XLS only.")
+    return loader.load()
+
+def get_loader_kwargs(loader_object, file_name=None, file_content=None, is_capture_image: bool = False, page_number: int = None,
+                      sheet_name: str = None, llm=None, file_path: str = None, excel_by_sheets: bool = False):
     loader_kwargs = loader_object['kwargs']
     loader_kwargs.update({
         "file_path": file_path,
@@ -97,25 +136,21 @@ def parse_file_content(file_name=None, file_content=None, is_capture_image: bool
         "row_content": True,
         "json_documents": False
     })
-    loader = loader_object['class'](**loader_kwargs)
+    return loader_kwargs
 
-    if not loader:
-        return ToolException(
-            "Not supported type of files entered. Supported types are TXT, DOCX, PDF, PPTX, XLSX and XLS only.")
+def prepare_loader(file_name=None, file_content=None, is_capture_image: bool = False, page_number: int = None,
+                   sheet_name: str = None, llm=None, file_path: str = None, excel_by_sheets: bool = False):
+    if (file_path and (file_name or file_content)) or (not file_path and (not file_name or file_content is None)):
+        raise ToolException("Either (file_name and file_content) or file_path must be provided, but not both.")
 
-    if hasattr(loader, 'get_content'):
-        return loader.get_content()
-    else:
-        if file_content:
-            return load_content_from_bytes(file_content=file_content,
-                                           extension=extension,
-                                           loader_extra_config=loader_kwargs,
-                                           llm=llm)
-        else:
-            return load_content(file_path=file_path,
-                                extension=extension,
-                                loader_extra_config=loader_kwargs,
-                                llm=llm)
+    extension = Path(file_path if file_path else file_name).suffix
+
+    loader_object = loaders_map.get(extension)
+    if not loader_object:
+        return None
+    loader_kwargs = get_loader_kwargs(loader_object, file_name, file_content, is_capture_image, page_number, sheet_name, llm, file_path, excel_by_sheets)
+    loader = loader_object['class'](**loader_kwargs)
+    return loader
 
 # TODO: review usage of this function alongside with functions above
 def load_content(file_path: str, extension: str = None, loader_extra_config: dict = None, llm = None) -> str:
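In short, the file-parsing logic is now split into prepare_loader (resolves a loader or returns None for unsupported extensions), get_loader_kwargs (builds the loader config), load_file_docs (returns the loader's Document list), and the existing parse_file_content (returns extracted content). A usage sketch under those assumptions; the file path is illustrative:

    # Hypothetical call sites for the refactored helpers
    from alita_sdk.tools.utils.content_parser import parse_file_content, load_file_docs

    text = parse_file_content(file_path="/tmp/report.pdf")   # extracted content, or ToolException for unsupported types
    docs = load_file_docs(file_path="/tmp/report.pdf")       # List[Document], or ToolException for unsupported types
    # Passing both file_path and file_name/file_content still raises ToolException, now from prepare_loader.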
alita_sdk/tools/zephyr_enterprise/__init__.py
@@ -36,7 +36,7 @@ class ZephyrEnterpriseToolkit(BaseToolkit):
         ZephyrEnterpriseToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
-            zephyr_configuration=(Optional[ZephyrEnterpriseConfiguration], Field(description="Zephyr Configuration", json_schema_extra={'configuration_types': ['zephyr-enterprise']})),
+            zephyr_configuration=(ZephyrEnterpriseConfiguration, Field(description="Zephyr Configuration", json_schema_extra={'configuration_types': ['zephyr-enterprise']})),
             pgvector_configuration=(Optional[PgVectorConfiguration], Field(description="PgVector Configuration",
                                                                            json_schema_extra={
                                                                                'configuration_types': ['pgvector']},
alita_sdk/tools/zephyr_essential/__init__.py
@@ -7,13 +7,14 @@ from .api_wrapper import ZephyrEssentialApiWrapper
 from ..base.tool import BaseAction
 from ..utils import clean_string, TOOLKIT_SPLITTER, get_max_toolkit_length
 from ...configurations.pgvector import PgVectorConfiguration
+from ...configurations.zephyr_essential import ZephyrEssentialConfiguration
 
 name = "zephyr_essential"
 
 def get_tools(tool):
     return ZephyrEssentialToolkit().get_toolkit(
         selected_tools=tool['settings'].get('selected_tools', []),
-        token=tool['settings']["token"],
+        zephyr_essential_configuration=tool['settings']['zephyr_essential_configuration'],
         toolkit_name=tool.get('toolkit_name'),
         llm = tool['settings'].get('llm', None),
         alita=tool['settings'].get('alita', None),
@@ -35,8 +36,7 @@ class ZephyrEssentialToolkit(BaseToolkit):
         ZephyrEssentialToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
-            token=(str, Field(description="Bearer api token")),
-            base_url=(Optional[str], Field(description="Zephyr Essential base url", default=None)),
+            zephyr_essential_configuration=(ZephyrEssentialConfiguration, Field(description="Zephyr Essential Configuration", json_schema_extra={'configuration_types': ['zephyr-essential']})),
             selected_tools=(List[Literal[tuple(selected_tools)]], Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
             pgvector_configuration=(Optional[PgVectorConfiguration], Field(default=None,
                                                                            description="PgVector Configuration",
@@ -56,6 +56,7 @@ class ZephyrEssentialToolkit(BaseToolkit):
             selected_tools = []
         wrapper_payload = {
             **kwargs,
+            **kwargs.get('zephyr_essential_configuration', {}),
             **(kwargs.get('pgvector_configuration') or {}),
         }
         zephyr_api_wrapper = ZephyrEssentialApiWrapper(**wrapper_payload)
alita_sdk/tools/zephyr_essential/api_wrapper.py
@@ -657,13 +657,43 @@ CreateTestCaseTestSteps = create_model(
     test_case_key=(str, Field(description="Key of the test case to create test steps for.")),
     json=(str, Field(description=("""
    JSON body to create test steps. Example:
-    [
-        {
-            "step": "Step 1",
-            "data": "Test Data",
-            "result": "Expected Result"
-        }
-    ]
+    {
+      "mode": "APPEND",
+      "items": [
+        {
+          "inline": {
+            "description": "Attempt to login to the application",
+            "testData": "Username = SmartBear Password = weLoveAtlassian",
+            "expectedResult": "Login succeeds, web-app redirects to the dashboard view",
+            "customFields": {
+              "Build Number": 20,
+              "Release Date": "2020-01-01",
+              "Pre-Condition(s)": "User should have logged in. <br> User should have navigated to the administration panel.",
+              "Implemented": false,
+              "Category": [
+                "Performance",
+                "Regression"
+              ],
+              "Tester": "fa2e582e-5e15-521e-92e3-47e6ca2e7256"
+            }
+          },
+          "testCase": {
+            "self": "string",
+            "testCaseKey": "PROJ-T123",
+            "parameters": [
+              {
+                "name": "username",
+                "type": "DEFAULT_VALUE",
+                "value": "admin"
+              }
+            ]
+          }
+        }
+      ]
+    }
+    Where:
+    mode: str - required - Valid values: "APPEND", "OVERWRITE",
+    items - The list of test steps. Each step should be an object containing inline or testCase. They should only include one of these fields at a time.
    """
    )))
 )
@@ -848,10 +878,12 @@ CreateFolder = create_model(
     json=(str, Field(description=("""
    JSON body to create a folder. Example:
    {
-        "name": "Folder Name",
-        "description": "Folder Description",
-        "projectKey": "PROJECT_KEY"
+        "parentId": 24389289,
+        "name": "ZephyrEssential_test",
+        "projectKey": "EL",
+        "folderType": "TEST_CASE"
    }
+    Possible folder types: "TEST_CASE", "TEST_PLAN", "TEST_CYCLE"
    """
    )))
 )
alita_sdk/tools/zephyr_scale/__init__.py
@@ -40,7 +40,7 @@ class ZephyrScaleToolkit(BaseToolkit):
         return create_model(
             name,
             max_results=(int, Field(default=100, description="Results count to show")),
-            zephyr_configuration=(Optional[ZephyrConfiguration], Field(description="Zephyr Configuration",
+            zephyr_configuration=(ZephyrConfiguration, Field(description="Zephyr Configuration",
                                                                        json_schema_extra={'configuration_types': ['zephyr']})),
             pgvector_configuration=(Optional[PgVectorConfiguration], Field(default=None, description="PgVector Configuration",
                                                                            json_schema_extra={
{alita_sdk-0.3.270.dist-info → alita_sdk-0.3.272.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: alita_sdk
-Version: 0.3.270
+Version: 0.3.272
 Summary: SDK for building langchain agents using resources from Alita
 Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedjik@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
 License-Expression: Apache-2.0