alita-sdk 0.3.175__py3-none-any.whl → 0.3.177__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. alita_sdk/community/__init__.py +7 -17
  2. alita_sdk/tools/carrier/api_wrapper.py +6 -0
  3. alita_sdk/tools/carrier/backend_tests_tool.py +308 -7
  4. alita_sdk/tools/carrier/carrier_sdk.py +18 -0
  5. alita_sdk/tools/carrier/create_ui_test_tool.py +90 -109
  6. alita_sdk/tools/carrier/run_ui_test_tool.py +311 -184
  7. alita_sdk/tools/carrier/tools.py +2 -1
  8. alita_sdk/tools/confluence/api_wrapper.py +1 -0
  9. {alita_sdk-0.3.175.dist-info → alita_sdk-0.3.177.dist-info}/METADATA +2 -2
  10. {alita_sdk-0.3.175.dist-info → alita_sdk-0.3.177.dist-info}/RECORD +13 -44
  11. alita_sdk/community/browseruse/__init__.py +0 -73
  12. alita_sdk/community/browseruse/api_wrapper.py +0 -288
  13. alita_sdk/community/deep_researcher/__init__.py +0 -70
  14. alita_sdk/community/deep_researcher/agents/__init__.py +0 -1
  15. alita_sdk/community/deep_researcher/agents/baseclass.py +0 -182
  16. alita_sdk/community/deep_researcher/agents/knowledge_gap_agent.py +0 -74
  17. alita_sdk/community/deep_researcher/agents/long_writer_agent.py +0 -251
  18. alita_sdk/community/deep_researcher/agents/planner_agent.py +0 -124
  19. alita_sdk/community/deep_researcher/agents/proofreader_agent.py +0 -80
  20. alita_sdk/community/deep_researcher/agents/thinking_agent.py +0 -64
  21. alita_sdk/community/deep_researcher/agents/tool_agents/__init__.py +0 -20
  22. alita_sdk/community/deep_researcher/agents/tool_agents/crawl_agent.py +0 -87
  23. alita_sdk/community/deep_researcher/agents/tool_agents/search_agent.py +0 -96
  24. alita_sdk/community/deep_researcher/agents/tool_selector_agent.py +0 -83
  25. alita_sdk/community/deep_researcher/agents/utils/__init__.py +0 -0
  26. alita_sdk/community/deep_researcher/agents/utils/parse_output.py +0 -148
  27. alita_sdk/community/deep_researcher/agents/writer_agent.py +0 -63
  28. alita_sdk/community/deep_researcher/api_wrapper.py +0 -116
  29. alita_sdk/community/deep_researcher/deep_research.py +0 -185
  30. alita_sdk/community/deep_researcher/examples/deep_example.py +0 -30
  31. alita_sdk/community/deep_researcher/examples/iterative_example.py +0 -34
  32. alita_sdk/community/deep_researcher/examples/report_plan_example.py +0 -27
  33. alita_sdk/community/deep_researcher/iterative_research.py +0 -419
  34. alita_sdk/community/deep_researcher/llm_config.py +0 -87
  35. alita_sdk/community/deep_researcher/main.py +0 -67
  36. alita_sdk/community/deep_researcher/tools/__init__.py +0 -2
  37. alita_sdk/community/deep_researcher/tools/crawl_website.py +0 -109
  38. alita_sdk/community/deep_researcher/tools/web_search.py +0 -294
  39. alita_sdk/community/deep_researcher/utils/__init__.py +0 -0
  40. alita_sdk/community/deep_researcher/utils/md_to_pdf.py +0 -8
  41. alita_sdk/community/deep_researcher/utils/os.py +0 -21
  42. {alita_sdk-0.3.175.dist-info → alita_sdk-0.3.177.dist-info}/WHEEL +0 -0
  43. {alita_sdk-0.3.175.dist-info → alita_sdk-0.3.177.dist-info}/licenses/LICENSE +0 -0
  44. {alita_sdk-0.3.175.dist-info → alita_sdk-0.3.177.dist-info}/top_level.txt +0 -0
@@ -11,7 +11,7 @@ import importlib
11
11
  __all__ = []
12
12
 
13
13
  # Standard module imports with fallback
14
- _modules = ['utils', 'analysis', 'browseruse', 'deep_researcher', 'eda']
14
+ _modules = ['utils', 'analysis', 'deep_researcher', 'eda']
15
15
 
16
16
  for module_name in _modules:
17
17
  try:
@@ -26,8 +26,7 @@ _toolkits = [
26
26
  ('analysis.jira_analyse', 'AnalyseJira'),
27
27
  ('analysis.ado_analyse', 'AnalyseAdo'),
28
28
  ('analysis.gitlab_analyse', 'AnalyseGitLab'),
29
- ('analysis.github_analyse', 'AnalyseGithub'),
30
- ('browseruse', 'BrowserUseToolkit')
29
+ ('analysis.github_analyse', 'AnalyseGithub')
31
30
  ]
32
31
 
33
32
  for module_path, class_name in _toolkits:
@@ -61,8 +60,7 @@ def get_tools(tools_list: list, alita_client, llm) -> list:
61
60
  'analyse_jira': 'AnalyseJira',
62
61
  'analyse_ado': 'AnalyseAdo',
63
62
  'analyse_gitlab': 'AnalyseGitLab',
64
- 'analyse_github': 'AnalyseGithub',
65
- 'browser_use': 'BrowserUseToolkit'
63
+ 'analyse_github': 'AnalyseGithub'
66
64
  }
67
65
 
68
66
  for tool in tools_list:
@@ -72,18 +70,10 @@ def get_tools(tools_list: list, alita_client, llm) -> list:
72
70
  if class_name and class_name in globals():
73
71
  try:
74
72
  toolkit_class = globals()[class_name]
75
- if tool_type == 'browser_use':
76
- toolkit = toolkit_class.get_toolkit(
77
- client=alita_client,
78
- llm=llm,
79
- toolkit_name=tool.get('toolkit_name', ''),
80
- **tool['settings']
81
- )
82
- else:
83
- toolkit = toolkit_class.get_toolkit(
84
- client=alita_client,
85
- **tool['settings']
86
- )
73
+ toolkit = toolkit_class.get_toolkit(
74
+ client=alita_client,
75
+ **tool['settings']
76
+ )
87
77
  tools.extend(toolkit.get_tools())
88
78
  except Exception:
89
79
  pass # Fail silently for robustness
@@ -64,6 +64,12 @@ class CarrierAPIWrapper(BaseModel):
64
64
  def get_tests_list(self) -> List[Dict[str, Any]]:
65
65
  return self._client.get_tests_list()
66
66
 
67
+ def create_test(self, data: dict):
68
+ return self._client.create_test(data)
69
+
70
+ def get_integrations(self, name: str):
71
+ return self._client.get_integrations(name)
72
+
67
73
  def run_test(self, test_id: str, json_body):
68
74
  return self._client.run_test(test_id, json_body)
69
75
 
@@ -1,7 +1,7 @@
1
1
  import logging
2
2
  import json
3
3
  import traceback
4
- from typing import Type
4
+ from typing import Type, Optional, List, Dict, Union
5
5
  from langchain_core.tools import BaseTool, ToolException
6
6
  from pydantic.fields import Field
7
7
  from pydantic import create_model, BaseModel
@@ -30,7 +30,6 @@ class GetTestsTool(BaseTool):
30
30
 
31
31
  trimmed_tests = []
32
32
  for test in tests:
33
-
34
33
  # Keep only desired base fields
35
34
  trimmed = {k: test[k] for k in base_fields if k in test}
36
35
 
@@ -82,7 +81,14 @@ class RunTestByIDTool(BaseTool):
82
81
  "RunTestByIdInput",
83
82
  test_id=(str, Field(default=None, description="Test id to execute")),
84
83
  name=(str, Field(default=None, description="Test name to execute")),
85
- test_parameters=(dict, Field(default=None, description="Test parameters to override")),
84
+ test_parameters=(list, Field(
85
+ default=None,
86
+ description=(
87
+ "Test parameters to override. Provide as a list of dictionaries, "
88
+ "e.g., [{'vUsers': '5', 'duration': '120'}]. Each dictionary should "
89
+ "contain parameter names and their values."
90
+ )
91
+ )),
86
92
  )
87
93
 
88
94
  def _run(self, test_id=None, name=None, test_parameters=None):
@@ -111,9 +117,12 @@ class RunTestByIDTool(BaseTool):
111
117
  return {
112
118
  "message": "Please confirm or override the following test parameters to proceed with the test execution.",
113
119
  "default_test_parameters": default_test_parameters,
114
- "instruction": "To override parameters, provide a dictionary of updated values for 'test_parameters'.",
120
+ "instruction": "To override parameters, provide a list of dictionaries for 'test_parameters', e.g., [{'vUsers': '5', 'duration': '120'}].",
115
121
  }
116
122
 
123
+ # Normalize test_parameters if provided in an incorrect format
124
+ test_parameters = self._normalize_test_parameters(test_parameters)
125
+
117
126
  # Apply user-provided test parameters
118
127
  updated_test_parameters = self._apply_test_parameters(default_test_parameters, test_parameters)
119
128
 
@@ -146,6 +155,37 @@ class RunTestByIDTool(BaseTool):
146
155
  logger.error(f"Test not found: {stacktrace}")
147
156
  raise ToolException(stacktrace)
148
157
 
158
+ def _normalize_test_parameters(self, test_parameters):
159
+ """
160
+ Normalize test_parameters to ensure they are in the correct list-of-dictionaries format.
161
+ If test_parameters are provided as a list of strings (e.g., ['vUsers=5', 'duration=120']),
162
+ convert them to a list of dictionaries (e.g., [{'vUsers': '5', 'duration': '120'}]).
163
+ """
164
+ if isinstance(test_parameters, list):
165
+ # Check if the list contains strings in the format "key=value"
166
+ if all(isinstance(param, str) and "=" in param for param in test_parameters):
167
+ normalized_parameters = []
168
+ for param in test_parameters:
169
+ name, value = param.split("=", 1)
170
+ normalized_parameters.append({name.strip(): value.strip()})
171
+ return normalized_parameters
172
+ # Check if the list already contains dictionaries
173
+ elif all(isinstance(param, dict) for param in test_parameters):
174
+ return test_parameters
175
+ else:
176
+ raise ValueError(
177
+ "Invalid format for test_parameters. Provide as a list of 'key=value' strings "
178
+ "or a list of dictionaries."
179
+ )
180
+ elif isinstance(test_parameters, dict):
181
+ # Convert a single dictionary to a list of dictionaries
182
+ return [test_parameters]
183
+ else:
184
+ raise ValueError(
185
+ "Invalid format for test_parameters. Provide as a list of 'key=value' strings "
186
+ "or a list of dictionaries."
187
+ )
188
+
149
189
  def _apply_test_parameters(self, default_test_parameters, user_parameters):
150
190
  """
151
191
  Apply user-provided parameters to the default test parameters.
@@ -153,8 +193,269 @@ class RunTestByIDTool(BaseTool):
153
193
  updated_parameters = []
154
194
  for param in default_test_parameters:
155
195
  name = param["name"]
156
- if name in user_parameters:
196
+ # Find the matching user parameter
197
+ user_param = next((p for p in user_parameters if name in p), None)
198
+ if user_param:
157
199
  # Override the parameter value with the user-provided value
158
- param["default"] = user_parameters[name]
159
- updated_parameters.append(param)
200
+ param["default"] = user_param[name]
201
+ # Ensure the parameter structure remains consistent
202
+ updated_parameters.append({
203
+ "name": param["name"],
204
+ "type": param["type"],
205
+ "description": param["description"],
206
+ "default": param["default"]
207
+ })
160
208
  return updated_parameters
209
+
210
+
211
+ class CreateBackendTestInput(BaseModel):
212
+ test_name: str = Field(..., description="Test name")
213
+ test_type: str = Field(..., description="Test type")
214
+ env_type: str = Field(..., description="Env type")
215
+ entrypoint: str = Field(..., description="Entrypoint for the test (JMeter script path or Gatling simulation path)")
216
+ custom_cmd: str = Field(..., description="Custom command line to execute the test (e.g., -l /tmp/reports/jmeter.jtl -e -o /tmp/reports/html_report)")
217
+ runner: str = Field(..., description="Test runner (Gatling or JMeter)")
218
+ source: Optional[Dict[str, Optional[str]]] = Field(
219
+ None,
220
+ description=(
221
+ "Test source configuration (Git repo). The dictionary should include the following keys:\n"
222
+ "- 'name' (required): The type of source (e.g., 'git_https').\n"
223
+ "- 'repo' (required): The URL of the Git repository.\n"
224
+ "- 'branch' (optional): The branch of the repository to use.\n"
225
+ "- 'username' (optional): The username for accessing the repository.\n"
226
+ "- 'password' (optional): The password or token for accessing the repository."
227
+ ),
228
+ example={
229
+ "name": "git_https",
230
+ "repo": "https://your_git_repo.git",
231
+ "branch": "main",
232
+ "username": "your_username",
233
+ "password": "your_password",
234
+ },
235
+ )
236
+ test_parameters: Optional[List[Dict[str, str]]] = Field(
237
+ None,
238
+ description=(
239
+ "Test parameters as a list of dictionaries. Each dictionary should include the following keys:\n"
240
+ "- 'name' (required): The name of the parameter (e.g., 'VUSERS').\n"
241
+ "- 'default' (required): The value of the parameter (e.g., '5')."
242
+ ),
243
+ example=[
244
+ {"name": "VUSERS", "default": "5"},
245
+ {"name": "DURATION", "default": "60"},
246
+ {"name": "RAMP_UP", "default": "30"},
247
+ ],
248
+ )
249
+ email_integration: Optional[Dict[str, Optional[Union[int, List[str]]]]] = Field(
250
+ None,
251
+ description=(
252
+ "Email integration configuration. The dictionary should include the following keys:\n"
253
+ "- 'integration_id' (required): The ID of the selected email integration (integer).\n"
254
+ "- 'recipients' (required): A list of email addresses to receive notifications."
255
+ ),
256
+ example={
257
+ "integration_id": 1,
258
+ "recipients": ["example@example.com", "user@example.com"],
259
+ },
260
+ )
261
+
262
+
263
+ class CreateBackendTestTool(BaseTool):
264
+ api_wrapper: CarrierAPIWrapper = Field(..., description="Carrier API Wrapper instance")
265
+ name: str = "create_backend_test"
266
+ description: str = "Create a new backend test plan in the Carrier platform."
267
+ args_schema: Type[BaseModel] = CreateBackendTestInput
268
+
269
+ def _run(self, test_name=None, test_type=None, env_type=None, entrypoint=None, custom_cmd=None, runner=None,
270
+ source=None, test_parameters=None, email_integration=None):
271
+ try:
272
+ # Validate required fields
273
+ if not test_name:
274
+ return {"message": "Please provide test name"}
275
+ if not test_type:
276
+ return {
277
+ "message": "Please provide performance test type (capacity, baseline, response time, stable, stress, etc)"}
278
+ if not env_type:
279
+ return {"message": "Please provide test env (stage, prod, dev, etc)"}
280
+ if not entrypoint:
281
+ return {"message": "Please provide test entrypoint (JMeter script path or Gatling simulation path)"}
282
+ if not custom_cmd:
283
+ return {
284
+ "message": "Please provide custom_cmd. This parameter is optional. (e.g., -l /tmp/reports/jmeter.jtl -e -o /tmp/reports/html_report)"}
285
+
286
+ # Validate runner
287
+ available_runners = {
288
+ "JMeter_v5.6.3": "v5.6.3",
289
+ "JMeter_v5.5": "v5.5",
290
+ "Gatling_v3.7": "v3.7",
291
+ "Gatling_maven": "maven",
292
+ }
293
+
294
+ if not runner:
295
+ return {
296
+ "message": (
297
+ "Please provide a valid test runner. The test runner specifies the tool and version to use for running the test."
298
+ ),
299
+ "instructions": (
300
+ "You can choose a test runner by providing either the key or the value from the available options below. "
301
+ "For example, you can provide 'JMeter_v5.5' or 'v5.5'."
302
+ ),
303
+ "available_runners": available_runners,
304
+ "example": "For JMeter 5.5, you can provide either 'JMeter_v5.5' or 'v5.5'.",
305
+ }
306
+
307
+ # Normalize the runner input to ensure we always use the value in the final data
308
+ if runner in available_runners:
309
+ runner_value = available_runners[runner] # User provided the key (e.g., 'JMeter_v5.5')
310
+ elif runner in available_runners.values():
311
+ runner_value = runner # User provided the value directly (e.g., 'v5.5')
312
+ else:
313
+ return {
314
+ "message": (
315
+ "Invalid test runner provided. Please choose a valid test runner from the available options."
316
+ ),
317
+ "instructions": (
318
+ "You can choose a test runner by providing either the key or the value from the available options below. "
319
+ "For example, you can provide 'JMeter_v5.5' or 'v5.5'."
320
+ ),
321
+ "available_runners": available_runners,
322
+ "example": "For JMeter 5.5, you can provide either 'JMeter_v5.5' or 'v5.5'.",
323
+ }
324
+
325
+ # Validate source
326
+ if not source:
327
+ return {
328
+ "message": (
329
+ "Please provide the test source configuration. The source configuration is required to specify "
330
+ "the Git repository details for the test. Ensure all fields are provided in the correct format."
331
+ ),
332
+ "instructions": (
333
+ "The 'source' parameter should be a dictionary with the following keys:\n"
334
+ "- 'name' (required): The type of source (e.g., 'git_https').\n"
335
+ "- 'repo' (required): The URL of the Git repository.\n"
336
+ "- 'branch' (optional): The branch of the repository to use.\n"
337
+ "- 'username' (optional): The username for accessing the repository.\n"
338
+ "- 'password' (optional): The password or token for accessing the repository."
339
+ ),
340
+ "example_source": {
341
+ "name": "git_https",
342
+ "repo": "https://your_git_repo.git",
343
+ "branch": "main",
344
+ "username": "",
345
+ "password": "",
346
+ },
347
+ }
348
+
349
+ # Validate test_parameters
350
+ if test_parameters is None:
351
+ return {
352
+ "message": (
353
+ "Do you want to add test parameters? Test parameters allow you to configure the test with specific values."
354
+ ),
355
+ "instructions": (
356
+ "Provide test parameters as a list of dictionaries in the format:\n"
357
+ "- {'name': 'VUSERS', 'default': '5'}\n"
358
+ "- {'name': 'DURATION', 'default': '60'}\n"
359
+ "- {'name': 'RAMP_UP', 'default': '30'}\n"
360
+ "You can provide multiple parameters as a list, e.g., [{'name': 'VUSERS', 'default': '5'}, {'name': 'DURATION', 'default': '60'}].\n"
361
+ "If no parameters are needed, respond with 'no'."
362
+ ),
363
+ "example_parameters": [
364
+ {"name": "VUSERS", "default": "5"},
365
+ {"name": "DURATION", "default": "60"},
366
+ {"name": "RAMP_UP", "default": "30"},
367
+ ],
368
+ }
369
+
370
+ # Ensure test_parameters is an empty list if the user indicates no parameters are needed
371
+ if isinstance(test_parameters, str) and test_parameters.lower() == "no":
372
+ test_parameters = []
373
+
374
+ # Fetch available integrations
375
+ integrations_list = self.api_wrapper.get_integrations(name="reporter_email")
376
+
377
+ # Validate email_integration
378
+ if email_integration is None:
379
+ # Return instructions for configuring email integration
380
+ return {
381
+ "message": "Do you want to configure email integration?",
382
+ "instructions": (
383
+ "If yes, select an integration from the available options below and provide email recipients.\n"
384
+ "If no, respond with 'no'."
385
+ ),
386
+ "available_integrations": [
387
+ {
388
+ "id": integration["id"],
389
+ "name": integration["config"]["name"],
390
+ "description": integration["section"]["integration_description"],
391
+ }
392
+ for integration in integrations_list
393
+ ],
394
+ "example_response": {
395
+ "integration_id": 1,
396
+ "recipients": ["example@example.com", "user@example.com"],
397
+ },
398
+ }
399
+
400
+ # Prepare the final data dictionary
401
+ data = {
402
+ "common_params": {
403
+ "name": test_name,
404
+ "test_type": test_type,
405
+ "env_type": env_type,
406
+ "entrypoint": entrypoint,
407
+ "runner": runner_value,
408
+ "source": source,
409
+ "env_vars": {
410
+ "cpu_quota": 1,
411
+ "memory_quota": 4,
412
+ "cloud_settings": {},
413
+ "custom_cmd": custom_cmd,
414
+ },
415
+ "parallel_runners": 1,
416
+ "cc_env_vars": {},
417
+ "customization": {},
418
+ "location": "default", # TODO update location
419
+ },
420
+ "test_parameters": test_parameters,
421
+ "integrations": {
422
+ "reporters": {
423
+ "reporter_email": {
424
+ "id": email_integration["integration_id"],
425
+ "is_local": True,
426
+ "project_id": integrations_list[0]["project_id"], # Example project_id
427
+ "recipients": email_integration["recipients"],
428
+ }
429
+ }
430
+ },
431
+ "scheduling": [],
432
+ "run_test": False,
433
+ }
434
+
435
+ response = self.api_wrapper.create_test(data)
436
+ try:
437
+ info = "Test created successfully"
438
+ test_info = response.json()
439
+ except:
440
+ info = "Failed to create the test"
441
+ test_info = response.text
442
+ return f"{info}. {test_info}"
443
+
444
+ except Exception as e:
445
+ stacktrace = traceback.format_exc()
446
+ logger.error(f"Error while creating test: {stacktrace}")
447
+ raise ToolException(stacktrace)
448
+
449
+ # data = {"common_params":{"name":"toolkit_demo","test_type":"toolkit_demo","env_type":"toolkit_demo",
450
+ # "entrypoint":"tests/BasicEcommerceWithTransaction.jmx","runner":"v5.6.3",
451
+ # "source":{"name":"git_https","repo":"https://git.epam.com/epm-perf/boilerplate.git",
452
+ # "branch":"jmeter","username":"mykhailo_hunko@epam.com",
453
+ # "password":"{{secret.mykhailo_gitlab}}"},
454
+ # "env_vars":{"cpu_quota":2,"memory_quota":6,"cloud_settings":{},
455
+ # "custom_cmd":"-l /tmp/reports/jmeter.jtl -e -o /tmp/reports/html_report"},
456
+ # "parallel_runners":1,"cc_env_vars":{},"customization":{},"location":"default"},
457
+ # "test_parameters":[{"name":"VUSERS","default":"5","type":"string","description":"","action":""},
458
+ # {"name":"DURATION","default":"60","type":"string","description":"","action":""}],
459
+ # "integrations":{"reporters":{"reporter_email":{"id":1,"is_local":True,"project_id":36,
460
+ # "recipients":["mykhailo_hunko@epam.com"]}}},
461
+ # "scheduling":[],"run_test":True}
@@ -81,10 +81,28 @@ class CarrierClient(BaseModel):
81
81
  endpoint = f"api/v1/backend_performance/tests/{self.credentials.project_id}"
82
82
  return self.request('get', endpoint).get("rows", [])
83
83
 
84
+ def create_test(self, data):
85
+ endpoint = f"api/v1/backend_performance/tests/{self.credentials.project_id}"
86
+ full_url = f"{self.credentials.url.rstrip('/')}/{endpoint.lstrip('/')}"
87
+ headers = {'Authorization': f'bearer {self.credentials.token}'}
88
+ from json import dumps
89
+ # Serialize the `data` dictionary into a JSON string
90
+ form_data = {"data": dumps(data)}
91
+ # Send the POST request
92
+ res = requests.post(full_url, headers=headers, data=form_data)
93
+ print("************************* response")
94
+ print(res.text)
95
+ print("**********************************")
96
+ return res
97
+
84
98
  def run_test(self, test_id: str, json_body):
85
99
  endpoint = f"api/v1/backend_performance/test/{self.credentials.project_id}/{test_id}"
86
100
  return self.request('post', endpoint, json=json_body).get("result_id", "")
87
101
 
102
+ def get_integrations(self, name: str):
103
+ endpoint = f"api/v1/integrations/integrations/{self.credentials.project_id}?name={name}"
104
+ return self.request('get', endpoint)
105
+
88
106
  def run_ui_test(self, test_id: str, json_body):
89
107
  """Run a UI test with the given test ID and JSON body."""
90
108
  endpoint = f"api/v1/ui_performance/test/{self.credentials.project_id}/{test_id}"