alita-sdk 0.3.175__py3-none-any.whl → 0.3.177__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. alita_sdk/community/__init__.py +7 -17
  2. alita_sdk/tools/carrier/api_wrapper.py +6 -0
  3. alita_sdk/tools/carrier/backend_tests_tool.py +308 -7
  4. alita_sdk/tools/carrier/carrier_sdk.py +18 -0
  5. alita_sdk/tools/carrier/create_ui_test_tool.py +90 -109
  6. alita_sdk/tools/carrier/run_ui_test_tool.py +311 -184
  7. alita_sdk/tools/carrier/tools.py +2 -1
  8. alita_sdk/tools/confluence/api_wrapper.py +1 -0
  9. {alita_sdk-0.3.175.dist-info → alita_sdk-0.3.177.dist-info}/METADATA +2 -2
  10. {alita_sdk-0.3.175.dist-info → alita_sdk-0.3.177.dist-info}/RECORD +13 -44
  11. alita_sdk/community/browseruse/__init__.py +0 -73
  12. alita_sdk/community/browseruse/api_wrapper.py +0 -288
  13. alita_sdk/community/deep_researcher/__init__.py +0 -70
  14. alita_sdk/community/deep_researcher/agents/__init__.py +0 -1
  15. alita_sdk/community/deep_researcher/agents/baseclass.py +0 -182
  16. alita_sdk/community/deep_researcher/agents/knowledge_gap_agent.py +0 -74
  17. alita_sdk/community/deep_researcher/agents/long_writer_agent.py +0 -251
  18. alita_sdk/community/deep_researcher/agents/planner_agent.py +0 -124
  19. alita_sdk/community/deep_researcher/agents/proofreader_agent.py +0 -80
  20. alita_sdk/community/deep_researcher/agents/thinking_agent.py +0 -64
  21. alita_sdk/community/deep_researcher/agents/tool_agents/__init__.py +0 -20
  22. alita_sdk/community/deep_researcher/agents/tool_agents/crawl_agent.py +0 -87
  23. alita_sdk/community/deep_researcher/agents/tool_agents/search_agent.py +0 -96
  24. alita_sdk/community/deep_researcher/agents/tool_selector_agent.py +0 -83
  25. alita_sdk/community/deep_researcher/agents/utils/__init__.py +0 -0
  26. alita_sdk/community/deep_researcher/agents/utils/parse_output.py +0 -148
  27. alita_sdk/community/deep_researcher/agents/writer_agent.py +0 -63
  28. alita_sdk/community/deep_researcher/api_wrapper.py +0 -116
  29. alita_sdk/community/deep_researcher/deep_research.py +0 -185
  30. alita_sdk/community/deep_researcher/examples/deep_example.py +0 -30
  31. alita_sdk/community/deep_researcher/examples/iterative_example.py +0 -34
  32. alita_sdk/community/deep_researcher/examples/report_plan_example.py +0 -27
  33. alita_sdk/community/deep_researcher/iterative_research.py +0 -419
  34. alita_sdk/community/deep_researcher/llm_config.py +0 -87
  35. alita_sdk/community/deep_researcher/main.py +0 -67
  36. alita_sdk/community/deep_researcher/tools/__init__.py +0 -2
  37. alita_sdk/community/deep_researcher/tools/crawl_website.py +0 -109
  38. alita_sdk/community/deep_researcher/tools/web_search.py +0 -294
  39. alita_sdk/community/deep_researcher/utils/__init__.py +0 -0
  40. alita_sdk/community/deep_researcher/utils/md_to_pdf.py +0 -8
  41. alita_sdk/community/deep_researcher/utils/os.py +0 -21
  42. {alita_sdk-0.3.175.dist-info → alita_sdk-0.3.177.dist-info}/WHEEL +0 -0
  43. {alita_sdk-0.3.175.dist-info → alita_sdk-0.3.177.dist-info}/licenses/LICENSE +0 -0
  44. {alita_sdk-0.3.175.dist-info → alita_sdk-0.3.177.dist-info}/top_level.txt +0 -0
alita_sdk/community/deep_researcher/api_wrapper.py
@@ -1,116 +0,0 @@
- from typing import Any, Optional, Dict
- import asyncio
- import json
- from pydantic import create_model, Field
-
- from alita_sdk.tools.elitea_base import BaseToolApiWrapper
- from .deep_research import DeepResearcher
- from .iterative_research import IterativeResearcher
- from .llm_config import LLMConfig, create_default_config
- from langchain_core.language_models.llms import BaseLLM
- from langchain_core.language_models.chat_models import BaseChatModel
-
-
- class DeepResearcherWrapper(BaseToolApiWrapper):
-     """Wrapper for deep_researcher module to be used as a LangChain toolkit."""
-     alita: Any = None
-     llm: Optional[BaseLLM | BaseChatModel] = None
-     max_iterations: int = 5
-     max_time_minutes: int = 10
-     verbose: bool = False
-     tracing: bool = False
-     config: Optional[LLMConfig] = None
-
-     def __init__(self, **kwargs):
-         super().__init__(**kwargs)
-         # Initialize the config if not provided
-         if not self.config:
-             self.config = create_default_config(langchain_llm=self.llm)
-         # Override llm in config if provided
-         elif self.llm and not self.config.langchain_llm:
-             # Create a new config with the langchain_llm
-             self.config = create_default_config(langchain_llm=self.llm)
-
-     def _setup_deep_researcher(self) -> DeepResearcher:
-         """Initialize a DeepResearcher instance with current settings."""
-         return DeepResearcher(
-             max_iterations=self.max_iterations,
-             max_time_minutes=self.max_time_minutes,
-             verbose=self.verbose,
-             tracing=self.tracing,
-             config=self.config,
-             llm=self.llm,
-             alita=self.alita
-         )
-
-     def _setup_iterative_researcher(self) -> IterativeResearcher:
-         """Initialize an IterativeResearcher instance with current settings."""
-         return IterativeResearcher(
-             max_iterations=self.max_iterations,
-             max_time_minutes=self.max_time_minutes,
-             verbose=self.verbose,
-             tracing=self.tracing,
-             config=self.config,
-             llm=self.llm,
-             alita=self.alita
-         )
-
-     def run_deep_research(self, query: str) -> str:
-         """
-         Run deep research on a query, breaking it down into sections and iteratively researching each part.
-
-         Args:
-             query: The research query
-
-         Returns:
-             Comprehensive research report
-         """
-         researcher = self._setup_deep_researcher()
-         return asyncio.run(researcher.run(query))
-
-     def run_iterative_research(self, query: str, output_length: str = "5 pages", output_instructions: str = "", background_context: str = "") -> str:
-         """
-         Run iterative research on a query, conducting multiple iterations to address knowledge gaps.
-
-         Args:
-             query: The research query
-             output_length: Desired length of the output (e.g., "5 pages", "2 paragraphs")
-             output_instructions: Additional instructions for output formatting
-             background_context: Additional context to provide for the research
-
-         Returns:
-             Research report based on iterative findings
-         """
-         researcher = self._setup_iterative_researcher()
-         return asyncio.run(researcher.run(
-             query=query,
-             output_length=output_length,
-             output_instructions=output_instructions,
-             background_context=background_context
-         ))
-
-     def get_available_tools(self):
-         """Return the list of available tools."""
-         return [
-             {
-                 "name": "run_deep_research",
-                 "ref": self.run_deep_research,
-                 "description": self.run_deep_research.__doc__,
-                 "args_schema": create_model(
-                     "DeepResearchModel",
-                     query=(str, Field(description="The research query to investigate thoroughly"))
-                 )
-             },
-             {
-                 "name": "run_iterative_research",
-                 "ref": self.run_iterative_research,
-                 "description": self.run_iterative_research.__doc__,
-                 "args_schema": create_model(
-                     "IterativeResearchModel",
-                     query=(str, Field(description="The research query to investigate")),
-                     output_length=(str, Field(description="Desired length of the output (e.g., '5 pages', '2 paragraphs')", default="5 pages")),
-                     output_instructions=(str, Field(description="Additional instructions for output formatting", default="")),
-                     background_context=(str, Field(description="Additional context to provide for the research", default=""))
-                 )
-             }
-         ]
alita_sdk/community/deep_researcher/deep_research.py
@@ -1,185 +0,0 @@
- import asyncio
- import time
- from .iterative_research import IterativeResearcher
- from .agents.planner_agent import init_planner_agent, ReportPlan, ReportPlanSection
- from .agents.proofreader_agent import ReportDraftSection, ReportDraft, init_proofreader_agent
- from .agents.long_writer_agent import init_long_writer_agent, write_report
- from .agents.baseclass import ResearchRunner
- from typing import List, Optional, Any
- from agents.tracing import trace, gen_trace_id, custom_span
- from .llm_config import LLMConfig, create_default_config
-
-
- class DeepResearcher:
-     """
-     Manager for the deep research workflow that breaks down a query into a report plan with sections and then runs an iterative research loop for each section.
-     """
-     def __init__(
-         self,
-         max_iterations: int = 5,
-         max_time_minutes: int = 10,
-         verbose: bool = True,
-         tracing: bool = False,
-         config: Optional[LLMConfig] = None,
-         llm: Optional[Any] = None,
-         alita: Optional[Any] = None
-     ):
-         self.max_iterations = max_iterations
-         self.max_time_minutes = max_time_minutes
-         self.verbose = verbose
-         self.tracing = tracing
-         self.alita = alita
-
-         # Initialize config with langchain LLM if provided
-         if llm is not None:
-             self.config = create_default_config(langchain_llm=llm)
-         elif config is not None:
-             self.config = config
-         else:
-             self.config = create_default_config()
-
-         self.planner_agent = init_planner_agent(self.config)
-         self.proofreader_agent = init_proofreader_agent(self.config)
-         self.long_writer_agent = init_long_writer_agent(self.config)
-
-     async def run(self, query: str) -> str:
-         """Run the deep research workflow"""
-         start_time = time.time()
-
-         if self.tracing:
-             trace_id = gen_trace_id()
-             workflow_trace = trace("deep_researcher", trace_id=trace_id)
-             print(f"View trace: https://platform.openai.com/traces/trace?trace_id={trace_id}")
-             workflow_trace.start(mark_as_current=True)
-
-         # First build the report plan which outlines the sections and compiles any relevant background context on the query
-         report_plan: ReportPlan = await self._build_report_plan(query)
-
-         # Run the independent research loops concurrently for each section and gather the results
-         research_results: List[str] = await self._run_research_loops(report_plan)
-
-         # Create the final report from the original report plan and the drafts of each section
-         final_report: str = await self._create_final_report(query, report_plan, research_results)
-
-         elapsed_time = time.time() - start_time
-         self._log_message(f"DeepResearcher completed in {int(elapsed_time // 60)} minutes and {int(elapsed_time % 60)} seconds")
-
-         if self.tracing:
-             workflow_trace.finish(reset_current=True)
-
-         return final_report
-
-     async def _build_report_plan(self, query: str) -> ReportPlan:
-         """Build the initial report plan including the report outline (sections and key questions) and background context"""
-         if self.tracing:
-             span = custom_span(name="build_report_plan")
-             span.start(mark_as_current=True)
-
-         self._log_message("=== Building Report Plan ===")
-         user_message = f"QUERY: {query}"
-         result = await ResearchRunner.run(
-             self.planner_agent,
-             user_message
-         )
-         report_plan = result.final_output_as(ReportPlan)
-
-         if self.verbose:
-             num_sections = len(report_plan.report_outline)
-             message_log = '\n\n'.join(f"Section: {section.title}\nKey question: {section.key_question}" for section in report_plan.report_outline)
-             if report_plan.background_context:
-                 message_log += f"\n\nThe following background context has been included for the report build:\n{report_plan.background_context}"
-             else:
-                 message_log += "\n\nNo background context was provided for the report build.\n"
-             self._log_message(f"Report plan created with {num_sections} sections:\n{message_log}")
-
-         if self.tracing:
-             span.finish(reset_current=True)
-
-         return report_plan
-
-     async def _run_research_loops(
-         self,
-         report_plan: ReportPlan
-     ) -> List[str]:
-         """For a given ReportPlan, run a research loop concurrently for each section and gather the results"""
-         async def run_research_for_section(section: ReportPlanSection):
-             iterative_researcher = IterativeResearcher(
-                 max_iterations=self.max_iterations,
-                 max_time_minutes=self.max_time_minutes,
-                 verbose=self.verbose,
-                 tracing=False, # Do not trace as this will conflict with the tracing we already have set up for the deep researcher
-                 config=self.config
-             )
-             args = {
-                 "query": section.key_question,
-                 "output_length": "",
-                 "output_instructions": "",
-                 "background_context": report_plan.background_context,
-             }
-
-             # Only use custom span if tracing is enabled
-             if self.tracing:
-                 with custom_span(
-                     name=f"iterative_researcher:{section.title}",
-                     data={"key_question": section.key_question}
-                 ):
-                     return await iterative_researcher.run(**args)
-             else:
-                 return await iterative_researcher.run(**args)
-
-         self._log_message("=== Initializing Research Loops ===")
-         # Run all research loops concurrently in a single gather call
-         research_results = await asyncio.gather(
-             *(run_research_for_section(section) for section in report_plan.report_outline)
-         )
-         return research_results
-
-     async def _create_final_report(
-         self,
-         query: str,
-         report_plan: ReportPlan,
-         section_drafts: List[str],
-         use_long_writer: bool = True
-     ) -> str:
-         """Create the final report from the original report plan and the drafts of each section"""
-         if self.tracing:
-             span = custom_span(name="create_final_report")
-             span.start(mark_as_current=True)
-
-         # Each section is a string containing the markdown for the section
-         # From this we need to build a ReportDraft object to feed to the final proofreader agent
-         report_draft = ReportDraft(
-             sections=[]
-         )
-         for i, section_draft in enumerate(section_drafts):
-             report_draft.sections.append(
-                 ReportDraftSection(
-                     section_title=report_plan.report_outline[i].title,
-                     section_content=section_draft
-                 )
-             )
-
-         self._log_message("\n=== Building Final Report ===")
-
-         if use_long_writer:
-             final_output = await write_report(self.long_writer_agent, query, report_plan.report_title, report_draft)
-         else:
-             user_prompt = f"QUERY:\n{query}\n\nREPORT DRAFT:\n{report_draft.model_dump_json()}"
-             # Run the proofreader agent to produce the final report
-             final_report = await ResearchRunner.run(
-                 self.proofreader_agent,
-                 user_prompt
-             )
-             final_output = final_report.final_output
-
-         self._log_message(f"Final report completed")
-
-         if self.tracing:
-             span.finish(reset_current=True)
-
-         return final_output
-
-     def _log_message(self, message: str) -> None:
-         """Log a message if verbose is True"""
-         if self.verbose:
-             print(message)
alita_sdk/community/deep_researcher/examples/deep_example.py
@@ -1,30 +0,0 @@
- """
- Example usage of the DeepResearcher to produce a report.
-
- See deep_output.txt for the console output from running this script, and deep_output.pdf for the final report
- """
-
- import asyncio
- import os
- import sys
- sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
- from deep_researcher import DeepResearcher
-
- manager = DeepResearcher(
-     max_iterations=3,
-     max_time_minutes=10,
-     verbose=True,
-     tracing=True
- )
-
- query = "Write a report on Plato - who was he, what were his main works " \
-     "and what are the main philosophical ideas he's known for"
-
- report = asyncio.run(
-     manager.run(
-         query
-     )
- )
-
- print("\n=== Final Report ===")
- print(report)
alita_sdk/community/deep_researcher/examples/iterative_example.py
@@ -1,34 +0,0 @@
- """
- Example usage of the IterativeResearcher to produce a report.
-
- See iterative_output.txt for the console output from running this script, and iterative_output.pdf for the final report
- """
-
- import asyncio
- import os
- import sys
- sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
- from deep_researcher import IterativeResearcher
-
- manager = IterativeResearcher(
-     max_iterations=5,
-     max_time_minutes=10,
-     verbose=True,
-     tracing=True
- )
-
- query = "Write a report on Plato - who was he, what were his main works " \
-     "and what are the main philosophical ideas he's known for"
- output_length = "5 pages"
- output_instructions = ""
-
- report = asyncio.run(
-     manager.run(
-         query,
-         output_length=output_length,
-         output_instructions=output_instructions
-     )
- )
-
- print("\n=== Final Report ===")
- print(report)
alita_sdk/community/deep_researcher/examples/report_plan_example.py
@@ -1,27 +0,0 @@
- import asyncio
- from deep_researcher.agents.planner_agent import planner_agent, ReportPlan
- from agents import gen_trace_id, trace
- from deep_researcher import ResearchRunner
-
-
- async def run_report_planner(query):
-     trace_id = gen_trace_id()
-
-     with trace("Deep Research trace", trace_id=trace_id):
-         print(f"View trace: https://platform.openai.com/traces/{trace_id}")
-         result = await ResearchRunner.run(planner_agent, query)
-         plan = result.final_output_as(ReportPlan)
-     return plan
-
-
- user_query = "Provide a detailed overview of the company Quantera (quantera.io) from an investor's perspective"
-
- plan = asyncio.run(run_report_planner(user_query))
-
- print(f"BACKGROUND CONTEXT:\n{plan.background_context if plan.background_context else 'No background context'}")
-
- print("\nREPORT OUTLINE:\n")
- for section in plan.report_outline:
-     print(f"Section: {section.title}")
-     print(f"Key question: {section.key_question}\n")
-