alita-sdk 0.3.176__py3-none-any.whl → 0.3.177__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alita_sdk/community/__init__.py +7 -17
- alita_sdk/tools/carrier/api_wrapper.py +6 -0
- alita_sdk/tools/carrier/backend_tests_tool.py +308 -7
- alita_sdk/tools/carrier/carrier_sdk.py +18 -0
- alita_sdk/tools/carrier/tools.py +2 -1
- {alita_sdk-0.3.176.dist-info → alita_sdk-0.3.177.dist-info}/METADATA +1 -2
- {alita_sdk-0.3.176.dist-info → alita_sdk-0.3.177.dist-info}/RECORD +10 -41
- alita_sdk/community/browseruse/__init__.py +0 -73
- alita_sdk/community/browseruse/api_wrapper.py +0 -288
- alita_sdk/community/deep_researcher/__init__.py +0 -70
- alita_sdk/community/deep_researcher/agents/__init__.py +0 -1
- alita_sdk/community/deep_researcher/agents/baseclass.py +0 -182
- alita_sdk/community/deep_researcher/agents/knowledge_gap_agent.py +0 -74
- alita_sdk/community/deep_researcher/agents/long_writer_agent.py +0 -251
- alita_sdk/community/deep_researcher/agents/planner_agent.py +0 -124
- alita_sdk/community/deep_researcher/agents/proofreader_agent.py +0 -80
- alita_sdk/community/deep_researcher/agents/thinking_agent.py +0 -64
- alita_sdk/community/deep_researcher/agents/tool_agents/__init__.py +0 -20
- alita_sdk/community/deep_researcher/agents/tool_agents/crawl_agent.py +0 -87
- alita_sdk/community/deep_researcher/agents/tool_agents/search_agent.py +0 -96
- alita_sdk/community/deep_researcher/agents/tool_selector_agent.py +0 -83
- alita_sdk/community/deep_researcher/agents/utils/__init__.py +0 -0
- alita_sdk/community/deep_researcher/agents/utils/parse_output.py +0 -148
- alita_sdk/community/deep_researcher/agents/writer_agent.py +0 -63
- alita_sdk/community/deep_researcher/api_wrapper.py +0 -116
- alita_sdk/community/deep_researcher/deep_research.py +0 -185
- alita_sdk/community/deep_researcher/examples/deep_example.py +0 -30
- alita_sdk/community/deep_researcher/examples/iterative_example.py +0 -34
- alita_sdk/community/deep_researcher/examples/report_plan_example.py +0 -27
- alita_sdk/community/deep_researcher/iterative_research.py +0 -419
- alita_sdk/community/deep_researcher/llm_config.py +0 -87
- alita_sdk/community/deep_researcher/main.py +0 -67
- alita_sdk/community/deep_researcher/tools/__init__.py +0 -2
- alita_sdk/community/deep_researcher/tools/crawl_website.py +0 -109
- alita_sdk/community/deep_researcher/tools/web_search.py +0 -294
- alita_sdk/community/deep_researcher/utils/__init__.py +0 -0
- alita_sdk/community/deep_researcher/utils/md_to_pdf.py +0 -8
- alita_sdk/community/deep_researcher/utils/os.py +0 -21
- {alita_sdk-0.3.176.dist-info → alita_sdk-0.3.177.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.176.dist-info → alita_sdk-0.3.177.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.176.dist-info → alita_sdk-0.3.177.dist-info}/top_level.txt +0 -0
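The removals below drop the entire `browseruse` and `deep_researcher` community packages from `alita_sdk.community`. Downstream code that imports them will fail on upgrade; the guard below is a minimal sketch, assuming `DeepResearcher` was re-exported from the package `__init__.py` in 0.3.176 (the deleted example scripts suggest this, but the diff does not show the export itself).

```python
# Minimal sketch: tolerate the 0.3.177 removal of the community deep_researcher
# package. The import path follows the file list above; the re-export of
# DeepResearcher from the package __init__ is an assumption based on the
# deleted example scripts, not something this diff shows directly.
try:
    from alita_sdk.community.deep_researcher import DeepResearcher
except ImportError:
    # Removed in 0.3.177: fall back, or pin alita-sdk==0.3.176 if the tool is still needed.
    DeepResearcher = None
```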
alita_sdk/community/deep_researcher/deep_research.py
@@ -1,185 +0,0 @@
-import asyncio
-import time
-from .iterative_research import IterativeResearcher
-from .agents.planner_agent import init_planner_agent, ReportPlan, ReportPlanSection
-from .agents.proofreader_agent import ReportDraftSection, ReportDraft, init_proofreader_agent
-from .agents.long_writer_agent import init_long_writer_agent, write_report
-from .agents.baseclass import ResearchRunner
-from typing import List, Optional, Any
-from agents.tracing import trace, gen_trace_id, custom_span
-from .llm_config import LLMConfig, create_default_config
-
-
-class DeepResearcher:
-    """
-    Manager for the deep research workflow that breaks down a query into a report plan with sections and then runs an iterative research loop for each section.
-    """
-    def __init__(
-        self,
-        max_iterations: int = 5,
-        max_time_minutes: int = 10,
-        verbose: bool = True,
-        tracing: bool = False,
-        config: Optional[LLMConfig] = None,
-        llm: Optional[Any] = None,
-        alita: Optional[Any] = None
-    ):
-        self.max_iterations = max_iterations
-        self.max_time_minutes = max_time_minutes
-        self.verbose = verbose
-        self.tracing = tracing
-        self.alita = alita
-
-        # Initialize config with langchain LLM if provided
-        if llm is not None:
-            self.config = create_default_config(langchain_llm=llm)
-        elif config is not None:
-            self.config = config
-        else:
-            self.config = create_default_config()
-
-        self.planner_agent = init_planner_agent(self.config)
-        self.proofreader_agent = init_proofreader_agent(self.config)
-        self.long_writer_agent = init_long_writer_agent(self.config)
-
-    async def run(self, query: str) -> str:
-        """Run the deep research workflow"""
-        start_time = time.time()
-
-        if self.tracing:
-            trace_id = gen_trace_id()
-            workflow_trace = trace("deep_researcher", trace_id=trace_id)
-            print(f"View trace: https://platform.openai.com/traces/trace?trace_id={trace_id}")
-            workflow_trace.start(mark_as_current=True)
-
-        # First build the report plan which outlines the sections and compiles any relevant background context on the query
-        report_plan: ReportPlan = await self._build_report_plan(query)
-
-        # Run the independent research loops concurrently for each section and gather the results
-        research_results: List[str] = await self._run_research_loops(report_plan)
-
-        # Create the final report from the original report plan and the drafts of each section
-        final_report: str = await self._create_final_report(query, report_plan, research_results)
-
-        elapsed_time = time.time() - start_time
-        self._log_message(f"DeepResearcher completed in {int(elapsed_time // 60)} minutes and {int(elapsed_time % 60)} seconds")
-
-        if self.tracing:
-            workflow_trace.finish(reset_current=True)
-
-        return final_report
-
-    async def _build_report_plan(self, query: str) -> ReportPlan:
-        """Build the initial report plan including the report outline (sections and key questions) and background context"""
-        if self.tracing:
-            span = custom_span(name="build_report_plan")
-            span.start(mark_as_current=True)
-
-        self._log_message("=== Building Report Plan ===")
-        user_message = f"QUERY: {query}"
-        result = await ResearchRunner.run(
-            self.planner_agent,
-            user_message
-        )
-        report_plan = result.final_output_as(ReportPlan)
-
-        if self.verbose:
-            num_sections = len(report_plan.report_outline)
-            message_log = '\n\n'.join(f"Section: {section.title}\nKey question: {section.key_question}" for section in report_plan.report_outline)
-            if report_plan.background_context:
-                message_log += f"\n\nThe following background context has been included for the report build:\n{report_plan.background_context}"
-            else:
-                message_log += "\n\nNo background context was provided for the report build.\n"
-            self._log_message(f"Report plan created with {num_sections} sections:\n{message_log}")
-
-        if self.tracing:
-            span.finish(reset_current=True)
-
-        return report_plan
-
-    async def _run_research_loops(
-        self,
-        report_plan: ReportPlan
-    ) -> List[str]:
-        """For a given ReportPlan, run a research loop concurrently for each section and gather the results"""
-        async def run_research_for_section(section: ReportPlanSection):
-            iterative_researcher = IterativeResearcher(
-                max_iterations=self.max_iterations,
-                max_time_minutes=self.max_time_minutes,
-                verbose=self.verbose,
-                tracing=False, # Do not trace as this will conflict with the tracing we already have set up for the deep researcher
-                config=self.config
-            )
-            args = {
-                "query": section.key_question,
-                "output_length": "",
-                "output_instructions": "",
-                "background_context": report_plan.background_context,
-            }
-
-            # Only use custom span if tracing is enabled
-            if self.tracing:
-                with custom_span(
-                    name=f"iterative_researcher:{section.title}",
-                    data={"key_question": section.key_question}
-                ):
-                    return await iterative_researcher.run(**args)
-            else:
-                return await iterative_researcher.run(**args)
-
-        self._log_message("=== Initializing Research Loops ===")
-        # Run all research loops concurrently in a single gather call
-        research_results = await asyncio.gather(
-            *(run_research_for_section(section) for section in report_plan.report_outline)
-        )
-        return research_results
-
-    async def _create_final_report(
-        self,
-        query: str,
-        report_plan: ReportPlan,
-        section_drafts: List[str],
-        use_long_writer: bool = True
-    ) -> str:
-        """Create the final report from the original report plan and the drafts of each section"""
-        if self.tracing:
-            span = custom_span(name="create_final_report")
-            span.start(mark_as_current=True)
-
-        # Each section is a string containing the markdown for the section
-        # From this we need to build a ReportDraft object to feed to the final proofreader agent
-        report_draft = ReportDraft(
-            sections=[]
-        )
-        for i, section_draft in enumerate(section_drafts):
-            report_draft.sections.append(
-                ReportDraftSection(
-                    section_title=report_plan.report_outline[i].title,
-                    section_content=section_draft
-                )
-            )
-
-        self._log_message("\n=== Building Final Report ===")
-
-        if use_long_writer:
-            final_output = await write_report(self.long_writer_agent, query, report_plan.report_title, report_draft)
-        else:
-            user_prompt = f"QUERY:\n{query}\n\nREPORT DRAFT:\n{report_draft.model_dump_json()}"
-            # Run the proofreader agent to produce the final report
-            final_report = await ResearchRunner.run(
-                self.proofreader_agent,
-                user_prompt
-            )
-            final_output = final_report.final_output
-
-        self._log_message(f"Final report completed")
-
-        if self.tracing:
-            span.finish(reset_current=True)
-
-        return final_output
-
-    def _log_message(self, message: str) -> None:
-        """Log a message if verbose is True"""
-        if self.verbose:
-            print(message)
alita_sdk/community/deep_researcher/examples/deep_example.py
@@ -1,30 +0,0 @@
-"""
-Example usage of the DeepResearcher to produce a report.
-
-See deep_output.txt for the console output from running this script, and deep_output.pdf for the final report
-"""
-
-import asyncio
-import os
-import sys
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
-from deep_researcher import DeepResearcher
-
-manager = DeepResearcher(
-    max_iterations=3,
-    max_time_minutes=10,
-    verbose=True,
-    tracing=True
-)
-
-query = "Write a report on Plato - who was he, what were his main works " \
-        "and what are the main philosophical ideas he's known for"
-
-report = asyncio.run(
-    manager.run(
-        query
-    )
-)
-
-print("\n=== Final Report ===")
-print(report)
alita_sdk/community/deep_researcher/examples/iterative_example.py
@@ -1,34 +0,0 @@
-"""
-Example usage of the IterativeResearcher to produce a report.
-
-See iterative_output.txt for the console output from running this script, and iterative_output.pdf for the final report
-"""
-
-import asyncio
-import os
-import sys
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
-from deep_researcher import IterativeResearcher
-
-manager = IterativeResearcher(
-    max_iterations=5,
-    max_time_minutes=10,
-    verbose=True,
-    tracing=True
-)
-
-query = "Write a report on Plato - who was he, what were his main works " \
-        "and what are the main philosophical ideas he's known for"
-output_length = "5 pages"
-output_instructions = ""
-
-report = asyncio.run(
-    manager.run(
-        query,
-        output_length=output_length,
-        output_instructions=output_instructions
-    )
-)
-
-print("\n=== Final Report ===")
-print(report)
alita_sdk/community/deep_researcher/examples/report_plan_example.py
@@ -1,27 +0,0 @@
-import asyncio
-from deep_researcher.agents.planner_agent import planner_agent, ReportPlan
-from agents import gen_trace_id, trace
-from deep_researcher import ResearchRunner
-
-
-async def run_report_planner(query):
-    trace_id = gen_trace_id()
-
-    with trace("Deep Research trace", trace_id=trace_id):
-        print(f"View trace: https://platform.openai.com/traces/{trace_id}")
-        result = await ResearchRunner.run(planner_agent, query)
-        plan = result.final_output_as(ReportPlan)
-    return plan
-
-
-user_query = "Provide a detailed overview of the company Quantera (quantera.io) from an investor's perspective"
-
-plan = asyncio.run(run_report_planner(user_query))
-
-print(f"BACKGROUND CONTEXT:\n{plan.background_context if plan.background_context else 'No background context'}")
-
-print("\nREPORT OUTLINE:\n")
-for section in plan.report_outline:
-    print(f"Section: {section.title}")
-    print(f"Key question: {section.key_question}\n")
-
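For anyone reproducing the removed deep-research flow against a pinned 0.3.176 install, the deleted sources above contain the whole recipe: build a `ReportPlan` as in the last example, then run one `IterativeResearcher` per planned section, which is what `DeepResearcher._run_research_loops` did. The sketch below restates that pattern; it assumes the 0.3.176 module layout from the file list and that `IterativeResearcher` is importable from the package `__init__`, and it reuses the `run` keyword arguments shown in the deleted code.

```python
import asyncio

# Assumed 0.3.176 import path (per the file list above); not shown directly in this diff.
from alita_sdk.community.deep_researcher import IterativeResearcher


async def research_sections(report_plan):
    """Run one IterativeResearcher per planned section, mirroring the removed
    DeepResearcher._run_research_loops shown earlier in this diff."""
    async def run_section(section):
        researcher = IterativeResearcher(
            max_iterations=3,
            max_time_minutes=10,
            verbose=True,
            tracing=False,
        )
        # Keyword arguments mirror the args dict built in the deleted _run_research_loops.
        return await researcher.run(
            section.key_question,
            output_length="",
            output_instructions="",
            background_context=report_plan.background_context,
        )

    # Research all sections concurrently, as the removed class did via asyncio.gather.
    return await asyncio.gather(*(run_section(s) for s in report_plan.report_outline))
```

Combined with `run_report_planner` from the last deleted example, this covers the plan-then-research half of the removed workflow; the final long-writer/proofreader pass lived in `_create_final_report`.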