openai-sdk-helpers 0.0.5__py3-none-any.whl → 0.0.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openai_sdk_helpers/__init__.py +62 -0
- openai_sdk_helpers/agent/__init__.py +31 -0
- openai_sdk_helpers/agent/base.py +330 -0
- openai_sdk_helpers/agent/config.py +66 -0
- openai_sdk_helpers/agent/project_manager.py +511 -0
- openai_sdk_helpers/agent/prompt_utils.py +9 -0
- openai_sdk_helpers/agent/runner.py +215 -0
- openai_sdk_helpers/agent/summarizer.py +85 -0
- openai_sdk_helpers/agent/translator.py +139 -0
- openai_sdk_helpers/agent/utils.py +47 -0
- openai_sdk_helpers/agent/validation.py +97 -0
- openai_sdk_helpers/agent/vector_search.py +462 -0
- openai_sdk_helpers/agent/web_search.py +404 -0
- openai_sdk_helpers/config.py +153 -0
- openai_sdk_helpers/enums/__init__.py +7 -0
- openai_sdk_helpers/enums/base.py +29 -0
- openai_sdk_helpers/environment.py +27 -0
- openai_sdk_helpers/prompt/__init__.py +77 -0
- openai_sdk_helpers/py.typed +0 -0
- openai_sdk_helpers/response/__init__.py +18 -0
- openai_sdk_helpers/response/base.py +501 -0
- openai_sdk_helpers/response/messages.py +211 -0
- openai_sdk_helpers/response/runner.py +104 -0
- openai_sdk_helpers/response/tool_call.py +70 -0
- openai_sdk_helpers/structure/__init__.py +43 -0
- openai_sdk_helpers/structure/agent_blueprint.py +224 -0
- openai_sdk_helpers/structure/base.py +713 -0
- openai_sdk_helpers/structure/plan/__init__.py +13 -0
- openai_sdk_helpers/structure/plan/enum.py +64 -0
- openai_sdk_helpers/structure/plan/plan.py +253 -0
- openai_sdk_helpers/structure/plan/task.py +122 -0
- openai_sdk_helpers/structure/prompt.py +24 -0
- openai_sdk_helpers/structure/responses.py +132 -0
- openai_sdk_helpers/structure/summary.py +65 -0
- openai_sdk_helpers/structure/validation.py +47 -0
- openai_sdk_helpers/structure/vector_search.py +86 -0
- openai_sdk_helpers/structure/web_search.py +46 -0
- openai_sdk_helpers/utils/__init__.py +13 -0
- openai_sdk_helpers/utils/core.py +208 -0
- openai_sdk_helpers/vector_storage/__init__.py +15 -0
- openai_sdk_helpers/vector_storage/cleanup.py +91 -0
- openai_sdk_helpers/vector_storage/storage.py +501 -0
- openai_sdk_helpers/vector_storage/types.py +58 -0
- {openai_sdk_helpers-0.0.5.dist-info → openai_sdk_helpers-0.0.6.dist-info}/METADATA +1 -1
- openai_sdk_helpers-0.0.6.dist-info/RECORD +50 -0
- openai_sdk_helpers-0.0.5.dist-info/RECORD +0 -7
- {openai_sdk_helpers-0.0.5.dist-info → openai_sdk_helpers-0.0.6.dist-info}/WHEEL +0 -0
- {openai_sdk_helpers-0.0.5.dist-info → openai_sdk_helpers-0.0.6.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,462 @@
|
|
|
1
|
+
"""Core workflow management for ``vector search``."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Any, Callable, Dict, List, Optional
|
|
8
|
+
|
|
9
|
+
from agents import custom_span, gen_trace_id, trace
|
|
10
|
+
|
|
11
|
+
from ..structure.vector_search import (
|
|
12
|
+
VectorSearchItemStructure,
|
|
13
|
+
VectorSearchItemResultStructure,
|
|
14
|
+
VectorSearchItemResultsStructure,
|
|
15
|
+
VectorSearchStructure,
|
|
16
|
+
VectorSearchPlanStructure,
|
|
17
|
+
VectorSearchReportStructure,
|
|
18
|
+
)
|
|
19
|
+
from ..vector_storage import VectorStorage
|
|
20
|
+
from .base import AgentBase
|
|
21
|
+
from .config import AgentConfig
|
|
22
|
+
from .utils import run_coroutine_agent_sync
|
|
23
|
+
|
|
24
|
+
MAX_CONCURRENT_SEARCHES = 10
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class VectorSearchPlanner(AgentBase):
    """Agent that turns a user query into a vector search plan.

    Methods
    -------
    run_agent(query)
        Generate a vector search plan for the provided query.
    """

    def __init__(
        self, prompt_dir: Optional[Path] = None, default_model: Optional[str] = None
    ) -> None:
        """Initialize the planner agent.

        Parameters
        ----------
        prompt_dir : pathlib.Path or None, default=None
            Directory containing prompt templates.
        default_model : str or None, default=None
            Default model identifier to use when not defined in config.

        Returns
        -------
        None
        """
        planner_config = AgentConfig(
            name="vector_planner",
            description="Plan vector searches based on a user query.",
            output_type=VectorSearchPlanStructure,
        )
        super().__init__(
            config=planner_config,
            prompt_dir=prompt_dir,
            default_model=default_model,
        )

    async def run_agent(self, query: str) -> VectorSearchPlanStructure:
        """Create a search plan for ``query``.

        Parameters
        ----------
        query : str
            User search query.

        Returns
        -------
        VectorSearchPlanStructure
            Generated search plan.
        """
        plan: VectorSearchPlanStructure = await self.run_async(
            input=query,
            output_type=self._output_type,
        )
        return plan
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
class VectorSearchTool(AgentBase):
    """Execute vector searches defined in a search plan.

    Methods
    -------
    run_agent(search_plan)
        Execute searches described by the plan.
    run_search(item)
        Perform a single vector search and summarise the result.
    """

    def __init__(
        self,
        prompt_dir: Optional[Path] = None,
        default_model: Optional[str] = None,
        store_name: Optional[str] = None,
        max_concurrent_searches: int = MAX_CONCURRENT_SEARCHES,
        vector_storage: Optional[VectorStorage] = None,
        vector_storage_factory: Optional[Callable[[str], VectorStorage]] = None,
    ) -> None:
        """Initialize the search tool agent.

        Parameters
        ----------
        prompt_dir : pathlib.Path or None, default=None
            Directory containing prompt templates.
        default_model : str or None, default=None
            Default model identifier to use when not defined in config.
        store_name : str or None, default=None
            Name of the vector store to query.
        max_concurrent_searches : int, default=MAX_CONCURRENT_SEARCHES
            Maximum number of concurrent vector search tasks to run.
        vector_storage : VectorStorage or None, default=None
            Optional preconfigured vector storage instance to reuse.
        vector_storage_factory : callable, default=None
            Factory for constructing a :class:`VectorStorage` when one is not
            provided. Receives ``store_name`` as an argument.

        Returns
        -------
        None
        """
        self._vector_storage: Optional[VectorStorage] = None
        # Fall back to the package default store when no name is supplied.
        self._store_name = store_name or "editorial"
        self._vector_storage_factory = vector_storage_factory
        if vector_storage is not None:
            self._vector_storage = vector_storage
        self._max_concurrent_searches = max_concurrent_searches
        config = AgentConfig(
            name="vector_search",
            description="Perform vector searches based on a search plan.",
            input_type=VectorSearchPlanStructure,
            output_type=VectorSearchItemResultsStructure,
        )
        super().__init__(
            config=config, prompt_dir=prompt_dir, default_model=default_model
        )

    def _get_vector_storage(self) -> VectorStorage:
        """Return a cached vector storage instance.

        A preconfigured instance (from ``__init__``) wins; otherwise the
        factory is consulted, and finally a default :class:`VectorStorage`
        is constructed. The result is cached for subsequent calls.

        Returns
        -------
        VectorStorage
            Vector storage helper for executing searches.
        """
        if self._vector_storage is None:
            if self._vector_storage_factory is not None:
                self._vector_storage = self._vector_storage_factory(self._store_name)
            else:
                self._vector_storage = VectorStorage(store_name=self._store_name)
        return self._vector_storage

    async def run_agent(
        self, search_plan: VectorSearchPlanStructure
    ) -> VectorSearchItemResultsStructure:
        """Execute all searches in the plan concurrently.

        Parameters
        ----------
        search_plan : VectorSearchPlanStructure
            Plan describing each search to perform.

        Returns
        -------
        VectorSearchItemResultsStructure
            Collection of results for the completed searches. Failures are
            recorded in the structure's ``errors`` list instead of raising.
        """
        with custom_span("Search vector store"):
            semaphore = asyncio.Semaphore(self._max_concurrent_searches)

            async def _bounded_search(
                item: VectorSearchItemStructure,
            ) -> VectorSearchItemResultStructure:
                """Execute a single search within the concurrency limit."""
                async with semaphore:
                    return await self.run_search(item)

            tasks = [
                asyncio.create_task(_bounded_search(item))
                for item in search_plan.searches
            ]
            # return_exceptions=True so one failed search does not cancel
            # the rest; exceptions are converted to error strings below.
            results_list = await asyncio.gather(*tasks, return_exceptions=True)
            results = VectorSearchItemResultsStructure()
            for item, result in zip(search_plan.searches, results_list):
                if isinstance(result, BaseException):
                    results.errors.append(f"Search for '{item.query}' failed: {result}")
                    continue
                # run_search always returns a structure, so no None guard
                # is needed here.
                results.append(result)

            return results

    async def run_search(
        self, item: VectorSearchItemStructure
    ) -> VectorSearchItemResultStructure:
        """Perform a single vector search using the search tool.

        Parameters
        ----------
        item : VectorSearchItemStructure
            Search item containing the query and reason.

        Returns
        -------
        VectorSearchItemResultStructure
            Summarized search result. The ``texts`` attribute is empty when no
            results are found.
        """
        # BUG FIX: ``VectorStorage.search`` is called synchronously (it was
        # never awaited), so invoking it inline blocked the event loop and
        # serialised the supposedly concurrent searches launched by
        # ``run_agent``. Dispatch it to the default executor so searches
        # can actually overlap.
        storage = self._get_vector_storage()
        loop = asyncio.get_running_loop()
        results = await loop.run_in_executor(None, storage.search, item.query)
        if results is None:
            texts: List[str] = []
        else:
            texts = [
                content.text
                for result in results.data
                for content in (result.content or [])
                if getattr(content, "text", None)
            ]
        return VectorSearchItemResultStructure(texts=texts)
|
|
233
|
+
|
|
234
|
+
|
|
235
|
+
class VectorSearchWriter(AgentBase):
    """Agent that compiles vector search results into a report.

    Methods
    -------
    run_agent(query, search_results)
        Compile a final report from search results.
    """

    def __init__(
        self, prompt_dir: Optional[Path] = None, default_model: Optional[str] = None
    ) -> None:
        """Initialize the writer agent.

        Parameters
        ----------
        prompt_dir : pathlib.Path or None, default=None
            Directory containing prompt templates.
        default_model : str or None, default=None
            Default model identifier to use when not defined in config.

        Returns
        -------
        None
        """
        writer_config = AgentConfig(
            name="vector_writer",
            description="Write a report based on search results.",
            output_type=VectorSearchReportStructure,
        )
        super().__init__(
            config=writer_config,
            prompt_dir=prompt_dir,
            default_model=default_model,
        )

    async def run_agent(
        self, query: str, search_results: VectorSearchItemResultsStructure
    ) -> VectorSearchReportStructure:
        """Compile a final report from search results.

        Parameters
        ----------
        query : str
            Original search query.
        search_results : VectorSearchItemResultsStructure
            Results returned from the search step.

        Returns
        -------
        VectorSearchReportStructure
            Generated report for the query.
        """
        # Both the original query and the raw results are exposed to the
        # prompt template via the context mapping.
        context: Dict[str, Any] = {
            "original_query": query,
            "search_results": search_results,
        }
        report: VectorSearchReportStructure = await self.run_async(
            input=query,
            context=context,
            output_type=self._output_type,
        )
        return report
|
|
297
|
+
|
|
298
|
+
|
|
299
|
+
class VectorSearch(AgentBase):
    """Orchestrate the complete vector search workflow.

    The workflow plans searches with :class:`VectorSearchPlanner`, runs them
    with :class:`VectorSearchTool`, and summarises them with
    :class:`VectorSearchWriter`.

    Methods
    -------
    run_agent(search_query)
        Execute the research workflow asynchronously.
    run_agent_sync(search_query)
        Execute the research workflow synchronously.
    run_vector_agent(search_query)
        Convenience asynchronous entry point for the workflow.
    run_vector_agent_sync(search_query)
        Convenience synchronous entry point for the workflow.
    """

    def __init__(
        self,
        config: Optional[AgentConfig] = None,
        prompt_dir: Optional[Path] = None,
        default_model: Optional[str] = None,
        vector_store_name: Optional[str] = None,
        max_concurrent_searches: int = MAX_CONCURRENT_SEARCHES,
        vector_storage: Optional[VectorStorage] = None,
        vector_storage_factory: Optional[Callable[[str], VectorStorage]] = None,
    ) -> None:
        """Create the main VectorSearch agent.

        Parameters
        ----------
        config : AgentConfig or None, default=None
            Optional configuration for the agent.
        prompt_dir : pathlib.Path or None, default=None
            Directory containing prompt templates.
        default_model : str or None, default=None
            Default model identifier to use when not defined in config.
        vector_store_name : str or None, default=None
            Name of the vector store to query.
        max_concurrent_searches : int, default=MAX_CONCURRENT_SEARCHES
            Maximum number of concurrent search tasks to run.
        vector_storage : VectorStorage or None, default=None
            Optional preconfigured vector storage instance to reuse.
        vector_storage_factory : callable, default=None
            Factory used to construct a :class:`VectorStorage` when one is not
            provided. Receives ``vector_store_name`` as an argument.

        Returns
        -------
        None
        """
        if config is None:
            config = AgentConfig(
                name="vector_agent",
                description="Coordinates the research process, including planning, searching, and report writing.",
                output_type=VectorSearchStructure,
                input_type=VectorSearchReportStructure,
            )
        super().__init__(
            config=config, prompt_dir=prompt_dir, default_model=default_model
        )
        # Stash the sub-agent construction parameters; the planner, tool,
        # and writer are built lazily per run inside ``run_agent``.
        self._prompt_dir = prompt_dir
        self._vector_store_name = vector_store_name
        self._max_concurrent_searches = max_concurrent_searches
        self._vector_storage = vector_storage
        self._vector_storage_factory = vector_storage_factory

    async def run_agent(self, search_query: str) -> VectorSearchStructure:
        """Execute the entire research workflow for ``search_query``.

        Parameters
        ----------
        search_query : str
            User's research query.

        Returns
        -------
        VectorSearchStructure
            Completed research output.
        """
        trace_id = gen_trace_id()
        with trace("VectorSearch trace", trace_id=trace_id):
            planner = VectorSearchPlanner(
                prompt_dir=self._prompt_dir, default_model=self.model
            )
            tool = VectorSearchTool(
                prompt_dir=self._prompt_dir,
                default_model=self.model,
                store_name=self._vector_store_name,
                max_concurrent_searches=self._max_concurrent_searches,
                vector_storage=self._vector_storage,
                vector_storage_factory=self._vector_storage_factory,
            )
            writer = VectorSearchWriter(
                prompt_dir=self._prompt_dir, default_model=self.model
            )
            # Plan -> search -> write, each stage under its own span.
            with custom_span("vector_search.plan"):
                plan = await planner.run_agent(query=search_query)
            with custom_span("vector_search.search"):
                item_results = await tool.run_agent(search_plan=plan)
            with custom_span("vector_search.write"):
                report = await writer.run_agent(search_query, item_results)
            return VectorSearchStructure(
                query=search_query,
                plan=plan,
                results=item_results,
                report=report,
            )

    def run_agent_sync(self, search_query: str) -> VectorSearchStructure:
        """Run :meth:`run_agent` synchronously for ``search_query``.

        Parameters
        ----------
        search_query : str
            User's research query.

        Returns
        -------
        VectorSearchStructure
            Completed research output.
        """
        return run_coroutine_agent_sync(self.run_agent(search_query))

    @staticmethod
    async def run_vector_agent(search_query: str) -> VectorSearchStructure:
        """Return a research report for the given query using ``VectorSearch``.

        Parameters
        ----------
        search_query : str
            User's research query.

        Returns
        -------
        VectorSearchStructure
            Completed research output.
        """
        return await VectorSearch().run_agent(search_query=search_query)

    @staticmethod
    def run_vector_agent_sync(search_query: str) -> VectorSearchStructure:
        """Run :meth:`run_vector_agent` synchronously for ``search_query``.

        Parameters
        ----------
        search_query : str
            User's research query.

        Returns
        -------
        VectorSearchStructure
            Completed research output.
        """
        coroutine = VectorSearch.run_vector_agent(search_query=search_query)
        return run_coroutine_agent_sync(coroutine)
|
|
454
|
+
|
|
455
|
+
|
|
456
|
+
__all__ = [
|
|
457
|
+
"MAX_CONCURRENT_SEARCHES",
|
|
458
|
+
"VectorSearchPlanner",
|
|
459
|
+
"VectorSearchTool",
|
|
460
|
+
"VectorSearchWriter",
|
|
461
|
+
"VectorSearch",
|
|
462
|
+
]
|