vectara-agentic 0.4.0__tar.gz → 0.4.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {vectara_agentic-0.4.0/vectara_agentic.egg-info → vectara_agentic-0.4.2}/PKG-INFO +79 -39
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/README.md +46 -3
- vectara_agentic-0.4.2/requirements.txt +43 -0
- vectara_agentic-0.4.2/tests/benchmark_models.py +945 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/tests/conftest.py +9 -5
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/tests/run_tests.py +3 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/tests/test_agent.py +57 -29
- vectara_agentic-0.4.2/tests/test_agent_fallback_memory.py +270 -0
- vectara_agentic-0.4.2/tests/test_agent_memory_consistency.py +229 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/tests/test_agent_type.py +4 -0
- vectara_agentic-0.4.2/tests/test_bedrock.py +61 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/tests/test_fallback.py +1 -1
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/tests/test_gemini.py +7 -22
- vectara_agentic-0.4.2/tests/test_groq.py +61 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/tests/test_private_llm.py +1 -1
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/tests/test_serialization.py +3 -6
- vectara_agentic-0.4.2/tests/test_session_memory.py +252 -0
- vectara_agentic-0.4.2/tests/test_streaming.py +109 -0
- vectara_agentic-0.4.2/tests/test_together.py +62 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/tests/test_vhc.py +3 -2
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/tests/test_workflow.py +9 -28
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic/_observability.py +19 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic/_version.py +1 -1
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic/agent.py +246 -37
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic/agent_core/factory.py +34 -153
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic/agent_core/prompts.py +19 -13
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic/agent_core/serialization.py +17 -8
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic/agent_core/streaming.py +27 -43
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic/agent_core/utils/__init__.py +0 -5
- vectara_agentic-0.4.2/vectara_agentic/agent_core/utils/hallucination.py +157 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic/llm_utils.py +4 -2
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic/sub_query_workflow.py +3 -2
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic/tools.py +0 -19
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic/types.py +9 -3
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2/vectara_agentic.egg-info}/PKG-INFO +79 -39
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic.egg-info/SOURCES.txt +5 -1
- vectara_agentic-0.4.2/vectara_agentic.egg-info/requires.txt +43 -0
- vectara_agentic-0.4.0/requirements.txt +0 -46
- vectara_agentic-0.4.0/tests/test_bedrock.py +0 -46
- vectara_agentic-0.4.0/tests/test_groq.py +0 -46
- vectara_agentic-0.4.0/tests/test_streaming.py +0 -88
- vectara_agentic-0.4.0/vectara_agentic/agent_core/utils/hallucination.py +0 -202
- vectara_agentic-0.4.0/vectara_agentic/agent_core/utils/prompt_formatting.py +0 -56
- vectara_agentic-0.4.0/vectara_agentic.egg-info/requires.txt +0 -46
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/LICENSE +0 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/MANIFEST.in +0 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/setup.cfg +0 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/setup.py +0 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/tests/__init__.py +0 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/tests/endpoint.py +0 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/tests/test_api_endpoint.py +0 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/tests/test_return_direct.py +0 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/tests/test_tools.py +0 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/tests/test_vectara_llms.py +0 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic/__init__.py +0 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic/_callback.py +0 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic/agent_config.py +0 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic/agent_core/__init__.py +0 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic/agent_core/utils/logging.py +0 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic/agent_core/utils/schemas.py +0 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic/agent_core/utils/tools.py +0 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic/agent_endpoint.py +0 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic/db_tools.py +0 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic/tool_utils.py +0 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic/tools_catalog.py +0 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic/utils.py +0 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic.egg-info/dependency_links.txt +0 -0
- {vectara_agentic-0.4.0 → vectara_agentic-0.4.2}/vectara_agentic.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: vectara_agentic
-Version: 0.4.0
+Version: 0.4.2
 Summary: A Python package for creating AI Assistants and AI Agents with Vectara
 Home-page: https://github.com/vectara/py-vectara-agentic
 Author: Ofer Mendelevitch
@@ -16,46 +16,43 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: llama-index==0.
-Requires-Dist: llama-index-core==0.
+Requires-Dist: llama-index==0.13.1
+Requires-Dist: llama-index-core==0.13.1
 Requires-Dist: llama-index-workflow==1.0.1
-Requires-Dist: llama-index-cli==0.
-Requires-Dist: llama-index-indices-managed-vectara==0.
-Requires-Dist: llama-index-
-Requires-Dist: llama-index-
-Requires-Dist: llama-index-
-Requires-Dist: llama-index-llms-
-Requires-Dist: llama-index-llms-
-Requires-Dist: llama-index-llms-
-Requires-Dist: llama-index-llms-
-Requires-Dist: llama-index-llms-
-Requires-Dist: llama-index-
-Requires-Dist: llama-index-
-Requires-Dist: llama-index-
-Requires-Dist: llama-index-tools-
-Requires-Dist: llama-index-tools-
-Requires-Dist:
-Requires-Dist: llama-index-tools-
-Requires-Dist: llama-index-tools-
-Requires-Dist:
-Requires-Dist: llama-index-tools-
-Requires-Dist: llama-index-tools-
-Requires-Dist: llama-index-
-Requires-Dist: llama-index-tools-
-Requires-Dist: llama-index-tools-
-Requires-Dist:
-Requires-Dist:
-Requires-Dist:
-Requires-Dist:
-Requires-Dist: tavily-python>=0.7.9
-Requires-Dist: exa-py>=1.14.8
-Requires-Dist: openinference-instrumentation-llama-index==4.3.1
+Requires-Dist: llama-index-cli==0.5.0
+Requires-Dist: llama-index-indices-managed-vectara==0.5.0
+Requires-Dist: llama-index-llms-openai==0.5.2
+Requires-Dist: llama-index-llms-openai-like==0.5.0
+Requires-Dist: llama-index-llms-anthropic==0.8.2
+Requires-Dist: llama-index-llms-together==0.4.0
+Requires-Dist: llama-index-llms-groq==0.4.0
+Requires-Dist: llama-index-llms-cohere==0.6.0
+Requires-Dist: llama-index-llms-google-genai==0.3.0
+Requires-Dist: llama-index-llms-bedrock-converse==0.8.0
+Requires-Dist: llama-index-tools-yahoo-finance==0.4.0
+Requires-Dist: llama-index-tools-arxiv==0.4.0
+Requires-Dist: llama-index-tools-database==0.4.0
+Requires-Dist: llama-index-tools-google==0.6.0
+Requires-Dist: llama-index-tools-tavily_research==0.4.0
+Requires-Dist: llama_index.tools.brave_search==0.4.0
+Requires-Dist: llama-index-tools-neo4j==0.4.0
+Requires-Dist: llama-index-tools-waii==0.4.0
+Requires-Dist: llama-index-graph-stores-kuzu==0.9.0
+Requires-Dist: llama-index-tools-salesforce==0.4.0
+Requires-Dist: llama-index-tools-slack==0.4.0
+Requires-Dist: llama-index-tools-exa==0.4.0
+Requires-Dist: llama-index-tools-wikipedia==0.4.0
+Requires-Dist: llama-index-tools-bing-search==0.4.0
+Requires-Dist: openai>=1.99.3
+Requires-Dist: tavily-python>=0.7.10
+Requires-Dist: exa-py>=1.14.20
+Requires-Dist: openinference-instrumentation-llama-index==4.3.4
 Requires-Dist: opentelemetry-proto>=1.31.0
 Requires-Dist: arize-phoenix==10.9.1
 Requires-Dist: arize-phoenix-otel==0.10.3
-Requires-Dist: protobuf==5.29.
+Requires-Dist: protobuf==5.29.5
 Requires-Dist: tokenizers>=0.20
-Requires-Dist: pydantic
+Requires-Dist: pydantic>=2.11.5
 Requires-Dist: pandas==2.2.3
 Requires-Dist: retrying==1.3.4
 Requires-Dist: python-dotenv==1.0.1
@@ -125,7 +122,7 @@ Dynamic: summary
 - **Rapid Tool Creation:**
   Build Vectara RAG tools or search tools with a single line of code.
 - **Agent Flexibility:**
-  Supports multiple agent types including `ReAct
+  Supports multiple agent types including `ReAct` and `Function Calling`.
 - **Pre-Built Domain Tools:**
   Tools tailored for finance, legal, and other verticals.
 - **Multi-LLM Integration:**
@@ -532,6 +529,49 @@ Built-in formatters include `format_as_table`, `format_as_json`, and `format_as_

 The human-readable format, if available, is used when using Vectara Hallucination Correction.

+## 🔍 Vectara Hallucination Correction (VHC)
+
+`vectara-agentic` provides built-in support for Vectara Hallucination Correction (VHC), which analyzes agent responses and corrects any detected hallucinations based on the factual content retrieved by VHC-eligible tools.
+
+### Computing VHC
+
+After a chat interaction, you can compute VHC to analyze and correct the agent's response:
+
+```python
+# Chat with the agent
+response = agent.chat("What was Apple's revenue in 2022?")
+print(response.response)
+
+# Compute VHC analysis
+vhc_result = agent.compute_vhc()
+
+# Access corrected text and corrections
+if vhc_result["corrected_text"]:
+    print("Original:", response.response)
+    print("Corrected:", vhc_result["corrected_text"])
+    print("Corrections:", vhc_result["corrections"])
+else:
+    print("No corrections needed or VHC not available")
+```
+
+### Async VHC Computation
+
+For async applications, use `acompute_vhc()`:
+
+```python
+# Async chat
+response = await agent.achat("What was Apple's revenue in 2022?")
+
+# Async VHC computation
+vhc_result = await agent.acompute_vhc()
+```
+
+### VHC Requirements
+
+- VHC requires a valid `VECTARA_API_KEY` environment variable
+- Only VHC-eligible tools (those marked with `vhc_eligible=True`) contribute to the analysis
+- VHC results are cached for each query/response pair to avoid redundant computation
+
 ### Tool Validation

 When creating an agent, you can enable tool validation by setting `validate_tools=True`. This will check that any tools mentioned in your custom instructions actually exist in the agent's tool set:
@@ -745,11 +785,11 @@ agent = Agent(
 ```

 The `AgentConfig` object may include the following items:
-- `agent_type`: the agent type. Valid values are `REACT
+- `agent_type`: the agent type. Valid values are `REACT` or `FUNCTION_CALLING` (default: `FUNCTION_CALLING`).
 - `main_llm_provider` and `tool_llm_provider`: the LLM provider for main agent and for the tools. Valid values are `OPENAI`, `ANTHROPIC`, `TOGETHER`, `GROQ`, `COHERE`, `BEDROCK`, `GEMINI` (default: `OPENAI`).

 > **Note:** Fireworks AI support has been removed. If you were using Fireworks, please migrate to one of the supported providers listed above.
-- `main_llm_model_name` and `tool_llm_model_name`: agent model name for agent and tools (default depends on provider: OpenAI uses gpt-4.1, Gemini uses gemini-2.5-flash).
+- `main_llm_model_name` and `tool_llm_model_name`: agent model name for agent and tools (default depends on provider: OpenAI uses gpt-4.1-mini, Gemini uses gemini-2.5-flash).
 - `observer`: the observer type; should be `ARIZE_PHOENIX` or if undefined no observation framework will be used.
 - `endpoint_api_key`: a secret key if using the API endpoint option (defaults to `dev-api-key`)

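> Editor's note: for context on the `AgentConfig` changes above (the `REACT`/`FUNCTION_CALLING` agent types and the new `gpt-4.1-mini` OpenAI default), here is a minimal sketch of how these options are typically wired together. The import paths for `AgentConfig`, `AgentType`, `ModelProvider`, and `ToolsFactory`, and the `agent_config` keyword on `Agent`, follow the package's documented layout but should be verified against the installed 0.4.2 release; the tool, topic, and instructions are purely illustrative.

```python
# A minimal sketch, assuming the import paths and Agent(..., agent_config=...) keyword below.
from vectara_agentic.agent import Agent
from vectara_agentic.agent_config import AgentConfig
from vectara_agentic.tools import ToolsFactory
from vectara_agentic.types import AgentType, ModelProvider


def add(a: float, b: float) -> float:
    """Add two numbers (toy tool for illustration)."""
    return a + b


config = AgentConfig(
    agent_type=AgentType.REACT,                 # or AgentType.FUNCTION_CALLING (the default)
    main_llm_provider=ModelProvider.ANTHROPIC,  # LLM provider for the main agent
    tool_llm_provider=ModelProvider.OPENAI,     # LLM provider used by LLM-backed tools
)

agent = Agent(
    tools=[ToolsFactory().create_tool(add)],    # replace with Vectara RAG/search tools
    topic="arithmetic",                         # hypothetical topic
    custom_instructions="Answer concisely.",    # hypothetical instructions
    agent_config=config,
)

response = agent.chat("What is 2 + 3?")
print(response.response)
```

Leaving `agent_type` and the model names unset falls back to the defaults noted in the diff (`FUNCTION_CALLING`, and `gpt-4.1-mini` for OpenAI).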
@@ -48,7 +48,7 @@
 - **Rapid Tool Creation:**
   Build Vectara RAG tools or search tools with a single line of code.
 - **Agent Flexibility:**
-  Supports multiple agent types including `ReAct
+  Supports multiple agent types including `ReAct` and `Function Calling`.
 - **Pre-Built Domain Tools:**
   Tools tailored for finance, legal, and other verticals.
 - **Multi-LLM Integration:**
@@ -455,6 +455,49 @@ Built-in formatters include `format_as_table`, `format_as_json`, and `format_as_

 The human-readable format, if available, is used when using Vectara Hallucination Correction.

+## 🔍 Vectara Hallucination Correction (VHC)
+
+`vectara-agentic` provides built-in support for Vectara Hallucination Correction (VHC), which analyzes agent responses and corrects any detected hallucinations based on the factual content retrieved by VHC-eligible tools.
+
+### Computing VHC
+
+After a chat interaction, you can compute VHC to analyze and correct the agent's response:
+
+```python
+# Chat with the agent
+response = agent.chat("What was Apple's revenue in 2022?")
+print(response.response)
+
+# Compute VHC analysis
+vhc_result = agent.compute_vhc()
+
+# Access corrected text and corrections
+if vhc_result["corrected_text"]:
+    print("Original:", response.response)
+    print("Corrected:", vhc_result["corrected_text"])
+    print("Corrections:", vhc_result["corrections"])
+else:
+    print("No corrections needed or VHC not available")
+```
+
+### Async VHC Computation
+
+For async applications, use `acompute_vhc()`:
+
+```python
+# Async chat
+response = await agent.achat("What was Apple's revenue in 2022?")
+
+# Async VHC computation
+vhc_result = await agent.acompute_vhc()
+```
+
+### VHC Requirements
+
+- VHC requires a valid `VECTARA_API_KEY` environment variable
+- Only VHC-eligible tools (those marked with `vhc_eligible=True`) contribute to the analysis
+- VHC results are cached for each query/response pair to avoid redundant computation
+
 ### Tool Validation

 When creating an agent, you can enable tool validation by setting `validate_tools=True`. This will check that any tools mentioned in your custom instructions actually exist in the agent's tool set:
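> Editor's note: to make the `vhc_eligible=True` requirement above concrete, here is a hedged sketch of registering a plain Python function as a VHC-eligible tool. `ToolsFactory.create_tool` is part of the package, but the `vhc_eligible` keyword shown here is an assumption inferred from the bullet above, not a verified signature, and the tool's data is fabricated for illustration.

```python
# A minimal sketch, assuming ToolsFactory.create_tool accepts a vhc_eligible keyword
# (an assumption based on the VHC requirements above, not a verified API).
from vectara_agentic.tools import ToolsFactory


def get_company_revenue(ticker: str, year: int) -> str:
    """Return reported revenue for a company (hypothetical data source)."""
    return f"{ticker} revenue for {year}: $394.3B"


tools_factory = ToolsFactory()
revenue_tool = tools_factory.create_tool(get_company_revenue, vhc_eligible=True)

# Tools created with vhc_eligible=True supply the factual context that
# compute_vhc() / acompute_vhc() use when checking the agent's answer.
```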
@@ -668,11 +711,11 @@ agent = Agent(
 ```

 The `AgentConfig` object may include the following items:
-- `agent_type`: the agent type. Valid values are `REACT
+- `agent_type`: the agent type. Valid values are `REACT` or `FUNCTION_CALLING` (default: `FUNCTION_CALLING`).
 - `main_llm_provider` and `tool_llm_provider`: the LLM provider for main agent and for the tools. Valid values are `OPENAI`, `ANTHROPIC`, `TOGETHER`, `GROQ`, `COHERE`, `BEDROCK`, `GEMINI` (default: `OPENAI`).

 > **Note:** Fireworks AI support has been removed. If you were using Fireworks, please migrate to one of the supported providers listed above.
-- `main_llm_model_name` and `tool_llm_model_name`: agent model name for agent and tools (default depends on provider: OpenAI uses gpt-4.1, Gemini uses gemini-2.5-flash).
+- `main_llm_model_name` and `tool_llm_model_name`: agent model name for agent and tools (default depends on provider: OpenAI uses gpt-4.1-mini, Gemini uses gemini-2.5-flash).
 - `observer`: the observer type; should be `ARIZE_PHOENIX` or if undefined no observation framework will be used.
 - `endpoint_api_key`: a secret key if using the API endpoint option (defaults to `dev-api-key`)

@@ -0,0 +1,43 @@
+llama-index==0.13.1
+llama-index-core==0.13.1
+llama-index-workflow==1.0.1
+llama-index-cli==0.5.0
+llama-index-indices-managed-vectara==0.5.0
+llama-index-llms-openai==0.5.2
+llama-index-llms-openai-like==0.5.0
+llama-index-llms-anthropic==0.8.2
+llama-index-llms-together==0.4.0
+llama-index-llms-groq==0.4.0
+llama-index-llms-cohere==0.6.0
+llama-index-llms-google-genai==0.3.0
+llama-index-llms-bedrock-converse==0.8.0
+llama-index-tools-yahoo-finance==0.4.0
+llama-index-tools-arxiv==0.4.0
+llama-index-tools-database==0.4.0
+llama-index-tools-google==0.6.0
+llama-index-tools-tavily_research==0.4.0
+llama_index.tools.brave_search==0.4.0
+llama-index-tools-neo4j==0.4.0
+llama-index-tools-waii==0.4.0
+llama-index-graph-stores-kuzu==0.9.0
+llama-index-tools-salesforce==0.4.0
+llama-index-tools-slack==0.4.0
+llama-index-tools-exa==0.4.0
+llama-index-tools-wikipedia==0.4.0
+llama-index-tools-bing-search==0.4.0
+openai>=1.99.3
+tavily-python>=0.7.10
+exa-py>=1.14.20
+openinference-instrumentation-llama-index==4.3.4
+opentelemetry-proto>=1.31.0
+arize-phoenix==10.9.1
+arize-phoenix-otel==0.10.3
+protobuf==5.29.5
+tokenizers>=0.20
+pydantic>=2.11.5
+pandas==2.2.3
+retrying==1.3.4
+python-dotenv==1.0.1
+cloudpickle>=3.1.1
+httpx==0.28.1
+commonmark==0.9.1