vectara-agentic 0.2.13__py3-none-any.whl → 0.2.15__py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.

Note: this version of vectara-agentic has been flagged as a potentially problematic release; see the registry's advisory page for details.

@@ -0,0 +1,174 @@
+ """
+ Utilities for the Vectara agentic.
+ """
+ from types import MethodType
+ from typing import Tuple, Callable, Optional
+ from functools import lru_cache
+ import tiktoken
+
+ from llama_index.core.llms import LLM
+ from llama_index.llms.openai import OpenAI
+ from llama_index.llms.anthropic import Anthropic
+
+ from .types import LLMRole, AgentType, ModelProvider
+ from .agent_config import AgentConfig
+ from .tool_utils import _updated_openai_prepare_chat_with_tools
+
+ provider_to_default_model_name = {
+     ModelProvider.OPENAI: "gpt-4o",
+     ModelProvider.ANTHROPIC: "claude-3-7-sonnet-latest",
+     ModelProvider.TOGETHER: "Qwen/Qwen2.5-72B-Instruct-Turbo",
+     ModelProvider.GROQ: "meta-llama/llama-4-scout-17b-16e-instruct",
+     ModelProvider.FIREWORKS: "accounts/fireworks/models/firefunction-v2",
+     ModelProvider.BEDROCK: "anthropic.claude-3-7-sonnet-20250219-v1:0",
+     ModelProvider.COHERE: "command-a-03-2025",
+     ModelProvider.GEMINI: "models/gemini-2.0-flash",
+ }
+
+ DEFAULT_MODEL_PROVIDER = ModelProvider.OPENAI
+
+
+ @lru_cache(maxsize=None)
+ def _get_llm_params_for_role(
+     role: LLMRole, config: Optional[AgentConfig] = None
+ ) -> Tuple[ModelProvider, str]:
+     """
+     Get the model provider and model name for the specified role.
+
+     If config is None, a new AgentConfig() is instantiated using environment defaults.
+     """
+     config = config or AgentConfig()  # fallback to default config
+
+     if role == LLMRole.TOOL:
+         model_provider = ModelProvider(config.tool_llm_provider)
+         # If the user hasn't explicitly set a tool_llm_model_name,
+         # fallback to provider default from provider_to_default_model_name
+         model_name = config.tool_llm_model_name or provider_to_default_model_name.get(
+             model_provider
+         )
+     else:
+         model_provider = ModelProvider(config.main_llm_provider)
+         model_name = config.main_llm_model_name or provider_to_default_model_name.get(
+             model_provider
+         )
+
+     # If the agent type is OpenAI, check that the main LLM provider is also OpenAI.
+     if role == LLMRole.MAIN and config.agent_type == AgentType.OPENAI:
+         if model_provider != ModelProvider.OPENAI:
+             raise ValueError(
+                 "OpenAI agent requested but main model provider is not OpenAI."
+             )
+
+     return model_provider, model_name
+
+
+ @lru_cache(maxsize=None)
+ def get_tokenizer_for_model(
+     role: LLMRole, config: Optional[AgentConfig] = None
+ ) -> Optional[Callable]:
+     """
+     Get the tokenizer for the specified model, as determined by the role & config.
+     """
+     model_provider, model_name = _get_llm_params_for_role(role, config)
+     if model_provider == ModelProvider.OPENAI:
+         # This might raise an exception if the model_name is unknown to tiktoken
+         return tiktoken.encoding_for_model(model_name).encode
+     if model_provider == ModelProvider.ANTHROPIC:
+         return Anthropic().tokenizer
+     return None
+
+
+ @lru_cache(maxsize=None)
+ def get_llm(role: LLMRole, config: Optional[AgentConfig] = None) -> LLM:
+     """
+     Get the LLM for the specified role, using the provided config
+     or a default if none is provided.
+     """
+     max_tokens = 8192
+     model_provider, model_name = _get_llm_params_for_role(role, config)
+     if model_provider == ModelProvider.OPENAI:
+         llm = OpenAI(
+             model=model_name,
+             temperature=0,
+             is_function_calling_model=True,
+             strict=True,
+             max_tokens=max_tokens,
+             pydantic_program_mode="openai",
+         )
+     elif model_provider == ModelProvider.ANTHROPIC:
+         llm = Anthropic(
+             model=model_name,
+             temperature=0,
+             max_tokens=max_tokens,
+         )
+     elif model_provider == ModelProvider.GEMINI:
+         from llama_index.llms.google_genai import GoogleGenAI
+
+         llm = GoogleGenAI(
+             model=model_name,
+             temperature=0,
+             is_function_calling_model=True,
+             allow_parallel_tool_calls=True,
+             max_tokens=max_tokens,
+         )
+     elif model_provider == ModelProvider.TOGETHER:
+         from llama_index.llms.together import TogetherLLM
+
+         llm = TogetherLLM(
+             model=model_name,
+             temperature=0,
+             is_function_calling_model=True,
+             max_tokens=max_tokens,
+         )
+         # pylint: disable=protected-access
+         llm._prepare_chat_with_tools = MethodType(
+             _updated_openai_prepare_chat_with_tools,
+             llm,
+         )
+     elif model_provider == ModelProvider.GROQ:
+         from llama_index.llms.groq import Groq
+
+         llm = Groq(
+             model=model_name,
+             temperature=0,
+             is_function_calling_model=True,
+             max_tokens=max_tokens,
+         )
+         # pylint: disable=protected-access
+         llm._prepare_chat_with_tools = MethodType(
+             _updated_openai_prepare_chat_with_tools,
+             llm,
+         )
+     elif model_provider == ModelProvider.FIREWORKS:
+         from llama_index.llms.fireworks import Fireworks
+
+         llm = Fireworks(model=model_name, temperature=0, max_tokens=max_tokens)
+     elif model_provider == ModelProvider.BEDROCK:
+         from llama_index.llms.bedrock import Bedrock
+
+         llm = Bedrock(model=model_name, temperature=0, max_tokens=max_tokens)
+     elif model_provider == ModelProvider.COHERE:
+         from llama_index.llms.cohere import Cohere
+
+         llm = Cohere(model=model_name, temperature=0, max_tokens=max_tokens)
+     elif model_provider == ModelProvider.PRIVATE:
+         from llama_index.llms.openai_like import OpenAILike
+
+         llm = OpenAILike(
+             model=model_name,
+             temperature=0,
+             is_function_calling_model=True,
+             is_chat_model=True,
+             api_base=config.private_llm_api_base,
+             api_key=config.private_llm_api_key,
+             max_tokens=max_tokens,
+         )
+         # pylint: disable=protected-access
+         llm._prepare_chat_with_tools = MethodType(
+             _updated_openai_prepare_chat_with_tools,
+             llm,
+         )
+
+     else:
+         raise ValueError(f"Unknown LLM provider: {model_provider}")
+     return llm
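
For context, a minimal sketch of how the helpers added in this file might be called from application code. The module path (vectara_agentic.utils) and the provider chosen by a default AgentConfig() are assumptions inferred from the imports and docstrings above, not something this diff states.

# Hypothetical usage sketch -- module path and default provider are assumptions,
# not confirmed by this diff.
from vectara_agentic.types import LLMRole
from vectara_agentic.utils import get_llm, get_tokenizer_for_model

# With no explicit config, _get_llm_params_for_role builds a default AgentConfig()
# and resolves the model from provider_to_default_model_name
# (e.g. gpt-4o if the default provider is OpenAI).
main_llm = get_llm(LLMRole.MAIN)

# Assuming the tool LLM resolves to an OpenAI model, this is tiktoken's encode
# function and returns a list of token ids; Anthropic returns its own tokenizer,
# and every other provider returns None.
tokenizer = get_tokenizer_for_model(LLMRole.TOOL)
if tokenizer is not None:
    token_count = len(tokenizer("How many tokens is this sentence?"))

Note that get_llm also rebinds _prepare_chat_with_tools on the Together, Groq, and OpenAILike clients via MethodType, presumably to route their tool-calling request preparation through the updated OpenAI-style helper imported from tool_utils.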