vectara-agentic 0.2.24__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.

Potentially problematic release.

@@ -53,7 +53,7 @@ from .agent_config import AgentConfig
 provider_to_default_model_name = {
     ModelProvider.OPENAI: "gpt-4.1",
     ModelProvider.ANTHROPIC: "claude-sonnet-4-20250514",
-    ModelProvider.TOGETHER: "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+    ModelProvider.TOGETHER: "moonshotai/Kimi-K2-Instruct",
     ModelProvider.GROQ: "deepseek-r1-distill-llama-70b",
     ModelProvider.FIREWORKS: "accounts/fireworks/models/firefunction-v2",
     ModelProvider.BEDROCK: "us.anthropic.claude-sonnet-4-20250514-v1:0",
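
The Together default moves from Llama-4-Scout to Kimi-K2. Code that depends on the old model should pin it explicitly; a minimal sketch, assuming the `AgentConfig` fields `main_llm_provider` / `main_llm_model_name` and import paths carried over from earlier releases:

```python
from vectara_agentic.agent_config import AgentConfig
from vectara_agentic.types import ModelProvider

# Pin the pre-0.3.1 Together default instead of inheriting Kimi-K2.
config = AgentConfig(
    main_llm_provider=ModelProvider.TOGETHER,
    main_llm_model_name="meta-llama/Llama-4-Scout-17B-16E-Instruct",
)
```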
@@ -109,7 +109,7 @@ def get_tokenizer_for_model(
     try:
         model_provider, model_name = _get_llm_params_for_role(role, config)
         if model_provider == ModelProvider.OPENAI:
-            return tiktoken.encoding_for_model('gpt-4o').encode
+            return tiktoken.encoding_for_model("gpt-4o").encode
         if model_provider == ModelProvider.ANTHROPIC:
             return Anthropic().tokenizer
     except Exception:
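
Quoting fix aside, the OpenAI branch always returns the `gpt-4o` tokenizer's `.encode` callable, regardless of the configured model name. A quick sketch of what callers get back (assuming `tiktoken` is installed):

```python
import tiktoken

# Same lookup as above; .encode maps text -> a list of token ids.
tokenize = tiktoken.encoding_for_model("gpt-4o").encode
print(len(tokenize("Count the tokens in this sentence.")))  # token count
```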
@@ -124,8 +124,18 @@ def get_llm(role: LLMRole, config: Optional[AgentConfig] = None) -> LLM:
     Get the LLM for the specified role, using the provided config
     or a default if none is provided.
     """
-    max_tokens = 8192
     model_provider, model_name = _get_llm_params_for_role(role, config)
+    max_tokens = (
+        16384
+        if model_provider
+        in [
+            ModelProvider.GEMINI,
+            ModelProvider.TOGETHER,
+            ModelProvider.OPENAI,
+            ModelProvider.ANTHROPIC,
+        ]
+        else 8192
+    )
     if model_provider == ModelProvider.OPENAI:
         llm = OpenAI(
             model=model_name,
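
The flat 8192-token cap becomes provider-dependent: four providers are bumped to 16384, the rest keep 8192. The expression in isolation (a sketch with a stand-in enum; only the shape of the conditional matters):

```python
from enum import Enum

class ModelProvider(str, Enum):  # stand-in for the package's enum
    OPENAI = "openai"
    ANTHROPIC = "anthropic"
    GEMINI = "gemini"
    TOGETHER = "together"
    GROQ = "groq"

def max_tokens_for(provider: ModelProvider) -> int:
    # Providers with larger output windows get 16384; everything else keeps 8192.
    high_output = {
        ModelProvider.GEMINI,
        ModelProvider.TOGETHER,
        ModelProvider.OPENAI,
        ModelProvider.ANTHROPIC,
    }
    return 16384 if provider in high_output else 8192

assert max_tokens_for(ModelProvider.OPENAI) == 16384
assert max_tokens_for(ModelProvider.GROQ) == 8192
```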
@@ -143,7 +153,9 @@ def get_llm(role: LLMRole, config: Optional[AgentConfig] = None) -> LLM:
         )
     elif model_provider == ModelProvider.GEMINI:
         if GoogleGenAI is None:
-            raise ImportError("google_genai not available. Install with: pip install llama-index-llms-google-genai")
+            raise ImportError(
+                "google_genai not available. Install with: pip install llama-index-llms-google-genai"
+            )
         llm = GoogleGenAI(
             model=model_name,
             temperature=0,
@@ -153,7 +165,9 @@ def get_llm(role: LLMRole, config: Optional[AgentConfig] = None) -> LLM:
         )
     elif model_provider == ModelProvider.TOGETHER:
         if TogetherLLM is None:
-            raise ImportError("together not available. Install with: pip install llama-index-llms-together")
+            raise ImportError(
+                "together not available. Install with: pip install llama-index-llms-together"
+            )
         llm = TogetherLLM(
             model=model_name,
             temperature=0,
@@ -162,7 +176,9 @@ def get_llm(role: LLMRole, config: Optional[AgentConfig] = None) -> LLM:
         )
     elif model_provider == ModelProvider.GROQ:
         if Groq is None:
-            raise ImportError("groq not available. Install with: pip install llama-index-llms-groq")
+            raise ImportError(
+                "groq not available. Install with: pip install llama-index-llms-groq"
+            )
         llm = Groq(
             model=model_name,
             temperature=0,
@@ -171,11 +187,15 @@ def get_llm(role: LLMRole, config: Optional[AgentConfig] = None) -> LLM:
         )
     elif model_provider == ModelProvider.FIREWORKS:
         if Fireworks is None:
-            raise ImportError("fireworks not available. Install with: pip install llama-index-llms-fireworks")
+            raise ImportError(
+                "fireworks not available. Install with: pip install llama-index-llms-fireworks"
+            )
         llm = Fireworks(model=model_name, temperature=0, max_tokens=max_tokens)
     elif model_provider == ModelProvider.BEDROCK:
         if BedrockConverse is None:
-            raise ImportError("bedrock_converse not available. Install with: pip install llama-index-llms-bedrock")
+            raise ImportError(
+                "bedrock_converse not available. Install with: pip install llama-index-llms-bedrock"
+            )
         aws_profile_name = os.getenv("AWS_PROFILE", None)
         aws_region = os.getenv("AWS_REGION", "us-east-2")

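Note that the Bedrock branch is configured from the environment rather than from `AgentConfig`, so exporting the variables before building the agent is enough (values below are illustrative):

```python
import os

os.environ["AWS_PROFILE"] = "my-bedrock-profile"  # hypothetical profile name
os.environ["AWS_REGION"] = "us-west-2"            # defaults to "us-east-2" if unset
```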
@@ -188,11 +208,15 @@ def get_llm(role: LLMRole, config: Optional[AgentConfig] = None) -> LLM:
         )
     elif model_provider == ModelProvider.COHERE:
         if Cohere is None:
-            raise ImportError("cohere not available. Install with: pip install llama-index-llms-cohere")
+            raise ImportError(
+                "cohere not available. Install with: pip install llama-index-llms-cohere"
+            )
         llm = Cohere(model=model_name, temperature=0, max_tokens=max_tokens)
     elif model_provider == ModelProvider.PRIVATE:
         if OpenAILike is None:
-            raise ImportError("openai_like not available. Install with: pip install llama-index-llms-openai-like")
+            raise ImportError(
+                "openai_like not available. Install with: pip install llama-index-llms-openai-like"
+            )
         llm = OpenAILike(
             model=model_name,
             temperature=0,
@@ -6,8 +6,16 @@ import inspect
 import re

 from typing import (
-    Callable, List, Dict, Any, Optional, Union, Type, Tuple,
-    get_origin, get_args
+    Callable,
+    List,
+    Dict,
+    Any,
+    Optional,
+    Union,
+    Type,
+    Tuple,
+    get_origin,
+    get_args,
 )
 from pydantic import BaseModel, create_model
 from pydantic_core import PydanticUndefined
@@ -83,7 +91,7 @@ class VectaraTool(FunctionTool):
             tool_metadata,
             callback,
             async_callback,
-            partial_params
+            partial_params,
         )
         vectara_tool = cls(
             tool_type=tool_type,
@@ -119,7 +127,8 @@ class VectaraTool(FunctionTool):
         self, *args: Any, ctx: Optional[Context] = None, **kwargs: Any
     ) -> ToolOutput:
         try:
-            return super().call(*args, ctx=ctx, **kwargs)
+            result = super().call(*args, ctx=ctx, **kwargs)
+            return self._format_tool_output(result)
         except TypeError as e:
             sig = inspect.signature(self.metadata.fn_schema)
             valid_parameters = list(sig.parameters.keys())
@@ -148,7 +157,8 @@ class VectaraTool(FunctionTool):
         self, *args: Any, ctx: Optional[Context] = None, **kwargs: Any
     ) -> ToolOutput:
         try:
-            return await super().acall(*args, ctx=ctx, **kwargs)
+            result = await super().acall(*args, ctx=ctx, **kwargs)
+            return self._format_tool_output(result)
         except TypeError as e:
             sig = inspect.signature(self.metadata.fn_schema)
             valid_parameters = list(sig.parameters.keys())
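
`_call` and `_acall` now route every successful result through a single `_format_tool_output` hook (added further down in this diff). The wrap-the-parent pattern, reduced to a runnable sketch with hypothetical class names:

```python
class BaseTool:
    """Stand-in for the parent class whose call() returns a raw result."""

    def call(self, *args, **kwargs):
        return "raw-result"


class FormattedTool(BaseTool):
    def call(self, *args, **kwargs):
        result = super().call(*args, **kwargs)   # delegate to the parent
        return self._format_tool_output(result)  # one post-processing choke point

    def _format_tool_output(self, result):
        return f"[formatted] {result}"


print(FormattedTool().call())  # -> [formatted] raw-result
```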
@@ -166,6 +176,7 @@ class VectaraTool(FunctionTool):
             return err_output
         except Exception as e:
             import traceback
+
             err_output = ToolOutput(
                 tool_name=self.metadata.name,
                 content=f"Tool {self.metadata.name} Malfunction: {str(e)}, traceback: {traceback.format_exc()}",
@@ -174,10 +185,39 @@ class VectaraTool(FunctionTool):
         )
         return err_output

+    def _format_tool_output(self, result: ToolOutput) -> ToolOutput:
+        """Format tool output to use human-readable representation if available."""
+        if hasattr(result, "content") and _is_human_readable_output(result.content):
+            try:
+                # Use human-readable format for content, keep raw output
+                human_readable_content = result.content.to_human_readable()
+                raw_output = result.content.get_raw_output()
+                return ToolOutput(
+                    tool_name=result.tool_name,
+                    content=human_readable_content,
+                    raw_input=result.raw_input,
+                    raw_output=raw_output,
+                )
+            except Exception as e:
+                # If formatting fails, fall back to original content with error info
+                import logging
+
+                logging.warning(
+                    f"Failed to format tool output for {result.tool_name}: {e}"
+                )
+                return ToolOutput(
+                    tool_name=result.tool_name,
+                    content=f"[Formatting Error] {str(result.content)}",
+                    raw_input=result.raw_input,
+                    raw_output={"error": str(e), "original_content": result.content},
+                )
+        return result
+

 class EmptyBaseModel(BaseModel):
     """empty base model"""

+
 def _clean_type_repr(type_repr: str) -> str:
     """Cleans the string representation of a type."""
     # Replace <class 'somename'> with somename
@@ -188,6 +228,7 @@ def _clean_type_repr(type_repr: str) -> str:
     type_repr = type_repr.replace("typing.", "")
     return type_repr

+
 def _format_type(annotation) -> str:
     """
     Turn things like Union[int, str, NoneType] into 'int | str | None',
@@ -209,6 +250,7 @@ def _format_type(annotation) -> str:
     type_repr = _clean_type_repr(type_repr)
     return type_repr.replace("NoneType", "None")

+
 def _make_docstring(
     function: Callable[..., ToolOutput],
     tool_name: str,
@@ -267,11 +309,15 @@ def _make_docstring(
            ty_info = schema_prop["type"]
            if isinstance(ty_info, str):
                ty_str = _clean_type_repr(ty_info)
-            elif isinstance(ty_info, list):  # Handle JSON schema array type e.g., ["integer", "string"]
+            elif isinstance(
+                ty_info, list
+            ):  # Handle JSON schema array type e.g., ["integer", "string"]
                ty_str = " | ".join([_clean_type_repr(t) for t in ty_info])

            # inline default if present
-            default_txt = f", default={default!r}" if default is not PydanticUndefined else ""
+            default_txt = (
+                f", default={default!r}" if default is not PydanticUndefined else ""
+            )

            # inline examples if any
            if examples:
@@ -288,8 +334,8 @@ def _make_docstring(
         doc_lines.append(f" dict[str, Any]: {return_desc}")

     initial_docstring = "\n".join(doc_lines)
-    collapsed_spaces = re.sub(r' {2,}', ' ', initial_docstring)
-    final_docstring = re.sub(r'\n{2,}', '\n', collapsed_spaces).strip()
+    collapsed_spaces = re.sub(r" {2,}", " ", initial_docstring)
+    final_docstring = re.sub(r"\n{2,}", "\n", collapsed_spaces).strip()
     return final_docstring

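The two `re.sub` calls collapse runs of spaces and blank lines out of the assembled docstring; their behavior in isolation:

```python
import re

doc = "Args:\n\n    query   (str):  the    search query\n\n\nReturns:"
collapsed_spaces = re.sub(r" {2,}", " ", doc)              # space runs -> one space
final = re.sub(r"\n{2,}", "\n", collapsed_spaces).strip()  # blank lines -> newline
print(final)  # "Args:\n query (str): the search query\nReturns:"
```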
@@ -317,13 +363,17 @@ def create_tool_from_dynamic_function(
     if tool_args_schema is None:
         tool_args_schema = EmptyBaseModel

-    if not isinstance(tool_args_schema, type) or not issubclass(tool_args_schema, BaseModel):
+    if not isinstance(tool_args_schema, type) or not issubclass(
+        tool_args_schema, BaseModel
+    ):
         raise TypeError("tool_args_schema must be a Pydantic BaseModel subclass")

     fields: Dict[str, Any] = {}
     base_params = []
     for field_name, field_info in base_params_model.model_fields.items():
-        default = Ellipsis if field_info.default is PydanticUndefined else field_info.default
+        default = (
+            Ellipsis if field_info.default is PydanticUndefined else field_info.default
+        )
         param = inspect.Parameter(
             field_name,
             inspect.Parameter.POSITIONAL_OR_KEYWORD,
@@ -338,7 +388,9 @@ def create_tool_from_dynamic_function(
         if field_name in fields:
             continue

-        default = Ellipsis if field_info.default is PydanticUndefined else field_info.default
+        default = (
+            Ellipsis if field_info.default is PydanticUndefined else field_info.default
+        )
         param = inspect.Parameter(
             field_name,
             inspect.Parameter.POSITIONAL_OR_KEYWORD,
@@ -362,9 +414,7 @@ def create_tool_from_dynamic_function(
     function.__name__ = re.sub(r"[^A-Za-z0-9_]", "_", tool_name)

     function.__doc__ = _make_docstring(
-        function,
-        tool_name, tool_description, fn_schema,
-        all_params, compact_docstring
+        function, tool_name, tool_description, fn_schema, all_params, compact_docstring
     )
     tool = VectaraTool.from_defaults(
         fn=function,
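
`create_tool_from_dynamic_function` synthesizes a signature for the tool function from Pydantic field metadata via `inspect.Parameter`, with `PydanticUndefined` marking required fields. The core trick in isolation (a sketch, not the library's exact code):

```python
import inspect
from pydantic import BaseModel
from pydantic_core import PydanticUndefined


class QueryArgs(BaseModel):
    query: str       # required: default is PydanticUndefined
    top_k: int = 10  # optional: concrete default


def fn(*args, **kwargs):
    return kwargs


params = []
for name, info in QueryArgs.model_fields.items():
    # Translate Pydantic's "no default" sentinel into inspect's.
    default = (
        inspect.Parameter.empty if info.default is PydanticUndefined else info.default
    )
    params.append(
        inspect.Parameter(
            name,
            inspect.Parameter.POSITIONAL_OR_KEYWORD,
            default=default,
            annotation=info.annotation,
        )
    )
fn.__signature__ = inspect.Signature(params)

print(inspect.signature(fn))  # (query: str, top_k: int = 10)
```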
@@ -526,3 +576,115 @@ def build_filter_string(
     if fixed_filter and joined:
         return f"({fixed_filter}) AND ({joined})"
     return fixed_filter or joined
+
+
+def _is_human_readable_output(obj: Any) -> bool:
+    """Check if an object implements the HumanReadableOutput protocol."""
+    return (
+        hasattr(obj, "to_human_readable")
+        and hasattr(obj, "get_raw_output")
+        and callable(getattr(obj, "to_human_readable", None))
+        and callable(getattr(obj, "get_raw_output", None))
+    )
+
+
+def create_human_readable_output(
+    raw_output: Any, formatter: Optional[Callable[[Any], str]] = None
+) -> "HumanReadableToolOutput":
+    """Create a HumanReadableToolOutput wrapper for tool outputs."""
+    return HumanReadableToolOutput(raw_output, formatter)
+
+
+def format_as_table(data: List[Dict[str, Any]], max_width: int = 80) -> str:
+    """Format list of dictionaries as a table."""
+    if not data:
+        return "No data to display"
+
+    # Get all unique keys
+    all_keys = set()
+    for item in data:
+        all_keys.update(item.keys())
+
+    headers = list(all_keys)
+
+    # Calculate column widths
+    col_widths = {}
+    for header in headers:
+        col_widths[header] = max(
+            len(header), max(len(str(item.get(header, ""))) for item in data)
+        )
+        # Limit column width
+        col_widths[header] = min(col_widths[header], max_width // len(headers))
+
+    # Create table
+    lines = []
+
+    # Header row
+    header_row = " | ".join(header.ljust(col_widths[header]) for header in headers)
+    lines.append(header_row)
+    lines.append("-" * len(header_row))
+
+    # Data rows
+    for item in data:
+        row = " | ".join(
+            str(item.get(header, "")).ljust(col_widths[header])[: col_widths[header]]
+            for header in headers
+        )
+        lines.append(row)
+
+    return "\n".join(lines)
+
+
+def format_as_json(data: Any, indent: int = 2) -> str:
+    """Format data as pretty-printed JSON."""
+    import json
+
+    try:
+        return json.dumps(data, indent=indent, ensure_ascii=False)
+    except (TypeError, ValueError):
+        return str(data)
+
+
+def format_as_markdown_list(items: List[Any], numbered: bool = False) -> str:
+    """Format items as markdown list."""
+    if not items:
+        return "No items to display"
+
+    if numbered:
+        return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items))
+    else:
+        return "\n".join(f"- {item}" for item in items)
+
+
+class HumanReadableToolOutput:
+    """Wrapper class that implements HumanReadableOutput protocol."""
+
+    def __init__(
+        self, raw_output: Any, formatter: Optional[Callable[[Any], str]] = None
+    ):
+        self._raw_output = raw_output
+        self._formatter = formatter or str
+
+    def to_human_readable(self) -> str:
+        """Convert the output to a human-readable format."""
+        try:
+            return self._formatter(self._raw_output)
+        except Exception as e:
+            import logging
+
+            logging.warning(f"Failed to format output with custom formatter: {e}")
+            # Fallback to string representation
+            try:
+                return str(self._raw_output)
+            except Exception:
+                return f"[Error formatting output: {e}]"
+
+    def get_raw_output(self) -> Any:
+        """Get the raw output data."""
+        return self._raw_output
+
+    def __str__(self) -> str:
+        return self.to_human_readable()
+
+    def __repr__(self) -> str:
+        return f"HumanReadableToolOutput({self._raw_output!r})"
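
Tying the additions together: a tool can wrap its raw rows so that `VectaraTool._format_tool_output` above hands the rendered table to the LLM while preserving the raw records. Usage sketch built only from the helpers in this diff (the sample rows are illustrative):

```python
rows = [
    {"title": "Quarterly report", "score": 0.92},
    {"title": "Annual summary", "score": 0.87},
]

# to_human_readable() renders the table; get_raw_output() keeps the dicts intact.
wrapped = create_human_readable_output(rows, format_as_table)

print(wrapped)                   # __str__ delegates to to_human_readable()
print(wrapped.get_raw_output())  # original list of dicts, untouched
```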