llama-index-llms-openai 0.2.9 → 0.2.11 (py3-none-any.whl)

This diff shows the changes between two publicly released versions of the package as published to their public registry, and is provided for informational purposes only. Between 0.2.9 and 0.2.11, the private `OpenAI._update_tool_calls` method was replaced by a module-level `update_tool_calls` helper in `llama_index.llms.openai.utils`, and `gpt-4o-mini` was added to the `AZURE_TURBO_MODELS` context-window table.
llama_index/llms/openai/base.py

@@ -64,6 +64,7 @@ from llama_index.llms.openai.utils import (
     resolve_openai_credentials,
     to_openai_message_dicts,
     resolve_tool_choice,
+    update_tool_calls,
 )
 from llama_index.core.bridge.pydantic import (
     BaseModel,
@@ -450,53 +451,6 @@ class OpenAI(FunctionCallingLLM):
             additional_kwargs=self._get_response_token_counts(response),
         )
 
-    def _update_tool_calls(
-        self,
-        tool_calls: List[ChoiceDeltaToolCall],
-        tool_calls_delta: Optional[List[ChoiceDeltaToolCall]],
-    ) -> List[ChoiceDeltaToolCall]:
-        """
-        Use the tool_calls_delta objects received from openai stream chunks
-        to update the running tool_calls object.
-
-        Args:
-            tool_calls (List[ChoiceDeltaToolCall]): the list of tool calls
-            tool_calls_delta (ChoiceDeltaToolCall): the delta to update tool_calls
-
-        Returns:
-            List[ChoiceDeltaToolCall]: the updated tool calls
-        """
-        # openai provides chunks consisting of tool_call deltas one tool at a time
-        if tool_calls_delta is None:
-            return tool_calls
-
-        tc_delta = tool_calls_delta[0]
-
-        if len(tool_calls) == 0:
-            tool_calls.append(tc_delta)
-        else:
-            # we need to either update latest tool_call or start a
-            # new tool_call (i.e., multiple tools in this turn) and
-            # accumulate that new tool_call with future delta chunks
-            t = tool_calls[-1]
-            if t.index != tc_delta.index:
-                # the start of a new tool call, so append to our running tool_calls list
-                tool_calls.append(tc_delta)
-            else:
-                # not the start of a new tool call, so update last item of tool_calls
-
-                # validations to get passed by mypy
-                assert t.function is not None
-                assert tc_delta.function is not None
-                assert t.function.arguments is not None
-                assert t.function.name is not None
-                assert t.id is not None
-
-                t.function.arguments += tc_delta.function.arguments or ""
-                t.function.name += tc_delta.function.name or ""
-                t.id += tc_delta.id or ""
-        return tool_calls
-
     @llm_retry_decorator
     def _stream_chat(
         self, messages: Sequence[ChatMessage], **kwargs: Any
@@ -533,7 +487,7 @@ class OpenAI(FunctionCallingLLM):
 
                 additional_kwargs = {}
                 if is_function:
-                    tool_calls = self._update_tool_calls(tool_calls, delta.tool_calls)
+                    tool_calls = update_tool_calls(tool_calls, delta.tool_calls)
                     additional_kwargs["tool_calls"] = tool_calls
 
                 yield ChatResponse(
@@ -783,7 +737,7 @@ class OpenAI(FunctionCallingLLM):
 
                 additional_kwargs = {}
                 if is_function:
-                    tool_calls = self._update_tool_calls(tool_calls, delta.tool_calls)
+                    tool_calls = update_tool_calls(tool_calls, delta.tool_calls)
                     additional_kwargs["tool_calls"] = tool_calls
 
                 yield ChatResponse(
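
On the consumer side, the running list surfaces through `additional_kwargs["tool_calls"]` on each streamed `ChatResponse`. Below is a minimal, hedged sketch of reading it; the tool schema, the model choice, and the assumption that a raw `tools` kwarg is forwarded unchanged to the underlying chat.completions call are all illustrative, not taken from this diff.

    # Sketch only: assumes OPENAI_API_KEY is set in the environment.
    from llama_index.core.llms import ChatMessage
    from llama_index.llms.openai import OpenAI

    llm = OpenAI(model="gpt-4o-mini")

    weather_tool = {  # hypothetical tool schema for illustration
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the weather for a city",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }

    last = None
    for chunk in llm.stream_chat(
        [ChatMessage(role="user", content="What's the weather in Paris?")],
        tools=[weather_tool],
    ):
        last = chunk  # each chunk carries the tool calls accumulated so far

    if last is not None:
        print(last.message.additional_kwargs.get("tool_calls"))
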
llama_index/llms/openai/utils.py

@@ -67,6 +67,7 @@ GPT4_MODELS: Dict[str, int] = {
 
 AZURE_TURBO_MODELS: Dict[str, int] = {
     "gpt-4o": 128000,
+    "gpt-4o-mini": 128000,
     "gpt-35-turbo-16k": 16384,
     "gpt-35-turbo": 4096,
     # 0125 (2024) model (JSON mode)
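
These tables are consulted when resolving a model name to its context window size. A one-line sketch, assuming the `openai_modelname_to_contextsize()` helper exported from the same utils module:

    from llama_index.llms.openai.utils import openai_modelname_to_contextsize

    # With the new entry in place, the Azure-style "gpt-4o-mini" name maps to 128k tokens.
    print(openai_modelname_to_contextsize("gpt-4o-mini"))  # 128000
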
@@ -439,3 +440,50 @@ def resolve_tool_choice(tool_choice: Union[str, dict] = "auto") -> Union[str, dict]:
         return {"type": "function", "function": {"name": tool_choice}}
 
     return tool_choice
+
+
+def update_tool_calls(
+    tool_calls: List[ChoiceDeltaToolCall],
+    tool_calls_delta: Optional[List[ChoiceDeltaToolCall]],
+) -> List[ChoiceDeltaToolCall]:
+    """
+    Use the tool_calls_delta objects received from openai stream chunks
+    to update the running tool_calls object.
+
+    Args:
+        tool_calls (List[ChoiceDeltaToolCall]): the list of tool calls
+        tool_calls_delta (ChoiceDeltaToolCall): the delta to update tool_calls
+
+    Returns:
+        List[ChoiceDeltaToolCall]: the updated tool calls
+    """
+    # openai provides chunks consisting of tool_call deltas one tool at a time
+    if tool_calls_delta is None:
+        return tool_calls
+
+    tc_delta = tool_calls_delta[0]
+
+    if len(tool_calls) == 0:
+        tool_calls.append(tc_delta)
+    else:
+        # we need to either update latest tool_call or start a
+        # new tool_call (i.e., multiple tools in this turn) and
+        # accumulate that new tool_call with future delta chunks
+        t = tool_calls[-1]
+        if t.index != tc_delta.index:
+            # the start of a new tool call, so append to our running tool_calls list
+            tool_calls.append(tc_delta)
+        else:
+            # not the start of a new tool call, so update last item of tool_calls
+
+            # validations to get passed by mypy
+            assert t.function is not None
+            assert tc_delta.function is not None
+            assert t.function.arguments is not None
+            assert t.function.name is not None
+            assert t.id is not None
+
+            t.function.arguments += tc_delta.function.arguments or ""
+            t.function.name += tc_delta.function.name or ""
+            t.id += tc_delta.id or ""
+    return tool_calls
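
Since the helper is now a public function in `llama_index.llms.openai.utils`, its accumulation behaviour can be exercised directly. A small sketch with hand-built deltas; the call id, function name, and argument fragments below are made up for illustration:

    from typing import List

    from openai.types.chat.chat_completion_chunk import (
        ChoiceDeltaToolCall,
        ChoiceDeltaToolCallFunction,
    )

    from llama_index.llms.openai.utils import update_tool_calls

    tool_calls: List[ChoiceDeltaToolCall] = []

    # First chunk: carries the call id, function name, and the start of the arguments.
    tool_calls = update_tool_calls(
        tool_calls,
        [
            ChoiceDeltaToolCall(
                index=0,
                id="call_1",
                function=ChoiceDeltaToolCallFunction(name="get_weather", arguments='{"city": '),
            )
        ],
    )

    # Later chunk for the same index: only an argument fragment, appended in place.
    tool_calls = update_tool_calls(
        tool_calls,
        [ChoiceDeltaToolCall(index=0, function=ChoiceDeltaToolCallFunction(arguments='"Paris"}'))],
    )

    # A chunk without tool-call deltas leaves the running list untouched.
    tool_calls = update_tool_calls(tool_calls, None)

    assert tool_calls[0].function is not None
    print(tool_calls[0].function.name)       # get_weather
    print(tool_calls[0].function.arguments)  # {"city": "Paris"}
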
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: llama-index-llms-openai
-Version: 0.2.9
+Version: 0.2.11
 Summary: llama-index llms openai integration
 License: MIT
 Author: llama-index
@@ -0,0 +1,6 @@
+llama_index/llms/openai/__init__.py,sha256=vm3cIBSGkBFlE77GyfyN0EhpJcnJZN95QMhPN53EkbE,148
+llama_index/llms/openai/base.py,sha256=qSjaNN3O8mxDoo_ViKg1j9DuDb8jOT70epu4Xp2NiQg,35119
+llama_index/llms/openai/utils.py,sha256=QliTQj2fVRz-zwg5rYVltLKRAgab29dIfpSEvo3Df9k,15771
+llama_index_llms_openai-0.2.11.dist-info/METADATA,sha256=mWoW8A4K0t9hV2kyt_vY_7zkploC8vhCRqDCtELBimI,649
+llama_index_llms_openai-0.2.11.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+llama_index_llms_openai-0.2.11.dist-info/RECORD,,
@@ -1,6 +0,0 @@
-llama_index/llms/openai/__init__.py,sha256=vm3cIBSGkBFlE77GyfyN0EhpJcnJZN95QMhPN53EkbE,148
-llama_index/llms/openai/base.py,sha256=h3jab2lijnvWRFwx2M7dlWsY48JBMmehDsU55ebjHBE,36997
-llama_index/llms/openai/utils.py,sha256=zduRqlmGGNy7c1uoDIdDcExD47Myq9WpczVTvRid2cI,14025
-llama_index_llms_openai-0.2.9.dist-info/METADATA,sha256=SzGCTND1cCtoBjU3qmCE5k_qhvV105uQISfoMmmj0Pw,648
-llama_index_llms_openai-0.2.9.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-llama_index_llms_openai-0.2.9.dist-info/RECORD,,