livellm 1.5.1__tar.gz → 1.5.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: livellm
- Version: 1.5.1
+ Version: 1.5.3
  Summary: Python client for the LiveLLM Server
  Project-URL: Homepage, https://github.com/qalby-tech/livellm-client-py
  Project-URL: Repository, https://github.com/qalby-tech/livellm-client-py
@@ -36,6 +36,7 @@ class BaseLivellmClient(ABC):
  model: str,
  messages: list,
  tools: Optional[list] = None,
+ include_history: bool = False,
  **kwargs
  ) -> AgentResponse:
  ...
@@ -53,6 +54,7 @@ class BaseLivellmClient(ABC):
  model: Optional[str] = None,
  messages: Optional[list] = None,
  tools: Optional[list] = None,
+ include_history: bool = False,
  **kwargs
  ) -> AgentResponse:
  """
@@ -69,7 +71,8 @@ class BaseLivellmClient(ABC):
  provider_uid="...",
  model="gpt-4",
  messages=[TextMessage(...)],
- tools=[]
+ tools=[],
+ include_history=False
  )
 
  Args:
@@ -79,6 +82,7 @@ class BaseLivellmClient(ABC):
  messages: List of messages
  tools: Optional list of tools
  gen_config: Optional generation configuration
+ include_history: Whether to include full conversation history in the response
 
  Returns:
  AgentResponse with the agent's output
@@ -103,7 +107,8 @@ class BaseLivellmClient(ABC):
  model=model,
  messages=messages,
  tools=tools or [],
- gen_config=kwargs or None
+ gen_config=kwargs or None,
+ include_history=include_history
  )
  return await self.handle_agent_run(agent_request)
 
@@ -122,6 +127,7 @@ class BaseLivellmClient(ABC):
  model: str,
  messages: list,
  tools: Optional[list] = None,
+ include_history: bool = False,
  **kwargs
  ) -> AsyncIterator[AgentResponse]:
  ...
@@ -139,6 +145,7 @@ class BaseLivellmClient(ABC):
  model: Optional[str] = None,
  messages: Optional[list] = None,
  tools: Optional[list] = None,
+ include_history: bool = False,
  **kwargs
  ) -> AsyncIterator[AgentResponse]:
  """
@@ -157,7 +164,8 @@ class BaseLivellmClient(ABC):
  provider_uid="...",
  model="gpt-4",
  messages=[TextMessage(...)],
- tools=[]
+ tools=[],
+ include_history=False
  ):
  ...
 
@@ -168,6 +176,7 @@ class BaseLivellmClient(ABC):
  messages: List of messages
  tools: Optional list of tools
  gen_config: Optional generation configuration
+ include_history: Whether to include full conversation history in the response
 
  Returns:
  AsyncIterator of AgentResponse chunks
@@ -192,7 +201,8 @@ class BaseLivellmClient(ABC):
  model=model,
  messages=messages,
  tools=tools or [],
- gen_config=kwargs or None
+ gen_config=kwargs or None,
+ include_history=include_history
  )
  stream = self.handle_agent_run_stream(agent_request)
 
@@ -1,7 +1,7 @@
  # models for chat messages
  from pydantic import BaseModel, Field, model_validator, field_serializer
  from enum import Enum
- from typing import Optional, Union
+ from typing import Optional, Union, Any, Dict
 
  class MessageRole(str, Enum):
  USER = "user"
@@ -32,10 +32,10 @@ class BinaryMessage(Message):
  class ToolCallMessage(Message):
  """Message representing a tool call made by the agent"""
  tool_name: str = Field(..., description="The name of the tool being called")
- args: dict = Field(..., description="The arguments passed to the tool")
+ args: Union[Dict[str, Any], str] = Field(..., description="The arguments passed to the tool")
 
  class ToolReturnMessage(Message):
  """Message representing the return value from a tool call"""
  tool_name: str = Field(..., description="The name of the tool that was called")
- content: str = Field(..., description="The return value from the tool")
+ content: Any = Field(..., description="The return value from the tool")
 
@@ -1,6 +1,6 @@
  [project]
  name = "livellm"
- version = "1.5.1"
+ version = "1.5.3"
  description = "Python client for the LiveLLM Server"
  readme = "README.md"
  requires-python = ">=3.10"
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes