langroid 0.6.7__py3-none-any.whl → 0.9.0__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to their respective public registries, and is provided for informational purposes only.
- langroid/agent/base.py +499 -55
- langroid/agent/callbacks/chainlit.py +1 -1
- langroid/agent/chat_agent.py +191 -37
- langroid/agent/chat_document.py +142 -29
- langroid/agent/openai_assistant.py +20 -4
- langroid/agent/special/lance_doc_chat_agent.py +25 -18
- langroid/agent/special/lance_rag/critic_agent.py +37 -5
- langroid/agent/special/lance_rag/query_planner_agent.py +102 -63
- langroid/agent/special/lance_tools.py +10 -2
- langroid/agent/special/sql/sql_chat_agent.py +69 -13
- langroid/agent/task.py +179 -43
- langroid/agent/tool_message.py +19 -7
- langroid/agent/tools/__init__.py +5 -0
- langroid/agent/tools/orchestration.py +216 -0
- langroid/agent/tools/recipient_tool.py +6 -11
- langroid/agent/tools/rewind_tool.py +1 -1
- langroid/agent/typed_task.py +19 -0
- langroid/language_models/.chainlit/config.toml +121 -0
- langroid/language_models/.chainlit/translations/en-US.json +231 -0
- langroid/language_models/base.py +114 -12
- langroid/language_models/mock_lm.py +10 -1
- langroid/language_models/openai_gpt.py +260 -36
- langroid/mytypes.py +0 -1
- langroid/parsing/parse_json.py +19 -2
- langroid/utils/pydantic_utils.py +19 -0
- langroid/vector_store/base.py +3 -1
- langroid/vector_store/lancedb.py +2 -0
- {langroid-0.6.7.dist-info → langroid-0.9.0.dist-info}/METADATA +4 -1
- {langroid-0.6.7.dist-info → langroid-0.9.0.dist-info}/RECORD +32 -33
- pyproject.toml +2 -1
- langroid/agent/special/lance_rag_new/__init__.py +0 -9
- langroid/agent/special/lance_rag_new/critic_agent.py +0 -171
- langroid/agent/special/lance_rag_new/lance_rag_task.py +0 -144
- langroid/agent/special/lance_rag_new/query_planner_agent.py +0 -222
- langroid/agent/team.py +0 -1758
- {langroid-0.6.7.dist-info → langroid-0.9.0.dist-info}/LICENSE +0 -0
- {langroid-0.6.7.dist-info → langroid-0.9.0.dist-info}/WHEEL +0 -0
langroid/language_models/base.py
CHANGED
```diff
@@ -9,6 +9,7 @@ from typing import (
     Callable,
     Dict,
     List,
+    Literal,
     Optional,
     Tuple,
     Type,
```
```diff
@@ -32,6 +33,11 @@ def noop_fn(*args: List[Any], **kwargs: Dict[str, Any]) -> None:
     pass


+FunctionCallTypes = Literal["none", "auto"]
+ToolChoiceTypes = Literal["none", "auto", "required"]
+ToolTypes = Literal["function"]
+
+
 class LLMConfig(BaseSettings):
     type: str = "openai"
     streamer: Optional[Callable[[Any], None]] = noop_fn
```
```diff
@@ -60,7 +66,7 @@ class LLMConfig(BaseSettings):

 class LLMFunctionCall(BaseModel):
     """
-    Structure of LLM response
+    Structure of LLM response indicating it "wants" to call a function.
     Modeled after OpenAI spec for `function_call` field in ChatCompletion API.
     """

```
```diff
@@ -103,6 +109,45 @@ class LLMFunctionSpec(BaseModel):
     parameters: Dict[str, Any]


+class OpenAIToolCall(BaseModel):
+    """
+    Represents a single tool call in a list of tool calls generated by OpenAI LLM API.
+    See https://platform.openai.com/docs/api-reference/chat/create
+
+    Attributes:
+        id: The id of the tool call.
+        type: The type of the tool call;
+            only "function" is currently possible (7/26/24).
+        function: The function call.
+    """
+
+    id: str | None = None
+    type: ToolTypes = "function"
+    function: LLMFunctionCall | None = None
+
+    @staticmethod
+    def from_dict(message: Dict[str, Any]) -> "OpenAIToolCall":
+        """
+        Initialize from dictionary.
+        Args:
+            d: dictionary containing fields to initialize
+        """
+        id = message["id"]
+        type = message["type"]
+        function = LLMFunctionCall.from_dict(message["function"])
+        return OpenAIToolCall(id=id, type=type, function=function)
+
+    def __str__(self) -> str:
+        if self.function is None:
+            return ""
+        return "OAI-TOOL: " + json.dumps(self.function.dict(), indent=2)
+
+
+class OpenAIToolSpec(BaseModel):
+    type: ToolTypes
+    function: LLMFunctionSpec
+
+
 class LLMTokenUsage(BaseModel):
     prompt_tokens: int = 0
     completion_tokens: int = 0
```
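A quick sketch of how the new `OpenAIToolCall` round-trips a raw API dict (hypothetical values; this assumes `LLMFunctionCall.from_dict`, not shown in this diff, accepts the OpenAI wire format where `arguments` arrives as a JSON string):

```python
from langroid.language_models.base import OpenAIToolCall

# One element of the `tool_calls` list in an OpenAI ChatCompletion
# response (hypothetical id/name/arguments):
raw = {
    "id": "call_abc123",
    "type": "function",
    "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'},
}
tc = OpenAIToolCall.from_dict(raw)
print(tc)  # "OAI-TOOL: {...}" via the __str__ defined above
```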
```diff
@@ -132,18 +177,26 @@ class Role(str, Enum):
     SYSTEM = "system"
     ASSISTANT = "assistant"
     FUNCTION = "function"
+    TOOL = "tool"


 class LLMMessage(BaseModel):
     """
-    Class representing
+    Class representing an entry in the msg-history sent to the LLM API.
+    It could be one of these:
+    - a user message
+    - an LLM ("Assistant") response
+    - a fn-call or tool-call-list from an OpenAI-compatible LLM API response
+    - a result or results from executing a fn or tool-call(s)
     """

     role: Role
     name: Optional[str] = None
+    tool_call_id: Optional[str] = None  # which OpenAI LLM tool this is a response to
     tool_id: str = ""  # used by OpenAIAssistant
     content: str
     function_call: Optional[LLMFunctionCall] = None
+    tool_calls: Optional[List[OpenAIToolCall]] = None
     timestamp: datetime = Field(default_factory=datetime.utcnow)
     # link to corresponding chat document, for provenance/rewind purposes
     chat_document_id: str = ""
```
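With the new `Role.TOOL` and `tool_call_id` fields, a tool-result entry in the history can be sketched like this (the id and payload are made up):

```python
from langroid.language_models.base import LLMMessage, Role

# Result of executing the tool call with (hypothetical) id "call_abc123",
# expressed as a history entry for the API's role="tool" message type:
tool_result = LLMMessage(
    role=Role.TOOL,
    tool_call_id="call_abc123",
    content='{"temp_c": 21}',
)
```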
```diff
@@ -169,6 +222,14 @@ class LLMMessage(BaseModel):
             dict_no_none["function_call"]["arguments"] = json.dumps(
                 dict_no_none["function_call"]["arguments"]
             )
+        if "tool_calls" in dict_no_none:
+            # convert tool calls to API format
+            for tc in dict_no_none["tool_calls"]:
+                if "arguments" in tc["function"]:
+                    # arguments must be a string
+                    tc["function"]["arguments"] = json.dumps(
+                        tc["function"]["arguments"]
+                    )
         # IMPORTANT! drop fields that are not expected in API call
         dict_no_none.pop("tool_id", None)
         dict_no_none.pop("timestamp", None)
```
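The reason for this conversion: in the API payload `function.arguments` must be a JSON string, while the parsed `LLMFunctionCall.arguments` is a dict. A standalone sketch of the same transformation on a plain dict:

```python
import json

# A tool call as it might sit in dict_no_none["tool_calls"], with
# `arguments` still a dict (hypothetical values):
tc = {
    "id": "call_1",
    "type": "function",
    "function": {"name": "add", "arguments": {"a": 1, "b": 2}},
}
if "arguments" in tc["function"]:
    tc["function"]["arguments"] = json.dumps(tc["function"]["arguments"])

assert tc["function"]["arguments"] == '{"a": 1, "b": 2}'
```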
```diff
@@ -190,7 +251,9 @@ class LLMResponse(BaseModel):
     """

     message: str
+    # TODO tool_id needs to generalize to multi-tool calls
     tool_id: str = ""  # used by OpenAIAssistant
+    oai_tool_calls: Optional[List[OpenAIToolCall]] = None
     function_call: Optional[LLMFunctionCall] = None
     usage: Optional[LLMTokenUsage] = None
     cached: bool = False
```
```diff
@@ -198,18 +261,28 @@ class LLMResponse(BaseModel):
     def __str__(self) -> str:
         if self.function_call is not None:
             return str(self.function_call)
+        elif self.oai_tool_calls:
+            return "\n".join(str(tc) for tc in self.oai_tool_calls)
         else:
             return self.message

     def to_LLMMessage(self) -> LLMMessage:
-
-
-
+        """Convert LLM response to an LLMMessage, to be included in the
+        message-list sent to the API.
+        This is currently NOT used in any significant way in the library, and is only
+        provided as a utility to construct a message list for the API when directly
+        working with an LLM object.
+
+        In a `ChatAgent`, an LLM response is first converted to a ChatDocument,
+        which is in turn converted to an LLMMessage via `ChatDocument.to_LLMMessage()`
+        See `ChatAgent._prep_llm_messages()` and `ChatAgent.llm_response_messages`
+        """
         return LLMMessage(
-            role=
-            content=
-            name=name,
+            role=Role.ASSISTANT,
+            content=self.message,
+            name=None if self.function_call is None else self.function_call.name,
             function_call=self.function_call,
+            tool_calls=self.oai_tool_calls,
         )

     def get_recipient_and_message(
```
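Per the new docstring, `to_LLMMessage` is only meant for driving a bare LLM object directly. A minimal sketch of that usage (the concrete `LanguageModel` is passed in; its construction is elided):

```python
from typing import List

from langroid.language_models.base import LanguageModel, LLMMessage, Role

def one_turn(llm: LanguageModel, question: str) -> List[LLMMessage]:
    """Ask one question and return the grown message history."""
    history = [
        LLMMessage(role=Role.SYSTEM, content="You are terse."),
        LLMMessage(role=Role.USER, content=question),
    ]
    response = llm.chat(messages=history)  # LLMResponse
    # to_LLMMessage() carries content, function_call, and tool_calls forward:
    history.append(response.to_LLMMessage())
    return history
```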
```diff
@@ -221,8 +294,9 @@ class LLMResponse(BaseModel):
         of the recipient name if specified.

         Two cases:
-        (a) `message` contains "TO: <name> <content>", or
-        (b) `message` is empty and
+        (a) `message` contains addressing string "TO: <name> <content>", or
+        (b) `message` is empty and function_call/tool_call with explicit `recipient`
+

         Returns:
             (str): name of recipient, which may be empty string if no recipient
```
```diff
@@ -240,8 +314,17 @@ class LLMResponse(BaseModel):
             return recipient, msg
         else:
             msg = self.message
-
-
+            if self.oai_tool_calls is not None:
+                # get the first tool that has a recipient field, if any
+                for tc in self.oai_tool_calls:
+                    if tc.function is not None and tc.function.arguments is not None:
+                        recipient = tc.function.arguments.get(
+                            "recipient"
+                        ) # type: ignore
+                        if recipient is not None and recipient != "":
+                            return recipient, ""
+
+        # It's not a function or tool call, so continue looking to see
         # if a recipient is specified in the message.

         # First check if message contains "TO: <recipient> <content>"
```
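So a response whose `message` is empty but whose tool call carries an explicit `recipient` argument now resolves to that recipient, as in this sketch (tool name and arguments are invented; it relies on `LLMFunctionCall.arguments` being a parsed dict, as the loop above implies):

```python
from langroid.language_models.base import (
    LLMFunctionCall,
    LLMResponse,
    OpenAIToolCall,
)

resp = LLMResponse(
    message="",  # empty message: case (b) in the docstring above
    oai_tool_calls=[
        OpenAIToolCall(
            id="call_1",
            function=LLMFunctionCall(
                name="ask_expert",
                arguments={"recipient": "Searcher", "query": "population of Paris"},
            ),
        )
    ],
)
recipient, msg = resp.get_recipient_and_message()
assert (recipient, msg) == ("Searcher", "")
```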
```diff
@@ -396,9 +479,25 @@ class LanguageModel(ABC):
         self,
         messages: Union[str, List[LLMMessage]],
         max_tokens: int = 200,
+        tools: Optional[List[OpenAIToolSpec]] = None,
+        tool_choice: ToolChoiceTypes | Dict[str, str | Dict[str, str]] = "auto",
         functions: Optional[List[LLMFunctionSpec]] = None,
         function_call: str | Dict[str, str] = "auto",
     ) -> LLMResponse:
+        """
+        Get chat-completion response from LLM.
+
+        Args:
+            messages: message-history to send to the LLM
+            max_tokens: max tokens to generate
+            tools: tools available for the LLM to use in its response
+            tool_choice: tool call mode, one of "none", "auto", "required",
+                or a dict specifying a specific tool.
+            functions: functions available for LLM to call (deprecated)
+            function_call: function calling mode, "auto", "none", or a specific fn
+                (deprecated)
+        """
+
         pass

     @abstractmethod
```
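A sketch of calling the extended signature with a hypothetical weather tool (this assumes `LLMFunctionSpec` also has `name` and `description` fields alongside the `parameters` shown earlier; any concrete `LanguageModel` can be passed in):

```python
from langroid.language_models.base import (
    LanguageModel,
    LLMFunctionSpec,
    LLMResponse,
    OpenAIToolSpec,
)

# Hypothetical tool; the schema follows OpenAI's function format.
weather_tool = OpenAIToolSpec(
    type="function",
    function=LLMFunctionSpec(
        name="get_weather",
        description="Get current weather for a city",
        parameters={
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    ),
)

def force_weather_call(llm: LanguageModel) -> LLMResponse:
    # tool_choice: "none" / "auto" / "required", or a dict naming one tool.
    return llm.chat(
        messages="What's the weather in Paris?",
        tools=[weather_tool],
        tool_choice={"type": "function", "function": {"name": "get_weather"}},
    )
```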
```diff
@@ -406,9 +505,12 @@ class LanguageModel(ABC):
         self,
         messages: Union[str, List[LLMMessage]],
         max_tokens: int = 200,
+        tools: Optional[List[OpenAIToolSpec]] = None,
+        tool_choice: ToolChoiceTypes | Dict[str, str | Dict[str, str]] = "auto",
         functions: Optional[List[LLMFunctionSpec]] = None,
         function_call: str | Dict[str, str] = "auto",
     ) -> LLMResponse:
+        """Async version of `chat`. See `chat` for details."""
         pass

     def __call__(self, prompt: str, max_tokens: int) -> LLMResponse:
```
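The async counterpart takes the same new parameters, so the equivalent sketch is (construction of the model again elided):

```python
from langroid.language_models.base import LanguageModel, LLMResponse

async def ask_async(llm: LanguageModel, question: str) -> LLMResponse:
    # tools/tool_choice could be passed here exactly as in chat().
    return await llm.achat(messages=question, max_tokens=100)
```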
langroid/language_models/mock_lm.py
CHANGED
```diff
@@ -4,7 +4,12 @@ from typing import Callable, Dict, List, Optional, Union

 import langroid.language_models as lm
 from langroid.language_models import LLMResponse
-from langroid.language_models.base import
+from langroid.language_models.base import (
+    LanguageModel,
+    LLMConfig,
+    OpenAIToolSpec,
+    ToolChoiceTypes,
+)


 def none_fn(x: str) -> None | str:
```
```diff
@@ -50,6 +55,8 @@ class MockLM(LanguageModel):
         self,
         messages: Union[str, List[lm.LLMMessage]],
         max_tokens: int = 200,
+        tools: Optional[List[OpenAIToolSpec]] = None,
+        tool_choice: ToolChoiceTypes | Dict[str, str | Dict[str, str]] = "auto",
         functions: Optional[List[lm.LLMFunctionSpec]] = None,
         function_call: str | Dict[str, str] = "auto",
     ) -> lm.LLMResponse:
```
```diff
@@ -63,6 +70,8 @@ class MockLM(LanguageModel):
         self,
         messages: Union[str, List[lm.LLMMessage]],
         max_tokens: int = 200,
+        tools: Optional[List[OpenAIToolSpec]] = None,
+        tool_choice: ToolChoiceTypes | Dict[str, str | Dict[str, str]] = "auto",
         functions: Optional[List[lm.LLMFunctionSpec]] = None,
         function_call: str | Dict[str, str] = "auto",
     ) -> lm.LLMResponse:
```