langroid 0.6.7__py3-none-any.whl → 0.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,121 @@
+ [project]
+ # Whether to enable telemetry (default: true). No personal data is collected.
+ enable_telemetry = true
+
+
+ # List of environment variables to be provided by each user to use the app.
+ user_env = []
+
+ # Duration (in seconds) during which the session is saved when the connection is lost
+ session_timeout = 3600
+
+ # Enable third parties caching (e.g LangChain cache)
+ cache = false
+
+ # Authorized origins
+ allow_origins = ["*"]
+
+ # Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
+ # follow_symlink = false
+
+ [features]
+ # Show the prompt playground
+ prompt_playground = true
+
+ # Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript)
+ unsafe_allow_html = false
+
+ # Process and display mathematical expressions. This can clash with "$" characters in messages.
+ latex = false
+
+ # Automatically tag threads with the current chat profile (if a chat profile is used)
+ auto_tag_thread = true
+
+ # Authorize users to spontaneously upload files with messages
+ [features.spontaneous_file_upload]
+ enabled = true
+ accept = ["*/*"]
+ max_files = 20
+ max_size_mb = 500
+
+ [features.audio]
+ # Threshold for audio recording
+ min_decibels = -45
+ # Delay for the user to start speaking in MS
+ initial_silence_timeout = 3000
+ # Delay for the user to continue speaking in MS. If the user stops speaking for this duration, the recording will stop.
+ silence_timeout = 1500
+ # Above this duration (MS), the recording will forcefully stop.
+ max_duration = 15000
+ # Duration of the audio chunks in MS
+ chunk_duration = 1000
+ # Sample rate of the audio
+ sample_rate = 44100
+
+ [UI]
+ # Name of the app and chatbot.
+ name = "Chatbot"
+
+ # Show the readme while the thread is empty.
+ show_readme_as_default = true
+
+ # Description of the app and chatbot. This is used for HTML tags.
+ # description = ""
+
+ # Large size content are by default collapsed for a cleaner ui
+ default_collapse_content = true
+
+ # The default value for the expand messages settings.
+ default_expand_messages = false
+
+ # Hide the chain of thought details from the user in the UI.
+ hide_cot = false
+
+ # Link to your github repo. This will add a github button in the UI's header.
+ # github = ""
+
+ # Specify a CSS file that can be used to customize the user interface.
+ # The CSS file can be served from the public directory or via an external link.
+ # custom_css = "/public/test.css"
+
+ # Specify a Javascript file that can be used to customize the user interface.
+ # The Javascript file can be served from the public directory.
+ # custom_js = "/public/test.js"
+
+ # Specify a custom font url.
+ # custom_font = "https://fonts.googleapis.com/css2?family=Inter:wght@400;500;700&display=swap"
+
+ # Specify a custom meta image url.
+ # custom_meta_image_url = "https://chainlit-cloud.s3.eu-west-3.amazonaws.com/logo/chainlit_banner.png"
+
+ # Specify a custom build directory for the frontend.
+ # This can be used to customize the frontend code.
+ # Be careful: If this is a relative path, it should not start with a slash.
+ # custom_build = "./public/build"
+
+ [UI.theme]
+ #layout = "wide"
+ #font_family = "Inter, sans-serif"
+ # Override default MUI light theme. (Check theme.ts)
+ [UI.theme.light]
+ #background = "#FAFAFA"
+ #paper = "#FFFFFF"
+
+ [UI.theme.light.primary]
+ #main = "#F80061"
+ #dark = "#980039"
+ #light = "#FFE7EB"
+
+ # Override default MUI dark theme. (Check theme.ts)
+ [UI.theme.dark]
+ #background = "#FAFAFA"
+ #paper = "#FFFFFF"
+
+ [UI.theme.dark.primary]
+ #main = "#F80061"
+ #dark = "#980039"
+ #light = "#FFE7EB"
+
+
+ [meta]
+ generated_by = "1.1.202"
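The file above is a Chainlit-style `config.toml` bundled with the wheel. A minimal sketch (not part of the package) of inspecting such a config with Python's standard-library `tomllib` (3.11+); the path `config.toml` is assumed for illustration:

```python
import tomllib

# tomllib requires the file to be opened in binary mode.
with open("config.toml", "rb") as f:
    cfg = tomllib.load(f)

# Dotted TOML sections become nested dicts.
print(cfg["project"]["session_timeout"])                          # 3600
print(cfg["features"]["spontaneous_file_upload"]["max_size_mb"])  # 500
print(cfg["UI"]["name"])                                          # "Chatbot"
```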
@@ -0,0 +1,231 @@
+ {
+   "components": {
+     "atoms": {
+       "buttons": {
+         "userButton": {
+           "menu": {
+             "settings": "Settings",
+             "settingsKey": "S",
+             "APIKeys": "API Keys",
+             "logout": "Logout"
+           }
+         }
+       }
+     },
+     "molecules": {
+       "newChatButton": {
+         "newChat": "New Chat"
+       },
+       "tasklist": {
+         "TaskList": {
+           "title": "\ud83d\uddd2\ufe0f Task List",
+           "loading": "Loading...",
+           "error": "An error occured"
+         }
+       },
+       "attachments": {
+         "cancelUpload": "Cancel upload",
+         "removeAttachment": "Remove attachment"
+       },
+       "newChatDialog": {
+         "createNewChat": "Create new chat?",
+         "clearChat": "This will clear the current messages and start a new chat.",
+         "cancel": "Cancel",
+         "confirm": "Confirm"
+       },
+       "settingsModal": {
+         "settings": "Settings",
+         "expandMessages": "Expand Messages",
+         "hideChainOfThought": "Hide Chain of Thought",
+         "darkMode": "Dark Mode"
+       },
+       "detailsButton": {
+         "using": "Using",
+         "running": "Running",
+         "took_one": "Took {{count}} step",
+         "took_other": "Took {{count}} steps"
+       },
+       "auth": {
+         "authLogin": {
+           "title": "Login to access the app.",
+           "form": {
+             "email": "Email address",
+             "password": "Password",
+             "noAccount": "Don't have an account?",
+             "alreadyHaveAccount": "Already have an account?",
+             "signup": "Sign Up",
+             "signin": "Sign In",
+             "or": "OR",
+             "continue": "Continue",
+             "forgotPassword": "Forgot password?",
+             "passwordMustContain": "Your password must contain:",
+             "emailRequired": "email is a required field",
+             "passwordRequired": "password is a required field"
+           },
+           "error": {
+             "default": "Unable to sign in.",
+             "signin": "Try signing in with a different account.",
+             "oauthsignin": "Try signing in with a different account.",
+             "redirect_uri_mismatch": "The redirect URI is not matching the oauth app configuration.",
+             "oauthcallbackerror": "Try signing in with a different account.",
+             "oauthcreateaccount": "Try signing in with a different account.",
+             "emailcreateaccount": "Try signing in with a different account.",
+             "callback": "Try signing in with a different account.",
+             "oauthaccountnotlinked": "To confirm your identity, sign in with the same account you used originally.",
+             "emailsignin": "The e-mail could not be sent.",
+             "emailverify": "Please verify your email, a new email has been sent.",
+             "credentialssignin": "Sign in failed. Check the details you provided are correct.",
+             "sessionrequired": "Please sign in to access this page."
+           }
+         },
+         "authVerifyEmail": {
+           "almostThere": "You're almost there! We've sent an email to ",
+           "verifyEmailLink": "Please click on the link in that email to complete your signup.",
+           "didNotReceive": "Can't find the email?",
+           "resendEmail": "Resend email",
+           "goBack": "Go Back",
+           "emailSent": "Email sent successfully.",
+           "verifyEmail": "Verify your email address"
+         },
+         "providerButton": {
+           "continue": "Continue with {{provider}}",
+           "signup": "Sign up with {{provider}}"
+         },
+         "authResetPassword": {
+           "newPasswordRequired": "New password is a required field",
+           "passwordsMustMatch": "Passwords must match",
+           "confirmPasswordRequired": "Confirm password is a required field",
+           "newPassword": "New password",
+           "confirmPassword": "Confirm password",
+           "resetPassword": "Reset Password"
+         },
+         "authForgotPassword": {
+           "email": "Email address",
+           "emailRequired": "email is a required field",
+           "emailSent": "Please check the email address {{email}} for instructions to reset your password.",
+           "enterEmail": "Enter your email address and we will send you instructions to reset your password.",
+           "resendEmail": "Resend email",
+           "continue": "Continue",
+           "goBack": "Go Back"
+         }
+       }
+     },
+     "organisms": {
+       "chat": {
+         "history": {
+           "index": {
+             "showHistory": "Show history",
+             "lastInputs": "Last Inputs",
+             "noInputs": "Such empty...",
+             "loading": "Loading..."
+           }
+         },
+         "inputBox": {
+           "input": {
+             "placeholder": "Type your message here..."
+           },
+           "speechButton": {
+             "start": "Start recording",
+             "stop": "Stop recording"
+           },
+           "SubmitButton": {
+             "sendMessage": "Send message",
+             "stopTask": "Stop Task"
+           },
+           "UploadButton": {
+             "attachFiles": "Attach files"
+           },
+           "waterMark": {
+             "text": "Built with"
+           }
+         },
+         "Messages": {
+           "index": {
+             "running": "Running",
+             "executedSuccessfully": "executed successfully",
+             "failed": "failed",
+             "feedbackUpdated": "Feedback updated",
+             "updating": "Updating"
+           }
+         },
+         "dropScreen": {
+           "dropYourFilesHere": "Drop your files here"
+         },
+         "index": {
+           "failedToUpload": "Failed to upload",
+           "cancelledUploadOf": "Cancelled upload of",
+           "couldNotReachServer": "Could not reach the server",
+           "continuingChat": "Continuing previous chat"
+         },
+         "settings": {
+           "settingsPanel": "Settings panel",
+           "reset": "Reset",
+           "cancel": "Cancel",
+           "confirm": "Confirm"
+         }
+       },
+       "threadHistory": {
+         "sidebar": {
+           "filters": {
+             "FeedbackSelect": {
+               "feedbackAll": "Feedback: All",
+               "feedbackPositive": "Feedback: Positive",
+               "feedbackNegative": "Feedback: Negative"
+             },
+             "SearchBar": {
+               "search": "Search"
+             }
+           },
+           "DeleteThreadButton": {
+             "confirmMessage": "This will delete the thread as well as it's messages and elements.",
+             "cancel": "Cancel",
+             "confirm": "Confirm",
+             "deletingChat": "Deleting chat",
+             "chatDeleted": "Chat deleted"
+           },
+           "index": {
+             "pastChats": "Past Chats"
+           },
+           "ThreadList": {
+             "empty": "Empty...",
+             "today": "Today",
+             "yesterday": "Yesterday",
+             "previous7days": "Previous 7 days",
+             "previous30days": "Previous 30 days"
+           },
+           "TriggerButton": {
+             "closeSidebar": "Close sidebar",
+             "openSidebar": "Open sidebar"
+           }
+         },
+         "Thread": {
+           "backToChat": "Go back to chat",
+           "chatCreatedOn": "This chat was created on"
+         }
+       },
+       "header": {
+         "chat": "Chat",
+         "readme": "Readme"
+       }
+     }
+   },
+   "hooks": {
+     "useLLMProviders": {
+       "failedToFetchProviders": "Failed to fetch providers:"
+     }
+   },
+   "pages": {
+     "Design": {},
+     "Env": {
+       "savedSuccessfully": "Saved successfully",
+       "requiredApiKeys": "Required API Keys",
+       "requiredApiKeysInfo": "To use this app, the following API keys are required. The keys are stored on your device's local storage."
+     },
+     "Page": {
+       "notPartOfProject": "You are not part of this project."
+     },
+     "ResumeButton": {
+       "resumeChat": "Resume Chat"
+     }
+   }
+ }
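The second added file is a nested table of UI translation strings with i18n-style `{{...}}` placeholders. A small illustrative sketch of resolving a dotted key path in such a table (the filename and the `translate` helper are hypothetical, not part of the package):

```python
import json

def translate(table: dict, dotted_key: str) -> str:
    """Hypothetical helper: walk a nested dict by a dotted key path."""
    node = table
    for part in dotted_key.split("."):
        node = node[part]
    return node

with open("en-US.json") as f:  # assumed filename
    table = json.load(f)

print(translate(table, "components.molecules.detailsButton.took_other"))
# -> "Took {{count}} steps"  ({{count}} is filled in by the UI layer)
```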
@@ -9,6 +9,7 @@ from typing import (
      Callable,
      Dict,
      List,
+     Literal,
      Optional,
      Tuple,
      Type,
@@ -32,6 +33,11 @@ def noop_fn(*args: List[Any], **kwargs: Dict[str, Any]) -> None:
      pass


+ FunctionCallTypes = Literal["none", "auto"]
+ ToolChoiceTypes = Literal["none", "auto", "required"]
+ ToolTypes = Literal["function"]
+
+
  class LLMConfig(BaseSettings):
      type: str = "openai"
      streamer: Optional[Callable[[Any], None]] = noop_fn
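The new `Literal` aliases pin down the accepted string modes for function/tool calling. For reference, a value satisfying the `ToolChoiceTypes | Dict[...]` annotation used later can be either one of the literal strings or, following the OpenAI convention, a dict that forces a specific tool (the tool name below is invented for illustration):

```python
from typing import Dict, Literal, Union

ToolChoiceTypes = Literal["none", "auto", "required"]

choice_auto: ToolChoiceTypes = "auto"          # let the model decide
choice_required: ToolChoiceTypes = "required"  # model must call some tool

# Dict form forcing one specific tool (name is hypothetical):
choice_specific: Union[ToolChoiceTypes, Dict[str, Union[str, Dict[str, str]]]] = {
    "type": "function",
    "function": {"name": "lookup_docs"},
}
```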
@@ -60,7 +66,7 @@ class LLMConfig(BaseSettings):

  class LLMFunctionCall(BaseModel):
      """
-     Structure of LLM response indicate it "wants" to call a function.
+     Structure of LLM response indicating it "wants" to call a function.
      Modeled after OpenAI spec for `function_call` field in ChatCompletion API.
      """

@@ -103,6 +109,45 @@ class LLMFunctionSpec(BaseModel):
      parameters: Dict[str, Any]


+ class OpenAIToolCall(BaseModel):
+     """
+     Represents a single tool call in a list of tool calls generated by OpenAI LLM API.
+     See https://platform.openai.com/docs/api-reference/chat/create
+
+     Attributes:
+         id: The id of the tool call.
+         type: The type of the tool call;
+             only "function" is currently possible (7/26/24).
+         function: The function call.
+     """
+
+     id: str | None = None
+     type: ToolTypes = "function"
+     function: LLMFunctionCall | None = None
+
+     @staticmethod
+     def from_dict(message: Dict[str, Any]) -> "OpenAIToolCall":
+         """
+         Initialize from dictionary.
+         Args:
+             d: dictionary containing fields to initialize
+         """
+         id = message["id"]
+         type = message["type"]
+         function = LLMFunctionCall.from_dict(message["function"])
+         return OpenAIToolCall(id=id, type=type, function=function)
+
+     def __str__(self) -> str:
+         if self.function is None:
+             return ""
+         return "OAI-TOOL: " + json.dumps(self.function.dict(), indent=2)
+
+
+ class OpenAIToolSpec(BaseModel):
+     type: ToolTypes
+     function: LLMFunctionSpec
+
+
  class LLMTokenUsage(BaseModel):
      prompt_tokens: int = 0
      completion_tokens: int = 0
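A minimal sketch of building an `OpenAIToolCall` by hand, based only on the fields visible above (`id`, `type`, `function`) and on `LLMFunctionCall`'s `name`/`arguments` fields as used elsewhere in this diff; the id, tool name, and arguments are invented for illustration:

```python
tc = OpenAIToolCall(
    id="call_abc123",  # invented id, as an API would supply
    type="function",
    function=LLMFunctionCall(name="get_weather", arguments={"city": "Paris"}),
)
print(str(tc))  # "OAI-TOOL: " followed by a JSON dump of the function call
```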
@@ -132,18 +177,26 @@ class Role(str, Enum):
      SYSTEM = "system"
      ASSISTANT = "assistant"
      FUNCTION = "function"
+     TOOL = "tool"


  class LLMMessage(BaseModel):
      """
-     Class representing message sent to, or received from, LLM.
+     Class representing an entry in the msg-history sent to the LLM API.
+     It could be one of these:
+     - a user message
+     - an LLM ("Assistant") response
+     - a fn-call or tool-call-list from an OpenAI-compatible LLM API response
+     - a result or results from executing a fn or tool-call(s)
      """

      role: Role
      name: Optional[str] = None
+     tool_call_id: Optional[str] = None  # which OpenAI LLM tool this is a response to
      tool_id: str = ""  # used by OpenAIAssistant
      content: str
      function_call: Optional[LLMFunctionCall] = None
+     tool_calls: Optional[List[OpenAIToolCall]] = None
      timestamp: datetime = Field(default_factory=datetime.utcnow)
      # link to corresponding chat document, for provenance/rewind purposes
      chat_document_id: str = ""
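With the new `Role.TOOL` and `tool_call_id` fields, the result of executing a tool call can itself be an entry in the message history, keyed to the call it answers. A short sketch (id and content invented):

```python
tool_result = LLMMessage(
    role=Role.TOOL,
    tool_call_id="call_abc123",       # id of the tool call this result answers
    content='{"temperature_c": 21}',  # tool output, sent back as a string
)
```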
@@ -169,6 +222,14 @@ class LLMMessage(BaseModel):
              dict_no_none["function_call"]["arguments"] = json.dumps(
                  dict_no_none["function_call"]["arguments"]
              )
+         if "tool_calls" in dict_no_none:
+             # convert tool calls to API format
+             for tc in dict_no_none["tool_calls"]:
+                 if "arguments" in tc["function"]:
+                     # arguments must be a string
+                     tc["function"]["arguments"] = json.dumps(
+                         tc["function"]["arguments"]
+                     )
          # IMPORTANT! drop fields that are not expected in API call
          dict_no_none.pop("tool_id", None)
          dict_no_none.pop("timestamp", None)
@@ -190,7 +251,9 @@ class LLMResponse(BaseModel):
      """

      message: str
+     # TODO tool_id needs to generalize to multi-tool calls
      tool_id: str = ""  # used by OpenAIAssistant
+     oai_tool_calls: Optional[List[OpenAIToolCall]] = None
      function_call: Optional[LLMFunctionCall] = None
      usage: Optional[LLMTokenUsage] = None
      cached: bool = False
@@ -198,18 +261,28 @@
      def __str__(self) -> str:
          if self.function_call is not None:
              return str(self.function_call)
+         elif self.oai_tool_calls:
+             return "\n".join(str(tc) for tc in self.oai_tool_calls)
          else:
              return self.message

      def to_LLMMessage(self) -> LLMMessage:
-         content = self.message
-         role = Role.ASSISTANT if self.function_call is None else Role.FUNCTION
-         name = None if self.function_call is None else self.function_call.name
+         """Convert LLM response to an LLMMessage, to be included in the
+         message-list sent to the API.
+         This is currently NOT used in any significant way in the library, and is only
+         provided as a utility to construct a message list for the API when directly
+         working with an LLM object.
+
+         In a `ChatAgent`, an LLM response is first converted to a ChatDocument,
+         which is in turn converted to an LLMMessage via `ChatDocument.to_LLMMessage()`
+         See `ChatAgent._prep_llm_messages()` and `ChatAgent.llm_response_messages`
+         """
          return LLMMessage(
-             role=role,
-             content=content,
-             name=name,
+             role=Role.ASSISTANT,
+             content=self.message,
+             name=None if self.function_call is None else self.function_call.name,
              function_call=self.function_call,
+             tool_calls=self.oai_tool_calls,
          )

      def get_recipient_and_message(
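As the new docstring says, `to_LLMMessage` is a convenience for driving an LLM object directly: append the converted assistant turn (including any function/tool calls) to the running history. A hedged sketch, assuming `llm` is some concrete `LanguageModel` and that a `Role.USER` member exists alongside the roles shown earlier:

```python
history = [
    LLMMessage(role=Role.SYSTEM, content="You are a terse assistant."),
    LLMMessage(role=Role.USER, content="What is the capital of France?"),
]
response = llm.chat(messages=history, max_tokens=50)
history.append(response.to_LLMMessage())  # keep the assistant turn in the history
```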
@@ -240,8 +313,17 @@ class LLMResponse(BaseModel):
              return recipient, msg
          else:
              msg = self.message
-
-         # It's not a function call, so continue looking to see
+         if self.oai_tool_calls is not None:
+             # get the first tool that has a recipient field, if any
+             for tc in self.oai_tool_calls:
+                 if tc.function is not None and tc.function.arguments is not None:
+                     recipient = tc.function.arguments.get(
+                         "recipient"
+                     )  # type: ignore
+                     if recipient is not None and recipient != "":
+                         return recipient, ""
+
+         # It's not a function or tool call, so continue looking to see
          # if a recipient is specified in the message.

          # First check if message contains "TO: <recipient> <content>"
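The new branch lets an addressee be routed through a tool call: the first tool call whose `arguments` contains a non-empty `"recipient"` determines the recipient, and the returned message part is empty. Roughly, assuming the method takes no arguments beyond `self` (tool name and values invented):

```python
response = LLMResponse(
    message="",
    oai_tool_calls=[
        OpenAIToolCall(
            id="call_1",
            function=LLMFunctionCall(
                name="send_to",                      # invented tool name
                arguments={"recipient": "Planner"},  # routing field checked above
            ),
        )
    ],
)
recipient, msg = response.get_recipient_and_message()
print(recipient, repr(msg))  # Planner ''
```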
@@ -396,9 +478,25 @@ class LanguageModel(ABC):
          self,
          messages: Union[str, List[LLMMessage]],
          max_tokens: int = 200,
+         tools: Optional[List[OpenAIToolSpec]] = None,
+         tool_choice: ToolChoiceTypes | Dict[str, str | Dict[str, str]] = "auto",
          functions: Optional[List[LLMFunctionSpec]] = None,
          function_call: str | Dict[str, str] = "auto",
      ) -> LLMResponse:
+         """
+         Get chat-completion response from LLM.
+
+         Args:
+             messages: message-history to send to the LLM
+             max_tokens: max tokens to generate
+             tools: tools available for the LLM to use in its response
+             tool_choice: tool call mode, one of "none", "auto", "required",
+                 or a dict specifying a specific tool.
+             functions: functions available for LLM to call (deprecated)
+             function_call: function calling mode, "auto", "none", or a specific fn
+                 (deprecated)
+         """
+
          pass

      @abstractmethod
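The expanded `chat` signature mirrors the OpenAI tools API: pass a list of `OpenAIToolSpec`s plus a `tool_choice` mode. A minimal sketch, assuming `LLMFunctionSpec` carries the usual `name`/`description`/`parameters` fields of the OpenAI function spec and that `llm` is a concrete `LanguageModel`; the weather tool is invented:

```python
weather_tool = OpenAIToolSpec(
    type="function",
    function=LLMFunctionSpec(
        name="get_weather",  # hypothetical tool
        description="Get current weather for a city",
        parameters={
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    ),
)

response = llm.chat(
    messages="What's the weather in Paris?",
    max_tokens=100,
    tools=[weather_tool],
    tool_choice="auto",  # or "none", "required", or a dict forcing one tool
)
for tc in response.oai_tool_calls or []:
    print(tc)  # each renders as "OAI-TOOL: {...}"
```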
@@ -406,9 +504,12 @@
          self,
          messages: Union[str, List[LLMMessage]],
          max_tokens: int = 200,
+         tools: Optional[List[OpenAIToolSpec]] = None,
+         tool_choice: ToolChoiceTypes | Dict[str, str | Dict[str, str]] = "auto",
          functions: Optional[List[LLMFunctionSpec]] = None,
          function_call: str | Dict[str, str] = "auto",
      ) -> LLMResponse:
+         """Async version of `chat`. See `chat` for details."""
          pass

      def __call__(self, prompt: str, max_tokens: int) -> LLMResponse:
@@ -4,7 +4,12 @@ from typing import Callable, Dict, List, Optional, Union

  import langroid.language_models as lm
  from langroid.language_models import LLMResponse
- from langroid.language_models.base import LanguageModel, LLMConfig
+ from langroid.language_models.base import (
+     LanguageModel,
+     LLMConfig,
+     OpenAIToolSpec,
+     ToolChoiceTypes,
+ )


  def none_fn(x: str) -> None | str:
@@ -50,6 +55,8 @@ class MockLM(LanguageModel):
          self,
          messages: Union[str, List[lm.LLMMessage]],
          max_tokens: int = 200,
+         tools: Optional[List[OpenAIToolSpec]] = None,
+         tool_choice: ToolChoiceTypes | Dict[str, str | Dict[str, str]] = "auto",
          functions: Optional[List[lm.LLMFunctionSpec]] = None,
          function_call: str | Dict[str, str] = "auto",
      ) -> lm.LLMResponse:
@@ -63,6 +70,8 @@
          self,
          messages: Union[str, List[lm.LLMMessage]],
          max_tokens: int = 200,
+         tools: Optional[List[OpenAIToolSpec]] = None,
+         tool_choice: ToolChoiceTypes | Dict[str, str | Dict[str, str]] = "auto",
          functions: Optional[List[lm.LLMFunctionSpec]] = None,
          function_call: str | Dict[str, str] = "auto",
      ) -> lm.LLMResponse: