amrita_core 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,101 @@
1
+ from .chatmanager import ChatManager, ChatObject, ChatObjectMeta
2
+ from .config import get_config, set_config
3
+ from .hook.event import CompletionEvent, PreCompletionEvent
4
+ from .hook.matcher import MatcherManager
5
+ from .hook.on import on_completion, on_event, on_precompletion
6
+ from .libchat import (
7
+ call_completion,
8
+ get_last_response,
9
+ get_tokens,
10
+ text_generator,
11
+ tools_caller,
12
+ )
13
+ from .logging import debug_log, logger
14
+ from .preset import PresetManager, PresetReport
15
+ from .tools import mcp
16
+ from .tools.manager import ToolsManager, on_tools
17
+ from .tools.models import (
18
+ FunctionDefinitionSchema,
19
+ FunctionParametersSchema,
20
+ FunctionPropertySchema,
21
+ ToolContext,
22
+ ToolData,
23
+ ToolFunctionSchema,
24
+ )
25
+ from .types import (
26
+ BaseModel,
27
+ Function,
28
+ MemoryModel,
29
+ ModelConfig,
30
+ ModelPreset,
31
+ TextContent,
32
+ ToolCall,
33
+ ToolResult,
34
+ UniResponse,
35
+ UniResponseUsage,
36
+ )
37
+
38
+ __all__ = [
39
+ "BaseModel",
40
+ "ChatManager",
41
+ "ChatObject",
42
+ "ChatObjectMeta",
43
+ "CompletionEvent",
44
+ "Function",
45
+ "FunctionDefinitionSchema",
46
+ "FunctionParametersSchema",
47
+ "FunctionPropertySchema",
48
+ "MatcherManager",
49
+ "MemoryModel",
50
+ "ModelConfig",
51
+ "ModelPreset",
52
+ "PreCompletionEvent",
53
+ "PresetManager",
54
+ "PresetReport",
55
+ "TextContent",
56
+ "ToolCall",
57
+ "ToolContext",
58
+ "ToolData",
59
+ "ToolFunctionSchema",
60
+ "ToolResult",
61
+ "ToolsManager",
62
+ "UniResponse",
63
+ "UniResponseUsage",
64
+ "call_completion",
65
+ "debug_log",
66
+ "get_config",
67
+ "get_last_response",
68
+ "get_tokens",
69
+ "logger",
70
+ "mcp",
71
+ "on_completion",
72
+ "on_event",
73
+ "on_precompletion",
74
+ "on_tools",
75
+ "set_config",
76
+ "text_generator",
77
+ "tools_caller",
78
+ ]
79
__inited: bool = False  # process-wide guard so init() performs its one-time setup only once
80
+
81
+
82
def init():
    """Perform AmritaCore's one-time, process-wide initialization.

    Idempotent: the module-level ``__inited`` flag guards the body, so
    repeated calls after the first are no-ops. On first call this:

    - lazily imports ``jieba`` and warms up its dictionary, and
    - imports the package's ``builtins`` submodule and exposes its name
      through ``__all__``.
    """
    global __all__, __inited
    if not __inited:
        logger.info("AmritaCore is initializing......")
        # Imported lazily: jieba's dictionary load is expensive and only
        # needed once the core is actually used.
        import jieba

        from . import builtins

        __all__ += [builtins.__name__]

        jieba.initialize()
        # BUGFIX: the flag was never set, so every call re-ran jieba
        # initialization and appended a duplicate entry to __all__.
        __inited = True
93
+
94
+
95
async def load_amrita():
    """Load AmritaCore's runtime services.

    Currently this only bootstraps MCP clients: when
    ``function_config.agent_mcp_client_enable`` is set, every configured
    MCP server script is handed to ``mcp.ClientManager`` for
    initialization.
    """
    logger.info("Loading AmritaCore......")
    function_config = get_config().function_config
    # Nothing else to load when MCP client support is disabled.
    if not function_config.agent_mcp_client_enable:
        return
    logger.info("Loading MCP clients......")
    script_list = list(function_config.agent_mcp_server_scripts)
    await mcp.ClientManager().initialize_scripts_all(script_list)
@@ -0,0 +1,7 @@
1
+ from . import adapter, agent, tools
2
+
3
+ __all__ = [
4
+ "adapter",
5
+ "agent",
6
+ "tools",
7
+ ]
@@ -0,0 +1,148 @@
1
+ from collections.abc import AsyncGenerator, Iterable
2
+
3
+ import openai
4
+ from openai.types.chat.chat_completion import ChatCompletion
5
+ from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
6
+ from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
7
+ from openai.types.chat.chat_completion_named_tool_choice_param import (
8
+ ChatCompletionNamedToolChoiceParam,
9
+ )
10
+ from openai.types.chat.chat_completion_named_tool_choice_param import (
11
+ Function as OPENAI_Function,
12
+ )
13
+ from openai.types.chat.chat_completion_tool_choice_option_param import (
14
+ ChatCompletionToolChoiceOptionParam,
15
+ )
16
+ from typing_extensions import override
17
+
18
+ from amrita_core.config import get_config
19
+ from amrita_core.logging import debug_log
20
+ from amrita_core.protocol import ModelAdapter
21
+ from amrita_core.tools.models import ToolChoice, ToolFunctionSchema
22
+ from amrita_core.types import ToolCall, UniResponse, UniResponseUsage
23
+
24
+
25
class OpenAIAdapter(ModelAdapter):
    """OpenAI Protocol Adapter.

    Bridges the project's ``ModelAdapter`` protocol to the official
    ``openai`` SDK, supporting both streaming and non-streaming chat
    completions plus tool calling.
    """

    # Marks this adapter as overriding any previously registered adapter
    # for the same protocol — presumably consumed by the adapter registry;
    # TODO confirm against ModelAdapter's registration logic.
    __override__ = True

    @override
    async def call_api(
        self, messages: Iterable[ChatCompletionMessageParam]
    ) -> AsyncGenerator[str | UniResponse[str, None], None]:
        """Call OpenAI API to get chat responses.

        Yields text deltas (streaming mode) or the full response text
        (non-streaming), then always yields a final ``UniResponse``
        carrying the accumulated content and usage stats.
        """
        preset = self.preset
        # NOTE(review): the AsyncOpenAI client is never closed; consider
        # `async with openai.AsyncOpenAI(...)` to release the HTTP pool.
        client = openai.AsyncOpenAI(
            base_url=preset.base_url,
            api_key=preset.api_key,
            timeout=get_config().llm.llm_timeout,
            max_retries=get_config().llm.max_retries,
        )
        completion: ChatCompletion | openai.AsyncStream[ChatCompletionChunk] | None = (
            None
        )
        if stream := preset.config.stream:
            completion = await client.chat.completions.create(
                model=preset.model,
                messages=messages,
                max_tokens=get_config().llm.max_tokens,
                stream=stream,
                # Ask the API to append a final usage chunk to the stream.
                stream_options={"include_usage": True},
            )
        else:
            completion = await client.chat.completions.create(
                model=preset.model,
                messages=messages,
                max_tokens=get_config().llm.max_tokens,
                stream=False,
            )
        response: str = ""
        uni_usage = None
        # Process streaming response
        if self.preset.config.stream and isinstance(completion, openai.AsyncStream):
            async for chunk in completion:
                try:
                    if chunk.usage:
                        # Usage arrives on the trailing chunk when
                        # include_usage is requested.
                        uni_usage = UniResponseUsage.model_validate(
                            chunk.usage, from_attributes=True
                        )
                    if chunk.choices[0].delta.content is not None:
                        response += chunk.choices[0].delta.content
                        yield chunk.choices[0].delta.content
                        debug_log(chunk.choices[0].delta.content)
                except IndexError:
                    # chunk.choices may be empty (e.g. the usage-only
                    # trailing chunk); treat it as end of stream.
                    break
        else:
            # NOTE(review): this logs `response` while it is still "" —
            # looks like leftover/dead logging; confirm intent.
            debug_log(response)
            if isinstance(completion, ChatCompletion):
                response = (
                    completion.choices[0].message.content
                    if completion.choices[0].message.content is not None
                    else ""
                )
                yield response
                if completion.usage:
                    uni_usage = UniResponseUsage.model_validate(
                        completion.usage, from_attributes=True
                    )
            else:
                raise RuntimeError("Received unexpected response type")
        # Final structured result, yielded after all text output.
        uni_response = UniResponse(
            role="assistant",
            content=response,
            usage=uni_usage,
            tool_calls=None,
        )
        yield uni_response

    @override
    async def call_tools(
        self,
        messages: Iterable,
        tools: list,
        tool_choice: ToolChoice | None = None,
    ) -> UniResponse[None, list[ToolCall] | None]:
        """Ask the model to select/call tools for the given messages.

        ``tool_choice`` may be None (-> "auto"), a ``ToolFunctionSchema``
        (converted into an OpenAI named-tool-choice param), or an OpenAI
        tool-choice literal passed through unchanged. Returns a
        ``UniResponse`` whose ``tool_calls`` is None when the model made
        no calls.
        """
        if not tool_choice:
            choice: ChatCompletionToolChoiceOptionParam = "auto"
        elif isinstance(tool_choice, ToolFunctionSchema):
            choice = ChatCompletionNamedToolChoiceParam(
                function=OPENAI_Function(name=tool_choice.function.name),
                type=tool_choice.type,
            )
        else:
            choice = tool_choice

        preset = self.preset
        base_url = preset.base_url
        key = preset.api_key
        model = preset.model
        # NOTE(review): unlike call_api, no max_retries is passed here —
        # confirm whether the asymmetry is intentional.
        client = openai.AsyncOpenAI(
            base_url=base_url,
            api_key=key,
            timeout=get_config().llm.llm_timeout,
        )
        completion: ChatCompletion = await client.chat.completions.create(
            model=model,
            messages=messages,
            stream=False,
            tool_choice=choice,
            tools=tools,
        )
        msg = completion.choices[0].message
        return UniResponse(
            role="assistant",
            tool_calls=(
                [
                    ToolCall.model_validate(i, from_attributes=True)
                    for i in msg.tool_calls
                ]
                if msg.tool_calls
                else None
            ),
            content=None,
        )

    @staticmethod
    def get_adapter_protocol() -> tuple[str, str]:
        """Return the (protocol name, provider tag) this adapter serves."""
        return "openai", "__main__"
@@ -0,0 +1,415 @@
1
+ import json
2
+ import os
3
+ import typing
4
+ from collections.abc import Awaitable, Callable
5
+ from copy import deepcopy
6
+ from typing import Any
7
+
8
+ from amrita_core.chatmanager import MessageWithMetadata
9
+ from amrita_core.config import get_config
10
+ from amrita_core.hook.event import CompletionEvent, PreCompletionEvent
11
+ from amrita_core.hook.exception import MatcherException as ProcEXC
12
+ from amrita_core.hook.on import on_completion, on_precompletion
13
+ from amrita_core.libchat import (
14
+ tools_caller,
15
+ )
16
+ from amrita_core.logging import debug_log, logger
17
+ from amrita_core.tools.manager import ToolsManager, on_tools
18
+ from amrita_core.tools.models import ToolContext
19
+ from amrita_core.types import CONTENT_LIST_TYPE as SEND_MESSAGES
20
+ from amrita_core.types import (
21
+ Message,
22
+ TextContent,
23
+ ToolCall,
24
+ ToolResult,
25
+ UniResponse,
26
+ )
27
+
28
+ from .tools import (
29
+ PROCESS_MESSAGE,
30
+ PROCESS_MESSAGE_TOOL,
31
+ REASONING_TOOL,
32
+ STOP_TOOL,
33
+ )
34
+
35
# Pre-completion hook: runs the agent/tool-calling workflow before the
# model produces its final completion. Non-blocking, priority 2.
prehook = on_precompletion(block=False, priority=2)
# Post-completion hook: inspects the finished model response (cookie
# canary check below). Non-blocking, priority 1.
posthook = on_completion(block=False, priority=1)


# Names of the built-in agent process tools; used to suppress user-facing
# tool-call notices for internal tooling.
BUILTIN_TOOLS_NAME = {
    STOP_TOOL.function.name,
    REASONING_TOOL.function.name,
    PROCESS_MESSAGE.function.name,
}

# Schemas of the tools that drive the agent loop itself (as opposed to
# user-registered tools).
AGENT_PROCESS_TOOLS = (
    REASONING_TOOL,
    STOP_TOOL,
    PROCESS_MESSAGE,
)
50
+
51
+
52
class Continue(BaseException):
    """Internal control-flow signal: skip to the next tool call in the agent loop.

    Derives from ``BaseException`` (not ``Exception``) so the agent loop's
    broad ``except Exception`` error handler never swallows it.
    """
53
+
54
+
55
@on_tools(
    data=PROCESS_MESSAGE_TOOL,
    custom_run=True,
    # Only registered when intermediate ("middle") agent messages are enabled.
    enable_if=lambda: get_config().function_config.agent_middle_message,
)
async def _(ctx: ToolContext) -> str | None:
    """Built-in tool: forward an intermediate message from the LLM to the user.

    Reads the ``content`` argument supplied by the model, pushes it to the
    chat object's response queue, and returns a confirmation string that is
    fed back to the model as the tool result.
    """
    msg: str = ctx.data["content"]
    logger.debug(f"[LLM-ProcessMessage] {msg}")
    await ctx.event.chat_object.yield_response(f"{msg}\n")
    return f"Sent a message to user:\n\n```text\n{msg}\n```\n"
65
+
66
+
67
@prehook.handle()
async def agent_core(event: PreCompletionEvent) -> None:
    """Pre-completion hook implementing the tool-calling / agent workflow.

    Depending on ``function_config.tool_calling_mode`` ("none" / tool /
    "agent"), repeatedly asks the model for tool calls, executes them, and
    appends the resulting messages to the event's context so the final
    completion sees the tool output. In agent mode the loop also drives
    the built-in REASONING/STOP process tools and recurses until the model
    stops or the call limit is hit. On failure the original context is
    restored from a backup.
    """
    # Last reasoning text produced by the REASONING tool.
    # NOTE(review): this is written below but never fed back via
    # append_reasoning_msg's `last_step` parameter — confirm whether that
    # wiring was intended.
    agent_last_step: str = ""

    async def _append_reasoning(
        msg: SEND_MESSAGES, response: UniResponse[None, list[ToolCall] | None]
    ):
        """Extract the REASONING tool call from `response`, append it plus
        its ToolResult to `msg`, and optionally surface it to the user."""
        nonlocal agent_last_step
        tool_calls: list[ToolCall] | None = response.tool_calls
        if tool_calls:
            for tool in tool_calls:
                if tool.function.name == REASONING_TOOL.function.name:
                    break
            else:
                # for-else: no reasoning call among the tool calls.
                raise ValueError(f"No reasoning tool found in response \n\n{response}")
            if reasoning := json.loads(tool.function.arguments).get("content"):
                msg.append(Message.model_validate(response, from_attributes=True))
                msg.append(
                    ToolResult(
                        role="tool",
                        name=tool.function.name,
                        content=reasoning,
                        tool_call_id=tool.id,
                    )
                )
                agent_last_step = reasoning
                logger.debug(f"[AmritaAgent] {reasoning}")
                # `config` / `chat_object` close over names assigned later in
                # agent_core's body; safe because run_tools runs after them.
                if not config.function_config.agent_reasoning_hide:
                    await chat_object.yield_response(
                        response=MessageWithMetadata(
                            content=f"<think>\n\n{reasoning}\n\n</think>\n",
                            metadata={"type": "reasoning", "content": reasoning},
                        )
                    )
            else:
                raise ValueError("Reasoning tool has no content!")

    async def append_reasoning_msg(
        msg: SEND_MESSAGES,
        original_msg: str = "",
        last_step: str = "",
        # NOTE(review): mutable default argument — shared across calls;
        # prefer `tools_ctx: list[dict[str, Any]] | None = None`.
        tools_ctx: list[dict[str, Any]] = [],
    ):
        """Force one REASONING tool call (tool_choice pinned to it) against
        a copy of `msg` prefixed with an analysis system prompt, then append
        the produced reasoning into `msg` via _append_reasoning."""
        nonlocal agent_last_step
        reasoning_msg = [
            Message(
                role="system",
                content="Please analyze the task requirements based on the user input above,"
                + " summarize the current step's purpose and reasons, and execute accordingly."
                + " If no task needs to be performed, no description is needed;"
                + " please analyze according to the character tone set in <SYS_SETTINGS> (if present)."
                + (
                    f"\nYour previous task was:\n```text\n{last_step}\n```\n"
                    if last_step
                    else ""
                )
                + (f"\n<INPUT>\n{original_msg}\n</INPUT>\n" if original_msg else "")
                + (
                    f"<SYS_SETTINGS>\n{event.get_context_messages().train.content!s}\n</SYS_SETTINGS>"
                ),
            ),
            *deepcopy(msg),
        ]
        response: UniResponse[None, list[ToolCall] | None] = await tools_caller(
            reasoning_msg,
            [REASONING_TOOL.model_dump(), *tools_ctx],
            tool_choice=REASONING_TOOL,
        )
        await _append_reasoning(msg, response)

    async def run_tools(
        msg_list: list,
        call_count: int = 1,
        original_msg: str = "",
    ):
        """One round of the tool loop: optionally inject reasoning, request
        tool calls, dispatch each call, then (agent mode) recurse."""
        suggested_stop: bool = False

        def stop_running():
            """Mark agent workflow as completed."""
            nonlocal suggested_stop
            suggested_stop = True

        logger.info(
            f"Starting round {call_count} tool call, current message count: {len(msg_list)}"
        )
        config = get_config()
        # Agent mode: add a reasoning step on the first round ("reasoning")
        # or on every round ("reasoning-required").
        if config.function_config.tool_calling_mode == "agent" and (
            (
                call_count == 1
                and config.function_config.agent_thought_mode == "reasoning"
            )
            or config.function_config.agent_thought_mode == "reasoning-required"
        ):
            await append_reasoning_msg(msg_list, original_msg, tools_ctx=tools)

        # Hard stop when the round limit is exceeded: notify the user and
        # nudge the model toward producing a final completion.
        if call_count > config.function_config.agent_tool_call_limit:
            await chat_object.yield_response(
                MessageWithMetadata(
                    content="[AmritaAgent] Too many tool calls! Workflow terminated!\n",
                    metadata={
                        "type": "system",
                        "message": "[AmritaAgent] Too many tool calls! Workflow terminated!\n",
                    },
                )
            )
            msg_list.append(
                Message(
                    role="user",
                    content="Too much tools called,please call later or follow user's instruction."
                    + "Now please continue to completion.",
                )
            )
            return
        response_msg = await tools_caller(
            msg_list,
            tools,
            # "required" forces at least one call unless STOP was suggested.
            tool_choice=(
                "required"
                if (config.llm.require_tools and not suggested_stop)
                else "auto"
            ),
        )

        if tool_calls := response_msg.tool_calls:
            result_msg_list: list[ToolResult] = []
            for tool_call in tool_calls:
                function_name = tool_call.function.name
                function_args: dict[str, Any] = json.loads(tool_call.function.arguments)
                debug_log(f"Function arguments are {tool_call.function.arguments}")
                logger.info(f"Calling function {function_name}")
                await chat_object.yield_response(
                    MessageWithMetadata(
                        content=f"Calling function {function_name}\n",
                        metadata={
                            "type": "function_call",
                            "function_name": function_name,
                            "is_done": False,
                            "tool_id": tool_call.id,
                        },
                    )
                )
                err: Exception | None = None
                try:
                    # Value patterns: each case compares against the dotted
                    # name's current value (the built-in tool's name).
                    match function_name:
                        case REASONING_TOOL.function.name:
                            logger.debug("Generating task summary and reason.")
                            await _append_reasoning(msg_list, response=response_msg)
                            # Continue (BaseException) skips the else-block
                            # bookkeeping for this call.
                            raise Continue()
                        case STOP_TOOL.function.name:
                            logger.info("Agent work has been terminated.")
                            func_response = (
                                "You have indicated readiness to provide the final answer."
                                + "Please now generate the final, comprehensive response for the user."
                            )
                            if "result" in function_args:
                                debug_log(f"[Done] {function_args['result']}")
                                func_response += (
                                    f"\nWork summary :\n{function_args['result']}"
                                )
                            msg_list.append(
                                Message.model_validate(
                                    response_msg, from_attributes=True
                                )
                            )

                            stop_running()
                        case _:
                            # User-registered tool: dispatch via ToolsManager.
                            if (
                                tool_data := ToolsManager().get_tool(function_name)
                            ) is not None:
                                if not tool_data.custom_run:
                                    # Plain tool: gets the raw args dict.
                                    msg_list.append(
                                        Message.model_validate(
                                            response_msg, from_attributes=True
                                        )
                                    )
                                    func_response: str = await typing.cast(
                                        Callable[[dict[str, Any]], Awaitable[str]],
                                        tool_data.func,
                                    )(function_args)
                                # custom_run tool: gets a full ToolContext and
                                # may return None ("no content").
                                elif (
                                    tool_response := await typing.cast(
                                        Callable[[ToolContext], Awaitable[str | None]],
                                        tool_data.func,
                                    )(
                                        ToolContext(
                                            data=function_args,
                                            event=event,
                                            matcher=prehook,
                                        )
                                    )
                                ) is None:
                                    func_response = "(this tool returned no content)"
                                else:
                                    msg_list.append(
                                        Message.model_validate(
                                            response_msg, from_attributes=True
                                        )
                                    )
                                    func_response = tool_response
                            else:
                                raise RuntimeError("Received unexpected response type")

                except Continue:
                    continue
                except Exception as e:
                    err = e
                    # Matcher-control exceptions must propagate.
                    if isinstance(e, ProcEXC):
                        raise
                    logger.error(f"Function {function_name} execution failed: {e}")
                    if (
                        config.function_config.tool_calling_mode == "agent"
                        and function_name not in BUILTIN_TOOLS_NAME
                        and config.function_config.agent_tool_call_notice
                    ):
                        await chat_object.yield_response(
                            MessageWithMetadata(
                                content=f"Error: {function_name} failed.",
                                metadata={
                                    "type": "function_call",
                                    "function_name": function_name,
                                    "is_done": True,
                                    "tool_id": tool_call.id,
                                    "err": err,
                                },
                            )
                        )
                    # Feed the failure back to the model as a tool result.
                    msg_list.append(
                        ToolResult(
                            role="tool",
                            name=function_name,
                            content=f"ERR: Tool {function_name} execution failed\n{e!s}",
                            tool_call_id=tool_call.id,
                        )
                    )
                    continue
                else:
                    logger.debug(f"Function {function_name} returned: {func_response}")

                    msg: ToolResult = ToolResult(
                        role="tool",
                        content=func_response,
                        name=function_name,
                        tool_call_id=tool_call.id,
                    )
                    msg_list.append(msg)
                    result_msg_list.append(msg)
                finally:
                    # Counts every attempted call, including failed ones.
                    call_count += 1

            # Send tool call info to user
            if config.function_config.agent_tool_call_notice == "notify":
                for rslt in result_msg_list:
                    await chat_object.yield_response(
                        MessageWithMetadata(
                            content=f"Called tool {rslt.name}\n",
                            # NOTE(review): function_name / tool_call.id here
                            # are leftovers from the LAST loop iteration, not
                            # per-result values — likely should be rslt.name /
                            # rslt.tool_call_id; confirm.
                            metadata={
                                "type": "function_call",
                                "function_name": function_name,
                                "is_done": True,
                                "tool_id": tool_call.id,
                                "err": None,
                            },
                        )
                    )

            # Agent mode keeps looping until STOP or the round limit.
            if config.function_config.tool_calling_mode == "agent":
                await run_tools(msg_list, call_count, original_msg)

    config = get_config()
    chat_object = event.chat_object
    if config.function_config.tool_calling_mode == "none":
        return
    # Minimal context = system prompt + latest user query only.
    msg_list: SEND_MESSAGES = (
        [
            deepcopy(event.message.train),
            deepcopy(event.message.user_query),
        ]
        if config.function_config.use_minimal_context
        else event.message.unwrap()
    )
    current_length = len(msg_list)
    # Backup so a failed workflow can restore the untouched context.
    chat_list_backup = event.message.copy()
    tools: list[dict[str, Any]] = []
    if config.function_config.tool_calling_mode == "agent":
        tools.append(STOP_TOOL.model_dump())
        if config.function_config.agent_thought_mode.startswith("reasoning"):
            tools.append(REASONING_TOOL.model_dump())
    tools.extend(ToolsManager().tools_meta_dict().values())
    logger.debug(
        "Tool list:"
        + "".join(
            f"{tool['function']['name']}: {tool['function']['description']}\n\n"
            for tool in tools
        )
    )
    logger.debug(f"Tool list: {tools}")
    if not tools:
        logger.warning("No valid tools defined! Tools Workflow skipped.")
        return
    # NOTE(review): this condition looks inverted — it warns when
    # AMRITA_IGNORE_AGENT_TOOLS=true, yet the message says setting that
    # variable suppresses the warning; confirm intended polarity.
    if str(os.getenv(key="AMRITA_IGNORE_AGENT_TOOLS")).lower() == "true" and (
        config.function_config.tool_calling_mode == "agent"
        and len(tools) == len(AGENT_PROCESS_TOOLS)
    ):
        logger.warning(
            "Note: Currently there are only Agent mode process tools without other valid tools defined, which usually isn't a best practice for using Agent mode. Configure environment variable AMRITA_IGNORE_AGENT_TOOLS=true to ignore this warning."
        )

    try:
        await run_tools(
            msg_list,
            # Flatten multimodal context down to its text parts.
            original_msg=event.original_context
            if isinstance(event.original_context, str)
            else "".join(
                [
                    i.content
                    for i in event.original_context
                    if isinstance(i, TextContent)
                ]
            ),
        )
        # Only the messages produced by the workflow are merged back.
        event._context_messages.extend(msg_list[current_length:])

    except Exception as e:
        if isinstance(e, ProcEXC):
            raise
        logger.warning(
            f"ERROR\n{e!s}\n!Failed to call Tools! Continuing with old data..."
        )
        event._context_messages = chat_list_backup
397
+
398
+
399
@posthook.handle()
async def cookie(event: CompletionEvent):
    """Post-completion canary check.

    If the configured "cookie" (a canary string, presumably planted in the
    system prompt to detect prompt leakage — TODO confirm) appears in the
    model's response, replace the output with a generic error message.
    """
    config = get_config()
    response = event.get_model_response()
    if config.cookie.enable_cookie:
        if cookie := config.cookie.cookie:
            if cookie in response:
                await event.chat_object.yield_response(
                    response=MessageWithMetadata(
                        "Some error occurred, please try again later.",
                        metadata={
                            "type": "error",
                            "content": "Some error occurred, please try again later.",
                        },
                    )
                )
                # Close the response queue so the leaked completion is not
                # delivered. NOTE(review): indentation reconstructed from a
                # diff dump — confirm this runs only on canary detection.
                await event.chat_object.set_queue_done()