amrita_core 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
amrita_core/types.py ADDED
@@ -0,0 +1,274 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ import time
5
+ import typing
6
+ from collections.abc import Iterable
7
+ from copy import deepcopy
8
+ from pathlib import Path
9
+ from typing import Any, Generic, Literal
10
+
11
+ from pydantic import BaseModel as B_Model
12
+ from pydantic import Field
13
+
14
# Pydantic models

# Content type parameter for UniResponse: no content (None), a plain string,
# or the combined `None | Literal[""]` form (missing-or-empty).
# NOTE(review): these are TypeVar *constraints* (exactly one must match), not
# an upper bound — confirm this is the intended typing semantics.
T = typing.TypeVar("T", None, str, None | typing.Literal[""])
# Token-count type parameter: int when usage statistics are known, else None.
T_INT = typing.TypeVar("T_INT", int, None)
18
+
19
+
20
class BaseModel(B_Model):
    """BaseModel+dict duck typing.

    Extends pydantic's BaseModel with JSON string rendering and
    subscript access so instances can be treated like plain dicts.
    """

    def __str__(self) -> str:
        # Render the dumped model as JSON; ensure_ascii=True keeps it 7-bit safe.
        dumped = self.model_dump()
        return json.dumps(dumped, ensure_ascii=True)

    def __repr__(self) -> str:
        # repr deliberately mirrors str for compact logging output.
        return str(self)

    def __getitem__(self, key: str) -> Any:
        # NOTE: dumps the entire model on every access, so nested values come
        # back as plain dicts/lists rather than model instances.
        dumped = self.model_dump()
        return dumped[key]

    def __setitem__(self, key: str, value: Any) -> None:
        # Route through attribute assignment so pydantic validation still applies.
        setattr(self, key, value)
34
+
35
+
36
class ModelConfig(BaseModel):
    """Model configuration

    Sampling parameters and capability switches forwarded to the provider.
    """

    # Standard sampling knobs; defaults mirror common chat-model settings.
    top_k: int = Field(default=50, description="TopK")
    top_p: float = Field(default=0.95, description="TopP")
    temperature: float = Field(default=0.7, description="Temperature")
    stream: bool = Field(
        default=False,
        description="Whether to enable streaming response (output by character)",
    )
    # Pure data flag here — the thought-chain handling itself lives elsewhere.
    thought_chain_model: bool = Field(
        default=False,
        description="Whether to enable thought chain model optimization (enhance complex problem solving)",
    )
    multimodal: bool = Field(
        default=False,
        description="Whether to support multimodal input (e.g. image recognition)",
    )
54
+
55
+
56
class ModelPreset(BaseModel):
    """A named model preset: endpoint, credentials, and sampling config."""

    model: str = Field(
        default="", description="Name of the AI model to use (e.g. gpt-3.5-turbo)"
    )
    name: str = Field(
        default="default", description="Identifier name for current preset"
    )
    base_url: str = Field(
        default="",
        description="Base address of API service (use OpenAI default if empty)",
    )
    api_key: str = Field(default="", description="Key required to access API")
    protocol: str = Field(default="__main__", description="Protocol adapter type")
    config: ModelConfig = Field(default_factory=ModelConfig)
    extra: dict[str, Any] = Field(default_factory=dict)

    @classmethod
    def load(cls, path: Path):
        """Load a preset from *path*, falling back to defaults when missing."""
        if not path.exists():
            return cls()  # Return default values
        raw = json.loads(path.read_text(encoding="utf-8"))
        return cls.model_validate(raw)

    def save(self, path: Path):
        """Write this preset to *path* as indented, non-ASCII-preserving JSON."""
        serialized = json.dumps(self.model_dump(), indent=4, ensure_ascii=False)
        path.write_text(serialized, encoding="u8")
86
+
87
+
88
class Function(BaseModel):
    """A function invocation emitted by the model inside a tool call."""

    arguments: str
    """
    The arguments to call the function with, as generated by the model in JSON
    format. Note that the model does not always generate valid JSON, and may
    hallucinate parameters not defined by your function schema. Validate the
    arguments in your code before calling your function.
    """

    name: str
    """The name of the function to call."""
99
+
100
+
101
class ToolCall(BaseModel):
    """A single tool invocation requested by the model."""

    id: str
    """The ID of the tool call."""

    function: Function
    """The function that the model called."""

    type: typing.Literal["function"] = "function"
    """The type of the tool. Currently, only `function` is supported."""
110
+
111
+
112
# Tool-call type parameter for UniResponse: a list of ToolCall, None, or the
# combined "list-or-None" constraint for responses where calls are optional.
T_TOOL = typing.TypeVar("T_TOOL", list[ToolCall], None, list[ToolCall] | None)
113
+
114
+
115
class UniResponseUsage(BaseModel, Generic[T_INT]):
    """Token usage accounting; T_INT is int when counts are known, else None."""

    prompt_tokens: T_INT
    completion_tokens: T_INT
    total_tokens: T_INT
119
+
120
+
121
class UniResponse(
    BaseModel,
    Generic[T, T_TOOL],
):
    """Unified response format"""

    role: Literal["assistant"] = Field(
        default="assistant",  # Regardless of whether there's content/tool_call, role is assistant
        description="Role",
    )

    # Optional usage stats; NOTE(review): the generic UniResponseUsage is left
    # unparameterized here — confirm the intended T_INT binding.
    usage: UniResponseUsage | None = None
    content: T = Field(
        ...,
        description="Content",
        # Dropped from serialized output when None (requires pydantic exclude_if).
        exclude_if=lambda x: x is None,
    )
    tool_calls: T_TOOL = Field(
        ...,
        description="Tool call results",
        exclude_if=lambda x: x is None,
    )
143
+
144
+
145
class ImageUrl(BaseModel):
    """Wrapper for an image reference inside multimodal content."""

    url: str = Field(..., description="Image URL")


class Content(BaseModel): ...


class ImageContent(Content):
    """Image part of a multimodal user message."""

    type: Literal["image_url"] = "image_url"
    image_url: ImageUrl = Field(..., description="Image URL")


class TextContent(Content):
    """Text part of a multimodal user message."""

    type: Literal["text"] = "text"
    text: str = Field(..., description="Text content")


# Maps the wire-format "type" discriminator to its Content subclass.
CT_MAP: dict[str, type[Content]] = {
    "image_url": ImageContent,
    "text": TextContent,
}

# What a caller may pass as user input: rich multimodal parts or a bare string.
USER_INPUT = list[TextContent | ImageContent] | str

# Content type parameter for Message (TypeVar constraints, not an upper bound).
_T = typing.TypeVar(
    "_T",
    str,
    None,
    list[TextContent],
    list[TextContent | ImageContent],
    USER_INPUT,
)
177
+
178
+
179
class Message(BaseModel, Generic[_T]):
    """A single chat message; _T selects the allowed content payload type."""

    role: Literal["user", "assistant", "system"] = Field(..., description="Role")
    content: _T = Field(..., description="Content")
    tool_calls: list[ToolCall] | None = Field(
        default=None, description="Tool calls", exclude_if=lambda x: x is None
    )
185
+
186
+
187
class ToolResult(BaseModel):
    """The outcome of executing a tool, sent back to the model.

    NOTE(review): ``role`` is required even though only "tool" is valid —
    consider whether a default would be more ergonomic for callers.
    """

    role: Literal["tool"] = Field(..., description="Role")
    name: str = Field(..., description="Tool name")
    content: str = Field(..., description="Tool return content")
    tool_call_id: str = Field(..., description="Tool call ID")
192
+
193
+
194
class MemoryModel(BaseModel):
    """Persisted conversation memory: messages plus timestamp and summary.

    NOTE(review): ``CONTENT_LIST_TYPE_ITEM`` is defined *below* this class;
    the annotation only resolves because ``from __future__ import annotations``
    defers evaluation and pydantic rebuilds lazily — confirm the model builds
    cleanly on first instantiation.
    """

    messages: list[CONTENT_LIST_TYPE_ITEM] = Field(default_factory=list)
    time: float = Field(default_factory=time.time, description="Timestamp")
    abstract: str = Field(default="", description="Summary")
198
+
199
+
200
# One conversation entry: a chat message or the result of a tool invocation.
CONTENT_LIST_TYPE_ITEM = Message | ToolResult
# A full conversation transcript.
CONTENT_LIST_TYPE = list[CONTENT_LIST_TYPE_ITEM]
202
+
203
+
204
class SendMessageWrap(Iterable[CONTENT_LIST_TYPE_ITEM]):
    """Wrapper class for CONTENT_LIST_TYPE

    Holds a conversation as four parts: the system prompt (``train``), the
    prior history (``memory``), the current user query, and any trailing
    messages appended after construction (``end_messages``).
    """

    train: Message[str]  # system message
    memory: CONTENT_LIST_TYPE  # Messages without system message
    user_query: Message
    end_messages: CONTENT_LIST_TYPE  # End messages

    def __init__(
        self,
        train: dict[str, str] | Message[str],
        memory: CONTENT_LIST_TYPE | MemoryModel,
        user_query: Message | None = None,
    ):
        """Build a wrapper from a system prompt, history, and optional query.

        Args:
            train: System message (or a dict validated into one).
            memory: Prior messages, or a MemoryModel whose ``messages`` are
                used. NOTE: the list is aliased, not copied — when the query
                is taken from ``memory``, its last element is popped in place.
            user_query: Explicit user message; when omitted, the last entry
                of ``memory`` is consumed as the query.

        Raises:
            ValueError: if the resolved query is not a user message, or if
                ``user_query`` is omitted and ``memory`` is empty.
        """
        self.train = (
            train if isinstance(train, Message) else Message.model_validate(train)
        )
        self.end_messages = []
        self.memory = memory if isinstance(memory, list) else memory.messages
        if user_query is None:
            # Take the query from the end of memory.
            if not self.memory:
                # Previously this raised IndexError via memory[-1]; raise the
                # class's own ValueError with a clear message instead.
                raise ValueError("Invalid query message, expecting user message!")
            query = self.memory[-1]
            if isinstance(query, ToolResult) or query.role != "user":
                raise ValueError("Invalid query message, expecting user message!")
            self.user_query = query
            # BUGFIX: pop only when the query actually came from memory.
            # Previously the pop ran unconditionally, so an explicit
            # user_query silently dropped an unrelated trailing message.
            self.memory.pop()
        else:
            if isinstance(user_query, ToolResult) or user_query.role != "user":
                raise ValueError("Invalid query message, expecting user message!")
            self.user_query = user_query

    @classmethod
    def validate_messages(cls, messages: CONTENT_LIST_TYPE) -> SendMessageWrap:
        """Split a raw transcript into (system, memory) and wrap it.

        Falls back to the first system message found anywhere in the list
        when the transcript does not start with one.

        Raises:
            ValueError: when no system message is present.
        """
        train = messages[0]
        if train.role != "system":  # Fall back to match the first system message
            for idx, msg in enumerate(messages):
                if msg.role == "system":
                    train = msg
                    # NOTE: mutates the caller's list — the system message
                    # is removed in place.
                    messages.pop(idx)
                    memory = messages
                    break
            else:
                raise ValueError("Invalid messages, expecting system message!")
        else:
            memory = messages[1:]
        return cls(train, memory)

    def __len__(self) -> int:
        # The constant 2 accounts for train + user_query.
        return len(self.memory) + 2 + len(self.end_messages)

    def __iter__(self) -> typing.Iterator[CONTENT_LIST_TYPE_ITEM]:
        """Yield messages in send order: system, memory, query, extras."""
        yield self.train
        yield from self.memory
        yield self.user_query
        yield from self.end_messages

    def copy(self) -> SendMessageWrap:
        """Return a deep copy safe to mutate independently."""
        return deepcopy(self)

    def unwrap(self, exclude_system: bool = False) -> CONTENT_LIST_TYPE:
        """Flatten back into a plain message list."""
        system_msg: CONTENT_LIST_TYPE = [self.train] if not exclude_system else []
        return [*system_msg, *self.memory, self.user_query, *self.end_messages]

    def get_train(self) -> Message[str]:
        """Return the system message."""
        return self.train

    def get_memory(self) -> CONTENT_LIST_TYPE:
        """Return the (aliased) history list."""
        return self.memory

    def get_user_query(self) -> Message:
        """Return the current user query."""
        return self.user_query

    def append(self, message: CONTENT_LIST_TYPE_ITEM) -> None:
        """Queue *message* to be sent after the user query."""
        self.end_messages.append(message)

    def extend(self, messages: CONTENT_LIST_TYPE) -> None:
        """Queue several messages to be sent after the user query."""
        self.end_messages.extend(messages)
amrita_core/utils.py ADDED
@@ -0,0 +1,66 @@
1
from __future__ import annotations

from datetime import datetime, timezone
from typing import Any
from zoneinfo import ZoneInfo

import pytz
7
+
8
+
9
def remove_think_tag(text: str) -> str:
    """Remove the first occurrence of think tag

    Args:
        text (str): Parameter to process

    Returns:
        str: Processed text
    """
    opening = "<think>"
    closing = "</think>"

    # Split on the first opening tag; an empty separator means none exists.
    head, sep, tail = text.partition(opening)
    if not sep:
        return text  # No opening tag — nothing to remove.

    # Locate the matching closing tag inside the remainder.
    close_at = tail.find(closing)
    if close_at == -1:
        return text  # Unmatched opening tag — leave the text untouched.

    # Drop everything from <think> through </think>, then trim any leading
    # newlines left behind by the removal.
    cleaned = head + tail[close_at + len(closing):]
    return cleaned.lstrip("\n")
40
+
41
+
42
def format_datetime_timestamp(time: float) -> str:
    """Format timestamp to date, weekday and time string"""
    # Interpreted in the local timezone; 12-hour clock with AM/PM marker.
    moment = datetime.fromtimestamp(time)
    return f"[{moment:%Y-%m-%d} {moment:%A} {moment:%I:%M:%S %p}]"
49
+
50
+
51
def split_list(lst: list, threshold: int) -> list[Any]:
    """Split list into multiple sublists, each sublist length does not exceed threshold"""
    # Short input (including empty) is returned wrapped as-is, preserving
    # the original list object — no slicing occurs on this path.
    if len(lst) <= threshold:
        return [lst]
    chunks = []
    start = 0
    total = len(lst)
    while start < total:
        chunks.append(lst[start : start + threshold])
        start += threshold
    return chunks
56
+
57
+
58
+ def get_current_datetime_timestamp(utc_time: None | datetime = None):
59
+ """Get current time and format as date, weekday and time string"""
60
+ utc_time = utc_time or datetime.now(pytz.utc)
61
+ asia_shanghai = pytz.timezone("Asia/Shanghai")
62
+ now = utc_time.astimezone(asia_shanghai)
63
+ formatted_date = now.strftime("%Y-%m-%d")
64
+ formatted_weekday = now.strftime("%A")
65
+ formatted_time = now.strftime("%H:%M:%S")
66
+ return f"[{formatted_date} {formatted_weekday} {formatted_time}]"
@@ -0,0 +1,73 @@
1
+ Metadata-Version: 2.4
2
+ Name: amrita_core
3
+ Version: 0.1.0
4
+ Summary: Agent core of Project Amrita
5
+ Project-URL: Homepage, https://github.com/AmritaBot/AmritaCore
6
+ Project-URL: Source, https://github.com/AmritaBot/AmritaCore
7
+ Project-URL: Issue Tracker, https://github.com/AmritaBot/AmritaCore/issues
8
+ Requires-Python: <3.14,>=3.10
9
+ Description-Content-Type: text/markdown
10
+ License-File: LICENSE
11
+ Requires-Dist: fastmcp>=2.14.4
12
+ Requires-Dist: jieba>=0.42.1
13
+ Requires-Dist: loguru>=0.7.3
14
+ Requires-Dist: openai>=2.16.0
15
+ Requires-Dist: pydantic>=2.12.5
16
+ Requires-Dist: pytz>=2025.2
17
+ Dynamic: license-file
18
+
19
+ # AmritaCore
20
+
21
+ AmritaCore is the intelligent agent core module of Project Amrita, serving as the primary logical or control component of the project. It provides a flexible and extensible framework for implementing AI agents with advanced capabilities.
22
+
23
+ ## 🚀 What is AmritaCore?
24
+
25
+ AmritaCore is a next-generation agent framework designed to simplify the creation and deployment of intelligent agents. Built with modern Python technologies, it provides a comprehensive solution for implementing AI-powered applications with features like event-driven architecture, tool integration, and multi-modal support.
26
+
27
+ ## 🎯 Mission and Value Proposition
28
+
29
+ The mission of AmritaCore is to democratize the development of intelligent agents by providing a powerful yet accessible framework. Our core value propositions include:
30
+
31
+ - **Stream-based Design**: All message outputs are designed as asynchronous streams for real-time responses
32
+ - **Security**: Built-in cookie security detection to ensure session safety
33
+ - **Vendor Agnostic**: Data types and conversation management are independent of specific providers, offering high portability
34
+ - **Extensibility**: Integrated MCP client in extension mechanisms for enhanced system scalability
35
+
36
+ ## 🔑 Key Features
37
+
38
+ 1. **Everything is a Stream**: All message outputs are asynchronous stream-based designs supporting real-time responses
39
+ 2. **Cookie Security Detection**: Built-in cookie security detection functionality to protect session security
40
+ 3. **Provider Independent Mechanism**: Data types and conversation management are independent of specific vendors, with high portability
41
+ 4. **MCP Client Support**: Extension mechanisms integrate MCP clients, enhancing system expansion capabilities
42
+ 5. **Event-Driven Architecture**: Comprehensive event system for flexible and reactive agent behavior
43
+ 6. **Tool Integration Framework**: Robust system for integrating external tools and services
44
+ 7. **Advanced Memory Management**: Sophisticated context handling with automatic summarization and token optimization
45
+
46
+ ## 📖 Documentation Structure
47
+
48
+ This repository contains documentation organized as follows:
49
+
50
+ - **Level 1: Project Introduction** - Overview of AmritaCore, its purpose, and key characteristics
51
+ - **Level 2: Quick Start** - Getting started guides, installation, and minimal examples
52
+ - **Level 3: Core Concepts** - Configuration systems, data types, event systems, and tool systems
53
+ - **Level 4: Implementation Guide** - Detailed functional implementations and usage patterns
54
+ - **Level 5: Extensions & Integration** - How to extend and integrate with other systems
55
+ - **Level 6: Security Mechanisms** - Security features and best practices
56
+ - **Level 7: Application Scenarios** - Use cases and practical examples
57
+ - **Level 8: Best Practices & FAQs** - Troubleshooting and optimization tips
58
+ - **Level 9: API Reference** - Complete API documentation
59
+ - **Level 10: Appendices** - Glossary, resources, and changelogs
60
+
61
+ Documentation is currently under construction. For quick start, please refer to the examples in the `demo/` folder.
62
+
63
+ ## 🛠️ Quick Start
64
+
65
+ To quickly start using AmritaCore, check out the examples in the [demo](./demo/) directory. The basic example demonstrates how to initialize the core, configure settings, and run a simple chat session with the AI assistant.
66
+
67
+ ## 🤝 Contributing
68
+
69
+ We welcome contributions! Please see our contribution guidelines for more information.
70
+
71
+ ## 📄 License
72
+
73
+ This project is licensed under the AGPL-3.0 License - see the [LICENSE](./LICENSE) file for details.
@@ -0,0 +1,26 @@
1
+ amrita_core/__init__.py,sha256=uNev8VnIbKg6CcdsaB_H7DgTLTNXLyTRoOqlAOYaBu8,2329
2
+ amrita_core/chatmanager.py,sha256=PV_IfEBiWfjIjKykaJ4XtcEHFYKCvMrwDy2eHVVy5kQ,32436
3
+ amrita_core/config.py,sha256=T8SAH5-ND496nMEQ-b-VGnmDdaGixlDVCSLZ985f860,5377
4
+ amrita_core/libchat.py,sha256=bM-WhD6BGvn_aFHAkywYGaU6vF67QAfudsHfB2Ia-M4,5614
5
+ amrita_core/logging.py,sha256=aUPGGMntZq-r5i60DAvTXfMaeD5ac93Nxus8rcIWw_8,1724
6
+ amrita_core/preset.py,sha256=q9Xf6n247lGndedlNVm2eIHtnMivs8lHJIXdZQZK4CM,5599
7
+ amrita_core/protocol.py,sha256=ehvnVwy6yxWFesAANDh4QOofgU6yDj3TXkBc_sMKhFs,3585
8
+ amrita_core/tokenizer.py,sha256=RkFJLQCT2VHhBAawFYX1rvSZ_7IEe51COwO9PKx5My8,3545
9
+ amrita_core/types.py,sha256=c7XVI1xyDGAQDJRcNcTGyTv45m6spPmXfm7QS26WPG8,8214
10
+ amrita_core/utils.py,sha256=f3aKsy_NEnQS5PR47HBmora9__WSH_-wJ4Jlz0SR6OE,2152
11
+ amrita_core/builtins/__init__.py,sha256=rY4dUdNlLmxFteAyelHVCGKms9RLNQTD6orMPcd5fJM,92
12
+ amrita_core/builtins/adapter.py,sha256=MSfi_orfALrMFVrkrwI0EBgKcqNgMN_Ii9JSuGo7AHs,5329
13
+ amrita_core/builtins/agent.py,sha256=A9KNkLWuXzyDl0eu8Wob_YNv7VhhCx03jDPGUmVkK4g,16801
14
+ amrita_core/builtins/tools.py,sha256=h6xoAYA6cflWvwZQ7jWXwc7g5tMBKVzxdL1lsf_K2p8,2304
15
+ amrita_core/hook/event.py,sha256=GoyV4M93BE8MwGcp_nJwduyib3DW1NTNe31RfWSGUQw,2243
16
+ amrita_core/hook/exception.py,sha256=Q4U8Vd8fZursEnuCwDyFjJizJGW75coj_0Gik4jERoM,226
17
+ amrita_core/hook/matcher.py,sha256=NGU6ClyIaNMWMdFduZkLTXm1st2YpgVFJIyn45Qu31M,7666
18
+ amrita_core/hook/on.py,sha256=C9Z66Nliy6Rq4xyn4CZt3V8NXWPEHPuavp8tDBXcP0w,451
19
+ amrita_core/tools/manager.py,sha256=SwFjopuTa2OqV6bEXRtRaP81d9T3nLiQI5sHvge5NO4,5229
20
+ amrita_core/tools/mcp.py,sha256=K-6SjsD-KUfRgII7HuX2AADClD79qwy2DyxXbSpjyYk,12739
21
+ amrita_core/tools/models.py,sha256=lUpdYSOh-rzOM1jIJURBYgqaZGHWOznnGGyGJExGFeI,12246
22
+ amrita_core-0.1.0.dist-info/licenses/LICENSE,sha256=hIahDEOTzuHCU5J2nd07LWwkLW7Hko4UFO__ffsvB-8,34523
23
+ amrita_core-0.1.0.dist-info/METADATA,sha256=AFHnyDC6XTdxH4LJf3toCJQzfkTsP2Brs5oFXAwZuSA,4125
24
+ amrita_core-0.1.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
25
+ amrita_core-0.1.0.dist-info/top_level.txt,sha256=7mB9cYXC_VZ73UXo6DXplcfSvMGeP1sTNHza46x2FTk,12
26
+ amrita_core-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (80.10.2)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+