chibi-bot 1.6.0b0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- chibi/__init__.py +0 -0
- chibi/__main__.py +343 -0
- chibi/cli.py +90 -0
- chibi/config/__init__.py +6 -0
- chibi/config/app.py +123 -0
- chibi/config/gpt.py +108 -0
- chibi/config/logging.py +15 -0
- chibi/config/telegram.py +43 -0
- chibi/config_generator.py +233 -0
- chibi/constants.py +362 -0
- chibi/exceptions.py +58 -0
- chibi/models.py +496 -0
- chibi/schemas/__init__.py +0 -0
- chibi/schemas/anthropic.py +20 -0
- chibi/schemas/app.py +54 -0
- chibi/schemas/cloudflare.py +65 -0
- chibi/schemas/mistralai.py +56 -0
- chibi/schemas/suno.py +83 -0
- chibi/service.py +135 -0
- chibi/services/bot.py +276 -0
- chibi/services/lock_manager.py +20 -0
- chibi/services/mcp/manager.py +242 -0
- chibi/services/metrics.py +54 -0
- chibi/services/providers/__init__.py +16 -0
- chibi/services/providers/alibaba.py +79 -0
- chibi/services/providers/anthropic.py +40 -0
- chibi/services/providers/cloudflare.py +98 -0
- chibi/services/providers/constants/suno.py +2 -0
- chibi/services/providers/customopenai.py +11 -0
- chibi/services/providers/deepseek.py +15 -0
- chibi/services/providers/eleven_labs.py +85 -0
- chibi/services/providers/gemini_native.py +489 -0
- chibi/services/providers/grok.py +40 -0
- chibi/services/providers/minimax.py +96 -0
- chibi/services/providers/mistralai_native.py +312 -0
- chibi/services/providers/moonshotai.py +20 -0
- chibi/services/providers/openai.py +74 -0
- chibi/services/providers/provider.py +892 -0
- chibi/services/providers/suno.py +130 -0
- chibi/services/providers/tools/__init__.py +23 -0
- chibi/services/providers/tools/cmd.py +132 -0
- chibi/services/providers/tools/common.py +127 -0
- chibi/services/providers/tools/constants.py +78 -0
- chibi/services/providers/tools/exceptions.py +1 -0
- chibi/services/providers/tools/file_editor.py +875 -0
- chibi/services/providers/tools/mcp_management.py +274 -0
- chibi/services/providers/tools/mcp_simple.py +72 -0
- chibi/services/providers/tools/media.py +451 -0
- chibi/services/providers/tools/memory.py +252 -0
- chibi/services/providers/tools/schemas.py +10 -0
- chibi/services/providers/tools/send.py +435 -0
- chibi/services/providers/tools/tool.py +163 -0
- chibi/services/providers/tools/utils.py +146 -0
- chibi/services/providers/tools/web.py +261 -0
- chibi/services/providers/utils.py +182 -0
- chibi/services/task_manager.py +93 -0
- chibi/services/user.py +269 -0
- chibi/storage/abstract.py +54 -0
- chibi/storage/database.py +86 -0
- chibi/storage/dynamodb.py +257 -0
- chibi/storage/local.py +70 -0
- chibi/storage/redis.py +91 -0
- chibi/utils/__init__.py +0 -0
- chibi/utils/app.py +249 -0
- chibi/utils/telegram.py +521 -0
- chibi_bot-1.6.0b0.dist-info/LICENSE +21 -0
- chibi_bot-1.6.0b0.dist-info/METADATA +340 -0
- chibi_bot-1.6.0b0.dist-info/RECORD +70 -0
- chibi_bot-1.6.0b0.dist-info/WHEEL +4 -0
- chibi_bot-1.6.0b0.dist-info/entry_points.txt +3 -0
chibi/exceptions.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
class GptException(Exception):
    """Base exception for LLM-provider failures.

    Carries the provider name, the model name and a human-readable detail so
    callers can report *where* a request failed, not just that it failed.
    """

    def __init__(
        self,
        provider: str = "unknown",
        model: str = "unknown",
        detail: str = "Failed to receive response from the service",
    ) -> None:
        self.model = model
        self.provider = provider
        self.detail = detail

    def __str__(self) -> str:
        # Render as ClassName(provider=..., model=..., detail=...) so
        # subclasses automatically show their own name.
        return "{}(provider={!r}, model={!r}, detail={!r})".format(
            type(self).__name__, self.provider, self.model, self.detail
        )

    def __repr__(self) -> str:
        # repr intentionally mirrors str: both show the full context.
        return str(self)
+
|
|
20
|
+
# ---------------------------------------------------------------------------
# Concrete GPT error types.  Each subclass only narrows the *meaning* of the
# failure; construction and formatting are inherited from GptException.
# ---------------------------------------------------------------------------


# No API key is configured for the targeted provider.
class NoApiKeyProvidedError(GptException): ...


# The provider rejected the request as unauthorized.
class NotAuthorizedError(GptException): ...


# The provider's rate limit was hit.
class ServiceRateLimitError(GptException): ...


# The provider returned an error response.
class ServiceResponseError(GptException): ...


# The provider returned no usable response.
class NoResponseError(GptException): ...


# The provider could not be reached at all.
class ServiceConnectionError(GptException): ...


# No model has been selected for the user/session.
class NoModelSelectedError(GptException): ...


# No provider has been selected (and none could be auto-selected).
class NoProviderSelectedError(GptException): ...


# Required account ID is missing (provider-specific; e.g. account-scoped APIs).
class NoAccountIDSetError(GptException): ...
+
|
|
47
|
+
class RecursionLimitExceeded(GptException):
    """Raised when a tool-call / generation recursion depth hits its limit.

    Attributes:
        exceeded_limit: The limit value that was exceeded.
    """

    def __init__(
        self,
        exceeded_limit: int,
        provider: str = "unknown",
        model: str = "unknown",
        detail: str = "Failed to receive response from the service",
    ) -> None:
        # Delegate the common attributes to GptException instead of
        # duplicating its assignments (the original re-set model/provider/
        # detail by hand, which would drift if the base class changed).
        super().__init__(provider=provider, model=model, detail=detail)
        self.exceeded_limit = exceeded_limit
chibi/models.py
ADDED
|
@@ -0,0 +1,496 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import base64
|
|
3
|
+
import binascii
|
|
4
|
+
import itertools
|
|
5
|
+
import json
|
|
6
|
+
import time
|
|
7
|
+
from typing import TYPE_CHECKING, Any, Literal
|
|
8
|
+
|
|
9
|
+
from anthropic.types import (
|
|
10
|
+
MessageParam,
|
|
11
|
+
TextBlockParam,
|
|
12
|
+
ToolResultBlockParam,
|
|
13
|
+
ToolUseBlockParam,
|
|
14
|
+
)
|
|
15
|
+
from google.genai.types import ContentDict, FunctionCallDict, PartDict
|
|
16
|
+
from mistralai.models import (
|
|
17
|
+
AssistantMessage as MistralAssistantMessage,
|
|
18
|
+
)
|
|
19
|
+
from mistralai.models import (
|
|
20
|
+
FunctionCall as MistralFunctionCall,
|
|
21
|
+
)
|
|
22
|
+
from mistralai.models import (
|
|
23
|
+
SystemMessage as MistralSystemMessage,
|
|
24
|
+
)
|
|
25
|
+
from mistralai.models import (
|
|
26
|
+
ToolCall as MistralToolCall,
|
|
27
|
+
)
|
|
28
|
+
from mistralai.models import (
|
|
29
|
+
ToolMessage as MistralToolMessage,
|
|
30
|
+
)
|
|
31
|
+
from mistralai.models import (
|
|
32
|
+
UserMessage as MistralUserMessage,
|
|
33
|
+
)
|
|
34
|
+
from openai.types.chat import (
|
|
35
|
+
ChatCompletionAssistantMessageParam,
|
|
36
|
+
ChatCompletionFunctionMessageParam,
|
|
37
|
+
ChatCompletionMessageParam,
|
|
38
|
+
ChatCompletionToolMessageParam,
|
|
39
|
+
ChatCompletionUserMessageParam,
|
|
40
|
+
)
|
|
41
|
+
from pydantic import BaseModel, Field, field_serializer, field_validator
|
|
42
|
+
|
|
43
|
+
from chibi.config import application_settings, gpt_settings
|
|
44
|
+
from chibi.exceptions import (
|
|
45
|
+
NoApiKeyProvidedError,
|
|
46
|
+
NoProviderSelectedError,
|
|
47
|
+
)
|
|
48
|
+
from chibi.schemas.app import ModelChangeSchema
|
|
49
|
+
|
|
50
|
+
if TYPE_CHECKING:
|
|
51
|
+
from chibi.services.providers import RegisteredProviders
|
|
52
|
+
from chibi.services.providers.provider import Provider
|
|
53
|
+
|
|
54
|
+
# Maps a Chibi message role to the OpenAI chat-completions param class used to
# wrap it in Message.to_openai.  NOTE: there is no "system" entry, so system
# messages cannot be converted through this table (to_openai raises for them).
CHAT_COMPLETION_CLASSES = {
    "assistant": ChatCompletionAssistantMessageParam,
    "function": ChatCompletionFunctionMessageParam,
    "tool": ChatCompletionToolMessageParam,
    "user": ChatCompletionUserMessageParam,
}
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
class FunctionSchema(BaseModel):
    """A function invocation: its name plus JSON-encoded arguments."""

    name: str
    # JSON string of the call arguments; None when the call carries none.
    arguments: str | None = None
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
class ToolSchema(BaseModel):
    """A single tool (function) call attached to a message.

    ``thought_signature`` is an opaque byte blob carried through the Google
    (Gemini) conversion path; it is held in memory as raw bytes and
    round-tripped through storage as base64 text.
    """

    id: str
    type: str = "function"
    function: FunctionSchema
    thought_signature: bytes | None = None

    @field_validator("thought_signature", mode="before")
    @classmethod
    def decode_signature_from_base64(cls, v: bytes | str | None) -> bytes | None:
        """Normalise the incoming value to raw bytes.

        Accepts None (passed through), raw bytes (passed through) or a base64
        encoded string (decoded).  Anything else is a TypeError.
        """
        if v is None:
            return None

        if isinstance(v, bytes):
            return v

        if isinstance(v, str):
            try:
                return base64.b64decode(v)
            except binascii.Error as exc:
                # Chain the original decoding error so debugging shows the
                # underlying base64 failure, not just the ValueError.
                raise ValueError("Invalid base64 string for thought_signature") from exc

        raise TypeError("thought_signature must be bytes or a base64 encoded string")

    @field_serializer("thought_signature")
    def serialize_signature_to_base64(self, value: bytes | None) -> str | None:
        """Serialize the raw bytes as base64 ASCII text (None/empty -> None)."""
        if not value:
            return None
        return base64.b64encode(value).decode("ascii")
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
class Message(BaseModel):
    """A provider-agnostic chat message.

    Stores role/content plus optional tool-call metadata, and converts to and
    from the native message formats of OpenAI, Anthropic, Google (Gemini) and
    Mistral via the ``to_*`` / ``from_*`` pairs below.
    """

    # Unique id: nanosecond timestamp taken at creation time.
    id: int = Field(default_factory=time.time_ns)
    role: str
    content: str
    # Expiry timestamp; stripped when exporting to OpenAI (see to_openai).
    expire_at: float | None = None
    tool_calls: list[ToolSchema] | None = None
    # For role == "tool": the id of the tool call this message answers.
    tool_call_id: str | None = None
    # Which provider format this message was imported from, if any
    # ("openai", "anthropic", "google", "mistral").
    source: str | None = None

    def to_openai(self) -> ChatCompletionMessageParam:
        """Convert to an OpenAI chat-completions message param.

        Raises ValueError for roles missing from CHAT_COMPLETION_CLASSES
        (notably "system").
        """
        wrapper_class = CHAT_COMPLETION_CLASSES.get(self.role)
        if not wrapper_class:
            raise ValueError(f"Role {self.role} seems not supported yet")

        # expire_at is Chibi-internal bookkeeping and must not leak into the
        # provider payload.
        open_ai_message = wrapper_class(**self.model_dump(exclude={"expire_at"}))
        return open_ai_message

    @classmethod
    def from_openai(cls, open_ai_message: ChatCompletionMessageParam) -> "Message":
        """Build a Message from an OpenAI message param, tagging its source."""
        # if not open_ai_message.get("tool_calls"):
        #     open_ai_message["tool_calls"] = []
        msg = cls(**open_ai_message)
        msg.source = "openai"
        return msg

    def to_anthropic(self) -> MessageParam:
        """Convert to an Anthropic MessageParam.

        Tool results are expressed as a "user" message carrying a
        tool_result block; anything that is not user/tool falls through to
        the assistant branch.
        """
        if self.role == "tool" and self.tool_call_id:
            return MessageParam(
                role="user",
                content=[
                    ToolResultBlockParam(
                        tool_use_id=self.tool_call_id,
                        type="tool_result",
                        content=self.content,
                    )
                ],
            )
        if self.role == "user":
            return MessageParam(
                role="user",
                content=[
                    TextBlockParam(
                        type="text",
                        text=self.content,
                    )
                ],
            )

        # Assistant branch: always emit a text block (Anthropic requires
        # non-empty content), then append one tool_use block per tool call.
        assistant_content: list[TextBlockParam | ToolUseBlockParam] = [
            TextBlockParam(
                type="text",
                text=self.content or "No content",
            )
        ]
        if self.tool_calls:
            for tool in self.tool_calls:
                assistant_content.append(
                    ToolUseBlockParam(
                        type="tool_use",
                        id=tool.id,
                        name=tool.function.name,
                        input=json.loads(tool.function.arguments) if tool.function.arguments else {},
                    )
                )
        return MessageParam(
            role="assistant",
            content=assistant_content,
        )

    @classmethod
    def from_anthropic(cls, anthropic_message: MessageParam) -> "Message":
        """Build a Message from an Anthropic MessageParam.

        Scans the content blocks: text becomes ``content``, tool_result
        flips the role to "tool", tool_use blocks become ToolSchema entries.
        NOTE: if several text blocks are present, only the last one wins.
        """
        message_content = anthropic_message["content"]
        role: Literal["user", "assistant", "tool"] = anthropic_message["role"]
        tool_call_id: str | None = None
        content: str = ""
        tools: list[ToolSchema] = []

        if isinstance(message_content, str):
            return cls(role=role, content=message_content)

        for content_block in message_content:
            if isinstance(content_block, dict):
                # TextBlockParam
                if content_block["type"] == "text":
                    content = content_block.get("text", "No content")

                # ToolResultBlockParam
                if content_block["type"] == "tool_result":
                    role = "tool"
                    tool_call_id = content_block.get("tool_use_id")
                    content_data = content_block["content"]
                    # It is not very clear under what circumstances the content
                    # here might assume the value like `Iterable[Content]`.
                    # In anthropic code (at the moment) there is nothing like
                    # that. It is likely an atavism from the orig openai module
                    content = content_data if isinstance(content_data, str) else "No content"

                # ToolUseBlockParam
                if content_block["type"] == "tool_use":
                    function = FunctionSchema(
                        name=content_block["name"],
                        arguments=json.dumps(content_block["input"]),
                    )
                    tool = ToolSchema(id=content_block["id"], function=function)
                    tools.append(tool)

        return cls(role=role, content=content, tool_calls=tools or None, tool_call_id=tool_call_id, source="anthropic")

    def to_google(self) -> ContentDict:
        """Convert a Chibi Message to a Google AI ContentDict."""

        # Google uses 'model' for the assistant's role
        google_role = "model" if self.role == "assistant" else "user"

        # Handle tool calls from the assistant
        if self.role == "assistant" and self.tool_calls:
            parts: list[PartDict] = []
            # Add text content if present
            if self.content:
                parts.append({"text": self.content})
            # Add function calls
            for tool_call in self.tool_calls:
                args = json.loads(tool_call.function.arguments) if tool_call.function.arguments else {}
                parts.append(
                    PartDict(
                        function_call=FunctionCallDict(
                            name=tool_call.function.name,
                            args=args,
                        ),
                        thought_signature=tool_call.thought_signature,
                    )
                )

            return ContentDict(role=google_role, parts=parts)

        # Handle tool responses - Google uses 'user' role for tool responses
        if self.role == "tool" and self.tool_call_id:
            return {
                "role": "user",
                "parts": [
                    {
                        "function_response": {
                            "name": self.tool_call_id,  # This needs to be resolved to function name by provider
                            "response": {"content": self.content},
                        }
                    }
                ],
            }

        # Handle simple text content for user or assistant
        if self.content:
            return {"role": google_role, "parts": [{"text": self.content}]}

        # Return empty content if no other case matches
        return {"role": google_role, "parts": []}

    @classmethod
    def from_google(cls, google_content: ContentDict | dict[str, Any]) -> "Message":
        """Convert a Google AI ContentDict to a Chibi Message."""

        # Map Google role back to Chibi role
        chibi_role = "assistant" if google_content.get("role") == "model" else google_content.get("role", "user")
        content = ""
        tools: list[ToolSchema] = []
        tool_call_id: str | None = None

        parts = google_content.get("parts") or []

        for part in parts:
            if isinstance(part, dict):
                # Text content
                if part.get("text"):
                    content = str(part["text"])

                # Function call from assistant
                elif part.get("function_call"):
                    function_call = part["function_call"]
                    if function_call and isinstance(function_call, dict):
                        function = FunctionSchema(
                            name=function_call.get("name", ""),
                            arguments=json.dumps(function_call.get("args", {})),
                        )
                        # Generate a unique ID for the tool call
                        tool_id = f"call_{time.time_ns()}"
                        tool = ToolSchema(
                            id=tool_id, function=function, thought_signature=part.get("thought_signature")
                        )
                        tools.append(tool)

                # Function response (tool result)
                elif part.get("function_response"):
                    chibi_role = "tool"
                    function_response = part["function_response"]
                    if function_response and isinstance(function_response, dict):
                        tool_call_id = function_response.get("name")
                        response_content = function_response.get("response", {})
                        if isinstance(response_content, dict):
                            content = response_content.get("content", "")

        return cls(
            role=chibi_role, content=content, tool_calls=tools or None, tool_call_id=tool_call_id, source="google"
        )

    def to_mistral(self) -> MistralSystemMessage | MistralUserMessage | MistralAssistantMessage | MistralToolMessage:
        """Convert to MistralAI SDK format."""
        if self.role == "system":
            return MistralSystemMessage(content=self.content, role="system")

        elif self.role == "user":
            return MistralUserMessage(content=self.content, role="user")

        elif self.role == "assistant":
            mistral_tool_calls: list[MistralToolCall] | None = None
            if self.tool_calls:
                mistral_tool_calls = [
                    MistralToolCall(
                        id=tool.id,
                        function=MistralFunctionCall(
                            name=tool.function.name,
                            arguments=tool.function.arguments or "{}",
                        ),
                    )
                    for tool in self.tool_calls
                ]
            return MistralAssistantMessage(
                content=self.content,
                tool_calls=mistral_tool_calls,
                role="assistant",
            )

        elif self.role == "tool":
            return MistralToolMessage(
                content=self.content,
                tool_call_id=self.tool_call_id or "",
                name=None,
                role="tool",
            )

        # Fallback to user message
        return MistralUserMessage(content=self.content, role="user")

    @classmethod
    def from_mistral(
        cls,
        mistral_message: MistralUserMessage | MistralAssistantMessage | MistralToolMessage,
    ) -> "Message":
        """Convert from MistralAI SDK format to Chibi Message."""
        role: Literal["user", "assistant", "tool"] = mistral_message.role  # type: ignore

        # Extract content - handle different content types
        raw_content = mistral_message.content
        if isinstance(raw_content, str):
            content = raw_content
        elif raw_content is None:
            content = ""
        else:
            # Content is a list/complex type - convert to string
            content = str(raw_content)

        tool_calls: list[ToolSchema] | None = None
        tool_call_id: str | None = None

        if isinstance(mistral_message, MistralAssistantMessage) and mistral_message.tool_calls:
            tool_calls = [
                ToolSchema(
                    id=tool.id or "",
                    function=FunctionSchema(
                        name=tool.function.name,
                        arguments=tool.function.arguments,
                    ),
                )
                for tool in mistral_message.tool_calls
            ]

        if isinstance(mistral_message, MistralToolMessage):
            # Handle Unset/None/str types
            raw_tool_call_id = mistral_message.tool_call_id
            if raw_tool_call_id and not isinstance(raw_tool_call_id, type(None)):
                tool_call_id = str(raw_tool_call_id)

        return cls(
            role=role,
            content=content,
            tool_calls=tool_calls,
            tool_call_id=tool_call_id,
            source="mistral",
        )
|
|
384
|
+
|
|
385
|
+
|
|
386
|
+
class ImageMeta(BaseModel):
    """Bookkeeping record for one generated image (used for monthly limits)."""

    # Unique id: nanosecond timestamp taken at creation time.
    id: int = Field(default_factory=time.time_ns)
    # Timestamp after which this record no longer counts toward limits
    # (expiry is enforced elsewhere; only the field is declared here).
    expire_at: float
|
|
389
|
+
|
|
390
|
+
|
|
391
|
+
class User(BaseModel):
    """Per-user state: API keys, conversation history, and model selection.

    Provider-selection properties lazily pick a provider and, as a side
    effect, persist the choice into the ``selected_*`` fields.
    """

    id: int
    # Per-provider API keys, defaulting to the globally configured keys.
    # NOTE: the gpt_settings defaults are captured once, at class definition.
    alibaba_token: str | None = gpt_settings.alibaba_key
    anthropic_token: str | None = gpt_settings.anthropic_key
    deepseek_token: str | None = gpt_settings.deepseek_key
    gemini_token: str | None = gpt_settings.gemini_key
    mistralai_token: str | None = gpt_settings.mistralai_key
    openai_token: str | None = gpt_settings.openai_key
    # Generic provider-name -> API-key mapping fed to RegisteredProviders.
    tokens: dict[str, str] = {}
    messages: list[Message] = Field(default_factory=list)
    images: list[ImageMeta] = Field(default_factory=list)
    gpt_model: str | None = None  # Deprecated
    selected_gpt_model_name: str | None = None
    selected_gpt_provider_name: str | None = None
    selected_image_model_name: str | None = None
    selected_image_provider_name: str | None = None
    info: str = "No info provided"
    working_dir: str = application_settings.working_dir
    llm_skills: dict[str, str] = {}

    def __init__(self, **kwargs: Any) -> None:
        # Back-compat: migrate the deprecated gpt_model field into
        # selected_gpt_model_name when the latter is absent.
        if kwargs.get("gpt_model", None) and not kwargs.get("selected_gpt_model_name", None):
            kwargs["selected_gpt_model_name"] = kwargs["gpt_model"]
        super().__init__(**kwargs)

    @property
    def providers(self) -> "RegisteredProviders":
        """Provider registry built from this user's API keys.

        Imported locally to avoid a circular import with chibi.services.
        A fresh registry is constructed on every access.
        """
        from chibi.services.providers import RegisteredProviders

        return RegisteredProviders(user_api_keys=self.tokens)

    @property
    def active_image_provider(self) -> "Provider":
        """Image-generation provider; auto-selects and persists one if unset.

        Raises NoApiKeyProvidedError when no usable provider is found.
        """
        if not self.selected_image_provider_name:
            image_provider = self.providers.first_image_generation_ready
        else:
            image_provider = self.providers.get(provider_name=self.selected_image_provider_name)

        if not image_provider:
            raise NoApiKeyProvidedError(provider="Unset", detail="No API key provided")

        # Persist the auto-selected provider/model for subsequent calls.
        if not self.selected_image_provider_name:
            self.selected_image_provider_name = image_provider.name
            self.selected_image_model_name = image_provider.default_image_model
        return image_provider

    @property
    def stt_provider(self) -> "Provider":
        """Speech-to-text provider: configured one first, else first ready."""
        if gpt_settings.stt_provider:
            if provider := self.providers.get(gpt_settings.stt_provider):
                return provider
        if provider := self.providers.first_stt_ready:
            return provider
        raise ValueError("No stt-provider found.")

    @property
    def tts_provider(self) -> "Provider":
        """Text-to-speech provider: configured one first, else first ready."""
        if gpt_settings.tts_provider:
            if provider := self.providers.get(gpt_settings.tts_provider):
                return provider
        if provider := self.providers.first_tts_ready:
            return provider
        raise ValueError("No tts-provider found.")

    @property
    def moderation_provider(self) -> "Provider":
        """Moderation provider: configured one first, else first ready."""
        if gpt_settings.moderation_provider:
            if provider := self.providers.get(gpt_settings.moderation_provider):
                return provider
        if provider := self.providers.first_moderation_ready:
            return provider
        raise ValueError("No moderation-provider found.")

    @property
    def active_gpt_provider(self) -> "Provider":
        """Chat provider, resolved in priority order.

        1. The user's explicit selection.
        2. The configured default provider (selection is persisted).
        3. The first chat-ready provider (selection is persisted).
        Raises NoProviderSelectedError when nothing matches.
        """
        if self.selected_gpt_provider_name:
            if provider := self.providers.get(provider_name=self.selected_gpt_provider_name):
                return provider

        if gpt_settings.default_provider:
            if provider := self.providers.get(provider_name=gpt_settings.default_provider):
                self.selected_gpt_provider_name = provider.name
                self.selected_gpt_model_name = gpt_settings.default_model or provider.default_model
                return provider

        if provider := self.providers.first_chat_ready:
            self.selected_gpt_provider_name = provider.name
            self.selected_gpt_model_name = provider.default_model
            return provider

        raise NoProviderSelectedError

    async def get_available_models(self, image_generation: bool = False) -> list[ModelChangeSchema]:
        """Fetch available models from all providers concurrently."""
        providers = self.providers.available_instances
        tasks = [provider.get_available_models(image_generation=image_generation) for provider in providers]
        results = await asyncio.gather(*tasks)

        return list(itertools.chain.from_iterable(results))

    @property
    def has_reached_image_limits(self) -> bool:
        """True when the monthly image-generation quota is exhausted.

        Whitelisted user ids and an unset limit always bypass the check.
        """
        if not gpt_settings.image_generations_monthly_limit:
            return False
        if str(self.id) in gpt_settings.image_generations_whitelist:
            return False
        return len(self.images) >= gpt_settings.image_generations_monthly_limit
|
|
File without changes
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
from pydantic import BaseModel
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class ContentItemSchema(BaseModel):
    """Base for Anthropic response content items; discriminated by ``type``."""

    type: str
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class TextItemSchema(ContentItemSchema):
    """A text content item from an Anthropic response."""

    text: str | None = None
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class ToolCallItemSchema(ContentItemSchema):
    """A tool-use content item from an Anthropic response."""

    id: str | None = None
    name: str | None = None
    # Tool input arguments; NOTE(review): typed str->str, though tool inputs
    # may carry non-string values — confirm against the Anthropic payload.
    input: dict[str, str] | None = None
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class AnthropicUsageSchema(BaseModel):
    """Token accounting reported by the Anthropic API."""

    input_tokens: int
    output_tokens: int
|
chibi/schemas/app.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
from typing import TYPE_CHECKING
|
|
2
|
+
|
|
3
|
+
from openai.types import CompletionUsage
|
|
4
|
+
from openai.types.completion_usage import CompletionTokensDetails, PromptTokensDetails
|
|
5
|
+
from pydantic import BaseModel, model_validator
|
|
6
|
+
|
|
7
|
+
from chibi.config import telegram_settings
|
|
8
|
+
|
|
9
|
+
if TYPE_CHECKING:
|
|
10
|
+
pass
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class UsageSchema(BaseModel):
    """Provider-agnostic token usage; all counters default to zero."""

    completion_tokens: int = 0
    prompt_tokens: int = 0
    total_tokens: int = 0
    # Anthropic-style prompt-caching counters.
    cache_creation_input_tokens: int = 0
    cache_read_input_tokens: int = 0
    # OpenAI-style detailed breakdowns, when the provider supplies them.
    completion_tokens_details: CompletionTokensDetails | None = None
    prompt_tokens_details: PromptTokensDetails | None = None
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class MetricTagsSchema(UsageSchema):
    """Usage counters plus identifying tags attached to emitted metrics."""

    user_id: int
    user_name: str | None = None
    provider: str
    model: str
    # Captured once at class definition from the Telegram settings.
    bot: str = telegram_settings.bot_name
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class ChatResponseSchema(BaseModel):
    """A chat answer together with its origin and token usage."""

    answer: str
    provider: str
    model: str
    # Either the internal UsageSchema or OpenAI's CompletionUsage; None when
    # the provider reports no usage.
    usage: UsageSchema | CompletionUsage | None
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class ModelChangeSchema(BaseModel):
    """Describes a selectable model: owning provider, internal name, label."""

    provider: str
    name: str
    display_name: str = ""
    image_generation: bool

    @model_validator(mode="after")
    def set_display_name_if_none(self) -> "ModelChangeSchema":
        """Fall back to the raw model name when no display name was given."""
        self.display_name = self.display_name or self.name
        return self
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
class ModeratorsAnswer(BaseModel):
    """Structured verdict returned by the moderation provider."""

    status: str | None = None
    verdict: str
    reason: str | None = None
|
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
from pydantic import BaseModel
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class UsageSchema(BaseModel):
    """Token usage block in a Cloudflare Workers AI response."""

    prompt_tokens: int
    completion_tokens: int
    total_tokens: int
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class AnswerItemSchema(BaseModel):
    """The ``result`` payload of a Cloudflare chat completion."""

    response: str
    usage: UsageSchema
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
# NOTE(review): class name is misspelled ("Erro" vs "Error"); renaming would
# break the response schemas below that reference it, so it is kept as-is.
class ErroItemSchema(BaseModel):
    """A single error entry in a Cloudflare API response."""

    code: int
    message: str
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class ChatCompletionResponseSchema(BaseModel):
    """Envelope of a Cloudflare Workers AI chat-completion response."""

    success: bool
    errors: list[ErroItemSchema]
    result: AnswerItemSchema
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class PriceSchema(BaseModel):
    """A pricing entry attached to a Cloudflare model property."""

    unit: str
    price: float
    currency: str
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class PropertySchema(BaseModel):
    """A key/value property of a Cloudflare model description."""

    property_id: str
    # Plain value, a pricing list, or absent.
    value: str | list[PriceSchema] | None
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
class TaskSchema(BaseModel):
    """The task category (e.g. a model capability) in Cloudflare's catalog."""

    id: str
    name: str
    description: str
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
class ModelDescriptionSchema(BaseModel):
    """One model entry from Cloudflare's model-search endpoint."""

    id: str
    source: int
    name: str
    description: str
    task: TaskSchema
    # ISO-8601 timestamp string as returned by the API — not parsed here.
    created_at: str
    tags: list[str]
    properties: list[PropertySchema]
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
class ResultInfoSchema(BaseModel):
    """Pagination metadata of a Cloudflare list response."""

    count: int
    page: int
    per_page: int
    total_count: int
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
class ModelsSearchResponseSchema(BaseModel):
    """Envelope of the Cloudflare model-search response."""

    success: bool
    result: list[ModelDescriptionSchema]
    errors: list[ErroItemSchema]
    result_info: ResultInfoSchema
|