py-aidol 0.4.0__py3-none-any.whl → 0.5.1__py3-none-any.whl
This diff compares the contents of two publicly released versions of this package as they appear in their respective public registries. It is provided for informational purposes only.
- aidol/api/__init__.py +3 -0
- aidol/api/chatroom.py +325 -0
- aidol/api/companion.py +37 -50
- aidol/context/__init__.py +26 -0
- aidol/context/builder.py +376 -0
- aidol/factories.py +8 -0
- aidol/models/__init__.py +2 -1
- aidol/models/chatroom.py +48 -0
- aidol/protocols.py +64 -0
- aidol/providers/__init__.py +9 -0
- aidol/providers/llm/__init__.py +15 -0
- aidol/providers/llm/base.py +147 -0
- aidol/providers/llm/openai.py +101 -0
- aidol/repositories/__init__.py +2 -0
- aidol/repositories/chatroom.py +142 -0
- aidol/schemas/__init__.py +35 -0
- aidol/schemas/chatroom.py +147 -0
- aidol/schemas/model_settings.py +35 -0
- aidol/schemas/persona.py +20 -0
- aidol/services/__init__.py +2 -0
- aidol/services/response_generation_service.py +63 -0
- aidol/settings.py +9 -0
- {py_aidol-0.4.0.dist-info → py_aidol-0.5.1.dist-info}/METADATA +4 -1
- py_aidol-0.5.1.dist-info/RECORD +41 -0
- py_aidol-0.4.0.dist-info/RECORD +0 -28
- {py_aidol-0.4.0.dist-info → py_aidol-0.5.1.dist-info}/WHEEL +0 -0
aidol/providers/llm/base.py
ADDED

@@ -0,0 +1,147 @@
+"""LLM Provider Protocol for AIdol.
+
+This module defines the LLMProvider Protocol for platform-agnostic LLM integration.
+Implementations can use any LLM SDK (LangChain, LiteLLM, direct API calls, etc.).
+Uses LangChain message types for type safety in the interface.
+"""
+
+# pylint: disable=unnecessary-ellipsis,redundant-returns-doc
+
+from __future__ import annotations
+
+from collections.abc import Sequence
+from typing import Protocol
+
+import litellm
+from langchain_core.messages import BaseMessage
+
+from aidol.schemas import ModelSettings
+
+
+def lookup_context_window(model_name: str) -> int:
+    """Look up the total context window for a given model using LiteLLM's model_cost.
+
+    In LiteLLM's model_cost structure:
+    - 'max_input_tokens': Total context window size (input + output combined)
+    - 'max_tokens'/'max_output_tokens': Maximum output tokens only
+
+    Returns the value of 'max_input_tokens' which represents the total context window.
+    Raises ValueError if not found.
+    """
+    if model_name in litellm.model_cost:
+        model_info = litellm.model_cost[model_name]
+        if "max_input_tokens" in model_info:
+            return model_info["max_input_tokens"]
+    raise ValueError(
+        f"Context window for model '{model_name}' not found in LiteLLM model_cost."
+    )
+
+
+class ProviderConstraints(Protocol):
+    """Protocol for provider constraints used in context building.
+
+    Defines only the constraint properties needed by MessageContextBuilder.
+    This minimal interface allows integrators to pass their existing provider
+    implementations without implementing the full LLMProvider interface.
+    """
+
+    @property
+    def require_first_user_message(self) -> bool:
+        """Whether the provider requires first message to be from user."""
+        ...
+
+    @property
+    def combine_system_messages(self) -> bool:
+        """Whether multiple system messages should be combined into one."""
+        ...
+
+    @property
+    def enforce_alternating_turns(self) -> bool:
+        """Whether messages must alternate between user and assistant."""
+        ...
+
+
+class LLMProvider(ProviderConstraints, Protocol):
+    """Protocol for LLM providers.
+
+    Extends ProviderConstraints with full LLM functionality.
+    Defines the interface for LLM integration in AIdol.
+    Implementations are free to use any underlying SDK.
+
+    Example implementations:
+    - LangChain wrapper (for LangChain-based integrators)
+    - LiteLLM direct calls (for standalone aidol)
+    - Direct API calls (for custom integrations)
+
+    Uses LangChain message types for type safety in the interface.
+    """
+
+    @property
+    def require_first_user_message(self) -> bool:
+        """Whether the provider requires first message to be from user.
+
+        Anthropic: True (Claude requires user message first)
+        OpenAI: False (GPT accepts system-only conversations)
+        """
+        ...
+
+    @property
+    def combine_system_messages(self) -> bool:
+        """Whether multiple system messages should be combined into one.
+
+        Anthropic: True (Claude prefers single system message)
+        OpenAI: False (GPT handles multiple system messages)
+        """
+        ...
+
+    @property
+    def enforce_alternating_turns(self) -> bool:
+        """Whether messages must alternate between user and assistant.
+
+        Anthropic: True (Claude requires strict alternation)
+        OpenAI: False (GPT allows consecutive same-role messages)
+        """
+        ...
+
+    def completion(
+        self,
+        model_settings: ModelSettings,
+        messages: Sequence[BaseMessage],
+        response_format: dict[str, str] | None = None,
+    ) -> str:
+        """Generate completion from messages.
+
+        Args:
+            model_settings: Model configuration (model name, temperature, etc.)
+            messages: LangChain messages (BaseMessage).
+            response_format: Optional response format specification.
+                Example: {"type": "json_object"}
+                Note: Not all providers support this (e.g., Anthropic ignores it).
+
+        Returns:
+            Generated text response.
+
+        Raises:
+            Implementation-specific exceptions propagate to caller.
+        """
+        ...
+
+    def get_context_size(self, model_name: str) -> int:
+        """Get the maximum total context window size for the given model.
+
+        The context window represents the total number of tokens that can be used
+        for both input messages and model output combined in a single API call.
+        This is NOT the maximum output tokens alone.
+
+        This value is used by the application to:
+        1. Check if input messages exceed the available token limit
+        2. Reserve tokens for model output (completion)
+        3. Truncate old messages when the context becomes too large
+
+        Args:
+            model_name: Model identifier (e.g., 'gpt-4o', 'claude-3-opus-20240229')
+
+        Returns:
+            Maximum total context window size in tokens (input + output combined).
+        """
+        ...
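For orientation, a minimal sketch (not part of the diff) of a stub that satisfies the new LLMProvider Protocol structurally, e.g. as a test double; the EchoProvider name and its fixed context size are invented for illustration.

# Illustrative sketch: structural typing against the LLMProvider Protocol.
from collections.abc import Sequence

from langchain_core.messages import BaseMessage

from aidol.providers.llm.base import LLMProvider
from aidol.schemas import ModelSettings


class EchoProvider:
    """Hypothetical test double; echoes the last message back."""

    @property
    def require_first_user_message(self) -> bool:
        return False

    @property
    def combine_system_messages(self) -> bool:
        return False

    @property
    def enforce_alternating_turns(self) -> bool:
        return False

    def completion(
        self,
        model_settings: ModelSettings,
        messages: Sequence[BaseMessage],
        response_format: dict[str, str] | None = None,
    ) -> str:
        return str(messages[-1].content)

    def get_context_size(self, model_name: str) -> int:
        return 8192  # arbitrary fixed window for the stub


# Type-checks because EchoProvider matches the Protocol structurally,
# without inheriting from it.
provider: LLMProvider = EchoProvider()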
aidol/providers/llm/openai.py
ADDED

@@ -0,0 +1,101 @@
+"""OpenAI LLM Provider for AIdol standalone.
+
+Default provider implementation using LangChain ChatOpenAI.
+Uses aioia-core OpenAIAPISettings for configuration.
+"""
+
+from __future__ import annotations
+
+from collections.abc import Sequence
+
+from aioia_core.settings import OpenAIAPISettings
+from langchain_core.messages import BaseMessage
+from langchain_openai import ChatOpenAI
+
+from aidol.providers.llm.base import lookup_context_window
+from aidol.schemas import ModelSettings
+
+
+class OpenAILLMProvider:
+    """OpenAI LLM Provider using LangChain.
+
+    Default implementation for AIdol standalone apps.
+    Uses aioia-core OpenAIAPISettings for explicit API key injection.
+
+    Constraint properties:
+    - require_first_user_message: False (OpenAI accepts system-only)
+    - combine_system_messages: False (OpenAI handles multiple system messages)
+    - enforce_alternating_turns: False (OpenAI allows consecutive same-role)
+    """
+
+    def __init__(self, settings: OpenAIAPISettings | None = None) -> None:
+        """Initialize with explicit settings injection.
+
+        Args:
+            settings: OpenAI API settings. If None, loads from environment variables.
+        """
+        self._settings = settings or OpenAIAPISettings()
+
+    @property
+    def require_first_user_message(self) -> bool:
+        """OpenAI accepts system-only conversations."""
+        return False
+
+    @property
+    def combine_system_messages(self) -> bool:
+        """OpenAI handles multiple system messages natively."""
+        return False
+
+    @property
+    def enforce_alternating_turns(self) -> bool:
+        """OpenAI allows consecutive messages from same role."""
+        return False
+
+    def completion(
+        self,
+        model_settings: ModelSettings,
+        messages: Sequence[BaseMessage],
+        response_format: dict[str, str] | None = None,
+    ) -> str:
+        """Generate completion using ChatOpenAI.
+
+        Args:
+            model_settings: Model configuration (chat_model, temperature, etc.)
+            messages: LangChain messages (BaseMessage).
+            response_format: Optional response format (e.g., {"type": "json_object"}).
+
+        Returns:
+            Generated text response.
+        """
+        # Build model_kwargs with optional response_format
+        model_kwargs: dict[str, dict[str, str]] = {}
+        if response_format:
+            model_kwargs["response_format"] = response_format
+
+        # Initialize ChatOpenAI with model settings and explicit API key injection
+        chat_model = ChatOpenAI(
+            model=model_settings.chat_model,
+            temperature=model_settings.temperature,
+            seed=model_settings.seed,
+            frequency_penalty=model_settings.frequency_penalty,
+            model_kwargs=model_kwargs,
+            openai_api_key=self._settings.api_key,  # type: ignore[arg-type]
+            openai_organization=self._settings.organization,  # type: ignore[arg-type]
+        )
+
+        # Generate response
+        response = chat_model.generate([list(messages)])
+        return response.generations[0][0].text
+
+    def get_context_size(self, model_name: str) -> int:
+        """Get maximum context window size for OpenAI model.
+
+        Uses LiteLLM's model_cost for dynamic lookup.
+
+        Args:
+            model_name: Model identifier (e.g., 'gpt-4o', 'gpt-4o-mini')
+
+        Returns:
+            Maximum context window size in tokens.
+        """
+        return lookup_context_window(model_name)
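A usage sketch (not part of the diff), assuming OPENAI_API_KEY is present in the environment so OpenAIAPISettings can load it; the model choice is illustrative.

from langchain_core.messages import HumanMessage, SystemMessage

from aidol.providers.llm.openai import OpenAILLMProvider
from aidol.schemas import ModelSettings

provider = OpenAILLMProvider()  # settings loaded from environment variables
settings = ModelSettings(chat_model="gpt-4o-mini", temperature=0.7)

reply = provider.completion(
    model_settings=settings,
    messages=[
        SystemMessage(content="You are a helpful assistant."),
        HumanMessage(content="Say hello in one word."),
    ],
)
print(reply)
print(provider.get_context_size("gpt-4o-mini"))  # total context window in tokens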
aidol/repositories/__init__.py
CHANGED

@@ -4,10 +4,12 @@ AIdol repositories
 
 from aidol.repositories.aidol import AIdolRepository
 from aidol.repositories.aidol_lead import AIdolLeadRepository
+from aidol.repositories.chatroom import ChatroomRepository
 from aidol.repositories.companion import CompanionRepository
 
 __all__ = [
     "AIdolRepository",
     "AIdolLeadRepository",
+    "ChatroomRepository",
     "CompanionRepository",
 ]
aidol/repositories/chatroom.py
ADDED

@@ -0,0 +1,142 @@
+"""
+AIdol chatroom repository
+
+Implements BaseRepository pattern for BaseCrudRouter compatibility.
+"""
+
+import uuid
+from datetime import datetime, timezone
+
+from aioia_core.repositories import BaseRepository
+from sqlalchemy.orm import Session
+
+from aidol.models import DBChatroom, DBMessage
+from aidol.schemas import (
+    Chatroom,
+    ChatroomCreate,
+    ChatroomUpdate,
+    CompanionMessage,
+    Message,
+    MessageCreate,
+    SenderType,
+)
+
+
+def _convert_db_chatroom_to_model(db_chatroom: DBChatroom) -> Chatroom:
+    """Convert DB chatroom to Pydantic model."""
+    return Chatroom(
+        id=db_chatroom.id,
+        name=db_chatroom.name,
+        language=db_chatroom.language,
+        created_at=db_chatroom.created_at.replace(tzinfo=timezone.utc),
+        updated_at=db_chatroom.updated_at.replace(tzinfo=timezone.utc),
+    )
+
+
+def _convert_chatroom_create_to_db_model(schema: ChatroomCreate) -> dict:
+    """Convert ChatroomCreate schema to DB model data dict."""
+    return schema.model_dump(exclude_unset=True)
+
+
+def _convert_db_message_to_model(db_message: DBMessage) -> Message:
+    """Convert DB message to Pydantic model.
+
+    Returns CompanionMessage if sender_type is COMPANION, else Message.
+    """
+    if db_message.sender_type == SenderType.COMPANION.value:
+        return CompanionMessage(
+            id=db_message.id,
+            sender_type=SenderType.COMPANION,
+            content=db_message.content,
+            created_at=db_message.created_at.replace(tzinfo=timezone.utc),
+        )
+
+    return Message(
+        id=db_message.id,
+        sender_type=SenderType(db_message.sender_type),
+        content=db_message.content,
+        created_at=db_message.created_at.replace(tzinfo=timezone.utc),
+    )
+
+
+class ChatroomRepository(
+    BaseRepository[Chatroom, DBChatroom, ChatroomCreate, ChatroomUpdate]
+):
+    """
+    Database-backed chatroom repository.
+
+    Extends BaseRepository for CRUD operations compatible with BaseCrudRouter.
+    """
+
+    def __init__(self, db_session: Session):
+        super().__init__(
+            db_session=db_session,
+            db_model=DBChatroom,
+            convert_to_model=_convert_db_chatroom_to_model,
+            convert_to_db_model=_convert_chatroom_create_to_db_model,
+        )
+
+    # ========================================
+    # AIdol-specific methods
+    # ========================================
+
+    def add_message_to_chatroom(
+        self, chatroom_id: str, message: MessageCreate
+    ) -> Message:
+        """
+        Add message to chatroom.
+
+        Follows aioia-core pattern: CreateSchema (no id) → Model (with id).
+        Returned Message always has id set (generated by repository).
+
+        Args:
+            chatroom_id: Chatroom ID
+            message: MessageCreate or CompanionMessageCreate schema (no id)
+
+        Returns:
+            Message or CompanionMessage with id guaranteed to be set
+        """
+        # sender_type.value is guaranteed by Pydantic validation
+        sender_type_value = message.sender_type.value
+
+        # Get companion_id if available (CompanionMessageCreate has it)
+        companion_id = getattr(message, "companion_id", None)
+
+        db_message = DBMessage(
+            id=str(uuid.uuid4()),
+            chatroom_id=chatroom_id,
+            sender_type=sender_type_value,
+            content=message.content,
+            claim_token=message.claim_token,
+            companion_id=companion_id,
+            created_at=datetime.now(timezone.utc),
+            updated_at=datetime.now(timezone.utc),
+        )
+        self.db_session.add(db_message)
+        self.db_session.commit()
+        self.db_session.refresh(db_message)
+        return _convert_db_message_to_model(db_message)
+
+    def get_messages_by_chatroom_id(
+        self, chatroom_id: str, limit: int, offset: int
+    ) -> list[Message]:
+        """
+        Get messages in chatroom with pagination.
+
+        Args:
+            chatroom_id: Chatroom ID
+            limit: Maximum number of messages
+            offset: Number of messages to skip
+
+        Returns:
+            List of messages ordered by created_at (descending, newest first)
+        """
+        db_messages = (
+            self.db_session.query(DBMessage)
+            .filter(DBMessage.chatroom_id == chatroom_id)
+            .order_by(DBMessage.created_at.desc())
+            .offset(offset)
+            .limit(limit)
+            .all()
+        )
+        return [_convert_db_message_to_model(msg) for msg in db_messages]
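A usage sketch (not part of the diff); "session" is assumed to be a configured SQLAlchemy Session bound to a database with the chatroom tables, and "room-1" an existing chatroom id.

from sqlalchemy.orm import Session

from aidol.repositories import ChatroomRepository
from aidol.schemas import MessageCreate, SenderType


def demo(session: Session) -> None:
    repo = ChatroomRepository(db_session=session)

    # CreateSchema (no id) in, Model (id generated by the repository) out.
    saved = repo.add_message_to_chatroom(
        chatroom_id="room-1",
        message=MessageCreate(content="Hi!", sender_type=SenderType.USER),
    )
    assert saved.id  # id is guaranteed to be set

    # Newest 20 messages, newest first (created_at descending).
    recent = repo.get_messages_by_chatroom_id("room-1", limit=20, offset=0)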
aidol/schemas/__init__.py
CHANGED

@@ -13,6 +13,19 @@ from aidol.schemas.aidol import (
     ImageGenerationResponse,
 )
 from aidol.schemas.aidol_lead import AIdolLead, AIdolLeadBase, AIdolLeadCreate
+from aidol.schemas.chatroom import (
+    AudioFormat,
+    Chatroom,
+    ChatroomBase,
+    ChatroomCreate,
+    ChatroomUpdate,
+    CompanionMessage,
+    CompanionMessageCreate,
+    Message,
+    MessageBase,
+    MessageCreate,
+    SenderType,
+)
 from aidol.schemas.companion import (
     Companion,
     CompanionBase,

@@ -24,8 +37,11 @@ from aidol.schemas.companion import (
     Grade,
     Position,
 )
+from aidol.schemas.model_settings import ModelSettings, ModelSettingsBase
+from aidol.schemas.persona import Persona
 
 __all__ = [
+    # AIdol
     "AIdol",
     "AIdolBase",
     "AIdolCreate",

@@ -34,9 +50,23 @@ __all__ = [
     "ImageGenerationData",
     "ImageGenerationRequest",
     "ImageGenerationResponse",
+    # AIdolLead
     "AIdolLead",
     "AIdolLeadBase",
     "AIdolLeadCreate",
+    # Chatroom
+    "AudioFormat",
+    "Chatroom",
+    "ChatroomBase",
+    "ChatroomCreate",
+    "ChatroomUpdate",
+    "CompanionMessage",
+    "CompanionMessageCreate",
+    "Message",
+    "MessageBase",
+    "MessageCreate",
+    "SenderType",
+    # Companion
     "Companion",
     "CompanionBase",
     "CompanionCreate",

@@ -46,4 +76,9 @@ __all__ = [
     "Gender",
     "Grade",
     "Position",
+    # Model Settings
+    "ModelSettings",
+    "ModelSettingsBase",
+    # Persona
+    "Persona",
 ]
aidol/schemas/chatroom.py
ADDED

@@ -0,0 +1,147 @@
+"""
+AIdol chatroom, message, and related schemas
+"""
+
+from datetime import datetime, timezone
+from enum import Enum, unique
+
+from humps import camelize
+from pydantic import BaseModel, ConfigDict, Field
+
+# =============================================================================
+# Enums
+# =============================================================================
+
+
+@unique
+class SenderType(str, Enum):
+    """
+    Sender types for chat messages.
+
+    Core types for AIdol standalone. Platform-specific integrators
+    may define their own extended SenderType with additional values.
+    """
+
+    USER = "user"
+    COMPANION = "companion"
+
+
+@unique
+class AudioFormat(str, Enum):
+    """Audio format types."""
+
+    MP3 = "mp3"
+    WAV = "wav"
+    OGG = "ogg"
+
+
+# =============================================================================
+# Message Schemas
+# =============================================================================
+
+
+class MessageBase(BaseModel):
+    """Base message model with common fields."""
+
+    model_config = ConfigDict(populate_by_name=True, alias_generator=camelize)
+
+    content: str = Field(..., description="Message content")
+    sender_type: SenderType = Field(
+        default=SenderType.USER, description="Type of the sender"
+    )
+
+
+class Message(MessageBase):
+    """Message response schema with id and timestamp."""
+
+    model_config = ConfigDict(
+        frozen=True, populate_by_name=True, alias_generator=camelize
+    )
+
+    id: str = Field(..., description="Unique message identifier")
+    created_at: datetime = Field(
+        default_factory=lambda: datetime.now(timezone.utc),
+        description="Message creation timestamp",
+    )
+
+    def is_user(self) -> bool:
+        """Check if message is from user"""
+        return self.sender_type == SenderType.USER
+
+    def is_companion(self) -> bool:
+        """Check if message is from companion (AI)"""
+        return self.sender_type == SenderType.COMPANION
+
+
+class CompanionMessage(Message):
+    """Companion message response schema."""
+
+    sender_type: SenderType = Field(
+        default=SenderType.COMPANION, description="Type of the sender"
+    )
+
+
+class MessageCreate(MessageBase):
+    """Schema for creating a message (no id, no timestamp).
+
+    claim_token is used for anonymous user identification and DAU/MAU analytics.
+    It's a UUID stored in localStorage, identifying users without authentication.
+    """
+
+    claim_token: str | None = Field(
+        default=None, description="Anonymous user identifier for analytics"
+    )
+
+
+class CompanionMessageCreate(MessageCreate):
+    """Schema for creating a companion message.
+
+    companion_id is optional for aidol standalone but may be required for platform integration.
+    When provided, it's used to identify the companion in the platform's companion system.
+    """
+
+    sender_type: SenderType = Field(
+        default=SenderType.COMPANION, description="Type of the sender"
+    )
+    companion_id: str | None = Field(
+        default=None, description="Companion ID for platform integration"
+    )
+
+
+# =============================================================================
+# Chatroom Schemas
+# =============================================================================
+
+
+class ChatroomBase(BaseModel):
+    """Base chatroom model with common fields."""
+
+    model_config = ConfigDict(populate_by_name=True, alias_generator=camelize)
+
+    name: str = Field(..., description="Chatroom name")
+    language: str = Field(default="en", description="Chatroom language")
+
+
+class Chatroom(ChatroomBase):
+    """Chatroom response schema with id and timestamps."""
+
+    model_config = ConfigDict(
+        populate_by_name=True, from_attributes=True, alias_generator=camelize
+    )
+
+    id: str = Field(..., description="Chatroom ID")
+    created_at: datetime = Field(..., description="Creation timestamp")
+    updated_at: datetime = Field(..., description="Last update timestamp")
+
+
+class ChatroomCreate(ChatroomBase):
+    """Schema for creating a chatroom (no id)."""
+
+
+class ChatroomUpdate(BaseModel):
+    """Schema for updating a chatroom (all fields optional)."""
+
+    model_config = ConfigDict(populate_by_name=True, alias_generator=camelize)
+
+    name: str | None = Field(default=None, description="Chatroom name")
+    language: str | None = Field(default=None, description="Chatroom language")
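A sketch (not part of the diff) of how the camelize alias generator plays out in practice: camelCase keys validate from API payloads, while Python code keeps snake_case field names; the payload values are invented.

from aidol.schemas import CompanionMessageCreate, MessageCreate, SenderType

# camelCase keys validate via the alias generator; snake_case also works
# because populate_by_name=True.
user_msg = MessageCreate.model_validate(
    {"content": "Hello!", "senderType": "user", "claimToken": "anon-uuid-from-localstorage"}
)
assert user_msg.sender_type is SenderType.USER

# CompanionMessageCreate defaults sender_type to COMPANION.
bot_msg = CompanionMessageCreate(content="Hi there!", companion_id="comp-1")
assert bot_msg.sender_type is SenderType.COMPANION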
aidol/schemas/model_settings.py
ADDED

@@ -0,0 +1,35 @@
+"""
+AIdol LLM model settings schemas
+"""
+
+from humps import camelize
+from pydantic import BaseModel, ConfigDict, Field
+
+
+class ModelSettingsBase(BaseModel):
+    """Base settings for LLM model configuration, excluding the model name.
+
+    Contains common parameters used across all model settings.
+    Platform-specific extensions can inherit from this class.
+    """
+
+    model_config = ConfigDict(populate_by_name=True, alias_generator=camelize)
+
+    temperature: float = Field(
+        default=0.0, description="Sampling temperature (0.0-2.0)"
+    )
+    seed: int | None = Field(
+        default=None, description="Random seed for reproducibility"
+    )
+    frequency_penalty: float = Field(
+        default=0.0, description="Frequency penalty (-2.0-2.0)"
+    )
+
+
+class ModelSettings(ModelSettingsBase):
+    """Settings for LLM model configuration, including the model name.
+
+    Used by LLMProvider.completion() to configure model behavior.
+    """
+
+    chat_model: str = Field(..., description="The model name (e.g., 'gpt-4o')")
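A sketch (not part of the diff) of the platform-side extension pattern the ModelSettingsBase docstring describes; PlatformModelSettings and its extra field are hypothetical.

from pydantic import Field

from aidol.schemas import ModelSettingsBase


class PlatformModelSettings(ModelSettingsBase):
    """Hypothetical extension; inherits temperature, seed, frequency_penalty."""

    max_output_tokens: int = Field(
        default=1024, description="Hypothetical cap on completion tokens"
    )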
aidol/schemas/persona.py
ADDED

@@ -0,0 +1,20 @@
+"""
+AIdol persona schema
+"""
+
+from humps import camelize
+from pydantic import BaseModel, ConfigDict, Field
+
+
+class Persona(BaseModel):
+    """Chat agent persona"""
+
+    model_config = ConfigDict(populate_by_name=True, alias_generator=camelize)
+
+    name: str | None = Field(default=None, description="Agent name")
+    system_prompt: str | None = Field(
+        default=None, description="System prompt for the agent"
+    )
+    timezone_name: str = Field(
+        default="UTC", description="Timezone for real-time context (e.g., 'Asia/Seoul')"
+    )
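A sketch (not part of the diff) constructing a Persona; the name and prompt are invented.

from aidol.schemas import Persona

persona = Persona(
    name="Haru",
    system_prompt="You are Haru, a cheerful virtual idol.",
    timezone_name="Asia/Seoul",  # drives real-time context per the field description
)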
aidol/services/__init__.py
CHANGED