chibi-bot 1.6.0b0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. chibi/__init__.py +0 -0
  2. chibi/__main__.py +343 -0
  3. chibi/cli.py +90 -0
  4. chibi/config/__init__.py +6 -0
  5. chibi/config/app.py +123 -0
  6. chibi/config/gpt.py +108 -0
  7. chibi/config/logging.py +15 -0
  8. chibi/config/telegram.py +43 -0
  9. chibi/config_generator.py +233 -0
  10. chibi/constants.py +362 -0
  11. chibi/exceptions.py +58 -0
  12. chibi/models.py +496 -0
  13. chibi/schemas/__init__.py +0 -0
  14. chibi/schemas/anthropic.py +20 -0
  15. chibi/schemas/app.py +54 -0
  16. chibi/schemas/cloudflare.py +65 -0
  17. chibi/schemas/mistralai.py +56 -0
  18. chibi/schemas/suno.py +83 -0
  19. chibi/service.py +135 -0
  20. chibi/services/bot.py +276 -0
  21. chibi/services/lock_manager.py +20 -0
  22. chibi/services/mcp/manager.py +242 -0
  23. chibi/services/metrics.py +54 -0
  24. chibi/services/providers/__init__.py +16 -0
  25. chibi/services/providers/alibaba.py +79 -0
  26. chibi/services/providers/anthropic.py +40 -0
  27. chibi/services/providers/cloudflare.py +98 -0
  28. chibi/services/providers/constants/suno.py +2 -0
  29. chibi/services/providers/customopenai.py +11 -0
  30. chibi/services/providers/deepseek.py +15 -0
  31. chibi/services/providers/eleven_labs.py +85 -0
  32. chibi/services/providers/gemini_native.py +489 -0
  33. chibi/services/providers/grok.py +40 -0
  34. chibi/services/providers/minimax.py +96 -0
  35. chibi/services/providers/mistralai_native.py +312 -0
  36. chibi/services/providers/moonshotai.py +20 -0
  37. chibi/services/providers/openai.py +74 -0
  38. chibi/services/providers/provider.py +892 -0
  39. chibi/services/providers/suno.py +130 -0
  40. chibi/services/providers/tools/__init__.py +23 -0
  41. chibi/services/providers/tools/cmd.py +132 -0
  42. chibi/services/providers/tools/common.py +127 -0
  43. chibi/services/providers/tools/constants.py +78 -0
  44. chibi/services/providers/tools/exceptions.py +1 -0
  45. chibi/services/providers/tools/file_editor.py +875 -0
  46. chibi/services/providers/tools/mcp_management.py +274 -0
  47. chibi/services/providers/tools/mcp_simple.py +72 -0
  48. chibi/services/providers/tools/media.py +451 -0
  49. chibi/services/providers/tools/memory.py +252 -0
  50. chibi/services/providers/tools/schemas.py +10 -0
  51. chibi/services/providers/tools/send.py +435 -0
  52. chibi/services/providers/tools/tool.py +163 -0
  53. chibi/services/providers/tools/utils.py +146 -0
  54. chibi/services/providers/tools/web.py +261 -0
  55. chibi/services/providers/utils.py +182 -0
  56. chibi/services/task_manager.py +93 -0
  57. chibi/services/user.py +269 -0
  58. chibi/storage/abstract.py +54 -0
  59. chibi/storage/database.py +86 -0
  60. chibi/storage/dynamodb.py +257 -0
  61. chibi/storage/local.py +70 -0
  62. chibi/storage/redis.py +91 -0
  63. chibi/utils/__init__.py +0 -0
  64. chibi/utils/app.py +249 -0
  65. chibi/utils/telegram.py +521 -0
  66. chibi_bot-1.6.0b0.dist-info/LICENSE +21 -0
  67. chibi_bot-1.6.0b0.dist-info/METADATA +340 -0
  68. chibi_bot-1.6.0b0.dist-info/RECORD +70 -0
  69. chibi_bot-1.6.0b0.dist-info/WHEEL +4 -0
  70. chibi_bot-1.6.0b0.dist-info/entry_points.txt +3 -0
@@ -0,0 +1,56 @@
1
from pydantic import BaseModel


class ModelPermissionSchema(BaseModel):
    """Permission flags attached to a model entry in the Mistral API."""

    id: str
    object: str
    created: int  # Unix timestamp
    allow_create_engine: bool
    allow_sampling: bool
    allow_logprobs: bool
    allow_search_indices: bool
    allow_view: bool
    allow_fine_tuning: bool
    organization: str
    group: str | None
    is_blocking: bool


class ModelDataSchema(BaseModel):
    """A single model entry returned by the models listing endpoint."""

    id: str
    object: str
    created: int  # Unix timestamp
    owned_by: str


class GetModelsResponseSchema(BaseModel):
    """Response envelope for the Mistral "list models" endpoint."""

    object: str
    data: list[ModelDataSchema]


class MessageSchema(BaseModel):
    """A chat message contained in a completion choice."""

    role: str
    content: str
    # The original used Field(None, alias="tool_calls"); an alias identical to
    # the field name is a no-op, so a plain default is equivalent.
    # NOTE(review): the API presumably returns tool calls as structured
    # objects, not a string — confirm against real payloads.
    tool_calls: str | None = None


class ChoiceSchema(BaseModel):
    """One completion choice of a chat completion response."""

    index: int
    message: MessageSchema
    finish_reason: str
    # Same redundancy as tool_calls: alias equal to the field name removed.
    logprobs: str | None = None


class MistralaiUsageSchema(BaseModel):
    """Token accounting for a chat completion request."""

    prompt_tokens: int
    total_tokens: int
    completion_tokens: int


class ChatCompletionSchema(BaseModel):
    """Top-level chat completion response returned by the Mistral API."""

    id: str
    object: str
    created: int  # Unix timestamp
    model: str
    choices: list[ChoiceSchema]
    usage: MistralaiUsageSchema
chibi/schemas/suno.py ADDED
@@ -0,0 +1,83 @@
1
from __future__ import annotations

import json
from typing import Any

from pydantic import BaseModel, ConfigDict, HttpUrl
from pydantic.alias_generators import to_camel


class SunoBaseModel(BaseModel):
    """Shared base for Suno API schemas.

    Suno's JSON uses camelCase keys; the alias generator maps them to the
    snake_case fields below, populate_by_name keeps snake_case construction
    working in Python code, and unknown keys are silently dropped.
    """

    model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True, extra="ignore")


class SunoTrackSchema(SunoBaseModel):
    """A single generated track inside a Suno task response."""

    id: str
    # Falls back to "" — presumably the URL is absent until audio is ready;
    # confirm against real responses.
    audio_url: HttpUrl | str = ""
    source_audio_url: HttpUrl | None = None
    stream_audio_url: HttpUrl | None = None
    source_stream_audio_url: HttpUrl | None = None
    image_url: HttpUrl | None = None
    source_image_url: HttpUrl | None = None
    prompt: str
    model_name: str
    title: str
    tags: str
    create_time: int | None = None  # Milliseconds timestamp
    duration: float | None = None  # presumably seconds — TODO confirm unit


class TaskResponseSchema(SunoBaseModel):
    """Payload of a generation task: task id plus produced tracks."""

    task_id: str
    suno_data: list[SunoTrackSchema]


class TaskDataSchema(SunoBaseModel):
    """Minimal task reference carrying only the task id."""

    task_id: str


class StartedTaskDataSchema(TaskDataSchema):
    """Details of a task that has been submitted to Suno."""

    parent_music_id: str | None = None
    param: str  # JSON-encoded request parameters; see parsed_param
    response: TaskResponseSchema | None = None
    status: str
    type: str
    operation_type: str
    error_code: int | None = None
    error_message: str | None = None
    create_time: int | None = None

    @property
    def parsed_param(self) -> dict[str, Any]:
        """Decode the JSON `param` string; returns {} on malformed input.

        ValueError covers json.JSONDecodeError; TypeError covers a non-str
        `param` value.
        """
        try:
            return json.loads(self.param)
        except (ValueError, TypeError):
            return {}


class SunoAPIResponseSchema(SunoBaseModel):
    """Common envelope of every Suno API response (status code + message)."""

    code: int
    msg: str

    @property
    def is_success(self) -> bool:
        """True when the API reported HTTP-style success code 200."""
        return self.code == 200


class SunoGetGenerationRequestSchema(SunoAPIResponseSchema):
    """Response to a generation request; carries the new task reference."""

    data: TaskDataSchema | None = None


class SunoGetGenerationDetailsSchema(SunoAPIResponseSchema):
    """Response to a task-details poll; exposes a progress helper."""

    data: StartedTaskDataSchema | None = None

    @property
    def is_in_progress(self) -> bool:
        """True while the task has not reached a terminal status.

        A missing `data` payload is treated as still-in-progress.
        """
        if not self.data:
            return True
        return self.data.status not in (
            "SUCCESS",
            "CREATE_TASK_FAILED",
            "GENERATE_AUDIO_FAILED",
            "SENSITIVE_WORD_ERROR",
        )
chibi/service.py ADDED
@@ -0,0 +1,135 @@
1
import os
import signal
import subprocess
import sys
from pathlib import Path
from typing import Optional


class Service:
    """Background-process management (start/stop/restart) for the Chibi bot.

    The bot is launched as a detached ``python -m chibi`` process; its PID is
    persisted to a PID file so later invocations can find and control it.
    """

    def __init__(
        self,
        pid_path: Optional[str] = None,
        log_path: Optional[str] = None,
    ) -> None:
        """Initialize service with PID and log paths.

        Args:
            pid_path: Path to PID file. Defaults to ~/.chibi/chibi.pid.
            log_path: Path to log file. Defaults to ~/.chibi/logs/chibi.log.
        """
        home = Path.home()
        self.pid_path = pid_path or str(home / ".chibi" / "chibi.pid")
        self.log_path = log_path or str(home / ".chibi" / "logs" / "chibi.log")

    def _ensure_directories(self) -> None:
        """Ensure the parent directories of the log and PID files exist."""
        Path(self.log_path).parent.mkdir(parents=True, exist_ok=True)
        Path(self.pid_path).parent.mkdir(parents=True, exist_ok=True)

    def _is_process_running(self, pid: int) -> bool:
        """Check if a process with the given PID is still running.

        Args:
            pid: Process ID to check.

        Returns:
            True if process is running, False otherwise.
        """
        try:
            os.kill(pid, 0)  # Signal 0 doesn't kill, just checks if process exists
        except ProcessLookupError:
            return False
        except PermissionError:
            # EPERM means the process exists but belongs to another user —
            # it IS running. (The original reported False here, which could
            # lead start() to spawn a second instance.)
            return True
        except OSError:
            return False
        return True

    def _read_pid(self) -> Optional[int]:
        """Read PID from PID file.

        Returns:
            PID if file exists and is valid, None otherwise.
        """
        # EAFP: a missing file surfaces as FileNotFoundError (an OSError),
        # so no separate existence check is needed.
        try:
            with open(self.pid_path, "r") as pid_file:
                return int(pid_file.read().strip())
        except (ValueError, OSError):
            return None

    def _write_pid(self, pid: int) -> None:
        """Write PID to PID file.

        Args:
            pid: Process ID to write.
        """
        with open(self.pid_path, "w") as pid_file:
            pid_file.write(str(pid))

    def _remove_pid_file(self) -> None:
        """Best-effort removal of the PID file; missing files are ignored."""
        try:
            os.remove(self.pid_path)
        except OSError:
            pass

    def start(self) -> None:
        """Start the bot service in background."""
        self._ensure_directories()

        pid = self._read_pid()
        if pid is not None and self._is_process_running(pid):
            print(f"Service is already running (PID: {pid}).")
            return

        # Clean up stale PID file if process is not running
        self._remove_pid_file()

        try:
            # Start the bot process in background, appending output to the log.
            with open(self.log_path, "a") as log_file:
                process = subprocess.Popen(
                    [sys.executable, "-m", "chibi"],
                    stdout=log_file,
                    stderr=log_file,
                    start_new_session=True,  # Create new process group (Unix)
                )
                self._write_pid(process.pid)
                print(f"Service started (PID: {process.pid}).")
        except Exception as e:
            print(f"Error starting service: {e}")

    def stop(self) -> None:
        """Stop the bot service."""
        pid = self._read_pid()

        if pid is None:
            print("Service is not running.")
            return

        if not self._is_process_running(pid):
            print("Service is not running (stale PID file removed).")
            self._remove_pid_file()
            return

        try:
            os.kill(pid, signal.SIGTERM)
            print(f"Service stopped (PID: {pid}).")
            self._remove_pid_file()
        except ProcessLookupError:
            print("Service process not found.")
            self._remove_pid_file()
        except Exception as e:
            print(f"Error stopping service: {e}")

    def restart(self) -> None:
        """Restart the bot service."""
        self.stop()
        self.start()
chibi/services/bot.py ADDED
@@ -0,0 +1,276 @@
1
+ import asyncio
2
+ from io import BytesIO
3
+
4
+ from loguru import logger
5
+ from telegram import (
6
+ CallbackQuery,
7
+ File,
8
+ InlineKeyboardButton,
9
+ InlineKeyboardMarkup,
10
+ Update,
11
+ constants,
12
+ )
13
+ from telegram.ext import ContextTypes
14
+
15
+ from chibi.config import application_settings, gpt_settings
16
+ from chibi.constants import UserAction, UserContext
17
+ from chibi.schemas.app import ChatResponseSchema, ModelChangeSchema
18
+ from chibi.services.providers import RegisteredProviders
19
+ from chibi.services.providers.tools import ToolResponse
20
+ from chibi.services.providers.utils import get_usage_msg
21
+ from chibi.services.user import (
22
+ check_history_and_summarize,
23
+ generate_image,
24
+ get_llm_chat_completion_answer,
25
+ get_models_available,
26
+ reset_chat_history,
27
+ set_active_model,
28
+ set_api_key,
29
+ user_has_reached_images_generation_limit,
30
+ )
31
+ from chibi.utils.app import handle_gpt_exceptions
32
+ from chibi.utils.telegram import (
33
+ chat_data,
34
+ get_telegram_chat,
35
+ get_telegram_message,
36
+ get_telegram_user,
37
+ send_gpt_answer_message,
38
+ send_images,
39
+ send_message,
40
+ set_user_action,
41
+ set_user_context,
42
+ user_data,
43
+ )
44
+
45
+
46
@handle_gpt_exceptions
async def handle_model_selection(
    update: Update,
    context: ContextTypes.DEFAULT_TYPE,
    model: ModelChangeSchema,
    query: CallbackQuery,
) -> None:
    """Persist the model the user picked and confirm it in the inline message."""
    user = get_telegram_user(update=update)
    await set_active_model(user_id=user.id, model=model)
    model_label = f"{model.name} ({model.provider})"
    logger.info(f"{user_data(update)} switched to model '{model_label}'")
    await query.edit_message_text(text=f"Selected model: '{model_label}'")
57
+
58
+
59
async def handle_tool_response(tool_response: ToolResponse, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Feed a tool result back to the LLM and relay its answer to the chat."""
    user = get_telegram_user(update=update)
    chat_response: ChatResponseSchema = await get_llm_chat_completion_answer(
        user_id=user.id, tool_message=tool_response, context=context, update=update
    )
    usage_message = get_usage_msg(chat_response.usage)

    # The model acknowledges with this sentinel when no user-facing reply is
    # needed; just record the fact and stop.
    if "<chibi>ack</chibi>" in chat_response.answer.lower():
        logger.info(
            f"[{user_data(update)}-{chat_data(update)}] LLM silently received tool result "
            f"(answer: {chat_response.answer}). No user notification required. {usage_message}"
        )
        return None

    logged_answer = ""
    if application_settings.log_prompt_data:
        flattened = chat_response.answer.replace("\r", " ").replace("\n", " ")
        logged_answer = f"Answer: {flattened}"

    logger.info(
        f"{user_data(update)} got {chat_response.provider} ({chat_response.model}) answer in "
        f"the {chat_data(update)}. {logged_answer} {usage_message}"
    )

    await send_gpt_answer_message(gpt_answer=chat_response.answer, update=update, context=context)
85
+
86
+
87
@handle_gpt_exceptions
async def handle_user_prompt(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Forward a user's text or voice message to the LLM and post the answer.

    Keeps a "typing" chat action visible while the completion is pending,
    honours the silent-ack sentinel (reacting with an emoji instead of a
    reply), and triggers history summarization after a successful exchange.
    """
    telegram_user = get_telegram_user(update=update)
    telegram_chat = get_telegram_chat(update=update)
    telegram_message = get_telegram_message(update=update)
    text_prompt = telegram_message.text

    # Voice messages are downloaded into memory and passed alongside (or
    # instead of) the text prompt.
    if telegram_message.voice:
        file_id = telegram_message.voice.file_id
        file: File = await context.bot.get_file(file_id)
        voice_prompt = BytesIO()
        await file.download_to_memory(out=voice_prompt)
        voice_prompt.seek(0)  # Rewind so downstream consumers read from the start.
    else:
        voice_prompt = None

    # Nothing to do if the message carries neither text nor voice.
    if not text_prompt and not voice_prompt:
        return None

    if text_prompt:
        # Strip the leading /ask command so only the actual prompt remains.
        if text_prompt.startswith("/ask"):
            text_prompt = text_prompt.replace("/ask", "", 1).strip()

    # Flatten newlines so the prompt stays on a single log line.
    prompt_to_log = text_prompt.replace("\r", " ").replace("\n", " ") if text_prompt else "voice message"

    logger.info(
        f"{user_data(update)} sent a new message in the {chat_data(update)}"
        f"{': ' + prompt_to_log if application_settings.log_prompt_data else ''}"
    )

    # NOTE(review): "gtp" in this local name looks like a typo for "gpt".
    get_gtp_chat_answer_task = asyncio.ensure_future(
        get_llm_chat_completion_answer(
            user_id=telegram_user.id,
            user_text_message=text_prompt,
            user_voice_message=voice_prompt,
            context=context,
            update=update,
        )
    )

    # Keep the "typing..." indicator alive while the completion is in flight.
    while not get_gtp_chat_answer_task.done():
        await context.bot.send_chat_action(chat_id=telegram_chat.id, action=constants.ChatAction.TYPING)
        await asyncio.sleep(2.5)

    chat_response: ChatResponseSchema = await get_gtp_chat_answer_task
    usage = chat_response.usage
    usage_message = get_usage_msg(usage)

    if application_settings.log_prompt_data:
        answer_to_log = chat_response.answer.replace("\r", " ").replace("\n", " ")
        logged_answer = f"Answer: {answer_to_log}"
    else:
        logged_answer = ""

    # The model replies with this sentinel when no user-visible answer is
    # needed; acknowledge with a reaction instead of sending a message.
    if "<chibi>ack</chibi>" in chat_response.answer.lower():
        logger.info(
            f"[{user_data(update)}-{chat_data(update)}] LLM silently received user request "
            f"(answer: {chat_response.answer}). No user notification required. {usage_message}"
        )
        try:
            await telegram_message.set_reaction(reaction="👌", is_big=True)
        except Exception as e:
            # Best-effort: a failed reaction should not abort the handler.
            logger.error(f"{user_data(update)}: Couldn't set message reaction due to exception: {e}")
        return None

    logger.info(
        f"{user_data(update)} got {chat_response.provider} ({chat_response.model}) answer in "
        f"the {chat_data(update)}. {logged_answer} {usage_message}"
    )
    await send_gpt_answer_message(gpt_answer=chat_response.answer, update=update, context=context)
    history_is_summarized = await check_history_and_summarize(user_id=telegram_user.id)
    if history_is_summarized:
        logger.info(f"{user_data(update)}: history successfully summarized.")
160
+
161
+
162
async def handle_reset(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Wipe the user's conversation history and acknowledge in the chat."""
    chat = get_telegram_chat(update=update)
    user = get_telegram_user(update=update)
    logger.info(f"{user_data(update)}: conversation history reset.")

    await reset_chat_history(user_id=user.id)
    await context.bot.send_message(chat_id=chat.id, text="Done!")
169
+
170
+
171
@handle_gpt_exceptions
async def handle_image_generation(update: Update, context: ContextTypes.DEFAULT_TYPE, prompt: str) -> None:
    """Generate image(s) for *prompt* and send them to the chat.

    Enforces the monthly generation limit and keeps an "upload photo" chat
    action visible while generation is in flight.
    """
    set_user_action(context=context, action=UserAction.NONE)
    telegram_user = get_telegram_user(update=update)
    telegram_chat = get_telegram_chat(update=update)
    telegram_message = get_telegram_message(update=update)
    if not telegram_message.text:
        return None

    # Refuse (with an explanatory reply) once the monthly quota is exhausted.
    if await user_has_reached_images_generation_limit(user_id=telegram_user.id):
        await context.bot.send_message(
            chat_id=telegram_chat.id,
            reply_to_message_id=telegram_message.message_id,
            text=(
                f"Sorry, you have reached your monthly images generation limit "
                f"({gpt_settings.image_generations_monthly_limit}). Please, try again later."
            ),
        )
        return None

    logger.info(
        f"{user_data(update)} sent image generation request in the {chat_data(update)}"
        f"{': ' + prompt if application_settings.log_prompt_data else ''}"
    )
    generate_image_task = asyncio.ensure_future(generate_image(user_id=telegram_user.id, prompt=prompt))

    # The user finds it psychologically easier to wait for a response from the chatbot when they see its activity
    # during the entire waiting time.
    while not generate_image_task.done():
        await context.bot.send_chat_action(chat_id=telegram_chat.id, action=constants.ChatAction.UPLOAD_PHOTO)
        await asyncio.sleep(2.5)

    image_data = await generate_image_task
    await send_images(images=image_data, update=update, context=context)
    log_message = f"{user_data(update)} got a successfully generated image(s)"
    # Only log the payload when the results are strings (presumably URLs);
    # non-str results would bloat the log — confirm against generate_image.
    if application_settings.log_prompt_data and isinstance(image_data[0], str):
        log_message += f": {image_data}"
    logger.info(log_message)
209
+
210
+
211
async def handle_provider_api_key_set(
    update: Update,
    context: ContextTypes.DEFAULT_TYPE,
    provider_name: str,
) -> None:
    """Validate and store a provider API key sent by the user.

    The message containing the key is deleted afterwards (best effort) so the
    secret does not linger in the chat history.

    Args:
        update: Incoming Telegram update whose message text is the key.
        context: Handler context used for bot calls.
        provider_name: Registry name of the provider the key belongs to.
    """
    telegram_user = get_telegram_user(update=update)
    telegram_chat = get_telegram_chat(update=update)
    telegram_message = get_telegram_message(update=update)
    logger.info(f"{telegram_user.name} provides API Key for provider '{provider_name}'.")

    api_key = telegram_message.text.strip() if telegram_message.text else None
    if not api_key:
        return None
    provider = RegisteredProviders.get_class(provider_name)
    if not provider:
        return None

    # Reject keys the provider itself does not accept before persisting them.
    if not await provider(token=api_key).api_key_is_valid():
        error_msg = "Sorry, but API key you have provided does not seem correct."
        await send_message(update=update, context=context, text=error_msg)
        logger.warning(f"{user_data(update)} provided invalid key.")
        return None

    await set_api_key(user_id=telegram_user.id, api_key=api_key, provider_name=provider_name)
    RegisteredProviders.register_as_available(provider=provider)

    msg = f"Your {provider_name} API Key successfully set! 🦾\n\nNow you may check available models in /gpt_models."
    await send_message(update=update, context=context, reply=False, text=msg)
    try:
        # Best-effort: remove the message that contained the secret key.
        await context.bot.delete_message(chat_id=telegram_chat.id, message_id=telegram_message.message_id)
    except Exception as e:
        # Deletion can legitimately fail (e.g. missing rights in a group chat);
        # the key is already stored, so just record the reason instead of
        # swallowing it silently.
        logger.debug(f"{user_data(update)}: could not delete API key message: {e}")
    logger.info(f"{user_data(update)} successfully set up {provider_name} Key.")
244
+
245
+
246
@handle_gpt_exceptions
async def handle_available_model_options(
    update: Update,
    context: ContextTypes.DEFAULT_TYPE,
    image_generation: bool = False,
) -> InlineKeyboardMarkup:
    """Build an inline keyboard listing the models the user may switch to."""
    user = get_telegram_user(update=update)
    models_available = await get_models_available(user_id=user.id, image_generation=image_generation)
    # Callback data is the stringified index; the mapping is stashed in the
    # user context so the selection handler can resolve it back to a model.
    mapped_models: dict[str, ModelChangeSchema] = dict(
        (str(index), model) for index, model in enumerate(models_available)
    )
    set_user_context(context=context, key=UserContext.MAPPED_MODELS, value=mapped_models)
    keyboard = []
    for key, model in mapped_models.items():
        label = f"{model.display_name.title()} ({model.provider})"
        keyboard.append([InlineKeyboardButton(label, callback_data=key)])
    for model in models_available:
        logger.debug(f"{model.provider}: {model.name}")
    keyboard.append([InlineKeyboardButton(text="CLOSE (SELECT NOTHING)", callback_data="-1")])
    return InlineKeyboardMarkup(keyboard)
264
+
265
+
266
async def handle_available_provider_options() -> InlineKeyboardMarkup:
    """Build an inline keyboard of providers a user may register a key for."""
    keyboard = []
    # Temporary removing the Cloudflare provider from the "public mode"
    # because we need to handle account id setting first. Will provide
    # such a support in one of the following releases.
    for name in RegisteredProviders.all:
        if name == "Cloudflare":
            continue
        keyboard.append([InlineKeyboardButton(name, callback_data=name)])
    keyboard.append([InlineKeyboardButton(text="CLOSE (SELECT NOTHING)", callback_data="-1")])
    return InlineKeyboardMarkup(keyboard)
@@ -0,0 +1,20 @@
1
import asyncio
from typing import Hashable
from weakref import WeakValueDictionary

from chibi.utils.app import SingletonMeta


class LockManager(metaclass=SingletonMeta):
    """Process-wide registry of per-key asyncio locks.

    Locks live in a WeakValueDictionary, so an entry disappears automatically
    once no caller holds a reference to its lock.
    """

    def __init__(self) -> None:
        """Set up the (weak) lock registry and the lock guarding it."""
        self._locks: WeakValueDictionary[Hashable, asyncio.Lock] = WeakValueDictionary()
        self._dict_lock: asyncio.Lock = asyncio.Lock()

    async def get_lock(self, key: Hashable) -> asyncio.Lock:
        """Return the lock registered for *key*, creating it on first use."""
        # The registry itself is mutated under _dict_lock so concurrent
        # callers always receive the same lock object for the same key.
        async with self._dict_lock:
            existing = self._locks.get(key)
            if existing is not None:
                return existing
            fresh = asyncio.Lock()
            self._locks[key] = fresh
            return fresh