chibi_bot-1.6.0b0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. chibi/__init__.py +0 -0
  2. chibi/__main__.py +343 -0
  3. chibi/cli.py +90 -0
  4. chibi/config/__init__.py +6 -0
  5. chibi/config/app.py +123 -0
  6. chibi/config/gpt.py +108 -0
  7. chibi/config/logging.py +15 -0
  8. chibi/config/telegram.py +43 -0
  9. chibi/config_generator.py +233 -0
  10. chibi/constants.py +362 -0
  11. chibi/exceptions.py +58 -0
  12. chibi/models.py +496 -0
  13. chibi/schemas/__init__.py +0 -0
  14. chibi/schemas/anthropic.py +20 -0
  15. chibi/schemas/app.py +54 -0
  16. chibi/schemas/cloudflare.py +65 -0
  17. chibi/schemas/mistralai.py +56 -0
  18. chibi/schemas/suno.py +83 -0
  19. chibi/service.py +135 -0
  20. chibi/services/bot.py +276 -0
  21. chibi/services/lock_manager.py +20 -0
  22. chibi/services/mcp/manager.py +242 -0
  23. chibi/services/metrics.py +54 -0
  24. chibi/services/providers/__init__.py +16 -0
  25. chibi/services/providers/alibaba.py +79 -0
  26. chibi/services/providers/anthropic.py +40 -0
  27. chibi/services/providers/cloudflare.py +98 -0
  28. chibi/services/providers/constants/suno.py +2 -0
  29. chibi/services/providers/customopenai.py +11 -0
  30. chibi/services/providers/deepseek.py +15 -0
  31. chibi/services/providers/eleven_labs.py +85 -0
  32. chibi/services/providers/gemini_native.py +489 -0
  33. chibi/services/providers/grok.py +40 -0
  34. chibi/services/providers/minimax.py +96 -0
  35. chibi/services/providers/mistralai_native.py +312 -0
  36. chibi/services/providers/moonshotai.py +20 -0
  37. chibi/services/providers/openai.py +74 -0
  38. chibi/services/providers/provider.py +892 -0
  39. chibi/services/providers/suno.py +130 -0
  40. chibi/services/providers/tools/__init__.py +23 -0
  41. chibi/services/providers/tools/cmd.py +132 -0
  42. chibi/services/providers/tools/common.py +127 -0
  43. chibi/services/providers/tools/constants.py +78 -0
  44. chibi/services/providers/tools/exceptions.py +1 -0
  45. chibi/services/providers/tools/file_editor.py +875 -0
  46. chibi/services/providers/tools/mcp_management.py +274 -0
  47. chibi/services/providers/tools/mcp_simple.py +72 -0
  48. chibi/services/providers/tools/media.py +451 -0
  49. chibi/services/providers/tools/memory.py +252 -0
  50. chibi/services/providers/tools/schemas.py +10 -0
  51. chibi/services/providers/tools/send.py +435 -0
  52. chibi/services/providers/tools/tool.py +163 -0
  53. chibi/services/providers/tools/utils.py +146 -0
  54. chibi/services/providers/tools/web.py +261 -0
  55. chibi/services/providers/utils.py +182 -0
  56. chibi/services/task_manager.py +93 -0
  57. chibi/services/user.py +269 -0
  58. chibi/storage/abstract.py +54 -0
  59. chibi/storage/database.py +86 -0
  60. chibi/storage/dynamodb.py +257 -0
  61. chibi/storage/local.py +70 -0
  62. chibi/storage/redis.py +91 -0
  63. chibi/utils/__init__.py +0 -0
  64. chibi/utils/app.py +249 -0
  65. chibi/utils/telegram.py +521 -0
  66. chibi_bot-1.6.0b0.dist-info/LICENSE +21 -0
  67. chibi_bot-1.6.0b0.dist-info/METADATA +340 -0
  68. chibi_bot-1.6.0b0.dist-info/RECORD +70 -0
  69. chibi_bot-1.6.0b0.dist-info/WHEEL +4 -0
  70. chibi_bot-1.6.0b0.dist-info/entry_points.txt +3 -0
chibi/services/providers/gemini_native.py
@@ -0,0 +1,489 @@
+import asyncio
+import math
+import random
+from asyncio import sleep
+from copy import copy
+from io import BytesIO
+from typing import Any
+from uuid import uuid4
+
+from google.genai.client import Client
+from google.genai.errors import APIError
+from google.genai.types import (
+    ContentDict,
+    FunctionCallDict,
+    FunctionDeclaration,
+    FunctionResponseDict,
+    GenerateContentConfig,
+    GenerateContentResponse,
+    GenerateImagesConfig,
+    GenerateImagesResponse,
+    HttpOptions,
+    Image,
+    ImageConfig,
+    PartDict,
+    Tool,
+)
+from loguru import logger
+from telegram import Update
+from telegram.ext import ContextTypes
+
+from chibi.config import application_settings, gpt_settings
+from chibi.exceptions import NoResponseError, NotAuthorizedError, ServiceRateLimitError, ServiceResponseError
+from chibi.models import Message, User
+from chibi.schemas.app import ChatResponseSchema, ModelChangeSchema, ModeratorsAnswer
+from chibi.services.metrics import MetricsService
+from chibi.services.providers.provider import RestApiFriendlyProvider
+from chibi.services.providers.tools import RegisteredChibiTools
+from chibi.services.providers.tools.constants import MODERATOR_PROMPT
+from chibi.services.providers.utils import (
+    get_usage_from_google_response,
+    get_usage_msg,
+    prepare_system_prompt,
+    send_llm_thoughts,
+)
+
+
+class Gemini(RestApiFriendlyProvider):
+    api_key = gpt_settings.gemini_key
+    chat_ready = True
+    image_generation_ready = True
+    moderation_ready = True
+
+    name = "Gemini"
+    model_name_keywords = ["gemini", "gemma"]
+    model_name_keywords_exclude = ["image", "vision", "tts", "embedding", "2.0", "1.5"]
+    default_model = "models/gemini-2.5-pro"
+    default_image_model = "models/imagen-4.0-fast-generate-001"
+    default_moderation_model = "models/gemini-2.5-flash-lite"
+    frequency_penalty: float | None = gpt_settings.frequency_penalty
+    max_tokens: int = gpt_settings.max_tokens
+    presence_penalty: float | None = gpt_settings.presence_penalty
+    temperature: float = gpt_settings.temperature
+
+    def __init__(self, token: str) -> None:
+        super().__init__(token=token)
+
+    @property
+    def tools_list(self) -> list[Tool]:
+        """Convert our tools format to Google's Tool format.
+
+        Returns:
+            Tools list in Google's Tool format.
+        """
+        google_tools = []
+        for tool in RegisteredChibiTools.get_tool_definitions():
+            try:
+                google_tool = Tool(
+                    function_declarations=[
+                        FunctionDeclaration(
+                            name=str(tool["function"]["name"]),
+                            description=str(tool["function"]["description"]),
+                            parameters=tool["function"]["parameters"],
+                        )
+                    ]
+                )
+            except Exception as e:
+                logger.error(f"Failed to register tool {tool['function']['name']} due to exception: {e}")
+                import pprint
+
+                pprint.pprint(tool)
+                raise
+            google_tools.append(google_tool)
+        return google_tools
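
The "our tools format" in the docstring is an OpenAI-style function schema, as implied by the tool["function"][...] accesses above. A minimal sketch of the shape this property consumes, using a hypothetical get_weather tool (the real definitions come from RegisteredChibiTools in chibi/services/providers/tools/):

# Hypothetical OpenAI-style tool definition; only the fields that
# tools_list actually reads (name, description, parameters) are shown.
sample_tool = {
    "type": "function",
    "function": {
        "name": "get_weather",  # invented for illustration
        "description": "Return the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}

# tools_list hands these three fields to google.genai.types.FunctionDeclaration,
# wrapping each declaration in its own Tool object.
print(str(sample_tool["function"]["name"]))
print(str(sample_tool["function"]["description"]))
print(sample_tool["function"]["parameters"])
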
+
+    def _get_text(self, response: GenerateContentResponse) -> str | None:
+        if not response.candidates or not response.candidates[0].content or not response.candidates[0].content.parts:
+            return None
+        text = ""
+        for part in response.candidates[0].content.parts:
+            if part.text is not None and not part.thought:
+                text += part.text
+        return text if text != "" else None
+
+    def _get_thought_signature(self, response: GenerateContentResponse) -> bytes | None:
+        if not response.candidates:
+            return None
+        first_candidate = response.candidates[0]
+        if not first_candidate.content or not first_candidate.content.parts:
+            return None
+        for part in first_candidate.content.parts:
+            if signature := part.thought_signature:
+                return signature
+        return None
+
+    def _get_retry_delay(self, response: Any) -> float | None:
+        if not isinstance(response, dict):
+            logger.warning(
+                f"The Gemini API error response data is not a dict. Skipping getting retry delay. "
+                f"Response type: {type(response)}. Response: {response}"
+            )
+            return None
+        per_day_quota: bool = False
+        retry_delay = None
+        details = response.get("error", {}).get("details", [])
+        if not details:
+            logger.warning(
+                f"The Gemini API error response data does not contain details section. Skipping getting retry delay. "
+                f"Response: {response}"
+            )
+            return None
+
+        for item in details:
+            detail_type = item.get("@type", "")
+
+            if "QuotaFailure" in detail_type:
+                violations = item.get("violations", [])
+                for v in violations:
+                    if "PerDay" in v.get("quotaId", ""):
+                        per_day_quota = True
+                        break
+
+            elif "RetryInfo" in detail_type:
+                delay_str = item.get("retryDelay", "")
+                if delay_str and delay_str.endswith("s"):
+                    try:
+                        val = float(delay_str[:-1])
+                        retry_delay = math.ceil(val) + 1
+                    except ValueError:
+                        retry_delay = None
+        if per_day_quota:
+            logger.warning("Ooops! Seems we have reached Daily Quota for Gemini API.")
+            return None
+        return retry_delay
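
_get_retry_delay walks the google.rpc details attached to a 429 error body: a QuotaFailure violation whose quotaId contains "PerDay" disables retries entirely, while a RetryInfo retryDelay such as "7s" is rounded up and padded by one second. A standalone sketch against a representative payload (the quotaId and delay values are invented for illustration):

import math

# Representative 429 body of the shape _get_retry_delay expects.
sample_error = {
    "error": {
        "code": 429,
        "status": "RESOURCE_EXHAUSTED",
        "details": [
            {
                "@type": "type.googleapis.com/google.rpc.QuotaFailure",
                "violations": [{"quotaId": "GenerateRequestsPerMinutePerModel"}],
            },
            {"@type": "type.googleapis.com/google.rpc.RetryInfo", "retryDelay": "7s"},
        ],
    }
}

for item in sample_error["error"]["details"]:
    if "RetryInfo" in item.get("@type", ""):
        delay = math.ceil(float(item["retryDelay"][:-1])) + 1
        print(delay)  # 8 -> sleep 8s (plus jitter) before the next attempt

Had any quotaId contained "PerDay", the method would log a warning and return None, which the caller turns into a ServiceRateLimitError instead of a retry.
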
+
+    async def _generate_content(
+        self, model: str, contents: list[ContentDict], config: GenerateContentConfig
+    ) -> GenerateContentResponse:
+        for attempt in range(gpt_settings.retries):
+            try:
+                async with Client(api_key=gpt_settings.gemini_key).aio as client:
+                    response: GenerateContentResponse = await client.models.generate_content(
+                        model=model,
+                        contents=contents,
+                        config=config,
+                    )
+                answer = self._get_text(response)
+                if answer is not None or response.function_calls:
+                    return response
+            except APIError as err:
+                logger.error(f"Gemini API error: {err.message}")
+
+                if err.code == 429:
+                    retry_delay = self._get_retry_delay(err.details)
+                    if not retry_delay:
+                        raise ServiceRateLimitError(provider=self.name, model=model, detail=err.details)
+                    await asyncio.sleep(retry_delay + random.uniform(0.5, 2.5))
+                    continue
+
+                elif err.code == 403:
+                    raise NotAuthorizedError(provider=self.name, model=model, detail=err.details)
+
+                else:
+                    raise ServiceResponseError(provider=self.name, model=model, detail=err.details)
+
+            delay = gpt_settings.backoff_factor * (2**attempt)
+            jitter = delay * random.uniform(0.1, 0.5)
+            total_delay = delay + jitter
+
+            logger.warning(
+                f"Attempt #{attempt + 1}. Unexpected (empty) response received. Retrying in {total_delay} seconds..."
+            )
+            await sleep(total_delay)
+        raise NoResponseError(provider=self.name, model=model, detail="Unexpected (empty) response received")
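
For empty (non-error) responses the loop retries with exponential backoff plus 10-50% proportional jitter. A worked schedule, assuming a backoff_factor of 1.0 (the real value comes from gpt_settings):

import random

backoff_factor = 1.0  # assumed for illustration; configured via gpt_settings
for attempt in range(4):
    delay = backoff_factor * (2**attempt)      # 1, 2, 4, 8 seconds
    jitter = delay * random.uniform(0.1, 0.5)  # 10-50% of the base delay
    print(f"attempt #{attempt + 1}: sleep {delay + jitter:.2f}s")
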
+
+    async def _get_chat_completion_response(
+        self,
+        messages: list[ContentDict],
+        user: User | None = None,
+        model: str | None = None,
+        system_prompt: str = gpt_settings.assistant_prompt,
+        context: ContextTypes.DEFAULT_TYPE | None = None,
+        update: Update | None = None,
+    ) -> tuple[ChatResponseSchema, list[ContentDict]]:
+        model_name = model or self.default_model
+
+        prepared_system_prompt = await prepare_system_prompt(base_system_prompt=system_prompt, user=user)
+
+        if "flash" in model_name and self.temperature > 0.4:
+            temperature = 0.4
+        else:
+            temperature = self.temperature
+
+        http_options = HttpOptions(httpx_async_client=self.get_async_httpx_client())
+
+        generation_config = GenerateContentConfig(
+            system_instruction=prepared_system_prompt if "gemini" in model_name else None,
+            temperature=temperature,
+            max_output_tokens=self.max_tokens,
+            presence_penalty=self.presence_penalty,
+            frequency_penalty=self.frequency_penalty,
+            tools=self.tools_list if "gemini" in model_name else None,
+            http_options=http_options,
+        )
+
+        response: GenerateContentResponse = await self._generate_content(
+            model=model_name,
+            contents=messages,
+            config=generation_config,
+        )
+        answer = self._get_text(response)
+        usage = get_usage_from_google_response(response_message=response)
+        if application_settings.is_influx_configured:
+            MetricsService.send_usage_metrics(metric=usage, model=model_name, provider=self.name, user=user)
+        usage_message = get_usage_msg(usage=usage)
+
+        if not response.function_calls:
+            messages.append(
+                ContentDict(
+                    role="model",
+                    parts=[
+                        PartDict(
+                            text=answer,
+                        )
+                    ],
+                )
+            )
+            return ChatResponseSchema(answer=answer, provider=self.name, model=model_name, usage=usage), messages
+
+        # Tool calls handling
+        logger.log("CALL", f"{model} requested the call of {len(response.function_calls)} tools.")
+
+        if answer:
+            await send_llm_thoughts(thoughts=answer, context=context, update=update)
+        logger.log("THINK", f"{model}: {answer or 'No thoughts...'}. {usage_message}")
+
+        tool_context: dict[str, Any] = {
+            "user_id": user.id if user else None,
+            "telegram_context": context,
+            "telegram_update": update,
+            "model": model,
+        }
+
+        tool_coroutines = [
+            RegisteredChibiTools.call(
+                tool_name=str(function_call.name),
+                tools_args=tool_context | copy(function_call.args) if function_call.args else tool_context,
+            )
+            for function_call in response.function_calls
+        ]
+        results = await asyncio.gather(*tool_coroutines)
+
+        thought_signature = self._get_thought_signature(response=response)
+        if not thought_signature:
+            logger.error(
+                f"Could not get thought signature for function call, no response candidates found: "
+                f"{response.candidates}."
+            )
+
+        for function_call, result in zip(response.function_calls, results):
+            function_call_id = function_call.id or str(uuid4())
+            tool_call_message: ContentDict = ContentDict(
+                role="model",
+                parts=[
+                    PartDict(
+                        function_call=FunctionCallDict(
+                            name=function_call.name, args=function_call.args, id=function_call_id
+                        ),
+                        thought_signature=thought_signature,
+                    ),
+                ],
+            )
+
+            tool_result_message = ContentDict(
+                role="user",
+                parts=[
+                    PartDict(
+                        function_response=FunctionResponseDict(
+                            id=function_call_id, name=function_call.name, response=result.model_dump()
+                        )
+                    ),
+                ],
+            )
+
+            messages.append(tool_call_message)
+            messages.append(tool_result_message)
+
+        logger.log("CALL", "All the function results have been obtained. Returning them to the LLM...")
+        return await self._get_chat_completion_response(
+            messages=messages,
+            model=model_name,
+            user=user,
+            system_prompt=system_prompt,
+            context=context,
+            update=update,
+        )
+
+    async def get_chat_response(
+        self,
+        messages: list[Message],
+        user: User | None = None,
+        model: str | None = None,
+        system_prompt: str = gpt_settings.assistant_prompt,
+        update: Update | None = None,
+        context: ContextTypes.DEFAULT_TYPE | None = None,
+    ) -> tuple[ChatResponseSchema, list[Message]]:
+        model = model or self.default_model
+        initial_messages = [msg.to_google() for msg in messages]
+
+        chat_response, updated_messages = await self._get_chat_completion_response(
+            messages=initial_messages.copy(),
+            user=user,
+            model=model,
+            system_prompt=system_prompt,
+            context=context,
+            update=update,
+        )
+
+        new_messages = [msg for msg in updated_messages if msg not in initial_messages]
+        return chat_response, [Message.from_google(msg) for msg in new_messages]
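
A minimal usage sketch built only from the signatures above: update and context default to None, so no Telegram objects are required. This assumes gpt_settings.gemini_key is configured and the placeholder token is a valid API key:

import asyncio

from chibi.models import Message
from chibi.services.providers.gemini_native import Gemini


async def main() -> None:
    provider = Gemini(token="YOUR_GEMINI_API_KEY")  # placeholder
    response, new_messages = await provider.get_chat_response(
        messages=[Message(role="user", content="Hello!")],
    )
    # ChatResponseSchema bundles the answer with provider, model, and usage;
    # new_messages holds only the turns appended during this exchange.
    print(response.answer)


asyncio.run(main())
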
+
+    async def _generate_image_via_content_creation_model(
+        self,
+        prompt: str,
+        model: str,
+    ) -> list[Image]:
+        image_size = (
+            gpt_settings.image_size_nano_banana if "flash" not in model else None
+        )  # flash-models don't support it
+
+        http_options = HttpOptions(httpx_async_client=self.get_async_httpx_client())
+
+        generation_config = GenerateContentConfig(
+            image_config=ImageConfig(
+                aspect_ratio=gpt_settings.image_aspect_ratio,
+                image_size=image_size,
+            )
+        )
+
+        async with Client(api_key=gpt_settings.gemini_key, http_options=http_options).aio as client:
+            response: GenerateContentResponse = await client.models.generate_content(
+                model=model,
+                contents=[prompt],
+                config=generation_config,
+            )
+        if not response.parts:
+            raise ServiceResponseError(provider=self.name, model=model, detail="No content-parts in response found")
+
+        images: list[Image | None] = [part.as_image() for part in response.parts if part]
+        return [image for image in images if image]
+
+    async def _generate_image_by_imagen(
+        self,
+        prompt: str,
+        model: str,
+    ) -> list[Image]:
+        http_options = HttpOptions(httpx_async_client=self.get_async_httpx_client())
+
+        if "preview" in model or "fast" in model:
+            image_size = None
+        else:
+            image_size = gpt_settings.image_size_imagen
+
+        generation_config = GenerateImagesConfig(
+            aspect_ratio=gpt_settings.image_aspect_ratio,
+            number_of_images=gpt_settings.image_n_choices,
+            http_options=http_options,
+            image_size=image_size,
+        )
+        async with Client(api_key=gpt_settings.gemini_key).aio as client:
+            response: GenerateImagesResponse = await client.models.generate_images(
+                model=model,
+                prompt=prompt,
+                config=generation_config,
+            )
+        images_in_response = response.images
+
+        return [image for image in images_in_response if image]
+
+    async def moderate_command(self, cmd: str, model: str | None = None) -> ModeratorsAnswer:
+        moderator_model = model or self.default_moderation_model or self.default_model
+
+        http_options = HttpOptions(httpx_async_client=self.get_async_httpx_client())
+        generation_config = GenerateContentConfig(
+            system_instruction=MODERATOR_PROMPT,
+            temperature=0.1,
+            max_output_tokens=1024,
+            presence_penalty=self.presence_penalty,
+            frequency_penalty=self.frequency_penalty,
+            http_options=http_options,
+            response_schema=ModeratorsAnswer,
+        )
+        messages = [
+            Message(role="user", content=cmd).to_google(),
+        ]
+        response: GenerateContentResponse = await self._generate_content(
+            model=moderator_model,
+            contents=messages,
+            config=generation_config,
+        )
+        answer = self._get_text(response)
+        if not answer:
+            return ModeratorsAnswer(verdict="declined", reason="no moderator answer received", status="error")
+
+        answer = answer.strip("```").strip("json").strip()
+        usage = get_usage_from_google_response(response_message=response)
+        if application_settings.is_influx_configured:
+            MetricsService.send_usage_metrics(metric=usage, model=moderator_model, provider=self.name)
+
+        try:
+            return ModeratorsAnswer.model_validate_json(answer)
+        except Exception as e:
+            logger.error(f"Error parsing moderator's response: {answer}. Error: {e}")
+            return ModeratorsAnswer(verdict="declined", reason=answer, status="error")
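
The fence-stripping before model_validate_json relies on str.strip() removing characters (not substrings) from both ends; it works because a JSON object starts with "{" and ends with "}", neither of which is a backtick or a letter of "json". A quick check with hypothetical field values:

# Hypothetical moderator reply wrapped in a Markdown code fence.
raw = '```json\n{"verdict": "approved", "reason": "ok", "status": "ok"}\n```'
cleaned = raw.strip("```").strip("json").strip()
print(cleaned)  # {"verdict": "approved", "reason": "ok", "status": "ok"}
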
+
+    async def get_images(self, prompt: str, model: str | None = None) -> list[BytesIO]:
+        selected_model = model or self.default_image_model
+
+        if "imagen-" in selected_model:
+            images = await self._generate_image_by_imagen(prompt=prompt, model=selected_model)
+        else:
+            images = await self._generate_image_via_content_creation_model(prompt=prompt, model=selected_model)
+
+        return [BytesIO(image.image_bytes) for image in images if image.image_bytes]
+
+    @classmethod
+    def is_image_ready_model(cls, model_name: str) -> bool:
+        return "image" in model_name
+
+    def get_model_display_name(self, model_name: str) -> str:
+        if "gemini-3-pro-image" in model_name:
+            display_name = model_name.replace("models/gemini-3-pro-image", "Nano Banana Pro")
+            return display_name.replace("-", " ").capitalize()
+
+        if "gemini-2.5-flash-image" in model_name:
+            display_name = model_name.replace("models/gemini-2.5-flash-image", "Nano Banana")
+            return display_name.replace("-", " ").capitalize()
+
+        if "imagen" in model_name:
+            model_name = model_name.replace("generate-", "")
+
+        return model_name[7:].replace("-", " ")
+
+    async def get_available_models(self, image_generation: bool = False) -> list[ModelChangeSchema]:
+        try:
+            async with Client(api_key=gpt_settings.gemini_key).aio as aclient:
+                models = await aclient.models.list()
+        except Exception as e:
+            logger.error(f"Failed to get available models for provider {self.name} due to exception: {e}")
+            return []
+
+        all_models = [
+            ModelChangeSchema(
+                provider=self.name,
+                name=model.name,
+                display_name=self.get_model_display_name(model.name),
+                image_generation=self.is_image_ready_model(model.name),
+            )
+            async for model in models
+            if model.name
+        ]
+        all_models.sort(key=lambda model: model.name)
+
+        if image_generation:
+            return [model for model in all_models if model.image_generation]
+
+        if gpt_settings.models_whitelist:
+            return [model for model in all_models if model.name in gpt_settings.models_whitelist]
+
+        return [model for model in all_models if self.is_chat_ready_model(model.name)]
chibi/services/providers/grok.py
@@ -0,0 +1,40 @@
+from openai import NOT_GIVEN
+
+from chibi.config import gpt_settings
+from chibi.schemas.app import ModelChangeSchema
+from chibi.services.providers.provider import OpenAIFriendlyProvider
+
+
+class Grok(OpenAIFriendlyProvider):
+    api_key = gpt_settings.grok_key
+    chat_ready = True
+    image_generation_ready = True
+    moderation_ready = True
+
+    base_url = "https://api.x.ai/v1"
+    name = "Grok"
+    model_name_keywords = ["grok"]
+    model_name_keywords_exclude = ["vision", "image"]
+    image_quality = NOT_GIVEN
+    image_size = NOT_GIVEN
+    default_image_model = "grok-2-image-1212"
+    default_model = "grok-4-1-fast-reasoning"
+    default_moderation_model = "grok-4-1-fast-non-reasoning"
+    presence_penalty = NOT_GIVEN
+    frequency_penalty = NOT_GIVEN
+    image_n_choices = 1
+
+    async def get_available_models(self, image_generation: bool = False) -> list[ModelChangeSchema]:
+        models = await super().get_available_models(image_generation=image_generation)
+
+        if not image_generation:
+            return models
+
+        # For some reason we stopped getting a grok-2-image-1212 model from the API. But it still works.
+        if not models:
+            models.append(
+                ModelChangeSchema(
+                    provider=self.name, name="grok-2-image-1212", display_name="Grok 2 Image", image_generation=True
+                )
+            )
+        return models
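
NOT_GIVEN is the openai SDK's sentinel for "omit this parameter from the request entirely", which is different from sending an explicit null; presumably the x.ai endpoint does not accept the penalty and image quality/size fields, so this subclass opts out of them. The sentinel is falsy, so truthiness checks skip it:

from openai import NOT_GIVEN

print(bool(NOT_GIVEN))  # False
print(NOT_GIVEN)        # NOT_GIVEN
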
chibi/services/providers/minimax.py
@@ -0,0 +1,96 @@
+from anthropic import AsyncClient
+from loguru import logger
+
+from chibi.config import gpt_settings
+from chibi.exceptions import NoApiKeyProvidedError
+from chibi.schemas.app import ModelChangeSchema
+from chibi.services.providers.provider import AnthropicFriendlyProvider
+
+
+class Minimax(AnthropicFriendlyProvider):
+    api_key = gpt_settings.minimax_api_key
+    chat_ready = True
+    tts_ready = True
+    moderation_ready = True
+
+    name = "Minimax"
+    base_url = "https://api.minimax.io/anthropic"
+    default_model = "MiniMax-M2.1"
+    default_moderation_model = "MiniMax-M2.1-lighting"
+
+    base_tts_url = "https://api.minimax.io/v1/"
+    default_tts_model = "speech-2.8-turbo"
+    default_tts_voice = "Korean_HaughtyLady"
+
+    def __init__(self, token: str) -> None:
+        self._client: AsyncClient | None = None
+        super().__init__(token=token)
+
+    @property
+    def _headers(self) -> dict[str, str]:
+        return {
+            "Accept": "application/json",
+            "Content-Type": "application/json",
+            "x-api-key": self.token,
+            "anthropic-version": "2023-06-01",
+        }
+
+    @property
+    def tts_headers(self) -> dict[str, str]:
+        return {
+            "Accept": "application/json",
+            "Content-Type": "application/json",
+            "Authorization": f"Bearer {self.token}",
+        }
+
+    @property
+    def client(self) -> AsyncClient:
+        if self._client:
+            return self._client
+
+        if not self.token:
+            raise NoApiKeyProvidedError(provider=self.name)
+
+        self._client = AsyncClient(api_key=self.token, base_url=self.base_url)
+        return self._client
+
+    async def get_available_models(self, image_generation: bool = False) -> list[ModelChangeSchema]:
+        supported_models = [
+            "MiniMax-M2.1",
+            "MiniMax-M2.1-lightning",
+            "MiniMax-M2",
+        ]
+        return [
+            ModelChangeSchema(
+                provider=self.name,
+                name=model_name,
+                display_name=model_name,
+                image_generation=False,
+            )
+            for model_name in supported_models
+            if not gpt_settings.models_whitelist or model_name in gpt_settings.models_whitelist
+        ]
+
+    async def speech(
+        self, text: str, voice: str | None = default_tts_voice, model: str | None = default_tts_model
+    ) -> bytes:
+        logger.info(f"Recording a voice message with model {model}...")
+
+        url = f"{self.base_tts_url}t2a_v2"
+
+        data = {
+            "model": model,
+            "text": text,
+            "voice_setting": {
+                "voice_id": voice,
+                "emotion": "happy",
+                "speed": 1.2,
+            },
+        }
+        try:
+            response = await self._request(method="POST", url=url, data=data, headers=self.tts_headers)
+        except Exception as e:
+            logger.error(f"Failed to synthesize speech with provider {self.name} due to exception: {e}")
+            return bytes()
+        response_data = response.json()["data"]
+        return bytes.fromhex(response_data["audio"])
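
The t2a_v2 endpoint returns the synthesized audio hex-encoded under data.audio, which speech() turns back into raw bytes with bytes.fromhex. A standalone sketch with an invented payload:

# Invented response fragment; a real payload carries a much longer hex string.
sample_response = {"data": {"audio": "4944330400"}}

audio = bytes.fromhex(sample_response["data"]["audio"])
print(audio)  # b'ID3\x04\x00' - these particular bytes spell an MP3 ID3 tag header
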