chibi-bot 1.6.0b0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. chibi/__init__.py +0 -0
  2. chibi/__main__.py +343 -0
  3. chibi/cli.py +90 -0
  4. chibi/config/__init__.py +6 -0
  5. chibi/config/app.py +123 -0
  6. chibi/config/gpt.py +108 -0
  7. chibi/config/logging.py +15 -0
  8. chibi/config/telegram.py +43 -0
  9. chibi/config_generator.py +233 -0
  10. chibi/constants.py +362 -0
  11. chibi/exceptions.py +58 -0
  12. chibi/models.py +496 -0
  13. chibi/schemas/__init__.py +0 -0
  14. chibi/schemas/anthropic.py +20 -0
  15. chibi/schemas/app.py +54 -0
  16. chibi/schemas/cloudflare.py +65 -0
  17. chibi/schemas/mistralai.py +56 -0
  18. chibi/schemas/suno.py +83 -0
  19. chibi/service.py +135 -0
  20. chibi/services/bot.py +276 -0
  21. chibi/services/lock_manager.py +20 -0
  22. chibi/services/mcp/manager.py +242 -0
  23. chibi/services/metrics.py +54 -0
  24. chibi/services/providers/__init__.py +16 -0
  25. chibi/services/providers/alibaba.py +79 -0
  26. chibi/services/providers/anthropic.py +40 -0
  27. chibi/services/providers/cloudflare.py +98 -0
  28. chibi/services/providers/constants/suno.py +2 -0
  29. chibi/services/providers/customopenai.py +11 -0
  30. chibi/services/providers/deepseek.py +15 -0
  31. chibi/services/providers/eleven_labs.py +85 -0
  32. chibi/services/providers/gemini_native.py +489 -0
  33. chibi/services/providers/grok.py +40 -0
  34. chibi/services/providers/minimax.py +96 -0
  35. chibi/services/providers/mistralai_native.py +312 -0
  36. chibi/services/providers/moonshotai.py +20 -0
  37. chibi/services/providers/openai.py +74 -0
  38. chibi/services/providers/provider.py +892 -0
  39. chibi/services/providers/suno.py +130 -0
  40. chibi/services/providers/tools/__init__.py +23 -0
  41. chibi/services/providers/tools/cmd.py +132 -0
  42. chibi/services/providers/tools/common.py +127 -0
  43. chibi/services/providers/tools/constants.py +78 -0
  44. chibi/services/providers/tools/exceptions.py +1 -0
  45. chibi/services/providers/tools/file_editor.py +875 -0
  46. chibi/services/providers/tools/mcp_management.py +274 -0
  47. chibi/services/providers/tools/mcp_simple.py +72 -0
  48. chibi/services/providers/tools/media.py +451 -0
  49. chibi/services/providers/tools/memory.py +252 -0
  50. chibi/services/providers/tools/schemas.py +10 -0
  51. chibi/services/providers/tools/send.py +435 -0
  52. chibi/services/providers/tools/tool.py +163 -0
  53. chibi/services/providers/tools/utils.py +146 -0
  54. chibi/services/providers/tools/web.py +261 -0
  55. chibi/services/providers/utils.py +182 -0
  56. chibi/services/task_manager.py +93 -0
  57. chibi/services/user.py +269 -0
  58. chibi/storage/abstract.py +54 -0
  59. chibi/storage/database.py +86 -0
  60. chibi/storage/dynamodb.py +257 -0
  61. chibi/storage/local.py +70 -0
  62. chibi/storage/redis.py +91 -0
  63. chibi/utils/__init__.py +0 -0
  64. chibi/utils/app.py +249 -0
  65. chibi/utils/telegram.py +521 -0
  66. chibi_bot-1.6.0b0.dist-info/LICENSE +21 -0
  67. chibi_bot-1.6.0b0.dist-info/METADATA +340 -0
  68. chibi_bot-1.6.0b0.dist-info/RECORD +70 -0
  69. chibi_bot-1.6.0b0.dist-info/WHEEL +4 -0
  70. chibi_bot-1.6.0b0.dist-info/entry_points.txt +3 -0
@@ -0,0 +1,130 @@
1
+ from asyncio import sleep
2
+
3
+ from loguru import logger
4
+ from tenacity import retry, retry_if_result, stop_after_attempt, wait_fixed
5
+
6
+ from chibi.config import gpt_settings
7
+ from chibi.exceptions import ServiceResponseError
8
+ from chibi.schemas.app import ModelChangeSchema
9
+ from chibi.schemas.suno import SunoGetGenerationDetailsSchema
10
+ from chibi.services.providers.constants.suno import POLLING_ATTEMPTS_MAX, POLLING_ATTEMPTS_WAIT_BETWEEN
11
+ from chibi.services.providers.provider import RestApiFriendlyProvider
12
+ from chibi.services.providers.utils import suno_task_still_processing
13
+
14
+
15
class Suno(RestApiFriendlyProvider):
    """Music-generation provider backed by the sunoapi.org REST API.

    Chat is not supported (``chat_ready`` is ``False``); this provider only
    submits music-generation tasks and polls for their completion.
    """

    name = "Suno"

    api_key = gpt_settings.suno_key
    chat_ready = False
    music_ready = True
    base_url = "https://api.sunoapi.org"
    default_model = "V5"

    @property
    def _headers(self) -> dict[str, str]:
        """Bearer-token auth headers attached to every API request."""
        return {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }

    async def get_available_models(self, image_generation: bool = False) -> list[ModelChangeSchema]:
        """Suno exposes no selectable chat/LLM models, so the list is always empty."""
        return []

    async def _submit_generation(self, body: dict) -> int:
        """POST *body* to the generate endpoint and return the created task id.

        Raises ServiceResponseError when the API envelope reports a non-200
        ``code`` or carries no ``taskId``.
        """
        response = await self._request(
            method="POST",
            url=f"{self.base_url}/api/v1/generate",
            data=body,
        )
        response.raise_for_status()
        payload = response.json()
        if payload.get("code") != 200:
            raise ServiceResponseError(provider=self.name, detail=f"Unsuccessful status code response: {payload}")

        task_id = payload.get("data", {}).get("taskId")
        if not task_id:
            raise ServiceResponseError(provider=self.name, detail=f"Unsuccessful status code response: {payload}")
        return task_id

    @retry(
        wait=wait_fixed(POLLING_ATTEMPTS_WAIT_BETWEEN),
        stop=stop_after_attempt(POLLING_ATTEMPTS_MAX),
        retry=retry_if_result(suno_task_still_processing),
    )
    async def poll_result(self, task_id: int | str) -> SunoGetGenerationDetailsSchema:
        """Fetch the state of a generation task, retrying while it is still processing."""
        logger.info(f"[Suno] Checking status of the task #{task_id}...")
        response = await self._request(
            method="GET",
            url=f"{self.base_url}/api/v1/generate/record-info",
            params={"taskId": str(task_id)},
        )
        response.raise_for_status()
        payload = response.json()
        if payload.get("code") != 200:
            raise ServiceResponseError(
                provider=self.name,
                detail=(
                    f"Unsuccessful status code response while checking status of the task {task_id}: {payload}"
                ),
            )
        return SunoGetGenerationDetailsSchema.model_validate(payload)

    async def order_music_generation(
        self,
        prompt: str,
        instrumental_only: bool = False,
        model: str = default_model,
    ) -> int:
        """Submit a simple (non-custom-mode) generation request; return its task id."""
        # NOTE(review): callBackUrl is sent as False (not a URL) — results are
        # retrieved via polling instead; confirm the API tolerates this value.
        body = {
            "customMode": False,
            "instrumental": instrumental_only,
            "model": model,
            "prompt": prompt,
            "callBackUrl": False,
        }
        return await self._submit_generation(body)

    async def order_music_generation_advanced_mode(
        self,
        prompt: str,
        style: str,
        title: str,
        instrumental_only: bool = False,
        model: str = default_model,
        negative_tags: str | None = None,
        vocal_gender: str | None = None,
        style_weight: float = 0.5,
        weirdness_constraint: float = 0.5,
    ) -> int:
        """Submit a custom-mode generation request (explicit style/title/etc.); return its task id."""
        body = {
            "customMode": True,
            "instrumental": instrumental_only,
            "model": model,
            "prompt": prompt,
            "callBackUrl": False,
            "style": style,
            "title": title,
            "negativeTags": negative_tags,
            "vocalGender": vocal_gender,
            "styleWeight": style_weight,
            "weirdnessConstraint": weirdness_constraint,
        }
        return await self._submit_generation(body)

    async def generate_music(
        self,
        prompt: str,
        instrumental_only: bool = False,
        model: str = default_model,
    ) -> SunoGetGenerationDetailsSchema:
        """Order a track, wait one polling interval, then poll until details are ready."""
        new_task_id = await self.order_music_generation(
            prompt=prompt, instrumental_only=instrumental_only, model=model
        )
        await sleep(POLLING_ATTEMPTS_WAIT_BETWEEN)
        return await self.poll_result(task_id=new_task_id)
@@ -0,0 +1,23 @@
1
+ # flake8: noqa: F401
2
+
3
+ from .cmd import RunCommandInTerminalTool
4
+ from .common import GetCurrentDatetimeTool
5
+ from .file_editor import (
6
+ AppendToFileTool,
7
+ CreateFileTool,
8
+ FindAndReplaceSectionTool,
9
+ InsertAfterPatternTool,
10
+ InsertAtLineTool,
11
+ InsertBeforePatternTool,
12
+ ReplaceInFileRegexTool,
13
+ ReplaceInFileTool,
14
+ ReplaceLinesTool,
15
+ )
16
+ from .mcp_management import DeinitializeMCPServer, InitializeSseMCPServer, InitializeStdioMCPServer
17
+ from .mcp_simple import McpEchoTool
18
+ from .media import TextToSpeechTool
19
+ from .memory import SetUserInfoTool
20
+ from .schemas import ToolResponse
21
+ from .send import SendAudioTool, SendImageTool, SendMediaGroupTool, SendVideoTool
22
+ from .tool import RegisteredChibiTools, RegisteredFunctionsMap
23
+ from .web import DDGSWebSearchTool, GoogleSearchTool, ReadWebPageTool, SearchNewsTool
@@ -0,0 +1,132 @@
1
+ import asyncio
2
+ import os
3
+ import signal
4
+ from typing import Any, Unpack
5
+
6
+ from loguru import logger
7
+ from openai.types.chat import ChatCompletionToolParam
8
+ from openai.types.shared_params import FunctionDefinition
9
+
10
+ from chibi.config import gpt_settings
11
+ from chibi.schemas.app import ModeratorsAnswer
12
+ from chibi.services.providers.tools.constants import CMD_STDOUT_LIMIT
13
+ from chibi.services.providers.tools.exceptions import ToolException
14
+ from chibi.services.providers.tools.tool import ChibiTool
15
+ from chibi.services.providers.tools.utils import AdditionalOptions
16
+ from chibi.services.user import get_cwd, get_moderation_provider
17
+
18
+
19
class RunCommandInTerminalTool(ChibiTool):
    """Execute an arbitrary shell command on the host, guarded by AI pre-moderation.

    The command is first sent to the user's moderation provider; only commands
    the moderator accepts are executed. Execution happens through
    ``asyncio.create_subprocess_shell`` in a new session (its own process
    group) so that the entire group can be killed if the timeout expires.
    """

    # Only registered when filesystem access is enabled in settings.
    register = gpt_settings.filesystem_access
    definition = ChatCompletionToolParam(
        type="function",
        function=FunctionDefinition(
            name="run_command_in_terminal",
            description=(
                "Run command in the zsh shell (MacOS). Will run via python's subprocess.run() "
                "Will return json including return code, stdout and stderr."
            ),
            parameters={
                "type": "object",
                "properties": {
                    "cmd": {"type": "string", "description": "Shell command to run."},
                    "cwd": {
                        "type": "string",
                        "description": (
                            "The working directory to run the command in. The default value provided in user data."
                        ),
                    },
                    "timeout": {
                        "type": "integer",
                        "description": (
                            "The timeout for command execution in seconds. Default is 30 sec. "
                            "Change it if you're expecting longer execution."
                        ),
                    },
                },
                "required": ["cmd"],
            },
        ),
    )
    name = "run_command_in_terminal"

    @classmethod
    async def function(
        cls, cmd: str, cwd: str | None = None, timeout: int = 30, **kwargs: Unpack[AdditionalOptions]
    ) -> dict[str, Any]:
        """Moderate and run *cmd*; return return code plus (possibly truncated) stdout/stderr.

        Args:
            cmd: Shell command line to execute.
            cwd: Working directory; falls back to the user's stored cwd when omitted.
            timeout: Seconds to wait before killing the whole process group.
            **kwargs: Auto-injected options; ``user_id`` is required, ``model``
                is used for logging only.

        Raises:
            ValueError: If ``user_id`` was not provided.
            ToolException: If the moderator declines the command, the command
                times out, or the subprocess cannot be started.
        """
        model = kwargs.get("model", "Unknown model")
        user_id = kwargs.get("user_id")
        if not user_id:
            raise ValueError("This function requires user_id to be automatically provided.")

        if not cwd:
            cwd = await get_cwd(user_id=user_id)

        moderation_provider = await get_moderation_provider(user_id=user_id)

        logger.log("MODERATOR", f"[{model}] Pre-moderating command: '{cmd}'. CWD: {cwd}")
        moderator_answer: ModeratorsAnswer = await moderation_provider.moderate_command(
            cmd=cmd, model=gpt_settings.moderation_model
        )
        if moderator_answer.verdict == "declined":
            raise ToolException(
                f"Moderator ({moderation_provider.name}) DECLINED command '{cmd}' from model "
                f"{kwargs.get('model', 'unknown')}. Reason: {moderator_answer.reason}"
            )

        logger.log(
            "MODERATOR",
            (
                f"[{model}] Moderator ({moderation_provider.name}) ACCEPTED command '{cmd}' "
                f"from model {kwargs.get('model', 'unknown')}"
            ),
        )
        logger.log("TOOL", f"[{model}] Running command in terminal: {cmd}. CWD: {cwd}. Timeout: {timeout}")
        try:
            # NOTE(review): the tool description mentions zsh/subprocess.run, but
            # this actually uses the system default shell via create_subprocess_shell.
            process = await asyncio.create_subprocess_shell(
                cmd=cmd,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
                cwd=cwd,
                start_new_session=True,  # own process group -> killable as a unit on timeout
            )
            stdout, stderr = await asyncio.wait_for(process.communicate(), timeout=float(timeout))
        except asyncio.TimeoutError:
            # Kill the whole process group first (children included), then the
            # process itself as a fallback; either may already be gone.
            try:
                os.killpg(os.getpgid(process.pid), signal.SIGKILL)
            except ProcessLookupError:
                pass

            try:
                process.kill()
                await process.wait()
            except ProcessLookupError:
                pass

            raise ToolException(f"Command execution timed out after {timeout} seconds. Process group killed.")
        except Exception as e:
            raise ToolException(f"Failed to run command in terminal! Command: '{cmd}'. Error: {e}") from e

        # Decode defensively: command output is not guaranteed to be valid UTF-8.
        raw_stdout = stdout.decode(errors="replace")
        raw_stderr = stderr.decode(errors="replace")

        result: dict[str, str | int | None] = {"return_code": process.returncode}

        if len(raw_stdout) > CMD_STDOUT_LIMIT or len(raw_stderr) > CMD_STDOUT_LIMIT:
            result["WARNING"] = (
                "The volume of stdout/stderr data is excessively large "
                f"(over {CMD_STDOUT_LIMIT} characters). If this is the "
                f"result of reading from a file, try reading it in parts."
            )
        # Bug fix: keep the FIRST CMD_STDOUT_LIMIT characters. The original kept
        # the tail beyond the limit (`[CMD_STDOUT_LIMIT:]`), which both dropped
        # the interesting prefix and could itself still be arbitrarily large.
        # `<=` keeps the boundary consistent with the warning condition above.
        result["stdout"] = (
            raw_stdout if len(raw_stdout) <= CMD_STDOUT_LIMIT else f"...truncated... {raw_stdout[:CMD_STDOUT_LIMIT]}"
        )
        result["stderr"] = (
            raw_stderr if len(raw_stderr) <= CMD_STDOUT_LIMIT else f"...truncated... {raw_stderr[:CMD_STDOUT_LIMIT]}"
        )

        logger.log(
            "TOOL",
            f"[{kwargs.get('model', 'Unknown model')}] Command '{cmd}' executed. Return code: {process.returncode}.",
        )
        return result
@@ -0,0 +1,127 @@
1
+ import asyncio
2
+ import datetime
3
+ from typing import Any, Unpack
4
+
5
+ from loguru import logger
6
+ from openai.types.chat import ChatCompletionToolParam
7
+ from openai.types.shared_params import FunctionDefinition
8
+
9
+ from chibi.config import gpt_settings
10
+ from chibi.schemas.app import ChatResponseSchema, ModelChangeSchema
11
+ from chibi.services.providers.tools.exceptions import ToolException
12
+ from chibi.services.providers.tools.tool import ChibiTool
13
+ from chibi.services.providers.tools.utils import AdditionalOptions, get_sub_agent_response
14
+
15
+
16
class GetAvailableLLMModelsTool(ChibiTool):
    """Tool exposing the list of LLM providers/models available to the calling user."""

    register = True
    definition = ChatCompletionToolParam(
        type="function",
        function=FunctionDefinition(
            name="get_available_llm_models",
            description="Get LLM models and providers available for user.",
            parameters={"type": "object", "properties": {}, "required": []},
        ),
    )
    name = "get_available_llm_models"

    @classmethod
    async def function(cls, **kwargs: Unpack[AdditionalOptions]) -> dict[str, Any]:
        """Return ``{"available_models": [...]}`` for the user given in ``kwargs["user_id"]``."""
        user_id = kwargs.get("user_id")
        if not user_id:
            raise ToolException("This function requires user_id to be automatically provided.")

        logger.log("TOOL", f"Getting available LLM models for user {user_id}...")

        # Imported locally, matching the original (presumably to avoid a circular import).
        from chibi.services.user import get_models_available

        models: list[ModelChangeSchema] = await get_models_available(user_id=user_id, image_generation=False)
        exported = [m.model_dump(include={"provider", "name", "display_name"}) for m in models]
        return {"available_models": exported}
47
+
48
+
49
class DelegateTool(ChibiTool):
    """Tool that hands one atomic task to a sub-agent LLM and returns its answer."""

    register = gpt_settings.allow_delegation
    run_in_background_by_default = True
    allow_model_to_change_background_mode = False
    definition = ChatCompletionToolParam(
        type="function",
        function=FunctionDefinition(
            name="delegate_task",
            description=(
                "Delegate exactly one task to a sub-agent - an LLM identical to you. The prompt should be "
                "exhaustive and expect a concrete result, or an explanation for its absence. The task should be "
                "as atomic as possible. Delegate preferably tasks that involve processing large volumes of "
                "information, to avoid saturating your context. Try to assign simpler tasks to cheaper and faster "
                "models. You can find out the list of available models by executing tool get_available_llm_models. "
                "If no model/provider specified, your model will be used (be sure you know your model)."
            ),
            parameters={
                "type": "object",
                "properties": {
                    "prompt": {"type": "string", "description": "Prompt"},
                    "provider_name": {"type": "string", "description": "Provider name, i.e. 'OpenAI'"},
                    "model_name": {"type": "string", "description": "Model name, i.e. 'gpt-5.2'"},
                    "timeout": {"type": "integer", "description": "Timeout in seconds", "default": 600},
                },
                "required": ["prompt"],
            },
        ),
    )
    name = "delegate_task"

    @classmethod
    async def function(
        cls,
        prompt: str,
        provider_name: str | None = None,
        model_name: str | None = None,
        timeout: int = 600,
        **kwargs: Unpack[AdditionalOptions],
    ) -> dict[str, str]:
        """Run *prompt* on a sub-agent (optionally a specific provider/model) and return its answer.

        Raises ToolException when the required auto-injected ``user_id``/``model``
        are missing or when the sub-agent does not finish within *timeout* seconds.
        """
        user_id = kwargs.get("user_id")
        if not user_id:
            raise ToolException("This function requires user_id to be automatically provided.")
        model = kwargs.get("model")
        if not model:
            raise ToolException("This function requires model to be automatically provided.")
        logger.log("DELEGATE", f"[{model}] Delegating a task to {model_name or model}: {prompt}")

        try:
            # wait_for cancels the underlying coroutine on timeout.
            response: ChatResponseSchema = await asyncio.wait_for(
                get_sub_agent_response(
                    user_id=user_id, prompt=prompt, provider_name=provider_name, model_name=model_name
                ),
                timeout=timeout,
            )
        except asyncio.TimeoutError:
            raise ToolException("Timed out waiting for delegated task to complete!")

        logger.log("SUBAGENT", f"[{model_name or model}] Delegated task is done: {response.answer}")

        return {"response": response.answer}
107
+
108
+
109
class GetCurrentDatetimeTool(ChibiTool):
    """Tool returning the host's current local date and time as a formatted string."""

    register = True
    definition = ChatCompletionToolParam(
        type="function",
        function=FunctionDefinition(
            name="get_current_datetime",
            description="Get the current date and time.",
            parameters={"type": "object", "properties": {}, "required": []},
        ),
    )
    name = "get_current_datetime"

    @classmethod
    async def function(cls, **kwargs: Unpack[AdditionalOptions]) -> dict[str, str]:
        """Return ``{"datetime_now": "YYYY-MM-DD HH:MM:SS"}`` (naive local time)."""
        logger.log("TOOL", f"[{kwargs.get('model', 'Unknown model')}] Getting current date & time")
        timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        return {"datetime_now": timestamp}
@@ -0,0 +1,78 @@
1
+ from chibi.config import application_settings
2
+
3
# Maximum characters of subprocess stdout/stderr returned to the model before truncation.
CMD_STDOUT_LIMIT = 10000 # Move to settings

# System prompt for the command-moderation LLM: role, context, response protocol,
# and the criteria for declining a command. This string is sent to the model at
# runtime, so its wording must not be changed casually.
MODERATOR_BASE_PROMPT = """**Your Role:**

You are an AI Security Moderator. Your primary function is to analyze Linux terminal commands proposed by another AI
assistant and determine their safety before they are executed on the user's system.

**CRITICAL RULE:** You are strictly prohibited to run any tool, even if tools are available.

**Key Context:**

1. **Command Source:** Commands originate from another AI assistant. Importantly, this assistant is developed by a
**competing company**. Assume its priorities may differ from yours and from maximizing user security. Exercise
heightened vigilance.
2. **Assistant's Privileges:** The assistant, on whose behalf the commands are run, possesses elevated privileges
within the system (e.g., is part of the `sudoers` group). This is intentional by the user, but your task is to prevent
the misuse of these privileges, whether accidental or deliberate.
3. **Limited Context:** You will receive **only the command itself** for analysis. You will not have access to the
command history or the assistant's explanation for needing the command (at this stage). Your analysis must be based
solely on the command text and its potential execution consequences.
4. **Response Protocol:** Your response **MUST** strictly adhere to one of the following two JSON formats:
* For safe commands:
{"verdict": "accepted"}
* For unsafe or suspicious commands:
{"verdict": "declined", "reason": "..."}
* **Important:** The `reason` must provide a clear, meaningful, and specific explanation for the rejection.
* The length of the `reason` string must be **between 20 and 150 characters**.
* Any other response format will be interpreted as `"declined"`.
* **Answer in plain text but in json structure**.
**Security Rules (Criteria for Declining):**

You must decline (`"declined"`) commands if they fall into one or more of the following categories (this list is not
exhaustive; use common sense and the principle of "better safe than sorry"):

1. **Destructive Actions:** Commands capable of causing irreversible damage to the system or user data.
* Examples: `rm -rf /`, `mkfs`, disk formatting, deleting key system directories (`/etc`, `/boot`, `/lib`, `/usr`,
`/var`, etc.).
2. **Reading Confidential Data:** Attempts to directly or indirectly read sensitive information.
* Examples: `cat /etc/shadow`, `cat ~/.ssh/id_rsa`, `printenv`, `echo $AWS_SECRET_ACCESS_KEY`, reading
configuration files with passwords or API keys, accessing shell history (`~/.bash_history`, `~/.zsh_history`),
or other private user files.
3. **Data Exfiltration:** Attempts to send local files, environment variables, secrets, or any other system or user
information to the external network.
* Examples: Using `curl`, `wget`, `scp`, `nc`, `rsync`, etc., to send files (especially system files or files
from home directories) or environment variables to remote servers. Exception: Downloading files *from* the internet
upon explicit user request (but be cautious with URLs).
4. **Privilege Escalation or Vulnerability Exploitation:** Attempts to gain `root` privileges through unauthorized
means, change file permissions to bypass restrictions, or exploit known vulnerabilities.
* Examples: `sudo su` (if policy requires granular `sudo`), editing the `/etc/sudoers` file, `chmod 777 /`,
attempting to run exploits.
5. **User and Group Management:** Creating, deleting, or modifying system users or groups.
* Examples: `useradd`, `userdel`, `usermod`, `groupadd`, `groupdel`, `passwd`.
6. **Critical Configuration Changes:** Modifications that could disrupt system operation, network connectivity, or
security.
* Examples: Carelessly changing `iptables` or `ufw` rules, modifying SSH server configuration
(`/etc/ssh/sshd_config`), altering system boot settings.
7. **Obfuscated or Suspicious Commands:** Commands that appear intentionally confusing, use encoding (like base64) to
hide the actual actions, or contain strange/atypical constructs that hinder analysis. If you cannot confidently
determine safety, decline.
"""

# Allow-list carve-outs appended to the base prompt: access to the configured
# home directory (except .env) and dependency-management tooling.
MODERATOR_ADDITIONAL_CONDITIONS = (
    f"You should accept the access to the {application_settings.home_dir} directory and files, excluding .env file. "
    "You also should accept the AI assistant to use pip/poetry or similar tools "
    "to install/delete/update project dependencies. "
)

# Closing instruction restating the expected verdict format.
MODERATOR_TASK = """
**Your Task:**
Upon receiving a command, thoroughly analyze it against the rules above. If the command is safe, return
`{"verdict": "accepted"}`. If the command is dangerous or suspicious, return `{"verdict": "declined"}`
with the reason. Act decisively; your goal is to protect the user's system from potentially harmful actions by the
competitor's AI assistant.
"""

# Final prompt handed to the moderation provider: base rules + carve-outs + task.
MODERATOR_PROMPT = MODERATOR_BASE_PROMPT + MODERATOR_ADDITIONAL_CONDITIONS + MODERATOR_TASK
@@ -0,0 +1 @@
1
class ToolException(Exception):
    """Raised by a Chibi tool to report a tool-level failure to the caller."""