fenix-mcp 0.1.0__tar.gz → 0.2.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. {fenix_mcp-0.1.0/fenix_mcp.egg-info → fenix_mcp-0.2.2}/PKG-INFO +56 -6
  2. fenix_mcp-0.1.0/PKG-INFO → fenix_mcp-0.2.2/README.md +50 -21
  3. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp/__init__.py +4 -1
  4. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp/application/tool_base.py +3 -2
  5. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp/application/tool_registry.py +3 -1
  6. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp/application/tools/health.py +1 -3
  7. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp/application/tools/initialize.py +20 -7
  8. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp/application/tools/intelligence.py +47 -15
  9. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp/application/tools/knowledge.py +162 -50
  10. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp/application/tools/productivity.py +26 -9
  11. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp/application/tools/user_config.py +18 -9
  12. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp/domain/initialization.py +22 -22
  13. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp/domain/intelligence.py +27 -15
  14. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp/domain/knowledge.py +161 -61
  15. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp/domain/productivity.py +3 -2
  16. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp/domain/user_config.py +16 -7
  17. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp/infrastructure/config.py +6 -2
  18. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp/infrastructure/context.py +0 -1
  19. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp/infrastructure/fenix_api/client.py +118 -38
  20. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp/infrastructure/http_client.py +1 -1
  21. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp/interface/mcp_server.py +0 -3
  22. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp/interface/transports.py +19 -10
  23. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp/main.py +4 -1
  24. fenix_mcp-0.1.0/README.md → fenix_mcp-0.2.2/fenix_mcp.egg-info/PKG-INFO +71 -5
  25. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp.egg-info/requires.txt +5 -0
  26. fenix_mcp-0.2.2/pyproject.toml +84 -0
  27. fenix_mcp-0.1.0/pyproject.toml +0 -30
  28. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp/application/presenters.py +0 -0
  29. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp/application/tools/__init__.py +0 -0
  30. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp/infrastructure/logging.py +0 -0
  31. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp.egg-info/SOURCES.txt +0 -0
  32. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp.egg-info/dependency_links.txt +0 -0
  33. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp.egg-info/entry_points.txt +0 -0
  34. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/fenix_mcp.egg-info/top_level.txt +0 -0
  35. {fenix_mcp-0.1.0 → fenix_mcp-0.2.2}/setup.cfg +0 -0
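The hunks below boil down to four themes: the version bump, new dev-tooling dependencies, a reworked README around Semantic Release, and black-style reflows of the Python modules. To confirm which release is installed locally while reading along, a minimal stdlib check (not part of the package) is enough:

```python
# Generic check of the locally installed release; not part of fenix-mcp itself.
from importlib.metadata import PackageNotFoundError, version

try:
    print(version("fenix-mcp"))  # "0.2.2" after upgrading
except PackageNotFoundError:
    print("fenix-mcp is not installed")
```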
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: fenix-mcp
- Version: 0.1.0
+ Version: 0.2.2
  Summary: Fênix Cloud MCP server implemented in Python
  Author: Fenix Inc
  Requires-Python: >=3.10
@@ -13,6 +13,11 @@ Requires-Dist: pydantic-settings>=2.0
  Provides-Extra: dev
  Requires-Dist: pytest>=7.4; extra == "dev"
  Requires-Dist: pytest-asyncio>=0.23; extra == "dev"
+ Requires-Dist: pytest-cov>=4.0; extra == "dev"
+ Requires-Dist: black>=23.0; extra == "dev"
+ Requires-Dist: flake8>=6.0; extra == "dev"
+ Requires-Dist: mypy>=1.0; extra == "dev"
+ Requires-Dist: twine>=4.0; extra == "dev"

  # Fênix MCP — Live Access to Fênix Cloud Data

@@ -132,17 +137,61 @@ Set `FENIX_TRANSPORT_MODE=both` to run STDIO and HTTP simultaneously. The defaul

  > Copy `.env.example` to `.env` for easier customization.

- ## 🧪 Local Testing
+
+ ## 🧪 Development
+
+ ### Local Testing

  ```bash
+ # Install development dependencies
  pip install -e .[dev]
+
+ # Run tests
  pytest
+
+ # Run with coverage
+ pytest --cov=fenix_mcp --cov-report=html
+
+ # Run linting
+ flake8 fenix_mcp/ tests/
+ black --check fenix_mcp/ tests/
+
+ # Run type checking
+ mypy fenix_mcp/
+
+ # Format code
+ black fenix_mcp/ tests/
  ```

+ ### Pre-commit Hooks (Optional)
+
+ ```bash
+ # Install pre-commit
+ pip install pre-commit
+
+ # Install hooks
+ pre-commit install
+
+ # Run on all files
+ pre-commit run --all-files
+ ```
+
+ ### Commit Convention
+
+ This project follows [Conventional Commits](https://www.conventionalcommits.org/):
+
+ - `fix:` - Bug fixes (patch version bump)
+ - `feat:` - New features (minor version bump)
+ - `BREAKING CHANGE:` - Breaking changes (major version bump)
+ - `chore:` - Maintenance tasks
+ - `docs:` - Documentation changes
+ - `test:` - Test additions/changes
+
  ## 🔄 Automation

- - **CI (GitHub Actions)** – runs on pushes and pull requests targeting `main`. It installs dependencies, runs `pytest`, builds the distribution artifacts, and uploads them as workflow artifacts.
- - **Publish workflow** – push a tag `v*` (or trigger the "Publish" workflow manually) to build the package and, if `PYPI_API_TOKEN` is set in repository secrets, upload artifacts to PyPI via `twine`.
+ - **CI (GitHub Actions)** – runs on pushes and pull requests targeting `main`. It installs dependencies, runs tests on Python 3.11, enforces flake8/black/mypy, generates coverage, builds the distribution (`python -m build`) and, on pushes, uploads artifacts for debugging.
+
+ - **Semantic Release** – after the CI job succeeds on `main`, the workflow installs the required `semantic-release` plugins and runs `npx semantic-release`. Conventional Commits decide the next version, `scripts/bump_version.py` updates `fenix_mcp.__version__`, the build artifacts are regenerated, and release notes/assets are published to GitHub and PyPI (using `PYPI_API_TOKEN`). If no eligible commit (`feat`, `fix`, or `BREAKING CHANGE`) exists since the last tag, no new release is produced.

  ## 🧰 Available Tools

@@ -200,8 +249,9 @@ STDIO stays active for MCP clients; HTTP will listen on `FENIX_HTTP_HOST:FENIX_H
  1. Fork the repository
  2. Create a branch: `git checkout -b feat/my-feature`
  3. Install dev dependencies: `pip install -e .[dev]`
- 4. Run `pytest`
- 5. Open a Pull Request describing your changes
+ 4. Use Conventional Commits (`feat:`, `fix:`, or `BREAKING CHANGE:`) so Semantic Release can infer the next version.
+ 5. Run `pytest`
+ 6. Open a Pull Request describing your changes

  ## 📄 License

@@ -1,19 +1,3 @@
- Metadata-Version: 2.4
- Name: fenix-mcp
- Version: 0.1.0
- Summary: Fênix Cloud MCP server implemented in Python
- Author: Fenix Inc
- Requires-Python: >=3.10
- Description-Content-Type: text/markdown
- Requires-Dist: pydantic>=2.5
- Requires-Dist: requests>=2.31
- Requires-Dist: urllib3>=2.0
- Requires-Dist: aiohttp>=3.9
- Requires-Dist: pydantic-settings>=2.0
- Provides-Extra: dev
- Requires-Dist: pytest>=7.4; extra == "dev"
- Requires-Dist: pytest-asyncio>=0.23; extra == "dev"
-
  # Fênix MCP — Live Access to Fênix Cloud Data

  [![PyPI](https://img.shields.io/pypi/v/fenix-mcp.svg)](https://pypi.org/project/fenix-mcp/) [![Python](https://img.shields.io/badge/python-3.10%2B-blue.svg)](https://www.python.org/) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](./LICENSE)
@@ -132,17 +116,61 @@ Set `FENIX_TRANSPORT_MODE=both` to run STDIO and HTTP simultaneously. The defaul

  > Copy `.env.example` to `.env` for easier customization.

- ## 🧪 Local Testing
+
+ ## 🧪 Development
+
+ ### Local Testing

  ```bash
+ # Install development dependencies
  pip install -e .[dev]
+
+ # Run tests
  pytest
+
+ # Run with coverage
+ pytest --cov=fenix_mcp --cov-report=html
+
+ # Run linting
+ flake8 fenix_mcp/ tests/
+ black --check fenix_mcp/ tests/
+
+ # Run type checking
+ mypy fenix_mcp/
+
+ # Format code
+ black fenix_mcp/ tests/
+ ```
+
+ ### Pre-commit Hooks (Optional)
+
+ ```bash
+ # Install pre-commit
+ pip install pre-commit
+
+ # Install hooks
+ pre-commit install
+
+ # Run on all files
+ pre-commit run --all-files
  ```

+ ### Commit Convention
+
+ This project follows [Conventional Commits](https://www.conventionalcommits.org/):
+
+ - `fix:` - Bug fixes (patch version bump)
+ - `feat:` - New features (minor version bump)
+ - `BREAKING CHANGE:` - Breaking changes (major version bump)
+ - `chore:` - Maintenance tasks
+ - `docs:` - Documentation changes
+ - `test:` - Test additions/changes
+
  ## 🔄 Automation

- - **CI (GitHub Actions)** – runs on pushes and pull requests targeting `main`. It installs dependencies, runs `pytest`, builds the distribution artifacts, and uploads them as workflow artifacts.
- - **Publish workflow** – push a tag `v*` (or trigger the "Publish" workflow manually) to build the package and, if `PYPI_API_TOKEN` is set in repository secrets, upload artifacts to PyPI via `twine`.
+ - **CI (GitHub Actions)** – runs on pushes and pull requests targeting `main`. It installs dependencies, runs tests on Python 3.11, enforces flake8/black/mypy, generates coverage, builds the distribution (`python -m build`) and, on pushes, uploads artifacts for debugging.
+
+ - **Semantic Release** – after the CI job succeeds on `main`, the workflow installs the required `semantic-release` plugins and runs `npx semantic-release`. Conventional Commits decide the next version, `scripts/bump_version.py` updates `fenix_mcp.__version__`, the build artifacts are regenerated, and release notes/assets are published to GitHub and PyPI (using `PYPI_API_TOKEN`). If no eligible commit (`feat`, `fix`, or `BREAKING CHANGE`) exists since the last tag, no new release is produced.

  ## 🧰 Available Tools

@@ -200,8 +228,9 @@ STDIO stays active for MCP clients; HTTP will listen on `FENIX_HTTP_HOST:FENIX_H
  1. Fork the repository
  2. Create a branch: `git checkout -b feat/my-feature`
  3. Install dev dependencies: `pip install -e .[dev]`
- 4. Run `pytest`
- 5. Open a Pull Request describing your changes
+ 4. Use Conventional Commits (`feat:`, `fix:`, or `BREAKING CHANGE:`) so Semantic Release can infer the next version.
+ 5. Run `pytest`
+ 6. Open a Pull Request describing your changes

  ## 📄 License

@@ -10,8 +10,11 @@ This package follows a Clean Architecture layout inside the MCP ecosystem:
  - application: tools, registries, presenters and use-case orchestrators
  - domain: pure business models and services
  - infrastructure: API clients, config, logging and shared context
+
+ Version 0.1.0 - Initial release with basic MCP functionality.
+ Updated with improved error handling and better documentation.
  """

  __all__ = ["__version__"]

- __version__ = "0.1.0"
+ __version__ = "0.2.2"
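This `__version__` bump is the step the Semantic Release workflow automates through `scripts/bump_version.py`. That script is not included in this diff, so the following is only a hypothetical sketch of what such a step might look like:

```python
# Hypothetical sketch of a bump step like scripts/bump_version.py; the real
# script is not part of this diff. It rewrites the assignment shown above.
import re
import sys
from pathlib import Path


def bump(new_version: str, init_file: str = "fenix_mcp/__init__.py") -> None:
    path = Path(init_file)
    source = path.read_text(encoding="utf-8")
    updated = re.sub(
        r'__version__ = "[^"]+"',
        f'__version__ = "{new_version}"',
        source,
    )
    path.write_text(updated, encoding="utf-8")


if __name__ == "__main__":
    bump(sys.argv[1])  # e.g. python scripts/bump_version.py 0.2.2
```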
@@ -35,7 +35,9 @@ class Tool(ABC):
              "inputSchema": self.request_model.model_json_schema(),
          }

-     async def execute(self, raw_arguments: Dict[str, Any], context: AppContext) -> ToolResponse:
+     async def execute(
+         self, raw_arguments: Dict[str, Any], context: AppContext
+     ) -> ToolResponse:
          """Validate raw arguments and run the tool."""
          payload = self.request_model.model_validate(raw_arguments or {})
          return await self.run(payload, context)
@@ -43,4 +45,3 @@ class Tool(ABC):
      @abstractmethod
      async def run(self, payload: ToolRequest, context: AppContext) -> ToolResponse:
          """Execute business logic and return a MCP-formatted response."""
-
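The `execute()` change above is a pure black reflow: arguments are still validated against `request_model` and handed to the abstract `run()`. An illustrative subclass based only on the signatures visible in these hunks; `EchoRequest` and `EchoTool` are hypothetical, not part of the package:

```python
# Illustrative only: a tool wired through the Tool/ToolRequest contract above.
# EchoRequest/EchoTool are hypothetical and not part of fenix-mcp.
from pydantic import Field

from fenix_mcp.application.presenters import text
from fenix_mcp.application.tool_base import Tool, ToolRequest
from fenix_mcp.infrastructure.context import AppContext


class EchoRequest(ToolRequest):
    message: str = Field(description="Text to echo back.")


class EchoTool(Tool):
    name = "echo"
    description = "Echoes the received message."
    request_model = EchoRequest

    async def run(self, payload: EchoRequest, context: AppContext):
        # execute() has already validated raw arguments into `payload`.
        return text(f"echo: {payload.message}")
```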
@@ -22,7 +22,9 @@ class ToolRegistry:
      def list_definitions(self) -> List[dict]:
          return [tool.schema() for tool in self._tools.values()]

-     async def execute(self, name: str, arguments: dict, context: AppContext) -> ToolResponse:
+     async def execute(
+         self, name: str, arguments: dict, context: AppContext
+     ) -> ToolResponse:
          try:
              tool = self._tools[name]
          except KeyError as exc:
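`ToolRegistry.execute()` gets the same treatment: only the signature wrapping changes, while the lookup and `KeyError` handling are untouched. A minimal usage sketch of the API visible here; how tools are registered is not shown in this diff, so the registry is assumed to be already populated:

```python
# Usage sketch of the ToolRegistry surface visible above; `registry` is
# assumed to be populated elsewhere (registration is not part of this diff).
async def call_tool(registry, context, name: str, arguments: dict):
    definitions = registry.list_definitions()  # JSON-schema tool definitions
    # Unknown names surface through the KeyError branch shown in the hunk.
    response = await registry.execute(name, arguments, context)
    return definitions, response
```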
@@ -3,9 +3,7 @@

  from __future__ import annotations

- from pydantic import BaseModel
-
- from fenix_mcp.application.presenters import key_value, text
+ from fenix_mcp.application.presenters import key_value
  from fenix_mcp.application.tool_base import Tool, ToolRequest
  from fenix_mcp.infrastructure.context import AppContext

@@ -21,26 +21,35 @@ class InitializeAction(str, Enum):


  class InitializeRequest(ToolRequest):
-     action: InitializeAction = Field(description="Operação de inicialização a executar.")
+     action: InitializeAction = Field(
+         description="Operação de inicialização a executar."
+     )
      include_user_docs: bool = Field(
          default=True,
-         description="Inclui documentos pessoais durante a inicialização (apenas para ação init).",
+         description=(
+             "Inclui documentos pessoais durante a inicialização "
+             "(apenas para ação init)."
+         ),
      )
      limit: int = Field(
          default=50,
          ge=1,
          le=200,
-         description="Quantidade máxima de documentos principais/pessoais carregados.",
+         description=("Quantidade máxima de documentos principais/pessoais carregados."),
      )
      answers: Optional[List[str]] = Field(
          default=None,
-         description="Lista com 9 respostas textuais para processar o setup personalizado.",
+         description=(
+             "Lista com 9 respostas textuais para processar o setup personalizado."
+         ),
      )


  class InitializeTool(Tool):
      name = "initialize"
-     description = "Inicializa o ambiente do Fênix Cloud ou processa o setup personalizado."
+     description = (
+         "Inicializa o ambiente do Fênix Cloud ou processa o setup personalizado."
+     )
      request_model = InitializeRequest

      def __init__(self, context: AppContext):
@@ -62,7 +71,10 @@ class InitializeTool(Tool):
              )
          except Exception as exc: # pragma: no cover - defensive
              self._context.logger.error("Initialize failed: %s", exc)
-             return text("❌ Falha ao carregar dados de inicialização. Verifique se o token tem acesso à API.")
+             return text(
+                 "❌ Falha ao carregar dados de inicialização. "
+                 "Verifique se o token tem acesso à API."
+             )

          if (
              not data.core_documents
@@ -77,8 +89,9 @@
              "profile": data.profile,
              "core_documents": data.core_documents,
              "user_documents": data.user_documents if payload.include_user_docs else [],
-             "recent_memories": data.recent_memories,
          }
+         if data.recent_memories:
+             payload_dict["recent_memories"] = data.recent_memories

          message_lines = [
              "📦 **Dados de inicialização completos**",
@@ -4,7 +4,7 @@
  from __future__ import annotations

  from enum import Enum
- from typing import Any, Dict, Iterable, List, Optional
+ from typing import Any, Dict, List, Optional

  from pydantic import Field

@@ -21,10 +21,16 @@ class IntelligenceAction(str, Enum):
          obj.description = description
          return obj

-     SMART_CREATE = ("memory_smart_create", "Cria memórias inteligentes com análise de similaridade.")
+     SMART_CREATE = (
+         "memory_smart_create",
+         "Cria memórias inteligentes com análise de similaridade.",
+     )
      QUERY = ("memory_query", "Lista memórias aplicando filtros e busca textual.")
      SIMILARITY = ("memory_similarity", "Busca memórias similares a um conteúdo base.")
-     CONSOLIDATE = ("memory_consolidate", "Consolida múltiplas memórias em uma principal.")
+     CONSOLIDATE = (
+         "memory_consolidate",
+         "Consolida múltiplas memórias em uma principal.",
+     )
      PRIORITY = ("memory_priority", "Retorna memórias ordenadas por prioridade.")
      ANALYTICS = ("memory_analytics", "Calcula métricas e analytics das memórias.")
      UPDATE = ("memory_update", "Atualiza campos de uma memória existente.")
@@ -47,17 +53,24 @@ class IntelligenceAction(str, Enum):

  ACTION_FIELD_DESCRIPTION = (
      "Ação de inteligência a executar. Use um dos valores: "
-     + ", ".join(f"`{member.value}` ({member.description.rstrip('.')})." for member in IntelligenceAction)
+     + ", ".join(
+         f"`{member.value}` ({member.description.rstrip('.')})."
+         for member in IntelligenceAction
+     )
  )


  class IntelligenceRequest(ToolRequest):
      action: IntelligenceAction = Field(description=ACTION_FIELD_DESCRIPTION)
      title: Optional[str] = Field(default=None, description="Título da memória.")
-     content: Optional[str] = Field(default=None, description="Conteúdo/texto da memória.")
+     content: Optional[str] = Field(
+         default=None, description="Conteúdo/texto da memória."
+     )
      context: Optional[str] = Field(default=None, description="Contexto adicional.")
      source: Optional[str] = Field(default=None, description="Fonte da memória.")
-     importance: str = Field(default="medium", description="Nível de importância da memória.")
+     importance: str = Field(
+         default="medium", description="Nível de importância da memória."
+     )
      tags: Optional[List[str]] = Field(default=None, description="Tags da memória.")
      limit: int = Field(default=20, ge=1, le=100, description="Limite de resultados.")
      offset: int = Field(default=0, ge=0, description="Offset para paginação.")
@@ -65,23 +78,39 @@ class IntelligenceRequest(ToolRequest):
      category: Optional[str] = Field(default=None, description="Categoria para filtro.")
      date_from: Optional[str] = Field(default=None, description="Filtro inicial (ISO).")
      date_to: Optional[str] = Field(default=None, description="Filtro final (ISO).")
-     threshold: float = Field(default=0.8, ge=0, le=1, description="Limite mínimo de similaridade.")
-     max_results: int = Field(default=5, ge=1, le=20, description="Máximo de memórias similares.")
-     memory_ids: Optional[List[str]] = Field(default=None, description="IDs para consolidação.")
+     threshold: float = Field(
+         default=0.8, ge=0, le=1, description="Limite mínimo de similaridade."
+     )
+     max_results: int = Field(
+         default=5, ge=1, le=20, description="Máximo de memórias similares."
+     )
+     memory_ids: Optional[List[str]] = Field(
+         default=None, description="IDs para consolidação."
+     )
      strategy: str = Field(default="merge", description="Estratégia de consolidação.")
-     time_range: str = Field(default="month", description="Janela de tempo para analytics.")
+     time_range: str = Field(
+         default="month", description="Janela de tempo para analytics."
+     )
      group_by: str = Field(default="category", description="Agrupamento para analytics.")
      id: Optional[str] = Field(default=None, description="ID da memória para update.")
-     documentation_item_id: Optional[str] = Field(default=None, description="ID de documentação relacionada.")
+     documentation_item_id: Optional[str] = Field(
+         default=None, description="ID de documentação relacionada."
+     )
      mode_id: Optional[str] = Field(default=None, description="ID do modo relacionado.")
      rule_id: Optional[str] = Field(default=None, description="ID da regra relacionada.")
-     work_item_id: Optional[str] = Field(default=None, description="ID do work item relacionado.")
-     sprint_id: Optional[str] = Field(default=None, description="ID do sprint relacionado.")
+     work_item_id: Optional[str] = Field(
+         default=None, description="ID do work item relacionado."
+     )
+     sprint_id: Optional[str] = Field(
+         default=None, description="ID do sprint relacionado."
+     )


  class IntelligenceTool(Tool):
      name = "intelligence"
-     description = "Operações de inteligência do Fênix Cloud (memórias e smart operations)."
+     description = (
+         "Operações de inteligência do Fênix Cloud (memórias e smart operations)."
+     )
      request_model = IntelligenceRequest

      def __init__(self, context: AppContext):
@@ -231,7 +260,10 @@ class IntelligenceTool(Tool):
          )

      async def _handle_help(self):
-         return text("📚 **Ações disponíveis para intelligence**\n\n" + IntelligenceAction.formatted_help())
+         return text(
+             "📚 **Ações disponíveis para intelligence**\n\n"
+             + IntelligenceAction.formatted_help()
+         )


  def _format_memory(memory: Dict[str, Any]) -> str:
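Most of the `intelligence` hunks are the same black-driven reflow, including the `IntelligenceAction` members that pair an API value with a human-readable description. A standalone illustration of that str-Enum pattern; this is not the project's exact class, and the descriptions are English placeholders:

```python
# Standalone illustration of the value-plus-description Enum pattern used by
# IntelligenceAction above; not the project's exact class.
from enum import Enum


class Action(str, Enum):
    def __new__(cls, value: str, description: str):
        obj = str.__new__(cls, value)
        obj._value_ = value
        obj.description = description
        return obj

    QUERY = ("memory_query", "Lists memories with filters and text search.")
    SIMILARITY = ("memory_similarity", "Finds memories similar to a base text.")


print(Action.QUERY.value)        # "memory_query"
print(Action.QUERY.description)  # "Lists memories with filters and text search."
```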