alita-sdk 0.3.257__py3-none-any.whl → 0.3.584__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release.
This version of alita-sdk might be problematic.
- alita_sdk/cli/__init__.py +10 -0
- alita_sdk/cli/__main__.py +17 -0
- alita_sdk/cli/agent/__init__.py +5 -0
- alita_sdk/cli/agent/default.py +258 -0
- alita_sdk/cli/agent_executor.py +155 -0
- alita_sdk/cli/agent_loader.py +215 -0
- alita_sdk/cli/agent_ui.py +228 -0
- alita_sdk/cli/agents.py +3794 -0
- alita_sdk/cli/callbacks.py +647 -0
- alita_sdk/cli/cli.py +168 -0
- alita_sdk/cli/config.py +306 -0
- alita_sdk/cli/context/__init__.py +30 -0
- alita_sdk/cli/context/cleanup.py +198 -0
- alita_sdk/cli/context/manager.py +731 -0
- alita_sdk/cli/context/message.py +285 -0
- alita_sdk/cli/context/strategies.py +289 -0
- alita_sdk/cli/context/token_estimation.py +127 -0
- alita_sdk/cli/formatting.py +182 -0
- alita_sdk/cli/input_handler.py +419 -0
- alita_sdk/cli/inventory.py +1073 -0
- alita_sdk/cli/mcp_loader.py +315 -0
- alita_sdk/cli/toolkit.py +327 -0
- alita_sdk/cli/toolkit_loader.py +85 -0
- alita_sdk/cli/tools/__init__.py +43 -0
- alita_sdk/cli/tools/approval.py +224 -0
- alita_sdk/cli/tools/filesystem.py +1751 -0
- alita_sdk/cli/tools/planning.py +389 -0
- alita_sdk/cli/tools/terminal.py +414 -0
- alita_sdk/community/__init__.py +72 -12
- alita_sdk/community/inventory/__init__.py +236 -0
- alita_sdk/community/inventory/config.py +257 -0
- alita_sdk/community/inventory/enrichment.py +2137 -0
- alita_sdk/community/inventory/extractors.py +1469 -0
- alita_sdk/community/inventory/ingestion.py +3172 -0
- alita_sdk/community/inventory/knowledge_graph.py +1457 -0
- alita_sdk/community/inventory/parsers/__init__.py +218 -0
- alita_sdk/community/inventory/parsers/base.py +295 -0
- alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
- alita_sdk/community/inventory/parsers/go_parser.py +851 -0
- alita_sdk/community/inventory/parsers/html_parser.py +389 -0
- alita_sdk/community/inventory/parsers/java_parser.py +593 -0
- alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
- alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
- alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
- alita_sdk/community/inventory/parsers/python_parser.py +604 -0
- alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
- alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
- alita_sdk/community/inventory/parsers/text_parser.py +322 -0
- alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
- alita_sdk/community/inventory/patterns/__init__.py +61 -0
- alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
- alita_sdk/community/inventory/patterns/loader.py +348 -0
- alita_sdk/community/inventory/patterns/registry.py +198 -0
- alita_sdk/community/inventory/presets.py +535 -0
- alita_sdk/community/inventory/retrieval.py +1403 -0
- alita_sdk/community/inventory/toolkit.py +173 -0
- alita_sdk/community/inventory/toolkit_utils.py +176 -0
- alita_sdk/community/inventory/visualize.py +1370 -0
- alita_sdk/configurations/__init__.py +11 -0
- alita_sdk/configurations/ado.py +148 -2
- alita_sdk/configurations/azure_search.py +1 -1
- alita_sdk/configurations/bigquery.py +1 -1
- alita_sdk/configurations/bitbucket.py +94 -2
- alita_sdk/configurations/browser.py +18 -0
- alita_sdk/configurations/carrier.py +19 -0
- alita_sdk/configurations/confluence.py +130 -1
- alita_sdk/configurations/delta_lake.py +1 -1
- alita_sdk/configurations/figma.py +76 -5
- alita_sdk/configurations/github.py +65 -1
- alita_sdk/configurations/gitlab.py +81 -0
- alita_sdk/configurations/google_places.py +17 -0
- alita_sdk/configurations/jira.py +103 -0
- alita_sdk/configurations/openapi.py +323 -0
- alita_sdk/configurations/postman.py +1 -1
- alita_sdk/configurations/qtest.py +72 -3
- alita_sdk/configurations/report_portal.py +115 -0
- alita_sdk/configurations/salesforce.py +19 -0
- alita_sdk/configurations/service_now.py +1 -12
- alita_sdk/configurations/sharepoint.py +167 -0
- alita_sdk/configurations/sonar.py +18 -0
- alita_sdk/configurations/sql.py +20 -0
- alita_sdk/configurations/testio.py +101 -0
- alita_sdk/configurations/testrail.py +88 -0
- alita_sdk/configurations/xray.py +94 -1
- alita_sdk/configurations/zephyr_enterprise.py +94 -1
- alita_sdk/configurations/zephyr_essential.py +95 -0
- alita_sdk/runtime/clients/artifact.py +21 -4
- alita_sdk/runtime/clients/client.py +458 -67
- alita_sdk/runtime/clients/mcp_discovery.py +342 -0
- alita_sdk/runtime/clients/mcp_manager.py +262 -0
- alita_sdk/runtime/clients/sandbox_client.py +352 -0
- alita_sdk/runtime/langchain/_constants_bkup.py +1318 -0
- alita_sdk/runtime/langchain/assistant.py +183 -43
- alita_sdk/runtime/langchain/constants.py +647 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +209 -31
- alita_sdk/runtime/langchain/document_loaders/AlitaImageLoader.py +1 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLinesLoader.py +77 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +10 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaMarkdownLoader.py +66 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaPDFLoader.py +79 -10
- alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py +52 -15
- alita_sdk/runtime/langchain/document_loaders/AlitaPythonLoader.py +9 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py +1 -4
- alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py +15 -2
- alita_sdk/runtime/langchain/document_loaders/ImageParser.py +30 -0
- alita_sdk/runtime/langchain/document_loaders/constants.py +189 -41
- alita_sdk/runtime/langchain/interfaces/llm_processor.py +4 -2
- alita_sdk/runtime/langchain/langraph_agent.py +493 -105
- alita_sdk/runtime/langchain/utils.py +118 -8
- alita_sdk/runtime/llms/preloaded.py +2 -6
- alita_sdk/runtime/models/mcp_models.py +61 -0
- alita_sdk/runtime/skills/__init__.py +91 -0
- alita_sdk/runtime/skills/callbacks.py +498 -0
- alita_sdk/runtime/skills/discovery.py +540 -0
- alita_sdk/runtime/skills/executor.py +610 -0
- alita_sdk/runtime/skills/input_builder.py +371 -0
- alita_sdk/runtime/skills/models.py +330 -0
- alita_sdk/runtime/skills/registry.py +355 -0
- alita_sdk/runtime/skills/skill_runner.py +330 -0
- alita_sdk/runtime/toolkits/__init__.py +28 -0
- alita_sdk/runtime/toolkits/application.py +14 -4
- alita_sdk/runtime/toolkits/artifact.py +25 -9
- alita_sdk/runtime/toolkits/datasource.py +13 -6
- alita_sdk/runtime/toolkits/mcp.py +782 -0
- alita_sdk/runtime/toolkits/planning.py +178 -0
- alita_sdk/runtime/toolkits/skill_router.py +238 -0
- alita_sdk/runtime/toolkits/subgraph.py +11 -6
- alita_sdk/runtime/toolkits/tools.py +314 -70
- alita_sdk/runtime/toolkits/vectorstore.py +11 -5
- alita_sdk/runtime/tools/__init__.py +24 -0
- alita_sdk/runtime/tools/application.py +16 -4
- alita_sdk/runtime/tools/artifact.py +367 -33
- alita_sdk/runtime/tools/data_analysis.py +183 -0
- alita_sdk/runtime/tools/function.py +100 -4
- alita_sdk/runtime/tools/graph.py +81 -0
- alita_sdk/runtime/tools/image_generation.py +218 -0
- alita_sdk/runtime/tools/llm.py +1032 -177
- alita_sdk/runtime/tools/loop.py +3 -1
- alita_sdk/runtime/tools/loop_output.py +3 -1
- alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
- alita_sdk/runtime/tools/mcp_remote_tool.py +181 -0
- alita_sdk/runtime/tools/mcp_server_tool.py +3 -1
- alita_sdk/runtime/tools/planning/__init__.py +36 -0
- alita_sdk/runtime/tools/planning/models.py +246 -0
- alita_sdk/runtime/tools/planning/wrapper.py +607 -0
- alita_sdk/runtime/tools/router.py +2 -1
- alita_sdk/runtime/tools/sandbox.py +375 -0
- alita_sdk/runtime/tools/skill_router.py +776 -0
- alita_sdk/runtime/tools/tool.py +3 -1
- alita_sdk/runtime/tools/vectorstore.py +69 -65
- alita_sdk/runtime/tools/vectorstore_base.py +163 -90
- alita_sdk/runtime/utils/AlitaCallback.py +137 -21
- alita_sdk/runtime/utils/constants.py +5 -1
- alita_sdk/runtime/utils/mcp_client.py +492 -0
- alita_sdk/runtime/utils/mcp_oauth.py +361 -0
- alita_sdk/runtime/utils/mcp_sse_client.py +434 -0
- alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
- alita_sdk/runtime/utils/streamlit.py +41 -14
- alita_sdk/runtime/utils/toolkit_utils.py +28 -9
- alita_sdk/runtime/utils/utils.py +48 -0
- alita_sdk/tools/__init__.py +135 -37
- alita_sdk/tools/ado/__init__.py +2 -2
- alita_sdk/tools/ado/repos/__init__.py +16 -19
- alita_sdk/tools/ado/repos/repos_wrapper.py +12 -20
- alita_sdk/tools/ado/test_plan/__init__.py +27 -8
- alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +56 -28
- alita_sdk/tools/ado/wiki/__init__.py +28 -12
- alita_sdk/tools/ado/wiki/ado_wrapper.py +114 -40
- alita_sdk/tools/ado/work_item/__init__.py +28 -12
- alita_sdk/tools/ado/work_item/ado_wrapper.py +95 -11
- alita_sdk/tools/advanced_jira_mining/__init__.py +13 -8
- alita_sdk/tools/aws/delta_lake/__init__.py +15 -11
- alita_sdk/tools/aws/delta_lake/tool.py +5 -1
- alita_sdk/tools/azure_ai/search/__init__.py +14 -8
- alita_sdk/tools/base/tool.py +5 -1
- alita_sdk/tools/base_indexer_toolkit.py +454 -110
- alita_sdk/tools/bitbucket/__init__.py +28 -19
- alita_sdk/tools/bitbucket/api_wrapper.py +285 -27
- alita_sdk/tools/bitbucket/cloud_api_wrapper.py +5 -5
- alita_sdk/tools/browser/__init__.py +41 -16
- alita_sdk/tools/browser/crawler.py +3 -1
- alita_sdk/tools/browser/utils.py +15 -6
- alita_sdk/tools/carrier/__init__.py +18 -17
- alita_sdk/tools/carrier/backend_reports_tool.py +8 -4
- alita_sdk/tools/carrier/excel_reporter.py +8 -4
- alita_sdk/tools/chunkers/__init__.py +3 -1
- alita_sdk/tools/chunkers/code/codeparser.py +1 -1
- alita_sdk/tools/chunkers/sematic/json_chunker.py +2 -1
- alita_sdk/tools/chunkers/sematic/markdown_chunker.py +97 -6
- alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
- alita_sdk/tools/chunkers/universal_chunker.py +270 -0
- alita_sdk/tools/cloud/aws/__init__.py +12 -7
- alita_sdk/tools/cloud/azure/__init__.py +12 -7
- alita_sdk/tools/cloud/gcp/__init__.py +12 -7
- alita_sdk/tools/cloud/k8s/__init__.py +12 -7
- alita_sdk/tools/code/linter/__init__.py +10 -8
- alita_sdk/tools/code/loaders/codesearcher.py +3 -2
- alita_sdk/tools/code/sonar/__init__.py +21 -13
- alita_sdk/tools/code_indexer_toolkit.py +199 -0
- alita_sdk/tools/confluence/__init__.py +22 -14
- alita_sdk/tools/confluence/api_wrapper.py +197 -58
- alita_sdk/tools/confluence/loader.py +14 -2
- alita_sdk/tools/custom_open_api/__init__.py +12 -5
- alita_sdk/tools/elastic/__init__.py +11 -8
- alita_sdk/tools/elitea_base.py +546 -64
- alita_sdk/tools/figma/__init__.py +60 -11
- alita_sdk/tools/figma/api_wrapper.py +1400 -167
- alita_sdk/tools/figma/figma_client.py +73 -0
- alita_sdk/tools/figma/toon_tools.py +2748 -0
- alita_sdk/tools/github/__init__.py +18 -17
- alita_sdk/tools/github/api_wrapper.py +9 -26
- alita_sdk/tools/github/github_client.py +81 -12
- alita_sdk/tools/github/schemas.py +2 -1
- alita_sdk/tools/github/tool.py +5 -1
- alita_sdk/tools/gitlab/__init__.py +19 -13
- alita_sdk/tools/gitlab/api_wrapper.py +256 -80
- alita_sdk/tools/gitlab_org/__init__.py +14 -10
- alita_sdk/tools/google/bigquery/__init__.py +14 -13
- alita_sdk/tools/google/bigquery/tool.py +5 -1
- alita_sdk/tools/google_places/__init__.py +21 -11
- alita_sdk/tools/jira/__init__.py +22 -11
- alita_sdk/tools/jira/api_wrapper.py +315 -168
- alita_sdk/tools/keycloak/__init__.py +11 -8
- alita_sdk/tools/localgit/__init__.py +9 -3
- alita_sdk/tools/localgit/local_git.py +62 -54
- alita_sdk/tools/localgit/tool.py +5 -1
- alita_sdk/tools/memory/__init__.py +38 -14
- alita_sdk/tools/non_code_indexer_toolkit.py +7 -2
- alita_sdk/tools/ocr/__init__.py +11 -8
- alita_sdk/tools/openapi/__init__.py +491 -106
- alita_sdk/tools/openapi/api_wrapper.py +1357 -0
- alita_sdk/tools/openapi/tool.py +20 -0
- alita_sdk/tools/pandas/__init__.py +20 -12
- alita_sdk/tools/pandas/api_wrapper.py +40 -45
- alita_sdk/tools/pandas/dataframe/generator/base.py +3 -1
- alita_sdk/tools/postman/__init__.py +11 -11
- alita_sdk/tools/postman/api_wrapper.py +19 -8
- alita_sdk/tools/postman/postman_analysis.py +8 -1
- alita_sdk/tools/pptx/__init__.py +11 -10
- alita_sdk/tools/qtest/__init__.py +22 -14
- alita_sdk/tools/qtest/api_wrapper.py +1784 -88
- alita_sdk/tools/rally/__init__.py +13 -10
- alita_sdk/tools/report_portal/__init__.py +23 -16
- alita_sdk/tools/salesforce/__init__.py +22 -16
- alita_sdk/tools/servicenow/__init__.py +21 -16
- alita_sdk/tools/servicenow/api_wrapper.py +1 -1
- alita_sdk/tools/sharepoint/__init__.py +17 -14
- alita_sdk/tools/sharepoint/api_wrapper.py +179 -39
- alita_sdk/tools/sharepoint/authorization_helper.py +191 -1
- alita_sdk/tools/sharepoint/utils.py +8 -2
- alita_sdk/tools/slack/__init__.py +13 -8
- alita_sdk/tools/sql/__init__.py +22 -19
- alita_sdk/tools/sql/api_wrapper.py +71 -23
- alita_sdk/tools/testio/__init__.py +21 -13
- alita_sdk/tools/testrail/__init__.py +13 -11
- alita_sdk/tools/testrail/api_wrapper.py +214 -46
- alita_sdk/tools/utils/__init__.py +28 -4
- alita_sdk/tools/utils/content_parser.py +241 -55
- alita_sdk/tools/utils/text_operations.py +254 -0
- alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +83 -27
- alita_sdk/tools/xray/__init__.py +18 -14
- alita_sdk/tools/xray/api_wrapper.py +58 -113
- alita_sdk/tools/yagmail/__init__.py +9 -3
- alita_sdk/tools/zephyr/__init__.py +12 -7
- alita_sdk/tools/zephyr_enterprise/__init__.py +16 -9
- alita_sdk/tools/zephyr_enterprise/api_wrapper.py +30 -15
- alita_sdk/tools/zephyr_essential/__init__.py +16 -10
- alita_sdk/tools/zephyr_essential/api_wrapper.py +297 -54
- alita_sdk/tools/zephyr_essential/client.py +6 -4
- alita_sdk/tools/zephyr_scale/__init__.py +13 -8
- alita_sdk/tools/zephyr_scale/api_wrapper.py +39 -31
- alita_sdk/tools/zephyr_squad/__init__.py +12 -7
- {alita_sdk-0.3.257.dist-info → alita_sdk-0.3.584.dist-info}/METADATA +184 -37
- alita_sdk-0.3.584.dist-info/RECORD +452 -0
- alita_sdk-0.3.584.dist-info/entry_points.txt +2 -0
- alita_sdk/tools/bitbucket/tools.py +0 -304
- alita_sdk-0.3.257.dist-info/RECORD +0 -343
- {alita_sdk-0.3.257.dist-info → alita_sdk-0.3.584.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.257.dist-info → alita_sdk-0.3.584.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.257.dist-info → alita_sdk-0.3.584.dist-info}/top_level.txt +0 -0
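
Among the most heavily reworked modules in this release is the Figma toolkit (alita_sdk/tools/figma/api_wrapper.py, +1400 -167), whose diff is excerpted below. The centerpiece of its new `extra_params` output controls is a `process_fields` reducer: when a serialized JSON response exceeds `limit`, keys outside `fields_retain` are dropped from `depth_start` onward, and everything at or beyond `depth_end` is cut. As a quick orientation before the diff, here is a minimal standalone sketch of that contract; the helper body mirrors the `process_fields` function added in the diff, while the sample node tree is invented for illustration:

    import json

    def process_fields(obj, fields_retain=None, fields_remove=None, depth_start=1, depth_end=2, depth=1):
        # Keys listed in fields_retain win over fields_remove; filtering starts at depth_start.
        fields_retain = set(fields_retain or [])
        fields_remove = set(fields_remove or []) - fields_retain
        retained, removed = set(), set()

        def _process(o, d):
            if depth_end is not None and d >= depth_end:
                return None  # cut everything at or beyond depth_end
            if isinstance(o, dict):
                result = {}
                for k, v in o.items():
                    if k in fields_remove:
                        removed.add(k)
                        continue
                    if d >= depth_start:
                        if k in fields_retain:
                            retained.add(k)
                            result[k] = _process(v, d + 1)
                        else:
                            removed.add(k)  # key not retained at this depth
                    else:
                        result[k] = _process(v, d + 1)
                return result
            elif isinstance(o, list):
                return [_process(item, d + 1) for item in o]
            return o

        new_obj = _process(obj, depth)
        return {"result": json.dumps(new_obj),
                "retained_fields": list(retained),
                "removed_fields": list(removed)}

    # Invented Figma-like node tree for the demo.
    node = {
        "id": "0:1", "name": "Page 1", "type": "CANVAS",
        "fills": [{"type": "SOLID"}],
        "children": [{"id": "1:2", "name": "Frame A", "type": "FRAME", "strokes": []}],
    }
    out = process_fields(node, fields_retain=["id", "name", "type", "children"], depth_start=1, depth_end=6)
    print(out["result"])                  # keeps id/name/type/children at every level
    print(sorted(out["removed_fields"]))  # ['fills', 'strokes']

The retained/removed field lists reported here feed the "## NOTE" banner that the new `process_output` wrapper prepends whenever it truncates an oversized response.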
|
@@ -1,21 +1,124 @@
|
|
|
1
|
-
import base64
|
|
2
1
|
import functools
|
|
3
2
|
import json
|
|
4
3
|
import logging
|
|
5
4
|
import re
|
|
5
|
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
|
6
6
|
from enum import Enum
|
|
7
7
|
from typing import Dict, List, Generator, Optional, Union
|
|
8
|
+
from urllib.parse import urlparse, parse_qs
|
|
8
9
|
|
|
9
10
|
import requests
|
|
10
|
-
from FigmaPy import FigmaPy
|
|
11
11
|
from langchain_core.documents import Document
|
|
12
12
|
from langchain_core.tools import ToolException
|
|
13
13
|
from pydantic import Field, PrivateAttr, create_model, model_validator, SecretStr
|
|
14
14
|
|
|
15
|
-
from ..elitea_base import BaseVectorStoreToolApiWrapper, extend_with_vector_tools
|
|
16
|
-
from ..utils.content_parser import load_content_from_bytes
|
|
17
15
|
|
|
18
|
-
|
|
16
|
+
# User-friendly error messages for common Figma API errors
|
|
17
|
+
FIGMA_ERROR_MESSAGES = {
|
|
18
|
+
429: "Figma API rate limit exceeded. Please wait a moment and try again.",
|
|
19
|
+
403: "Access denied. Please check your Figma API token has access to this file.",
|
|
20
|
+
404: "File or node not found. Please verify the file key or node ID is correct.",
|
|
21
|
+
401: "Authentication failed. Please check your Figma API token is valid.",
|
|
22
|
+
500: "Figma server error. Please try again later.",
|
|
23
|
+
503: "Figma service temporarily unavailable. Please try again later.",
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def _handle_figma_error(e: ToolException) -> str:
|
|
28
|
+
"""
|
|
29
|
+
Convert a ToolException from Figma API into a user-friendly error message.
|
|
30
|
+
Returns a clean error string without technical details.
|
|
31
|
+
"""
|
|
32
|
+
error_str = str(e)
|
|
33
|
+
|
|
34
|
+
# Extract status code from error message
|
|
35
|
+
for code, message in FIGMA_ERROR_MESSAGES.items():
|
|
36
|
+
if f"error {code}:" in error_str.lower() or f"status\": {code}" in error_str:
|
|
37
|
+
return message
|
|
38
|
+
|
|
39
|
+
# Handle other common patterns
|
|
40
|
+
if "rate limit" in error_str.lower():
|
|
41
|
+
return FIGMA_ERROR_MESSAGES[429]
|
|
42
|
+
if "not found" in error_str.lower():
|
|
43
|
+
return FIGMA_ERROR_MESSAGES[404]
|
|
44
|
+
if "forbidden" in error_str.lower() or "access denied" in error_str.lower():
|
|
45
|
+
return FIGMA_ERROR_MESSAGES[403]
|
|
46
|
+
if "unauthorized" in error_str.lower():
|
|
47
|
+
return FIGMA_ERROR_MESSAGES[401]
|
|
48
|
+
|
|
49
|
+
# Fallback: return a generic but clean message
|
|
50
|
+
return f"Figma API request failed. Please try again or check your file key and permissions."
|
|
51
|
+
|
|
52
|
+
from ..non_code_indexer_toolkit import NonCodeIndexerToolkit
|
|
53
|
+
from ..utils.available_tools_decorator import extend_with_parent_available_tools
|
|
54
|
+
from ..utils.content_parser import _load_content_from_bytes_with_prompt
|
|
55
|
+
from .figma_client import AlitaFigmaPy
|
|
56
|
+
from .toon_tools import (
|
|
57
|
+
TOONSerializer,
|
|
58
|
+
process_page_to_toon_data,
|
|
59
|
+
process_frame_to_toon_data,
|
|
60
|
+
extract_text_by_role,
|
|
61
|
+
extract_components,
|
|
62
|
+
detect_sequences,
|
|
63
|
+
group_variants,
|
|
64
|
+
infer_cta_destination,
|
|
65
|
+
FrameDetailTOONSchema,
|
|
66
|
+
AnalyzeFileSchema,
|
|
67
|
+
)
|
|
68
|
+
|
|
69
|
+
GLOBAL_LIMIT = 1000000
|
|
70
|
+
GLOBAL_RETAIN = ['id', 'name', 'type', 'document', 'children']
|
|
71
|
+
GLOBAL_REMOVE = []
|
|
72
|
+
GLOBAL_DEPTH_START = 1
|
|
73
|
+
GLOBAL_DEPTH_END = 6
|
|
74
|
+
DEFAULT_NUMBER_OF_THREADS = 5 # valid range for number_of_threads is 1..5
|
|
75
|
+
# Default prompts for image analysis and summarization reused across toolkit and wrapper
|
|
76
|
+
DEFAULT_FIGMA_IMAGES_PROMPT: Dict[str, str] = {
|
|
77
|
+
"prompt": (
|
|
78
|
+
"You are an AI model for image analysis. For each image, first identify its type "
|
|
79
|
+
"(diagram, screenshot, photograph, illustration/drawing, text-centric, or mixed), "
|
|
80
|
+
"then describe all visible elements and extract any readable text. For diagrams, "
|
|
81
|
+
"capture titles, labels, legends, axes, and all numerical values, and summarize key "
|
|
82
|
+
"patterns or trends. For screenshots, describe the interface or page, key UI elements, "
|
|
83
|
+
"and any conversations or messages with participants and timestamps if visible. For "
|
|
84
|
+
"photos and illustrations, describe the setting, main objects/people, their actions, "
|
|
85
|
+
"style, colors, and composition. Be precise and thorough; when something is unclear or "
|
|
86
|
+
"illegible, state that explicitly instead of guessing."
|
|
87
|
+
)
|
|
88
|
+
}
|
|
89
|
+
DEFAULT_FIGMA_SUMMARY_PROMPT: Dict[str, str] = {
|
|
90
|
+
"prompt": (
|
|
91
|
+
"You are summarizing a visual design document exported from Figma as a sequence of images and text. "
|
|
92
|
+
"Provide a clear, concise overview of the main purpose, key elements, and notable changes or variations in the screens. "
|
|
93
|
+
"Infer a likely user flow or sequence of steps across the screens, calling out entry points, decisions, and outcomes. "
|
|
94
|
+
"Explain how this design could impact planning, development, testing, and review activities in a typical software lifecycle. "
|
|
95
|
+
"Return the result as structured Markdown with headings and bullet lists so it can be reused in SDLC documentation."
|
|
96
|
+
)
|
|
97
|
+
}
|
|
98
|
+
EXTRA_PARAMS = (
|
|
99
|
+
Optional[Dict[str, Union[str, int, List, None]]],
|
|
100
|
+
Field(
|
|
101
|
+
description=(
|
|
102
|
+
"Optional output controls: `limit` (max characters, always applied), `regexp` (regex cleanup on text), "
|
|
103
|
+
"`fields_retain`/`fields_remove` (which keys to keep or drop), and `depth_start`/`depth_end` (depth range "
|
|
104
|
+
"where that key filtering is applied). Field/depth filters are only used when the serialized JSON result "
|
|
105
|
+
"exceeds `limit` to reduce its size."
|
|
106
|
+
),
|
|
107
|
+
default={
|
|
108
|
+
"limit": GLOBAL_LIMIT, "regexp": None,
|
|
109
|
+
"fields_retain": GLOBAL_RETAIN, "fields_remove": GLOBAL_REMOVE,
|
|
110
|
+
"depth_start": GLOBAL_DEPTH_START, "depth_end": GLOBAL_DEPTH_END,
|
|
111
|
+
},
|
|
112
|
+
examples=[
|
|
113
|
+
{
|
|
114
|
+
"limit": "1000",
|
|
115
|
+
"regexp": r'("strokes"|"fills")\s*:\s*("[^"]*"|[^\s,}\[]+)\s*(?=,|\}|\n)',
|
|
116
|
+
"fields_retain": GLOBAL_RETAIN, "fields_remove": GLOBAL_REMOVE,
|
|
117
|
+
"depth_start": GLOBAL_DEPTH_START, "depth_end": GLOBAL_DEPTH_END,
|
|
118
|
+
}
|
|
119
|
+
],
|
|
120
|
+
),
|
|
121
|
+
)
|
|
19
122
|
|
|
20
123
|
|
|
21
124
|
class ArgsSchema(Enum):
|
|
@@ -35,19 +138,7 @@ class ArgsSchema(Enum):
|
|
|
35
138
|
examples=["8:6,1:7"],
|
|
36
139
|
),
|
|
37
140
|
),
|
|
38
|
-
extra_params=
|
|
39
|
-
Optional[Dict[str, Union[str, int, None]]],
|
|
40
|
-
Field(
|
|
41
|
-
description="Additional parameters including limit and regex pattern to be removed from response",
|
|
42
|
-
default={"limit": GLOBAL_LIMIT, "regexp": None},
|
|
43
|
-
examples=[
|
|
44
|
-
{
|
|
45
|
-
"limit": "1000",
|
|
46
|
-
"regexp": r'("strokes"|"fills")\s*:\s*("[^"]*"|[^\s,}\[]+)\s*(?=,|\}|\n)',
|
|
47
|
-
}
|
|
48
|
-
],
|
|
49
|
-
),
|
|
50
|
-
),
|
|
141
|
+
extra_params=EXTRA_PARAMS,
|
|
51
142
|
)
|
|
52
143
|
File = create_model(
|
|
53
144
|
"FileNodes",
|
|
@@ -60,25 +151,13 @@ class ArgsSchema(Enum):
|
|
|
60
151
|
),
|
|
61
152
|
geometry=(
|
|
62
153
|
Optional[str],
|
|
63
|
-
Field(description="Sets to 'paths' to export vector data"),
|
|
154
|
+
Field(description="Sets to 'paths' to export vector data", default=None),
|
|
64
155
|
),
|
|
65
156
|
version=(
|
|
66
157
|
Optional[str],
|
|
67
|
-
Field(description="Sets version of file"),
|
|
68
|
-
),
|
|
69
|
-
extra_params=(
|
|
70
|
-
Optional[Dict[str, Union[str, int, None]]],
|
|
71
|
-
Field(
|
|
72
|
-
description="Additional parameters including limit and regex pattern to be removed from response",
|
|
73
|
-
default={"limit": GLOBAL_LIMIT, "regexp": None},
|
|
74
|
-
examples=[
|
|
75
|
-
{
|
|
76
|
-
"limit": "1000",
|
|
77
|
-
"regexp": r'("strokes"|"fills")\s*:\s*("[^"]*"|[^\s,}\[]+)\s*(?=,|\}|\n)',
|
|
78
|
-
}
|
|
79
|
-
],
|
|
80
|
-
),
|
|
158
|
+
Field(description="Sets version of file", default=None),
|
|
81
159
|
),
|
|
160
|
+
extra_params=EXTRA_PARAMS,
|
|
82
161
|
)
|
|
83
162
|
FileKey = create_model(
|
|
84
163
|
"FileKey",
|
|
@@ -89,19 +168,7 @@ class ArgsSchema(Enum):
|
|
|
89
168
|
examples=["Fp24FuzPwH0L74ODSrCnQo"],
|
|
90
169
|
),
|
|
91
170
|
),
|
|
92
|
-
extra_params=
|
|
93
|
-
Optional[Dict[str, Union[str, int, None]]],
|
|
94
|
-
Field(
|
|
95
|
-
description="Additional parameters including limit and regex pattern to be removed from response",
|
|
96
|
-
default={"limit": GLOBAL_LIMIT, "regexp": None},
|
|
97
|
-
examples=[
|
|
98
|
-
{
|
|
99
|
-
"limit": "1000",
|
|
100
|
-
"regexp": r'("strokes"|"fills")\s*:\s*("[^"]*"|[^\s,}\[]+)\s*(?=,|\}|\n)',
|
|
101
|
-
}
|
|
102
|
-
],
|
|
103
|
-
),
|
|
104
|
-
),
|
|
171
|
+
extra_params=EXTRA_PARAMS,
|
|
105
172
|
)
|
|
106
173
|
FileComment = create_model(
|
|
107
174
|
"FileComment",
|
|
@@ -119,22 +186,11 @@ class ArgsSchema(Enum):
|
|
|
119
186
|
client_meta=(
|
|
120
187
|
Optional[dict],
|
|
121
188
|
Field(
|
|
122
|
-
description="Positioning information of the comment (Vector, FrameOffset, Region, FrameOffsetRegion)"
|
|
123
|
-
|
|
124
|
-
),
|
|
125
|
-
extra_params=(
|
|
126
|
-
Optional[Dict[str, Union[str, int, None]]],
|
|
127
|
-
Field(
|
|
128
|
-
description="Additional parameters including limit and regex pattern to be removed from response",
|
|
129
|
-
default={"limit": GLOBAL_LIMIT, "regexp": None},
|
|
130
|
-
examples=[
|
|
131
|
-
{
|
|
132
|
-
"limit": "1000",
|
|
133
|
-
"regexp": r'("strokes"|"fills")\s*:\s*("[^"]*"|[^\s,}\[]+)\s*(?=,|\}|\n)',
|
|
134
|
-
}
|
|
135
|
-
],
|
|
189
|
+
description="Positioning information of the comment (Vector, FrameOffset, Region, FrameOffsetRegion)",
|
|
190
|
+
default=None,
|
|
136
191
|
),
|
|
137
192
|
),
|
|
193
|
+
extra_params=EXTRA_PARAMS,
|
|
138
194
|
)
|
|
139
195
|
FileImages = create_model(
|
|
140
196
|
"FileImages",
|
|
@@ -146,40 +202,30 @@ class ArgsSchema(Enum):
|
|
|
146
202
|
),
|
|
147
203
|
),
|
|
148
204
|
ids=(
|
|
149
|
-
str,
|
|
205
|
+
Optional[str],
|
|
150
206
|
Field(
|
|
151
207
|
description="Specifies id of file images separated by comma",
|
|
152
208
|
examples=["8:6,1:7"],
|
|
209
|
+
default="0:0",
|
|
153
210
|
),
|
|
154
211
|
),
|
|
155
212
|
scale=(
|
|
156
213
|
Optional[str],
|
|
157
|
-
Field(description="A number between 0.01 and 4, the image scaling factor"),
|
|
214
|
+
Field(description="A number between 0.01 and 4, the image scaling factor", default=None),
|
|
158
215
|
),
|
|
159
216
|
format=(
|
|
160
217
|
Optional[str],
|
|
161
218
|
Field(
|
|
162
219
|
description="A string enum for the image output format",
|
|
163
220
|
examples=["jpg", "png", "svg", "pdf"],
|
|
221
|
+
default=None,
|
|
164
222
|
),
|
|
165
223
|
),
|
|
166
224
|
version=(
|
|
167
225
|
Optional[str],
|
|
168
|
-
Field(description="A specific version ID to use"),
|
|
169
|
-
),
|
|
170
|
-
extra_params=(
|
|
171
|
-
Optional[Dict[str, Union[str, int, None]]],
|
|
172
|
-
Field(
|
|
173
|
-
description="Additional parameters including limit and regex pattern to be removed from response",
|
|
174
|
-
default={"limit": GLOBAL_LIMIT, "regexp": None},
|
|
175
|
-
examples=[
|
|
176
|
-
{
|
|
177
|
-
"limit": "1000",
|
|
178
|
-
"regexp": r'("strokes"|"fills")\s*:\s*("[^"]*"|[^\s,}\[]+)\s*(?=,|\}|\n)',
|
|
179
|
-
}
|
|
180
|
-
],
|
|
181
|
-
),
|
|
226
|
+
Field(description="A specific version ID to use", default=None),
|
|
182
227
|
),
|
|
228
|
+
extra_params=EXTRA_PARAMS,
|
|
183
229
|
)
|
|
184
230
|
TeamProjects = create_model(
|
|
185
231
|
"TeamProjects",
|
|
@@ -190,19 +236,7 @@ class ArgsSchema(Enum):
|
|
|
190
236
|
examples=["1101853299713989222"],
|
|
191
237
|
),
|
|
192
238
|
),
|
|
193
|
-
extra_params=
|
|
194
|
-
Optional[Dict[str, Union[str, int, None]]],
|
|
195
|
-
Field(
|
|
196
|
-
description="Additional parameters including limit and regex pattern to be removed from response",
|
|
197
|
-
default={"limit": GLOBAL_LIMIT, "regexp": None},
|
|
198
|
-
examples=[
|
|
199
|
-
{
|
|
200
|
-
"limit": "1000",
|
|
201
|
-
"regexp": r'("strokes"|"fills")\s*:\s*("[^"]*"|[^\s,}\[]+)\s*(?=,|\}|\n)',
|
|
202
|
-
}
|
|
203
|
-
],
|
|
204
|
-
),
|
|
205
|
-
),
|
|
239
|
+
extra_params=EXTRA_PARAMS,
|
|
206
240
|
)
|
|
207
241
|
ProjectFiles = create_model(
|
|
208
242
|
"ProjectFiles",
|
|
@@ -213,93 +247,469 @@ class ArgsSchema(Enum):
|
|
|
213
247
|
examples=["55391681"],
|
|
214
248
|
),
|
|
215
249
|
),
|
|
216
|
-
extra_params=
|
|
217
|
-
|
|
250
|
+
extra_params=EXTRA_PARAMS,
|
|
251
|
+
)
|
|
252
|
+
FileSummary = create_model(
|
|
253
|
+
"FileSummary",
|
|
254
|
+
url=(
|
|
255
|
+
Optional[str],
|
|
256
|
+
Field(
|
|
257
|
+
description=(
|
|
258
|
+
"Full Figma URL with file key and optional node-id. "
|
|
259
|
+
"Example: 'https://www.figma.com/file/<FILE_KEY>/...?...node-id=<NODE_ID>'. "
|
|
260
|
+
"If provided and valid, URL is used and file_key/node_ids arguments are ignored."
|
|
261
|
+
),
|
|
262
|
+
default=None,
|
|
263
|
+
),
|
|
264
|
+
),
|
|
265
|
+
file_key=(
|
|
266
|
+
Optional[str],
|
|
267
|
+
Field(
|
|
268
|
+
description=(
|
|
269
|
+
"Explicit file key used only when URL is not provided."
|
|
270
|
+
),
|
|
271
|
+
default=None,
|
|
272
|
+
examples=["Fp24FuzPwH0L74ODSrCnQo"],
|
|
273
|
+
),
|
|
274
|
+
),
|
|
275
|
+
include_node_ids=(
|
|
276
|
+
Optional[str],
|
|
277
|
+
Field(
|
|
278
|
+
description=(
|
|
279
|
+
"Optional comma-separated top-level node ids (pages) to include when URL has no node-id and URL is not set. "
|
|
280
|
+
"Example: '8:6,1:7'."
|
|
281
|
+
),
|
|
282
|
+
default=None,
|
|
283
|
+
examples=["8:6,1:7"],
|
|
284
|
+
),
|
|
285
|
+
),
|
|
286
|
+
exclude_node_ids=(
|
|
287
|
+
Optional[str],
|
|
218
288
|
Field(
|
|
219
|
-
description=
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
}
|
|
226
|
-
],
|
|
289
|
+
description=(
|
|
290
|
+
"Optional comma-separated top-level node ids (pages) to exclude when URL has no node-id and URL is not set. "
|
|
291
|
+
"Applied only when include_node_ids is not provided."
|
|
292
|
+
),
|
|
293
|
+
default=None,
|
|
294
|
+
examples=["8:6,1:7"],
|
|
227
295
|
),
|
|
228
296
|
),
|
|
229
297
|
)
|
|
230
298
|
|
|
231
299
|
|
|
232
|
-
class FigmaApiWrapper(
|
|
300
|
+
class FigmaApiWrapper(NonCodeIndexerToolkit):
|
|
233
301
|
token: Optional[SecretStr] = Field(default=None)
|
|
234
302
|
oauth2: Optional[SecretStr] = Field(default=None)
|
|
235
303
|
global_limit: Optional[int] = Field(default=GLOBAL_LIMIT)
|
|
236
304
|
global_regexp: Optional[str] = Field(default=None)
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
|
|
252
|
-
|
|
305
|
+
global_fields_retain: Optional[List[str]] = GLOBAL_RETAIN
|
|
306
|
+
global_fields_remove: Optional[List[str]] = GLOBAL_REMOVE
|
|
307
|
+
global_depth_start: Optional[int] = Field(default=GLOBAL_DEPTH_START)
|
|
308
|
+
global_depth_end: Optional[int] = Field(default=GLOBAL_DEPTH_END)
|
|
309
|
+
# prompt-related configuration, populated from FigmaToolkit.toolkit_config_schema
|
|
310
|
+
apply_images_prompt: Optional[bool] = Field(default=True)
|
|
311
|
+
images_prompt: Optional[Dict[str, str]] = Field(default=DEFAULT_FIGMA_IMAGES_PROMPT)
|
|
312
|
+
apply_summary_prompt: Optional[bool] = Field(default=True)
|
|
313
|
+
summary_prompt: Optional[Dict[str, str]] = Field(default=DEFAULT_FIGMA_SUMMARY_PROMPT)
|
|
314
|
+
# concurrency configuration, populated from toolkit config like images_prompt
|
|
315
|
+
number_of_threads: Optional[int] = Field(default=DEFAULT_NUMBER_OF_THREADS, ge=1, le=5)
|
|
316
|
+
_client: Optional[AlitaFigmaPy] = PrivateAttr()
|
|
317
|
+
|
|
318
|
+
def _parse_figma_url(self, url: str) -> tuple[str, Optional[List[str]]]:
|
|
319
|
+
"""Parse and validate a Figma URL.
|
|
320
|
+
|
|
321
|
+
Returns a tuple of (file_key, node_ids_from_url or None).
|
|
322
|
+
Raises ToolException with a clear message if the URL is malformed.
|
|
323
|
+
"""
|
|
324
|
+
try:
|
|
325
|
+
parsed = urlparse(url)
|
|
326
|
+
|
|
327
|
+
# Basic structural validation
|
|
328
|
+
if not parsed.scheme or not parsed.netloc:
|
|
329
|
+
raise ToolException(
|
|
330
|
+
"Figma URL must include protocol and host (e.g., https://www.figma.com/file/...). "
|
|
331
|
+
f"Got: {url}"
|
|
332
|
+
)
|
|
333
|
+
|
|
334
|
+
path_parts = parsed.path.strip('/').split('/') if parsed.path else []
|
|
335
|
+
|
|
336
|
+
# Supported URL patterns:
|
|
337
|
+
# - /file/<file_key>/...
|
|
338
|
+
# - /design/<file_key>/... (older / embedded variant)
|
|
339
|
+
if len(path_parts) < 2 or path_parts[0] not in {"file", "design"}:
|
|
340
|
+
raise ToolException(
|
|
341
|
+
"Unsupported Figma URL format. Expected path like '/file/<FILE_KEY>/...' or "
|
|
342
|
+
"'/design/<FILE_KEY>/...'. "
|
|
343
|
+
f"Got path: '{parsed.path}' from URL: {url}"
|
|
344
|
+
)
|
|
345
|
+
|
|
346
|
+
file_key = path_parts[1]
|
|
347
|
+
if not file_key:
|
|
348
|
+
raise ToolException(
|
|
349
|
+
"Figma URL is missing the file key segment after '/file/' or '/design/'. "
|
|
350
|
+
f"Got path: '{parsed.path}' from URL: {url}"
|
|
351
|
+
)
|
|
352
|
+
|
|
353
|
+
# Optional node-id is passed via query parameter
|
|
354
|
+
query_params = parse_qs(parsed.query or "")
|
|
355
|
+
node_ids_from_url = query_params.get("node-id", []) or None
|
|
356
|
+
|
|
357
|
+
return file_key, node_ids_from_url
|
|
358
|
+
|
|
359
|
+
except ToolException:
|
|
360
|
+
# Re-raise our own clear ToolException as-is
|
|
361
|
+
raise
|
|
362
|
+
except Exception as e:
|
|
363
|
+
# Catch any unexpected parsing issues and wrap them clearly
|
|
364
|
+
raise ToolException(
|
|
365
|
+
"Unexpected error while processing Figma URL. "
|
|
366
|
+
"Please provide a valid Figma file or page URL, for example: "
|
|
367
|
+
"'https://www.figma.com/file/<FILE_KEY>/...'? "
|
|
368
|
+
f"Original error: {e}"
|
|
369
|
+
)
|
|
370
|
+
|
|
371
|
+
def _base_loader(
|
|
372
|
+
self,
|
|
373
|
+
url: Optional[str] = None,
|
|
374
|
+
file_keys_include: Optional[List[str]] = None,
|
|
375
|
+
file_keys_exclude: Optional[List[str]] = None,
|
|
376
|
+
node_ids_include: Optional[List[str]] = None,
|
|
377
|
+
node_ids_exclude: Optional[List[str]] = None,
|
|
378
|
+
node_types_include: Optional[List[str]] = None,
|
|
379
|
+
node_types_exclude: Optional[List[str]] = None,
|
|
380
|
+
number_of_threads: Optional[int] = None,
|
|
381
|
+
**kwargs
|
|
382
|
+
) -> Generator[Document, None, None]:
|
|
383
|
+
if url:
|
|
384
|
+
file_key, node_ids_from_url = self._parse_figma_url(url)
|
|
385
|
+
# Override include params based on URL
|
|
386
|
+
file_keys_include = [file_key]
|
|
387
|
+
if node_ids_from_url and not node_ids_include:
|
|
388
|
+
node_ids_include = node_ids_from_url
|
|
389
|
+
|
|
390
|
+
# If both include and exclude are provided, use only include
|
|
391
|
+
if file_keys_include:
|
|
392
|
+
self._log_tool_event(f"Loading files: {file_keys_include}")
|
|
393
|
+
for file_key in file_keys_include:
|
|
394
|
+
self._log_tool_event(f"Loading file `{file_key}`")
|
|
395
|
+
file = self._client.get_file(file_key, geometry='depth=1') # fetch only top-level structure (only pages without inner components)
|
|
396
|
+
if not file:
|
|
397
|
+
raise ToolException(f"Unexpected error while retrieving file {file_key}. Please try specifying the node-id of an inner page.")
|
|
398
|
+
# propagate per-call number_of_threads override via metadata so _process_document can respect it
|
|
253
399
|
metadata = {
|
|
254
400
|
'id': file_key,
|
|
255
401
|
'file_key': file_key,
|
|
256
402
|
'name': file.name,
|
|
257
|
-
'updated_on': file.last_modified
|
|
403
|
+
'updated_on': file.last_modified,
|
|
404
|
+
'figma_pages_include': node_ids_include or [],
|
|
405
|
+
'figma_pages_exclude': node_ids_exclude or [],
|
|
406
|
+
'figma_nodes_include': node_types_include or [],
|
|
407
|
+
'figma_nodes_exclude': node_types_exclude or [],
|
|
258
408
|
}
|
|
409
|
+
if isinstance(number_of_threads, int) and 1 <= number_of_threads <= 5:
|
|
410
|
+
metadata['number_of_threads_override'] = number_of_threads
|
|
259
411
|
yield Document(page_content=json.dumps(metadata), metadata=metadata)
|
|
412
|
+
elif file_keys_exclude or node_ids_exclude:
|
|
413
|
+
raise ValueError("Excludes without parent (file_keys_include) do not make sense.")
|
|
414
|
+
else:
|
|
415
|
+
raise ValueError("You must provide file_keys_include or a URL.")
|
|
416
|
+
|
|
417
|
+
def has_image_representation(self, node):
|
|
418
|
+
node_type = node.get('type', '').lower()
|
|
419
|
+
default_images_types = [
|
|
420
|
+
'image', 'canvas', 'frame', 'vector', 'table', 'slice', 'sticky', 'shape_with_text', 'connector'
|
|
421
|
+
]
|
|
422
|
+
# filter nodes of type which has image representation
|
|
423
|
+
# or rectangles with image as background
|
|
424
|
+
if (node_type in default_images_types
|
|
425
|
+
or (node_type == 'rectangle' and 'fills' in node and any(
|
|
426
|
+
fill.get('type') == 'IMAGE' for fill in node['fills'] if isinstance(fill, dict)))):
|
|
427
|
+
return True
|
|
428
|
+
return False
|
|
260
429
|
|
|
261
|
-
def
|
|
430
|
+
def get_texts_recursive(self, node):
|
|
431
|
+
texts = []
|
|
432
|
+
node_type = node.get('type', '').lower()
|
|
433
|
+
if node_type == 'text':
|
|
434
|
+
texts.append(node.get('characters', ''))
|
|
435
|
+
if 'children' in node:
|
|
436
|
+
for child in node['children']:
|
|
437
|
+
texts.extend(self.get_texts_recursive(child))
|
|
438
|
+
return texts
|
|
439
|
+
|
|
440
|
+
def _load_pages(self, document: Document):
|
|
262
441
|
file_key = document.metadata.get('id', '')
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
if
|
|
267
|
-
nodes
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
442
|
+
node_ids_include = document.metadata.pop('figma_pages_include', [])
|
|
443
|
+
node_ids_exclude = document.metadata.pop('figma_pages_exclude', [])
|
|
444
|
+
self._log_tool_event(f"Included pages: {node_ids_include}. Excluded pages: {node_ids_exclude}.")
|
|
445
|
+
if node_ids_include:
|
|
446
|
+
# try to fetch only specified pages/nodes in one request
|
|
447
|
+
file = self._get_file_nodes(file_key,','.join(node_ids_include)) # attempt to fetch only specified pages/nodes in one request
|
|
448
|
+
if file:
|
|
449
|
+
return [
|
|
450
|
+
node["document"]
|
|
451
|
+
for node in (file.get("nodes") or {}).values()
|
|
452
|
+
if node is not None and "document" in node
|
|
453
|
+
]
|
|
454
|
+
else:
|
|
455
|
+
#
|
|
456
|
+
file = self._client.get_file(file_key)
|
|
457
|
+
if file:
|
|
458
|
+
figma_pages = file.document.get('children', [])
|
|
459
|
+
return [node for node in figma_pages if ('id' in node and node['id'].replace(':', '-') not in node_ids_exclude)]
|
|
460
|
+
# fallback to loading all pages and filtering them one by one
|
|
461
|
+
file = self._client.get_file(file_key, geometry='depth=1')
|
|
462
|
+
if not file:
|
|
463
|
+
raise ToolException(
|
|
464
|
+
f"Unexpected error while retrieving file {file_key}. Please try specifying the node-id of an inner page.")
|
|
465
|
+
figma_pages_raw = file.document.get('children', [])
|
|
466
|
+
# extract pages one by one
|
|
467
|
+
if node_ids_include:
|
|
468
|
+
return [self._get_file_nodes(file_key, node_id) for node_id in node_ids_include]
|
|
469
|
+
else:
|
|
470
|
+
# return [self._get_file_nodes(file_key, page["id"]) for page in figma_pages_raw if ('id' in page and page['id'].replace(':', '-') not in node_ids_exclude)]
|
|
471
|
+
result = []
|
|
472
|
+
for page in figma_pages_raw:
|
|
473
|
+
if 'id' in page and page['id'].replace(':', '-') not in node_ids_exclude:
|
|
474
|
+
page_res = self._get_file_nodes(file_key, page["id"]).get('nodes', {}).get(page["id"], {}).get("document", {})
|
|
475
|
+
result.append(page_res)
|
|
476
|
+
return result
|
|
477
|
+
|
|
478
|
+
def _process_single_image(
|
|
479
|
+
self,
|
|
480
|
+
file_key: str,
|
|
481
|
+
document: Document,
|
|
482
|
+
node_id: str,
|
|
483
|
+
image_url: str,
|
|
484
|
+
prompt: str,
|
|
485
|
+
) -> Optional[Document]:
|
|
486
|
+
"""Download and process a single Figma image node.
|
|
487
|
+
This helper is used by `_process_document` (optionally in parallel via threads).
|
|
488
|
+
"""
|
|
489
|
+
if not image_url:
|
|
490
|
+
logging.warning(f"Image URL not found for node_id {node_id} in file {file_key}. Skipping.")
|
|
491
|
+
return None
|
|
492
|
+
|
|
493
|
+
logging.info(f"File {file_key}: downloading image node {node_id}.")
|
|
494
|
+
|
|
495
|
+
try:
|
|
276
496
|
response = requests.get(image_url)
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
497
|
+
except Exception as exc:
|
|
498
|
+
logging.warning(f"Failed to download image for node {node_id} in file {file_key}: {exc}")
|
|
499
|
+
return None
|
|
500
|
+
|
|
501
|
+
if response.status_code != 200:
|
|
502
|
+
logging.warning(
|
|
503
|
+
f"Unexpected status code {response.status_code} when downloading image "
|
|
504
|
+
f"for node {node_id} in file {file_key}."
|
|
505
|
+
)
|
|
506
|
+
return None
|
|
507
|
+
|
|
508
|
+
content_type = response.headers.get('Content-Type', '')
|
|
509
|
+
if 'text/html' in content_type.lower():
|
|
510
|
+
logging.warning(f"Received HTML instead of image content for node {node_id} in file {file_key}.")
|
|
511
|
+
return None
|
|
512
|
+
|
|
513
|
+
extension = (f".{content_type.split('/')[-1]}" if content_type.startswith('image') else '.txt')
|
|
514
|
+
logging.info(f"File {file_key}: processing image node {node_id}.")
|
|
515
|
+
page_content = _load_content_from_bytes_with_prompt(
|
|
516
|
+
file_content=response.content,
|
|
517
|
+
extension=extension,
|
|
518
|
+
llm=self.llm,
|
|
519
|
+
prompt=prompt,
|
|
520
|
+
)
|
|
521
|
+
|
|
522
|
+
logging.info(f"File {file_key}: finished image node {node_id}.")
|
|
523
|
+
|
|
524
|
+
return Document(
|
|
525
|
+
page_content=page_content,
|
|
526
|
+
metadata={
|
|
527
|
+
'id': node_id,
|
|
528
|
+
'updated_on': document.metadata.get('updated_on', ''),
|
|
529
|
+
'file_key': file_key,
|
|
530
|
+
'node_id': node_id,
|
|
531
|
+
'image_url': image_url,
|
|
532
|
+
'type': 'image',
|
|
533
|
+
},
|
|
534
|
+
)
|
|
535
|
+
|
|
536
|
+
def _process_document(
|
|
537
|
+
self,
|
|
538
|
+
document: Document,
|
|
539
|
+
prompt: str = "",
|
|
540
|
+
) -> Generator[Document, None, None]:
|
|
541
|
+
file_key = document.metadata.get('id', '')
|
|
542
|
+
self._log_tool_event(f"Loading details (images) for `{file_key}`")
|
|
543
|
+
figma_pages = self._load_pages(document)
|
|
544
|
+
node_types_include = [t.strip().lower() for t in document.metadata.pop('figma_nodes_include', [])]
|
|
545
|
+
node_types_exclude = [t.strip().lower() for t in document.metadata.pop('figma_nodes_exclude', [])]
|
|
546
|
+
|
|
547
|
+
image_nodes = []
|
|
548
|
+
text_nodes = {}
|
|
549
|
+
for page in figma_pages:
|
|
550
|
+
for node in page.get('children', []):
|
|
551
|
+
# filter by node_type if specified any include or exclude
|
|
552
|
+
node_type = node.get('type', '').lower()
|
|
553
|
+
include = node_types_include and node_type in node_types_include
|
|
554
|
+
exclude = node_types_exclude and node_type not in node_types_exclude
|
|
555
|
+
no_filter = not node_types_include and not node_types_exclude
|
|
556
|
+
|
|
557
|
+
if include or exclude or no_filter:
|
|
558
|
+
node_id = node.get('id')
|
|
559
|
+
if node_id:
|
|
560
|
+
if self.has_image_representation(node):
|
|
561
|
+
image_nodes.append(node['id'])
|
|
562
|
+
else:
|
|
563
|
+
text_nodes[node['id']] = self.get_texts_recursive(node)
|
|
564
|
+
total_nodes = len(image_nodes) + len(text_nodes)
|
|
565
|
+
# mutable counter so it can be updated from helper calls (even when used in threads)
|
|
566
|
+
counted_nodes_ref: Dict[str, int] = {"value": 0}
|
|
567
|
+
|
|
568
|
+
# Resolve number_of_threads override from document metadata, falling back to class field
|
|
569
|
+
override_threads = document.metadata.get('number_of_threads_override')
|
|
570
|
+
if isinstance(override_threads, int) and 1 <= override_threads <= 5:
|
|
571
|
+
number_of_threads = override_threads
|
|
572
|
+
else:
|
|
573
|
+
threads_cfg = getattr(self, "number_of_threads", DEFAULT_NUMBER_OF_THREADS)
|
|
574
|
+
if isinstance(threads_cfg, int) and 1 <= threads_cfg <= 5:
|
|
575
|
+
number_of_threads = threads_cfg
|
|
576
|
+
else:
|
|
577
|
+
number_of_threads = DEFAULT_NUMBER_OF_THREADS
|
|
578
|
+
|
|
579
|
+
# --- Process image nodes (potential bottleneck) with optional threading ---
|
|
580
|
+
if image_nodes:
|
|
581
|
+
file_images = self._client.get_file_images(file_key, image_nodes)
|
|
582
|
+
images = self._client.get_file_images(file_key, image_nodes).images or {} if file_images else {}
|
|
583
|
+
total_images = len(images)
|
|
584
|
+
if total_images == 0:
|
|
585
|
+
logging.info(f"No images found for file {file_key}.")
|
|
586
|
+
else:
|
|
587
|
+
self._log_tool_event(
|
|
588
|
+
f"File {file_key}: starting download/processing for total {total_nodes} nodes"
|
|
589
|
+
)
|
|
590
|
+
|
|
591
|
+
# Decide how many workers to use (bounded by total_images and configuration).
|
|
592
|
+
max_workers = number_of_threads
|
|
593
|
+
max_workers = max(1, min(max_workers, total_images))
|
|
594
|
+
|
|
595
|
+
if max_workers == 1:
|
|
596
|
+
# Keep original sequential behavior
|
|
597
|
+
for node_id, image_url in images.items():
|
|
598
|
+
doc = self._process_single_image(
|
|
599
|
+
file_key=file_key,
|
|
600
|
+
document=document,
|
|
601
|
+
node_id=node_id,
|
|
602
|
+
image_url=image_url,
|
|
603
|
+
prompt=prompt,
|
|
604
|
+
)
|
|
605
|
+
counted_nodes_ref["value"] += 1
|
|
606
|
+
if doc is not None:
|
|
607
|
+
self._log_tool_event(
|
|
608
|
+
f"File {file_key}: processing image node {node_id} "
|
|
609
|
+
f"({counted_nodes_ref['value']}/{total_nodes} in {max_workers} threads)."
|
|
610
|
+
)
|
|
611
|
+
yield doc
|
|
612
|
+
else:
|
|
613
|
+
# Parallelize image download/processing with a thread pool
|
|
614
|
+
self._log_tool_event(
|
|
615
|
+
f"File {file_key}: using up to {max_workers} worker threads for image nodes."
|
|
616
|
+
)
|
|
617
|
+
with ThreadPoolExecutor(max_workers=max_workers) as executor:
|
|
618
|
+
future_to_node = {
|
|
619
|
+
executor.submit(
|
|
620
|
+
self._process_single_image,
|
|
621
|
+
file_key,
|
|
622
|
+
document,
|
|
623
|
+
node_id,
|
|
624
|
+
image_url,
|
|
625
|
+
prompt,
|
|
626
|
+
): node_id
|
|
627
|
+
for node_id, image_url in images.items()
|
|
628
|
+
}
|
|
629
|
+
for future in as_completed(future_to_node):
|
|
630
|
+
node_id = future_to_node[future]
|
|
631
|
+
try:
|
|
632
|
+
doc = future.result()
|
|
633
|
+
except Exception as exc: # safeguard
|
|
634
|
+
logging.warning(
|
|
635
|
+
f"File {file_key}: unexpected error while processing image node {node_id}: {exc}"
|
|
636
|
+
)
|
|
637
|
+
continue
|
|
638
|
+
finally:
|
|
639
|
+
# Count every attempted node, even if it failed or produced no doc,
|
|
640
|
+
# so that progress always reaches total_nodes.
|
|
641
|
+
counted_nodes_ref["value"] += 1
|
|
642
|
+
|
|
643
|
+
if doc is not None:
|
|
644
|
+
self._log_tool_event(
|
|
645
|
+
f"File {file_key}: processing image node {node_id} "
|
|
646
|
+
f"({counted_nodes_ref['value']}/{total_nodes} in {max_workers} threads)."
|
|
291
647
|
)
|
|
648
|
+
yield doc
|
|
649
|
+
|
|
650
|
+
logging.info(
|
|
651
|
+
f"File {file_key}: completed processing of {total_images} image nodes."
|
|
652
|
+
)
|
|
653
|
+
|
|
654
|
+
# --- Process text nodes (fast) ---
|
|
655
|
+
if text_nodes:
|
|
656
|
+
for node_id, texts in text_nodes.items():
|
|
657
|
+
counted_nodes_ref["value"] += 1
|
|
658
|
+
current_index = counted_nodes_ref["value"]
|
|
659
|
+
if texts:
|
|
660
|
+
self._log_tool_event(
|
|
661
|
+
f"File {file_key} : processing text node {node_id} ({current_index}/{total_nodes})."
|
|
662
|
+
)
|
|
663
|
+
yield Document(
|
|
664
|
+
page_content="\n".join(texts),
|
|
665
|
+
metadata={
|
|
666
|
+
'id': node_id,
|
|
667
|
+
'updated_on': document.metadata.get('updated_on', ''),
|
|
668
|
+
'file_key': file_key,
|
|
669
|
+
'node_id': node_id,
|
|
670
|
+
'type': 'text',
|
|
671
|
+
},
|
|
672
|
+
)
|
|
292
673
|
|
|
293
674
|
def _index_tool_params(self):
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
675
|
+
"""Return the parameters for indexing data."""
|
|
676
|
+
return {
|
|
677
|
+
"url": (Optional[str], Field(
|
|
678
|
+
description=(
|
|
679
|
+
"Full Figma file or page URL to index. Must be in one of the following formats: "
|
|
680
|
+
"'https://www.figma.com/file/<FILE_KEY>/...' or 'https://www.figma.com/design/<FILE_KEY>/...'. "
|
|
681
|
+
"If present, the 'node-id' query parameter (e.g. '?node-id=<PAGE_ID>') will be used to limit "
|
|
682
|
+
"indexing to that page or node. When this URL is provided, it overrides 'file_keys_include' ('node_ids_include')."
|
|
683
|
+
),
|
|
684
|
+
default=None)),
|
|
685
|
+
'number_of_threads': (Optional[int], Field(
|
|
686
|
+
description=(
|
|
687
|
+
"Optional override for the number of worker threads used when indexing Figma images. "
|
|
688
|
+
f"Valid values are from 1 to 5. Default is {DEFAULT_NUMBER_OF_THREADS}."
|
|
689
|
+
),
|
|
690
|
+
default=DEFAULT_NUMBER_OF_THREADS,
|
|
691
|
+
ge=1,
|
|
692
|
+
le=5,
|
|
693
|
+
)),
|
|
694
|
+
'file_keys_include': (Optional[List[str]], Field(
|
|
695
|
+
description="List of file keys to include in index if project_id is not provided: i.e. ['Fp24FuzPwH0L74ODSrCnQo', 'jmhAr6q78dJoMRqt48zisY']",
|
|
696
|
+
default=None)),
|
|
697
|
+
'file_keys_exclude': (Optional[List[str]], Field(
|
|
698
|
+
description="List of file keys to exclude from index. It is applied only if project_id is provided and file_keys_include is not provided: i.e. ['Fp24FuzPwH0L74ODSrCnQo', 'jmhAr6q78dJoMRqt48zisY']",
|
|
699
|
+
default=None)),
|
|
700
|
+
'node_ids_include': (Optional[List[str]], Field(
|
|
701
|
+
description="List of top-level nodes (pages) in file to include in index. It is node-id from figma url: i.e. ['123-56', '7651-9230'].",
|
|
702
|
+
default=None)),
|
|
703
|
+
'node_ids_exclude': (Optional[List[str]], Field(
|
|
704
|
+
description="List of top-level nodes (pages) in file to exclude from index. It is applied only if node_ids_include is not provided. It is node-id from figma url: i.e. ['Fp24FuzPwH0L74ODSrCnQo', 'jmhAr6q78dJoMRqt48zisY']",
|
|
705
|
+
default=None)),
|
|
706
|
+
'node_types_include': (Optional[List[str]], Field(
|
|
707
|
+
description="List type of nodes to include in index: i.e. ['FRAME', 'COMPONENT', 'RECTANGLE', 'COMPONENT_SET', 'INSTANCE', 'VECTOR', ...].",
|
|
708
|
+
default=None)),
|
|
709
|
+
'node_types_exclude': (Optional[List[str]], Field(
|
|
710
|
+
description="List type of nodes to exclude from index. It is applied only if node_types_include is not provided: i.e. ['FRAME', 'COMPONENT', 'RECTANGLE', 'COMPONENT_SET', 'INSTANCE', 'VECTOR', ...]",
|
|
711
|
+
default=None)),
|
|
712
|
+
}
|
|
303
713
|
|
|
304
714
|
def _send_request(
|
|
305
715
|
self,
|
|
@@ -328,6 +738,11 @@ class FigmaApiWrapper(BaseVectorStoreToolApiWrapper):
|
|
|
328
738
|
logging.error(msg)
|
|
329
739
|
raise ToolException(msg)
|
|
330
740
|
|
|
741
|
+
@model_validator(mode='before')
|
|
742
|
+
@classmethod
|
|
743
|
+
def check_before(cls, values):
|
|
744
|
+
return super().validate_toolkit(values)
|
|
745
|
+
|
|
331
746
|
@model_validator(mode="after")
|
|
332
747
|
@classmethod
|
|
333
748
|
def validate_toolkit(cls, values):
|
|
@@ -345,22 +760,22 @@ class FigmaApiWrapper(BaseVectorStoreToolApiWrapper):
|
|
|
345
760
|
except re.error as e:
|
|
346
761
|
msg = f"Failed to compile regex pattern: {str(e)}"
|
|
347
762
|
logging.error(msg)
|
|
348
|
-
|
|
763
|
+
raise ToolException(msg)
|
|
349
764
|
|
|
350
765
|
try:
|
|
351
766
|
if token:
|
|
352
|
-
cls._client =
|
|
767
|
+
cls._client = AlitaFigmaPy(token=token, oauth2=False)
|
|
353
768
|
logging.info("Authenticated with Figma token")
|
|
354
769
|
elif oauth2:
|
|
355
|
-
cls._client =
|
|
770
|
+
cls._client = AlitaFigmaPy(token=oauth2, oauth2=True)
|
|
356
771
|
logging.info("Authenticated with OAuth2 token")
|
|
357
772
|
else:
|
|
358
|
-
|
|
773
|
+
raise ToolException("You have to define Figma token.")
|
|
359
774
|
logging.info("Successfully authenticated to Figma.")
|
|
360
775
|
except Exception as e:
|
|
361
776
|
msg = f"Failed to authenticate with Figma: {str(e)}"
|
|
362
777
|
logging.error(msg)
|
|
363
|
-
|
|
778
|
+
raise ToolException(msg)
|
|
364
779
|
|
|
365
780
|
return values
|
|
366
781
|
|
|
@@ -395,6 +810,53 @@ class FigmaApiWrapper(BaseVectorStoreToolApiWrapper):
|
|
|
395
810
|
}
|
|
396
811
|
return obj
|
|
397
812
|
|
|
813
|
+
def process_fields(obj, fields_retain=None, fields_remove=None, depth_start=1, depth_end=2, depth=1):
|
|
814
|
+
"""
|
|
815
|
+
Reduces a nested dictionary or list by retaining or removing specified fields at certain depths.
|
|
816
|
+
|
|
817
|
+
- At each level, starting from `depth_start`, only fields in `fields_retain` are kept; fields in `fields_remove` are excluded unless also retained.
|
|
818
|
+
- Recursion stops at `depth_end`, ignoring all fields at or beyond this depth.
|
|
819
|
+
- Tracks which fields were retained and removed during processing.
|
|
820
|
+
- Returns a JSON string of the reduced object, plus lists of retained and removed fields.
|
|
821
|
+
"""
|
|
822
|
+
fields_retain = set(fields_retain or [])
|
|
823
|
+
fields_remove = set(fields_remove or []) - fields_retain # fields in remove have lower priority than in retain
|
|
824
|
+
|
|
825
|
+
retained = set()
|
|
826
|
+
removed = set()
|
|
827
|
+
|
|
828
|
+
def _process(o, d):
|
|
829
|
+
if depth_end is not None and d >= depth_end:
|
|
830
|
+
return None # Ignore keys at or beyond cut_depth
|
|
831
|
+
if isinstance(o, dict):
|
|
832
|
+
result = {}
|
|
833
|
+
for k, v in o.items():
|
|
834
|
+
if k in fields_remove:
|
|
835
|
+
removed.add(k)
|
|
836
|
+
continue
|
|
837
|
+
if d >= depth_start:
|
|
838
|
+
if k in fields_retain:
|
|
839
|
+
retained.add(k)
|
|
840
|
+
result[k] = _process(v, d + 1) # process recursively
|
|
841
|
+
else:
|
|
842
|
+
# else: skip keys not in retain/default/to_process
|
|
843
|
+
removed.add(k) # remember skipped keys
|
|
844
|
+
else:
|
|
845
|
+
# retained.add(k) # remember retained keys
|
|
846
|
+
result[k] = _process(v, d + 1)
|
|
847
|
+
return result
|
|
848
|
+
elif isinstance(o, list):
|
|
849
|
+
return [_process(item, d + 1) for item in o]
|
|
850
|
+
else:
|
|
851
|
+
return o
|
|
852
|
+
|
|
853
|
+
new_obj = _process(obj, depth)
|
|
854
|
+
return {
|
|
855
|
+
"result": json.dumps(new_obj),
|
|
856
|
+
"retained_fields": list(retained),
|
|
857
|
+
"removed_fields": list(removed)
|
|
858
|
+
}
|
|
859
|
+
|
|
398
860
|
def fix_trailing_commas(json_string):
|
|
399
861
|
json_string = re.sub(r",\s*,+", ",", json_string)
|
|
400
862
|
json_string = re.sub(r",\s*([\]}])", r"\1", json_string)
|
|
@@ -404,10 +866,12 @@ class FigmaApiWrapper(BaseVectorStoreToolApiWrapper):
|
|
|
404
866
|
@functools.wraps(func)
|
|
405
867
|
def wrapper(self, *args, **kwargs):
|
|
406
868
|
extra_params = kwargs.pop("extra_params", {})
|
|
407
|
-
|
|
408
869
|
limit = extra_params.get("limit", self.global_limit)
|
|
409
870
|
regexp = extra_params.get("regexp", self.global_regexp)
|
|
410
|
-
|
|
871
|
+
fields_retain = extra_params.get("fields_retain", self.global_fields_retain)
|
|
872
|
+
fields_remove = extra_params.get("fields_remove", self.global_fields_remove)
|
|
873
|
+
depth_start = extra_params.get("depth_start", self.global_depth_start)
|
|
874
|
+
depth_end = extra_params.get("depth_end", self.global_depth_end)
|
|
411
875
|
try:
|
|
412
876
|
limit = int(limit)
|
|
413
877
|
result = func(self, *args, **kwargs)
|
|
@@ -417,13 +881,26 @@ class FigmaApiWrapper(BaseVectorStoreToolApiWrapper):
|
|
|
417
881
|
return ToolException(
|
|
418
882
|
"Response result is empty. Check your input parameters or credentials"
|
|
419
883
|
)
|
|
420
|
-
|
|
421
884
|
if isinstance(result, (dict, list)):
|
|
422
|
-
|
|
423
|
-
|
|
885
|
+
raw_result = result
|
|
886
|
+
processed_result = simplified_dict(raw_result)
|
|
887
|
+
raw_str_result = json.dumps(processed_result)
|
|
888
|
+
str_result = raw_str_result
|
|
889
|
+
if regexp:
|
|
890
|
+
regexp = re.compile(regexp)
|
|
891
|
+
str_result = re.sub(regexp, "", raw_str_result)
|
|
892
|
+
str_result = fix_trailing_commas(str_result)
|
|
893
|
+
if len(str_result) > limit:
|
|
894
|
+
reduced = process_fields(raw_result, fields_retain=fields_retain, fields_remove=fields_remove, depth_start=depth_start, depth_end=depth_end)
|
|
895
|
+
note = (f"Size of the output exceeds limit {limit}. Data reducing has been applied. "
|
|
896
|
+
f"Starting from the depth_start = {depth_start} the following object fields were removed: {reduced['removed_fields']}. "
|
|
897
|
+
f"The following fields were retained: {reduced['retained_fields']}. "
|
|
898
|
+
f"Starting from depth_end = {depth_end} all fields were ignored. "
|
|
899
|
+
f"You can adjust fields_retain, fields_remove, depth_start, depth_end, limit and regexp parameters to get more precise output")
|
|
900
|
+
return f"## NOTE:\n{note}.\n## Result: {reduced['result']}"[:limit]
|
|
901
|
+
return str_result
|
|
424
902
|
else:
|
|
425
903
|
result = json.dumps(result)
|
|
426
|
-
|
|
427
904
|
if regexp:
|
|
428
905
|
regexp = re.compile(regexp)
|
|
429
906
|
result = re.sub(regexp, "", result)
|
|
@@ -444,6 +921,12 @@ class FigmaApiWrapper(BaseVectorStoreToolApiWrapper):
             f"files/{file_key}/nodes?ids={str(ids)}", method="get"
         )
 
+    def _get_file_nodes(self, file_key: str, ids: str, **kwargs):
+        """Reads specified file nodes by file key from Figma."""
+        return self._client.api_request(
+            f"files/{file_key}/nodes?ids={str(ids)}", method="get"
+        )
+
     @process_output
     def get_file(
         self,
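`_get_file_nodes` delegates to the client's `api_request`, whose implementation is outside this diff. The endpoint shape matches Figma's public REST API; a self-contained sketch using `requests` (the base URL and `X-Figma-Token` header come from Figma's API docs, not from this SDK):

```python
import requests

def get_file_nodes(file_key: str, ids: str, token: str) -> dict:
    # GET /v1/files/:file_key/nodes?ids=1:2,1:3 -- Figma's public REST API
    response = requests.get(
        f"https://api.figma.com/v1/files/{file_key}/nodes",
        params={"ids": ids},
        headers={"X-Figma-Token": token},
        timeout=30,
    )
    response.raise_for_status()
    return response.json()

# nodes = get_file_nodes("abc123", "1:2,1:3", token="figd_...")
# nodes["nodes"]["1:2"]["document"] holds the node subtree
```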
@@ -455,6 +938,132 @@ class FigmaApiWrapper(BaseVectorStoreToolApiWrapper):
         """Reads a specified file by file key from Figma."""
         return self._client.get_file(file_key, geometry, version)
 
+    @process_output
+    def get_file_summary(
+        self,
+        url: Optional[str] = None,
+        file_key: Optional[str] = None,
+        include_node_ids: Optional[str] = None,
+        exclude_node_ids: Optional[str] = None,
+        **kwargs,
+    ):
+        """Summarizes a Figma file by loading pages and nodes via URL or file key.
+
+        Configuration for image processing and summarization is taken from the toolkit
+        configuration (see FigmaToolkit.toolkit_config_schema):
+
+        - self.apply_images_prompt: if True, pass self.images_prompt to the image-processing step.
+        - self.images_prompt: instruction string for how to treat image-based nodes.
+        - self.apply_summary_prompt: if True and self.summary_prompt is set and an LLM is configured,
+          return a single summarized string; otherwise return the raw list of node documents.
+        - self.summary_prompt: instruction string for LLM summarization.
+
+        Tool arguments mirror ArgsSchema.FileSummary and control only which file/pages are loaded.
+        """
+        # Prepare params for _base_loader without evaluating any logic here
+        node_ids_include_list = None
+        node_ids_exclude_list = None
+
+        if include_node_ids:
+            node_ids_include_list = [nid.strip() for nid in include_node_ids.split(',') if nid.strip()]
+
+        if exclude_node_ids:
+            node_ids_exclude_list = [nid.strip() for nid in exclude_node_ids.split(',') if nid.strip()]
+
+        # Delegate URL and file_key handling to _base_loader
+        base_docs = self._base_loader(
+            url=url,
+            file_keys_include=[file_key] if file_key else None,
+            node_ids_include=node_ids_include_list,
+            node_ids_exclude=node_ids_exclude_list,
+        )
+
+        # Read prompt-related configuration from toolkit instance (set via wrapper_payload)
+        apply_images_prompt = getattr(self, "apply_images_prompt", False)
+        images_prompt = getattr(self, "images_prompt", None)
+        apply_summary_prompt = getattr(self, "apply_summary_prompt", True)
+        summary_prompt = getattr(self, "summary_prompt", None)
+
+        # Decide whether to apply images_prompt. Expect a dict with 'prompt'.
+        if (
+            apply_images_prompt
+            and isinstance(images_prompt, dict)
+            and isinstance(images_prompt.get("prompt"), str)
+            and images_prompt["prompt"].strip()
+        ):
+            images_prompt_str = images_prompt["prompt"].strip()
+        else:
+            images_prompt_str = ""
+
+        results: List[Dict] = []
+        for base_doc in base_docs:
+            for dep in self._process_document(
+                base_doc,
+                images_prompt_str,
+            ):
+                results.append({
+                    "page_content": dep.page_content,
+                    "metadata": dep.metadata,
+                })
+
+        # Decide whether to apply summary_prompt
+        has_summary_prompt = bool(
+            isinstance(summary_prompt, dict)
+            and isinstance(summary_prompt.get("prompt"), str)
+            and summary_prompt["prompt"].strip()
+        )
+        if not apply_summary_prompt or not has_summary_prompt:
+            # Return raw docs when summary is disabled or no prompt provided
+            self._log_tool_event("Summary prompt not provided: returning raw documents.")
+            return results
+
+        # If summary_prompt is enabled, generate an LLM-based summary over the loaded docs
+        try:
+            # Build a structured, ordered view of images and texts to help the LLM infer flows.
+            blocks = []
+            for item in results:
+                metadata = item.get("metadata", {}) or {}
+                node_type = str(metadata.get("type", "")).lower()
+                node_id = metadata.get("node_id") or metadata.get("id", "")
+                page_content = str(item.get("page_content", "")).strip()
+
+                if not page_content:
+                    continue
+
+                if node_type == "image":
+                    image_url = metadata.get("image_url", "")
+                    header = f"Image ({node_id}), {image_url}".strip().rstrip(',')
+                    body = page_content
+                else:
+                    header = f"Text ({node_id})".strip()
+                    body = page_content
+
+                block = f"{header}\n{body}\n--------------------"
+                blocks.append(block)
+
+            full_content = "\n".join(blocks) if blocks else "(no content)"
+            self._log_tool_event("Invoking LLM for Figma file summary.")
+
+            if not getattr(self, "llm", None):
+                raise RuntimeError("LLM is not configured for this toolkit; cannot apply summary_prompt.")
+
+            # Use the 'prompt' field from the summary_prompt dict as the instruction block
+            summary_prompt_text = summary_prompt["prompt"].strip()
+            prompt_text = f"{summary_prompt_text}\n\nCONTENT BEGIN\n{full_content}\nCONTENT END"
+            llm_response = self.llm.invoke(prompt_text) if hasattr(self.llm, "invoke") else self.llm(prompt_text)
+
+            if hasattr(llm_response, "content"):
+                summary_text = str(llm_response.content)
+            else:
+                summary_text = str(llm_response)
+
+            self._log_tool_event("Successfully generated LLM-based file summary.")
+            return summary_text
+        except Exception as e:
+            logging.warning(f"Failed to apply summary_prompt in get_file_summary: {e}")
+            self._log_tool_event("Falling back to raw documents due to summary_prompt failure.")
+            return results
+
     @process_output
     def get_file_versions(self, file_key: str, **kwargs):
         """Retrieves the version history of a specified file from Figma."""
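The summarization path flattens the loaded documents into ordered `Image (...)` / `Text (...)` blocks, then makes one LLM call over a `CONTENT BEGIN ... CONTENT END` envelope. The assembly is easy to exercise in isolation; a sketch with made-up documents:

```python
def build_summary_prompt(results, summary_prompt_text):
    blocks = []
    for item in results:
        metadata = item.get("metadata", {}) or {}
        node_id = metadata.get("node_id") or metadata.get("id", "")
        content = str(item.get("page_content", "")).strip()
        if not content:
            continue
        if str(metadata.get("type", "")).lower() == "image":
            header = f"Image ({node_id}), {metadata.get('image_url', '')}".strip().rstrip(',')
        else:
            header = f"Text ({node_id})"
        blocks.append(f"{header}\n{content}\n--------------------")
    full_content = "\n".join(blocks) if blocks else "(no content)"
    return f"{summary_prompt_text}\n\nCONTENT BEGIN\n{full_content}\nCONTENT END"

docs = [
    {"page_content": "Sign in", "metadata": {"type": "text", "node_id": "1:2"}},
    {"page_content": "Login screen mock",
     "metadata": {"type": "image", "node_id": "1:3", "image_url": "https://..."}},
]
print(build_summary_prompt(docs, "Summarize the screens and likely user flows."))
```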
@@ -488,7 +1097,7 @@ class FigmaApiWrapper(BaseVectorStoreToolApiWrapper):
     def get_file_images(
         self,
         file_key: str,
-        ids: str = "0:0",
+        ids: Optional[str] = "0:0",
         scale: Optional[str] = None,
         format: Optional[str] = None,
         version: Optional[str] = None,
@@ -510,7 +1119,609 @@ class FigmaApiWrapper(BaseVectorStoreToolApiWrapper):
         """Retrieves all files for a specified project ID from Figma."""
         return self._client.get_project_files(project_id)
 
-
+    # -------------------------------------------------------------------------
+    # TOON Format Tools (Token-Optimized Output)
+    # -------------------------------------------------------------------------
+
+    def get_file_structure_toon(
+        self,
+        url: Optional[str] = None,
+        file_key: Optional[str] = None,
+        include_pages: Optional[str] = None,
+        exclude_pages: Optional[str] = None,
+        max_frames: int = 50,
+        **kwargs,
+    ) -> str:
+        """
+        Get file structure in TOON format - optimized for LLM token consumption.
+
+        Returns a compact, human-readable format with:
+        - Page and frame hierarchy
+        - Text content categorized (headings, labels, buttons, body, errors)
+        - Component usage
+        - Inferred screen types and states
+        - Flow analysis (sequences, variants, CTA destinations)
+
+        TOON format uses ~70% fewer tokens than JSON for the same data.
+
+        Use this tool when you need to:
+        - Understand overall file structure quickly
+        - Generate user journey documentation
+        - Analyze screen flows and navigation
+        - Identify UI patterns and components
+        """
+        self._log_tool_event("Getting file structure in TOON format")
+
+        # Parse URL or use file_key
+        if url:
+            file_key, node_ids_from_url = self._parse_figma_url(url)
+            if node_ids_from_url and not include_pages:
+                include_pages = ','.join(node_ids_from_url)
+
+        if not file_key:
+            raise ToolException("Either url or file_key must be provided")
+
+        # Parse include/exclude pages
+        include_ids = [p.strip() for p in include_pages.split(',')] if include_pages else None
+        exclude_ids = [p.strip() for p in exclude_pages.split(',')] if exclude_pages else None
+
+        # Get file structure (shallow fetch - only top-level pages, not full content)
+        # This avoids "Request too large" errors for big files
+        self._log_tool_event(f"Fetching file structure for {file_key}")
+        file_data = self._client.get_file(file_key, geometry='depth=1')
+
+        if not file_data:
+            raise ToolException(f"Failed to retrieve file {file_key}")
+
+        # Process pages
+        pages_data = []
+        all_pages = file_data.document.get('children', [])
+
+        for page_node in all_pages:
+            page_id = page_node.get('id', '')
+
+            # Apply page filters
+            if include_ids and page_id not in include_ids and page_id.replace(':', '-') not in include_ids:
+                continue
+            if exclude_ids and not include_ids:
+                if page_id in exclude_ids or page_id.replace(':', '-') in exclude_ids:
+                    continue
+
+            self._log_tool_event(f"Processing page: {page_node.get('name', 'Untitled')}")
+
+            # Fetch full page content individually (avoids large single request)
+            try:
+                page_full = self._get_file_nodes(file_key, page_id)
+                if page_full:
+                    page_content = page_full.get('nodes', {}).get(page_id, {}).get('document', page_node)
+                else:
+                    page_content = page_node
+            except Exception as e:
+                self._log_tool_event(f"Warning: Could not fetch full page content for {page_id}: {e}")
+                page_content = page_node
+
+            page_data = process_page_to_toon_data(page_content)
+
+            # Limit frames per page
+            if len(page_data['frames']) > max_frames:
+                page_data['frames'] = page_data['frames'][:max_frames]
+                page_data['truncated'] = True
+
+            pages_data.append(page_data)
+
+        # Build file data structure
+        toon_data = {
+            'name': file_data.name,
+            'key': file_key,
+            'pages': pages_data,
+        }
+
+        # Serialize to TOON format
+        serializer = TOONSerializer()
+        result = serializer.serialize_file(toon_data)
+
+        self._log_tool_event("File structure extracted in TOON format")
+        return result
+
|
+
def get_page_flows_toon(
|
|
1227
|
+
self,
|
|
1228
|
+
url: Optional[str] = None,
|
|
1229
|
+
file_key: Optional[str] = None,
|
|
1230
|
+
page_id: Optional[str] = None,
|
|
1231
|
+
**kwargs,
|
|
1232
|
+
) -> str:
|
|
1233
|
+
"""
|
|
1234
|
+
Analyze a single page for user flows in TOON format.
|
|
1235
|
+
|
|
1236
|
+
Returns detailed flow analysis:
|
|
1237
|
+
- Frame sequence detection (from naming: 01_, Step 1, etc.)
|
|
1238
|
+
- Screen variant grouping (Login, Login_Error, Login_Loading)
|
|
1239
|
+
- CTA/button destination mapping
|
|
1240
|
+
- Spatial ordering hints
|
|
1241
|
+
|
|
1242
|
+
Use this for in-depth flow analysis of a specific page.
|
|
1243
|
+
Requires a PAGE ID (not a frame ID). Use get_file_structure_toon to find page IDs.
|
|
1244
|
+
"""
|
|
1245
|
+
self._log_tool_event("Analyzing page flows in TOON format")
|
|
1246
|
+
|
|
1247
|
+
# Parse URL
|
|
1248
|
+
if url:
|
|
1249
|
+
file_key, node_ids_from_url = self._parse_figma_url(url)
|
|
1250
|
+
if node_ids_from_url:
|
|
1251
|
+
page_id = node_ids_from_url[0]
|
|
1252
|
+
|
|
1253
|
+
if not file_key:
|
|
1254
|
+
raise ToolException("Either url or file_key must be provided")
|
|
1255
|
+
if not page_id:
|
|
1256
|
+
raise ToolException("page_id must be provided (or include node-id in URL)")
|
|
1257
|
+
|
|
1258
|
+
# Fetch node content
|
|
1259
|
+
self._log_tool_event(f"Fetching node {page_id} from file {file_key}")
|
|
1260
|
+
node_full = self._get_file_nodes(file_key, page_id)
|
|
1261
|
+
|
|
1262
|
+
if not node_full:
|
|
1263
|
+
raise ToolException(f"Failed to retrieve node {page_id}")
|
|
1264
|
+
|
|
1265
|
+
node_content = node_full.get('nodes', {}).get(page_id, {}).get('document', {})
|
|
1266
|
+
if not node_content:
|
|
1267
|
+
raise ToolException(f"Node {page_id} has no content")
|
|
1268
|
+
|
|
1269
|
+
# Check if this is a page (CANVAS) or a frame
|
|
1270
|
+
node_type = node_content.get('type', '').upper()
|
|
1271
|
+
if node_type != 'CANVAS':
|
|
1272
|
+
# This is a frame, not a page - provide helpful error
|
|
1273
|
+
raise ToolException(
|
|
1274
|
+
f"Node {page_id} is a {node_type}, not a PAGE. "
|
|
1275
|
+
f"This tool requires a page ID. Use get_file_structure_toon first to find page IDs "
|
|
1276
|
+
f"(look for PAGE: ... #<page_id>)"
|
|
1277
|
+
)
|
|
1278
|
+
|
|
1279
|
+
page_content = node_content
|
|
1280
|
+
|
|
1281
|
+
# Process page
|
|
1282
|
+
page_data = process_page_to_toon_data(page_content)
|
|
1283
|
+
frames = page_data.get('frames', [])
|
|
1284
|
+
|
|
1285
|
+
# Build detailed flow analysis
|
|
1286
|
+
lines = []
|
|
1287
|
+
lines.append(f"PAGE: {page_data.get('name', 'Untitled')} [id:{page_id}]")
|
|
1288
|
+
lines.append(f" frames: {len(frames)}")
|
|
1289
|
+
lines.append("")
|
|
1290
|
+
|
|
1291
|
+
# Sequence analysis
|
|
1292
|
+
sequences = detect_sequences(frames)
|
|
1293
|
+
if sequences:
|
|
1294
|
+
lines.append("SEQUENCES (by naming):")
|
|
1295
|
+
for seq in sequences:
|
|
1296
|
+
lines.append(f" {' > '.join(seq)}")
|
|
1297
|
+
lines.append("")
|
|
1298
|
+
|
|
1299
|
+
# Variant analysis
|
|
1300
|
+
variants = group_variants(frames)
|
|
1301
|
+
if variants:
|
|
1302
|
+
lines.append("VARIANTS (grouped screens):")
|
|
1303
|
+
for base, variant_list in variants.items():
|
|
1304
|
+
lines.append(f" {base}:")
|
|
1305
|
+
for v in variant_list:
|
|
1306
|
+
v_name = v.get('name', '')
|
|
1307
|
+
v_id = v.get('id', '')
|
|
1308
|
+
state = next((f.get('state', 'default') for f in frames if f.get('name') == v_name), 'default')
|
|
1309
|
+
lines.append(f" - {v_name} [{state}] #{v_id}")
|
|
1310
|
+
lines.append("")
|
|
1311
|
+
|
|
1312
|
+
# CTA mapping
|
|
1313
|
+
lines.append("CTA DESTINATIONS:")
|
|
1314
|
+
cta_map = {}
|
|
1315
|
+
for frame in frames:
|
|
1316
|
+
frame_name = frame.get('name', '')
|
|
1317
|
+
for btn in frame.get('buttons', []):
|
|
1318
|
+
dest = infer_cta_destination(btn)
|
|
1319
|
+
if dest not in cta_map:
|
|
1320
|
+
cta_map[dest] = []
|
|
1321
|
+
cta_map[dest].append(f'"{btn}" in {frame_name}')
|
|
1322
|
+
|
|
1323
|
+
for dest, ctas in cta_map.items():
|
|
1324
|
+
lines.append(f" > {dest}:")
|
|
1325
|
+
for cta in ctas[:5]: # Limit per destination
|
|
1326
|
+
lines.append(f" {cta}")
|
|
1327
|
+
lines.append("")
|
|
1328
|
+
|
|
1329
|
+
# Spatial ordering
|
|
1330
|
+
lines.append("SPATIAL ORDER (canvas position):")
|
|
1331
|
+
sorted_frames = sorted(frames, key=lambda f: (f['position']['y'], f['position']['x']))
|
|
1332
|
+
for i, frame in enumerate(sorted_frames[:20], 1):
|
|
1333
|
+
pos = frame.get('position', {})
|
|
1334
|
+
lines.append(f" {i}. {frame.get('name', '')} [{int(pos.get('x', 0))},{int(pos.get('y', 0))}]")
|
|
1335
|
+
|
|
1336
|
+
# Frame details
|
|
1337
|
+
lines.append("")
|
|
1338
|
+
lines.append("FRAME DETAILS:")
|
|
1339
|
+
|
|
1340
|
+
serializer = TOONSerializer()
|
|
1341
|
+
for frame in frames[:30]: # Limit frames
|
|
1342
|
+
frame_lines = serializer.serialize_frame(frame, level=1)
|
|
1343
|
+
lines.extend(frame_lines)
|
|
1344
|
+
|
|
1345
|
+
self._log_tool_event("Page flow analysis complete")
|
|
1346
|
+
return '\n'.join(lines)
|
|
1347
|
+
|
|
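`detect_sequences`, `group_variants`, and `infer_cta_destination` are helpers whose bodies are not in this diff; the docstring only fixes their inputs (names like `01_Login` or `Login_Error`). A plausible sketch of the two naming heuristics under that assumption:

```python
import re
from collections import defaultdict

def detect_sequences(frames):
    # Group frames whose names start with a numeric prefix such as "01_" or "Step 1"
    numbered = []
    for f in frames:
        m = re.match(r"^(?:0*(\d+)[_\-. ]|step\s*(\d+))", f.get("name", ""), re.IGNORECASE)
        if m:
            numbered.append((int(m.group(1) or m.group(2)), f.get("name", "")))
    return [[name for _, name in sorted(numbered)]] if len(numbered) > 1 else []

def group_variants(frames):
    # "Login_Error" and "Login_Loading" share the base name before "_"
    groups = defaultdict(list)
    for f in frames:
        base = f.get("name", "").split("_")[0]
        groups[base].append(f)
    return {base: fs for base, fs in groups.items() if len(fs) > 1}

frames = [{"name": "01_Login"}, {"name": "02_Home"},
          {"name": "Login_Error"}, {"name": "Login_Loading"}]
print(detect_sequences(frames))  # [['01_Login', '02_Home']]
print(group_variants(frames))    # {'Login': [{'name': 'Login_Error'}, {'name': 'Login_Loading'}]}
```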
+    def get_frame_detail_toon(
+        self,
+        file_key: str,
+        frame_ids: str,
+        **kwargs,
+    ) -> str:
+        """
+        Get detailed information for specific frames in TOON format.
+
+        Returns per-frame:
+        - All text content (headings, labels, buttons, body, errors)
+        - Component hierarchy
+        - Inferred screen type and state
+        - Position and size
+
+        Use this to drill down into specific screens identified from file structure.
+        """
+        try:
+            return self._get_frame_detail_toon_internal(file_key=file_key, frame_ids=frame_ids, **kwargs)
+        except ToolException as e:
+            raise ToolException(_handle_figma_error(e))
+
+    def _get_frame_detail_toon_internal(
+        self,
+        file_key: str,
+        frame_ids: str,
+        **kwargs,
+    ) -> str:
+        """Internal implementation of get_frame_detail_toon without error handling wrapper."""
+        self._log_tool_event("Getting frame details in TOON format")
+
+        ids_list = [fid.strip() for fid in frame_ids.split(',') if fid.strip()]
+        if not ids_list:
+            raise ToolException("frame_ids must contain at least one frame ID")
+
+        # Fetch frames
+        self._log_tool_event(f"Fetching {len(ids_list)} frames from file {file_key}")
+        nodes_data = self._get_file_nodes(file_key, ','.join(ids_list))
+
+        if not nodes_data:
+            raise ToolException(f"Failed to retrieve frames from file {file_key}")
+
+        # Process each frame
+        lines = [f"FRAMES [{len(ids_list)} requested]", ""]
+
+        serializer = TOONSerializer()
+
+        for frame_id in ids_list:
+            node_data = nodes_data.get('nodes', {}).get(frame_id, {})
+            frame_node = node_data.get('document', {})
+
+            if not frame_node:
+                lines.append(f"FRAME: {frame_id} [NOT FOUND]")
+                lines.append("")
+                continue
+
+            frame_data = process_frame_to_toon_data(frame_node)
+            frame_lines = serializer.serialize_frame(frame_data, level=0)
+            lines.extend(frame_lines)
+
+            # Add extra details for individual frames
+            lines.append(f"  ID: {frame_id}")
+
+            # Component breakdown
+            components = frame_data.get('components', [])
+            if components:
+                # Count component usage
+                from collections import Counter
+                comp_counts = Counter(components)
+                lines.append(f"  COMPONENT_COUNTS:")
+                for comp, count in comp_counts.most_common(10):
+                    lines.append(f"    {comp}: {count}")
+
+            lines.append("")
+
+        self._log_tool_event("Frame details extracted")
+        return '\n'.join(lines)
+
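The per-frame component breakdown relies on `collections.Counter.most_common(10)` to keep output bounded. The same pattern in isolation:

```python
from collections import Counter

components = ["Button", "Input", "Button", "Icon", "Button", "Input"]
comp_counts = Counter(components)
for comp, count in comp_counts.most_common(10):
    print(f"  {comp}: {count}")
# Button: 3, Input: 2, Icon: 1 -- most frequent first, capped at ten entries
```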
+    def analyze_file(
+        self,
+        url: Optional[str] = None,
+        file_key: Optional[str] = None,
+        node_id: Optional[str] = None,
+        include_pages: Optional[str] = None,
+        exclude_pages: Optional[str] = None,
+        max_frames: int = 50,
+        **kwargs,
+    ) -> str:
+        """
+        Comprehensive Figma file analyzer with LLM-powered insights.
+
+        Returns detailed analysis including:
+        - File/page/frame structure with all content (text, buttons, components)
+        - LLM-powered screen explanations with visual insights (using frame images)
+        - LLM-powered user flow analysis identifying key user journeys
+        - Design insights (patterns, gaps, recommendations)
+
+        Drill-Down:
+        - No node_id: Analyzes entire file (respecting include/exclude pages)
+        - node_id=page_id: Focuses on specific page
+        - node_id=frame_id: Returns detailed frame analysis
+
+        For targeted analysis of specific frames (2-3 frames), use get_frame_detail_toon instead.
+        """
+        try:
+            return self._analyze_file_internal(
+                url=url,
+                file_key=file_key,
+                node_id=node_id,
+                include_pages=include_pages,
+                exclude_pages=exclude_pages,
+                max_frames=max_frames,
+                **kwargs,
+            )
+        except ToolException as e:
+            raise ToolException(_handle_figma_error(e))
+
|
+
def _analyze_file_internal(
|
|
1466
|
+
self,
|
|
1467
|
+
url: Optional[str] = None,
|
|
1468
|
+
file_key: Optional[str] = None,
|
|
1469
|
+
node_id: Optional[str] = None,
|
|
1470
|
+
include_pages: Optional[str] = None,
|
|
1471
|
+
exclude_pages: Optional[str] = None,
|
|
1472
|
+
max_frames: int = 50,
|
|
1473
|
+
**kwargs,
|
|
1474
|
+
) -> str:
|
|
1475
|
+
"""Internal implementation of analyze_file without error handling wrapper."""
|
|
1476
|
+
# Always use maximum detail level and LLM analysis
|
|
1477
|
+
detail_level = 3
|
|
1478
|
+
llm_analysis = 'detailed' if self.llm else 'none'
|
|
1479
|
+
self._log_tool_event(f"Getting file in TOON format (detail_level={detail_level}, llm_analysis={llm_analysis})")
|
|
1480
|
+
|
|
1481
|
+
# Parse URL if provided
|
|
1482
|
+
if url:
|
|
1483
|
+
file_key, node_ids_from_url = self._parse_figma_url(url)
|
|
1484
|
+
if node_ids_from_url and not node_id:
|
|
1485
|
+
node_id = node_ids_from_url[0]
|
|
1486
|
+
|
|
1487
|
+
if not file_key:
|
|
1488
|
+
raise ToolException("Either url or file_key must be provided")
|
|
1489
|
+
|
|
1490
|
+
# Convert node_id from URL format (hyphen) to API format (colon)
|
|
1491
|
+
if node_id:
|
|
1492
|
+
node_id = node_id.replace('-', ':')
|
|
1493
|
+
|
|
1494
|
+
# Check if node_id is a frame or page (for drill-down)
|
|
1495
|
+
node_id_is_page = False
|
|
1496
|
+
if node_id:
|
|
1497
|
+
try:
|
|
1498
|
+
nodes_data = self._get_file_nodes(file_key, node_id)
|
|
1499
|
+
if nodes_data:
|
|
1500
|
+
node_info = nodes_data.get('nodes', {}).get(node_id, {})
|
|
1501
|
+
node_doc = node_info.get('document', {})
|
|
1502
|
+
node_type = node_doc.get('type', '').upper()
|
|
1503
|
+
|
|
1504
|
+
if node_type == 'FRAME':
|
|
1505
|
+
# It's a frame - use frame detail tool (internal to avoid double-wrapping)
|
|
1506
|
+
return self._get_frame_detail_toon_internal(file_key=file_key, frame_ids=node_id)
|
|
1507
|
+
elif node_type == 'CANVAS':
|
|
1508
|
+
# It's a page - we'll filter to this page
|
|
1509
|
+
node_id_is_page = True
|
|
1510
|
+
except Exception:
|
|
1511
|
+
pass # Fall through to page/file analysis
|
|
1512
|
+
|
|
1513
|
+
# Get file structure
|
|
1514
|
+
file_data = self._client.get_file(file_key, geometry='depth=1')
|
|
1515
|
+
if not file_data:
|
|
1516
|
+
raise ToolException(f"Failed to retrieve file {file_key}")
|
|
1517
|
+
|
|
1518
|
+
# Determine which pages to process
|
|
1519
|
+
# Check if document exists and has the expected structure
|
|
1520
|
+
if not hasattr(file_data, 'document') or file_data.document is None:
|
|
1521
|
+
self._log_tool_event(f"Warning: file_data has no document attribute. Type: {type(file_data)}")
|
|
1522
|
+
all_pages = []
|
|
1523
|
+
else:
|
|
1524
|
+
all_pages = file_data.document.get('children', [])
|
|
1525
|
+
self._log_tool_event(f"File has {len(all_pages)} pages, node_id={node_id}, node_id_is_page={node_id_is_page}")
|
|
1526
|
+
|
|
1527
|
+
# Only filter by node_id if it's confirmed to be a page ID
|
|
1528
|
+
if node_id and node_id_is_page:
|
|
1529
|
+
include_pages = node_id
|
|
1530
|
+
|
|
1531
|
+
include_ids = [p.strip() for p in include_pages.split(',')] if include_pages else None
|
|
1532
|
+
exclude_ids = [p.strip() for p in exclude_pages.split(',')] if exclude_pages else None
|
|
1533
|
+
|
|
1534
|
+
pages_to_process = []
|
|
1535
|
+
for page_node in all_pages:
|
|
1536
|
+
page_id = page_node.get('id', '')
|
|
1537
|
+
if include_ids and page_id not in include_ids:
|
|
1538
|
+
continue
|
|
1539
|
+
if exclude_ids and page_id in exclude_ids:
|
|
1540
|
+
continue
|
|
1541
|
+
pages_to_process.append(page_node)
|
|
1542
|
+
|
|
1543
|
+
# Build output based on detail level
|
|
1544
|
+
lines = [f"FILE: {file_data.name} [key:{file_key}]"]
|
|
1545
|
+
serializer = TOONSerializer()
|
|
1546
|
+
|
|
1547
|
+
all_frames_for_flows = [] # Collect frames for flow analysis at Level 2+
|
|
1548
|
+
|
|
1549
|
+
if not pages_to_process:
|
|
1550
|
+
if not all_pages:
|
|
1551
|
+
lines.append(" [No pages found in file - file may be empty or access restricted]")
|
|
1552
|
+
else:
|
|
1553
|
+
lines.append(f" [All {len(all_pages)} pages filtered out by include/exclude settings]")
|
|
1554
|
+
self._log_tool_event(f"No pages to process. all_pages={len(all_pages)}, include_ids={include_ids}, exclude_ids={exclude_ids}")
|
|
1555
|
+
|
|
1556
|
+
self._log_tool_event(f"Processing {len(pages_to_process)} pages at detail_level={detail_level}")
|
|
1557
|
+
|
|
1558
|
+
for page_node in pages_to_process:
|
|
1559
|
+
page_id = page_node.get('id', '')
|
|
1560
|
+
page_name = page_node.get('name', 'Untitled')
|
|
1561
|
+
|
|
1562
|
+
if detail_level == 1:
|
|
1563
|
+
# Level 1: Structure only - just hierarchy with IDs
|
|
1564
|
+
lines.append(f" PAGE: {page_name} #{page_id}")
|
|
1565
|
+
frames = page_node.get('children', [])[:max_frames]
|
|
1566
|
+
for frame in frames:
|
|
1567
|
+
if frame.get('type', '').upper() == 'FRAME':
|
|
1568
|
+
frame_id = frame.get('id', '')
|
|
1569
|
+
frame_name = frame.get('name', 'Untitled')
|
|
1570
|
+
lines.append(f" FRAME: {frame_name} #{frame_id}")
|
|
1571
|
+
else:
|
|
1572
|
+
# Level 2+: Need full page content - fetch via nodes API
|
|
1573
|
+
page_fetch_error = None
|
|
1574
|
+
try:
|
|
1575
|
+
nodes_data = self._get_file_nodes(file_key, page_id)
|
|
1576
|
+
if nodes_data:
|
|
1577
|
+
full_page_node = nodes_data.get('nodes', {}).get(page_id, {}).get('document', {})
|
|
1578
|
+
if full_page_node:
|
|
1579
|
+
page_node = full_page_node
|
|
1580
|
+
except ToolException as e:
|
|
1581
|
+
page_fetch_error = _handle_figma_error(e)
|
|
1582
|
+
self._log_tool_event(f"Error fetching page {page_id}: {page_fetch_error}")
|
|
1583
|
+
except Exception as e:
|
|
1584
|
+
page_fetch_error = str(e)
|
|
1585
|
+
self._log_tool_event(f"Error fetching page {page_id}: {e}")
|
|
1586
|
+
|
|
1587
|
+
# Process whatever data we have (full or shallow)
|
|
1588
|
+
page_data = process_page_to_toon_data(page_node, max_frames=max_frames)
|
|
1589
|
+
frames = page_data.get('frames', [])
|
|
1590
|
+
|
|
1591
|
+
# If we had an error and got no frames, show the error
|
|
1592
|
+
if page_fetch_error and not frames:
|
|
1593
|
+
lines.append(f" PAGE: {page_name} #{page_id}")
|
|
1594
|
+
lines.append(f" [Error: {page_fetch_error}]")
|
|
1595
|
+
continue
|
|
1596
|
+
|
|
1597
|
+
if detail_level == 2:
|
|
1598
|
+
# Level 2: Standard - content via serialize_page
|
|
1599
|
+
page_lines = serializer.serialize_page(page_data, level=0)
|
|
1600
|
+
lines.extend(page_lines)
|
|
1601
|
+
else:
|
|
1602
|
+
# Level 3: Detailed - content + per-frame component counts
|
|
1603
|
+
lines.append(f"PAGE: {page_data.get('name', 'Untitled')} #{page_data.get('id', '')}")
|
|
1604
|
+
for frame_data in frames:
|
|
1605
|
+
frame_lines = serializer.serialize_frame(frame_data, level=1)
|
|
1606
|
+
lines.extend(frame_lines)
|
|
1607
|
+
|
|
1608
|
+
# Add detailed component counts
|
|
1609
|
+
components = frame_data.get('components', [])
|
|
1610
|
+
if components:
|
|
1611
|
+
from collections import Counter
|
|
1612
|
+
comp_counts = Counter(components)
|
|
1613
|
+
lines.append(f" COMPONENT_COUNTS:")
|
|
1614
|
+
for comp, count in comp_counts.most_common(10):
|
|
1615
|
+
lines.append(f" {comp}: {count}")
|
|
1616
|
+
|
|
1617
|
+
# Collect frames for flow analysis
|
|
1618
|
+
all_frames_for_flows.extend(frames)
|
|
1619
|
+
|
|
1620
|
+
lines.append("")
|
|
1621
|
+
|
|
1622
|
+
# Level 2+: Add global flow analysis at the end
|
|
1623
|
+
if detail_level >= 2 and all_frames_for_flows:
|
|
1624
|
+
flow_lines = serializer.serialize_flows(all_frames_for_flows, level=0)
|
|
1625
|
+
if flow_lines:
|
|
1626
|
+
lines.append("FLOWS:")
|
|
1627
|
+
lines.extend(flow_lines)
|
|
1628
|
+
|
|
1629
|
+
toon_output = '\n'.join(lines)
|
|
1630
|
+
|
|
1631
|
+
# Add LLM analysis if requested
|
|
1632
|
+
if llm_analysis and llm_analysis != 'none' and self.llm:
|
|
1633
|
+
self._log_tool_event(f"Running LLM analysis (level={llm_analysis})")
|
|
1634
|
+
try:
|
|
1635
|
+
# Build file_data structure for LLM analysis
|
|
1636
|
+
file_data_for_llm = {
|
|
1637
|
+
'name': file_data.name,
|
|
1638
|
+
'key': file_key,
|
|
1639
|
+
'pages': [],
|
|
1640
|
+
}
|
|
1641
|
+
# Collect frame IDs for image fetching (for detailed analysis)
|
|
1642
|
+
all_frame_ids = []
|
|
1643
|
+
|
|
1644
|
+
# Re-use processed page data
|
|
1645
|
+
for page_node in pages_to_process:
|
|
1646
|
+
page_id = page_node.get('id', '')
|
|
1647
|
+
try:
|
|
1648
|
+
# Fetch full page if needed
|
|
1649
|
+
nodes_data = self._get_file_nodes(file_key, page_id)
|
|
1650
|
+
if nodes_data:
|
|
1651
|
+
full_page_node = nodes_data.get('nodes', {}).get(page_id, {}).get('document', {})
|
|
1652
|
+
if full_page_node:
|
|
1653
|
+
page_node = full_page_node
|
|
1654
|
+
except Exception:
|
|
1655
|
+
pass # Use shallow data
|
|
1656
|
+
page_data = process_page_to_toon_data(page_node, max_frames=max_frames)
|
|
1657
|
+
file_data_for_llm['pages'].append(page_data)
|
|
1658
|
+
|
|
1659
|
+
# Collect frame IDs for vision analysis
|
|
1660
|
+
for frame in page_data.get('frames', []):
|
|
1661
|
+
frame_id = frame.get('id')
|
|
1662
|
+
if frame_id:
|
|
1663
|
+
all_frame_ids.append(frame_id)
|
|
1664
|
+
|
|
1665
|
+
# Fetch frame images for vision-based analysis (detailed mode only)
|
|
1666
|
+
frame_images = {}
|
|
1667
|
+
# Use max_frames parameter to limit LLM analysis (respects user setting)
|
|
1668
|
+
frames_to_analyze = min(max_frames, len(all_frame_ids))
|
|
1669
|
+
if llm_analysis == 'detailed' and all_frame_ids:
|
|
1670
|
+
self._log_tool_event(f"Fetching images for {frames_to_analyze} frames (vision analysis)")
|
|
1671
|
+
try:
|
|
1672
|
+
frame_ids_to_fetch = all_frame_ids[:frames_to_analyze]
|
|
1673
|
+
images_response = self._client.get_file_images(
|
|
1674
|
+
file_key=file_key,
|
|
1675
|
+
ids=frame_ids_to_fetch,
|
|
1676
|
+
scale=1, # Scale 1 is sufficient for analysis
|
|
1677
|
+
format='png'
|
|
1678
|
+
)
|
|
1679
|
+
if images_response and hasattr(images_response, 'images'):
|
|
1680
|
+
frame_images = images_response.images or {}
|
|
1681
|
+
self._log_tool_event(f"Fetched {len(frame_images)} frame images")
|
|
1682
|
+
self._log_tool_event("Processing images and preparing for LLM analysis...")
|
|
1683
|
+
except Exception as img_err:
|
|
1684
|
+
self._log_tool_event(f"Frame image fetch failed (continuing without vision): {img_err}")
|
|
1685
|
+
# Continue without images - will fall back to text analysis
|
|
1686
|
+
|
|
1687
|
+
# Create status callback for progress updates
|
|
1688
|
+
def _status_callback(msg: str):
|
|
1689
|
+
self._log_tool_event(msg)
|
|
1690
|
+
|
|
1691
|
+
# Import here to avoid circular imports
|
|
1692
|
+
from .toon_tools import enrich_toon_with_llm_analysis
|
|
1693
|
+
|
|
1694
|
+
# Check if design insights should be included (default True)
|
|
1695
|
+
include_design_insights = kwargs.get('include_design_insights', True)
|
|
1696
|
+
|
|
1697
|
+
# Get parallel workers from toolkit config (or default)
|
|
1698
|
+
parallel_workers = getattr(self, "number_of_threads", DEFAULT_NUMBER_OF_THREADS)
|
|
1699
|
+
if parallel_workers is None or not isinstance(parallel_workers, int):
|
|
1700
|
+
parallel_workers = DEFAULT_NUMBER_OF_THREADS
|
|
1701
|
+
parallel_workers = max(1, min(parallel_workers, 5))
|
|
1702
|
+
|
|
1703
|
+
self._log_tool_event(f"Starting LLM analysis of {frames_to_analyze} frames with {parallel_workers} parallel workers...")
|
|
1704
|
+
toon_output = enrich_toon_with_llm_analysis(
|
|
1705
|
+
toon_output=toon_output,
|
|
1706
|
+
file_data=file_data_for_llm,
|
|
1707
|
+
llm=self.llm,
|
|
1708
|
+
analysis_level=llm_analysis,
|
|
1709
|
+
frame_images=frame_images,
|
|
1710
|
+
status_callback=_status_callback,
|
|
1711
|
+
include_design_insights=include_design_insights,
|
|
1712
|
+
parallel_workers=parallel_workers,
|
|
1713
|
+
max_frames_to_analyze=frames_to_analyze,
|
|
1714
|
+
)
|
|
1715
|
+
self._log_tool_event("LLM analysis complete")
|
|
1716
|
+
except Exception as e:
|
|
1717
|
+
self._log_tool_event(f"LLM analysis failed: {e}")
|
|
1718
|
+
# Return TOON output without LLM analysis on error
|
|
1719
|
+
toon_output += f"\n\n[LLM analysis failed: {e}]"
|
|
1720
|
+
|
|
1721
|
+
self._log_tool_event(f"File analysis complete (detail_level={detail_level})")
|
|
1722
|
+
return toon_output
|
|
1723
|
+
|
|
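Two normalizations in `_analyze_file_internal` are worth isolating: URL node IDs arrive as `1-2` while the API wants `1:2`, and the configured thread count is clamped into [1, 5]. Both are easy to verify standalone (the default of 4 is an assumed value; `DEFAULT_NUMBER_OF_THREADS` is defined elsewhere in the module):

```python
def normalize_node_id(node_id: str) -> str:
    # Figma URLs carry node-id=1-2; the REST API expects 1:2
    return node_id.replace('-', ':')

def clamp_workers(configured, default=4, ceiling=5):
    # Mirrors the diff's guard: None or non-int falls back to the default,
    # then the value is clamped into [1, ceiling]
    if configured is None or not isinstance(configured, int):
        configured = default
    return max(1, min(configured, ceiling))

assert normalize_node_id("12-345") == "12:345"
assert clamp_workers(None) == 4
assert clamp_workers(99) == 5
assert clamp_workers(0) == 1
```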
+    @extend_with_parent_available_tools
     def get_available_tools(self):
         return [
             {
@@ -525,6 +1736,13 @@ class FigmaApiWrapper(BaseVectorStoreToolApiWrapper):
                 "args_schema": ArgsSchema.File.value,
                 "ref": self.get_file,
             },
+            # TODO disabled until new requirements
+            # {
+            #     "name": "get_file_summary",
+            #     "description": self.get_file_summary.__doc__,
+            #     "args_schema": ArgsSchema.FileSummary.value,
+            #     "ref": self.get_file_summary,
+            # },
             {
                 "name": "get_file_versions",
                 "description": self.get_file_versions.__doc__,
@@ -561,4 +1779,19 @@ class FigmaApiWrapper(BaseVectorStoreToolApiWrapper):
                 "args_schema": ArgsSchema.ProjectFiles.value,
                 "ref": self.get_project_files,
             },
+            # TOON Format Tools (Token-Optimized)
+            # Primary unified tool with configurable detail levels
+            {
+                "name": "analyze_file",
+                "description": self.analyze_file.__doc__,
+                "args_schema": AnalyzeFileSchema,
+                "ref": self.analyze_file,
+            },
+            # Targeted drill-down for specific frames (more efficient than level 3 for 2-3 frames)
+            {
+                "name": "get_frame_detail_toon",
+                "description": self.get_frame_detail_toon.__doc__,
+                "args_schema": FrameDetailTOONSchema,
+                "ref": self.get_frame_detail_toon,
+            },
         ]
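Each registration entry pairs a callable `ref` with a name, a docstring-derived description, and a Pydantic `args_schema`. A minimal sketch of how a consumer might index these specs for dispatch (the lambda stands in for a bound method; the binding code is assumed, not part of this diff):

```python
def bind_tools(specs):
    # Index the tool specs by name so an agent loop can dispatch calls
    return {spec["name"]: spec for spec in specs}

specs = [
    {"name": "analyze_file",
     "description": "Comprehensive Figma file analyzer...",
     "args_schema": None,  # the real entries carry a Pydantic schema here
     "ref": lambda **kw: f"analyzed {kw.get('file_key')}"},
]

tools = bind_tools(specs)
print(tools["analyze_file"]["ref"](file_key="abc123"))  # analyzed abc123
```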