alita-sdk 0.3.428b2__py3-none-any.whl → 0.3.428.post2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alita_sdk/runtime/clients/client.py +31 -12
- alita_sdk/tools/figma/__init__.py +49 -3
- alita_sdk/tools/figma/api_wrapper.py +1157 -123
- alita_sdk/tools/figma/figma_client.py +73 -0
- alita_sdk/tools/figma/toon_tools.py +2748 -0
- alita_sdk/tools/qtest/api_wrapper.py +1168 -117
- alita_sdk/tools/utils/content_parser.py +36 -1
- {alita_sdk-0.3.428b2.dist-info → alita_sdk-0.3.428.post2.dist-info}/METADATA +1 -1
- {alita_sdk-0.3.428b2.dist-info → alita_sdk-0.3.428.post2.dist-info}/RECORD +12 -10
- {alita_sdk-0.3.428b2.dist-info → alita_sdk-0.3.428.post2.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.428b2.dist-info → alita_sdk-0.3.428.post2.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.428b2.dist-info → alita_sdk-0.3.428.post2.dist-info}/top_level.txt +0 -0
alita_sdk/runtime/clients/client.py

@@ -206,18 +206,37 @@ class AlitaClient:

         logger.info(f"Creating ChatOpenAI model: {model_name} with config: {model_config}")

-        [12 deleted lines (old 209-220) whose content is not captured in this extract]
+        try:
+            from tools import this  # pylint: disable=E0401,C0415
+            worker_config = this.for_module("indexer_worker").descriptor.config
+        except:  # pylint: disable=W0702
+            worker_config = {}
+
+        use_responses_api = False
+
+        if worker_config and isinstance(worker_config, dict):
+            for target_name_tag in worker_config.get("use_responses_api_for", []):
+                if target_name_tag in model_name:
+                    use_responses_api = True
+                    break
+
+        target_kwargs = {
+            "base_url": f"{self.base_url}{self.llm_path}",
+            "model": model_name,
+            "api_key": self.auth_token,
+            "streaming": model_config.get("streaming", True),
+            "stream_usage": model_config.get("stream_usage", True),
+            "max_tokens": model_config.get("max_tokens", None),
+            "temperature": model_config.get("temperature"),
+            "max_retries": model_config.get("max_retries", 3),
+            "seed": model_config.get("seed", None),
+            "openai_organization": str(self.project_id),
+        }
+
+        if use_responses_api:
+            target_kwargs["use_responses_api"] = True
+
+        return ChatOpenAI(**target_kwargs)

     def generate_image(self,
                        prompt: str,
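The new branch above switches ChatOpenAI into Responses API mode whenever any tag listed under the worker's use_responses_api_for setting appears as a substring of the model name; otherwise the kwargs dict is passed through unchanged. A minimal sketch of just that matching step, with a hypothetical worker_config (only the use_responses_api_for key comes from the diff; the tag values are invented):

def needs_responses_api(model_name: str, worker_config: dict) -> bool:
    # A model opts in when any configured tag is a substring of its name.
    if worker_config and isinstance(worker_config, dict):
        for target_name_tag in worker_config.get("use_responses_api_for", []):
            if target_name_tag in model_name:
                return True
    return False

worker_config = {"use_responses_api_for": ["gpt-5"]}      # hypothetical example value
print(needs_responses_api("gpt-5-mini", worker_config))   # True: "gpt-5" is a substring
print(needs_responses_api("gpt-4.1", worker_config))      # False: no tag matches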
alita_sdk/tools/figma/__init__.py

@@ -1,10 +1,16 @@
-from typing import List, Literal, Optional
+from typing import Dict, List, Literal, Optional

 from langchain_core.tools import BaseTool, BaseToolkit
 from pydantic import BaseModel, ConfigDict, Field, create_model

 from ..base.tool import BaseAction
-from .api_wrapper import
+from .api_wrapper import (
+    FigmaApiWrapper,
+    GLOBAL_LIMIT,
+    DEFAULT_FIGMA_IMAGES_PROMPT,
+    DEFAULT_FIGMA_SUMMARY_PROMPT,
+    DEFAULT_NUMBER_OF_THREADS,
+)
 from ..elitea_base import filter_missconfigured_index_tools
 from ..utils import clean_string, TOOLKIT_SPLITTER, get_max_toolkit_length
 from ...configurations.figma import FigmaConfiguration
@@ -28,7 +34,14 @@ def get_tools(tool):
             collection_name=str(tool['toolkit_name']),
             doctype='doc',
             embedding_model=tool['settings'].get('embedding_model'),
-            vectorstore_type="PGVector"
+            vectorstore_type="PGVector",
+            # figma summary/image prompt settings (toolkit-level)
+            # TODO disabled until new requirements
+            # apply_images_prompt=tool["settings"].get("apply_images_prompt"),
+            # images_prompt=tool["settings"].get("images_prompt"),
+            # apply_summary_prompt=tool["settings"].get("apply_summary_prompt"),
+            # summary_prompt=tool["settings"].get("summary_prompt"),
+            # number_of_threads=tool["settings"].get("number_of_threads"),
         )
         .get_tools()
     )
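For reference, a hypothetical toolkit entry that satisfies the lookups get_tools performs above; only the key names (toolkit_name, settings, embedding_model) come from the diff, the values are invented:

tool = {
    "toolkit_name": "figma_design_docs",            # becomes the vectorstore collection name
    "settings": {
        "embedding_model": "text-embedding-3-small",
        # The prompt-related settings are commented out above
        # ("TODO disabled until new requirements") and are not read yet.
    },
}

collection_name = str(tool["toolkit_name"])                # "figma_design_docs"
embedding_model = tool["settings"].get("embedding_model")  # "text-embedding-3-small"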
@@ -47,6 +60,39 @@ class FigmaToolkit(BaseToolkit):
         FigmaToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
+            # TODO disabled until new requirements
+            # apply_images_prompt=(Optional[bool], Field(
+            #     description="Enable advanced image processing instructions for Figma image nodes.",
+            #     default=True,
+            # )),
+            # images_prompt=(Optional[Dict[str, str]], Field(
+            #     description=(
+            #         "Instruction for how to analyze image-based nodes "
+            #         "(screenshots, diagrams, etc.) during Figma file retrieving. "
+            #         "Must contain a single 'prompt' key with the text."
+            #     ),
+            #     default=DEFAULT_FIGMA_IMAGES_PROMPT,
+            # )),
+            # apply_summary_prompt=(Optional[bool], Field(
+            #     description="Enable LLM-based summarization over loaded Figma data.",
+            #     default=True,
+            # )),
+            # summary_prompt=(Optional[Dict[str, str]], Field(
+            #     description=(
+            #         "Instruction for the LLM on how to summarize loaded Figma data. "
+            #         "Must contain a single 'prompt' key with the text."
+            #     ),
+            #     default=DEFAULT_FIGMA_SUMMARY_PROMPT,
+            # )),
+            number_of_threads=(Optional[int], Field(
+                description=(
+                    "Number of worker threads to use when downloading and processing Figma images. "
+                    f"Valid values are from 1 to 5. Default is {DEFAULT_NUMBER_OF_THREADS}."
+                ),
+                default=DEFAULT_NUMBER_OF_THREADS,
+                ge=1,
+                le=5,
+            )),
             global_limit=(Optional[int], Field(description="Global limit", default=GLOBAL_LIMIT)),
             global_regexp=(Optional[str], Field(description="Global regex pattern", default=None)),
             selected_tools=(