alita-sdk 0.3.603__py3-none-any.whl → 0.3.611__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of alita-sdk might be problematic.
- alita_sdk/cli/agents.py +108 -826
- alita_sdk/cli/testcases/__init__.py +94 -0
- alita_sdk/cli/testcases/data_generation.py +119 -0
- alita_sdk/cli/testcases/discovery.py +96 -0
- alita_sdk/cli/testcases/executor.py +84 -0
- alita_sdk/cli/testcases/logger.py +85 -0
- alita_sdk/cli/testcases/parser.py +172 -0
- alita_sdk/cli/testcases/prompts.py +91 -0
- alita_sdk/cli/testcases/reporting.py +125 -0
- alita_sdk/cli/testcases/setup.py +108 -0
- alita_sdk/cli/testcases/test_runner.py +282 -0
- alita_sdk/cli/testcases/utils.py +39 -0
- alita_sdk/cli/testcases/validation.py +90 -0
- alita_sdk/cli/testcases/workflow.py +196 -0
- alita_sdk/configurations/openapi.py +2 -2
- alita_sdk/runtime/clients/artifact.py +1 -1
- alita_sdk/runtime/langchain/langraph_agent.py +21 -6
- alita_sdk/runtime/tools/artifact.py +253 -8
- alita_sdk/runtime/tools/function.py +25 -6
- alita_sdk/runtime/tools/llm.py +12 -11
- alita_sdk/runtime/utils/serialization.py +155 -0
- alita_sdk/tools/bitbucket/api_wrapper.py +31 -30
- alita_sdk/tools/bitbucket/cloud_api_wrapper.py +49 -35
- alita_sdk/tools/confluence/api_wrapper.py +8 -1
- alita_sdk/tools/elitea_base.py +40 -36
- alita_sdk/tools/figma/api_wrapper.py +140 -83
- alita_sdk/tools/github/graphql_client_wrapper.py +1 -0
- alita_sdk/tools/utils/text_operations.py +156 -52
- {alita_sdk-0.3.603.dist-info → alita_sdk-0.3.611.dist-info}/METADATA +1 -1
- {alita_sdk-0.3.603.dist-info → alita_sdk-0.3.611.dist-info}/RECORD +34 -20
- {alita_sdk-0.3.603.dist-info → alita_sdk-0.3.611.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.603.dist-info → alita_sdk-0.3.611.dist-info}/entry_points.txt +0 -0
- {alita_sdk-0.3.603.dist-info → alita_sdk-0.3.611.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.603.dist-info → alita_sdk-0.3.611.dist-info}/top_level.txt +0 -0

alita_sdk/tools/figma/api_wrapper.py

@@ -369,50 +369,94 @@ class FigmaApiWrapper(NonCodeIndexerToolkit):
         )
 
     def _base_loader(
-        … 8 removed lines, content not captured by the diff viewer …
-        number_of_threads: Optional[int] = None,
-        **kwargs
+        self,
+        urls_or_file_keys: Optional[str] = None,
+        node_ids_include: Optional[List[str]] = None,
+        node_ids_exclude: Optional[List[str]] = None,
+        node_types_include: Optional[List[str]] = None,
+        node_types_exclude: Optional[List[str]] = None,
+        number_of_threads: Optional[int] = None,
+        **kwargs,
     ) -> Generator[Document, None, None]:
-        … 33 removed lines, content not captured by the diff viewer …
+        """Base loader used by the indexer tool.
+
+        Args:
+            urls_or_file_keys: Comma-separated list of Figma file URLs or raw file keys. Each
+                entry can be:
+                - a full Figma URL (https://www.figma.com/file/... or /design/...) optionally
+                  with a node-id query parameter, or
+                - a bare file key string.
+                URL entries are parsed via _parse_figma_url; raw keys are used as-is.
+            node_ids_include: Optional list of top-level node IDs (pages) to include when an
+                entry does not specify node-id in the URL and is not otherwise constrained.
+            node_ids_exclude: Optional list of top-level node IDs (pages) to exclude when
+                node_ids_include is not provided.
+            node_types_include: Optional list of node types to include within each page.
+            node_types_exclude: Optional list of node types to exclude when node_types_include
+                is not provided.
+            number_of_threads: Optional override for number of worker threads to use when
+                processing images.
+        """
+        if not urls_or_file_keys:
+            raise ValueError("You must provide urls_or_file_keys with at least one URL or file key.")
+
+        # Parse the comma-separated entries into concrete (file_key, per_entry_node_ids_include)
+        entries = [item.strip() for item in urls_or_file_keys.split(',') if item.strip()]
+        if not entries:
+            raise ValueError("You must provide urls_or_file_keys with at least one non-empty value.")
+
+        # Validate number_of_threads override once and pass via metadata
+        metadata_threads_override: Optional[int] = None
+        if isinstance(number_of_threads, int) and 1 <= number_of_threads <= 5:
+            metadata_threads_override = number_of_threads
+
+        for entry in entries:
+            per_file_node_ids_include: Optional[List[str]] = None
+            file_key: Optional[str] = None
+
+            # Heuristic: treat as URL if it has a scheme and figma.com host
+            if entry.startswith("http://") or entry.startswith("https://"):
+                file_key, node_ids_from_url = self._parse_figma_url(entry)
+                per_file_node_ids_include = node_ids_from_url
+            else:
+                # Assume this is a raw file key
+                file_key = entry
+
+            if not file_key:
+                continue
+
+            # If URL-derived node IDs exist, they take precedence over global include list
+            effective_node_ids_include = per_file_node_ids_include or node_ids_include or []
+
+            self._log_tool_event(f"Loading file `{file_key}`")
+            try:
+                file = self._client.get_file(file_key, geometry='depth=1')
+            except ToolException as e:
+                # Enrich the error message with the file_key for easier troubleshooting
+                raise ToolException(
+                    f"Failed to retrieve Figma file '{file_key}'. Original error: {e}"
+                ) from e
+
+            if not file:
+                raise ToolException(
+                    f"Unexpected error while retrieving file {file_key}. Please try specifying the node-id of an inner page."
+                )
+
+            metadata = {
+                'id': file_key,
+                'file_key': file_key,
+                'name': file.name,
+                'updated_on': file.last_modified,
+                'figma_pages_include': effective_node_ids_include,
+                'figma_pages_exclude': node_ids_exclude or [],
+                'figma_nodes_include': node_types_include or [],
+                'figma_nodes_exclude': node_types_exclude or [],
+            }
+
+            if metadata_threads_override is not None:
+                metadata['number_of_threads_override'] = metadata_threads_override
+
+            yield Document(page_content=json.dumps(metadata), metadata=metadata)
 
     def has_image_representation(self, node):
         node_type = node.get('type', '').lower()
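
The new loader treats an entry as a URL only when it starts with an explicit http:// or https:// scheme; anything else is passed through as a raw file key, and blank entries are dropped. A minimal standalone sketch of that entry-splitting behavior (the split_entries helper is hypothetical, not part of the SDK):

    from typing import List, Tuple

    def split_entries(urls_or_file_keys: str) -> List[Tuple[str, str]]:
        """Mimic _base_loader's entry handling: (entry, 'url' or 'key') pairs."""
        results = []
        for item in urls_or_file_keys.split(','):
            entry = item.strip()
            if not entry:
                continue  # blank entries are silently skipped, as in _base_loader
            kind = "url" if entry.startswith(("http://", "https://")) else "key"
            results.append((entry, kind))
        return results

    # Mixed input: one full URL with a node-id query, plus a bare file key
    print(split_entries("https://www.figma.com/file/ABC123/My-File?node-id=1-2, XYZ789"))
    # [('https://www.figma.com/file/ABC123/My-File?node-id=1-2', 'url'), ('XYZ789', 'key')]
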

alita_sdk/tools/figma/api_wrapper.py

@@ -672,44 +716,58 @@ class FigmaApiWrapper(NonCodeIndexerToolkit):
         )
 
     def _index_tool_params(self):
-        … 38 removed lines, content not captured by the diff viewer …
+        """Return the parameters for indexing data."""
+        return {
+            "urls_or_file_keys": (str, Field(
+                description=(
+                    "Comma-separated list of Figma file URLs or raw file keys to index. "
+                    "Each entry may be a full Figma URL (with optional node-id query) or a file key. "
+                    "Example: 'https://www.figma.com/file/<FILE_KEY>/...?node-id=<NODE_ID>,Fp24FuzPwH0L74ODSrCnQo'."
+                ))),
+            'number_of_threads': (Optional[int], Field(
+                description=(
+                    "Optional override for the number of worker threads used when indexing Figma images. "
+                    f"Valid values are from 1 to 5. Default is {DEFAULT_NUMBER_OF_THREADS}."
+                ),
+                default=DEFAULT_NUMBER_OF_THREADS,
+                ge=1,
+                le=5,
+            )),
+            'node_ids_include': (Optional[List[str]], Field(
+                description=(
+                    "List of top-level node IDs (pages) to include in the index. Values should match "
+                    "Figma node-id format like ['123-56', '7651-9230']. These include rules are applied "
+                    "for each entry in urls_or_file_keys when the URL does not specify node-id and for "
+                    "each raw file_key entry."
+                ),
+                default=None,
+            )),
+            'node_ids_exclude': (Optional[List[str]], Field(
+                description=(
+                    "List of top-level node IDs (pages) to exclude from the index when node_ids_include "
+                    "is not provided. Values should match Figma node-id format. These exclude rules are "
+                    "applied for each entry in urls_or_file_keys (URLs without node-id and raw fileKey "
+                    "entries)."
+                ),
+                default=None,
+            )),
+            'node_types_include': (Optional[List[str]], Field(
+                description=(
+                    "List of node types to include in the index, e.g. ['FRAME', 'COMPONENT', 'RECTANGLE', "
+                    "'COMPONENT_SET', 'INSTANCE', 'VECTOR', ...]. If provided, only these types are indexed "
+                    "for each page loaded from each urls_or_file_keys entry."
+                ),
+                default=None,
+            )),
+            'node_types_exclude': (Optional[List[str]], Field(
+                description=(
+                    "List of node types to exclude from the index when node_types_include is not provided. "
+                    "These exclude rules are applied to nodes within each page loaded from each "
+                    "urls_or_file_keys entry."
+                ),
+                default=None,
+            )),
+        }
 
     def _send_request(
         self,
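
Each value in the returned dict is a (type, Field(...)) tuple keyed by parameter name — the shape pydantic's create_model accepts for building a dynamic input schema, so the ge=1/le=5 bounds on number_of_threads are enforced at validation time. A minimal sketch of that consumption pattern (how the SDK actually wires these params is assumed here, and DEFAULT_NUMBER_OF_THREADS is a stand-in value):

    from typing import Optional
    from pydantic import Field, ValidationError, create_model

    DEFAULT_NUMBER_OF_THREADS = 2  # stand-in; the SDK defines its own constant

    params = {
        "urls_or_file_keys": (str, Field(description="Figma URLs or file keys")),
        "number_of_threads": (Optional[int], Field(
            default=DEFAULT_NUMBER_OF_THREADS, ge=1, le=5,
            description="Worker threads for image indexing",
        )),
    }

    # Build a dynamic input schema from the (type, Field) tuples
    IndexToolInput = create_model("IndexToolInput", **params)

    print(IndexToolInput(urls_or_file_keys="ABC123").number_of_threads)  # 2 (default)
    try:
        IndexToolInput(urls_or_file_keys="ABC123", number_of_threads=9)
    except ValidationError:
        print("rejected: 9 violates le=5")
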

alita_sdk/tools/figma/api_wrapper.py

@@ -972,8 +1030,7 @@ class FigmaApiWrapper(NonCodeIndexerToolkit):
 
         # Delegate URL and file_key handling to _base_loader
         base_docs = self._base_loader(
-            … 1 removed line, content not captured by the diff viewer …
-            file_keys_include=[file_key] if file_key else None,
+            urls_or_file_keys=url or file_key,
             node_ids_include=node_ids_include_list,
             node_ids_exclude=node_ids_exclude_list,
         )

alita_sdk/tools/utils/text_operations.py

@@ -26,48 +26,119 @@ TEXT_EDITABLE_EXTENSIONS = {
 def parse_old_new_markers(file_query: str) -> List[Tuple[str, str]]:
     """Parse OLD/NEW marker-based edit instructions.
 
-    … 10 removed lines, content not captured by the diff viewer …
+    Format:
+
+        OLD <<<<[optional text or newline]
+        ... OLD content ...
+        >>>> OLD
+        NEW <<<<[optional text or newline]
+        ... NEW content ...
+        >>>> NEW
+
+    Rules:
+    - OLD block:
+      - Starts at the first line that contains "OLD <<<<".
+      - OLD content includes:
+        * On that line: everything after the first "OLD <<<<" (if any),
+        * Plus all following lines up to (but not including) the first line
+          that contains ">>>> OLD".
+    - NEW block:
+      - Starts at the first line, after the OLD block, that contains "NEW <<<<".
+      - NEW content includes:
+        * On that line: everything after the first "NEW <<<<" (if any),
+        * Plus all following lines up to (but not including) the first line
+          that contains ">>>> NEW".
+    - Only the first complete OLD/NEW pair is returned.
 
     Args:
         file_query: String containing marked old and new content sections.
 
     Returns:
-        … 3 removed lines, content not captured by the diff viewer …
+        A list with at most one (old_content, new_content) tuple. Each
+        content string includes newlines but excludes the marker substrings
+        and their closing lines.
     """
-    # Primary pattern: correct 4-< markers
-    pattern_primary = re.compile(
-        r"OLD <<<<(\s*.*?\s*)>>>> OLD"   # OLD block
-        r"\s*"                           # optional whitespace between OLD/NEW
-        r"NEW <<<<(\s*.*?\s*)>>>> NEW",  # NEW block
-        re.DOTALL,
-    )
 
-    … 1 removed line, content not captured by the diff viewer …
+    if not file_query:
+        return []
 
-    … 9 removed lines, content not captured by the diff viewer …
+    lines = file_query.splitlines(keepends=True)
+
+    old_open = "OLD <<<<"
+    old_close = ">>>> OLD"
+    new_open = "NEW <<<<"
+    new_close = ">>>> NEW"
+
+    state = "search_old"  # -> in_old -> search_new -> in_new
+    old_parts: list[str] = []
+    new_parts: list[str] = []
+
+    i = 0
+    n = len(lines)
+
+    # 1. Find OLD block
+    while i < n and state == "search_old":
+        line = lines[i]
+        pos = line.find(old_open)
+        if pos != -1:
+            # Start OLD content after the first "OLD <<<<" on this line
+            after = line[pos + len(old_open):]
+            old_parts.append(after)
+            state = "in_old"
+        i += 1
+
+    if state != "in_old":
+        # No OLD block found
+        return []
+
+    # Collect until a line containing ">>>> OLD"
+    while i < n and state == "in_old":
+        line = lines[i]
+        if old_close in line:
+            # Stop before this line; do not include any part of it
+            state = "search_new"
+        else:
+            old_parts.append(line)
+        i += 1
+
+    if state != "search_new":
+        # Didn't find a proper OLD close
+        return []
+
+    # 2. Find NEW block after OLD
+    while i < n and state == "search_new":
+        line = lines[i]
+        pos = line.find(new_open)
+        if pos != -1:
+            # NEW content starts from the *next* line after the marker line
+            state = "in_new"
+            i += 1
+            break
+        i += 1
+
+    if state != "in_new":
+        # No NEW block found
+        return []
+
+    # Collect until a line containing ">>>> NEW"
+    while i < n and state == "in_new":
+        line = lines[i]
+        close_pos = line.rfind(new_close)
+        if close_pos != -1:
+            # Include content up to but not including the *last* ">>>> NEW" on the line
+            before_close = line[:close_pos]
+            new_parts.append(before_close)
+            break
+        else:
+            new_parts.append(line)
+            i += 1
+
+    if not old_parts or not new_parts:
+        return []
 
-    … 2 removed lines, content not captured by the diff viewer …
-    return [(
+    old_content = "".join(old_parts)
+    new_content = "".join(new_parts)
+    return [(old_content, new_content)]
 
 
 def is_text_editable(filename: str) -> bool:
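
To illustrate the state machine above (note that OLD content starts right after the "OLD <<<<" marker on the same line, while NEW content starts on the line *after* the "NEW <<<<" line), a usage sketch assuming the module's import path from this wheel:

    from alita_sdk.tools.utils.text_operations import parse_old_new_markers

    query = (
        "OLD <<<<\n"
        "def greet():\n"
        '    return "hi"\n'
        ">>>> OLD\n"
        "NEW <<<<\n"
        "def greet():\n"
        '    return "hello"\n'
        ">>>> NEW"
    )

    [(old, new)] = parse_old_new_markers(query)
    print(repr(old))  # '\ndef greet():\n    return "hi"\n'  (newline after the OLD marker included)
    print(repr(new))  # 'def greet():\n    return "hello"\n'
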

alita_sdk/tools/utils/text_operations.py

@@ -250,7 +321,7 @@ def try_apply_edit(
     old_text: str,
     new_text: str,
     file_path: Optional[str] = None,
-) -> Tuple[str,
+) -> Tuple[str, Optional[str]]:
     """Apply a single OLD/NEW edit with a tolerant fallback.
 
     This helper is used by edit_file to apply one (old_text, new_text) pair:

@@ -271,21 +342,53 @@ def try_apply_edit(
         file_path: Optional path for logging context
 
     Returns:
-        (updated_content,
+        (updated_content, warning_message)
+        - updated_content: resulting content (may be unchanged)
+        - warning_message: human-readable warning if no edit was applied
+          or if the operation was ambiguous; None if an edit was
+          successfully and unambiguously applied.
     """
     # Stage 1: exact match
-    if old_text
-    … 1 removed line, content not captured by the diff viewer …
+    if old_text:
+        occurrences = content.count(old_text)
+        if occurrences == 1:
+            return content.replace(old_text, new_text, 1), None
+        if occurrences > 1:
+            msg = (
+                "Exact OLD block appears %d times in %s; no replacement applied to avoid ambiguity. "
+                "OLD value: %r" % (
+                    occurrences,
+                    file_path or "<unknown>",
+                    old_text,
+                )
+            )
+            logger.warning(msg)
+            return content, msg
 
     # Stage 2: tolerant match
-    if not old_text.strip() or not content:
-    … 1 removed line, content not captured by the diff viewer …
+    if not old_text or not old_text.strip() or not content:
+        msg = None
+        if not old_text or not old_text.strip():
+            msg = (
+                "OLD block is empty or whitespace-only; no replacement applied. "
+                "OLD value: %r" % (old_text,)
+            )
+        elif not content:
+            msg = "Content is empty; no replacement applied."
+        if msg:
+            logger.warning(msg)
+        return content, msg
 
     # Logical OLD: drop empty/whitespace-only lines
     old_lines_raw = old_text.splitlines()
     old_lines = [l for l in old_lines_raw if l.strip()]
     if not old_lines:
-        … 1 removed line, content not captured by the diff viewer …
+        msg = (
+            "OLD block contains only empty/whitespace lines; no replacement applied. "
+            "OLD value: %r" % (old_text,)
+        )
+        logger.warning(msg)
+        return content, msg
 
     # Precompute normalized OLD (joined by '\n')
     norm_old = _normalize_for_match("\n".join(old_lines))

@@ -318,29 +421,30 @@ def try_apply_edit(
             candidates.append((start, idx, block))
 
     if not candidates:
-        … 1 removed line, content not captured by the diff viewer …
-        "
-        file_path or "<unknown>",
+        msg = (
+            "Normalized OLD block not found in %s. OLD value: %r"
+            % (file_path or "<unknown>", old_text)
         )
-        … 1 removed line, content not captured by the diff viewer …
+        logger.warning(msg)
+        return content, msg
 
     if len(candidates) > 1:
-        … 1 removed line, content not captured by the diff viewer …
-        "
-        "no change applied to avoid ambiguity."
-        file_path or "<unknown>",
+        msg = (
+            "Multiple candidate regions for OLD block in %s; "
+            "no change applied to avoid ambiguity. OLD value: %r"
+            % (file_path or "<unknown>", old_text)
         )
-        … 1 removed line, content not captured by the diff viewer …
+        logger.warning(msg)
+        return content, msg
 
     start_idx, end_idx, candidate_block = candidates[0]
     updated = content.replace(candidate_block, new_text, 1)
 
     logger.info(
-        "
+        "Applied tolerant OLD/NEW replacement in %s around lines %d-%d",
         file_path or "<unknown>",
         start_idx + 1,
        start_idx + len(old_lines),
     )
 
-    return updated,
-    … 1 removed line, content not captured by the diff viewer …
+    return updated, None
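
A quick sketch of the new (updated_content, warning_message) contract. The leading content parameter is assumed from the function body's use of `content`; only old_text, new_text, and file_path are visible in this hunk:

    from alita_sdk.tools.utils.text_operations import try_apply_edit

    # Unambiguous exact match: edit applied, warning is None
    updated, warning = try_apply_edit("alpha\nbeta\ngamma\n", "beta", "BETA", file_path="demo.txt")
    print(repr(updated), warning)  # 'alpha\nBETA\ngamma\n' None

    # OLD text occurs twice: content returned unchanged, warning explains the ambiguity
    updated, warning = try_apply_edit("x\nx\n", "x", "y", file_path="demo.txt")
    print(updated == "x\nx\n", warning is not None)  # True True
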

{alita_sdk-0.3.603.dist-info → alita_sdk-0.3.611.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: alita_sdk
-Version: 0.3.603
+Version: 0.3.611
 Summary: SDK for building langchain agents using resources from Alita
 Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedj27@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
 License-Expression: Apache-2.0