alita-sdk 0.3.603__py3-none-any.whl → 0.3.611__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of alita-sdk might be problematic.

Files changed (34)
  1. alita_sdk/cli/agents.py +108 -826
  2. alita_sdk/cli/testcases/__init__.py +94 -0
  3. alita_sdk/cli/testcases/data_generation.py +119 -0
  4. alita_sdk/cli/testcases/discovery.py +96 -0
  5. alita_sdk/cli/testcases/executor.py +84 -0
  6. alita_sdk/cli/testcases/logger.py +85 -0
  7. alita_sdk/cli/testcases/parser.py +172 -0
  8. alita_sdk/cli/testcases/prompts.py +91 -0
  9. alita_sdk/cli/testcases/reporting.py +125 -0
  10. alita_sdk/cli/testcases/setup.py +108 -0
  11. alita_sdk/cli/testcases/test_runner.py +282 -0
  12. alita_sdk/cli/testcases/utils.py +39 -0
  13. alita_sdk/cli/testcases/validation.py +90 -0
  14. alita_sdk/cli/testcases/workflow.py +196 -0
  15. alita_sdk/configurations/openapi.py +2 -2
  16. alita_sdk/runtime/clients/artifact.py +1 -1
  17. alita_sdk/runtime/langchain/langraph_agent.py +21 -6
  18. alita_sdk/runtime/tools/artifact.py +253 -8
  19. alita_sdk/runtime/tools/function.py +25 -6
  20. alita_sdk/runtime/tools/llm.py +12 -11
  21. alita_sdk/runtime/utils/serialization.py +155 -0
  22. alita_sdk/tools/bitbucket/api_wrapper.py +31 -30
  23. alita_sdk/tools/bitbucket/cloud_api_wrapper.py +49 -35
  24. alita_sdk/tools/confluence/api_wrapper.py +8 -1
  25. alita_sdk/tools/elitea_base.py +40 -36
  26. alita_sdk/tools/figma/api_wrapper.py +140 -83
  27. alita_sdk/tools/github/graphql_client_wrapper.py +1 -0
  28. alita_sdk/tools/utils/text_operations.py +156 -52
  29. {alita_sdk-0.3.603.dist-info → alita_sdk-0.3.611.dist-info}/METADATA +1 -1
  30. {alita_sdk-0.3.603.dist-info → alita_sdk-0.3.611.dist-info}/RECORD +34 -20
  31. {alita_sdk-0.3.603.dist-info → alita_sdk-0.3.611.dist-info}/WHEEL +0 -0
  32. {alita_sdk-0.3.603.dist-info → alita_sdk-0.3.611.dist-info}/entry_points.txt +0 -0
  33. {alita_sdk-0.3.603.dist-info → alita_sdk-0.3.611.dist-info}/licenses/LICENSE +0 -0
  34. {alita_sdk-0.3.603.dist-info → alita_sdk-0.3.611.dist-info}/top_level.txt +0 -0
@@ -369,50 +369,94 @@ class FigmaApiWrapper(NonCodeIndexerToolkit):
         )
 
     def _base_loader(
-        self,
-        url: Optional[str] = None,
-        file_keys_include: Optional[List[str]] = None,
-        file_keys_exclude: Optional[List[str]] = None,
-        node_ids_include: Optional[List[str]] = None,
-        node_ids_exclude: Optional[List[str]] = None,
-        node_types_include: Optional[List[str]] = None,
-        node_types_exclude: Optional[List[str]] = None,
-        number_of_threads: Optional[int] = None,
-        **kwargs
+        self,
+        urls_or_file_keys: Optional[str] = None,
+        node_ids_include: Optional[List[str]] = None,
+        node_ids_exclude: Optional[List[str]] = None,
+        node_types_include: Optional[List[str]] = None,
+        node_types_exclude: Optional[List[str]] = None,
+        number_of_threads: Optional[int] = None,
+        **kwargs,
     ) -> Generator[Document, None, None]:
-        if url:
-            file_key, node_ids_from_url = self._parse_figma_url(url)
-            # Override include params based on URL
-            file_keys_include = [file_key]
-            if node_ids_from_url and not node_ids_include:
-                node_ids_include = node_ids_from_url
-
-        # If both include and exclude are provided, use only include
-        if file_keys_include:
-            self._log_tool_event(f"Loading files: {file_keys_include}")
-            for file_key in file_keys_include:
-                self._log_tool_event(f"Loading file `{file_key}`")
-                file = self._client.get_file(file_key, geometry='depth=1')  # fetch only top-level structure (only pages without inner components)
-                if not file:
-                    raise ToolException(f"Unexpected error while retrieving file {file_key}. Please try specifying the node-id of an inner page.")
-                # propagate per-call number_of_threads override via metadata so _process_document can respect it
-                metadata = {
-                    'id': file_key,
-                    'file_key': file_key,
-                    'name': file.name,
-                    'updated_on': file.last_modified,
-                    'figma_pages_include': node_ids_include or [],
-                    'figma_pages_exclude': node_ids_exclude or [],
-                    'figma_nodes_include': node_types_include or [],
-                    'figma_nodes_exclude': node_types_exclude or [],
-                }
-                if isinstance(number_of_threads, int) and 1 <= number_of_threads <= 5:
-                    metadata['number_of_threads_override'] = number_of_threads
-                yield Document(page_content=json.dumps(metadata), metadata=metadata)
-        elif file_keys_exclude or node_ids_exclude:
-            raise ValueError("Excludes without parent (file_keys_include) do not make sense.")
-        else:
-            raise ValueError("You must provide file_keys_include or a URL.")
+        """Base loader used by the indexer tool.
+
+        Args:
+            urls_or_file_keys: Comma-separated list of Figma file URLs or raw file keys.
+                Each entry can be:
+                - a full Figma URL (https://www.figma.com/file/... or /design/...)
+                  optionally with a node-id query parameter, or
+                - a bare file key string.
+                URL entries are parsed via _parse_figma_url; raw keys are used as-is.
+            node_ids_include: Optional list of top-level node IDs (pages) to include when
+                an entry does not specify node-id in the URL and is not otherwise
+                constrained.
+            node_ids_exclude: Optional list of top-level node IDs (pages) to exclude when
+                node_ids_include is not provided.
+            node_types_include: Optional list of node types to include within each page.
+            node_types_exclude: Optional list of node types to exclude when
+                node_types_include is not provided.
+            number_of_threads: Optional override for the number of worker threads to use
+                when processing images.
+        """
+        if not urls_or_file_keys:
+            raise ValueError("You must provide urls_or_file_keys with at least one URL or file key.")
+
+        # Parse the comma-separated entries into concrete (file_key, per_entry_node_ids_include)
+        entries = [item.strip() for item in urls_or_file_keys.split(',') if item.strip()]
+        if not entries:
+            raise ValueError("You must provide urls_or_file_keys with at least one non-empty value.")
+
+        # Validate number_of_threads override once and pass via metadata
+        metadata_threads_override: Optional[int] = None
+        if isinstance(number_of_threads, int) and 1 <= number_of_threads <= 5:
+            metadata_threads_override = number_of_threads
+
+        for entry in entries:
+            per_file_node_ids_include: Optional[List[str]] = None
+            file_key: Optional[str] = None
+
+            # Heuristic: treat as URL if it has a scheme and figma.com host
+            if entry.startswith("http://") or entry.startswith("https://"):
+                file_key, node_ids_from_url = self._parse_figma_url(entry)
+                per_file_node_ids_include = node_ids_from_url
+            else:
+                # Assume this is a raw file key
+                file_key = entry
+
+            if not file_key:
+                continue
+
+            # If URL-derived node IDs exist, they take precedence over global include list
+            effective_node_ids_include = per_file_node_ids_include or node_ids_include or []
+
+            self._log_tool_event(f"Loading file `{file_key}`")
+            try:
+                file = self._client.get_file(file_key, geometry='depth=1')
+            except ToolException as e:
+                # Enrich the error message with the file_key for easier troubleshooting
+                raise ToolException(
+                    f"Failed to retrieve Figma file '{file_key}'. Original error: {e}"
+                ) from e
+
+            if not file:
+                raise ToolException(
+                    f"Unexpected error while retrieving file {file_key}. Please try specifying the node-id of an inner page."
+                )
+
+            metadata = {
+                'id': file_key,
+                'file_key': file_key,
+                'name': file.name,
+                'updated_on': file.last_modified,
+                'figma_pages_include': effective_node_ids_include,
+                'figma_pages_exclude': node_ids_exclude or [],
+                'figma_nodes_include': node_types_include or [],
+                'figma_nodes_exclude': node_types_exclude or [],
+            }
+
+            if metadata_threads_override is not None:
+                metadata['number_of_threads_override'] = metadata_threads_override
+
+            yield Document(page_content=json.dumps(metadata), metadata=metadata)
 
     def has_image_representation(self, node):
         node_type = node.get('type', '').lower()
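
The loader now takes one comma-separated string rather than separate URL/file-key lists. To make the splitting and URL-vs-key heuristic concrete, here is a minimal standalone sketch; `split_figma_entries` is a hypothetical helper, and its URL decomposition only approximates what the wrapper's internal `_parse_figma_url` does:

```python
from typing import List, Optional, Tuple
from urllib.parse import parse_qs, urlparse

def split_figma_entries(urls_or_file_keys: str) -> List[Tuple[str, Optional[List[str]]]]:
    """Hypothetical helper: yield (file_key, node_ids_from_url) per entry."""
    results: List[Tuple[str, Optional[List[str]]]] = []
    for entry in (item.strip() for item in urls_or_file_keys.split(',')):
        if not entry:
            continue  # skip empties produced by stray commas
        if entry.startswith(("http://", "https://")):
            parsed = urlparse(entry)
            # Figma URLs look like /file/<FILE_KEY>/... or /design/<FILE_KEY>/...
            parts = [p for p in parsed.path.split('/') if p]
            file_key = parts[1] if len(parts) > 1 else ""
            node_ids = parse_qs(parsed.query).get('node-id')  # None if absent
            results.append((file_key, node_ids))
        else:
            results.append((entry, None))  # bare file key: no URL-derived node IDs
    return results

print(split_figma_entries(
    "https://www.figma.com/design/abc123/My-File?node-id=1-2, Fp24FuzPwH0L74ODSrCnQo"
))
# [('abc123', ['1-2']), ('Fp24FuzPwH0L74ODSrCnQo', None)]
```

Per the diff, URL-derived node IDs take precedence over the global include list, matching `effective_node_ids_include = per_file_node_ids_include or node_ids_include or []`.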
@@ -672,44 +716,58 @@ class FigmaApiWrapper(NonCodeIndexerToolkit):
         )
 
     def _index_tool_params(self):
-        """Return the parameters for indexing data."""
-        return {
-            "url": (Optional[str], Field(
-                description=(
-                    "Full Figma file or page URL to index. Must be in one of the following formats: "
-                    "'https://www.figma.com/file/<FILE_KEY>/...' or 'https://www.figma.com/design/<FILE_KEY>/...'. "
-                    "If present, the 'node-id' query parameter (e.g. '?node-id=<PAGE_ID>') will be used to limit "
-                    "indexing to that page or node. When this URL is provided, it overrides 'file_keys_include' ('node_ids_include')."
-                ),
-                default=None)),
-            'number_of_threads': (Optional[int], Field(
-                description=(
-                    "Optional override for the number of worker threads used when indexing Figma images. "
-                    f"Valid values are from 1 to 5. Default is {DEFAULT_NUMBER_OF_THREADS}."
-                ),
-                default=DEFAULT_NUMBER_OF_THREADS,
-                ge=1,
-                le=5,
-            )),
-            'file_keys_include': (Optional[List[str]], Field(
-                description="List of file keys to include in index if project_id is not provided: i.e. ['Fp24FuzPwH0L74ODSrCnQo', 'jmhAr6q78dJoMRqt48zisY']",
-                default=None)),
-            'file_keys_exclude': (Optional[List[str]], Field(
-                description="List of file keys to exclude from index. It is applied only if project_id is provided and file_keys_include is not provided: i.e. ['Fp24FuzPwH0L74ODSrCnQo', 'jmhAr6q78dJoMRqt48zisY']",
-                default=None)),
-            'node_ids_include': (Optional[List[str]], Field(
-                description="List of top-level nodes (pages) in file to include in index. It is node-id from figma url: i.e. ['123-56', '7651-9230'].",
-                default=None)),
-            'node_ids_exclude': (Optional[List[str]], Field(
-                description="List of top-level nodes (pages) in file to exclude from index. It is applied only if node_ids_include is not provided. It is node-id from figma url: i.e. ['Fp24FuzPwH0L74ODSrCnQo', 'jmhAr6q78dJoMRqt48zisY']",
-                default=None)),
-            'node_types_include': (Optional[List[str]], Field(
-                description="List type of nodes to include in index: i.e. ['FRAME', 'COMPONENT', 'RECTANGLE', 'COMPONENT_SET', 'INSTANCE', 'VECTOR', ...].",
-                default=None)),
-            'node_types_exclude': (Optional[List[str]], Field(
-                description="List type of nodes to exclude from index. It is applied only if node_types_include is not provided: i.e. ['FRAME', 'COMPONENT', 'RECTANGLE', 'COMPONENT_SET', 'INSTANCE', 'VECTOR', ...]",
-                default=None)),
-        }
+        """Return the parameters for indexing data."""
+        return {
+            "urls_or_file_keys": (str, Field(
+                description=(
+                    "Comma-separated list of Figma file URLs or raw file keys to index. "
+                    "Each entry may be a full Figma URL (with optional node-id query) or a file key. "
+                    "Example: 'https://www.figma.com/file/<FILE_KEY>/...?node-id=<NODE_ID>,Fp24FuzPwH0L74ODSrCnQo'."
+                ))),
+            'number_of_threads': (Optional[int], Field(
+                description=(
+                    "Optional override for the number of worker threads used when indexing Figma images. "
+                    f"Valid values are from 1 to 5. Default is {DEFAULT_NUMBER_OF_THREADS}."
+                ),
+                default=DEFAULT_NUMBER_OF_THREADS,
+                ge=1,
+                le=5,
+            )),
+            'node_ids_include': (Optional[List[str]], Field(
+                description=(
+                    "List of top-level node IDs (pages) to include in the index. Values should match "
+                    "Figma node-id format like ['123-56', '7651-9230']. These include rules are applied "
+                    "for each entry in urls_or_file_keys when the URL does not specify node-id and for "
+                    "each raw file_key entry."
+                ),
+                default=None,
+            )),
+            'node_ids_exclude': (Optional[List[str]], Field(
+                description=(
+                    "List of top-level node IDs (pages) to exclude from the index when node_ids_include "
+                    "is not provided. Values should match Figma node-id format. These exclude rules are "
+                    "applied for each entry in urls_or_file_keys (URLs without node-id and raw fileKey "
+                    "entries)."
+                ),
+                default=None,
+            )),
+            'node_types_include': (Optional[List[str]], Field(
+                description=(
+                    "List of node types to include in the index, e.g. ['FRAME', 'COMPONENT', 'RECTANGLE', "
+                    "'COMPONENT_SET', 'INSTANCE', 'VECTOR', ...]. If provided, only these types are indexed "
+                    "for each page loaded from each urls_or_file_keys entry."
+                ),
+                default=None,
+            )),
+            'node_types_exclude': (Optional[List[str]], Field(
+                description=(
+                    "List of node types to exclude from the index when node_types_include is not provided. "
+                    "These exclude rules are applied to nodes within each page loaded from each "
+                    "urls_or_file_keys entry."
+                ),
+                default=None,
+            )),
+        }
 
     def _send_request(
         self,
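
The spec entries are `(type, Field(...))` tuples, which is exactly the shape pydantic's `create_model` accepts for dynamic schema construction. A minimal sketch of how such a spec could become an args model, assuming pydantic v2 and a placeholder value for `DEFAULT_NUMBER_OF_THREADS` (whether the SDK builds its schema precisely this way is not shown in the diff):

```python
from typing import Optional

from pydantic import Field, create_model

DEFAULT_NUMBER_OF_THREADS = 1  # placeholder; the SDK defines its own constant

params = {
    "urls_or_file_keys": (str, Field(
        description="Comma-separated Figma file URLs or raw file keys",
    )),
    "number_of_threads": (Optional[int], Field(
        default=DEFAULT_NUMBER_OF_THREADS, ge=1, le=5,
        description="Worker threads used when indexing Figma images (1-5)",
    )),
}

# create_model accepts these (type, FieldInfo) tuples directly, turning the
# spec into a validated schema; ge/le enforce the 1-5 thread bound.
IndexArgs = create_model("IndexArgs", **params)

args = IndexArgs(urls_or_file_keys="Fp24FuzPwH0L74ODSrCnQo", number_of_threads=3)
print(args.model_dump())
# {'urls_or_file_keys': 'Fp24FuzPwH0L74ODSrCnQo', 'number_of_threads': 3}
```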
@@ -972,8 +1030,7 @@ class FigmaApiWrapper(NonCodeIndexerToolkit):
 
         # Delegate URL and file_key handling to _base_loader
         base_docs = self._base_loader(
-            url=url,
-            file_keys_include=[file_key] if file_key else None,
+            urls_or_file_keys=url or file_key,
             node_ids_include=node_ids_include_list,
             node_ids_exclude=node_ids_exclude_list,
         )
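
The call site relies on Python's `or` to pass whichever legacy argument is set; a brief illustration with hypothetical values:

```python
url = None  # hypothetical: the caller supplied only a file key
file_key = "Fp24FuzPwH0L74ODSrCnQo"

urls_or_file_keys = url or file_key  # -> "Fp24FuzPwH0L74ODSrCnQo"
# A non-empty url wins; if both are None/empty, _base_loader raises
# ValueError("You must provide urls_or_file_keys with at least one URL or file key.")
```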
@@ -1464,6 +1464,7 @@ class GraphQLClientWrapper(BaseModel):
 
         missing_fields = []
         fields_to_update = []
+        updated_fields = []
 
         if fields:
            try:
@@ -26,48 +26,119 @@ TEXT_EDITABLE_EXTENSIONS = {
 def parse_old_new_markers(file_query: str) -> List[Tuple[str, str]]:
     """Parse OLD/NEW marker-based edit instructions.
 
-    Extracts pairs of old and new content from a file query using markers in
-    a minimal, regex-based way without additional line splitting logic.
-
-    Supported forms (OLD/NEW blocks must appear in pairs):
-    - OLD <<<< ... >>>> OLD
-      NEW <<<< ... >>>> NEW
-    - If no such pairs are found, we also accept the slightly incorrect
-      "<<<" form as a fallback:
-      OLD <<< ... >>> OLD
-      NEW <<< ... >>> NEW
+    Format:
+
+        OLD <<<<[optional text or newline]
+        ... OLD content ...
+        >>>> OLD
+        NEW <<<<[optional text or newline]
+        ... NEW content ...
+        >>>> NEW
+
+    Rules:
+    - OLD block:
+      - Starts at the first line that contains "OLD <<<<".
+      - OLD content includes:
+        * On that line: everything after the first "OLD <<<<" (if any),
+        * Plus all following lines up to (but not including) the first line
+          that contains ">>>> OLD".
+    - NEW block:
+      - Starts at the first line, after the OLD block, that contains "NEW <<<<".
+      - NEW content includes:
+        * On that line: everything after the first "NEW <<<<" (if any),
+        * Plus all following lines up to (but not including) the first line
+          that contains ">>>> NEW".
+    - Only the first complete OLD/NEW pair is returned.
 
     Args:
         file_query: String containing marked old and new content sections.
 
     Returns:
-        List of (old_content, new_content) tuples, where each content string
-        is the raw inner block (with leading/trailing whitespace stripped),
-        but otherwise unmodified.
+        A list with at most one (old_content, new_content) tuple. Each
+        content string includes newlines but excludes the marker substrings
+        and their closing lines.
     """
-    # Primary pattern: correct 4-< markers
-    pattern_primary = re.compile(
-        r"OLD <<<<(\s*.*?\s*)>>>> OLD"  # OLD block
-        r"\s*"  # optional whitespace between OLD/NEW
-        r"NEW <<<<(\s*.*?\s*)>>>> NEW",  # NEW block
-        re.DOTALL,
-    )
 
-    matches = pattern_primary.findall(file_query)
+    if not file_query:
+        return []
 
-    # Fallback pattern: accept 3-< markers if no proper 4-< markers found
-    if not matches:
-        pattern_fallback = re.compile(
-            r"OLD <<<(\s*.*?\s*)>>>> OLD"  # OLD block (3 < and 4 > to support previous version)
-            r"\s*"  # optional whitespace between OLD/NEW
-            r"NEW <<<(\s*.*?\s*)>>>> NEW",  # NEW block (3 < and 4 > to support previous version)
-            re.DOTALL,
-        )
-        matches = pattern_fallback.findall(file_query)
+    lines = file_query.splitlines(keepends=True)
+
+    old_open = "OLD <<<<"
+    old_close = ">>>> OLD"
+    new_open = "NEW <<<<"
+    new_close = ">>>> NEW"
+
+    state = "search_old"  # -> in_old -> search_new -> in_new
+    old_parts: list[str] = []
+    new_parts: list[str] = []
+
+    i = 0
+    n = len(lines)
+
+    # 1. Find OLD block
+    while i < n and state == "search_old":
+        line = lines[i]
+        pos = line.find(old_open)
+        if pos != -1:
+            # Start OLD content after the first "OLD <<<<" on this line
+            after = line[pos + len(old_open):]
+            old_parts.append(after)
+            state = "in_old"
+        i += 1
+
+    if state != "in_old":
+        # No OLD block found
+        return []
+
+    # Collect until a line containing ">>>> OLD"
+    while i < n and state == "in_old":
+        line = lines[i]
+        if old_close in line:
+            # Stop before this line; do not include any part of it
+            state = "search_new"
+        else:
+            old_parts.append(line)
+        i += 1
+
+    if state != "search_new":
+        # Didn't find a proper OLD close
+        return []
+
+    # 2. Find NEW block after OLD
+    while i < n and state == "search_new":
+        line = lines[i]
+        pos = line.find(new_open)
+        if pos != -1:
+            # NEW content starts from the *next* line after the marker line
+            state = "in_new"
+            i += 1
+            break
+        i += 1
+
+    if state != "in_new":
+        # No NEW block found
+        return []
+
+    # Collect until a line containing ">>>> NEW"
+    while i < n and state == "in_new":
+        line = lines[i]
+        close_pos = line.rfind(new_close)
+        if close_pos != -1:
+            # Include content up to but not including the *last* ">>>> NEW" on the line
+            before_close = line[:close_pos]
+            new_parts.append(before_close)
+            break
+        else:
+            new_parts.append(line)
+            i += 1
+
+    if not old_parts or not new_parts:
+        return []
 
-    # Preserve block content exactly as captured so Stage 1 can use exact
-    # substring replacement (including indentation and trailing spaces).
-    return [(old_block, new_block) for old_block, new_block in matches]
+    old_content = "".join(old_parts)
+    new_content = "".join(new_parts)
+    return [(old_content, new_content)]
 
 
 def is_text_editable(filename: str) -> bool:
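
Tracing the new state machine on a small input makes the line-based rules concrete (a worked example, not taken from the package's tests):

```python
from alita_sdk.tools.utils.text_operations import parse_old_new_markers

query = (
    "Please apply this edit:\n"
    "OLD <<<<\n"
    "def greet():\n"
    "    return 'hi'\n"
    ">>>> OLD\n"
    "NEW <<<<\n"
    "def greet():\n"
    "    return 'hello'\n"
    ">>>> NEW\n"
)
pairs = parse_old_new_markers(query)
# Only the first complete pair is returned. OLD keeps whatever followed
# "OLD <<<<" on its marker line (here, just the newline); NEW starts on
# the line after its marker.
assert pairs == [("\ndef greet():\n    return 'hi'\n",
                  "def greet():\n    return 'hello'\n")]
```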
@@ -250,7 +321,7 @@
     old_text: str,
     new_text: str,
     file_path: Optional[str] = None,
-) -> Tuple[str, bool]:
+) -> Tuple[str, Optional[str]]:
     """Apply a single OLD/NEW edit with a tolerant fallback.
 
    This helper is used by edit_file to apply one (old_text, new_text) pair:
@@ -271,21 +342,53 @@
         file_path: Optional path for logging context
 
     Returns:
-        (updated_content, used_fallback)
+        (updated_content, warning_message)
+        - updated_content: resulting content (may be unchanged)
+        - warning_message: human-readable warning if no edit was applied
+          or if the operation was ambiguous; None if an edit was
+          successfully and unambiguously applied.
     """
     # Stage 1: exact match
-    if old_text in content:
-        return content.replace(old_text, new_text), False
+    if old_text:
+        occurrences = content.count(old_text)
+        if occurrences == 1:
+            return content.replace(old_text, new_text, 1), None
+        if occurrences > 1:
+            msg = (
+                "Exact OLD block appears %d times in %s; no replacement applied to avoid ambiguity. "
+                "OLD value: %r" % (
+                    occurrences,
+                    file_path or "<unknown>",
+                    old_text,
+                )
+            )
+            logger.warning(msg)
+            return content, msg
 
     # Stage 2: tolerant match
-    if not old_text.strip() or not content:
-        return content, False
+    if not old_text or not old_text.strip() or not content:
+        msg = None
+        if not old_text or not old_text.strip():
+            msg = (
+                "OLD block is empty or whitespace-only; no replacement applied. "
+                "OLD value: %r" % (old_text,)
+            )
+        elif not content:
+            msg = "Content is empty; no replacement applied."
+        if msg:
+            logger.warning(msg)
+        return content, msg
 
     # Logical OLD: drop empty/whitespace-only lines
     old_lines_raw = old_text.splitlines()
     old_lines = [l for l in old_lines_raw if l.strip()]
     if not old_lines:
-        return content, False
+        msg = (
+            "OLD block contains only empty/whitespace lines; no replacement applied. "
+            "OLD value: %r" % (old_text,)
+        )
+        logger.warning(msg)
+        return content, msg
 
     # Precompute normalized OLD (joined by '\n')
     norm_old = _normalize_for_match("\n".join(old_lines))
@@ -318,29 +421,30 @@
         candidates.append((start, idx, block))
 
     if not candidates:
-        logger.warning(
-            "Fallback match: normalized OLD block not found in %s.",
-            file_path or "<unknown>",
+        msg = (
+            "Normalized OLD block not found in %s. OLD value: %r"
+            % (file_path or "<unknown>", old_text)
         )
-        return content, False
+        logger.warning(msg)
+        return content, msg
 
     if len(candidates) > 1:
-        logger.warning(
-            "Fallback match: multiple candidate regions for OLD block in %s; "
-            "no change applied to avoid ambiguity.",
-            file_path or "<unknown>",
+        msg = (
+            "Multiple candidate regions for OLD block in %s; "
+            "no change applied to avoid ambiguity. OLD value: %r"
+            % (file_path or "<unknown>", old_text)
         )
-        return content, False
+        logger.warning(msg)
+        return content, msg
 
     start_idx, end_idx, candidate_block = candidates[0]
     updated = content.replace(candidate_block, new_text, 1)
 
     logger.info(
-        "Fallback match: applied tolerant OLD/NEW replacement in %s around lines %d-%d",
+        "Applied tolerant OLD/NEW replacement in %s around lines %d-%d",
         file_path or "<unknown>",
         start_idx + 1,
         start_idx + len(old_lines),
     )
 
-    return updated, True
-
+    return updated, None
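
With the return type changed from `Tuple[str, bool]` to `Tuple[str, Optional[str]]`, callers branch on the warning instead of a fallback flag. A hedged usage sketch, assuming the first positional parameter (not visible in these hunks) is the file content:

```python
from alita_sdk.tools.utils.text_operations import try_apply_edit

content = "a = 1\nb = 2\n"
updated, warning = try_apply_edit(content, "b = 2\n", "b = 3\n", file_path="demo.py")
if warning is None:
    content = updated  # exactly one exact match: replacement applied
else:
    print(f"edit skipped: {warning}")  # ambiguous or not found: content unchanged

# Ambiguity is now surfaced instead of silently replacing every occurrence:
_, warning = try_apply_edit("x = 0\nx = 0\n", "x = 0\n", "x = 1\n")
assert warning is not None and "appears 2 times" in warning
```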
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: alita_sdk
-Version: 0.3.603
+Version: 0.3.611
 Summary: SDK for building langchain agents using resources from Alita
 Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedj27@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
 License-Expression: Apache-2.0