lionagi-0.16.2-py3-none-any.whl → lionagi-0.17.0-py3-none-any.whl

This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (63)
  1. lionagi/adapters/_utils.py +10 -23
  2. lionagi/adapters/async_postgres_adapter.py +83 -79
  3. lionagi/ln/__init__.py +4 -4
  4. lionagi/ln/_json_dump.py +0 -6
  5. lionagi/ln/fuzzy/__init__.py +4 -1
  6. lionagi/ln/fuzzy/_fuzzy_validate.py +109 -0
  7. lionagi/ln/fuzzy/_to_dict.py +388 -0
  8. lionagi/models/__init__.py +0 -2
  9. lionagi/operations/__init__.py +0 -6
  10. lionagi/operations/_visualize_graph.py +285 -0
  11. lionagi/operations/brainstorm/brainstorm.py +14 -12
  12. lionagi/operations/builder.py +23 -302
  13. lionagi/operations/communicate/communicate.py +1 -1
  14. lionagi/operations/flow.py +14 -11
  15. lionagi/operations/node.py +14 -3
  16. lionagi/operations/operate/operate.py +5 -11
  17. lionagi/operations/parse/parse.py +2 -3
  18. lionagi/operations/types.py +0 -2
  19. lionagi/operations/utils.py +11 -5
  20. lionagi/protocols/generic/pile.py +3 -7
  21. lionagi/protocols/graph/graph.py +23 -6
  22. lionagi/protocols/graph/node.py +0 -2
  23. lionagi/protocols/messages/message.py +0 -1
  24. lionagi/protocols/operatives/operative.py +2 -2
  25. lionagi/protocols/types.py +0 -15
  26. lionagi/service/connections/endpoint.py +11 -5
  27. lionagi/service/connections/match_endpoint.py +2 -10
  28. lionagi/service/connections/providers/types.py +1 -3
  29. lionagi/service/hooks/hook_event.py +1 -1
  30. lionagi/service/hooks/hook_registry.py +1 -1
  31. lionagi/service/rate_limited_processor.py +1 -1
  32. lionagi/session/branch.py +24 -18
  33. lionagi/session/session.py +2 -18
  34. lionagi/utils.py +3 -335
  35. lionagi/version.py +1 -1
  36. {lionagi-0.16.2.dist-info → lionagi-0.17.0.dist-info}/METADATA +4 -13
  37. {lionagi-0.16.2.dist-info → lionagi-0.17.0.dist-info}/RECORD +39 -61
  38. lionagi/adapters/postgres_model_adapter.py +0 -131
  39. lionagi/libs/concurrency.py +0 -1
  40. lionagi/libs/nested/__init__.py +0 -3
  41. lionagi/libs/nested/flatten.py +0 -172
  42. lionagi/libs/nested/nfilter.py +0 -59
  43. lionagi/libs/nested/nget.py +0 -45
  44. lionagi/libs/nested/ninsert.py +0 -104
  45. lionagi/libs/nested/nmerge.py +0 -158
  46. lionagi/libs/nested/npop.py +0 -69
  47. lionagi/libs/nested/nset.py +0 -94
  48. lionagi/libs/nested/unflatten.py +0 -83
  49. lionagi/libs/nested/utils.py +0 -189
  50. lionagi/libs/parse.py +0 -31
  51. lionagi/libs/schema/json_schema.py +0 -231
  52. lionagi/libs/unstructured/__init__.py +0 -0
  53. lionagi/libs/unstructured/pdf_to_image.py +0 -45
  54. lionagi/libs/unstructured/read_image_to_base64.py +0 -33
  55. lionagi/libs/validate/fuzzy_match_keys.py +0 -7
  56. lionagi/libs/validate/fuzzy_validate_mapping.py +0 -144
  57. lionagi/libs/validate/string_similarity.py +0 -7
  58. lionagi/libs/validate/xml_parser.py +0 -203
  59. lionagi/models/note.py +0 -387
  60. lionagi/protocols/graph/_utils.py +0 -22
  61. lionagi/service/connections/providers/claude_code_.py +0 -299
  62. {lionagi-0.16.2.dist-info → lionagi-0.17.0.dist-info}/WHEEL +0 -0
  63. {lionagi-0.16.2.dist-info → lionagi-0.17.0.dist-info}/licenses/LICENSE +0 -0
lionagi/libs/parse.py DELETED
@@ -1,31 +0,0 @@
- # Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
- #
- # SPDX-License-Identifier: Apache-2.0
-
- from lionagi.libs.schema.as_readable import as_readable
- from lionagi.libs.schema.extract_code_block import extract_code_block
- from lionagi.libs.schema.function_to_schema import function_to_schema
- from lionagi.libs.validate.fuzzy_match_keys import fuzzy_match_keys
- from lionagi.libs.validate.fuzzy_validate_mapping import fuzzy_validate_mapping
- from lionagi.libs.validate.string_similarity import string_similarity
- from lionagi.libs.validate.to_num import to_num
- from lionagi.utils import fuzzy_parse_json, to_dict, to_json
-
- validate_keys = fuzzy_match_keys # for backward compatibility
- validate_mapping = fuzzy_validate_mapping # for backward compatibility
-
-
- __all__ = (
-     "as_readable",
-     "extract_code_block",
-     "function_to_schema",
-     "fuzzy_match_keys",
-     "fuzzy_validate_mapping",
-     "string_similarity",
-     "validate_keys",
-     "validate_mapping",
-     "to_dict",
-     "to_json",
-     "to_num",
-     "fuzzy_parse_json",
- )
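Downstream code that imported these names through the removed lionagi.libs.parse shim will need direct imports in 0.17.0. The sketch below is a hypothetical migration note inferred only from import targets visible elsewhere in this diff (the deleted lionagi/libs/validate shims further down point at lionagi.ln.fuzzy); the exact 0.17.0 export paths are an assumption and should be verified against the released package:

# Hypothetical migration sketch, not part of the diff. The "new" paths are
# inferred from the deleted shims shown in this diff and may differ in 0.17.0.

# 0.16.2 (removed in 0.17.0):
#   from lionagi.libs.parse import fuzzy_match_keys, string_similarity, to_dict

# assumed 0.17.0 equivalents:
from lionagi.ln.fuzzy._fuzzy_match import fuzzy_match_keys
from lionagi.ln.fuzzy._string_similarity import string_similarity
from lionagi.ln.fuzzy._to_dict import to_dict  # new module per the file list; export name assumed

# The deprecated aliases validate_keys / validate_mapping have no replacement
# shown here; call fuzzy_match_keys / fuzzy_validate_mapping directly.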
lionagi/libs/schema/json_schema.py DELETED
@@ -1,231 +0,0 @@
- # Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
- #
- # SPDX-License-Identifier: Apache-2.0
-
- from typing import Any, Literal
-
- from ..nested.flatten import flatten
-
-
- def extract_json_schema(
-     data: Any,
-     *,
-     sep: str = "|",
-     coerce_keys: bool = True,
-     dynamic: bool = True,
-     coerce_sequence: Literal["dict", "list"] | None = None,
-     max_depth: int | None = None,
- ) -> dict[str, Any]:
-     """
-     Extract a JSON schema from JSON data.
-
-     This function uses the flatten function to create a flat representation
-     of the JSON data, then builds a schema based on the flattened structure.
-
-     Args:
-         data: The JSON data to extract the schema from.
-         sep: Separator used in flattened keys.
-         coerce_keys: Whether to coerce keys to strings.
-         dynamic: Whether to use dynamic flattening.
-         coerce_sequence: How to coerce sequences ("dict", "list", or None).
-         max_depth: Maximum depth to flatten.
-
-     Returns:
-         A dictionary representing the JSON schema.
-     """
-     flattened = flatten(
-         data,
-         sep=sep,
-         coerce_keys=coerce_keys,
-         dynamic=dynamic,
-         coerce_sequence=coerce_sequence,
-         max_depth=max_depth,
-     )
-
-     schema = {}
-     for key, value in flattened.items():
-         key_parts = key.split(sep) if isinstance(key, str) else key
-         current = schema
-         for part in key_parts[:-1]:
-             if part not in current:
-                 current[part] = {}
-             current = current[part]
-
-         current[key_parts[-1]] = _get_type(value)
-
-     return {"type": "object", "properties": _consolidate_schema(schema)}
-
-
- def _get_type(value: Any) -> dict[str, Any]:
-     if isinstance(value, str):
-         return {"type": "string"}
-     elif isinstance(value, bool):
-         return {"type": "boolean"}
-     elif isinstance(value, int):
-         return {"type": "integer"}
-     elif isinstance(value, float):
-         return {"type": "number"}
-     elif isinstance(value, list):
-         if not value:
-             return {"type": "array", "items": {}}
-         item_types = [_get_type(item) for item in value]
-         if all(item_type == item_types[0] for item_type in item_types):
-             return {"type": "array", "items": item_types[0]}
-         else:
-             return {"type": "array", "items": {"oneOf": item_types}}
-     elif isinstance(value, dict):
-         return {
-             "type": "object",
-             "properties": _consolidate_schema(
-                 {k: _get_type(v) for k, v in value.items()}
-             ),
-         }
-     elif value is None:
-         return {"type": "null"}
-     else:
-         return {"type": "any"}
-
-
- def _consolidate_schema(schema: dict) -> dict:
-     """
-     Consolidate the schema to handle lists and nested structures.
-     """
-     consolidated = {}
-     for key, value in schema.items():
-         if isinstance(value, dict) and all(k.isdigit() for k in value.keys()):
-             # This is likely a list
-             item_types = list(value.values())
-             if all(item_type == item_types[0] for item_type in item_types):
-                 consolidated[key] = {"type": "array", "items": item_types[0]}
-             else:
-                 consolidated[key] = {
-                     "type": "array",
-                     "items": {"oneOf": item_types},
-                 }
-         elif isinstance(value, dict) and "type" in value:
-             consolidated[key] = value
-         else:
-             consolidated[key] = _consolidate_schema(value)
-     return consolidated
-
-
- def json_schema_to_cfg(
-     schema: dict[str, Any], start_symbol: str = "S"
- ) -> list[tuple[str, list[str]]]:
-     productions = []
-     visited = set()
-     symbol_counter = 0
-
-     def generate_symbol(base: str) -> str:
-         nonlocal symbol_counter
-         symbol = f"{base}@{symbol_counter}"
-         symbol_counter += 1
-         return symbol
-
-     def generate_rules(s: dict[str, Any], symbol: str):
-         if symbol in visited:
-             return
-         visited.add(symbol)
-
-         if s.get("type") == "object":
-             properties = s.get("properties", {})
-             if properties:
-                 props_symbol = generate_symbol("PROPS")
-                 productions.append((symbol, ["{", props_symbol, "}"]))
-
-                 productions.append((props_symbol, [])) # Empty object
-                 for i, prop in enumerate(properties):
-                     prop_symbol = generate_symbol(prop)
-                     if i == 0:
-                         productions.append((props_symbol, [prop_symbol]))
-                     else:
-                         productions.append(
-                             (props_symbol, [props_symbol, ",", prop_symbol])
-                         )
-
-                 for prop, prop_schema in properties.items():
-                     prop_symbol = generate_symbol(prop)
-                     value_symbol = generate_symbol("VALUE")
-                     productions.append(
-                         (prop_symbol, [f'"{prop}"', ":", value_symbol])
-                     )
-                     generate_rules(prop_schema, value_symbol)
-             else:
-                 productions.append((symbol, ["{", "}"]))
-
-         elif s.get("type") == "array":
-             items = s.get("items", {})
-             items_symbol = generate_symbol("ITEMS")
-             value_symbol = generate_symbol("VALUE")
-             productions.append((symbol, ["[", "]"]))
-             productions.append((symbol, ["[", items_symbol, "]"]))
-             productions.append((items_symbol, [value_symbol]))
-             productions.append(
-                 (items_symbol, [value_symbol, ",", items_symbol])
-             )
-             generate_rules(items, value_symbol)
-
-         elif s.get("type") == "string":
-             productions.append((symbol, ["STRING"]))
-
-         elif s.get("type") == "number":
-             productions.append((symbol, ["NUMBER"]))
-
-         elif s.get("type") == "integer":
-             productions.append((symbol, ["INTEGER"]))
-
-         elif s.get("type") == "boolean":
-             productions.append((symbol, ["BOOLEAN"]))
-
-         elif s.get("type") == "null":
-             productions.append((symbol, ["NULL"]))
-
-     generate_rules(schema, start_symbol)
-     return productions
-
-
- def json_schema_to_regex(schema: dict[str, Any]) -> str:
-     def schema_to_regex(s):
-         if s.get("type") == "object":
-             properties = s.get("properties", {})
-             prop_patterns = [
-                 rf'"{prop}"\s*:\s*{schema_to_regex(prop_schema)}'
-                 for prop, prop_schema in properties.items()
-             ]
-             return (
-                 r"\{"
-                 + r"\s*("
-                 + r"|".join(prop_patterns)
-                 + r")"
-                 + r"(\s*,\s*("
-                 + r"|".join(prop_patterns)
-                 + r"))*\s*\}"
-             )
-         elif s.get("type") == "array":
-             items = s.get("items", {})
-             return (
-                 r"\[\s*("
-                 + schema_to_regex(items)
-                 + r"(\s*,\s*"
-                 + schema_to_regex(items)
-                 + r")*)?\s*\]"
-             )
-         elif s.get("type") == "string":
-             return r'"[^"]*"'
-         elif s.get("type") == "integer":
-             return r"-?\d+"
-         elif s.get("type") == "number":
-             return r"-?\d+(\.\d+)?"
-         elif s.get("type") == "boolean":
-             return r"(true|false)"
-         elif s.get("type") == "null":
-             return r"null"
-         else:
-             return r".*"
-
-     return "^" + schema_to_regex(schema) + "$"
-
-
- def print_cfg(productions: list[tuple[str, list[str]]]):
-     for lhs, rhs in productions:
-         print(f"{lhs} -> {' '.join(rhs)}")
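For reference, a minimal usage sketch of the removed helpers as they behaved in 0.16.2 (the module is gone in 0.17.0). The exact schema emitted depends on the also-removed flatten() helper, so the commented output is approximate:

# Runs against lionagi 0.16.2 only; lionagi.libs.schema.json_schema no longer
# exists in 0.17.0.
from lionagi.libs.schema.json_schema import (
    extract_json_schema,
    json_schema_to_cfg,
    json_schema_to_regex,
)

schema = extract_json_schema({"name": "lion", "count": 2})
# approximately: {"type": "object", "properties": {"name": {"type": "string"},
#                                                  "count": {"type": "integer"}}}

pattern = json_schema_to_regex(schema)  # coarse regex accepting matching JSON text
rules = json_schema_to_cfg(schema)      # context-free grammar productions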
lionagi/libs/unstructured/__init__.py DELETED
File without changes
lionagi/libs/unstructured/pdf_to_image.py DELETED
@@ -1,45 +0,0 @@
- from lionagi.utils import import_module, is_import_installed
-
- _HAS_PDF2IMAGE = is_import_installed("pdf2image")
-
-
- def pdf_to_images(
-     pdf_path: str, output_folder: str, dpi: int = 300, fmt: str = "jpeg"
- ) -> list:
-     """
-     Convert a PDF file into images, one image per page.
-
-     Args:
-         pdf_path (str): Path to the input PDF file.
-         output_folder (str): Directory to save the output images.
-         dpi (int): Dots per inch (resolution) for conversion (default: 300).
-         fmt (str): Image format (default: 'jpeg'). Use 'png' if preferred.
-
-     Returns:
-         list: A list of file paths for the saved images.
-     """
-     if not _HAS_PDF2IMAGE:
-         raise ModuleNotFoundError(
-             "pdf2image is not installed, please install it with `pip install lionagi[unstructured]`"
-         )
-
-     import os
-
-     convert_from_path = import_module(
-         "pdf2image", import_name="convert_from_path"
-     )
-
-     # Ensure the output folder exists
-     os.makedirs(output_folder, exist_ok=True)
-
-     # Convert PDF to a list of PIL Image objects
-     images = convert_from_path(pdf_path, dpi=dpi)
-
-     saved_paths = []
-     for i, image in enumerate(images):
-         # Construct the output file name
-         image_file = os.path.join(output_folder, f"page_{i + 1}.{fmt}")
-         image.save(image_file, fmt.upper())
-         saved_paths.append(image_file)
-
-     return saved_paths
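A short usage sketch of the removed helper as it behaved in 0.16.2, assuming the optional unstructured extra (pdf2image plus poppler) is installed; the file names are hypothetical:

# 0.16.2 only; the lionagi.libs.unstructured package is removed in 0.17.0.
from lionagi.libs.unstructured.pdf_to_image import pdf_to_images

# "report.pdf" and "out_pages" are hypothetical paths.
paths = pdf_to_images("report.pdf", "out_pages", dpi=200, fmt="png")
# -> ["out_pages/page_1.png", "out_pages/page_2.png", ...]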
lionagi/libs/unstructured/read_image_to_base64.py DELETED
@@ -1,33 +0,0 @@
- from pathlib import Path
-
- from lionagi.utils import is_import_installed
-
- _HAS_OPENCV = is_import_installed("cv2")
-
-
- __all__ = ("read_image_to_base64",)
-
-
- def read_image_to_base64(image_path: str | Path) -> str:
-     if not _HAS_OPENCV:
-         raise ModuleNotFoundError(
-             "OpenCV is not installed, please install it with `pip install lionagi[unstructured]`"
-         )
-
-     import base64
-
-     import cv2
-
-     image_path = str(image_path)
-     image = cv2.imread(image_path, cv2.COLOR_BGR2RGB)
-
-     if image is None:
-         raise ValueError(f"Could not read image from path: {image_path}")
-
-     file_extension = "." + image_path.split(".")[-1]
-
-     success, buffer = cv2.imencode(file_extension, image)
-     if not success:
-         raise ValueError(f"Could not encode image to {file_extension} format.")
-     encoded_image = base64.b64encode(buffer).decode("utf-8")
-     return encoded_image
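Likewise, a usage sketch of the removed helper as it behaved in 0.16.2, assuming OpenCV is installed; the path and MIME type are hypothetical:

# 0.16.2 only; removed in 0.17.0 along with the rest of lionagi.libs.unstructured.
from lionagi.libs.unstructured.read_image_to_base64 import read_image_to_base64

b64 = read_image_to_base64("photo.jpg")      # hypothetical image path
data_url = f"data:image/jpeg;base64,{b64}"   # e.g. for an image message payload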
lionagi/libs/validate/fuzzy_match_keys.py DELETED
@@ -1,7 +0,0 @@
- from lionagi.ln.fuzzy._fuzzy_match import (
-     FuzzyMatchKeysParams,
-     HandleUnmatched,
-     fuzzy_match_keys,
- )
-
- __all__ = ("fuzzy_match_keys", "FuzzyMatchKeysParams", "HandleUnmatched")
lionagi/libs/validate/fuzzy_validate_mapping.py DELETED
@@ -1,144 +0,0 @@
- # Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
- #
- # SPDX-License-Identifier: Apache-2.0
-
- from collections.abc import Callable, Sequence
- from typing import Any, Literal
-
- from lionagi.utils import KeysDict, Params, to_dict, to_json
-
- from .fuzzy_match_keys import fuzzy_match_keys
- from .string_similarity import SIMILARITY_TYPE
-
-
- class FuzzyValidateMappingParams(Params):
-     similarity_algo: SIMILARITY_TYPE | Callable[[str, str], float] = (
-         "jaro_winkler"
-     )
-     similarity_threshold: float = 0.85
-     fuzzy_match: bool = True
-     handle_unmatched: Literal["ignore", "raise", "remove", "fill", "force"] = (
-         "ignore"
-     )
-     fill_value: Any = None
-     fill_mapping: dict[str, Any] | None = None
-     strict: bool = False
-     suppress_conversion_errors: bool = False
-
-     def __call__(
-         self, d_: dict[str, Any], keys: Sequence[str] | KeysDict
-     ) -> dict[str, Any]:
-         return fuzzy_validate_mapping(
-             d_,
-             keys,
-             similarity_algo=self.similarity_algo,
-             similarity_threshold=self.similarity_threshold,
-             fuzzy_match=self.fuzzy_match,
-             handle_unmatched=self.handle_unmatched,
-             fill_value=self.fill_value,
-             fill_mapping=self.fill_mapping,
-             strict=self.strict,
-             suppress_conversion_errors=self.suppress_conversion_errors,
-         )
-
-
- def fuzzy_validate_mapping(
-     d: Any,
-     keys: Sequence[str] | KeysDict,
-     /,
-     *,
-     similarity_algo: (
-         SIMILARITY_TYPE | Callable[[str, str], float]
-     ) = "jaro_winkler",
-     similarity_threshold: float = 0.85,
-     fuzzy_match: bool = True,
-     handle_unmatched: Literal[
-         "ignore", "raise", "remove", "fill", "force"
-     ] = "ignore",
-     fill_value: Any = None,
-     fill_mapping: dict[str, Any] | None = None,
-     strict: bool = False,
-     suppress_conversion_errors: bool = False,
- ) -> dict[str, Any]:
-     """
-     Validate and correct any input into a dictionary with expected keys.
-
-     Args:
-         d: Input to validate. Can be:
-             - Dictionary
-             - JSON string or markdown code block
-             - XML string
-             - Object with to_dict/model_dump method
-             - Any type convertible to dictionary
-         keys: List of expected keys or dictionary mapping keys to types.
-         similarity_algo: String similarity algorithm or custom function.
-         similarity_threshold: Minimum similarity score for fuzzy matching.
-         fuzzy_match: If True, use fuzzy matching for key correction.
-         handle_unmatched: How to handle unmatched keys:
-             - "ignore": Keep unmatched keys
-             - "raise": Raise error for unmatched keys
-             - "remove": Remove unmatched keys
-             - "fill": Fill missing keys with default values
-             - "force": Combine "fill" and "remove" behaviors
-         fill_value: Default value for filling unmatched keys.
-         fill_mapping: Dictionary mapping keys to default values.
-         strict: Raise error if any expected key is missing.
-         suppress_conversion_errors: Return empty dict on conversion errors.
-
-     Returns:
-         Validated and corrected dictionary.
-
-     Raises:
-         ValueError: If input cannot be converted or validation fails.
-         TypeError: If input types are invalid.
-     """
-     if d is None:
-         raise TypeError("Input cannot be None")
-
-     # Try converting to dictionary
-     try:
-         if isinstance(d, str):
-             # First try to_json for JSON strings and code blocks
-             try:
-                 json_result = to_json(d)
-                 dict_input = (
-                     json_result[0]
-                     if isinstance(json_result, list)
-                     else json_result
-                 )
-             except Exception:
-                 # Fall back to to_dict for other string formats
-                 dict_input = to_dict(
-                     d, str_type="json", fuzzy_parse=True, suppress=True
-                 )
-         else:
-             dict_input = to_dict(
-                 d, use_model_dump=True, fuzzy_parse=True, suppress=True
-             )
-
-         if not isinstance(dict_input, dict):
-             if suppress_conversion_errors:
-                 dict_input = {}
-             else:
-                 raise ValueError(
-                     f"Failed to convert input to dictionary: {type(dict_input)}"
-                 )
-
-     except Exception as e:
-         if suppress_conversion_errors:
-             dict_input = {}
-         else:
-             raise ValueError(f"Failed to convert input to dictionary: {e}")
-
-     # Validate the dictionary
-     return fuzzy_match_keys(
-         dict_input,
-         keys,
-         similarity_algo=similarity_algo,
-         similarity_threshold=similarity_threshold,
-         fuzzy_match=fuzzy_match,
-         handle_unmatched=handle_unmatched,
-         fill_value=fill_value,
-         fill_mapping=fill_mapping,
-         strict=strict,
-     )
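A usage sketch of the removed function as documented above (0.16.2); the exact output depends on the similarity threshold and the underlying fuzzy_match_keys implementation:

# 0.16.2 only; fuzzy_validate_mapping is removed in 0.17.0 (a successor appears
# to live in lionagi/ln/fuzzy/_fuzzy_validate.py, which is not shown in this diff).
from lionagi.libs.validate.fuzzy_validate_mapping import fuzzy_validate_mapping

raw = '{"name": "lion", "agee": 3}'  # close-but-wrong key "agee"
out = fuzzy_validate_mapping(
    raw,
    ["name", "age"],
    handle_unmatched="force",  # fill missing expected keys, drop unmatched ones
)
# -> roughly {"name": "lion", "age": 3}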
lionagi/libs/validate/string_similarity.py DELETED
@@ -1,7 +0,0 @@
- from lionagi.ln.fuzzy._string_similarity import (
-     SIMILARITY_TYPE,
-     SimilarityFunc,
-     string_similarity,
- )
-
- __all__ = ("string_similarity", "SIMILARITY_TYPE", "SimilarityFunc")