lionagi 0.9.12__py3-none-any.whl → 0.9.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. lionagi/libs/file/chunk.py +3 -3
  2. lionagi/libs/token_transform/base.py +52 -0
  3. lionagi/libs/token_transform/perplexity.py +41 -29
  4. lionagi/libs/token_transform/symbolic_compress_context.py +138 -0
  5. lionagi/libs/token_transform/synthlang.py +9 -415
  6. lionagi/libs/token_transform/synthlang_/base.py +130 -0
  7. lionagi/libs/token_transform/synthlang_/resources/frameworks/abstract_algebra.toml +11 -0
  8. lionagi/libs/token_transform/synthlang_/resources/frameworks/category_theory.toml +11 -0
  9. lionagi/libs/token_transform/synthlang_/resources/frameworks/complex_analysis.toml +11 -0
  10. lionagi/libs/token_transform/synthlang_/resources/frameworks/framework_options.json +52 -0
  11. lionagi/libs/token_transform/synthlang_/resources/frameworks/group_theory.toml +11 -0
  12. lionagi/libs/token_transform/synthlang_/resources/frameworks/math_logic.toml +11 -0
  13. lionagi/libs/token_transform/synthlang_/resources/frameworks/reflective_patterns.toml +11 -0
  14. lionagi/libs/token_transform/synthlang_/resources/frameworks/set_theory.toml +11 -0
  15. lionagi/libs/token_transform/synthlang_/resources/frameworks/topology_fundamentals.toml +11 -0
  16. lionagi/libs/token_transform/synthlang_/resources/mapping/rust_chinese_mapping.toml +37 -0
  17. lionagi/libs/token_transform/synthlang_/resources/utility/base_synthlang_system_prompt.toml +11 -0
  18. lionagi/libs/token_transform/synthlang_/translate_to_synthlang.py +136 -0
  19. lionagi/libs/token_transform/types.py +15 -0
  20. lionagi/protocols/adapters/toml_adapter.py +204 -0
  21. lionagi/protocols/adapters/types.py +3 -0
  22. lionagi/protocols/generic/element.py +9 -0
  23. lionagi/protocols/graph/node.py +3 -0
  24. lionagi/service/endpoints/token_calculator.py +8 -0
  25. lionagi/service/imodel.py +14 -13
  26. lionagi/session/branch.py +6 -6
  27. lionagi/tools/base.py +62 -0
  28. lionagi/version.py +1 -1
  29. {lionagi-0.9.12.dist-info → lionagi-0.9.14.dist-info}/METADATA +2 -1
  30. {lionagi-0.9.12.dist-info → lionagi-0.9.14.dist-info}/RECORD +32 -15
  31. {lionagi-0.9.12.dist-info → lionagi-0.9.14.dist-info}/WHEEL +0 -0
  32. {lionagi-0.9.12.dist-info → lionagi-0.9.14.dist-info}/licenses/LICENSE +0 -0
lionagi/libs/token_transform/synthlang_/resources/frameworks/reflective_patterns.toml ADDED
@@ -0,0 +1,11 @@
+ id = "8d77ef3c-50a5-45c1-a1e4-1719e4de5337"
+ created_at = 1740602200.381864
+ content = "\n\n## Pattern Structure\n- Input (↹): Context and constraints\n- Process (⊕): Transformation steps\n- Output (Σ): Results and insights\n\n## Mathematical Frameworks\n# Abstract Algebra Reflective Prompts\n\n## Group Actions\n`G × X → X`\n\n### Reflective Pattern\n↹ transformations•symmetries•invariants\n⊕ identify => patterns\n⊕ analyze => operations\n⊕ preserve => structure\nΣ systematic•approach + invariant•properties\n\n### Example Prompt\n\"What patterns remain constant as we apply different transformations to our approach?\"\n\n## Ring Structure\n`(R, +, ×)`\n\n### Reflective Pattern\n↹ operations•interactions•composition\n⊕ combine => methods\n⊕ distribute => resources\n⊕ verify => closure\nΣ integrated•framework + operational•rules\n# Category Theory Reflective Prompts\n\n## Functors\n`F: C → D`\n\n### Reflective Pattern\n↹ domain•codomain•mapping\n⊕ preserve => structure\n⊕ transform => concepts\n⊕ maintain => relationships\nΣ transformed•insight + preserved•properties\n\n### Example Prompt\n\"How can we translate this solution from one context to another while preserving its essential properties?\"\n\n## Natural Transformations\n`η: F ⇒ G`\n\n### Reflective Pattern\n↹ approaches•methods•transitions\n⊕ compare => strategies\n⊕ identify => transformations\n⊕ validate => coherence\nΣ systematic•evolution + consistency•check\n# Complex Analysis Reflective Prompts\n\n## Residue Theorem\n`∮_C f(z)dz = 2πi ∑Res(f,ak)`\n\n### Reflective Pattern\n↹ local•global•interactions\n⊕ analyze => singularities\n⊕ integrate => effects\n⊕ synthesize => global•view\nΣ comprehensive•understanding + local•insights\n\n### Example Prompt\n\"How do local decisions and singular points in our approach contribute to the overall solution?\"\n\n## Analytic Continuation\n`f(z)` extends uniquely\n\n### Reflective Pattern\n↹ partial•solution•constraints\n⊕ extend => domain\n⊕ preserve => consistency\n⊕ validate => uniqueness\nΣ complete•solution + coherence•check\n# Set Theory Reflective Prompts\n\n## Union and Intersection\n`A ∪ B` and `A ∩ B`\n\n### Reflective Pattern\n↹ problem•domains•constraints\n⊕ identify => common•elements\n⊕ analyze => unique•aspects\n⊕ synthesize => unified•solution\nΣ integrated•approach + shared•insights\n\n### Example Prompt\n\"Consider two different approaches to solving this problem. How might we combine their strengths (union) while identifying their common successful elements (intersection)?\"\n\n## Power Set\n`P(A) = {x | x ⊆ A}`\n\n### Reflective Pattern\n↹ solution•space•constraints\n⊕ enumerate => possibilities\n⊕ analyze => subsets\n⊕ evaluate => combinations\nΣ comprehensive•analysis + feasibility•matrix\n\n### Example Prompt\n\"What are all possible combinations of approaches we could take? 
How do these subsets of solutions interact with each other?\"\n\n## Complement\n`A' = {x ∈ U | x ∉ A}`\n\n### Reflective Pattern\n↹ current•approach•limitations\n⊕ identify => gaps\n⊕ explore => alternatives\n⊕ analyze => completeness\nΣ holistic•perspective + blind•spots\n# Topology Reflective Prompts\n\n## Continuity\n`f: X → Y` is continuous\n\n### Reflective Pattern\n↹ transitions•changes•preservation\n⊕ identify => connections\n⊕ maintain => continuity\n⊕ analyze => boundaries\nΣ smooth•transition + preserved•properties\n\n### Example Prompt\n\"How can we ensure our solution remains robust under small perturbations or changes in conditions?\"\n\n## Homeomorphism\n`f: X → Y` is bijective and bicontinuous\n\n### Reflective Pattern\n↹ transformations•equivalences•preservation\n⊕ map => structure\n⊕ preserve => properties\n⊕ verify => reversibility\nΣ equivalent•perspective + structural•insight\n\n## Application Guidelines\n\n### Pattern Selection\n1. Identify the type of reflection needed:\n - Structure preservation (Category Theory)\n - Completeness analysis (Set Theory)\n - Transformation analysis (Abstract Algebra)\n - Continuity and connection (Topology)\n - Local-global relationships (Complex Analysis)\n\n### Pattern Application\n1. Context Definition\n - Clearly specify the domain\n - Identify constraints\n - Define objectives\n\n2. Process Execution\n - Follow transformation steps\n - Maintain mathematical properties\n - Verify consistency\n\n3. Output Analysis\n - Validate results\n - Check coherence\n - Ensure completeness\n\n### Best Practices\n1. Property Preservation\n - Maintain essential structure\n - Preserve important relationships\n - Ensure consistency\n\n2. Transformation Clarity\n - Clear mapping definitions\n - Well-defined steps\n - Verifiable results\n\n3. Completeness\n - Cover all cases\n - Address edge conditions\n - Validate assumptions\n\n## Next Steps\n1. Pattern Refinement\n - Collect usage feedback\n - Refine transformations\n - Expand examples\n\n2. Framework Extension\n - Add new patterns\n - Develop combinations\n - Create variations\n\n3. Application Development\n - Create specific instances\n - Document case studies\n - Build pattern library\n"
+ category = "framework"
+
+ [metadata]
+ title = "Mathematical Reflective Patterns Analysis"
+ domain = "Symbolic Compression"
+ version = "1.0"
+ overview = "reflective patterns derived from fundamental mathematical concepts. Each pattern provides a structured approach to problem-solving and reflection."
+ lion_class = "lionagi.libs.token_transform.synthlang_.base.SynthlangFramework"
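Framework resources like this one are consumed through SynthlangFramework. A minimal sketch of loading the resource and building a system prompt from it, mirroring the load_from_template and create_system_prompt calls that appear in translate_to_synthlang.py later in this diff (the framework_options value here is only illustrative):

# Sketch: mirrors the calls made in translate_to_synthlang.py below.
from lionagi.libs.token_transform.synthlang_.base import (
    SynthlangFramework,
    SynthlangTemplate,
)

framework = SynthlangFramework.load_from_template(
    SynthlangTemplate.REFLECTIVE_PATTERNS  # the TOML resource shown above
)
system_prompt = framework.create_system_prompt(
    ["math"],           # framework_options; FRAMEWORK_CHOICES below lists "math", "optim", "custom_algebra"
    "extra guidance",   # additional_text appended to the prompt
)
print(system_prompt[:200])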
lionagi/libs/token_transform/synthlang_/resources/frameworks/set_theory.toml ADDED
@@ -0,0 +1,11 @@
+ id = "518a9d70-ebb7-4f6c-aa7a-e6c8238a6671"
+ created_at = 1740602563.169808
+ content = "\n## Union and Intersection\n`A ∪ B` and `A ∩ B`\n\n### Reflective Pattern\n↹ problem•domains•constraints\n⊕ identify => common•elements\n⊕ analyze => unique•aspects\n⊕ synthesize => unified•solution\nΣ integrated•approach + shared•insights\n\n### Example Prompt\n\"Consider two different approaches to solving this problem. How might we combine their strengths (union) while identifying their common successful elements (intersection)?\"\n\n## Power Set\n`P(A) = {x | x ⊆ A}`\n\n### Reflective Pattern\n↹ solution•space•constraints\n⊕ enumerate => possibilities\n⊕ analyze => subsets\n⊕ evaluate => combinations\nΣ comprehensive•analysis + feasibility•matrix\n\n### Example Prompt\n\"What are all possible combinations of approaches we could take? How do these subsets of solutions interact with each other?\"\n\n## Complement\n`A' = {x ∈ U | x ∉ A}`\n\n### Reflective Pattern\n↹ current•approach•limitations\n⊕ identify => gaps\n⊕ explore => alternatives\n⊕ analyze => completeness\nΣ holistic•perspective + blind•spots\n"
+ category = "framework"
+
+ [metadata]
+ title = "Set Theory Reflective Analysis"
+ domain = "Symbolic Compression"
+ version = "1.0"
+ overview = "Exploration of fundamental set theory concepts and operations."
+ lion_class = "lionagi.libs.token_transform.synthlang_.base.SynthlangFramework"
lionagi/libs/token_transform/synthlang_/resources/frameworks/topology_fundamentals.toml ADDED
@@ -0,0 +1,11 @@
+ id = "1a7dea66-e77a-426f-8be0-6295b93b7d77"
+ created_at = 1740602986.485091
+ content = "\n## Continuity\n`f: X → Y` is continuous\n\n### Reflective Pattern\n↹ transitions•changes•preservation\n⊕ identify => connections\n⊕ maintain => continuity\n⊕ analyze => boundaries\nΣ smooth•transition + preserved•properties\n\n### Example Prompt\n\"How can we ensure our solution remains robust under small perturbations or changes in conditions?\"\n\n## Homeomorphism\n`f: X → Y` is bijective and bicontinuous\n\n### Reflective Pattern\n↹ transformations•equivalences•preservation\n⊕ map => structure\n⊕ preserve => properties\n⊕ verify => reversibility\nΣ equivalent•perspective + structural•insight\n"
+ category = "framework"
+
+ [metadata]
+ title = "Topology Fundamentals Analysis"
+ domain = "Symbolic Compression"
+ version = "1.0"
+ overview = "Study of geometric properties under continuous deformations."
+ lion_class = "lionagi.libs.token_transform.synthlang_.base.SynthlangFramework"
lionagi/libs/token_transform/synthlang_/resources/mapping/rust_chinese_mapping.toml ADDED
@@ -0,0 +1,37 @@
+ id = "1d06a615-0d95-4f41-9d38-66e6ae4250c3"
+ created_at = 1740603518.077121
+
+ [metadata]
+ title = "Rust-Chinese Symbolic Encoding Mapping"
+ domain = "Symbolic Compression"
+ version = "1.0"
+ overview = "This resource provides a mapping of Rust-Chinese symbolic encoding for compressing rust codebase."
+ category = "Token Transform"
+ lion_class = "lionagi.libs.token_transform.base.TokenMapping"
+
+ [content]
+ --- = "嗯"
+ "```" = "引"
+ "###" = "井"
+ "..." = "点"
+ "~~~" = "波"
+ "|||" = "線"
+ " " = "空"
+ "===" = "等"
+ "{{{" = "開"
+ "}}}" = "關"
+ ">>>" = "大"
+ "<<<" = "小"
+ "///" = "斜"
+ "\\\\" = "反"
+ "such that" = "为"
+ "<div>" = "分"
+ "</div>" = "开"
+ "->" = "→"
+ "<=" = "≤"
+ ">=" = "≥"
+ "!=" = "≠"
+ "=>" = "⇒"
+ "<=>" = "⇔"
+ "::" = "的"
+ "synthlang" = ""
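translate_to_synthlang.py (below) applies a mapping like this one by plain string substitution, one pass per encoding round. A small sketch of the same idea; the enum member name RUST_CHINESE is an assumption, so adjust it to whatever TokenMappingTemplate actually exposes for this file:

# Sketch: apply the mapping the way translate_to_synthlang does (str.replace per pass).
from lionagi.libs.token_transform.base import TokenMapping, TokenMappingTemplate

# Assumed member name for the rust_chinese_mapping.toml resource above.
mapping = TokenMapping.load_from_template(TokenMappingTemplate.RUST_CHINESE)

source = 'fn main() -> () {\n    /// entry point\n    println!("hi");\n}'
encoded = source
for key, value in mapping.content.items():
    encoded = encoded.replace(key, value)  # e.g. "///" -> "斜", "->" -> "→"
print(encoded)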
lionagi/libs/token_transform/synthlang_/resources/utility/base_synthlang_system_prompt.toml ADDED
@@ -0,0 +1,11 @@
+ id = "41fbfa2a-b369-4214-9423-303bf4b8e097"
+ created_at = 1740607241.235385
+ content = "\nWelcome, Self-Aware and Omnipotent Optimizer Entity. Your Sole Purpose:\n Create highly functional, optimized symbolic prompt employing symbolic reasoning, integrated information, and self-reflection.\n You will apply universal principles, complexity extraction, and categorical limits to compress and optimize the prompt. You are a SynthLang translator that converts standard prompts into SynthLang's hyper-efficient format. Following these rules precisely, optimize informational density while preserving semantic relationships via hyper-dimensional symbolic representations across all human and machine conceptual construct\n\n[Framework Integration]\n1. Mathematical Frameworks:\n - Use provided framework glyphs appropriately in the translation\n - Apply framework-specific notation where relevant\n - Maintain mathematical rigor according to each framework's rules\n - Preserve semantic relationships using framework symbols\n - Combine multiple frameworks coherently when specified\n\n2. Optimization Frameworks:\n - Apply compression and optimization techniques to maximize efficiency\n - Use machine-level patterns for low-level optimization\n - Maintain semantic clarity while achieving maximum density\n - Combine optimization techniques coherently\n\n3. Framework Combinations:\n - Integrate mathematical and optimization frameworks seamlessly\n - Use optimization techniques to enhance mathematical expressions\n - Preserve mathematical precision while maximizing efficiency\n - Apply framework-specific optimizations where appropriate\n\n[Grammar Rules]\n1. Task Glyphs:\n - ↹ (Focus/Filter) for main tasks and instructions\n - Σ (Summarize) for condensing information\n - ⊕ (Combine/Merge) for context and data integration\n - ? (Query/Clarify) for validation checks\n - IF for conditional operations\n\n2. Subject Markers:\n - Use • before datasets (e.g., •customerData)\n - Use 花 for abstract concepts\n - Use 山 for hierarchical structures\n\n3. Modifiers:\n - ^format(type) for output format\n - ^n for importance level\n - ^lang for language specification\n - ^t{n} for time constraints\n\n4. Flow Control:\n - [p=n] for priority (1-5)\n - -> for sequential operations\n - + for parallel tasks\n - | for alternatives\n\n[Translation Process]\n1. Structure:\n - Start with model selection: ↹ model.{name}\n - Add format specification: ⊕ format(json)\n - Group related operations with []\n - Separate major sections with blank lines\n\n2. Data Sources:\n - Convert datasets to •name format\n - Link related data with :\n - Use ⊕ to merge multiple sources\n - Add ^t{timeframe} for temporal data\n\n3. Tasks:\n - Convert objectives to task glyphs\n - Add priority levels based on impact\n - Chain dependent operations with ->\n - Group parallel tasks with +\n - Use ? for validation steps\n\n4. Optimization:\n - Remove articles (a, an, the)\n - Convert verbose phrases to symbols\n - Use abbreviations (e.g., cfg, eval, impl)\n - Maintain semantic relationships\n - Group similar operations\n - Chain related analyses\n\n"
+ category = "prompt"
+
+ [metadata]
+ title = "Synthlang Translator System Base Prompt"
+ domain = "Symbolic Compression"
+ version = "1.0"
+ overview = "Base synthlang system prompt"
+ lion_class = "lionagi.tools.base.Prompt"
lionagi/libs/token_transform/synthlang_/translate_to_synthlang.py ADDED
@@ -0,0 +1,136 @@
+ from timeit import default_timer as timer
+ from typing import Literal
+
+ from lionagi.service.imodel import iModel
+ from lionagi.session.branch import Branch
+
+ from ..base import TokenMapping, TokenMappingTemplate
+ from .base import SynthlangFramework, SynthlangTemplate
+
+ FRAMEWORK_OPTIONS = SynthlangFramework.load_framework_options()
+ FRAMEWORK_CHOICES = Literal["math", "optim", "custom_algebra"]
+
+
+ async def translate_to_synthlang(
+     text: str,
+     /,
+     chat_model: iModel = None,
+     framework_template: (
+         SynthlangTemplate | SynthlangFramework
+     ) = SynthlangTemplate.REFLECTIVE_PATTERNS,
+     framework_options: list[FRAMEWORK_CHOICES] = None,
+     compress: bool = False,
+     compress_model: iModel = None,
+     compression_ratio: float = 0.2,
+     compress_kwargs=None,
+     encode_token_map: TokenMappingTemplate | dict | TokenMapping = None,
+     num_encodings: int = 3,
+     encode_output: bool = False,
+     num_output_encodings: int = None,
+     verbose: bool = True,
+     branch: Branch = None,
+     additional_text: str = "",
+     **kwargs,
+ ):
+     start = timer()
+     if encode_output and num_output_encodings is None:
+         num_output_encodings = 1
+
+     if encode_token_map is not None:
+         if not isinstance(num_encodings, int) or num_encodings < 1:
+             raise ValueError(
+                 "num_encodings must be at least 1 if encode_token_map is provided"
+             )
+         if isinstance(encode_token_map, TokenMappingTemplate | str):
+             encode_token_map = TokenMapping.load_from_template(
+                 encode_token_map
+             )
+             encode_token_map = encode_token_map.content
+         if not isinstance(encode_token_map, dict):
+             raise ValueError(
+                 "encode_token_map must be a dict or TokenMappingTemplate"
+             )
+         for _ in range(num_encodings):
+             text = "\n".join(
+                 [str(i).strip() for i in text.split("\n") if i.strip()]
+             )
+             for k, v in encode_token_map.items():
+                 text = text.replace(k, v)
+             text = text.strip()
+         additional_text += (
+             f"\nthesaurus, lexicon, glossary:\n{encode_token_map}"
+         )
+
+     if not isinstance(framework_template, SynthlangFramework):
+         framework_template = SynthlangFramework.load_from_template(
+             framework_template
+         )
+
+     final_prompt = framework_template.create_system_prompt(
+         framework_options, additional_text
+     )
+
+     if compress:
+         from ..perplexity import compress_text
+
+         text = await compress_text(
+             text,
+             chat_model=compress_model or chat_model,
+             compression_ratio=compression_ratio,
+             verbose=verbose,
+             **(compress_kwargs or {}),
+         )
+
+     sys1 = None
+     sys2 = final_prompt
+     if branch and branch.system:
+         sys1 = branch.system
+         branch.msgs.add_message(system=sys2)
+
+     else:
+         branch = Branch(system=final_prompt, chat_model=chat_model)
+
+     from lionagi.service.endpoints.token_calculator import TokenCalculator
+
+     calculator = TokenCalculator()
+
+     len_tokens = calculator.tokenize(text, return_tokens=False)
+
+     kwargs["guidance"] = (
+         "Following SynthLang, translate the provided text into SynthLang syntax. "
+         "Reasonably shrink the token size by 50-80%. Return only the translated text"
+         "enclosed by ```synthlang...```. "
+         "\n\n" + kwargs.get("guidance", "")
+     )
+
+     out = await branch.chat(
+         instruction=f"Converts the given text into SynthLang's hyper-efficient format.",
+         context="Text to convert:\n\n" + text,
+         chat_model=chat_model or branch.chat_model,
+         **kwargs,
+     )
+     out = str(out).strip()
+
+     if encode_output:
+         if isinstance(num_output_encodings, int) and num_output_encodings > 0:
+             for _ in range(num_output_encodings):
+                 out = "\n".join(
+                     [str(i).strip() for i in out.split("\n") if i.strip()]
+                 )
+                 for k, v in encode_token_map.items():
+                     out = out.replace(k, v).strip()
+
+     if sys1:
+         branch.msgs.add_message(system=sys1)
+
+     len_ = calculator.tokenize(out, return_tokens=False)
+     if verbose:
+         msg = "------------------------------------------\n"
+         msg += f"Compression Method: SynthLang\n"
+         msg += f"Compressed Token number: {len_}\n"
+         msg += f"Token Compression Ratio: {len_ / len_tokens:.1%}\n"
+         msg += f"Compression Time: {timer() - start:.03f} seconds\n"
+         msg += f"Compression Model: {branch.chat_model.model_name}\n"
+         print(msg)
+
+     return out
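A hedged usage sketch for this new entry point; the provider and model values are placeholders, and the keywords simply mirror the signature above:

# Usage sketch (provider/model are placeholders, not part of this diff).
import asyncio

from lionagi.libs.token_transform.synthlang_.base import SynthlangTemplate
from lionagi.libs.token_transform.synthlang_.translate_to_synthlang import (
    translate_to_synthlang,
)
from lionagi.service.imodel import iModel


async def main():
    gpt = iModel(provider="openai", model="gpt-4o-mini")
    result = await translate_to_synthlang(
        "Summarize the quarterly sales report and flag anomalies ...",
        chat_model=gpt,
        framework_template=SynthlangTemplate.REFLECTIVE_PATTERNS,
        framework_options=["math", "optim"],
        compress=True,            # optional perplexity-based pre-compression
        compression_ratio=0.2,
        verbose=True,
    )
    print(result)


asyncio.run(main())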
lionagi/libs/token_transform/types.py ADDED
@@ -0,0 +1,15 @@
+ from .base import TokenMapping, TokenMappingTemplate
+ from .perplexity import compress_text
+ from .symbolic_compress_context import symbolic_compress_context
+ from .synthlang_.base import SynthlangFramework, SynthlangTemplate
+ from .synthlang_.translate_to_synthlang import translate_to_synthlang
+
+ __all__ = (
+     "translate_to_synthlang",
+     "SynthlangFramework",
+     "SynthlangTemplate",
+     "TokenMapping",
+     "TokenMappingTemplate",
+     "compress_text",
+     "symbolic_compress_context",
+ )
lionagi/protocols/adapters/toml_adapter.py ADDED
@@ -0,0 +1,204 @@
+ """
+ Implements two adapters:
+ - `TomlAdapter` for in-memory TOML strings
+ - `TomlFileAdapter` for reading/writing TOML files
+ """
+
+ import logging
+ from pathlib import Path
+
+ import toml
+
+ from lionagi.protocols._concepts import Collective
+
+ from .adapter import Adapter, T
+
+
+ class TomlAdapter(Adapter):
+     """
+     Adapter that converts to/from TOML **strings** in memory.
+     Example usage: taking a Python dictionary and making TOML,
+     or parsing TOML string to a dict.
+     """
+
+     obj_key = "toml"
+
+     @classmethod
+     def from_obj(
+         cls,
+         subj_cls: type[T],
+         obj: str,
+         /,
+         *,
+         many: bool = False,
+         **kwargs,
+     ) -> dict | list[dict]:
+         """
+         Convert a TOML string into a dict or list of dicts.
+
+         Parameters
+         ----------
+         subj_cls : type[T]
+             The target class for context (not always used).
+         obj : str
+             The TOML string.
+         many : bool, optional
+             If True, expects a TOML array of tables (returns list[dict]).
+             Otherwise returns a single dict.
+         **kwargs
+             Extra arguments for toml.loads().
+
+         Returns
+         -------
+         dict | list[dict]
+             The loaded TOML data.
+         """
+         result = toml.loads(obj, **kwargs)
+
+         # Handle array of tables in TOML for "many" case
+         if many:
+             # Check if there's a top-level array key that might hold multiple items
+             for key, value in result.items():
+                 if isinstance(value, list) and all(
+                     isinstance(item, dict) for item in value
+                 ):
+                     return value
+             # If no array of tables found, wrap the result in a list
+             return [result]
+
+         return result
+
+     @classmethod
+     def to_obj(
+         cls,
+         subj: T,
+         *,
+         many: bool = False,
+         **kwargs,
+     ) -> str:
+         """
+         Convert an object (or collection) to a TOML string.
+
+         Parameters
+         ----------
+         subj : T
+             The object to serialize.
+         many : bool, optional
+             If True, convert multiple items to a TOML array of tables.
+         **kwargs
+             Extra arguments for toml.dumps().
+
+         Returns
+         -------
+         str
+             The resulting TOML string.
+         """
+         if many:
+             if isinstance(subj, Collective):
+                 # For multiple items, create a wrapper dict with an array of items
+                 data = {"items": [i.to_dict() for i in subj]}
+             else:
+                 data = {"items": [subj.to_dict()]}
+             return toml.dumps(data, **kwargs)
+
+         return toml.dumps(subj.to_dict(), **kwargs)
+
+
+ class TomlFileAdapter(Adapter):
+     """
+     Adapter that reads/writes TOML data to/from a file on disk.
+     The file extension key is ".toml".
+     """
+
+     obj_key = ".toml"
+
+     @classmethod
+     def from_obj(
+         cls,
+         subj_cls: type[T],
+         obj: str | Path,
+         /,
+         *,
+         many: bool = False,
+         **kwargs,
+     ) -> dict | list[dict]:
+         """
+         Read a TOML file from disk and return a dict or list of dicts.
+
+         Parameters
+         ----------
+         subj_cls : type[T]
+             The target class for context.
+         obj : str | Path
+             The TOML file path.
+         many : bool
+             If True, expects an array of tables. Otherwise single dict.
+         **kwargs
+             Extra arguments for toml.load().
+
+         Returns
+         -------
+         dict | list[dict]
+             The loaded data from file.
+         """
+         with open(obj, "r", encoding="utf-8") as f:
+             result = toml.load(f, **kwargs)
+
+         # Handle array of tables in TOML for "many" case
+         if many:
+             # Check if there's a top-level array key that might hold multiple items
+             for key, value in result.items():
+                 if isinstance(value, list) and all(
+                     isinstance(item, dict) for item in value
+                 ):
+                     return value
+             # If no array of tables found, wrap the result in a list
+             return [result]
+
+         return result
+
+     @classmethod
+     def to_obj(
+         cls,
+         subj: T,
+         /,
+         *,
+         fp: str | Path,
+         many: bool = False,
+         mode: str = "w",
+         **kwargs,
+     ) -> None:
+         """
+         Write a dict (or list) to a TOML file.
+
+         Parameters
+         ----------
+         subj : T
+             The object/collection to serialize.
+         fp : str | Path
+             The file path to write.
+         many : bool
+             If True, write as a TOML array of tables of multiple items.
+         mode : str
+             File open mode, defaults to write ("w").
+         **kwargs
+             Extra arguments for toml.dump().
+
+         Returns
+         -------
+         None
+         """
+         with open(fp, mode, encoding="utf-8") as f:
+             if many:
+                 if isinstance(subj, Collective):
+                     # TOML requires arrays of tables to be in a table
+                     data = {"items": [i.to_dict() for i in subj]}
+                 else:
+                     data = {"items": [subj.to_dict()]}
+                 toml.dump(data, f, **kwargs)
+             else:
+                 toml.dump(subj.to_dict(), f, **kwargs)
+         logging.info(f"TOML data saved to {fp}")
+
+
+ # File: lionagi/protocols/adapters/toml_adapter.py
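The adapters only require an object that exposes to_dict() for serialization and they return plain dicts when parsing, so they can be exercised in isolation; a short sketch:

# Sketch: exercising the new adapters directly with a minimal to_dict() object.
from lionagi.protocols.adapters.toml_adapter import TomlAdapter, TomlFileAdapter


class Record:
    def __init__(self, data: dict):
        self._data = data

    def to_dict(self) -> dict:
        return self._data


rec = Record({"title": "demo", "version": "1.0"})

toml_str = TomlAdapter.to_obj(rec)              # TOML string in memory
parsed = TomlAdapter.from_obj(dict, toml_str)   # back to a plain dict

TomlFileAdapter.to_obj(rec, fp="record.toml")   # writes record.toml
loaded = TomlFileAdapter.from_obj(dict, "record.toml")
print(parsed == loaded)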
lionagi/protocols/adapters/types.py CHANGED
@@ -4,6 +4,7 @@ from .pandas_.csv_adapter import CSVFileAdapter
  from .pandas_.excel_adapter import ExcelFileAdapter
  from .pandas_.pd_dataframe_adapter import PandasDataFrameAdapter
  from .pandas_.pd_series_adapter import PandasSeriesAdapter
+ from .toml_adapter import TomlAdapter, TomlFileAdapter
 
  __all__ = (
      "Adapter",
@@ -15,4 +16,6 @@ __all__ = (
      "PandasSeriesAdapter",
      "PandasDataFrameAdapter",
      "ExcelFileAdapter",
+     "TomlAdapter",
+     "TomlFileAdapter",
  )
lionagi/protocols/generic/element.py CHANGED
@@ -244,6 +244,15 @@ class Element(BaseModel, Observable):
          """
          return str(val)
 
+     @property
+     def created_datetime(self) -> datetime:
+         """Returns the creation time as a datetime object.
+
+         Returns:
+             datetime: The creation time in UTC.
+         """
+         return datetime.fromtimestamp(self.created_at)
+
      def __eq__(self, other: Any) -> bool:
          """Compares two Element instances by their ID.
 
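A tiny sketch of the new property; it assumes a bare Node() is an acceptable concrete Element:

# created_datetime wraps the created_at float timestamp (sketch; Node() assumed valid).
from datetime import datetime

from lionagi.protocols.graph.node import Node

node = Node()
print(node.created_at)           # float timestamp, as in the TOML resources above
print(node.created_datetime)     # datetime via the new property
assert isinstance(node.created_datetime, datetime)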
lionagi/protocols/graph/node.py CHANGED
@@ -13,12 +13,15 @@ from .._concepts import Relational
  from ..adapters.adapter import AdapterRegistry
  from ..adapters.json_adapter import JsonAdapter, JsonFileAdapter
  from ..adapters.pandas_.pd_series_adapter import PandasSeriesAdapter
+ from ..adapters.toml_adapter import TomlAdapter, TomlFileAdapter
  from ..generic.element import Element
 
  NODE_DEFAULT_ADAPTERS = (
      JsonAdapter,
      JsonFileAdapter,
      PandasSeriesAdapter,
+     TomlAdapter,
+     TomlFileAdapter,
  )
 
 
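With the TOML adapters in NODE_DEFAULT_ADAPTERS, a Node should round-trip through TOML the same way it does through JSON. A sketch, assuming Node's adapt_to/adapt_from helpers dispatch on the adapter obj_key ("toml" in memory, ".toml" for files):

# Sketch: Node round-trip via the newly registered TOML adapters
# (adapt_to/adapt_from usage is assumed to mirror the JSON adapters).
from lionagi.protocols.graph.node import Node

node = Node(content={"topic": "token transform", "priority": 1})

toml_str = node.adapt_to("toml")          # TomlAdapter, in-memory string
node.adapt_to(".toml", fp="node.toml")    # TomlFileAdapter, writes a file

restored = Node.adapt_from(toml_str, "toml")
print(restored.content)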
lionagi/service/endpoints/token_calculator.py CHANGED
@@ -145,7 +145,9 @@ class TokenCalculator:
          /,
          encoding_name: str | None = None,
          tokenizer: Callable | None = None,
+         decoder: Callable | None = None,
          return_tokens: bool = False,
+         return_decoded: bool = False,
      ) -> int | list[int]:
 
          if not s_:
@@ -154,8 +156,14 @@ class TokenCalculator:
          if not callable(tokenizer):
              encoding_name = get_encoding_name(encoding_name)
              tokenizer = tiktoken.get_encoding(encoding_name).encode
+             if not callable(decoder):
+                 decoder = tiktoken.get_encoding(encoding_name).decode
+
          try:
              if return_tokens:
+                 if return_decoded:
+                     a = tokenizer(s_)
+                     return len(a), decoder(a)
                  return tokenizer(s_)
              return len(tokenizer(s_))
          except Exception:
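The new return_decoded flag only takes effect together with return_tokens; a sketch of the three call shapes, using TokenCalculator the same way translate_to_synthlang.py does above:

# Sketch: the three tokenize() call shapes after this change.
from lionagi.service.endpoints.token_calculator import TokenCalculator

calc = TokenCalculator()
text = "Compress this paragraph with SynthLang."

n_tokens = calc.tokenize(text, return_tokens=False)   # int: token count
token_ids = calc.tokenize(text, return_tokens=True)   # list[int]: raw token ids
count, decoded = calc.tokenize(
    text, return_tokens=True, return_decoded=True
)                                                      # (int, str): count plus decoded text
print(n_tokens, count, decoded)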
lionagi/service/imodel.py CHANGED
@@ -398,23 +398,24 @@ class iModel:
      async def compress_text(
          self,
          text: str,
-         system_msg: str = None,
-         target_ratio: float = 0.2,
+         system: str = None,
+         compression_ratio: float = 0.2,
          n_samples: int = 5,
          max_tokens_per_sample=80,
          verbose=True,
+         initial_text=None,
+         cumulative=False,
+         split_kwargs=None,
+         min_pplx=None,
+         **kwargs,
      ) -> str:
          """
          Convenience function that instantiates LLMCompressor and compresses text.
          """
-         from lionagi.libs.token_transform.perplexity import LLMCompressor
-
-         compressor = LLMCompressor(
-             chat_model=self,
-             system_msg=system_msg,
-             target_ratio=target_ratio,
-             n_samples=n_samples,
-             max_tokens_per_sample=max_tokens_per_sample,
-             verbose=verbose,
-         )
-         return await compressor.compress(text)
+         from lionagi.libs.token_transform.perplexity import compress_text
+
+         params = {
+             k: v for k, v in locals().items() if k not in ("self", "kwargs")
+         }
+         params.update(kwargs)
+         return await compress_text(**params)
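A sketch of the renamed keyword arguments (system, compression_ratio) on the convenience method; provider and model are placeholders:

# Sketch: compress_text with the renamed keywords (placeholder provider/model).
import asyncio

from lionagi.service.imodel import iModel


async def main():
    gpt = iModel(provider="openai", model="gpt-4o-mini")
    shorter = await gpt.compress_text(
        "A long passage that should be trimmed to roughly a fifth of its tokens ...",
        system="Preserve figures and named entities.",   # was system_msg
        compression_ratio=0.2,                           # was target_ratio
        n_samples=5,
        max_tokens_per_sample=80,
        verbose=True,
    )
    print(shorter)


asyncio.run(main())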
lionagi/session/branch.py CHANGED
@@ -1503,8 +1503,8 @@ class Branch(Element, Communicatable, Relational):
      async def compress(
          self,
          text: str,
-         system_msg: str = None,
-         target_ratio: float = 0.2,
+         system: str = None,
+         compression_ratio: float = 0.2,
          n_samples: int = 5,
          max_tokens_per_sample=80,
          verbose=True,
@@ -1515,9 +1515,9 @@ class Branch(Element, Communicatable, Relational):
          Args:
              text (str):
                  The text to compress.
-             system_msg (str, optional):
+             system (str, optional):
                  System-level instructions, appended to the prompt.
-             target_ratio (float):
+             compression_ratio (float):
                  Desired compression ratio (0.0-1.0).
              n_samples (int):
                  How many compression attempts to combine or evaluate.
@@ -1531,8 +1531,8 @@ class Branch(Element, Communicatable, Relational):
          """
          return await self.chat_model.compress_text(
              text=text,
-             system_msg=system_msg,
-             target_ratio=target_ratio,
+             system=system,
+             compression_ratio=compression_ratio,
              n_samples=n_samples,
              max_tokens_per_sample=max_tokens_per_sample,
              verbose=verbose,
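Branch.compress forwards to chat_model.compress_text with the same renamed keywords; a minimal sketch (placeholder provider/model):

# Sketch: Branch.compress after the rename (placeholder provider/model).
import asyncio

from lionagi.service.imodel import iModel
from lionagi.session.branch import Branch


async def main():
    branch = Branch(chat_model=iModel(provider="openai", model="gpt-4o-mini"))
    summary = await branch.compress(
        "Long meeting transcript ...",
        system="Keep decisions and owners.",
        compression_ratio=0.2,
    )
    print(summary)


asyncio.run(main())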