lionagi 0.16.1__py3-none-any.whl → 0.16.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. lionagi/adapters/_utils.py +0 -14
  2. lionagi/libs/file/save.py +8 -1
  3. lionagi/ln/__init__.py +10 -0
  4. lionagi/ln/_json_dump.py +322 -49
  5. lionagi/ln/fuzzy/__init__.py +4 -1
  6. lionagi/ln/fuzzy/_fuzzy_validate.py +109 -0
  7. lionagi/ln/fuzzy/_to_dict.py +388 -0
  8. lionagi/models/__init__.py +0 -2
  9. lionagi/operations/brainstorm/brainstorm.py +10 -10
  10. lionagi/operations/communicate/communicate.py +1 -1
  11. lionagi/operations/parse/parse.py +1 -1
  12. lionagi/protocols/generic/element.py +5 -14
  13. lionagi/protocols/generic/log.py +2 -2
  14. lionagi/protocols/generic/pile.py +1 -1
  15. lionagi/protocols/messages/message.py +8 -1
  16. lionagi/protocols/operatives/operative.py +2 -2
  17. lionagi/service/connections/endpoint.py +7 -0
  18. lionagi/service/connections/match_endpoint.py +2 -10
  19. lionagi/service/connections/providers/types.py +1 -3
  20. lionagi/service/hooks/hook_event.py +1 -1
  21. lionagi/service/hooks/hook_registry.py +1 -1
  22. lionagi/service/rate_limited_processor.py +1 -1
  23. lionagi/session/branch.py +1 -101
  24. lionagi/session/session.py +9 -14
  25. lionagi/utils.py +3 -334
  26. lionagi/version.py +1 -1
  27. {lionagi-0.16.1.dist-info → lionagi-0.16.3.dist-info}/METADATA +3 -13
  28. {lionagi-0.16.1.dist-info → lionagi-0.16.3.dist-info}/RECORD +30 -78
  29. lionagi/adapters/postgres_model_adapter.py +0 -131
  30. lionagi/libs/concurrency.py +0 -1
  31. lionagi/libs/file/params.py +0 -175
  32. lionagi/libs/nested/__init__.py +0 -3
  33. lionagi/libs/nested/flatten.py +0 -172
  34. lionagi/libs/nested/nfilter.py +0 -59
  35. lionagi/libs/nested/nget.py +0 -45
  36. lionagi/libs/nested/ninsert.py +0 -104
  37. lionagi/libs/nested/nmerge.py +0 -158
  38. lionagi/libs/nested/npop.py +0 -69
  39. lionagi/libs/nested/nset.py +0 -94
  40. lionagi/libs/nested/unflatten.py +0 -83
  41. lionagi/libs/nested/utils.py +0 -189
  42. lionagi/libs/parse.py +0 -31
  43. lionagi/libs/schema/json_schema.py +0 -231
  44. lionagi/libs/token_transform/__init__.py +0 -0
  45. lionagi/libs/token_transform/base.py +0 -54
  46. lionagi/libs/token_transform/llmlingua.py +0 -1
  47. lionagi/libs/token_transform/perplexity.py +0 -450
  48. lionagi/libs/token_transform/symbolic_compress_context.py +0 -152
  49. lionagi/libs/token_transform/synthlang.py +0 -9
  50. lionagi/libs/token_transform/synthlang_/base.py +0 -128
  51. lionagi/libs/token_transform/synthlang_/resources/frameworks/abstract_algebra.toml +0 -11
  52. lionagi/libs/token_transform/synthlang_/resources/frameworks/category_theory.toml +0 -11
  53. lionagi/libs/token_transform/synthlang_/resources/frameworks/complex_analysis.toml +0 -11
  54. lionagi/libs/token_transform/synthlang_/resources/frameworks/framework_options.json +0 -52
  55. lionagi/libs/token_transform/synthlang_/resources/frameworks/group_theory.toml +0 -11
  56. lionagi/libs/token_transform/synthlang_/resources/frameworks/math_logic.toml +0 -11
  57. lionagi/libs/token_transform/synthlang_/resources/frameworks/reflective_patterns.toml +0 -11
  58. lionagi/libs/token_transform/synthlang_/resources/frameworks/set_theory.toml +0 -11
  59. lionagi/libs/token_transform/synthlang_/resources/frameworks/topology_fundamentals.toml +0 -11
  60. lionagi/libs/token_transform/synthlang_/resources/mapping/lion_emoji_mapping.toml +0 -61
  61. lionagi/libs/token_transform/synthlang_/resources/mapping/python_math_mapping.toml +0 -41
  62. lionagi/libs/token_transform/synthlang_/resources/mapping/rust_chinese_mapping.toml +0 -60
  63. lionagi/libs/token_transform/synthlang_/resources/utility/base_synthlang_system_prompt.toml +0 -11
  64. lionagi/libs/token_transform/synthlang_/translate_to_synthlang.py +0 -140
  65. lionagi/libs/token_transform/types.py +0 -15
  66. lionagi/libs/unstructured/__init__.py +0 -0
  67. lionagi/libs/unstructured/pdf_to_image.py +0 -45
  68. lionagi/libs/unstructured/read_image_to_base64.py +0 -33
  69. lionagi/libs/validate/fuzzy_match_keys.py +0 -7
  70. lionagi/libs/validate/fuzzy_validate_mapping.py +0 -144
  71. lionagi/libs/validate/string_similarity.py +0 -7
  72. lionagi/libs/validate/xml_parser.py +0 -203
  73. lionagi/models/note.py +0 -383
  74. lionagi/operations/translate/__init__.py +0 -0
  75. lionagi/operations/translate/translate.py +0 -47
  76. lionagi/service/connections/providers/claude_code_.py +0 -294
  77. lionagi/tools/memory/tools.py +0 -495
  78. {lionagi-0.16.1.dist-info → lionagi-0.16.3.dist-info}/WHEEL +0 -0
  79. {lionagi-0.16.1.dist-info → lionagi-0.16.3.dist-info}/licenses/LICENSE +0 -0
lionagi/tools/memory/tools.py (deleted)
@@ -1,495 +0,0 @@
- """
- Memory Tools - Proper lionagi tool implementation following reader pattern
- """
-
- from datetime import datetime, timezone
- from enum import Enum
- from typing import Any
-
- from pydantic import BaseModel, Field, model_validator
-
- from lionagi.libs.validate.to_num import to_num
- from lionagi.protocols.action.tool import Tool
- from lionagi.tools.base import LionTool
-
-
- class MemoryAction(str, Enum):
-     """
-     Memory action types following reader pattern.
-     - 'store': Store new memory with embeddings and metadata
-     - 'recall': Retrieve memories based on semantic similarity
-     - 'search': Search memories using multiple criteria
-     - 'relate': Create relationships between memories
-     - 'explore': Deep multi-step memory exploration
-     - 'synthesize': Combine multiple memories into insights
-     """
-
-     store = "store"
-     recall = "recall"
-     search = "search"
-     relate = "relate"
-     explore = "explore"
-     synthesize = "synthesize"
-
-
- class MemoryLayer(str, Enum):
-     """Memory storage layers"""
-
-     static = "static"  # File-based persistent memory
-     temporal = "temporal"  # Time-based conversational memory
-     experience = "experience"  # High-value insights and patterns
-
-
- class MemoryRequest(BaseModel):
-     """
-     Request model for MemoryTool following reader pattern.
-     Supports multiple memory operations with unified interface.
-     """
-
-     action: MemoryAction = Field(
-         ...,
-         description=(
-             "Memory action to perform. Must be one of: "
-             "- 'store': Save new memory with automatic embedding and indexing. "
-             "- 'recall': Retrieve semantically similar memories. "
-             "- 'search': Advanced search with multiple criteria. "
-             "- 'relate': Create relationships between memories. "
-             "- 'explore': Deep exploration with multiple strategies. "
-             "- 'synthesize': Combine memories into new insights."
-         ),
-     )
-
-     # Store action fields
-     content: str | None = Field(
-         None,
-         description=(
-             "Content to store. REQUIRED if action='store'. For other actions, leave it None."
-         ),
-     )
-
-     tags: list[str] | None = Field(
-         None,
-         description=(
-             "Tags for categorization. Used with action='store'. "
-             "Can also be used to filter in 'search' and 'recall'."
-         ),
-     )
-
-     importance: float | None = Field(
-         0.5,
-         description=(
-             "Importance score (0.0-1.0) for prioritization. "
-             "Used with action='store'. Higher scores are retained longer."
-         ),
-     )
-
-     layer: MemoryLayer | None = Field(
-         MemoryLayer.temporal,
-         description=(
-             "Memory layer for storage/search. "
-             "static: persistent files, temporal: conversations, experience: insights"
-         ),
-     )
-
-     # Recall/Search fields
-     query: str | None = Field(
-         None,
-         description=(
-             "Query text for semantic search. REQUIRED for actions: 'recall', 'search', 'explore'."
-         ),
-     )
-
-     limit: int | None = Field(
-         5,
-         description="Maximum number of results to return. Default is 5.",
-     )
-
-     threshold: float | None = Field(
-         0.7,
-         description=(
-             "Similarity threshold (0.0-1.0) for filtering results. "
-             "Only memories above this threshold are returned."
-         ),
-     )
-
-     # Relate action fields
-     source_id: str | None = Field(
-         None,
-         description="Source memory ID for creating relationships. REQUIRED if action='relate'.",
-     )
-
-     target_id: str | None = Field(
-         None,
-         description="Target memory ID for creating relationships. REQUIRED if action='relate'.",
-     )
-
-     relationship: str | None = Field(
-         None,
-         description=(
-             "Type of relationship (e.g., 'relates_to', 'contradicts', 'supports'). "
-             "REQUIRED if action='relate'."
-         ),
-     )
-
-     # Explore action fields
-     depth: int | None = Field(
-         3,
-         description="Exploration depth for multi-step exploration. Default is 3.",
-     )
-
-     strategies: list[str] | None = Field(
-         None,
-         description=(
-             "Exploration strategies to use: 'semantic', 'temporal', 'relational', 'contextual'. "
-             "If None, uses appropriate defaults."
-         ),
-     )
-
-     # Synthesize action fields
-     memory_ids: list[str] | None = Field(
-         None,
-         description="List of memory IDs to synthesize. Used with action='synthesize'.",
-     )
-
-     synthesis_mode: str | None = Field(
-         "intelligent",
-         description=(
-             "Synthesis complexity: 'simple' (aggregation), "
-             "'intelligent' (AI-powered), 'deep' (multi-step reasoning)"
-         ),
-     )
-
-     @model_validator(mode="before")
-     def _validate_request(cls, values):
-         """Clean up empty dicts and validate numeric fields"""
-         for k, v in values.items():
-             if v == {}:
-                 values[k] = None
-             if k in ["limit", "depth"]:
-                 try:
-                     values[k] = to_num(v, num_type=int)
-                 except ValueError:
-                     values[k] = None
-             if k in ["importance", "threshold"]:
-                 try:
-                     values[k] = to_num(v, num_type=float)
-                 except ValueError:
-                     values[k] = None
-         return values
-
-
- class MemoryInfo(BaseModel):
-     """Information about a stored memory"""
-
-     memory_id: str
-     timestamp: str
-     layer: MemoryLayer
-     tags: list[str] = Field(default_factory=list)
-     importance: float
-     token_count: int | None = None
-
-
- class MemoryMatch(BaseModel):
-     """A memory match from recall/search operations"""
-
-     memory_id: str
-     content: str
-     similarity: float
-     timestamp: str
-     tags: list[str] = Field(default_factory=list)
-     metadata: dict[str, Any] = Field(default_factory=dict)
-
-
- class ExplorationResult(BaseModel):
-     """Result from memory exploration"""
-
-     query: str
-     depth_reached: int
-     strategies_used: list[str]
-     insights: list[dict[str, Any]]
-     connections_found: int
-     processing_time: float
-
-
- class SynthesisResult(BaseModel):
-     """Result from memory synthesis"""
-
-     synthesis_id: str
-     source_memories: list[str]
-     synthesis_content: str
-     confidence: float
-     insights: list[str]
-
-
- class MemoryResponse(BaseModel):
-     """
-     Response from MemoryTool following reader pattern.
-     Different fields are populated based on the action performed.
-     """
-
-     success: bool = Field(
-         ...,
-         description="Indicates if the requested action was performed successfully.",
-     )
-
-     error: str | None = Field(
-         None,
-         description="Describes any error that occurred, if success=False.",
-     )
-
-     # Store response
-     memory_info: MemoryInfo | None = Field(
-         None,
-         description="Populated when action='store' succeeds, with memory ID and metadata.",
-     )
-
-     # Recall/Search response
-     matches: list[MemoryMatch] | None = Field(
-         None,
-         description="Populated for recall/search actions, containing matching memories.",
-     )
-
-     # Exploration response
-     exploration: ExplorationResult | None = Field(
-         None,
-         description="Populated when action='explore' succeeds, with deep insights.",
-     )
-
-     # Synthesis response
-     synthesis: SynthesisResult | None = Field(
-         None,
-         description="Populated when action='synthesize' succeeds, with combined insights.",
-     )
-
-     # Relationship response
-     relationship_created: bool | None = Field(
-         None,
-         description="Indicates if relationship was created successfully.",
-     )
-
-
- class MemoryTool(LionTool):
-     """
-     Memory tool following lionagi's reader pattern.
-     Provides unified interface for all memory operations.
-     """
-
-     is_lion_system_tool = True
-     system_tool_name = "memory_tool"
-
-     def __init__(self, memory_backend=None):
-         super().__init__()
-         self.backend = memory_backend
-         self._tool = None
-
-         # Memory caches
-         self._embedding_cache = {}
-         self._search_cache = {}
-
-     def handle_request(self, request: MemoryRequest) -> MemoryResponse:
-         """
-         Handle memory requests based on action type.
-         Routes to appropriate handler method.
-         """
-         if isinstance(request, dict):
-             request = MemoryRequest(**request)
-
-         try:
-             if request.action == "store":
-                 return self._store_memory(request)
-             elif request.action == "recall":
-                 return self._recall_memories(request)
-             elif request.action == "search":
-                 return self._search_memories(request)
-             elif request.action == "relate":
-                 return self._relate_memories(request)
-             elif request.action == "explore":
-                 return self._explore_memories(request)
-             elif request.action == "synthesize":
-                 return self._synthesize_memories(request)
-             else:
-                 return MemoryResponse(
-                     success=False,
-                     error=f"Unknown action type: {request.action}",
-                 )
-         except Exception as e:
-             return MemoryResponse(
-                 success=False, error=f"Memory operation failed: {str(e)}"
-             )
-
-     def _store_memory(self, request: MemoryRequest) -> MemoryResponse:
-         """Store new memory with embedding and metadata"""
-         if not request.content:
-             return MemoryResponse(
-                 success=False, error="Content is required for store action"
-             )
-
-         # Generate embedding
-         embedding = self._get_embedding(request.content)
-
-         # Create memory ID
-         memory_id = f"MEM_{int(datetime.now(timezone.utc).timestamp())}_{hash(request.content) % 10000}"
-
-         # Store in backend
-         if self.backend:
-             self.backend.store(
-                 {
-                     "memory_id": memory_id,
-                     "content": request.content,
-                     "embedding": embedding,
-                     "tags": request.tags or [],
-                     "importance": request.importance,
-                     "layer": request.layer,
-                     "timestamp": datetime.now(timezone.utc).isoformat(),
-                 }
-             )
-
-         return MemoryResponse(
-             success=True,
-             memory_info=MemoryInfo(
-                 memory_id=memory_id,
-                 timestamp=datetime.now(timezone.utc).isoformat(),
-                 layer=request.layer,
-                 tags=request.tags or [],
-                 importance=request.importance,
-                 token_count=len(
-                     request.content.split()
-                 ),  # Simple token estimate
-             ),
-         )
-
-     def _recall_memories(self, request: MemoryRequest) -> MemoryResponse:
-         """Recall memories based on semantic similarity"""
-         if not request.query:
-             return MemoryResponse(
-                 success=False, error="Query is required for recall action"
-             )
-
-         # Get query embedding
-         query_embedding = self._get_embedding(request.query)
-
-         # Search in backend
-         if self.backend:
-             matches = self.backend.search_similar(
-                 query_embedding,
-                 limit=request.limit,
-                 threshold=request.threshold,
-                 layer=request.layer,
-                 tags=request.tags,
-             )
-         else:
-             # Mock response for testing
-             matches = [
-                 MemoryMatch(
-                     memory_id=f"MEM_TEST_{i}",
-                     content=f"Test memory {i} related to: {request.query}",
-                     similarity=0.9 - (i * 0.1),
-                     timestamp=datetime.now(timezone.utc).isoformat(),
-                     tags=["test", "mock"],
-                 )
-                 for i in range(min(3, request.limit))
-             ]
-
-         return MemoryResponse(success=True, matches=matches)
-
-     def _get_embedding(self, text: str) -> list[float]:
-         """Get embedding for text (cached)"""
-         if text in self._embedding_cache:
-             return self._embedding_cache[text]
-
-         # Generate embedding (mock for now)
-         # In production, use actual embedding service
-         import hashlib
-
-         text_hash = hashlib.md5(text.encode()).hexdigest()
-         embedding = [
-             float(int(text_hash[i : i + 2], 16)) / 255.0
-             for i in range(0, 32, 2)
-         ]
-
-         # Cache and return
-         self._embedding_cache[text] = embedding
-         return embedding
-
-     def to_tool(self):
-         """Convert to lionagi Tool following reader pattern"""
-         if self._tool is None:
-
-             def memory_tool(**kwargs):
-                 """
-                 Unified memory tool for AI agents supporting:
-                 - store: Save memories with automatic embedding and indexing
-                 - recall: Retrieve semantically similar memories
-                 - search: Advanced multi-criteria memory search
-                 - relate: Create knowledge graph relationships
-                 - explore: Deep multi-strategy exploration
-                 - synthesize: Combine memories into insights
-                 """
-                 return self.handle_request(
-                     MemoryRequest(**kwargs)
-                 ).model_dump()
-
-             if self.system_tool_name != "memory_tool":
-                 memory_tool.__name__ = self.system_tool_name
-
-             self._tool = Tool(
-                 func_callable=memory_tool,
-                 request_options=MemoryRequest,
-             )
-
-         return self._tool
-
-     # Placeholder implementations for other actions
-     def _search_memories(self, request: MemoryRequest) -> MemoryResponse:
-         """Advanced search with multiple criteria"""
-         # Similar to recall but with more filters
-         return self._recall_memories(request)
-
-     def _relate_memories(self, request: MemoryRequest) -> MemoryResponse:
-         """Create relationships between memories"""
-         if not all(
-             [request.source_id, request.target_id, request.relationship]
-         ):
-             return MemoryResponse(
-                 success=False,
-                 error="source_id, target_id, and relationship are required for relate action",
-             )
-
-         # Create relationship in backend
-         if self.backend:
-             self.backend.create_relationship(
-                 request.source_id, request.target_id, request.relationship
-             )
-
-         return MemoryResponse(success=True, relationship_created=True)
-
-     def _explore_memories(self, request: MemoryRequest) -> MemoryResponse:
-         """Deep multi-strategy exploration"""
-         # Placeholder for complex exploration
-         return MemoryResponse(
-             success=True,
-             exploration=ExplorationResult(
-                 query=request.query,
-                 depth_reached=request.depth,
-                 strategies_used=request.strategies or ["semantic"],
-                 insights=[
-                     {"type": "pattern", "content": "Exploration insight"}
-                 ],
-                 connections_found=5,
-                 processing_time=2.3,
-             ),
-         )
-
-     def _synthesize_memories(self, request: MemoryRequest) -> MemoryResponse:
-         """Synthesize multiple memories into insights"""
-         # Placeholder for synthesis
-         return MemoryResponse(
-             success=True,
-             synthesis=SynthesisResult(
-                 synthesis_id=f"SYN_{int(datetime.now(timezone.utc).timestamp())}",
-                 source_memories=request.memory_ids or [],
-                 synthesis_content="Synthesized insight from memories",
-                 confidence=0.85,
-                 insights=["Key insight 1", "Key insight 2"],
-             ),
-         )
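
Since the memory tool is dropped outright in 0.16.3, a minimal usage sketch of the deleted interface may help anyone migrating off it. Everything below is taken from the removed code above (the dict form of handle_request, the optional memory_backend argument, and to_tool()); the import path only exists in lionagi 0.16.1 and earlier, so treat this as a reference against the old wheel, not an API that survives the upgrade.

# Runs against lionagi 0.16.1 only; lionagi/tools/memory/tools.py is gone in 0.16.3.
from lionagi.tools.memory.tools import MemoryTool

# Without a backend, store/recall fall back to the in-memory mock paths shown in the diff.
tool = MemoryTool(memory_backend=None)

# handle_request accepts a plain dict and coerces it into a MemoryRequest.
stored = tool.handle_request(
    {"action": "store", "content": "note to self", "tags": ["example"]}
)
print(stored.success, stored.memory_info.memory_id if stored.memory_info else None)

recalled = tool.handle_request(
    {"action": "recall", "query": "note to self", "limit": 3}
)
print([m.memory_id for m in (recalled.matches or [])])

# to_tool() wraps the handler as a lionagi Tool object.
lion_tool = tool.to_tool()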