lionagi 0.12.2__py3-none-any.whl → 0.12.4__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
Files changed (86)
  1. lionagi/config.py +123 -0
  2. lionagi/fields/file.py +1 -1
  3. lionagi/fields/reason.py +1 -1
  4. lionagi/libs/file/concat.py +1 -6
  5. lionagi/libs/file/concat_files.py +1 -5
  6. lionagi/libs/file/save.py +1 -1
  7. lionagi/libs/package/imports.py +8 -177
  8. lionagi/libs/parse.py +30 -0
  9. lionagi/libs/schema/load_pydantic_model_from_schema.py +259 -0
  10. lionagi/libs/token_transform/perplexity.py +2 -4
  11. lionagi/libs/token_transform/synthlang_/resources/frameworks/framework_options.json +46 -46
  12. lionagi/libs/token_transform/synthlang_/translate_to_synthlang.py +1 -1
  13. lionagi/operations/chat/chat.py +2 -2
  14. lionagi/operations/communicate/communicate.py +20 -5
  15. lionagi/operations/parse/parse.py +131 -43
  16. lionagi/protocols/generic/log.py +1 -2
  17. lionagi/protocols/generic/pile.py +18 -4
  18. lionagi/protocols/messages/assistant_response.py +20 -1
  19. lionagi/protocols/messages/templates/README.md +6 -10
  20. lionagi/service/connections/__init__.py +15 -0
  21. lionagi/service/connections/api_calling.py +230 -0
  22. lionagi/service/connections/endpoint.py +410 -0
  23. lionagi/service/connections/endpoint_config.py +137 -0
  24. lionagi/service/connections/header_factory.py +56 -0
  25. lionagi/service/connections/match_endpoint.py +49 -0
  26. lionagi/service/connections/providers/__init__.py +3 -0
  27. lionagi/service/connections/providers/anthropic_.py +87 -0
  28. lionagi/service/connections/providers/exa_.py +33 -0
  29. lionagi/service/connections/providers/oai_.py +166 -0
  30. lionagi/service/connections/providers/ollama_.py +122 -0
  31. lionagi/service/connections/providers/perplexity_.py +29 -0
  32. lionagi/service/imodel.py +36 -144
  33. lionagi/service/manager.py +1 -7
  34. lionagi/service/{endpoints/rate_limited_processor.py → rate_limited_processor.py} +4 -2
  35. lionagi/service/resilience.py +545 -0
  36. lionagi/service/third_party/README.md +71 -0
  37. lionagi/service/third_party/__init__.py +0 -0
  38. lionagi/service/third_party/anthropic_models.py +159 -0
  39. lionagi/service/third_party/exa_models.py +165 -0
  40. lionagi/service/third_party/openai_models.py +18241 -0
  41. lionagi/service/third_party/pplx_models.py +156 -0
  42. lionagi/service/types.py +5 -4
  43. lionagi/session/branch.py +12 -7
  44. lionagi/tools/file/reader.py +1 -1
  45. lionagi/tools/memory/tools.py +497 -0
  46. lionagi/utils.py +921 -123
  47. lionagi/version.py +1 -1
  48. {lionagi-0.12.2.dist-info → lionagi-0.12.4.dist-info}/METADATA +33 -16
  49. {lionagi-0.12.2.dist-info → lionagi-0.12.4.dist-info}/RECORD +53 -63
  50. lionagi/libs/file/create_path.py +0 -80
  51. lionagi/libs/file/file_util.py +0 -358
  52. lionagi/libs/parse/__init__.py +0 -3
  53. lionagi/libs/parse/fuzzy_parse_json.py +0 -117
  54. lionagi/libs/parse/to_dict.py +0 -336
  55. lionagi/libs/parse/to_json.py +0 -61
  56. lionagi/libs/parse/to_num.py +0 -378
  57. lionagi/libs/parse/to_xml.py +0 -57
  58. lionagi/libs/parse/xml_parser.py +0 -148
  59. lionagi/libs/schema/breakdown_pydantic_annotation.py +0 -48
  60. lionagi/service/endpoints/__init__.py +0 -3
  61. lionagi/service/endpoints/base.py +0 -706
  62. lionagi/service/endpoints/chat_completion.py +0 -116
  63. lionagi/service/endpoints/match_endpoint.py +0 -72
  64. lionagi/service/providers/__init__.py +0 -3
  65. lionagi/service/providers/anthropic_/__init__.py +0 -3
  66. lionagi/service/providers/anthropic_/messages.py +0 -99
  67. lionagi/service/providers/exa_/models.py +0 -3
  68. lionagi/service/providers/exa_/search.py +0 -80
  69. lionagi/service/providers/exa_/types.py +0 -7
  70. lionagi/service/providers/groq_/__init__.py +0 -3
  71. lionagi/service/providers/groq_/chat_completions.py +0 -56
  72. lionagi/service/providers/ollama_/__init__.py +0 -3
  73. lionagi/service/providers/ollama_/chat_completions.py +0 -134
  74. lionagi/service/providers/openai_/__init__.py +0 -3
  75. lionagi/service/providers/openai_/chat_completions.py +0 -101
  76. lionagi/service/providers/openai_/spec.py +0 -14
  77. lionagi/service/providers/openrouter_/__init__.py +0 -3
  78. lionagi/service/providers/openrouter_/chat_completions.py +0 -62
  79. lionagi/service/providers/perplexity_/__init__.py +0 -3
  80. lionagi/service/providers/perplexity_/chat_completions.py +0 -44
  81. lionagi/service/providers/perplexity_/models.py +0 -5
  82. lionagi/service/providers/types.py +0 -17
  83. /lionagi/{service/providers/exa_/__init__.py → py.typed} +0 -0
  84. /lionagi/service/{endpoints/token_calculator.py → token_calculator.py} +0 -0
  85. {lionagi-0.12.2.dist-info → lionagi-0.12.4.dist-info}/WHEEL +0 -0
  86. {lionagi-0.12.2.dist-info → lionagi-0.12.4.dist-info}/licenses/LICENSE +0 -0
lionagi/tools/memory/tools.py (new file)
@@ -0,0 +1,497 @@
+ """
+ Memory Tools - Proper lionagi tool implementation following reader pattern
+ """
+
+ from datetime import datetime, timezone
+ from enum import Enum
+ from typing import Any, Dict, List, Optional
+
+ from pydantic import BaseModel, Field, model_validator
+
+ from lionagi.protocols.action.tool import Tool
+ from lionagi.tools.base import LionTool
+ from lionagi.utils import to_num
+
+
+ class MemoryAction(str, Enum):
+     """
+     Memory action types following reader pattern.
+     - 'store': Store new memory with embeddings and metadata
+     - 'recall': Retrieve memories based on semantic similarity
+     - 'search': Search memories using multiple criteria
+     - 'relate': Create relationships between memories
+     - 'explore': Deep multi-step memory exploration
+     - 'synthesize': Combine multiple memories into insights
+     """
+
+     store = "store"
+     recall = "recall"
+     search = "search"
+     relate = "relate"
+     explore = "explore"
+     synthesize = "synthesize"
+
+
+ class MemoryLayer(str, Enum):
+     """Memory storage layers"""
+
+     static = "static"  # File-based persistent memory
+     temporal = "temporal"  # Time-based conversational memory
+     experience = "experience"  # High-value insights and patterns
+
+
+ class MemoryRequest(BaseModel):
+     """
+     Request model for MemoryTool following reader pattern.
+     Supports multiple memory operations with unified interface.
+     """
+
+     action: MemoryAction = Field(
+         ...,
+         description=(
+             "Memory action to perform. Must be one of: "
+             "- 'store': Save new memory with automatic embedding and indexing. "
+             "- 'recall': Retrieve semantically similar memories. "
+             "- 'search': Advanced search with multiple criteria. "
+             "- 'relate': Create relationships between memories. "
+             "- 'explore': Deep exploration with multiple strategies. "
+             "- 'synthesize': Combine memories into new insights."
+         ),
+     )
+
+     # Store action fields
+     content: str | None = Field(
+         None,
+         description=(
+             "Content to store. REQUIRED if action='store'. "
+             "For other actions, leave it None."
+         ),
+     )
+
+     tags: list[str] | None = Field(
+         None,
+         description=(
+             "Tags for categorization. Used with action='store'. "
+             "Can also be used to filter in 'search' and 'recall'."
+         ),
+     )
+
+     importance: float | None = Field(
+         0.5,
+         description=(
+             "Importance score (0.0-1.0) for prioritization. "
+             "Used with action='store'. Higher scores are retained longer."
+         ),
+     )
+
+     layer: MemoryLayer | None = Field(
+         MemoryLayer.temporal,
+         description=(
+             "Memory layer for storage/search. "
+             "static: persistent files, temporal: conversations, experience: insights"
+         ),
+     )
+
+     # Recall/Search fields
+     query: str | None = Field(
+         None,
+         description=(
+             "Query text for semantic search. "
+             "REQUIRED for actions: 'recall', 'search', 'explore'."
+         ),
+     )
+
+     limit: int | None = Field(
+         5,
+         description="Maximum number of results to return. Default is 5.",
+     )
+
+     threshold: float | None = Field(
+         0.7,
+         description=(
+             "Similarity threshold (0.0-1.0) for filtering results. "
+             "Only memories above this threshold are returned."
+         ),
+     )
+
+     # Relate action fields
+     source_id: str | None = Field(
+         None,
+         description="Source memory ID for creating relationships. REQUIRED if action='relate'.",
+     )
+
+     target_id: str | None = Field(
+         None,
+         description="Target memory ID for creating relationships. REQUIRED if action='relate'.",
+     )
+
+     relationship: str | None = Field(
+         None,
+         description=(
+             "Type of relationship (e.g., 'relates_to', 'contradicts', 'supports'). "
+             "REQUIRED if action='relate'."
+         ),
+     )
+
+     # Explore action fields
+     depth: int | None = Field(
+         3,
+         description="Exploration depth for multi-step exploration. Default is 3.",
+     )
+
+     strategies: list[str] | None = Field(
+         None,
+         description=(
+             "Exploration strategies to use: 'semantic', 'temporal', 'relational', 'contextual'. "
+             "If None, uses appropriate defaults."
+         ),
+     )
+
+     # Synthesize action fields
+     memory_ids: list[str] | None = Field(
+         None,
+         description="List of memory IDs to synthesize. Used with action='synthesize'.",
+     )
+
+     synthesis_mode: str | None = Field(
+         "intelligent",
+         description=(
+             "Synthesis complexity: 'simple' (aggregation), "
+             "'intelligent' (AI-powered), 'deep' (multi-step reasoning)"
+         ),
+     )
+
+     @model_validator(mode="before")
+     def _validate_request(cls, values):
+         """Clean up empty dicts and validate numeric fields"""
+         for k, v in values.items():
+             if v == {}:
+                 values[k] = None
+             if k in ["limit", "depth"]:
+                 try:
+                     values[k] = to_num(v, num_type=int)
+                 except ValueError:
+                     values[k] = None
+             if k in ["importance", "threshold"]:
+                 try:
+                     values[k] = to_num(v, num_type=float)
+                 except ValueError:
+                     values[k] = None
+         return values
+
+
+ class MemoryInfo(BaseModel):
+     """Information about a stored memory"""
+
+     memory_id: str
+     timestamp: str
+     layer: MemoryLayer
+     tags: list[str] = Field(default_factory=list)
+     importance: float
+     token_count: int | None = None
+
+
+ class MemoryMatch(BaseModel):
+     """A memory match from recall/search operations"""
+
+     memory_id: str
+     content: str
+     similarity: float
+     timestamp: str
+     tags: list[str] = Field(default_factory=list)
+     metadata: dict[str, Any] = Field(default_factory=dict)
+
+
+ class ExplorationResult(BaseModel):
+     """Result from memory exploration"""
+
+     query: str
+     depth_reached: int
+     strategies_used: list[str]
+     insights: list[dict[str, Any]]
+     connections_found: int
+     processing_time: float
+
+
+ class SynthesisResult(BaseModel):
+     """Result from memory synthesis"""
+
+     synthesis_id: str
+     source_memories: list[str]
+     synthesis_content: str
+     confidence: float
+     insights: list[str]
+
+
+ class MemoryResponse(BaseModel):
+     """
+     Response from MemoryTool following reader pattern.
+     Different fields are populated based on the action performed.
+     """
+
+     success: bool = Field(
+         ...,
+         description="Indicates if the requested action was performed successfully.",
+     )
+
+     error: str | None = Field(
+         None,
+         description="Describes any error that occurred, if success=False.",
+     )
+
+     # Store response
+     memory_info: MemoryInfo | None = Field(
+         None,
+         description="Populated when action='store' succeeds, with memory ID and metadata.",
+     )
+
+     # Recall/Search response
+     matches: list[MemoryMatch] | None = Field(
+         None,
+         description="Populated for recall/search actions, containing matching memories.",
+     )
+
+     # Exploration response
+     exploration: ExplorationResult | None = Field(
+         None,
+         description="Populated when action='explore' succeeds, with deep insights.",
+     )
+
+     # Synthesis response
+     synthesis: SynthesisResult | None = Field(
+         None,
+         description="Populated when action='synthesize' succeeds, with combined insights.",
+     )
+
+     # Relationship response
+     relationship_created: bool | None = Field(
+         None,
+         description="Indicates if relationship was created successfully.",
+     )
+
+
+ class MemoryTool(LionTool):
+     """
+     Memory tool following lionagi's reader pattern.
+     Provides unified interface for all memory operations.
+     """
+
+     is_lion_system_tool = True
+     system_tool_name = "memory_tool"
+
+     def __init__(self, memory_backend=None):
+         super().__init__()
+         self.backend = memory_backend
+         self._tool = None
+
+         # Memory caches
+         self._embedding_cache = {}
+         self._search_cache = {}
+
+     def handle_request(self, request: MemoryRequest) -> MemoryResponse:
+         """
+         Handle memory requests based on action type.
+         Routes to appropriate handler method.
+         """
+         if isinstance(request, dict):
+             request = MemoryRequest(**request)
+
+         try:
+             if request.action == "store":
+                 return self._store_memory(request)
+             elif request.action == "recall":
+                 return self._recall_memories(request)
+             elif request.action == "search":
+                 return self._search_memories(request)
+             elif request.action == "relate":
+                 return self._relate_memories(request)
+             elif request.action == "explore":
+                 return self._explore_memories(request)
+             elif request.action == "synthesize":
+                 return self._synthesize_memories(request)
+             else:
+                 return MemoryResponse(
+                     success=False,
+                     error=f"Unknown action type: {request.action}",
+                 )
+         except Exception as e:
+             return MemoryResponse(
+                 success=False, error=f"Memory operation failed: {str(e)}"
+             )
+
+     def _store_memory(self, request: MemoryRequest) -> MemoryResponse:
+         """Store new memory with embedding and metadata"""
+         if not request.content:
+             return MemoryResponse(
+                 success=False, error="Content is required for store action"
+             )
+
+         # Generate embedding
+         embedding = self._get_embedding(request.content)
+
+         # Create memory ID
+         memory_id = f"MEM_{int(datetime.now(timezone.utc).timestamp())}_{hash(request.content) % 10000}"
+
+         # Store in backend
+         if self.backend:
+             self.backend.store(
+                 {
+                     "memory_id": memory_id,
+                     "content": request.content,
+                     "embedding": embedding,
+                     "tags": request.tags or [],
+                     "importance": request.importance,
+                     "layer": request.layer,
+                     "timestamp": datetime.now(timezone.utc).isoformat(),
+                 }
+             )
+
+         return MemoryResponse(
+             success=True,
+             memory_info=MemoryInfo(
+                 memory_id=memory_id,
+                 timestamp=datetime.now(timezone.utc).isoformat(),
+                 layer=request.layer,
+                 tags=request.tags or [],
+                 importance=request.importance,
+                 token_count=len(
+                     request.content.split()
+                 ),  # Simple token estimate
+             ),
+         )
+
+     def _recall_memories(self, request: MemoryRequest) -> MemoryResponse:
+         """Recall memories based on semantic similarity"""
+         if not request.query:
+             return MemoryResponse(
+                 success=False, error="Query is required for recall action"
+             )
+
+         # Get query embedding
+         query_embedding = self._get_embedding(request.query)
+
+         # Search in backend
+         if self.backend:
+             matches = self.backend.search_similar(
+                 query_embedding,
+                 limit=request.limit,
+                 threshold=request.threshold,
+                 layer=request.layer,
+                 tags=request.tags,
+             )
+         else:
+             # Mock response for testing
+             matches = [
+                 MemoryMatch(
+                     memory_id=f"MEM_TEST_{i}",
+                     content=f"Test memory {i} related to: {request.query}",
+                     similarity=0.9 - (i * 0.1),
+                     timestamp=datetime.now(timezone.utc).isoformat(),
+                     tags=["test", "mock"],
+                 )
+                 for i in range(min(3, request.limit))
+             ]
+
+         return MemoryResponse(success=True, matches=matches)
+
+     def _get_embedding(self, text: str) -> list[float]:
+         """Get embedding for text (cached)"""
+         if text in self._embedding_cache:
+             return self._embedding_cache[text]
+
+         # Generate embedding (mock for now)
+         # In production, use actual embedding service
+         import hashlib
+
+         text_hash = hashlib.md5(text.encode()).hexdigest()
+         embedding = [
+             float(int(text_hash[i : i + 2], 16)) / 255.0
+             for i in range(0, 32, 2)
+         ]
+
+         # Cache and return
+         self._embedding_cache[text] = embedding
+         return embedding
+
+     def to_tool(self):
+         """Convert to lionagi Tool following reader pattern"""
+         if self._tool is None:
+
+             def memory_tool(**kwargs):
+                 """
+                 Unified memory tool for AI agents supporting:
+                 - store: Save memories with automatic embedding and indexing
+                 - recall: Retrieve semantically similar memories
+                 - search: Advanced multi-criteria memory search
+                 - relate: Create knowledge graph relationships
+                 - explore: Deep multi-strategy exploration
+                 - synthesize: Combine memories into insights
+                 """
+                 return self.handle_request(
+                     MemoryRequest(**kwargs)
+                 ).model_dump()
+
+             if self.system_tool_name != "memory_tool":
+                 memory_tool.__name__ = self.system_tool_name
+
+             self._tool = Tool(
+                 func_callable=memory_tool,
+                 request_options=MemoryRequest,
+             )
+
+         return self._tool
+
+     # Placeholder implementations for other actions
+     def _search_memories(self, request: MemoryRequest) -> MemoryResponse:
+         """Advanced search with multiple criteria"""
+         # Similar to recall but with more filters
+         return self._recall_memories(request)
+
+     def _relate_memories(self, request: MemoryRequest) -> MemoryResponse:
+         """Create relationships between memories"""
+         if not all(
+             [request.source_id, request.target_id, request.relationship]
+         ):
+             return MemoryResponse(
+                 success=False,
+                 error="source_id, target_id, and relationship are required for relate action",
+             )
+
+         # Create relationship in backend
+         if self.backend:
+             self.backend.create_relationship(
+                 request.source_id, request.target_id, request.relationship
+             )
+
+         return MemoryResponse(success=True, relationship_created=True)
+
+     def _explore_memories(self, request: MemoryRequest) -> MemoryResponse:
+         """Deep multi-strategy exploration"""
+         # Placeholder for complex exploration
+         return MemoryResponse(
+             success=True,
+             exploration=ExplorationResult(
+                 query=request.query,
+                 depth_reached=request.depth,
+                 strategies_used=request.strategies or ["semantic"],
+                 insights=[
+                     {"type": "pattern", "content": "Exploration insight"}
+                 ],
+                 connections_found=5,
+                 processing_time=2.3,
+             ),
+         )
+
+     def _synthesize_memories(self, request: MemoryRequest) -> MemoryResponse:
+         """Synthesize multiple memories into insights"""
+         # Placeholder for synthesis
+         return MemoryResponse(
+             success=True,
+             synthesis=SynthesisResult(
+                 synthesis_id=f"SYN_{int(datetime.now(timezone.utc).timestamp())}",
+                 source_memories=request.memory_ids or [],
+                 synthesis_content="Synthesized insight from memories",
+                 confidence=0.85,
+                 insights=["Key insight 1", "Key insight 2"],
+             ),
+         )
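
For orientation, a minimal usage sketch of the new MemoryTool, based only on the code added in this diff. No memory_backend is wired up, so 'recall' falls back to the mock matches shown above; how the resulting Tool is registered with a Branch is not shown in this diff and is assumed to follow the same pattern as the existing reader tool.

from lionagi.tools.memory.tools import MemoryTool

# No backend supplied: 'store' returns metadata only, 'recall' returns mock matches.
memory = MemoryTool()

# Store a memory (content is required for action='store').
stored = memory.handle_request(
    {
        "action": "store",
        "content": "User prefers concise answers",
        "tags": ["preference"],
    }
)
print(stored.memory_info.memory_id if stored.success else stored.error)

# Recall semantically similar memories (query is required for action='recall').
recalled = memory.handle_request(
    {"action": "recall", "query": "answer style", "limit": 3}
)
for match in recalled.matches or []:
    print(match.memory_id, round(match.similarity, 2), match.content)

# Convert to a lionagi Tool; registering it with a Branch is assumed to mirror
# the reader-tool pattern and is not part of this diff.
tool = memory.to_tool()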