lionagi 0.6.1__py3-none-any.whl → 0.7.1__py3-none-any.whl

Files changed (78)
  1. lionagi/libs/token_transform/__init__.py +0 -0
  2. lionagi/libs/token_transform/llmlingua.py +1 -0
  3. lionagi/libs/token_transform/perplexity.py +439 -0
  4. lionagi/libs/token_transform/synthlang.py +409 -0
  5. lionagi/operations/ReAct/ReAct.py +126 -0
  6. lionagi/operations/ReAct/utils.py +28 -0
  7. lionagi/operations/__init__.py +1 -9
  8. lionagi/operations/_act/act.py +73 -0
  9. lionagi/operations/chat/__init__.py +3 -0
  10. lionagi/operations/chat/chat.py +173 -0
  11. lionagi/operations/communicate/__init__.py +0 -0
  12. lionagi/operations/communicate/communicate.py +108 -0
  13. lionagi/operations/instruct/__init__.py +3 -0
  14. lionagi/operations/instruct/instruct.py +29 -0
  15. lionagi/operations/interpret/__init__.py +3 -0
  16. lionagi/operations/interpret/interpret.py +39 -0
  17. lionagi/operations/operate/__init__.py +3 -0
  18. lionagi/operations/operate/operate.py +194 -0
  19. lionagi/operations/parse/__init__.py +3 -0
  20. lionagi/operations/parse/parse.py +89 -0
  21. lionagi/operations/plan/plan.py +3 -3
  22. lionagi/operations/select/__init__.py +0 -4
  23. lionagi/operations/select/select.py +11 -30
  24. lionagi/operations/select/utils.py +13 -2
  25. lionagi/operations/translate/__init__.py +0 -0
  26. lionagi/operations/translate/translate.py +47 -0
  27. lionagi/operations/types.py +16 -0
  28. lionagi/operatives/action/manager.py +115 -93
  29. lionagi/operatives/action/request_response_model.py +31 -0
  30. lionagi/operatives/action/tool.py +50 -20
  31. lionagi/operatives/strategies/__init__.py +3 -0
  32. lionagi/protocols/_concepts.py +1 -1
  33. lionagi/protocols/adapters/adapter.py +25 -0
  34. lionagi/protocols/adapters/json_adapter.py +107 -27
  35. lionagi/protocols/adapters/pandas_/csv_adapter.py +55 -11
  36. lionagi/protocols/adapters/pandas_/excel_adapter.py +52 -10
  37. lionagi/protocols/adapters/pandas_/pd_dataframe_adapter.py +54 -4
  38. lionagi/protocols/adapters/pandas_/pd_series_adapter.py +40 -0
  39. lionagi/protocols/generic/element.py +1 -1
  40. lionagi/protocols/generic/pile.py +5 -8
  41. lionagi/protocols/graph/edge.py +1 -1
  42. lionagi/protocols/graph/graph.py +16 -8
  43. lionagi/protocols/graph/node.py +1 -1
  44. lionagi/protocols/mail/exchange.py +126 -15
  45. lionagi/protocols/mail/mail.py +33 -0
  46. lionagi/protocols/mail/mailbox.py +62 -0
  47. lionagi/protocols/mail/manager.py +97 -41
  48. lionagi/protocols/mail/package.py +57 -3
  49. lionagi/protocols/messages/action_request.py +77 -26
  50. lionagi/protocols/messages/action_response.py +55 -26
  51. lionagi/protocols/messages/assistant_response.py +50 -15
  52. lionagi/protocols/messages/base.py +36 -0
  53. lionagi/protocols/messages/instruction.py +175 -145
  54. lionagi/protocols/messages/manager.py +152 -56
  55. lionagi/protocols/messages/message.py +61 -25
  56. lionagi/protocols/messages/system.py +54 -19
  57. lionagi/service/imodel.py +24 -0
  58. lionagi/session/branch.py +1116 -939
  59. lionagi/utils.py +1 -0
  60. lionagi/version.py +1 -1
  61. {lionagi-0.6.1.dist-info → lionagi-0.7.1.dist-info}/METADATA +1 -1
  62. {lionagi-0.6.1.dist-info → lionagi-0.7.1.dist-info}/RECORD +75 -56
  63. lionagi/libs/compress/models.py +0 -66
  64. lionagi/libs/compress/utils.py +0 -69
  65. lionagi/operations/select/prompt.py +0 -5
  66. /lionagi/{libs/compress → operations/ReAct}/__init__.py +0 -0
  67. /lionagi/operations/{strategies → _act}/__init__.py +0 -0
  68. /lionagi/{operations → operatives}/strategies/base.py +0 -0
  69. /lionagi/{operations → operatives}/strategies/concurrent.py +0 -0
  70. /lionagi/{operations → operatives}/strategies/concurrent_chunk.py +0 -0
  71. /lionagi/{operations → operatives}/strategies/concurrent_sequential_chunk.py +0 -0
  72. /lionagi/{operations → operatives}/strategies/params.py +0 -0
  73. /lionagi/{operations → operatives}/strategies/sequential.py +0 -0
  74. /lionagi/{operations → operatives}/strategies/sequential_chunk.py +0 -0
  75. /lionagi/{operations → operatives}/strategies/sequential_concurrent_chunk.py +0 -0
  76. /lionagi/{operations → operatives}/strategies/utils.py +0 -0
  77. {lionagi-0.6.1.dist-info → lionagi-0.7.1.dist-info}/WHEEL +0 -0
  78. {lionagi-0.6.1.dist-info → lionagi-0.7.1.dist-info}/licenses/LICENSE +0 -0
lionagi/libs/token_transform/__init__.py (file without changes)

lionagi/libs/token_transform/llmlingua.py
@@ -0,0 +1 @@
+# TODO

lionagi/libs/token_transform/perplexity.py
@@ -0,0 +1,439 @@
+import asyncio
+from dataclasses import dataclass
+from timeit import default_timer as timer
+
+import numpy as np
+from pydantic import BaseModel
+
+from lionagi.protocols.generic.event import EventStatus
+from lionagi.protocols.generic.log import Log
+from lionagi.service.endpoints.base import APICalling
+from lionagi.service.imodel import iModel
+from lionagi.utils import alcall, lcall, to_dict, to_list
+
+
+@dataclass
+class PerplexityScores:
+    """
+    Stores logprobs, tokens, and derived perplexity from a completion response.
+    """
+
+    completion_response: BaseModel | dict
+    original_tokens: list[str]
+    n_samples: int
+
+    @property
+    def logprobs(self) -> list[float]:
+        """Return list of logprobs extracted from the model response."""
+        return [i["logprob"] for i in self.perplexity_scores]
+
+    @property
+    def perplexity(self) -> float:
+        """
+        e^(mean logprob), if logprobs exist. Fallback to 1.0 if empty.
+        """
+        if not self.logprobs:
+            return 1.0
+        return np.exp(np.mean(self.logprobs))
+
+    @property
+    def perplexity_scores(self) -> list[dict]:
+        """
+        Return [{'token': ..., 'logprob': ...}, ...].
+        Handles two possible logprob structures:
+        - "tokens" + "token_logprobs"
+        - "content" (older style)
+        """
+        outs = []
+        try:
+            if isinstance(self.completion_response, dict):
+                log_prob = self.completion_response["choices"][0]["logprobs"]
+            else:
+                # Pydantic or other object
+                log_prob = self.completion_response.choices[0].logprobs
+        except Exception:
+            return outs
+
+        if not log_prob:
+            return outs
+
+        if "tokens" in log_prob and "token_logprobs" in log_prob:
+            # OpenAI style logprobs
+            for token, lp in zip(
+                log_prob["tokens"], log_prob["token_logprobs"]
+            ):
+                outs.append({"token": token, "logprob": lp})
+        elif "content" in log_prob:
+            # Old style logprobs
+            for item in log_prob["content"]:
+                outs.append(
+                    {"token": item["token"], "logprob": item["logprob"]}
+                )
+        return outs
+
+    def to_dict(self) -> dict:
+        """
+        Construct a dictionary representation, including perplexity, usage, etc.
+        """
+        # usage info
+        usage = {}
+        if isinstance(self.completion_response, dict):
+            usage = self.completion_response.get("usage", {})
+        else:
+            usage = to_dict(self.completion_response.usage)
+
+        return {
+            "perplexity": self.perplexity,
+            "original_tokens": self.original_tokens,
+            "prompt_tokens": usage.get("prompt_tokens", 0),
+            "completion_tokens": usage.get("completion_tokens", 0),
+            "total_tokens": usage.get("total_tokens", 0),
+        }
+
+    def to_log(self) -> Log:
+        """
+        Return a Log object for convenience.
+        """
+        return Log(content=self.to_dict())
+
+
+async def compute_perplexity(
+    chat_model: iModel,
+    initial_context: str = None,
+    tokens: list[str] = None,
+    system_msg: str = None,
+    n_samples: int = 1,
+    use_residue: bool = True,
+    **kwargs,
+) -> list[PerplexityScores]:
+    """
+    Splits tokens into n_samples chunks, calls the model with logprobs=True,
+    and returns PerplexityScores for each chunk.
+    """
+    context = initial_context or ""
+    n_samples = n_samples or len(tokens)
+
+    sample_token_len, residue = divmod(len(tokens), n_samples)
+    if n_samples == 1:
+        samples = [tokens]
+    else:
+        samples = [
+            tokens[: (i + 1) * sample_token_len] for i in range(n_samples)
+        ]
+        if use_residue and residue != 0:
+            samples.append(tokens[-residue:])
+
+    # Build text for each chunk
+    sampless = [context + " " + " ".join(s) for s in samples]
+    kwargs["logprobs"] = True
+
+    async def _inner(api_call: APICalling):
+        await api_call.invoke()
+        elapsed = 0
+        while (
+            api_call.status not in [EventStatus.COMPLETED, EventStatus.FAILED]
+            and elapsed < 5
+        ):
+            await asyncio.sleep(0.1)
+            elapsed += 0.1
+        return api_call.response
+
+    # Create and schedule calls
+    api_calls = []
+    for sample_txt in sampless:
+        messages = []
+        if system_msg:
+            if not chat_model.sequential_exchange:
+                messages.append({"role": "system", "content": system_msg})
+            messages.append({"role": "user", "content": sample_txt})
+        else:
+            messages.append({"role": "user", "content": sample_txt})
+
+        api_calls.append(
+            chat_model.create_api_calling(messages=messages, **kwargs)
+        )
+
+    results = await alcall(api_calls, _inner, max_concurrent=50)
+
+    def _pplx_score(input_):
+        idx, resp = input_
+        return PerplexityScores(resp, samples[idx], n_samples)
+
+    return lcall(enumerate(results), _pplx_score)
+
+
+class LLMCompressor:
+    """
+    Compress text by selecting segments with highest perplexity tokens
+    (or in practice, rank segments by logprob).
+    """
+
+    def __init__(
+        self,
+        chat_model: iModel,
+        system_msg=None,
+        tokenizer=None,
+        splitter=None,
+        target_ratio=0.2,
+        n_samples=5,
+        chunk_size=64,
+        max_tokens_per_sample=80,
+        min_pplx=0,
+        split_overlap=0,
+        split_threshold=0,
+        verbose=True,
+    ):
+        # Must have "logprobs" support
+        if "logprobs" not in chat_model.endpoint.acceptable_kwargs:
+            raise ValueError(
+                f"Model {chat_model.model_name} does not support logprobs. "
+                "Please use a model that supports logprobs."
+            )
+
+        self.chat_model = chat_model
+        self.tokenizer = tokenizer
+        self.splitter = splitter
+        self.system_msg = (
+            system_msg or "Concisely summarize content for storage:"
+        )
+        self.target_ratio = target_ratio
+        self.n_samples = n_samples
+        self.chunk_size = chunk_size
+        self.max_tokens_per_sample = max_tokens_per_sample
+        self.min_pplx = min_pplx
+        self.verbose = verbose
+        self.split_overlap = split_overlap
+        self.split_threshold = split_threshold
+
+    def tokenize(self, text: str, **kwargs) -> list[str]:
+        """
+        Tokenize text. If no custom tokenizer, use the default from lionagi.
+        """
+        if not self.tokenizer:
+            from lionagi.service.endpoints.token_calculator import (
+                TokenCalculator,
+            )
+
+            return TokenCalculator.tokenize(
+                text,
+                encoding_name=self.chat_model.model_name,
+                return_tokens=True,
+            )
+        if hasattr(self.tokenizer, "tokenize"):
+            return self.tokenizer.tokenize(text, **kwargs)
+        return self.tokenizer(text, **kwargs)
+
+    def split(
+        self,
+        text: str,
+        chunk_size=None,
+        overlap=None,
+        threshold=None,
+        by_chars=False,
+        return_tokens=False,
+        **kwargs,
+    ) -> list:
+        """
+        Split text into segments. If no custom splitter, default to chunk_content from lionagi.
+        """
+        if not self.splitter:
+            from lionagi.libs.file.chunk import chunk_content
+
+            contents = chunk_content(
+                content=text,
+                chunk_size=chunk_size or self.chunk_size,
+                overlap=overlap or self.split_overlap,
+                threshold=threshold or self.split_threshold,
+                return_tokens=return_tokens,
+                chunk_by="chars" if by_chars else "tokens",
+            )
+            return [i["chunk_content"] for i in contents]
+
+        # If user provided an object with .split or .chunk or .segment
+        for meth in ["split", "chunk", "segment"]:
+            if hasattr(self.splitter, meth):
+                return getattr(self.splitter, meth)(text, **kwargs)
+        raise ValueError(
+            "No valid method found in splitter: must have .split/.chunk/.segment"
+        )
+
+    async def rank_by_pplex(
+        self,
+        items: list,
+        initial_text=None,
+        cumulative=False,
+        n_samples=None,
+        use_residue=True,
+        **kwargs,
+    ) -> list:
+        """
+        Rank items (token lists or strings) by perplexity descending.
+        If cumulative=True, each item is appended to the context.
+        """
+
+        async def _get_item_perplexity(item):
+            # Ensure item is a list of tokens
+            item_toks = item if isinstance(item, list) else [item]
+            if len(item_toks) > self.max_tokens_per_sample:
+                item_toks = item_toks[: self.max_tokens_per_sample]
+            pplex_scores = await compute_perplexity(
+                chat_model=self.chat_model,
+                initial_context=initial_text,
+                tokens=item_toks,
+                n_samples=n_samples or self.n_samples,
+                system_msg=self.system_msg,
+                use_residue=use_residue,
+                **kwargs,
+            )
+            # Usually we only look at pplex_scores[0], as there's one chunk
+            return pplex_scores
+
+        # If user passed a single string, tokenize it
+        if isinstance(items, str):
+            items = self.tokenize(items)
+
+        if len(items) == 1:
+            single_scores = await _get_item_perplexity(items[0])
+            return [(items[0], single_scores[0])]
+
+        segments = []
+        if cumulative:
+            ctx = initial_text or ""
+            for i in items:
+                seg_toks = i if isinstance(i, list) else [i]
+                joined = " ".join(seg_toks)
+                ctx += " " + joined
+                segments.append(ctx)
+        else:
+            for i in items:
+                seg_toks = i if isinstance(i, list) else [i]
+                segments.append(" ".join(seg_toks))
+
+        tasks = [
+            asyncio.create_task(_get_item_perplexity(seg)) for seg in segments
+        ]
+        results = await asyncio.gather(*tasks)
+        # Pair each item with the first pplex (p[0]) if multiple were returned
+        pairs = [(itm, pplex[0]) for itm, pplex in zip(items, results)]
+
+        # Sort descending by perplexity
+        return sorted(pairs, key=lambda x: x[1].perplexity, reverse=True)
+
+    async def compress(
+        self,
+        text: str,
+        compression_ratio=None,
+        initial_text=None,
+        cumulative=False,
+        split_kwargs=None,
+        min_pplx=None,
+        **kwargs,
+    ) -> str:
+        """
+        Main method to compress text:
+        1) Split text
+        2) Rank by perplexity
+        3) Select best segments until reaching target ratio
+        """
+        start = timer()
+        if split_kwargs is None:
+            split_kwargs = {
+                "chunk_size": self.max_tokens_per_sample,
+                "overlap": self.split_overlap,
+                "threshold": self.split_threshold,
+                "return_tokens": True,
+            }
+
+        # Tokenize once to get total length
+        all_tokens = self.tokenize(text)
+        original_len = len(all_tokens)
+
+        # Split text
+        items = self.split(text, **split_kwargs)
+        # items -> list of token-lists
+
+        # Rank
+        ranked = await self.rank_by_pplex(
+            items=items,
+            initial_text=initial_text,
+            cumulative=cumulative,
+            **kwargs,
+        )
+
+        # Select
+        selected = self.select_by_pplex(
+            ranked_items=ranked,
+            target_compression_ratio=compression_ratio or self.target_ratio,
+            original_length=original_len,
+            min_pplx=min_pplx or self.min_pplx,
+        )
+
+        if self.verbose:
+            compressed_len = sum(
+                len(to_list(self.tokenize(x), dropna=True, flatten=True))
+                for x in selected
+            )
+            ratio = compressed_len / original_len if original_len else 1
+            print(
+                f"Original tokens: {original_len}\n"
+                f"Selected tokens: {compressed_len}\n"
+                f"Compression ratio: {ratio:.3f}\n"
+                f"Time: {timer() - start:.3f}s\n"
+            )
+
+        # Join final
+        out_str = " ".join(selected)
+        return out_str.strip()
+
+    def select_by_pplex(
+        self,
+        ranked_items: list,
+        target_compression_ratio: float,
+        original_length: int,
+        min_pplx=0,
+    ) -> list[str]:
+        """
+        From highest perplexity to lowest, pick items until we reach the desired ratio.
+        Items below min_pplx are skipped.
+        """
+        desired_len = int(original_length * target_compression_ratio)
+
+        chosen = []
+        current_len = 0
+        for item, info in ranked_items:
+            if info.perplexity > min_pplx:
+                if isinstance(item, list):
+                    item_toks = to_list(item, dropna=True, flatten=True)
+                else:
+                    item_toks = self.tokenize(item)
+                if current_len + len(item_toks) > desired_len:
+                    break
+                chosen.append(" ".join(item_toks))
+                current_len += len(item_toks)
+
+        return chosen
+
+
+# Helper function to quickly compress text using perplexity
+# (If you don't want to manually create LLMCompressor instance everywhere)
+async def compress_text(
+    text: str,
+    chat_model: iModel,
+    system_msg: str = None,
+    target_ratio: float = 0.2,
+    n_samples: int = 5,
+    max_tokens_per_sample=80,
+    verbose=True,
+) -> str:
+    """
+    Convenience function that instantiates LLMCompressor and compresses text.
+    """
+    compressor = LLMCompressor(
+        chat_model=chat_model,
+        system_msg=system_msg,
+        target_ratio=target_ratio,
+        n_samples=n_samples,
+        max_tokens_per_sample=max_tokens_per_sample,
+        verbose=verbose,
+    )
+    return await compressor.compress(text)
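
For orientation, a minimal usage sketch of the compress_text helper added by this file (the sketch itself is not part of the diff): it assumes an already-configured lionagi iModel whose endpoint accepts the logprobs kwarg; the model variable and sample text are illustrative placeholders.

# Hypothetical usage of lionagi.libs.token_transform.perplexity.compress_text.
# `chat_model` is assumed to be a pre-built iModel with logprobs support;
# constructing it is outside the scope of this sketch.
from lionagi.libs.token_transform.perplexity import compress_text
from lionagi.service.imodel import iModel


async def demo(chat_model: iModel) -> str:
    long_text = "..."  # any long document to shrink
    # Keep roughly 20% of the original tokens, ranking text chunks by the
    # perplexity the model assigns to them (see LLMCompressor.compress above).
    # With verbose=True the compressor prints token counts and the achieved ratio.
    return await compress_text(
        long_text,
        chat_model=chat_model,
        target_ratio=0.2,
        n_samples=5,
        verbose=True,
    )

# Call with `await demo(my_chat_model)` from async code, or wrap in asyncio.run(...).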