lionagi 0.5.3__py3-none-any.whl → 0.5.5__py3-none-any.whl

Files changed (29)
  1. lionagi/core/action/action_manager.py +2 -0
  2. lionagi/core/communication/assistant_response.py +10 -0
  3. lionagi/core/communication/message.py +1 -1
  4. lionagi/core/communication/message_manager.py +13 -0
  5. lionagi/core/communication/utils.py +4 -2
  6. lionagi/core/session/branch_mixins.py +76 -39
  7. lionagi/core/session/session.py +3 -3
  8. lionagi/integrations/anthropic_/AnthropicModel.py +4 -9
  9. lionagi/integrations/anthropic_/AnthropicService.py +10 -0
  10. lionagi/integrations/anthropic_/anthropic_max_output_token_data.yaml +5 -0
  11. lionagi/integrations/anthropic_/anthropic_price_data.yaml +26 -6
  12. lionagi/integrations/anthropic_/version.py +1 -1
  13. lionagi/integrations/groq_/GroqService.py +5 -0
  14. lionagi/integrations/groq_/version.py +1 -1
  15. lionagi/integrations/litellm_/imodel.py +5 -0
  16. lionagi/integrations/openai_/OpenAIModel.py +0 -4
  17. lionagi/integrations/openai_/OpenAIService.py +9 -0
  18. lionagi/integrations/openai_/version.py +1 -1
  19. lionagi/integrations/perplexity_/PerplexityService.py +5 -0
  20. lionagi/integrations/perplexity_/version.py +1 -1
  21. lionagi/libs/func/async_calls/alcall.py +7 -0
  22. lionagi/operations/brainstorm/brainstorm.py +318 -93
  23. lionagi/operations/plan/plan.py +280 -67
  24. lionagi/service/imodel.py +5 -0
  25. lionagi/version.py +1 -1
  26. {lionagi-0.5.3.dist-info → lionagi-0.5.5.dist-info}/METADATA +1 -1
  27. {lionagi-0.5.3.dist-info → lionagi-0.5.5.dist-info}/RECORD +29 -29
  28. {lionagi-0.5.3.dist-info → lionagi-0.5.5.dist-info}/WHEEL +0 -0
  29. {lionagi-0.5.3.dist-info → lionagi-0.5.5.dist-info}/licenses/LICENSE +0 -0
@@ -253,6 +253,8 @@ class ActionManager:
             ValueError: If a specified tool is not registered.
             TypeError: If an unsupported tool type is provided.
         """
+        if isinstance(tools, list | tuple) and len(tools) == 1:
+            tools = tools[0]
         if isinstance(tools, bool):
             if tools:
                 tool_kwarg = {"tools": self.schema_list}
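The new guard normalizes a one-element list or tuple to the bare item, so `tools=[my_tool]` and `tools=my_tool` take the same code path downstream. A minimal standalone sketch of that normalization (function name hypothetical; requires Python 3.10+ for `isinstance(x, list | tuple)`):

def normalize_tools(tools):
    # Mirrors the guard added above: unwrap a single-element sequence.
    if isinstance(tools, list | tuple) and len(tools) == 1:
        tools = tools[0]
    return tools

assert normalize_tools(["search"]) == "search"
assert normalize_tools(["search", "calc"]) == ["search", "calc"]
assert normalize_tools(True) is True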
@@ -146,6 +146,16 @@ class AssistantResponse(RoledMessage):
         """
         return copy(self.content["assistant_response"])
 
+    @response.setter
+    def response(self, value: str) -> None:
+        """
+        Set the assistant response content.
+
+        Args:
+            value: The new response content
+        """
+        self.content["assistant_response"] = value
+
     @property
     def model_response(self) -> dict | list[dict]:
         """
@@ -68,7 +68,7 @@ class MessageField(str, Enum):
     LION_CLASS = "lion_class"
     ROLE = "role"
     CONTENT = "content"
-    id = "id"
+    LN_ID = "ln_id"
     SENDER = "sender"
     RECIPIENT = "recipient"
     METADATA = "metadata"
@@ -498,6 +498,19 @@ class MessageManager:
             ]
         )
 
+    def remove_last_instruction_tool_schemas(self) -> None:
+        id_ = self.last_instruction.ln_id
+        self.messages[id_].tool_schemas = None
+
+    def concat_recent_action_responses_to_instruction(
+        self, instruction: Instruction
+    ) -> None:
+        for i in reversed(self.messages.progress):
+            if isinstance(self.messages[i], ActionResponse):
+                instruction.context.append(self.messages[i].content.to_dict())
+            else:
+                break
+
     def to_chat_msgs(self, progress=None) -> list[dict]:
         """
         Convert messages to chat format.
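`concat_recent_action_responses_to_instruction` walks the progress log backwards and stops at the first non-action message, so only the trailing run of action responses is folded into the instruction's context. A toy illustration of that stopping rule (strings stand in for messages):

log = ["instruction", "assistant", "action:a", "action:b"]
context: list[str] = []
for entry in reversed(log):
    if entry.startswith("action:"):
        context.append(entry.removeprefix("action:"))
    else:
        break  # first non-action message ends the scan
print(context)  # ['b', 'a'] -- most recent action response first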
@@ -92,9 +92,11 @@ def format_text_item(item: Any) -> str:
     for j in item:
         if isinstance(j, dict):
             for k, v in j.items():
-                msg += f"- {k}: {v} \n\n"
+                if v is not None:
+                    msg += f"- {k}: {v} \n\n"
         else:
-            msg += f"{j}\n"
+            if j is not None:
+                msg += f"{j}\n"
     return msg
 
 
@@ -33,6 +33,7 @@ from ..communication.types import (
     ActionResponse,
     AssistantResponse,
     Instruction,
+    RoledMessage,
 )
 
 
@@ -149,6 +150,8 @@ class BranchOperationMixin(ABC):
         if auto_retry_parse is True:
             operative.auto_retry_parse = True
 
+        if actions:
+            tools = tools or True
         if invoke_actions and tools:
             tool_schemas = self.get_tool_schema(tools)
 
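`tools = tools or True` means requesting actions implicitly enables tool schemas: a bare `True` later expands to all registered tools, while an explicit selection is left untouched. A quick truth table of the new coupling:

for actions, tools in [(True, None), (True, ["search"]), (False, None)]:
    if actions:
        tools = tools or True
    print(actions, tools)
# True True        -> falls through to all registered tools
# True ['search']  -> explicit selection kept
# False None       -> unchanged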
@@ -275,7 +278,7 @@ class BranchOperationMixin(ABC):
         **kwargs,
     ) -> tuple[Instruction, AssistantResponse]:
 
-        ins = self.msgs.create_instruction(
+        ins: Instruction = self.msgs.create_instruction(
             instruction=instruction,
             guidance=guidance,
             context=context,
@@ -289,7 +292,57 @@ class BranchOperationMixin(ABC):
         )
 
         progress = progress or self.msgs.progress
-        messages = [self.msgs.messages[i] for i in progress]
+        messages: list[RoledMessage] = [
+            self.msgs.messages[i] for i in progress
+        ]
+
+        use_ins = None
+        if imodel.sequential_exchange:
+            _to_use = []
+            _action_responses: set[ActionResponse] = set()
+
+            for i in messages:
+                if isinstance(i, ActionResponse):
+                    _action_responses.add(i)
+                if isinstance(i, AssistantResponse):
+                    _to_use.append(i.model_copy())
+                if isinstance(i, Instruction):
+                    if _action_responses:
+                        j = i.model_copy()
+                        d_ = [k.content.to_dict() for k in _action_responses]
+                        for z in d_:
+                            if z not in j.context:
+                                j.context.append(z)
+
+                        _to_use.append(j)
+                        _action_responses = set()
+                    else:
+                        _to_use.append(i)
+
+            messages = _to_use
+            if _action_responses:
+                j = ins.model_copy()
+                d_ = [k.content.to_dict() for k in _action_responses]
+                for z in d_:
+                    if z not in j.context:
+                        j.context.append(z)
+                use_ins = j
+
+            if messages and len(messages) > 1:
+                _msgs = [messages[0]]
+
+                for i in messages[1:]:
+                    if isinstance(i, AssistantResponse):
+                        if isinstance(_msgs[-1], AssistantResponse):
+                            _msgs[-1].response = (
+                                f"{_msgs[-1].response}\n\n{i.response}"
+                            )
+                        else:
+                            _msgs.append(i)
+                    else:
+                        if isinstance(_msgs[-1], AssistantResponse):
+                            _msgs.append(i)
+                messages = _msgs
 
         if self.msgs.system and "system" not in imodel.allowed_roles:
             messages = [msg for msg in messages if msg.role != "system"]
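For backends whose new `sequential_exchange` property is `True` (strict user/assistant alternation; see the service hunks below), this block folds action responses into the following instruction's context and concatenates back-to-back assistant replies via the new `response` setter. A simplified standalone sketch of the same repair on role-tagged tuples, not the mixin's actual types:

msgs = [
    ("user", "run the tool"),
    ("assistant", "calling tool..."),
    ("assistant", "partial answer"),
    ("action", '{"result": 42}'),
    ("user", "summarize"),
]
fixed: list[tuple[str, str]] = []
pending: list[str] = []
for role, text in msgs:
    if role == "action":
        pending.append(text)  # held until the next user turn
    elif role == "user" and pending:
        fixed.append((role, f"{text}\ncontext: {pending}"))
        pending = []
    elif fixed and fixed[-1][0] == role == "assistant":
        # merge consecutive assistant turns, as the mixin does via .response
        fixed[-1] = (role, f"{fixed[-1][1]}\n\n{text}")
    else:
        fixed.append((role, text))
print(fixed)  # strictly alternating user/assistant turns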
@@ -312,9 +365,10 @@ class BranchOperationMixin(ABC):
                 first_instruction.guidance or ""
             )
             messages[0] = first_instruction
+            messages.append(use_ins or ins)
 
         else:
-            messages.append(ins)
+            messages.append(use_ins or ins)
 
         kwargs["messages"] = [i.chat_msg for i in messages]
         imodel = imodel or self.imodel
@@ -331,6 +385,7 @@ class BranchOperationMixin(ABC):
             sender=self,
             recipient=self.user,
         )
+
         return ins, res
 
     async def communicate(
@@ -346,7 +401,6 @@ class BranchOperationMixin(ABC):
         imodel: iModel = None,
         images: list = None,
         image_detail: Literal["low", "high", "auto"] = None,
-        tools: str | FUNCTOOL | list[FUNCTOOL | str] | bool = None,
         num_parse_retries: int = 0,
         retry_imodel: iModel = None,
         retry_kwargs: dict = {},
@@ -355,7 +409,6 @@ class BranchOperationMixin(ABC):
         ] = "return_value",
         skip_validation: bool = False,
         clear_messages: bool = False,
-        invoke_action: bool = True,
         response_format: (
             type[BaseModel] | BaseModel
         ) = None,  # alias of request_model
@@ -380,10 +433,6 @@ class BranchOperationMixin(ABC):
             )
             num_parse_retries = 5
 
-        tool_schemas = None
-        if invoke_action and tools:
-            tool_schemas = self.get_tool_schema(tools)
-
         ins, res = await self._invoke_imodel(
             instruction=instruction,
             guidance=guidance,
@@ -395,36 +444,14 @@ class BranchOperationMixin(ABC):
             imodel=imodel,
             images=images,
             image_detail=image_detail,
-            tool_schemas=tool_schemas,
             **kwargs,
         )
-        await self.msgs.a_add_message(instruction=ins)
-        await self.msgs.a_add_message(assistant_response=res)
-
-        action_request_models = None
-        action_response_models = None
+        self.msgs.add_message(instruction=ins)
+        self.msgs.add_message(assistant_response=res)
 
         if skip_validation:
             return res.response
 
-        if invoke_action and tools:
-            action_request_models = ActionRequestModel.create(res.response)
-
-        if action_request_models and invoke_action:
-            action_response_models = await alcall(
-                action_request_models,
-                self.invoke_action,
-                suppress_errors=True,
-            )
-
-        if action_request_models and not action_response_models:
-            for i in action_request_models:
-                await self.msgs.a_add_message(
-                    action_request_model=i,
-                    sender=self,
-                    recipient=None,
-                )
-
         _d = None
         if request_fields is not None or request_model is not None:
             parse_success = None
@@ -475,9 +502,12 @@ class BranchOperationMixin(ABC):
                     if _d and isinstance(_d, dict):
                         parse_success = True
                         if res not in self.msgs.messages:
-                            await self.msgs.a_add_message(
-                                assistant_response=res
-                            )
+                            if isinstance(
+                                self.msgs.messages[-1], AssistantResponse
+                            ):
+                                self.msgs.messages[-1].response = res.response
+                            else:
+                                self.msgs.add_message(assistant_response=res)
                         return _d
 
                 elif request_model:
  elif request_model:
@@ -495,9 +525,16 @@ class BranchOperationMixin(ABC):
495
525
  _d = request_model.model_validate(_d)
496
526
  parse_success = True
497
527
  if res not in self.msgs.messages:
498
- await self.msgs.a_add_message(
499
- assistant_response=res
500
- )
528
+ if isinstance(
529
+ self.msgs.messages[-1], AssistantResponse
530
+ ):
531
+ self.msgs.messages[-1].response = (
532
+ res.response
533
+ )
534
+ else:
535
+ self.msgs.add_message(
536
+ assistant_response=res
537
+ )
501
538
  return _d
502
539
  except Exception as e:
503
540
  logging.warning(
@@ -128,9 +128,9 @@ class Session(Component):
             branch: The branch to set as default or its identifier.
         """
         branch = self.branches[branch]
-        if branch and len(branch) == 1:
-            self.default_branch = branch
-            raise ValueError("Session can only have one default branch.")
+        if not isinstance(branch, Branch):
+            raise ValueError("Input value for branch is not a valid branch.")
+        self.default_branch = branch
 
     def to_df(self, branches: ID.RefSeq = None) -> pd.DataFrame:
         out = self.concat_messages(branches=branches)
@@ -4,6 +4,7 @@
 
 from pathlib import Path
 
+import yaml
 from dotenv import load_dotenv
 from pydantic import (
     BaseModel,
@@ -33,12 +34,6 @@ price_config_file_name = path / "anthropic_price_data.yaml"
 max_output_token_file_name = path / "anthropic_max_output_token_data.yaml"
 
 
-class _ModuleImportClass:
-    from lionagi.libs.package.imports import check_import
-
-    yaml = check_import("yaml", pip_name="pyyaml")
-
-
 class AnthropicModel(BaseModel):
     model: str = Field(description="ID of the model to use.")
 
@@ -239,7 +234,7 @@ class AnthropicModel(BaseModel):
             )
         if estimated_output_len == 0:
             with open(max_output_token_file_name) as file:
-                output_token_config = _ModuleImportClass.yaml.safe_load(file)
+                output_token_config = yaml.safe_load(file)
                 estimated_output_len = output_token_config.get(self.model, 0)
                 self.estimated_output_len = estimated_output_len
 
@@ -261,13 +256,13 @@ class AnthropicModel(BaseModel):
         num_of_input_tokens = self.text_token_calculator.calculate(input_text)
 
         with open(price_config_file_name) as file:
-            price_config = _ModuleImportClass.yaml.safe_load(file)
+            price_config = yaml.safe_load(file)
 
         model_price_info_dict = price_config["model"][self.model]
         estimated_price = (
             model_price_info_dict["input_tokens"] * num_of_input_tokens
             + model_price_info_dict["output_tokens"]
             * estimated_num_of_output_tokens
-        )
+        ) / 1_000_000
 
         return estimated_price
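Together with the price-data change below, rates are now quoted in dollars per million tokens, and the trailing `/ 1_000_000` converts the sum back to a per-request dollar amount. A worked example at the claude-3-5-sonnet-20241022 rates (3.0 input / 15.0 output per million tokens):

input_tokens, output_tokens = 1_200, 500
estimated_price = (3.0 * input_tokens + 15.0 * output_tokens) / 1_000_000
print(estimated_price)  # 0.0111 dollars for this request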
@@ -51,6 +51,11 @@ class AnthropicService(Service):
         # Map model versions to their base models for shared rate limiting
         shared_models = {
             "claude-3-opus-20240229": "claude-3-opus",
+            "claude-3-sonnet-20241022": "claude-3-sonnet",
+            "claude-3-haiku-20241022": "claude-3-haiku",
+            "claude-3-opus-latest": "claude-3-opus",
+            "claude-3-sonnet-latest": "claude-3-sonnet",
+            "claude-3-haiku-latest": "claude-3-haiku",
             "claude-3-sonnet-20240229": "claude-3-sonnet",
             "claude-3-haiku-20240307": "claude-3-haiku",
         }
@@ -115,3 +120,8 @@ class AnthropicService(Service):
     @property
     def allowed_roles(self):
         return ["user", "assistant"]
+
+    @property
+    def sequential_exchange(self):
+        """whether the service requires user/assistant exchange"""
+        return True
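This release adds the `sequential_exchange` flag to every service wrapper in the diff; the values, collected from the hunks in this changeset (Groq, LiteLLM, OpenAI, and Perplexity appear further down):

sequential_exchange = {
    "AnthropicService": True,   # user/assistant turns must alternate
    "GroqService": True,
    "LiteiModel": False,        # litellm wrapper
    "OpenAIService": False,
    "PerplexityService": True,
}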
@@ -2,6 +2,11 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
+claude-3-5-sonnet-20241022: 8192 # Latest model
+claude-3-5-haiku-20241022: 8192 # Latest model
 claude-3-opus-20240229: 4096
 claude-3-sonnet-20240229: 4096
 claude-3-haiku-20240307: 4096
+claude-2.1: 4096
+claude-2.0: 4096
+claude-instant-1.2: 4096
@@ -3,12 +3,32 @@
 # SPDX-License-Identifier: Apache-2.0
 
 model:
+  # Latest Models (3.5 series)
+  claude-3-5-sonnet-20241022:
+    input_tokens: 3.0
+    output_tokens: 15.0
+  claude-3-5-haiku-20241022:
+    input_tokens: 0.80
+    output_tokens: 4.0
+
+  # Claude 3 Models
   claude-3-opus-20240229:
-    input_tokens: 0.000015
-    output_tokens: 0.000075
+    input_tokens: 15.0
+    output_tokens: 75.0
   claude-3-sonnet-20240229:
-    input_tokens: 0.000003
-    output_tokens: 0.000015
+    input_tokens: 3.0
+    output_tokens: 15.0
   claude-3-haiku-20240307:
-    input_tokens: 0.0000005
-    output_tokens: 0.0000025
+    input_tokens: 0.25
+    output_tokens: 1.25
+
+  # Legacy Models
+  claude-2.1:
+    input_tokens: 8.0
+    output_tokens: 24.0
+  claude-2.0:
+    input_tokens: 8.0
+    output_tokens: 24.0
+  claude-instant-1.2:
+    input_tokens: 0.8
+    output_tokens: 2.4
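For opus and sonnet this is a pure unit conversion to match the `/ 1_000_000` added in AnthropicModel above (old dollars-per-token times one million equals the new dollars-per-million figure); the haiku row additionally halves the rate, in line with Anthropic's published claude-3-haiku pricing. A quick check of the conversion:

import math

old = {"opus": (0.000015, 0.000075), "sonnet": (0.000003, 0.000015)}
new = {"opus": (15.0, 75.0), "sonnet": (3.0, 15.0)}
for model in old:
    for per_token, per_million in zip(old[model], new[model]):
        assert math.isclose(per_token * 1_000_000, per_million)

# haiku: 0.0000005 * 1_000_000 == 0.50 per million, but the new value
# is 0.25 -- a price correction, not just a unit change.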
@@ -2,4 +2,4 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
-__version__ = "1.0.0"
+__version__ = "1.0.1"
@@ -149,3 +149,8 @@ class GroqService(Service):
     @property
     def allowed_roles(self):
         return ["user", "assistant", "system"]
+
+    @property
+    def sequential_exchange(self):
+        """whether the service requires user/assistant exchange"""
+        return True
@@ -2,4 +2,4 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
-__version__ = "1.0.0"
+__version__ = "1.0.1"
@@ -69,3 +69,8 @@ class LiteiModel:
     @property
     def allowed_roles(self):
         return ["user", "assistant", "system"]
+
+    @property
+    def sequential_exchange(self):
+        """whether the service requires user/assistant exchange"""
+        return False
@@ -417,7 +417,3 @@ class OpenAIModel(BaseModel):
             "Estimating price currently only supports chat/completions endpoint"
         )
         return estimated_price
-
-    @property
-    def allowed_roles(self):
-        return ["user", "assistant", "system"]
@@ -424,3 +424,12 @@ class OpenAIService(Service):
             method="POST",
             content_type="application/json",
         )
+
+    @property
+    def allowed_roles(self):
+        return ["user", "assistant", "system"]
+
+    @property
+    def sequential_exchange(self):
+        """whether the service requires user/assistant exchange"""
+        return False
@@ -1 +1 @@
-__version__ = "1.0.2"
+__version__ = "1.0.3"
@@ -111,3 +111,8 @@ class PerplexityService(Service):
     @property
     def allowed_roles(self):
         return ["user", "assistant", "system"]
+
+    @property
+    def sequential_exchange(self):
+        """whether the service requires user/assistant exchange"""
+        return True
@@ -1 +1 @@
-__version__ = "1.0.0"
+__version__ = "1.0.1"
@@ -148,6 +148,13 @@ async def alcall(
                     ucall(func, i, **kwargs), retry_timeout
                 )
                 return index, result
+
+            except InterruptedError:
+                return index, None
+
+            except asyncio.CancelledError:
+                return index, None
+
             except TimeoutError as e:
                 raise TimeoutError(
                     f"{error_msg or ''} Timeout {retry_timeout} seconds "