lionagi 0.5.1__py3-none-any.whl → 0.5.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. lionagi/__init__.py +0 -1
  2. lionagi/core/action/tool.py +3 -5
  3. lionagi/core/communication/action_request.py +3 -3
  4. lionagi/core/communication/message.py +3 -3
  5. lionagi/core/communication/utils.py +3 -3
  6. lionagi/core/generic/component.py +4 -4
  7. lionagi/core/generic/element.py +51 -47
  8. lionagi/core/generic/graph.py +1 -1
  9. lionagi/core/generic/log.py +2 -2
  10. lionagi/core/generic/pile.py +10 -11
  11. lionagi/core/generic/progression.py +19 -12
  12. lionagi/core/generic/utils.py +6 -3
  13. lionagi/core/models/base.py +11 -68
  14. lionagi/core/models/field_model.py +42 -19
  15. lionagi/core/models/{new_model_params.py → model_params.py} +5 -6
  16. lionagi/core/models/note.py +2 -2
  17. lionagi/core/models/operable_model.py +8 -4
  18. lionagi/core/models/schema_model.py +9 -31
  19. lionagi/core/models/types.py +15 -6
  20. lionagi/core/session/branch.py +10 -7
  21. lionagi/core/session/branch_mixins.py +11 -12
  22. lionagi/core/session/session.py +1 -2
  23. lionagi/core/typing/__init__.py +4 -4
  24. lionagi/core/typing/{concepts.py → _concepts.py} +43 -2
  25. lionagi/core/typing/_id.py +104 -0
  26. lionagi/integrations/anthropic_/AnthropicModel.py +8 -3
  27. lionagi/integrations/groq_/GroqModel.py +11 -4
  28. lionagi/integrations/litellm_/imodel.py +6 -8
  29. lionagi/integrations/openai_/OpenAIModel.py +8 -3
  30. lionagi/integrations/openai_/image_token_calculator/image_token_calculator.py +14 -8
  31. lionagi/integrations/perplexity_/PerplexityModel.py +8 -3
  32. lionagi/libs/func/async_calls/__init__.py +6 -3
  33. lionagi/libs/func/async_calls/alcall.py +46 -0
  34. lionagi/libs/func/async_calls/bcall.py +49 -1
  35. lionagi/libs/func/async_calls/rcall.py +32 -0
  36. lionagi/libs/utils.py +12 -1
  37. lionagi/operations/brainstorm/brainstorm.py +3 -3
  38. lionagi/operations/plan/plan.py +3 -3
  39. lionagi/protocols/__init__.py +3 -0
  40. lionagi/protocols/configs/__init__.py +0 -15
  41. lionagi/protocols/configs/branch_config.py +1 -1
  42. lionagi/protocols/configs/imodel_config.py +2 -2
  43. lionagi/protocols/configs/log_config.py +1 -1
  44. lionagi/protocols/configs/types.py +15 -0
  45. lionagi/protocols/operatives/__init__.py +3 -15
  46. lionagi/protocols/operatives/action.py +4 -0
  47. lionagi/protocols/operatives/instruct.py +6 -2
  48. lionagi/protocols/operatives/operative.py +9 -21
  49. lionagi/protocols/operatives/prompts.py +4 -0
  50. lionagi/protocols/operatives/reason.py +4 -0
  51. lionagi/protocols/operatives/step.py +11 -23
  52. lionagi/protocols/operatives/types.py +19 -0
  53. lionagi/protocols/registries/__init__.py +3 -0
  54. lionagi/protocols/registries/_component_registry.py +4 -0
  55. lionagi/protocols/registries/_pile_registry.py +4 -0
  56. lionagi/service/__init__.py +3 -0
  57. lionagi/service/service_match_util.py +4 -4
  58. lionagi/settings.py +10 -18
  59. lionagi/strategies/base.py +4 -5
  60. lionagi/strategies/concurrent.py +4 -3
  61. lionagi/strategies/concurrent_chunk.py +3 -3
  62. lionagi/strategies/concurrent_sequential_chunk.py +3 -3
  63. lionagi/strategies/params.py +7 -4
  64. lionagi/version.py +1 -1
  65. {lionagi-0.5.1.dist-info → lionagi-0.5.2.dist-info}/METADATA +4 -2
  66. {lionagi-0.5.1.dist-info → lionagi-0.5.2.dist-info}/RECORD +71 -70
  67. lionagi/core/typing/config.py +0 -15
  68. lionagi/core/typing/id.py +0 -221
  69. /lionagi/core/typing/{pydantic_.py → _pydantic.py} +0 -0
  70. /lionagi/core/typing/{typing_.py → _typing.py} +0 -0
  71. /lionagi/integrations/{services.py → _services.py} +0 -0
  72. {lionagi-0.5.1.dist-info → lionagi-0.5.2.dist-info}/WHEEL +0 -0
  73. {lionagi-0.5.1.dist-info → lionagi-0.5.2.dist-info}/licenses/LICENSE +0 -0
lionagi/integrations/litellm_/imodel.py CHANGED
@@ -5,13 +5,6 @@
  import json
  import os

- import litellm
- from dotenv import load_dotenv
-
- litellm.drop_params = True
- load_dotenv()
-
-
  RESERVED_PARAMS = [
      "invoke_action",
      "instruction",
@@ -21,6 +14,11 @@ RESERVED_PARAMS = [

  class LiteiModel:

+     from lionagi.libs.package.imports import check_import
+
+     litellm = check_import("litellm")
+     litellm.drop_params = True
+
      def __init__(self, **kwargs):
          if "api_key" in kwargs:
              try:
@@ -33,7 +31,7 @@ class LiteiModel:
              except Exception:
                  pass
          self.kwargs = kwargs
-         self.acompletion = litellm.acompletion
+         self.acompletion = self.litellm.acompletion

      def to_dict(self) -> dict:
          dict_ = {
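
Note: the change above replaces litellm's module-level import (and the dotenv setup) with lionagi's check_import helper bound as a class attribute, so the dependency is resolved through the package's import machinery when LiteiModel is defined rather than via a bare import at the top of the module. A minimal sketch of the same pattern, using the standard-library json module as a stand-in for an optional dependency (DemoModel is hypothetical, not part of lionagi):

from lionagi.libs.package.imports import check_import


class DemoModel:
    # Resolved through lionagi's import helper; "json" stands in here for an
    # optional third-party package such as litellm.
    json = check_import("json")

    def __init__(self, **kwargs):
        self.kwargs = kwargs
        # Bound the same way as self.litellm.acompletion in the diff above.
        self.dumps = self.json.dumps


demo = DemoModel(temperature=0.7)
print(demo.dumps(demo.kwargs))  # {"temperature": 0.7}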
lionagi/integrations/openai_/OpenAIModel.py CHANGED
@@ -1,7 +1,6 @@
  import warnings
  from pathlib import Path

- import yaml
  from dotenv import load_dotenv
  from pydantic import (
      BaseModel,
@@ -35,6 +34,12 @@ price_config_file_name = path / "openai_max_output_token_data.yaml"
  max_output_token_file_name = path / "openai_price_data.yaml"


+ class _ModuleImportClass:
+     from lionagi.libs.package.imports import check_import
+
+     yaml = check_import("yaml", pip_name="pyyaml")
+
+
  class OpenAIModel(BaseModel):
      model: str = Field(description="ID of the model to use.")

@@ -303,7 +308,7 @@ class OpenAIModel(BaseModel):
          )
          if estimated_output_len == 0:
              with open(max_output_token_file_name) as file:
-                 output_token_config = yaml.safe_load(file)
+                 output_token_config = _ModuleImportClass.yaml.safe_load(file)
              estimated_output_len = output_token_config.get(self.model, 0)
              self.estimated_output_len = (
                  estimated_output_len  # update to default max output len
@@ -388,7 +393,7 @@ class OpenAIModel(BaseModel):

          # read openai price info from config file
          with open(price_config_file_name) as file:
-             price_config = yaml.safe_load(file)
+             price_config = _ModuleImportClass.yaml.safe_load(file)

          if self.request_model.endpoint == "chat/completions":
              model_price_info_dict = price_config["model"][self.model]
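
Note: in the hunks above and below, check_import also takes a pip_name argument for packages whose import name differs from their PyPI distribution name (yaml is installed as pyyaml, PIL.Image as Pillow). A hedged usage sketch, assuming lionagi 0.5.2 and PyYAML are available:

from lionagi.libs.package.imports import check_import

# pip_name maps the import name to the PyPI distribution name.
yaml = check_import("yaml", pip_name="pyyaml")

print(yaml.safe_load("model: example\nmax_tokens: 128"))
# {'model': 'example', 'max_tokens': 128}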
lionagi/integrations/openai_/image_token_calculator/image_token_calculator.py CHANGED
@@ -4,9 +4,6 @@ import os
  from io import BytesIO
  from typing import Literal

- import aiohttp
- import yaml
- from PIL import Image
  from pydantic import BaseModel, Field, field_validator

  image_token_config_file_name = os.path.join(
@@ -14,6 +11,15 @@ image_token_config_file_name = os.path.join(
  )


+ class _ModuelImportClass:
+
+     from lionagi.libs.package.imports import check_import
+
+     yaml = check_import("yaml", pip_name="pyyaml")
+     Image = check_import("PIL.Image", pip_name="Pillow")
+     aiohttp = check_import("aiohttp")
+
+
  class OpenAIImageTokenCalculator(BaseModel):
      model: str = Field(description="ID of the model to use.")

@@ -21,7 +27,7 @@ class OpenAIImageTokenCalculator(BaseModel):
      @classmethod
      def validate_model_image_function(cls, value: str):
          with open(image_token_config_file_name) as file:
-             token_config = yaml.safe_load(file)
+             token_config = _ModuelImportClass.yaml.safe_load(file)

          token_config = token_config.get(value, None)

@@ -36,16 +42,16 @@ class OpenAIImageTokenCalculator(BaseModel):
              image_base64 = url.split("data:image/jpeg;base64,")[1]
              image_base64.strip("{}")
              image_data = base64.b64decode(image_base64)
-             image = Image.open(BytesIO(image_data))
+             image = _ModuelImportClass.Image.open(BytesIO(image_data))
          else:
              # image url
-             async with aiohttp.ClientSession() as client:
+             async with _ModuelImportClass.aiohttp.ClientSession() as client:
                  async with client.get(url=url) as response:
                      response.raise_for_status()

                      content = await response.read()

-             image = Image.open(BytesIO(content))
+             image = _ModuelImportClass.Image.open(BytesIO(content))

          return image.size

@@ -58,7 +64,7 @@ class OpenAIImageTokenCalculator(BaseModel):
          )

          with open(image_token_config_file_name) as file:
-             token_config = yaml.safe_load(file)
+             token_config = _ModuelImportClass.yaml.safe_load(file)

          token_config = token_config.get(self.model, None)

lionagi/integrations/perplexity_/PerplexityModel.py CHANGED
@@ -5,7 +5,6 @@
  import os
  from pathlib import Path

- import yaml
  from dotenv import load_dotenv
  from pydantic import (
      BaseModel,
@@ -35,6 +34,12 @@ price_config_file_name = path / "perplexity_price_data.yaml"
  max_output_token_file_name = path / "perplexity_max_output_token_data.yaml"


+ class _ModuleImportClass:
+     from lionagi.libs.package.imports import check_import
+
+     yaml = check_import("yaml", pip_name="pyyaml")
+
+
  class PerplexityModel(BaseModel):
      model: str = Field(description="ID of the model to use.")

@@ -235,7 +240,7 @@ class PerplexityModel(BaseModel):
          )
          if estimated_output_len == 0:
              with open(max_output_token_file_name) as file:
-                 output_token_config = yaml.safe_load(file)
+                 output_token_config = _ModuleImportClass.yaml.safe_load(file)
              estimated_output_len = output_token_config.get(self.model, 0)
              self.estimated_output_len = estimated_output_len

@@ -257,7 +262,7 @@ class PerplexityModel(BaseModel):
          num_of_input_tokens = self.text_token_calculator.calculate(input_text)

          with open(price_config_file_name) as file:
-             price_config = yaml.safe_load(file)
+             price_config = _ModuleImportClass.yaml.safe_load(file)

          model_price_info_dict = price_config["model"][self.model]
          estimated_price = (
lionagi/libs/func/async_calls/__init__.py CHANGED
@@ -2,11 +2,11 @@
  #
  # SPDX-License-Identifier: Apache-2.0

- from .alcall import alcall
- from .bcall import bcall
+ from .alcall import ALCallParams, alcall
+ from .bcall import BCallParams, bcall
  from .mcall import mcall
  from .pcall import pcall
- from .rcall import rcall
+ from .rcall import RCallParams, rcall
  from .tcall import tcall
  from .ucall import ucall

@@ -18,4 +18,7 @@ __all__ = [
      "rcall",
      "tcall",
      "ucall",
+     "ALCallParams",
+     "BCallParams",
+     "RCallParams",
  ]
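
Note: with the expanded exports, the new parameter dataclasses can be imported from the same package as the call helpers they wrap; a quick check, assuming lionagi 0.5.2 is installed:

from lionagi.libs.func.async_calls import ALCallParams, BCallParams, RCallParams

# Each *Params class is a DataClass-backed bundle of keyword arguments for the
# matching helper (see the alcall/bcall/rcall hunks below).
print([cls.__name__ for cls in (ALCallParams, BCallParams, RCallParams)])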
lionagi/libs/func/async_calls/alcall.py CHANGED
@@ -1,7 +1,10 @@
  import asyncio
  from collections.abc import Callable
+ from dataclasses import dataclass
  from typing import Any, TypeVar

+ from lionagi.libs.utils import DataClass
+
  from ...constants import UNDEFINED
  from ...parse import to_list
  from .ucall import ucall
@@ -9,6 +12,49 @@ from .ucall import ucall
  T = TypeVar("T")


+ @dataclass
+ class ALCallParams(DataClass):
+     function: Callable[..., T]
+     num_retries: int = 0
+     initial_delay: float = 0
+     retry_delay: float = 0
+     backoff_factor: float = 1
+     retry_default: Any = UNDEFINED
+     retry_timeout: float | None = None
+     retry_timing: bool = False
+     verbose: bool = True
+     error_msg: str | None = None
+     error_map: dict[type, Callable[[Exception], None]] | None = None
+     max_concurrent: int | None = None
+     throttle_period: float | None = None
+     flatten: bool = False
+     dropna: bool = False
+     unique: bool = False
+
+     async def __call__(self, input_, *args, **kwargs):
+         return await alcall(
+             input_,
+             self.function,
+             *args,
+             num_retries=self.num_retries,
+             initial_delay=self.initial_delay,
+             retry_delay=self.retry_delay,
+             backoff_factor=self.backoff_factor,
+             retry_default=self.retry_default,
+             retry_timeout=self.retry_timeout,
+             retry_timing=self.retry_timing,
+             verbose_retry=self.verbose,
+             error_msg=self.error_msg,
+             error_map=self.error_map,
+             max_concurrent=self.max_concurrent,
+             throttle_period=self.throttle_period,
+             flatten=self.flatten,
+             dropna=self.dropna,
+             unique=self.unique,
+             **kwargs,
+         )
+
+
  async def alcall(
      input_: list[Any],
      func: Callable[..., T],
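
Note: ALCallParams packages alcall's retry and concurrency settings into a reusable object; calling the instance forwards to alcall with the stored settings. A small usage sketch, assuming lionagi 0.5.2 (the double coroutine is illustrative only):

import asyncio

from lionagi.libs.func.async_calls import ALCallParams


async def double(x: int) -> int:
    return x * 2


async def main():
    # Configure once, reuse across many inputs.
    params = ALCallParams(function=double, num_retries=2, max_concurrent=5)
    results = await params([1, 2, 3])
    print(results)  # expected: [2, 4, 6]


asyncio.run(main())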
lionagi/libs/func/async_calls/bcall.py CHANGED
@@ -1,13 +1,61 @@
- import asyncio
  from collections.abc import AsyncGenerator, Callable
+ from dataclasses import dataclass
  from typing import Any, TypeVar

+ from lionagi.libs.constants import UNDEFINED
+ from lionagi.libs.utils import DataClass
+
  from ...parse import to_list
  from .alcall import alcall

  T = TypeVar("T")


+ @dataclass
+ class BCallParams(DataClass):
+     function: Callable[..., T]
+     batch_size: int
+     num_retries: int = 0
+     initial_delay: float = 0
+     retry_delay: float = 0
+     backoff_factor: float = 1
+     retry_default: Any = UNDEFINED
+     retry_timeout: float | None = None
+     retry_timing: bool = False
+     verbose_retry: bool = True
+     error_msg: str | None = None
+     error_map: dict[type, Callable[[Exception], None]] | None = None
+     max_concurrent: int | None = None
+     throttle_period: float | None = None
+     flatten: bool = False
+     dropna: bool = False
+     unique: bool = False
+
+     async def __call__(self, input_, *args, **kwargs):
+         return await bcall(
+             input_,
+             self.function,
+             *args,
+             batch_size=self.batch_size,
+             num_retries=self.num_retries,
+             initial_delay=self.initial_delay,
+             retry_delay=self.retry_delay,
+             backoff_factor=self.backoff_factor,
+             default=self.retry_default,
+             timeout=self.retry_timeout,
+             timing=self.retry_timing,
+             verbose=self.verbose_retry,
+             error_msg=self.error_msg,
+             error_map=self.error_map,
+             max_concurrent=self.max_concurrent,
+             throttle_period=self.throttle_period,
+             flatten=self.flatten,
+             dropna=self.dropna,
+             unique=self.unique,
+             **kwargs,
+         )
+
+
  async def bcall(
      input_: Any,
      func: Callable[..., T],
lionagi/libs/func/async_calls/rcall.py CHANGED
@@ -1,7 +1,10 @@
  import asyncio
  from collections.abc import Callable
+ from dataclasses import dataclass
  from typing import Any, TypeVar

+ from lionagi.libs.utils import DataClass
+
  from ...constants import UNDEFINED
  from ...utils import time as _t
  from .ucall import ucall
@@ -9,6 +12,35 @@ from .ucall import ucall
  T = TypeVar("T")


+ @dataclass
+ class RCallParams(DataClass):
+     function: Callable[..., T]
+     num_retries: int = 0
+     initial_delay: float = 0
+     retry_delay: float = 0
+     backoff_factor: float = 1
+     retry_default: Any = UNDEFINED
+     retry_timeout: float | None = None
+     retry_timing: bool = False
+     verbose_retry: bool = True
+
+     async def __call__(self, input_, *args, **kwargs):
+         return await rcall(
+             input_,
+             self.function,
+             *args,
+             num_retries=self.num_retries,
+             initial_delay=self.initial_delay,
+             retry_delay=self.retry_delay,
+             backoff_factor=self.backoff_factor,
+             retry_default=self.retry_default,
+             retry_timeout=self.retry_timeout,
+             retry_timing=self.retry_timing,
+             verbose_retry=self.verbose_retry,
+             **kwargs,
+         )
+
+
  async def rcall(
      func: Callable[..., T],
      /,
lionagi/libs/utils.py CHANGED
@@ -8,13 +8,24 @@ import random
  import subprocess
  import sys
  from collections.abc import Mapping, Sequence
+ from dataclasses import asdict
  from datetime import datetime, timezone
  from hashlib import sha256
- from typing import Literal, TypeVar
+ from typing import Literal, Self, TypeVar

  T = TypeVar("T")


+ class DataClass:
+
+     def to_dict(self) -> dict:
+         return asdict(self)
+
+     @classmethod
+     def from_dict(cls, data: dict) -> Self:
+         return cls(**data)
+
+
  def unique_hash(n: int = 32) -> str:
      """unique random hash"""
      current_time = datetime.now().isoformat().encode("utf-8")
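
Note: DataClass is a small mixin that gives any dataclass dict round-tripping via dataclasses.asdict and cls(**data); it is what the new ALCallParams/BCallParams/RCallParams classes inherit from. A minimal sketch (RetrySettings is a hypothetical example, not a lionagi class):

from dataclasses import dataclass

from lionagi.libs.utils import DataClass


@dataclass
class RetrySettings(DataClass):
    num_retries: int = 0
    retry_delay: float = 0.0


settings = RetrySettings(num_retries=3, retry_delay=0.5)
data = settings.to_dict()                 # {'num_retries': 3, 'retry_delay': 0.5}
restored = RetrySettings.from_dict(data)  # reconstructs via cls(**data)
assert restored == settings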
lionagi/operations/brainstorm/brainstorm.py CHANGED
@@ -9,7 +9,7 @@ from lionagi.core.typing import ID, Any, BaseModel
  from lionagi.libs.func import alcall
  from lionagi.libs.parse import to_flat_list
  from lionagi.protocols.operatives.instruct import (
-     INSTRUCT_MODEL_FIELD,
+     INSTRUCT_FIELD_MODEL,
      Instruct,
      InstructResponse,
  )
@@ -118,8 +118,8 @@ async def brainstorm(
      print(f"Starting brainstorming...")

      field_models: list = kwargs.get("field_models", [])
-     if INSTRUCT_MODEL_FIELD not in field_models:
-         field_models.append(INSTRUCT_MODEL_FIELD)
+     if INSTRUCT_FIELD_MODEL not in field_models:
+         field_models.append(INSTRUCT_FIELD_MODEL)

      kwargs["field_models"] = field_models
      session, branch = prepare_session(session, branch, branch_kwargs)
lionagi/operations/plan/plan.py CHANGED
@@ -7,7 +7,7 @@ from lionagi.core.session.branch import Branch
  from lionagi.core.session.session import Session
  from lionagi.core.typing import ID, Any, BaseModel, Literal
  from lionagi.protocols.operatives.instruct import (
-     INSTRUCT_MODEL_FIELD,
+     INSTRUCT_FIELD_MODEL,
      Instruct,
      InstructResponse,
  )
@@ -95,8 +95,8 @@ async def plan(
      print(f"Planning execution with {num_steps} steps...")

      field_models: list = kwargs.get("field_models", [])
-     if INSTRUCT_MODEL_FIELD not in field_models:
-         field_models.append(INSTRUCT_MODEL_FIELD)
+     if INSTRUCT_FIELD_MODEL not in field_models:
+         field_models.append(INSTRUCT_FIELD_MODEL)
      kwargs["field_models"] = field_models
      session, branch = prepare_session(session, branch, branch_kwargs)
      execute_branch: Branch = session.split(branch)
lionagi/protocols/__init__.py ADDED
@@ -0,0 +1,3 @@
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
lionagi/protocols/configs/__init__.py CHANGED
@@ -1,15 +0,0 @@
- from .branch_config import BranchConfig, MessageConfig
- from .id_config import LionIDConfig
- from .imodel_config import iModelConfig
- from .log_config import LogConfig
- from .retry_config import RetryConfig, TimedFuncCallConfig
-
- __all__ = [
-     "LogConfig",
-     "LionIDConfig",
-     "RetryConfig",
-     "TimedFuncCallConfig",
-     "iModelConfig",
-     "BranchConfig",
-     "MessageConfig",
- ]
lionagi/protocols/configs/branch_config.py CHANGED
@@ -5,7 +5,7 @@
  from typing import Any, Literal

  from lionagi.core.models.schema_model import SchemaModel
- from lionagi.core.typing.pydantic_ import Field
+ from lionagi.core.typing._pydantic import Field

  from .imodel_config import iModelConfig
  from .log_config import LogConfig
lionagi/protocols/configs/imodel_config.py CHANGED
@@ -6,8 +6,8 @@ import json
  import os

  from lionagi.core.models.schema_model import SchemaModel
- from lionagi.core.typing.pydantic_ import Field
- from lionagi.core.typing.typing_ import Any
+ from lionagi.core.typing._pydantic import Field
+ from lionagi.core.typing._typing import Any


  class iModelConfig(SchemaModel):
lionagi/protocols/configs/log_config.py CHANGED
@@ -8,7 +8,7 @@ from typing import Any
  from pydantic import field_validator

  from lionagi.core.models.schema_model import SchemaModel
- from lionagi.core.typing.pydantic_ import Field
+ from lionagi.core.typing._pydantic import Field


  class LogConfig(SchemaModel):
lionagi/protocols/configs/types.py ADDED
@@ -0,0 +1,15 @@
+ from .branch_config import BranchConfig, MessageConfig
+ from .id_config import LionIDConfig
+ from .imodel_config import iModelConfig
+ from .log_config import LogConfig
+ from .retry_config import RetryConfig, TimedFuncCallConfig
+
+ __all__ = [
+     "LogConfig",
+     "LionIDConfig",
+     "RetryConfig",
+     "TimedFuncCallConfig",
+     "iModelConfig",
+     "BranchConfig",
+     "MessageConfig",
+ ]
lionagi/protocols/operatives/__init__.py CHANGED
@@ -1,15 +1,3 @@
- from .action import ActionRequestModel, ActionResponseModel
- from .instruct import Instruct
- from .operative import Operative
- from .reason import ReasonModel
- from .step import Step, StepModel
-
- __all__: list[str] = [
-     "Operative",
-     "Step",
-     "ActionRequestModel",
-     "ActionResponseModel",
-     "StepModel",
-     "Instruct",
-     "ReasonModel",
- ]
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
lionagi/protocols/operatives/action.py CHANGED
@@ -1,3 +1,7 @@
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
  import re

  from lionagi.core.typing import (
lionagi/protocols/operatives/instruct.py CHANGED
@@ -1,3 +1,7 @@
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
  """Field definitions and validation for InstructModel components."""

  from typing import Any, ClassVar
@@ -169,7 +173,7 @@ class OperationInstruct(Instruct):
          return ACTIONS_FIELD.validator(cls, v)


- INSTRUCT_MODEL_FIELD = FieldModel(
+ INSTRUCT_FIELD_MODEL = FieldModel(
      name="instruct_models",
      annotation=list[Instruct],
      default_factory=list,
@@ -191,6 +195,6 @@ __all__ = [
      "REASON_FIELD",
      "ACTIONS_FIELD",
      "Instruct",
-     "INSTRUCT_MODEL_FIELD",
+     "INSTRUCT_FIELD_MODEL",
      "InstructResponse",
  ]
lionagi/protocols/operatives/operative.py CHANGED
@@ -1,25 +1,13 @@
- """
- Copyright 2024 HaiyangLi
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- """
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0

  from typing import Self

  from pydantic import BaseModel, Field, PrivateAttr, model_validator
  from pydantic.fields import FieldInfo

- from lionagi.core.models import FieldModel, NewModelParams, OperableModel
+ from lionagi.core.models import FieldModel, ModelParams, OperableModel
  from lionagi.core.typing import UNDEFINED
  from lionagi.libs.parse import to_json, validate_keys

@@ -29,10 +17,10 @@ class Operative(OperableModel):

      name: str | None = None

-     request_params: NewModelParams | None = Field(default=None)
+     request_params: ModelParams | None = Field(default=None)
      request_type: type[BaseModel] | None = Field(default=None)

-     response_params: NewModelParams | None = Field(default=None)
+     response_params: ModelParams | None = Field(default=None)
      response_type: type[BaseModel] | None = Field(default=None)
      response_model: OperableModel | None = Field(default=None)
      response_str_dict: dict | str | None = Field(default=None)
@@ -140,7 +128,7 @@

      def create_response_type(
          self,
-         response_params: NewModelParams | None = None,
+         response_params: ModelParams | None = None,
          field_models: list[FieldModel] | None = None,
          parameter_fields: dict[str, FieldInfo] | None = None,
          exclude_fields: list[str] | None = None,
@@ -154,7 +142,7 @@
          """Creates a new response type based on the provided parameters.

          Args:
-             response_params (NewModelParams, optional): Parameters for the new response model.
+             response_params (ModelParams, optional): Parameters for the new response model.
              field_models (list[FieldModel], optional): List of field models.
              parameter_fields (dict[str, FieldInfo], optional): Dictionary of parameter fields.
              exclude_fields (list, optional): List of fields to exclude.
@@ -165,7 +153,7 @@
              frozen (bool, optional): Whether the model is frozen.
              validators (dict, optional): Dictionary of validators.
          """
-         self.response_params = response_params or NewModelParams(
+         self.response_params = response_params or ModelParams(
              parameter_fields=parameter_fields,
              field_models=field_models,
              exclude_fields=exclude_fields,
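
Note: together with the new_model_params.py → model_params.py rename in the file list, NewModelParams is now exposed as ModelParams, so code that imported the old name from lionagi.core.models should switch to the new one. A hedged sketch of the rename (the all-None arguments mirror what create_response_type above passes through when called with its defaults):

# lionagi 0.5.1:
#   from lionagi.core.models import NewModelParams
# lionagi 0.5.2:
from lionagi.core.models import ModelParams

params = ModelParams(
    parameter_fields=None,
    field_models=None,
    exclude_fields=None,
)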
lionagi/protocols/operatives/prompts.py CHANGED
@@ -1,3 +1,7 @@
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
  from pydantic import JsonValue

  function_field_description = (
lionagi/protocols/operatives/reason.py CHANGED
@@ -1,3 +1,7 @@
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
  from pydantic import BaseModel, field_validator

  from lionagi.core.models import FieldModel