lionagi 0.8.2__py3-none-any.whl → 0.8.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lionagi/__init__.py CHANGED
@@ -2,6 +2,8 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
+import logging
+
 from pydantic import BaseModel, Field
 
 from . import _types as types
@@ -13,6 +15,9 @@ from .version import __version__
 
 LiteiModel = iModel
 
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
 __all__ = (
     "Session",
     "Branch",
@@ -24,4 +29,5 @@ __all__ = (
     "__version__",
     "BaseModel",
     "Field",
+    "logger",
 )
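The new module-level `logger` gives applications one handle for tuning lionagi's log output. A minimal sketch of consumer-side use; the handler setup is illustrative (the diff itself attaches no handler):

```python
import logging

import lionagi

# The package pre-sets INFO; applications can raise or lower it.
lionagi.logger.setLevel(logging.WARNING)

# Attach whatever handler you like; lionagi does not install one here.
lionagi.logger.addHandler(logging.StreamHandler())
```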
@@ -11,6 +11,8 @@ from lionagi.operatives.types import Instruct
 from lionagi.service.imodel import iModel
 from lionagi.utils import copy
 
+from .utils import ReActAnalysis
+
 if TYPE_CHECKING:
     from lionagi.session.branch import Branch
 
@@ -19,14 +21,19 @@ async def ReAct(
     branch: "Branch",
     instruct: Instruct | dict[str, Any],
     interpret: bool = False,
+    interpret_domain: str | None = None,
+    interpret_style: str | None = None,
+    interpret_sample: str | None = None,
+    interpret_kwargs: dict | None = None,
     tools: Any = None,
     tool_schemas: Any = None,
     response_format: type[BaseModel] | BaseModel = None,
-    extension_allowed: bool = False,
-    max_extensions: int | None = None,
+    extension_allowed: bool = True,
+    max_extensions: int | None = 3,
     response_kwargs: dict | None = None,
     return_analysis: bool = False,
     analysis_model: iModel | None = None,
+    verbose_analysis: bool = False,
     **kwargs,
 ):
     # If no tools or tool schemas are provided, default to "all tools"
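Taken together, the signature changes make the interpret step tunable and turn extensions on by default (3 rounds). A hypothetical call using the new keywords, assuming the operation is exposed on `Branch` as `branch.ReAct` as in recent lionagi releases; model and provider names are placeholders:

```python
import asyncio

from lionagi import Branch, iModel


async def main():
    branch = Branch(chat_model=iModel(provider="openai", model="gpt-4o-mini"))
    result = await branch.ReAct(
        instruct={"instruction": "Summarize the attached paper"},
        interpret=True,
        interpret_domain="machine learning",  # new knobs for the interpret step
        interpret_style="formal",
        max_extensions=5,       # extensions are now on by default (3 rounds)
        verbose_analysis=True,  # print each round's analysis as it arrives
    )
    print(result)


asyncio.run(main())
```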
@@ -41,8 +48,14 @@ async def ReAct(
                 instruct.to_dict()
                 if isinstance(instruct, Instruct)
                 else instruct
-            )
+            ),
+            domain=interpret_domain,
+            style=interpret_style,
+            sample_writing=interpret_sample,
+            **(interpret_kwargs or {}),
         )
+        if verbose_analysis:
+            print(f"Interpreted instruction: {instruction_str}")
 
     # Convert Instruct to dict if necessary
     instruct_dict = (
@@ -50,21 +63,22 @@ async def ReAct(
         if isinstance(instruct, Instruct)
         else dict(instruct)
     )
-    # Overwrite the "instruction" field with the interpreted string (if any)
-    instruct_dict["instruction"] = instruction_str or instruct_dict.get(
-        "instruction"
-    )
+
+    # Overwrite "instruction" with the interpreted prompt (if any) plus a note about expansions
+    max_ext_info = f"\nIf needed, you can do up to {max_extensions or 0 if extension_allowed else 0} expansions."
+    instruct_dict["instruction"] = (
+        instruction_str
+        or (instruct_dict.get("instruction") or "")  # in case it's missing
+    ) + max_ext_info
 
     # Prepare a copy of user-provided kwargs for the first operate call
     kwargs_for_operate = copy(kwargs)
     kwargs_for_operate["actions"] = True
     kwargs_for_operate["reason"] = True
 
-    # We'll pass the refined instruct_dict plus the user's other kwargs
-    from .utils import ReActAnalysis
-
     # Step 1: Generate initial ReAct analysis
     analysis: ReActAnalysis = await branch.operate(
+        instruct=instruct_dict,
         response_format=ReActAnalysis,
         tools=tools,
         tool_schemas=tool_schemas,
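One subtlety in the new `max_ext_info` line: by Python operator precedence the f-string expression parses as `max_extensions or (0 if extension_allowed else 0)`, so the conditional only ever supplies the fallback `0` and the whole thing reduces to `max_extensions or 0`, regardless of `extension_allowed`. A quick standalone check:

```python
max_extensions, extension_allowed = 3, False

# Parenthesized the way Python actually reads it:
value = max_extensions or (0 if extension_allowed else 0)
print(value)  # 3, even though extension_allowed is False
```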
@@ -73,17 +87,27 @@ async def ReAct(
     )
     analyses = [analysis]
 
+    # If verbose, show round #1 analysis
+    if verbose_analysis:
+        print(
+            f"ReAct Round #1 Analysis:\n {analysis.model_dump_json(indent=2)}",
+        )
+
     # Validate and clamp max_extensions if needed
-    if max_extensions and max_extensions > 5:
-        logging.warning("max_extensions should not exceed 5; defaulting to 5.")
-        max_extensions = 5
+    if max_extensions and max_extensions > 100:
+        logging.warning(
+            "max_extensions should not exceed 100; defaulting to 100."
+        )
+        max_extensions = 100
 
     # Step 2: Possibly loop through expansions if extension_needed
     extensions = max_extensions
+    round_count = 1
+
     while (
         extension_allowed
         and analysis.extension_needed
-        and (extensions if extensions else 1) > 0
+        and (extensions if max_extensions else 0) > 0
     ):
         new_instruction = None
         if extensions == max_extensions:
@@ -95,20 +119,28 @@ async def ReAct(
                 extensions=extensions
             )
 
-        # Each expansion uses a fresh copy of instruct_dict + forcibly "reason" + "actions"
-        expanded_kwargs = copy(instruct_dict)
-        expanded_kwargs["instruction"] = new_instruction
-        expanded_kwargs["reason"] = True
-        expanded_kwargs["actions"] = True
+        operate_kwargs = copy(kwargs)
+        operate_kwargs["actions"] = True
+        operate_kwargs["reason"] = True
+        operate_kwargs["response_format"] = ReActAnalysis
+        operate_kwargs["action_strategy"] = analysis.action_strategy
+        if analysis.action_batch_size:
+            operate_kwargs["action_batch_size"] = analysis.action_batch_size
 
         analysis = await branch.operate(
-            response_format=ReActAnalysis,
+            instruction=new_instruction,
             tools=tools,
             tool_schemas=tool_schemas,
-            **expanded_kwargs,
+            **operate_kwargs,
        )
         analyses.append(analysis)
+        round_count += 1
 
+        # If verbose, show round analysis
+        if verbose_analysis:
+            print(
+                f"ReAct Round #{round_count} Analysis:\n {analysis.model_dump_json(indent=2)}",
+            )
         if extensions:
             extensions -= 1
 
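The loop bookkeeping is easier to see in isolation: `extensions` starts at `max_extensions` and decrements once per round, so the new default of 3 yields at most three expansion rounds after the initial analysis. A stripped-down sketch of just the counter logic, with `extension_needed` stubbed to always ask for more and `extension_allowed` omitted:

```python
max_extensions = 3
extensions = max_extensions
round_count = 1

extension_needed = True  # stub: the model keeps requesting another round
while extension_needed and (extensions if max_extensions else 0) > 0:
    round_count += 1
    if extensions:
        extensions -= 1

print(round_count)  # 4: one initial round plus three expansions
```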
@@ -2,27 +2,91 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
-from typing import ClassVar
+from typing import ClassVar, Literal
 
 from pydantic import BaseModel, Field
 
 
+class PlannedAction(BaseModel):
+    """
+    Short descriptor for an upcoming action/tool invocation the LLM wants to perform.
+    The model can hold multiple actions in a single round if needed.
+    """
+
+    action_type: str = Field(
+        ...,
+        description="The name or type of tool/action to invoke (e.g., 'search_exa', 'reader_tool').",
+    )
+    description: str = Field(
+        ...,
+        description="A short explanation of why or what is intended to achieve with this action.",
+    )
+
+
 class ReActAnalysis(BaseModel):
+    """
+    Captures the ReAct chain-of-thought output each round:
+    1) The LLM's 'analysis' (reasoning),
+    2) A list of planned actions to perform before finalizing,
+    3) Indication whether more expansions/rounds are needed,
+    4) Additional tuning knobs: how to handle validation, how to execute actions, etc.
+    """
 
+    # Standard ReAct strings for controlling expansions:
     FIRST_EXT_PROMPT: ClassVar[str] = (
-        "You are provided with another round to perform reason action to provide an accurate final answer. you have max another {extensions} rounds, set extension_needed to False if you are done and ready to provide final answer."
+        "You can perform multiple reason-action steps for accuracy. "
+        "If you are not ready to finalize, set extension_needed to True. "
+        "You have up to {extensions} expansions. Please continue."
     )
-
     CONTINUE_EXT_PROMPT: ClassVar[str] = (
-        "You are provided with another round, you have max another {extensions} rounds"
+        "Another round is available. You may do multiple actions if needed. "
+        "You have up to {extensions} expansions. Please continue."
     )
-
     ANSWER_PROMPT: ClassVar[str] = (
-        "given above reason and actions, please provide final answer to the original user request {instruction}"
+        "Given your reasoning and actions, please now provide the final answer "
+        "to the user's request:\n\n{instruction}"
+    )
+
+    analysis: str = Field(
+        ...,
+        description="Free-form reasoning or chain-of-thought summary. Must be consistent with the plan.",
+    )
+
+    planned_actions: list[PlannedAction] = Field(
+        default_factory=list,
+        description=(
+            "One or more short descriptors of the tool calls or operations "
+            "the LLM wants to perform this round. For example, read the doc, "
+            "then run a search."
+        ),
     )
 
-    analysis: str
     extension_needed: bool = Field(
         False,
-        description="Set to True if more steps are needed to provide an accurate answer. If True, additional rounds are allowed.",
+        description="Set True if more expansions are needed. If False, final answer is next.",
+    )
+
+    milestone: str | None = Field(
+        None,
+        description=(
+            "A sub-goal or mini-checkpoint to reach before finalizing. "
+            "E.g. 'Validate results from search_exa, then summarize outcomes.'"
+        ),
+    )
+
+    action_strategy: Literal["sequential", "concurrent", "batch"] = Field(
+        "concurrent",
+        description=(
+            "Specifies how to invoke the planned actions:\n"
+            "'sequential' => Each action is run in order, \n"
+            "'concurrent' => All actions run in parallel, \n"
+            "'batch' => Divide actions into async batches of N (if reasonable)."
+        ),
+    )
+
+    action_batch_size: int | None = Field(
+        None,
+        description=(
+            "provide if and only if action_strategy is 'batch', this specifies the number of actions to run in parallel per batch."
+        ),
     )
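Since `ReActAnalysis` is a plain Pydantic model, the new fields (`planned_actions`, `milestone`, `action_strategy`, `action_batch_size`) can be exercised directly. A standalone sketch, assuming the two class definitions above are in scope:

```python
analysis = ReActAnalysis(
    analysis="Need the doc contents before comparing approaches.",
    planned_actions=[
        PlannedAction(action_type="reader_tool", description="Read the local PDF."),
        PlannedAction(action_type="search_exa", description="Find recent related work."),
    ],
    extension_needed=True,
    action_strategy="batch",
    action_batch_size=2,  # only meaningful when action_strategy == "batch"
)
print(analysis.model_dump_json(indent=2))
```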
@@ -18,6 +18,7 @@ async def _act(
     branch: "Branch",
     action_request: BaseModel | dict,
     suppress_errors: bool = False,
+    verbose_action: bool = False,
 ) -> "ActionResponseModel":
 
     _request = {}
@@ -35,6 +36,11 @@ async def _act(
 
     try:
         func_call = await branch._action_manager.invoke(_request)
+        if verbose_action:
+            print(
+                f"Action {_request['function']} invoked, status: {func_call.status}."
+            )
+
     except Exception as e:
         content = {
             "error": str(e),
@@ -43,6 +49,8 @@ async def _act(
             "branch": str(branch.id),
         }
         branch._log_manager.log(Log(content=content))
+        if verbose_action:
+            print(f"Action {_request['function']} failed, error: {str(e)}.")
         if suppress_errors:
             logging.error(
                 f"Error invoking action '{_request['function']}': {e}"
@@ -16,15 +16,46 @@ async def interpret(
     sample_writing: str | None = None,
     **kwargs,
 ) -> str:
-    instruction = (
-        "Rewrite the following user input into a clear, structured prompt or "
-        "query for an LLM, ensuring any implicit details are made explicit. "
-        "Return only the improved user prompt."
-    )
+    instruction = """
+You are given a user's raw instruction or question. Your task is to rewrite it into a clearer, more structured prompt for an LLM or system, making any implicit or missing details explicit.
+
+Follow these guidelines:
+
+1. **Dissect the user's request**:
+   - If the user references a local file, note it clearly (e.g., "paper_file_path": "…").
+   - If the user might need external references or up-to-date data, mention that possibility.
+   - If the user's question is ambiguous, propose clarifications.
+
+2. **Be explicit about the user's final objective**:
+   - For example, if the user wants a comparison with other works, add that as a bullet point or sub-question.
+   - If the user wants a summary plus code snippet, highlight that in your structured prompt.
+
+3. **Do NOT produce final system actions**:
+   - You're not calling any tools directly here; only rewriting the user query to reflect potential next steps.
+   - If the user's request might require searching or doc reading, note it as an *option*, e.g. "Potential tool usage: {search, partial doc read}."
+
+4. **Return only the improved user prompt**:
+   - The final output should be a single text block or short JSON specifying the clarified user request.
+   - Keep it concise yet thorough.
+
+For instance, if the user's original text is:
+"Please read my local PDF on RL and compare it to the newest research methods from exa or perplexity."
+
+A re-written version might be:
+"**Task**:
+- Summarize the local PDF (paper_file_path: 'myRLpaper.pdf').
+- Compare its approach with recent reinforcement learning research found via exa/perplexity searches.
+**Potential Tool Usage**:
+- Doc reading (reader_tool)
+- External search (search_exa, search_perplexity)
+**Output**:
+- A structured summary + comparative analysis."
+
+Now, apply this rewriting to the input below. Return only the re-written prompt.
+"""
     guidance = (
         f"Domain hint: {domain or 'general'}. "
         f"Desired style: {style or 'concise'}. "
-        "You can add or clarify context if needed."
     )
     if sample_writing:
         guidance += f" Sample writing: {sample_writing}"
@@ -32,11 +63,11 @@ async def interpret(
     context = [f"User input: {text}"]
 
     # Default temperature if none provided
-    kwargs["temperature"] = kwargs.get("temperature", 0.1)
     kwargs["guidance"] = guidance + "\n" + kwargs.get("guidance", "")
+    kwargs["instruction"] = instruction + "\n" + kwargs.get("instruction", "")
+    kwargs["temperature"] = kwargs.get("temperature", 0.1)
 
     refined_prompt = await branch.chat(
-        instruction=instruction,
         context=context,
         **kwargs,
     )
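The reordered kwargs block also changes the semantics slightly: the long rewriting template and the guidance are now merged into whatever the caller passed, rather than being fixed `chat` arguments. The merge rule is plain string concatenation, as this sketch with hypothetical values shows:

```python
kwargs = {"guidance": "Prefer bullet lists."}
instruction = "You are given a user's raw instruction..."  # the template above, abridged
guidance = "Domain hint: general. Desired style: concise. "

kwargs["guidance"] = guidance + "\n" + kwargs.get("guidance", "")
kwargs["instruction"] = instruction + "\n" + kwargs.get("instruction", "")
kwargs["temperature"] = kwargs.get("temperature", 0.1)  # default only if unset

print(kwargs["guidance"])     # built-in guidance first, caller's text appended
print(kwargs["temperature"])  # 0.1
```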
@@ -50,6 +50,8 @@ async def operate(
     action_strategy: Literal[
         "sequential", "concurrent", "batch"
     ] = "concurrent",
+    action_batch_size: int = None,
+    verbose_action: bool = False,
     field_models: list[FieldModel] = None,
     exclude_fields: list | dict | None = None,
     request_params: ModelParams = None,
@@ -189,9 +191,13 @@ async def operate(
         if instruct.action_strategy
         else action_kwargs.get("strategy", "concurrent")
     )
+    if action_batch_size:
+        action_kwargs["batch_size"] = action_batch_size
 
     action_response_models = await branch.act(
-        response_model.action_requests, **action_kwargs
+        response_model.action_requests,
+        verbose_action=verbose_action,
+        **action_kwargs,
     )
     # Possibly refine the operative with the tool outputs
     operative = Step.respond_operative(
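With the two new parameters, a caller can cap per-batch concurrency and surface tool invocations as they run. A hypothetical `operate` call; the instruction text is a placeholder and `branch` is assumed to be an already-configured `Branch`:

```python
# inside an async function, with `branch` already configured
result = await branch.operate(
    instruction="Gather sources, then summarize them",
    actions=True,
    action_strategy="batch",
    action_batch_size=4,  # forwarded to branch.act as batch_size
    verbose_action=True,  # print each action's status as it runs
)
```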
@@ -5,7 +5,7 @@
 import asyncio
 from typing import Any
 
-from pydantic import Field, model_validator
+from pydantic import BaseModel, Field, field_validator, model_validator
 from typing_extensions import Self
 
 from lionagi.protocols.generic.event import Event, EventStatus
@@ -27,12 +27,22 @@ class FunctionCalling(Event):
         exclude=True,
     )
 
-    arguments: dict[str, Any] = Field(
+    arguments: dict[str, Any] | BaseModel = Field(
         ..., description="Dictionary of arguments to pass to the function"
     )
 
+    @field_validator("arguments", mode="before")
+    def _validate_argument(cls, value):
+        if isinstance(value, BaseModel):
+            return value.model_dump(exclude_unset=True)
+        return value
+
     @model_validator(mode="after")
     def _validate_strict_tool(self) -> Self:
+        if self.func_tool.request_options:
+            args: BaseModel = self.func_tool.request_options(**self.arguments)
+            self.arguments = args.model_dump(exclude_unset=True)
+
         if self.func_tool.strict_func_call is True:
             if (
                 not set(self.arguments.keys())
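The `field_validator` means `arguments` is normalized before any later check sees a `BaseModel`: model instances are dumped to a plain dict with unset fields dropped. The normalization itself is standard Pydantic, as this self-contained sketch mirrors (the model name is illustrative):

```python
from pydantic import BaseModel


class SearchArgs(BaseModel):
    query: str
    limit: int = 10


def normalize(value):
    # Mirrors the new _validate_argument hook: BaseModel -> dict
    if isinstance(value, BaseModel):
        return value.model_dump(exclude_unset=True)
    return value


print(normalize(SearchArgs(query="lionagi")))  # {'query': 'lionagi'} ("limit" was unset, so dropped)
print(normalize({"query": "lionagi"}))         # dicts pass through unchanged
```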
@@ -49,6 +49,11 @@ class Tool(Element):
         description="Schema describing the function's parameters and structure",
     )
 
+    request_options: type | None = Field(
+        default=None,
+        description="Optional Pydantic model for validating the function's input",
+    )
+
     preprocessor: Callable[[Any], Any] | None = Field(
         default=None,
         description="Optional function for preprocessing inputs before execution",
@@ -88,6 +93,11 @@ class Tool(Element):
     def _validate_tool_schema(self) -> Self:
         if self.tool_schema is None:
             self.tool_schema = function_to_schema(self.func_callable)
+        if self.request_options is not None:
+            schema_ = self.request_options.model_json_schema()
+            schema_.pop("title", None)
+            self.tool_schema["function"]["parameters"] = schema_
+
         return self
 
     @property
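The net effect of `request_options` is that one Pydantic model both validates incoming arguments (via the `FunctionCalling` hook above) and replaces the auto-generated parameter schema. The substitution step is easy to reproduce standalone; this sketch mirrors `_validate_tool_schema` with an illustrative model and tool name:

```python
from pydantic import BaseModel


class ReadDocArgs(BaseModel):
    path: str
    start_page: int | None = None


# Mirrors _validate_tool_schema: the model's JSON schema (minus its title)
# becomes the tool's "parameters" block.
schema_ = ReadDocArgs.model_json_schema()
schema_.pop("title", None)

tool_schema = {"function": {"name": "read_doc", "parameters": schema_}}
print(tool_schema["function"]["parameters"]["properties"])
# {'path': {...}, 'start_page': {...}}
```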
@@ -1,5 +1,4 @@
 # forms/flow.py
-from typing import List
 
 from pydantic import BaseModel, ConfigDict, Field
 
@@ -1,6 +1,6 @@
 # forms/form.py
 
-from typing import Any, Optional
+from typing import Any
 
 from pydantic import ConfigDict, Field, model_validator
 from typing_extensions import Self
@@ -81,68 +81,121 @@ def format_text_item(item: Any) -> str:
 
 def format_text_content(content: dict) -> str:
     """
-    Convert a dictionary with keys like 'guidance', 'instruction', 'context', etc.
-    into a readable text block.
+    Convert a content dictionary into a minimal textual summary for LLM consumption.
 
-    Args:
-        content (dict): The content dictionary.
-
-    Returns:
-        str: A textual summary.
+    Emphasizes brevity and clarity:
+    - Skips empty or None fields.
+    - Bullet-points for lists.
+    - Key-value pairs for dicts.
+    - Minimal headings for known fields (guidance, instruction, etc.).
     """
-    if "plain_content" in content and isinstance(
-        content["plain_content"], str
-    ):
-        return content["plain_content"]
 
-    msg = "\n---\n # Task\n"
+    if isinstance(content.get("plain_content"), str):
+        return content["plain_content"]
 
-    for k in [
+    lines = []
+    # We only want minimal headings for certain known fields:
+    known_field_order = [
         "guidance",
         "instruction",
         "context",
         "tool_schemas",
         "respond_schema_info",
         "request_response_format",
-    ]:
-        if k in content:
-            v = content[k]
-
-            if k == "tool_schemas":
-                if "tools" in v:
-                    v = v["tools"]
-
-                if isinstance(v, list):
-                    z = []
-                    for idx, z_ in enumerate(v):
-                        if isinstance(z_, dict) and "function" in z_:
-                            z.append({f"Tool {idx+1}": z_["function"]})
-                    v = z
-
-            if k == "request_response_format":
-                k = "response format"
-
-            if v not in [None, [], {}, UNDEFINED]:
-                if isinstance(v, list):
-                    msg += f"## - **{k}**\n"
-                    for i in v:
-                        if (
-                            len(format_text_item(v).replace("\n", "").strip())
-                            > 0
-                        ):
-                            msg += format_text_item(i).strip()
-                            msg += "\n"
-                else:
-                    if len(format_text_item(v).replace("\n", "").strip()) > 0:
-                        msg += (
-                            f"## - **{k}**\n{format_text_item(v).strip()}\n\n"
-                        )
-
-    if not msg.endswith("\n\n"):
-        msg += "\n\n---\n"
+    ]
+
+    # Render known fields in that order
+    for field in known_field_order:
+        if field in content:
+            val = content[field]
+            if _is_not_empty(val):
+                if field == "request_response_format":
+                    field = "response format"
+                elif field == "respond_schema_info":
+                    field = "response schema info"
+                lines.append(f"\n## {field.upper()}:\n")
+                rendered = _render_value(val)
+                # Indent or bullet the rendered result if multiline
+                # We'll keep it minimal: each line is prefixed with "  ".
+                lines.extend(
+                    f"  {line}"
+                    for line in rendered.split("\n")
+                    if line.strip()
+                )
+
+    # Join all lines into a single string
+    return "\n".join(lines).strip()
+
+
+def _render_value(val) -> str:
+    """
+    Render an arbitrary value (scalar, list, dict) in minimal form:
+    - Lists become bullet points.
+    - Dicts become key-value lines.
+    - Strings returned directly.
+    """
+    if isinstance(val, dict):
+        return _render_dict(val)
+    elif isinstance(val, list):
+        return _render_list(val)
     else:
-        msg += "---\n"
-    return msg
+        return str(val).strip()
+
+
+def _render_dict(dct: dict) -> str:
+    """
+    Minimal bullet list for dictionary items:
+      key: rendered subvalue
+    """
+    lines = []
+    for k, v in dct.items():
+        if not _is_not_empty(v):
+            continue
+        subrendered = _render_value(v)
+        # Indent subrendered if multiline
+        sublines = subrendered.split("\n")
+        if len(sublines) == 1:
+            if sublines[0].startswith("- "):
+                lines.append(f"- {k}: {sublines[0][2:]}")
+            else:
+                lines.append(f"- {k}: {sublines[0]}")
+        else:
+            lines.append(f"- {k}:")
+            for s in sublines:
+                lines.append(f"  {s}")
+    return "\n".join(lines)
+
+
+def _render_list(lst: list) -> str:
+    """
+    Each item in the list gets a bullet. Nested structures are recursed.
+    """
+    lines = []
+    for idx, item in enumerate(lst, 1):
+        sub = _render_value(item)
+        sublines = sub.split("\n")
+        if len(sublines) == 1:
+            if sublines[0].startswith("- "):
+                lines.append(f"- {sublines[0][2:]}")
+            else:
+                lines.append(f"- {sublines[0]}")
+        else:
+            lines.append("-")
+            lines.extend(f"  {s}" for s in sublines)
+    return "\n".join(lines)
+
+
+def _is_not_empty(x) -> bool:
+    """
+    Returns True if x is neither None, nor empty string/list/dict.
+    """
+    if x is None:
+        return False
+    if isinstance(x, (list, dict)) and not x:
+        return False
+    if isinstance(x, str) and not x.strip():
+        return False
+    return True
 
 
 def format_image_content(
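Given the helpers above, the rendering is easy to pin down with a small example (assuming `format_text_content` and the private helpers are in scope):

```python
content = {
    "instruction": "Summarize the findings",
    "context": ["paper.pdf, pages 1-2"],
    "tool_schemas": {},  # empty values are skipped entirely
}

print(format_text_content(content))
# ## INSTRUCTION:
#
#   Summarize the findings
#
# ## CONTEXT:
#
#   - paper.pdf, pages 1-2
```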
@@ -61,6 +61,7 @@ class EndpointConfig(BaseModel):
         use_enum_values=True,
     )
 
+    name: str | None = None
     provider: str | None = None
     base_url: str | None = None
     endpoint: str
@@ -75,6 +76,7 @@ class EndpointConfig(BaseModel):
     requires_tokens: bool = False
     api_version: str | None = None
     allowed_roles: list[str] | None = None
+    request_options: type | None = None
 
 
 class EndPoint(ABC):
@@ -100,6 +102,11 @@ class EndPoint(ABC):
         config.update(kwargs)
         self.config = EndpointConfig(**config)
 
+    @property
+    def name(self) -> str | None:
+        """str | None: The name of the endpoint, if any."""
+        return self.config.name or self.endpoint
+
     @property
     def is_streamable(self) -> bool:
         """bool: Whether this endpoint supports streaming responses."""
@@ -185,6 +192,10 @@ class EndPoint(ABC):
         """bool: Indicates if this endpoint uses role-based messages."""
         return self.allowed_roles is not None
 
+    @property
+    def request_options(self) -> type | None:
+        return self.config.request_options
+
 
     def create_payload(self, **kwargs) -> dict:
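At the config level, the two new fields tie the endpoint pieces together: `name` gives an endpoint a stable label (falling back to `endpoint` via the new property), and `request_options` carries the Pydantic model used for request validation. A hypothetical config; the import path is an assumption based on the class shown here and may differ in the actual package layout:

```python
from pydantic import BaseModel

# Path assumed from lionagi's service layout; adjust if it differs.
from lionagi.service.endpoints.base import EndpointConfig


class ChatRequest(BaseModel):
    model: str
    messages: list[dict]


config = EndpointConfig(
    name="openai_chat",           # new: stable label, surfaced via EndPoint.name
    provider="openai",
    endpoint="chat/completions",
    request_options=ChatRequest,  # new: request-validation model
)
print(config.name, config.request_options)
```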