lionagi 0.7.6__py3-none-any.whl → 0.7.7__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as published to their public registry. The information is provided for informational purposes only.
lionagi/__init__.py CHANGED
@@ -2,6 +2,8 @@
  #
  # SPDX-License-Identifier: Apache-2.0

+ from pydantic import BaseModel, Field
+
  from . import _types as types
  from .operations import types as op
  from .operatives import types as ops_types  # deprecated
@@ -20,4 +22,6 @@ __all__ = (
      "ops_types",
      "op",
      "__version__",
+     "BaseModel",
+     "Field",
  )
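With the new re-exports, downstream code can pull pydantic's BaseModel and Field straight from the package root. A minimal sketch (the Summary model is hypothetical, not part of lionagi):

from lionagi import BaseModel, Field  # re-exported from pydantic as of 0.7.7

class Summary(BaseModel):
    title: str = Field(..., description="short title")
    bullets: list[str] = Field(default_factory=list)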
lionagi/libs/token_transform/synthlang.py CHANGED
@@ -387,10 +387,16 @@ async def translate_to_synthlang(
      calculator = TokenCalculator()

      len_tokens = calculator.tokenize(text, return_tokens=False)
-     out = await branch.communicate(
+
+     kwargs["guidance"] = (
+         "Following SynthLang, translate the provided text into SynthLang syntax. "
+         "Shrink the token size by 60-85%. Return only the translated text.\n\n"
+         + kwargs.get("guidance", "")
+     )
+
+     out = await branch.chat(
          instruction=f"Converts the given text into SynthLang's hyper-efficient format.",
          context="Text to convert:\n\n" + text,
-         guidance=f"Following SynthLang, translate the provided text into SynthLang syntax. Shrink the token size by 60-85%. Return only the translated text.",
          **kwargs,
      )
      if sys1:
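The guidance string is now built up front and merged with any caller-supplied guidance instead of being passed as a fixed keyword. A standalone sketch of that merge (not the packaged function itself; the caller input is hypothetical):

kwargs = {"guidance": "Keep all numeric values verbatim."}  # hypothetical caller input
kwargs["guidance"] = (
    "Following SynthLang, translate the provided text into SynthLang syntax. "
    "Shrink the token size by 60-85%. Return only the translated text.\n\n"
    + kwargs.get("guidance", "")
)
# the caller's guidance is appended after the built-in instruction rather than discarded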
lionagi/operations/interpret/interpret.py CHANGED
@@ -13,6 +13,7 @@ async def interpret(
      text: str,
      domain: str | None = None,
      style: str | None = None,
+     sample_writing: str | None = None,
      **kwargs,
  ) -> str:
      instruction = (
@@ -25,14 +26,17 @@ async def interpret(
          f"Desired style: {style or 'concise'}. "
          "You can add or clarify context if needed."
      )
+     if sample_writing:
+         guidance += f" Sample writing: {sample_writing}"
+
      context = [f"User input: {text}"]

      # Default temperature if none provided
      kwargs["temperature"] = kwargs.get("temperature", 0.1)
+     kwargs["guidance"] = guidance + "\n" + kwargs.get("guidance", "")

      refined_prompt = await branch.chat(
          instruction=instruction,
-         guidance=guidance,
          context=context,
          **kwargs,
      )
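A hedged usage sketch of the new sample_writing parameter, assuming the operation is reached through the Branch wrapper as in prior releases:

refined = await branch.interpret(
    "make this email sound nicer",
    domain="customer support",
    style="friendly but brief",
    sample_writing="Hi Sam, thanks for flagging this - we will follow up today.",
)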
lionagi/operations/manager.py ADDED
@@ -0,0 +1,22 @@
+ from typing import Callable
+
+ from lionagi.protocols._concepts import Manager
+
+ """
+ experimental
+ """
+
+
+ class OperationManager(Manager):
+
+     def __init__(self, *args, **kwargs):
+         super().__init__()
+         self.registry: dict[str, Callable] = {}
+         self.register_operations(*args, **kwargs)
+
+     def register_operations(self, *args, **kwargs) -> None:
+         operations = {}
+         if args:
+             operations = {i.__name__ for i in args if hasattr(i, "__name__")}
+         operations.update(kwargs)
+         self.registry.update(operations)
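A minimal sketch of the intended registration pattern. Note the positional branch builds a set of names rather than a name-to-callable mapping, so keyword registration is the dependable path for this experimental module; the summarize operation below is hypothetical:

from lionagi.operations.manager import OperationManager

async def summarize(branch, text: str) -> str:
    return await branch.chat(instruction="Summarize the text.", context=text)

mgr = OperationManager(summarize=summarize)  # keyword form maps name -> callable
assert mgr.registry["summarize"] is summarize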
lionagi/service/endpoints/match_endpoint.py CHANGED
@@ -11,50 +11,55 @@ def match_endpoint(
      endpoint: str,
      endpoint_params: list[str] | None = None,
  ) -> EndPoint:
-     if endpoint not in ["chat/completions", "chat", "messages"]:
-         raise ValueError(
-             "Invalid endpoint, must be one of 'chat/completions' (openai compatible), 'chat' (an alias), 'messages' (anthropic), other endpoints are not supported yet"
+
+     if endpoint in ["chat/completions", "chat", "messages"]:
+         from ..providers.openai_.chat_completions import (
+             OpenAIChatCompletionEndPoint,
          )
-     from ..providers.openai_.chat_completions import (
-         OpenAIChatCompletionEndPoint,
-     )

-     if provider == "openai":
-         return OpenAIChatCompletionEndPoint()
+         if provider == "openai":
+             return OpenAIChatCompletionEndPoint()

-     if provider == "anthropic":
-         from ..providers.anthropic_.messages import (
-             AnthropicChatCompletionEndPoint,
-         )
+         if provider == "anthropic":
+             from ..providers.anthropic_.messages import (
+                 AnthropicChatCompletionEndPoint,
+             )

-         return AnthropicChatCompletionEndPoint()
+             return AnthropicChatCompletionEndPoint()

-     if provider == "groq":
-         from ..providers.groq_.chat_completions import (
-             GroqChatCompletionEndPoint,
-         )
+         if provider == "groq":
+             from ..providers.groq_.chat_completions import (
+                 GroqChatCompletionEndPoint,
+             )

-         return GroqChatCompletionEndPoint()
+             return GroqChatCompletionEndPoint()

-     if provider == "perplexity":
-         from ..providers.perplexity_.chat_completions import (
-             PerplexityChatCompletionEndPoint,
-         )
+         if provider == "perplexity":
+             from ..providers.perplexity_.chat_completions import (
+                 PerplexityChatCompletionEndPoint,
+             )

-         return PerplexityChatCompletionEndPoint()
+             return PerplexityChatCompletionEndPoint()

-     if provider == "openrouter":
-         from ..providers.openrouter_.chat_completions import (
-             OpenRouterChatCompletionEndPoint,
+         if provider == "openrouter":
+             from ..providers.openrouter_.chat_completions import (
+                 OpenRouterChatCompletionEndPoint,
+             )
+
+             return OpenRouterChatCompletionEndPoint()
+
+         return OpenAIChatCompletionEndPoint(
+             config={
+                 "provider": provider,
+                 "base_url": base_url,
+                 "endpoint": endpoint,
+                 "endpoint_params": endpoint_params,
+             }
          )

-         return OpenRouterChatCompletionEndPoint()
+     if provider == "exa" and endpoint == "search":
+         from ..providers.exa_.search import ExaSearchEndPoint
+
+         return ExaSearchEndPoint()

-     return OpenAIChatCompletionEndPoint(
-         config={
-             "provider": provider,
-             "base_url": base_url,
-             "endpoint": endpoint,
-             "endpoint_params": endpoint_params,
-         }
-     )
+     raise ValueError(f"Unsupported endpoint: {endpoint}")
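A hedged routing sketch; the provider and base_url argument names are taken from the function body and assumed to sit earlier in the signature:

from lionagi.service.endpoints.match_endpoint import match_endpoint

ep = match_endpoint(
    provider="exa",
    base_url="https://api.exa.ai",
    endpoint="search",
    endpoint_params=None,
)
# returns ExaSearchEndPoint; unknown chat providers now fall back to the
# OpenAI-compatible endpoint configured with the supplied base_url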
lionagi/service/imodel.py CHANGED
@@ -102,6 +102,8 @@ class iModel:
              api_key = "PERPLEXITY_API_KEY"
          case "groq":
              api_key = "GROQ_API_KEY"
+         case "exa":
+             api_key = "EXA_API_KEY"

      if os.getenv(api_key, None) is not None:
          self.api_key_scheme = api_key
@@ -198,7 +200,9 @@ class iModel:
          except Exception as e:
              raise ValueError(f"Failed to stream API call: {e}")

-     async def invoke(self, **kwargs) -> APICalling | None:
+     async def invoke(
+         self, api_call: APICalling = None, **kwargs
+     ) -> APICalling | None:
          """Invokes a rate-limited API call with the given arguments.

          Args:
@@ -215,8 +219,9 @@ class iModel:
              If the call fails or if an error occurs during invocation.
          """
          try:
-             kwargs.pop("stream", None)
-             api_call = self.create_api_calling(**kwargs)
+             if api_call is None:
+                 kwargs.pop("stream", None)
+                 api_call = self.create_api_calling(**kwargs)
              if (
                  self.executor.processor is None
                  or self.executor.processor.is_stopped()
@@ -252,6 +257,15 @@ class iModel:
          """
          return self.endpoint.sequential_exchange

+     @property
+     def model_name(self) -> str:
+         """str: The name of the model used by the endpoint.
+
+         Returns:
+             The model name if available; otherwise, an empty string.
+         """
+         return self.kwargs.get("model", "")
+
      def to_dict(self):
          kwargs = self.kwargs
          if "kwargs" in self.kwargs:
File without changes
lionagi/service/providers/exa_/models.py ADDED
@@ -0,0 +1,160 @@
+ from enum import Enum
+
+ from pydantic import BaseModel, Field
+
+
+ class CategoryEnum(str, Enum):
+     company = "company"
+     research_paper = "research paper"
+     news = "news"
+     pdf = "pdf"
+     github = "github"
+     tweet = "tweet"
+     personal_site = "personal site"
+     linkedin_profile = "linkedin profile"
+     financial_report = "financial report"
+
+
+ class LivecrawlEnum(str, Enum):
+     never = "never"
+     fallback = "fallback"
+     always = "always"
+
+
+ class SearchTypeEnum(str, Enum):
+     keyword = "keyword"
+     neural = "neural"
+     auto = "auto"
+
+
+ class ContentsText(BaseModel):
+     includeHtmlTags: bool | None = Field(
+         default=False,
+         description="Whether to include HTML tags in the text. Set to True if you want to retain HTML structure for the LLM to interpret.",
+     )
+     maxCharacters: int | None = Field(
+         default=None,
+         description="The maximum number of characters to return from the webpage text.",
+     )
+
+
+ class ContentsHighlights(BaseModel):
+     highlightsPerUrl: int | None = Field(
+         default=1,
+         description="The number of highlight snippets you want per page.",
+     )
+     numSentences: int | None = Field(
+         default=5,
+         description="Number of sentences to return in each highlight snippet.",
+     )
+     query: None | str = Field(
+         default=None,
+         description="A specific query used to generate the highlight snippets.",
+     )
+
+
+ class ContentsSummary(BaseModel):
+     query: None | str = Field(
+         default=None,
+         description="A specific query used to generate a summary of the webpage.",
+     )
+
+
+ class ContentsExtras(BaseModel):
+     links: int | None = Field(
+         default=None, description="Number of links to return from each page."
+     )
+     imageLinks: int | None = Field(
+         default=None, description="Number of images to return for each result."
+     )
+
+
+ class Contents(BaseModel):
+     text: None | ContentsText = Field(
+         default=None,
+         description="Return full or partial text for each page, with optional HTML structure or size limit.",
+     )
+     highlights: None | ContentsHighlights = Field(
+         default=None, description="Return snippet highlights for each page."
+     )
+     summary: None | ContentsSummary = Field(
+         default=None, description="Return a short summary of each page."
+     )
+     livecrawl: None | LivecrawlEnum = Field(
+         default=LivecrawlEnum.never,
+         description="Livecrawling setting for each page. Options: never, fallback, always.",
+     )
+     livecrawlTimeout: int | None = Field(
+         default=10000,
+         description="Timeout in milliseconds for livecrawling. Default 10000.",
+     )
+     subpages: int | None = Field(
+         default=None,
+         description="Number of subpages to crawl within each URL.",
+     )
+     subpageTarget: None | str | list[str] = Field(
+         default=None,
+         description="A target subpage or multiple subpages (list) to crawl, e.g. 'cited papers'.",
+     )
+     extras: None | ContentsExtras = Field(
+         default=None,
+         description="Additional extras like links or images to return for each page.",
+     )
+
+
+ class ExaSearchRequest(BaseModel):
+     query: str = Field(
+         ...,
+         description="The main query string describing what you're looking for.",
+     )
+     category: None | CategoryEnum = Field(
+         default=None,
+         description="A data category to focus on, such as 'company', 'research paper', 'news', etc.",
+     )
+     type: None | SearchTypeEnum = Field(
+         default=None,
+         description="The type of search to run. Can be 'auto', 'keyword', or 'neural'.",
+     )
+     useAutoprompt: None | bool = Field(
+         default=False,
+         description="If True, Exa auto-optimizes your query for best results (neural or auto search only).",
+     )
+     numResults: int | None = Field(
+         default=10, description="Number of results to return. Default is 10."
+     )
+     includeDomains: None | list[str] = Field(
+         default=None,
+         description="List of domains you want to include exclusively.",
+     )
+     excludeDomains: None | list[str] = Field(
+         default=None,
+         description="List of domains you do NOT want to see in the results.",
+     )
+     startCrawlDate: None | str = Field(
+         default=None,
+         description="Include results crawled after this ISO date (e.g., '2023-01-01T00:00:00.000Z').",
+     )
+     endCrawlDate: None | str = Field(
+         default=None,
+         description="Include results crawled before this ISO date.",
+     )
+     startPublishedDate: None | str = Field(
+         default=None,
+         description="Only return results published after this ISO date.",
+     )
+     endPublishedDate: None | str = Field(
+         default=None,
+         description="Only return results published before this ISO date.",
+     )
+     includeText: None | list[str] = Field(
+         default=None,
+         description="Strings that must appear in the webpage text. Only a single string up to 5 words is currently supported.",
+     )
+     excludeText: None | list[str] = Field(
+         default=None,
+         description="Strings that must NOT appear in the webpage text. Only a single string up to 5 words is currently supported.",
+     )
+     contents: None | Contents = Field(
+         default=None,
+         description="Dict defining the different ways you want to retrieve webpage contents, including text, highlights, or summaries.",
+     )
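A hedged construction sketch; serialization through pydantic v2's model_dump is an assumption, and the nested dicts are coerced into the Contents models:

req = ExaSearchRequest(
    query="open-source agent frameworks",
    type="neural",
    numResults=5,
    contents={"text": {"maxCharacters": 2000}, "highlights": {"highlightsPerUrl": 2}},
)
payload = req.model_dump(exclude_none=True)  # only explicitly set fields reach the API body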
lionagi/service/providers/exa_/search.py ADDED
@@ -0,0 +1,80 @@
+ from typing import TYPE_CHECKING, Literal
+
+ from lionagi.service.endpoints.base import EndPoint
+
+ if TYPE_CHECKING:
+     from .models import ExaSearchRequest
+
+
+ CATEGORY_OPTIONS = Literal[
+     "article",
+     "book",
+     "company",
+     "research paper",
+     "news",
+     "pdf",
+     "github",
+     "tweet",
+     "personal site",
+     "linkedin profile",
+     "financial report",
+ ]
+
+ SEARCH_CONFIG = {
+     "provider": "exa",
+     "base_url": "https://api.exa.ai",
+     "endpoint": "search",
+     "method": "post",
+     "openai_compatible": False,
+     "is_invokeable": False,
+     "requires_tokens": False,
+     "is_streamable": False,
+     "required_kwargs": {
+         "query",
+     },
+     "optional_kwargs": {
+         "category",
+         "contents",
+         "endCrawlDate",
+         "endPublishedDate",
+         "excludeDomains",
+         "excludeText",
+         "includeDomains",
+         "includeText",
+         "numResults",
+         "startCrawlDate",
+         "startPublishedDate",
+         "type",  # keyword, neural, auto
+         "useAutoPrompt",
+     },
+ }
+
+
+ class ExaSearchEndPoint(EndPoint):
+
+     def __init__(self, config: dict = SEARCH_CONFIG):
+         super().__init__(config)
+
+     def create_payload(
+         self, request_obj: "ExaSearchRequest" = None, **kwargs
+     ) -> dict:
+         if request_obj is not None:
+             kwargs.update(request_obj.to_dict(exclude_none=True))
+
+         payload = {}
+         is_cached = kwargs.get("is_cached", False)
+         headers = kwargs.get("headers", {})
+
+         for k, v in kwargs.items():
+             if k in self.acceptable_kwargs:
+                 payload[k] = v
+         if "api_key" in kwargs:
+             headers["x-api-key"] = kwargs["api_key"]
+         if "content-type" not in kwargs:
+             headers["content-type"] = "application/json"
+
+         return {
+             "payload": payload,
+             "headers": headers,
+             "is_cached": is_cached,
+         }
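A hedged usage sketch: create_payload keeps only kwargs listed in the endpoint's required/optional sets, so the api_key ends up in the headers rather than the request body.

import os

ep = ExaSearchEndPoint()
request = ep.create_payload(
    query="lionagi agent framework",
    numResults=3,
    api_key=os.environ["EXA_API_KEY"],  # forwarded as the x-api-key header
)
# request == {"payload": {"query": ..., "numResults": 3}, "headers": {...}, "is_cached": False}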
lionagi/service/providers/exa_/types.py ADDED
@@ -0,0 +1,7 @@
+ from .models import ExaSearchRequest
+ from .search import ExaSearchEndPoint
+
+ __all__ = (
+     "ExaSearchRequest",
+     "ExaSearchEndPoint",
+ )
lionagi/service/providers/types.py ADDED
@@ -0,0 +1,17 @@
+ from .anthropic_.messages import AnthropicChatCompletionEndPoint
+ from .exa_.models import ExaSearchRequest
+ from .exa_.search import ExaSearchEndPoint
+ from .groq_.chat_completions import GroqChatCompletionEndPoint
+ from .openai_.chat_completions import OpenAIChatCompletionEndPoint
+ from .openrouter_.chat_completions import OpenRouterChatCompletionEndPoint
+ from .perplexity_.chat_completions import PerplexityChatCompletionEndPoint
+
+ __all__ = (
+     "AnthropicChatCompletionEndPoint",
+     "ExaSearchEndPoint",
+     "ExaSearchRequest",
+     "GroqChatCompletionEndPoint",
+     "OpenAIChatCompletionEndPoint",
+     "OpenRouterChatCompletionEndPoint",
+     "PerplexityChatCompletionEndPoint",
+ )
lionagi/version.py CHANGED
@@ -1 +1 @@
- __version__ = "0.7.6"
+ __version__ = "0.7.7"
lionagi-0.7.6.dist-info/METADATA → lionagi-0.7.7.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lionagi
- Version: 0.7.6
+ Version: 0.7.7
  Summary: An Intelligence Operating System.
  Author-email: HaiyangLi <quantocean.li@gmail.com>
  License: Apache License
lionagi-0.7.6.dist-info/RECORD → lionagi-0.7.7.dist-info/RECORD CHANGED
@@ -1,10 +1,10 @@
- lionagi/__init__.py,sha256=Z_cWmXAAYFrUDQsB9xJR8SqCrc7fKShllJFQd1N11BI,505
+ lionagi/__init__.py,sha256=-tKLn-wTj_VK4XQr3CV16ioTNnaUYD2fhUAtJOZZ0vU,574
  lionagi/_class_registry.py,sha256=dutMsw-FQNqVV5gGH-NEIv90uBkSr8fERJ_x3swbb-s,3112
  lionagi/_errors.py,sha256=wNKdnVQvE_CHEstK7htrrj334RA_vbGcIds-3pUiRkc,455
  lionagi/_types.py,sha256=9g7iytvSj3UjZxD-jL06_fxuNfgZyWT3Qnp0XYp1wQU,63
  lionagi/settings.py,sha256=k9zRJXv57TveyfHO3Vr9VGiKrSwlRUUVKt5zf6v9RU4,1627
  lionagi/utils.py,sha256=X12H-O8Lx9tUOKGtjpoxHjRsKYHRqty0qD9i2W12kpI,73121
- lionagi/version.py,sha256=wu65dmVM9fKR1rBHH263ls8Ca2FZzb0ejYcrP_Ld0iY,22
+ lionagi/version.py,sha256=eOm8myGPtPLNpkuxL0xhVmstPQbwXv3Ok7FbH0re-TA,22
  lionagi/libs/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
  lionagi/libs/parse.py,sha256=tpEbmIRGuHhLCJlUlm6fjmqm_Z6XJLAXGNFHNuk422I,1011
  lionagi/libs/file/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
@@ -37,7 +37,7 @@ lionagi/libs/schema/json_schema.py,sha256=Z45azAI_7lr5BWj-X6VQwUhuvuqXibciXgla8w
  lionagi/libs/token_transform/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  lionagi/libs/token_transform/llmlingua.py,sha256=DkeLUlrb7rGx3nZ04aADU9HXXu5mZTf_DBwT0xhzIv4,7
  lionagi/libs/token_transform/perplexity.py,sha256=XX-RoqGbeVYSl7fG1T_hF3I2pCAJ0A1LSKkrUpqsbeI,14091
- lionagi/libs/token_transform/synthlang.py,sha256=dD2JD0WCB0xLMF2PG8nJluJXEQ6VSTyqmTnmrFm9IgU,12709
+ lionagi/libs/token_transform/synthlang.py,sha256=xQYbpKIX5T7mK9PoBFikJGB8dp18JlLHdwpFhrIRMgQ,12778
  lionagi/libs/validate/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
  lionagi/libs/validate/common_field_validators.py,sha256=5pu3kH13dVy1Nq-AQcHx0bIPusOnhvvCwUmaBX5l8bw,4778
  lionagi/libs/validate/fuzzy_match_keys.py,sha256=S7kzJnh-aKDMFbecmtMjFHNk2h-xMJqtQygMnq839lY,6255
@@ -45,6 +45,7 @@ lionagi/libs/validate/fuzzy_validate_mapping.py,sha256=SQqyxgrAgJ5zBKxIAnulWsZXb
  lionagi/libs/validate/string_similarity.py,sha256=7x8D4LZCMNJGz2ZCeKmct7Nc4i8m_ORoHIeGvkeMZOA,8733
  lionagi/libs/validate/validate_boolean.py,sha256=h3d7Dn7asJokBozWaKxaV_3Y6vUWBc0-zfNJjTQ9Bo8,3614
  lionagi/operations/__init__.py,sha256=O7nV0tedpUe7_OlUWmCcduGPFtqtzWZcR_SIOnjLsro,134
+ lionagi/operations/manager.py,sha256=qjDMSOJ35XjDXkVq0SlEb4inTS7OXGwxly2doiQCNas,556
  lionagi/operations/types.py,sha256=LIa68xcyKLVafof-DSFwKtSkneuYPFqrtGyClohYI6o,704
  lionagi/operations/utils.py,sha256=Twy6L_UFt9JqJFRYuKKTKVZIXsePidNl5ipcYcCbesI,1220
  lionagi/operations/ReAct/ReAct.py,sha256=tAZ-3Ya68tVUa112wgOMUJpBVw-RWBSYTfgicbInRuQ,3954
@@ -62,7 +63,7 @@ lionagi/operations/communicate/communicate.py,sha256=1PzBpzgATcAdBog0EUxtj5_uJGi
  lionagi/operations/instruct/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
  lionagi/operations/instruct/instruct.py,sha256=CYICzWvmgALtSPIetfF3csx6WDsCuiu4NRRPL56UA2g,795
  lionagi/operations/interpret/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
- lionagi/operations/interpret/interpret.py,sha256=EtgXTeOezEPR03HSUVz8PGK-xf8p2nXoUMUhFkAntK0,1081
+ lionagi/operations/interpret/interpret.py,sha256=mjTcOCOixc34JNnX0WowSVACtZXz9IBuzjp9VyOus5w,1244
  lionagi/operations/operate/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
  lionagi/operations/operate/operate.py,sha256=kzH7R4J3O-1Ue-PYqIoEdrXHrkYYQVfHa6gSC4Km_sc,7003
  lionagi/operations/parse/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
@@ -160,18 +161,23 @@ lionagi/protocols/messages/templates/instruction_message.jinja2,sha256=L-ptw5OHx
  lionagi/protocols/messages/templates/system_message.jinja2,sha256=JRKJ0aFpYfaXSFouKc_N4unZ35C3yZTOWhIrIdCB5qk,215
  lionagi/protocols/messages/templates/tool_schemas.jinja2,sha256=ozIaSDCRjIAhLyA8VM6S-YqS0w2NcctALSwx4LjDwII,126
  lionagi/service/__init__.py,sha256=DMGXIqPsmut9H5GT0ZeSzQIzYzzPwI-2gLXydpbwiV8,21
- lionagi/service/imodel.py,sha256=MaiBcKM00vH0tq0nD_C7jV6S7OyV71a4YwVDrDfPMrA,11539
+ lionagi/service/imodel.py,sha256=Mv5wVsmZj4hYmrvXfHVmA-fskfAKrDY0mbdmgOCAGoE,11946
  lionagi/service/manager.py,sha256=MKSYBkg23s7YhZy5GEFdnpspEnhPVfFhpkpoJe20D7k,1435
  lionagi/service/types.py,sha256=v9SAn5-GTmds4Mar13Or_VFrRHCinBK99dmeDUd-QNk,486
  lionagi/service/endpoints/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
  lionagi/service/endpoints/base.py,sha256=RbBBQIbtNtkP1LT9U3ZrhL35SfmVIpaq-AGW8d538k4,18275
  lionagi/service/endpoints/chat_completion.py,sha256=9ltSQaKPH43WdEDW32_-f5x07I9hOU8g-T_PAG-nYsQ,2529
- lionagi/service/endpoints/match_endpoint.py,sha256=n7F9NoTXfUBL29HrDcFLF5AXYR8pcx_IumQ7BiJXC-w,1740
+ lionagi/service/endpoints/match_endpoint.py,sha256=hIGYyok1y53FfI6av5NfYMygRIpDWYZbdCj0pJJfmPY,1874
  lionagi/service/endpoints/rate_limited_processor.py,sha256=umri0FofbyBSFdAQBEsviDB5K6N12LkRiXQgSOorGKg,4663
  lionagi/service/endpoints/token_calculator.py,sha256=MflqImGUr_1jh465hB7cUAaIPICBkjirvre1fWGXLrA,6161
  lionagi/service/providers/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
+ lionagi/service/providers/types.py,sha256=NS91ysRFwOs0cpNeQgFhmtl7JrSz2pJm-tt7sZILmQY,683
  lionagi/service/providers/anthropic_/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
  lionagi/service/providers/anthropic_/messages.py,sha256=PTZZ2VXVMRHWY84YFIzrft9gVrcH2V-NIq_Phi9_-xI,1760
+ lionagi/service/providers/exa_/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lionagi/service/providers/exa_/models.py,sha256=263KP-JSxbxmomNrFeYjB_cebquoMOsCJeWsiKZ0mL4,5420
+ lionagi/service/providers/exa_/search.py,sha256=Z9bOxTjPZbtI_qH4fSePYKSXhkc8N3ZFbCHdsml8YSA,1903
+ lionagi/service/providers/exa_/types.py,sha256=8ODjXpFajBE9-DGqBJNS--GObwmLSDi667xS84z_AgA,139
  lionagi/service/providers/groq_/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
  lionagi/service/providers/groq_/chat_completions.py,sha256=578NqQYyrIYjIemyL3bagvFGE6ear_w4S1HNlPWA5mg,1343
  lionagi/service/providers/openai_/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
@@ -183,7 +189,7 @@ lionagi/service/providers/perplexity_/chat_completions.py,sha256=SsDbrtXwQsR4Yu2
  lionagi/session/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
  lionagi/session/branch.py,sha256=JvErd9YUuGdWyLj37rtKOteSqV0ltn9lg0R2G8GO40c,62539
  lionagi/session/session.py,sha256=po6C7PnM0iu_ISHUo4PBzzQ61HFOgcsAUfPoO--eLak,8987
- lionagi-0.7.6.dist-info/METADATA,sha256=2qmdgrSm0NVeD-qf--7C-fT2q_14os4hDMBEogmw0uw,22819
- lionagi-0.7.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- lionagi-0.7.6.dist-info/licenses/LICENSE,sha256=VXFWsdoN5AAknBCgFqQNgPWYx7OPp-PFEP961zGdOjc,11288
- lionagi-0.7.6.dist-info/RECORD,,
+ lionagi-0.7.7.dist-info/METADATA,sha256=XzhIG5ZehXeF7UXYkd4dvIuqT1W8E969aAOliftxmJU,22819
+ lionagi-0.7.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ lionagi-0.7.7.dist-info/licenses/LICENSE,sha256=VXFWsdoN5AAknBCgFqQNgPWYx7OPp-PFEP961zGdOjc,11288
+ lionagi-0.7.7.dist-info/RECORD,,