lionagi 0.12.0__py3-none-any.whl → 0.12.2__py3-none-any.whl

This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
lionagi/service/providers/exa_/models.py CHANGED
@@ -1,160 +1,3 @@
-from enum import Enum
+from khive.providers.exa_ import ExaSearchRequest
 
-from pydantic import BaseModel, Field
-
-
-class CategoryEnum(str, Enum):
-    company = "company"
-    research_paper = "research paper"
-    news = "news"
-    pdf = "pdf"
-    github = "github"
-    tweet = "tweet"
-    personal_site = "personal site"
-    linkedin_profile = "linkedin profile"
-    financial_report = "financial report"
-
-
-class LivecrawlEnum(str, Enum):
-    never = "never"
-    fallback = "fallback"
-    always = "always"
-
-
-class SearchTypeEnum(str, Enum):
-    keyword = "keyword"
-    neural = "neural"
-    auto = "auto"
-
-
-class ContentsText(BaseModel):
-    includeHtmlTags: bool | None = Field(
-        default=False,
-        description="Whether to include HTML tags in the text. Set to True if you want to retain HTML structure for the LLM to interpret.",
-    )
-    maxCharacters: int | None = Field(
-        default=None,
-        description="The maximum number of characters to return from the webpage text.",
-    )
-
-
-class ContentsHighlights(BaseModel):
-    highlightsPerUrl: int | None = Field(
-        default=1,
-        description="The number of highlight snippets you want per page.",
-    )
-    numSentences: int | None = Field(
-        default=5,
-        description="Number of sentences to return in each highlight snippet.",
-    )
-    query: None | str = Field(
-        default=None,
-        description="A specific query used to generate the highlight snippets.",
-    )
-
-
-class ContentsSummary(BaseModel):
-    query: None | str = Field(
-        default=None,
-        description="A specific query used to generate a summary of the webpage.",
-    )
-
-
-class ContentsExtras(BaseModel):
-    links: int | None = Field(
-        default=None, description="Number of links to return from each page."
-    )
-    imageLinks: int | None = Field(
-        default=None, description="Number of images to return for each result."
-    )
-
-
-class Contents(BaseModel):
-    text: None | ContentsText = Field(
-        default=None,
-        description="Return full or partial text for each page, with optional HTML structure or size limit.",
-    )
-    highlights: None | ContentsHighlights = Field(
-        default=None, description="Return snippet highlights for each page."
-    )
-    summary: None | ContentsSummary = Field(
-        default=None, description="Return a short summary of each page."
-    )
-    livecrawl: None | LivecrawlEnum = Field(
-        default=LivecrawlEnum.never,
-        description="Livecrawling setting for each page. Options: never, fallback, always.",
-    )
-    livecrawlTimeout: int | None = Field(
-        default=10000,
-        description="Timeout in milliseconds for livecrawling. Default 10000.",
-    )
-    subpages: int | None = Field(
-        default=None,
-        description="Number of subpages to crawl within each URL.",
-    )
-    subpageTarget: None | str | list[str] = Field(
-        default=None,
-        description="A target subpage or multiple subpages (list) to crawl, e.g. 'cited papers'.",
-    )
-    extras: None | ContentsExtras = Field(
-        default=None,
-        description="Additional extras like links or images to return for each page.",
-    )
-
-
-class ExaSearchRequest(BaseModel):
-    query: str = Field(
-        ...,
-        description="The main query string describing what you're looking for.",
-    )
-    category: None | CategoryEnum = Field(
-        default=None,
-        description="A data category to focus on, such as 'company', 'research paper', 'news', etc.",
-    )
-    type: None | SearchTypeEnum = Field(
-        default=None,
-        description="The type of search to run. Can be 'auto', 'keyword', or 'neural'.",
-    )
-    useAutoprompt: None | bool = Field(
-        default=False,
-        description="If True, Exa auto-optimizes your query for best results (neural or auto search only).",
-    )
-    numResults: int | None = Field(
-        default=10, description="Number of results to return. Default is 10."
-    )
-    includeDomains: None | list[str] = Field(
-        default=None,
-        description="List of domains you want to include exclusively.",
-    )
-    excludeDomains: None | list[str] = Field(
-        default=None,
-        description="List of domains you do NOT want to see in the results.",
-    )
-    startCrawlDate: None | str = Field(
-        default=None,
-        description="Include results crawled after this ISO date (e.g., '2023-01-01T00:00:00.000Z').",
-    )
-    endCrawlDate: None | str = Field(
-        default=None,
-        description="Include results crawled before this ISO date.",
-    )
-    startPublishedDate: None | str = Field(
-        default=None,
-        description="Only return results published after this ISO date.",
-    )
-    endPublishedDate: None | str = Field(
-        default=None,
-        description="Only return results published before this ISO date.",
-    )
-    includeText: None | list[str] = Field(
-        default=None,
-        description="Strings that must appear in the webpage text. Only a single string up to 5 words is currently supported.",
-    )
-    excludeText: None | list[str] = Field(
-        default=None,
-        description="Strings that must NOT appear in the webpage text. Only a single string up to 5 words is currently supported.",
-    )
-    contents: None | Contents = Field(
-        default=None,
-        description="Dict defining the different ways you want to retrieve webpage contents, including text, highlights, or summaries.",
-    )
+__all__ = ("ExaSearchRequest",)
lionagi/service/providers/perplexity_/models.py CHANGED
@@ -1,144 +1,5 @@
-from enum import Enum
-from typing import Any
+from khive.providers.perplexity_ import (
+    PerplexityChatRequest as PerplexityChatCompletionRequest,
+)
 
-from pydantic import BaseModel, Field, model_validator
-
-
-class PerplexityRole(str, Enum):
-    """Roles allowed in Perplexity's messages."""
-
-    system = "system"
-    user = "user"
-    assistant = "assistant"
-
-
-class PerplexityMessage(BaseModel):
-    """
-    A single message in the conversation.
-    `role` can be 'system', 'user', or 'assistant'.
-    `content` is the text for that conversation turn.
-    """
-
-    role: PerplexityRole = Field(
-        ...,
-        description="The role of the speaker. Must be system, user, or assistant.",
-    )
-    content: str = Field(..., description="The text content of this message.")
-
-
-class PerplexityChatCompletionRequest(BaseModel):
-    """
-    Represents the request body for Perplexity's Chat Completions endpoint.
-    Endpoint: POST https://api.perplexity.ai/chat/completions
-    """
-
-    model: str = Field(
-        "sonar",
-        description="The model name, e.g. 'sonar', (the only model available at the time when this request model was updated, check doc for latest info).",
-    )
-    messages: list[PerplexityMessage] = Field(
-        ..., description="A list of messages forming the conversation so far."
-    )
-
-    # Optional parameters
-    frequency_penalty: float | None = Field(
-        default=None,
-        gt=0,
-        description=(
-            "Multiplicative penalty > 0. Values > 1.0 penalize repeated tokens more strongly. "
-            "Value=1.0 means no penalty. Incompatible with presence_penalty."
-        ),
-    )
-    presence_penalty: float | None = Field(
-        default=None,
-        ge=-2.0,
-        le=2.0,
-        description=(
-            "Penalizes tokens that have appeared so far (range -2 to 2). "
-            "Positive values encourage talking about new topics. Incompatible with frequency_penalty."
-        ),
-    )
-    max_tokens: int | None = Field(
-        default=None,
-        description=(
-            "Maximum number of completion tokens. If omitted, model generates tokens until it "
-            "hits stop or context limit."
-        ),
-    )
-    return_images: bool | None = Field(
-        default=None,
-        description="If True, attempt to return images (closed beta feature).",
-    )
-    return_related_questions: bool | None = Field(
-        default=None,
-        description="If True, attempt to return related questions (closed beta feature).",
-    )
-    search_domain_filter: list[Any] | None = Field(
-        default=None,
-        description=(
-            "List of domains to limit or exclude in the online search. Example: ['example.com', '-twitter.com']. "
-            "Supports up to 3 entries. (Closed beta feature.)"
-        ),
-    )
-    search_recency_filter: str | None = Field(
-        default=None,
-        description=(
-            "Returns search results within a specified time interval: 'month', 'week', 'day', or 'hour'."
-        ),
-    )
-    stream: bool | None = Field(
-        default=None,
-        description=(
-            "If True, response is returned incrementally via Server-Sent Events (SSE)."
-        ),
-    )
-    temperature: float | None = Field(
-        default=None,
-        ge=0.0,
-        lt=2.0,
-        description=(
-            "Controls randomness of sampling, range [0, 2). Higher => more random. "
-            "Defaults to 0.2."
-        ),
-    )
-    top_k: int | None = Field(
-        default=None,
-        ge=0,
-        le=2048,
-        description=(
-            "Top-K filtering. 0 disables top-k filtering. If set, only the top K tokens are considered. "
-            "We recommend altering either top_k or top_p, but not both."
-        ),
-    )
-    top_p: float | None = Field(
-        default=None,
-        ge=0.0,
-        le=1.0,
-        description=(
-            "Nucleus sampling threshold. We recommend altering either top_k or top_p, but not both."
-        ),
-    )
-
-    @model_validator(mode="before")
-    def validate_penalties(cls, values):
-        """
-        Disallow using both frequency_penalty != 1.0 and presence_penalty != 0.0 at once,
-        since the docs say they're incompatible.
-        """
-        freq_pen = values.get("frequency_penalty", 1.0)
-        pres_pen = values.get("presence_penalty", 0.0)
-
-        # The doc states frequency_penalty is incompatible with presence_penalty.
-        # We'll enforce that if presence_penalty != 0, frequency_penalty must be 1.0
-        # or vice versa. Adjust logic as needed.
-        if pres_pen != 0.0 and freq_pen != 1.0:
-            raise ValueError(
-                "presence_penalty is incompatible with frequency_penalty. "
-                "Please use only one: either presence_penalty=0 with freq_pen !=1, "
-                "or presence_penalty!=0 with freq_pen=1."
-            )
-        return values
-
-    def to_dict(self) -> dict:
-        """Return a dict suitable for JSON serialization and sending to Perplexity API."""
-        return self.model_dump(exclude_none=True)
+__all__ = ("PerplexityChatCompletionRequest",)
lionagi/version.py CHANGED
@@ -1 +1 @@
-__version__ = "0.12.0"
+__version__ = "0.12.2"
@@ -1,8 +1,8 @@
 Metadata-Version: 2.4
 Name: lionagi
-Version: 0.12.0
+Version: 0.12.2
 Summary: An Intelligence Operating System.
-Author-email: HaiyangLi <quantocean.li@gmail.com>
+Author-email: HaiyangLi <quantocean.li@gmail.com>, Liangbingyan Luo <llby_luo@outlook.com>
 License: Apache License
 Version 2.0, January 2004
 http://www.apache.org/licenses/
@@ -220,7 +220,7 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Requires-Python: >=3.10
 Requires-Dist: jinja2>=3.0.0
-Requires-Dist: khive>=0.2.6
+Requires-Dist: khive>=0.2.7
 Requires-Dist: pandas>=2.0.0
 Requires-Dist: tiktoken>=0.8.0
 Provides-Extra: all
@@ -391,7 +391,7 @@ We welcome issues, ideas, and pull requests:
 
 ```
 @software{Li_LionAGI_2023,
-author = {Haiyang Li},
+author = {Haiyang Li, Liangbingyan Luo},
 month = {12},
 year = {2023},
 title = {LionAGI: Towards Automated General Intelligence},
@@ -4,7 +4,7 @@ lionagi/_errors.py,sha256=JlBTFJnRWtVYcRxKb7fWFiJHLbykl1E19mSJ8sXYVxg,455
 lionagi/_types.py,sha256=iDdYewsP9rDrM7QY19_NDTcWUk7swp8vnGCrloHMtUM,53
 lionagi/settings.py,sha256=W52mM34E6jXF3GyqCFzVREKZrmnUqtZm_BVDsUiDI_s,1627
 lionagi/utils.py,sha256=KDcxyMcDrAMxixkQ1FhGkC81bXjae5_nYWB-vh8lyEc,53114
-lionagi/version.py,sha256=eHjt9DPsMbptabS2yGx9Yhbyzq5hFSUHXb7zc8Q_8-o,23
+lionagi/version.py,sha256=NJQQPiZZfrBXFMqZlsia0JrhloS2PexbdxYYUs0c2Us,23
 lionagi/adapters/__init__.py,sha256=FJBV1Fb7GR9mcRApEB9bNP3IRMQ9Qjg5aVTouZFyTBU,45
 lionagi/adapters/adapter.py,sha256=aW7s1OKAdxHd8HBv2UcThn-r2Q08EyArssNyFobMLuA,3357
 lionagi/adapters/json_adapter.py,sha256=EJj0Jev46ZhU3ZMnlYwyzN2rLxjLCVrMDpHkEuggBvk,4561
@@ -193,7 +193,7 @@ lionagi/service/providers/types.py,sha256=NS91ysRFwOs0cpNeQgFhmtl7JrSz2pJm-tt7sZ
 lionagi/service/providers/anthropic_/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
 lionagi/service/providers/anthropic_/messages.py,sha256=EnV2vh60k0aQvtnUitHzTlSmyrFxTVxcXAldANg7Rzc,3148
 lionagi/service/providers/exa_/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lionagi/service/providers/exa_/models.py,sha256=263KP-JSxbxmomNrFeYjB_cebquoMOsCJeWsiKZ0mL4,5420
+lionagi/service/providers/exa_/models.py,sha256=wwcMB-NOQ0uswK4ymc89bgCudcA_rkj_xogt6jF_rrI,83
 lionagi/service/providers/exa_/search.py,sha256=Z3pyJH8KiWiquJSJw8Rd6D7x43BwTFHb2ESsgSicCk0,1932
 lionagi/service/providers/exa_/types.py,sha256=8ODjXpFajBE9-DGqBJNS--GObwmLSDi667xS84z_AgA,139
 lionagi/service/providers/groq_/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
@@ -207,7 +207,7 @@ lionagi/service/providers/openrouter_/__init__.py,sha256=5y5joOZzfFWERl75auAcNcK
 lionagi/service/providers/openrouter_/chat_completions.py,sha256=0pdXjJCXmCPPbKKVubrnqofaodTOxWTJam8fd3NgrNk,1525
 lionagi/service/providers/perplexity_/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
 lionagi/service/providers/perplexity_/chat_completions.py,sha256=O4MIS_3xIINGjkAZdlw0Bu_jAfBDR4VZA1F8JW2EU1M,1197
-lionagi/service/providers/perplexity_/models.py,sha256=Fm5NbmWMdFkDKS0Cec__bNvs3St27lgqxFbHKyNCLsw,4945
+lionagi/service/providers/perplexity_/models.py,sha256=T2GFWKvCTDjp4kFobuAqzuAYBX0_VlmHhYacHloR3uo,154
 lionagi/session/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
 lionagi/session/branch.py,sha256=-4VZJna20HrftkVJoYiEgCb4HrUlP6aBRA-XlrHageQ,69722
 lionagi/session/prompts.py,sha256=AhuHL19s0TijVZX3tMKUKMi6l88xeVdpkuEn2vJSRyU,3236
@@ -217,7 +217,7 @@ lionagi/tools/base.py,sha256=cld32pyjaTUdyiqZ8hNyJjWKAhcJ8RQNhgImI7R8b-E,1940
 lionagi/tools/types.py,sha256=XtJLY0m-Yi_ZLWhm0KycayvqMCZd--HxfQ0x9vFUYDE,230
 lionagi/tools/file/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
 lionagi/tools/file/reader.py,sha256=K0PtEI1s-Ol-uthBUdGFzDLrQNFr4l1bRkosn2jios8,9539
-lionagi-0.12.0.dist-info/METADATA,sha256=8UbRJtTf_toFM9q7tTmMWBt-g9Tkdh9cwt5NRa1CJqE,18364
-lionagi-0.12.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-lionagi-0.12.0.dist-info/licenses/LICENSE,sha256=VXFWsdoN5AAknBCgFqQNgPWYx7OPp-PFEP961zGdOjc,11288
-lionagi-0.12.0.dist-info/RECORD,,
+lionagi-0.12.2.dist-info/METADATA,sha256=JrekthbdkAaX3sK7DWVfz_tN6vfb_YrSzfIXca88npQ,18423
+lionagi-0.12.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+lionagi-0.12.2.dist-info/licenses/LICENSE,sha256=VXFWsdoN5AAknBCgFqQNgPWYx7OPp-PFEP961zGdOjc,11288
+lionagi-0.12.2.dist-info/RECORD,,