lionagi 0.12.2__py3-none-any.whl → 0.12.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,144 @@
- from khive.providers.perplexity_ import (
-     PerplexityChatRequest as PerplexityChatCompletionRequest,
- )
+ from enum import Enum
+ from typing import Any

- __all__ = ("PerplexityChatCompletionRequest",)
+ from pydantic import BaseModel, Field, model_validator
+
+
+ class PerplexityRole(str, Enum):
+     """Roles allowed in Perplexity's messages."""
+
+     system = "system"
+     user = "user"
+     assistant = "assistant"
+
+
+ class PerplexityMessage(BaseModel):
+     """
+     A single message in the conversation.
+     `role` can be 'system', 'user', or 'assistant'.
+     `content` is the text for that conversation turn.
+     """
+
+     role: PerplexityRole = Field(
+         ...,
+         description="The role of the speaker. Must be system, user, or assistant.",
+     )
+     content: str = Field(..., description="The text content of this message.")
+
+
+ class PerplexityChatCompletionRequest(BaseModel):
+     """
+     Represents the request body for Perplexity's Chat Completions endpoint.
+     Endpoint: POST https://api.perplexity.ai/chat/completions
+     """
+
+     model: str = Field(
+         "sonar",
+         description="The model name, e.g. 'sonar', (the only model available at the time when this request model was updated, check doc for latest info).",
+     )
+     messages: list[PerplexityMessage] = Field(
+         ..., description="A list of messages forming the conversation so far."
+     )
+
+     # Optional parameters
+     frequency_penalty: float | None = Field(
+         default=None,
+         gt=0,
+         description=(
+             "Multiplicative penalty > 0. Values > 1.0 penalize repeated tokens more strongly. "
+             "Value=1.0 means no penalty. Incompatible with presence_penalty."
+         ),
+     )
+     presence_penalty: float | None = Field(
+         default=None,
+         ge=-2.0,
+         le=2.0,
+         description=(
+             "Penalizes tokens that have appeared so far (range -2 to 2). "
+             "Positive values encourage talking about new topics. Incompatible with frequency_penalty."
+         ),
+     )
+     max_tokens: int | None = Field(
+         default=None,
+         description=(
+             "Maximum number of completion tokens. If omitted, model generates tokens until it "
+             "hits stop or context limit."
+         ),
+     )
+     return_images: bool | None = Field(
+         default=None,
+         description="If True, attempt to return images (closed beta feature).",
+     )
+     return_related_questions: bool | None = Field(
+         default=None,
+         description="If True, attempt to return related questions (closed beta feature).",
+     )
+     search_domain_filter: list[Any] | None = Field(
+         default=None,
+         description=(
+             "List of domains to limit or exclude in the online search. Example: ['example.com', '-twitter.com']. "
+             "Supports up to 3 entries. (Closed beta feature.)"
+         ),
+     )
+     search_recency_filter: str | None = Field(
+         default=None,
+         description=(
+             "Returns search results within a specified time interval: 'month', 'week', 'day', or 'hour'."
+         ),
+     )
+     stream: bool | None = Field(
+         default=None,
+         description=(
+             "If True, response is returned incrementally via Server-Sent Events (SSE)."
+         ),
+     )
+     temperature: float | None = Field(
+         default=None,
+         ge=0.0,
+         lt=2.0,
+         description=(
+             "Controls randomness of sampling, range [0, 2). Higher => more random. "
+             "Defaults to 0.2."
+         ),
+     )
+     top_k: int | None = Field(
+         default=None,
+         ge=0,
+         le=2048,
+         description=(
+             "Top-K filtering. 0 disables top-k filtering. If set, only the top K tokens are considered. "
+             "We recommend altering either top_k or top_p, but not both."
+         ),
+     )
+     top_p: float | None = Field(
+         default=None,
+         ge=0.0,
+         le=1.0,
+         description=(
+             "Nucleus sampling threshold. We recommend altering either top_k or top_p, but not both."
+         ),
+     )
+
+     @model_validator(mode="before")
+     def validate_penalties(cls, values):
+         """
+         Disallow using both frequency_penalty != 1.0 and presence_penalty != 0.0 at once,
+         since the docs say they're incompatible.
+         """
+         freq_pen = values.get("frequency_penalty", 1.0)
+         pres_pen = values.get("presence_penalty", 0.0)
+
+         # The doc states frequency_penalty is incompatible with presence_penalty.
+         # We'll enforce that if presence_penalty != 0, frequency_penalty must be 1.0
+         # or vice versa. Adjust logic as needed.
+         if pres_pen != 0.0 and freq_pen != 1.0:
+             raise ValueError(
+                 "presence_penalty is incompatible with frequency_penalty. "
+                 "Please use only one: either presence_penalty=0 with freq_pen !=1, "
+                 "or presence_penalty!=0 with freq_pen=1."
+             )
+         return values
+
+     def to_dict(self) -> dict:
+         """Return a dict suitable for JSON serialization and sending to Perplexity API."""
+         return self.model_dump(exclude_none=True)
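
A minimal usage sketch of the request model added in this hunk, assuming the classes are importable from the module the diff introduces; the import path below is a placeholder, since the hunk does not show where lionagi exposes these models:

# Placeholder import path; adjust to wherever lionagi exposes these models.
from perplexity_models import (
    PerplexityChatCompletionRequest,
    PerplexityMessage,
    PerplexityRole,
)

request = PerplexityChatCompletionRequest(
    model="sonar",
    messages=[
        PerplexityMessage(role=PerplexityRole.system, content="Be concise."),
        PerplexityMessage(role=PerplexityRole.user, content="What is lionagi?"),
    ],
    temperature=0.2,
    max_tokens=256,
)

# to_dict() calls model_dump(exclude_none=True), so fields left at None are
# dropped and the JSON body carries only the values set above (plus the
# defaulted model name).
payload = request.to_dict()

Note that supplying presence_penalty together with a frequency_penalty other than 1.0 raises a ValueError from validate_penalties, matching the "incompatible" wording in the field descriptions.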