lionagi-0.12.2-py3-none-any.whl → lionagi-0.12.4-py3-none-any.whl

This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
Files changed (86)
  1. lionagi/config.py +123 -0
  2. lionagi/fields/file.py +1 -1
  3. lionagi/fields/reason.py +1 -1
  4. lionagi/libs/file/concat.py +1 -6
  5. lionagi/libs/file/concat_files.py +1 -5
  6. lionagi/libs/file/save.py +1 -1
  7. lionagi/libs/package/imports.py +8 -177
  8. lionagi/libs/parse.py +30 -0
  9. lionagi/libs/schema/load_pydantic_model_from_schema.py +259 -0
  10. lionagi/libs/token_transform/perplexity.py +2 -4
  11. lionagi/libs/token_transform/synthlang_/resources/frameworks/framework_options.json +46 -46
  12. lionagi/libs/token_transform/synthlang_/translate_to_synthlang.py +1 -1
  13. lionagi/operations/chat/chat.py +2 -2
  14. lionagi/operations/communicate/communicate.py +20 -5
  15. lionagi/operations/parse/parse.py +131 -43
  16. lionagi/protocols/generic/log.py +1 -2
  17. lionagi/protocols/generic/pile.py +18 -4
  18. lionagi/protocols/messages/assistant_response.py +20 -1
  19. lionagi/protocols/messages/templates/README.md +6 -10
  20. lionagi/service/connections/__init__.py +15 -0
  21. lionagi/service/connections/api_calling.py +230 -0
  22. lionagi/service/connections/endpoint.py +410 -0
  23. lionagi/service/connections/endpoint_config.py +137 -0
  24. lionagi/service/connections/header_factory.py +56 -0
  25. lionagi/service/connections/match_endpoint.py +49 -0
  26. lionagi/service/connections/providers/__init__.py +3 -0
  27. lionagi/service/connections/providers/anthropic_.py +87 -0
  28. lionagi/service/connections/providers/exa_.py +33 -0
  29. lionagi/service/connections/providers/oai_.py +166 -0
  30. lionagi/service/connections/providers/ollama_.py +122 -0
  31. lionagi/service/connections/providers/perplexity_.py +29 -0
  32. lionagi/service/imodel.py +36 -144
  33. lionagi/service/manager.py +1 -7
  34. lionagi/service/{endpoints/rate_limited_processor.py → rate_limited_processor.py} +4 -2
  35. lionagi/service/resilience.py +545 -0
  36. lionagi/service/third_party/README.md +71 -0
  37. lionagi/service/third_party/__init__.py +0 -0
  38. lionagi/service/third_party/anthropic_models.py +159 -0
  39. lionagi/service/third_party/exa_models.py +165 -0
  40. lionagi/service/third_party/openai_models.py +18241 -0
  41. lionagi/service/third_party/pplx_models.py +156 -0
  42. lionagi/service/types.py +5 -4
  43. lionagi/session/branch.py +12 -7
  44. lionagi/tools/file/reader.py +1 -1
  45. lionagi/tools/memory/tools.py +497 -0
  46. lionagi/utils.py +921 -123
  47. lionagi/version.py +1 -1
  48. {lionagi-0.12.2.dist-info → lionagi-0.12.4.dist-info}/METADATA +33 -16
  49. {lionagi-0.12.2.dist-info → lionagi-0.12.4.dist-info}/RECORD +53 -63
  50. lionagi/libs/file/create_path.py +0 -80
  51. lionagi/libs/file/file_util.py +0 -358
  52. lionagi/libs/parse/__init__.py +0 -3
  53. lionagi/libs/parse/fuzzy_parse_json.py +0 -117
  54. lionagi/libs/parse/to_dict.py +0 -336
  55. lionagi/libs/parse/to_json.py +0 -61
  56. lionagi/libs/parse/to_num.py +0 -378
  57. lionagi/libs/parse/to_xml.py +0 -57
  58. lionagi/libs/parse/xml_parser.py +0 -148
  59. lionagi/libs/schema/breakdown_pydantic_annotation.py +0 -48
  60. lionagi/service/endpoints/__init__.py +0 -3
  61. lionagi/service/endpoints/base.py +0 -706
  62. lionagi/service/endpoints/chat_completion.py +0 -116
  63. lionagi/service/endpoints/match_endpoint.py +0 -72
  64. lionagi/service/providers/__init__.py +0 -3
  65. lionagi/service/providers/anthropic_/__init__.py +0 -3
  66. lionagi/service/providers/anthropic_/messages.py +0 -99
  67. lionagi/service/providers/exa_/models.py +0 -3
  68. lionagi/service/providers/exa_/search.py +0 -80
  69. lionagi/service/providers/exa_/types.py +0 -7
  70. lionagi/service/providers/groq_/__init__.py +0 -3
  71. lionagi/service/providers/groq_/chat_completions.py +0 -56
  72. lionagi/service/providers/ollama_/__init__.py +0 -3
  73. lionagi/service/providers/ollama_/chat_completions.py +0 -134
  74. lionagi/service/providers/openai_/__init__.py +0 -3
  75. lionagi/service/providers/openai_/chat_completions.py +0 -101
  76. lionagi/service/providers/openai_/spec.py +0 -14
  77. lionagi/service/providers/openrouter_/__init__.py +0 -3
  78. lionagi/service/providers/openrouter_/chat_completions.py +0 -62
  79. lionagi/service/providers/perplexity_/__init__.py +0 -3
  80. lionagi/service/providers/perplexity_/chat_completions.py +0 -44
  81. lionagi/service/providers/perplexity_/models.py +0 -5
  82. lionagi/service/providers/types.py +0 -17
  83. /lionagi/{service/providers/exa_/__init__.py → py.typed} +0 -0
  84. /lionagi/service/{endpoints/token_calculator.py → token_calculator.py} +0 -0
  85. {lionagi-0.12.2.dist-info → lionagi-0.12.4.dist-info}/WHEEL +0 -0
  86. {lionagi-0.12.2.dist-info → lionagi-0.12.4.dist-info}/licenses/LICENSE +0 -0
lionagi/service/third_party/anthropic_models.py
@@ -0,0 +1,159 @@
+# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+"""Anthropic API models for request/response validation."""
+
+from __future__ import annotations
+
+from typing import Literal, Optional, Union
+
+from pydantic import BaseModel, Field, field_validator
+
+
+class TextContentBlock(BaseModel):
+    type: Literal["text"] = "text"
+    text: str
+    cache_control: dict | None = None
+
+
+class ImageSource(BaseModel):
+    type: Literal["base64"] = "base64"
+    media_type: Literal["image/jpeg", "image/png", "image/gif", "image/webp"]
+    data: str
+
+
+class ImageContentBlock(BaseModel):
+    type: Literal["image"] = "image"
+    source: ImageSource
+
+
+ContentBlock = Union[TextContentBlock, ImageContentBlock]
+
+
+class Message(BaseModel):
+    role: Literal["user", "assistant"]
+    content: str | list[str | ContentBlock]
+
+    @field_validator("content", mode="before")
+    def validate_content(cls, v):
+        """Convert string content to proper format."""
+        if isinstance(v, str):
+            return v
+        if isinstance(v, list):
+            # Ensure all items are either strings or proper content blocks
+            result = []
+            for item in v:
+                if isinstance(item, str):
+                    result.append({"type": "text", "text": item})
+                else:
+                    result.append(item)
+            return result
+        return v
+
+
+class ToolDefinition(BaseModel):
+    name: str = Field(
+        ..., min_length=1, max_length=64, pattern="^[a-zA-Z0-9_-]+$"
+    )
+    description: str | None = None
+    input_schema: dict
+
+
+class ToolChoice(BaseModel):
+    type: Literal["auto", "any", "tool"]
+    name: str | None = None
+
+
+class CreateMessageRequest(BaseModel):
+    """Request model for Anthropic messages API."""
+
+    model: str = Field(..., min_length=1, max_length=256)
+    messages: list[Message]
+    max_tokens: int = Field(..., ge=1)
+
+    # Optional fields
+    system: str | list[ContentBlock] | None = None
+    temperature: float | None = Field(None, ge=0, le=1)
+    top_p: float | None = Field(None, ge=0, le=1)
+    top_k: int | None = Field(None, ge=0)
+    stop_sequences: list[str] | None = None
+    stream: bool | None = False
+    metadata: dict | None = None
+    tools: list[ToolDefinition] | None = None
+    tool_choice: ToolChoice | dict | None = None
+
+    class Config:
+        extra = "forbid"
+
+
+class Usage(BaseModel):
+    """Token usage information."""
+
+    input_tokens: int
+    output_tokens: int
+
+
+class ContentBlockResponse(BaseModel):
+    """Response content block."""
+
+    type: Literal["text"]
+    text: str
+
+
+class CreateMessageResponse(BaseModel):
+    """Response model for Anthropic messages API."""
+
+    id: str
+    type: Literal["message"] = "message"
+    role: Literal["assistant"] = "assistant"
+    content: list[ContentBlockResponse]
+    model: str
+    stop_reason: None | (
+        Literal["end_turn", "max_tokens", "stop_sequence", "tool_use"]
+    ) = None
+    stop_sequence: str | None = None
+    usage: Usage
+
+
+# Streaming response models
+class MessageStartEvent(BaseModel):
+    type: Literal["message_start"] = "message_start"
+    message: CreateMessageResponse
+
+
+class ContentBlockStartEvent(BaseModel):
+    type: Literal["content_block_start"] = "content_block_start"
+    index: int
+    content_block: ContentBlockResponse
+
+
+class ContentBlockDeltaEvent(BaseModel):
+    type: Literal["content_block_delta"] = "content_block_delta"
+    index: int
+    delta: dict
+
+
+class ContentBlockStopEvent(BaseModel):
+    type: Literal["content_block_stop"] = "content_block_stop"
+    index: int
+
+
+class MessageDeltaEvent(BaseModel):
+    type: Literal["message_delta"] = "message_delta"
+    delta: dict
+    usage: Usage | None = None
+
+
+class MessageStopEvent(BaseModel):
+    type: Literal["message_stop"] = "message_stop"
+
+
+StreamEvent = Union[
+    MessageStartEvent,
+    ContentBlockStartEvent,
+    ContentBlockDeltaEvent,
+    ContentBlockStopEvent,
+    MessageDeltaEvent,
+    MessageStopEvent,
+]
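
As a quick orientation to the new anthropic_models.py file, here is a minimal, hypothetical usage sketch. It assumes the module is importable at the path it ships under (lionagi.service.third_party.anthropic_models); the model id and message text are placeholders, not values taken from the package.

from lionagi.service.third_party.anthropic_models import (
    CreateMessageRequest,
    Message,
)

req = CreateMessageRequest(
    model="claude-3-5-sonnet-latest",  # placeholder model id
    max_tokens=1024,
    messages=[
        # A plain string passes through the "before" validator unchanged.
        Message(role="user", content="Summarize the attached notes."),
        # Bare strings inside a list are wrapped as {"type": "text", ...} blocks.
        Message(role="assistant", content=["Here is a short summary."]),
    ],
    temperature=0.2,
)

# Serialize to a dict shaped like an Anthropic messages API request body.
print(req.model_dump(exclude_none=True))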
lionagi/service/third_party/exa_models.py
@@ -0,0 +1,165 @@
+from enum import Enum
+
+from pydantic import BaseModel, Field
+
+
+class SearchCategory(str, Enum):
+    company = "company"
+    research_paper = "research paper"
+    news = "news"
+    pdf = "pdf"
+    github = "github"
+    tweet = "tweet"
+    personal_site = "personal site"
+    linkedin_profile = "linkedin profile"
+    financial_report = "financial report"
+
+
+class LivecrawlType(str, Enum):
+    never = "never"
+    fallback = "fallback"
+    always = "always"
+
+
+class SearchType(str, Enum):
+    keyword = "keyword"
+    neural = "neural"
+    auto = "auto"
+
+
+class ContentsText(BaseModel):
+    includeHtmlTags: bool | None = Field(
+        default=False,
+        description="Whether to include HTML tags in the text. Set to True if you want"
+        " to retain HTML structure for the LLM to interpret.",
+    )
+    maxCharacters: int | None = Field(
+        default=None,
+        description="The maximum number of characters to return from the webpage text",
+    )
+
+
+class ContentsHighlights(BaseModel):
+    highlightsPerUrl: int | None = Field(
+        default=1,
+        description="The number of highlight snippets you want per page.",
+    )
+    numSentences: int | None = Field(
+        default=5,
+        description="Number of sentences to return in each highlight snippet.",
+    )
+    query: None | str = Field(
+        default=None,
+        description="A specific query used to generate the highlight snippets.",
+    )
+
+
+class ContentsSummary(BaseModel):
+    query: None | str = Field(
+        default=None,
+        description="A specific query used to generate a summary of the webpage.",
+    )
+
+
+class ContentsExtras(BaseModel):
+    links: int | None = Field(
+        default=None, description="Number of links to return from each page."
+    )
+    imageLinks: int | None = Field(
+        default=None, description="Number of images to return for each result."
+    )
+
+
+class Contents(BaseModel):
+    text: None | ContentsText = Field(
+        default=None,
+        description="Return full or partial text for each page, with optional HTML "
+        "structure or size limit.",
+    )
+    highlights: None | ContentsHighlights = Field(
+        default=None, description="Return snippet highlights for each page."
+    )
+    summary: None | ContentsSummary = Field(
+        default=None, description="Return a short summary of each page."
+    )
+    livecrawl: None | LivecrawlType = Field(
+        default=LivecrawlType.never,
+        description="Livecrawling setting for each page. Options: never, fallback, always.",
+    )
+    livecrawlTimeout: int | None = Field(
+        default=10000,
+        description="Timeout in milliseconds for livecrawling. Default 10000.",
+    )
+    subpages: int | None = Field(
+        default=None,
+        description="Number of subpages to crawl within each URL.",
+    )
+    subpageTarget: None | str | list[str] = Field(
+        default=None,
+        description="A target subpage or multiple subpages (list) to crawl, e.g. 'cited papers'.",
+    )
+    extras: None | ContentsExtras = Field(
+        default=None,
+        description="Additional extras like links or images to return for each page.",
+    )
+
+
+class ExaSearchRequest(BaseModel):
+    query: str = Field(
+        ...,
+        description="The main query string describing what you're looking for.",
+    )
+    category: None | SearchCategory = Field(
+        default=None,
+        description="A data category to focus on, such as 'company', 'research paper', 'news', etc.",
+    )
+    type: None | SearchType = Field(
+        default=None,
+        description="The type of search to run. Can be 'auto', 'keyword', or 'neural'.",
+    )
+    useAutoprompt: None | bool = Field(
+        default=False,
+        description="If True, Exa auto-optimizes your query for best results (neural or auto search only).",
+    )
+    numResults: int | None = Field(
+        default=10, description="Number of results to return. Default is 10."
+    )
+    includeDomains: None | list[str] = Field(
+        default=None,
+        description="List of domains you want to include exclusively.",
+    )
+    excludeDomains: None | list[str] = Field(
+        default=None,
+        description="List of domains you do NOT want to see in the results.",
+    )
+    startCrawlDate: None | str = Field(
+        default=None,
+        description="Include results crawled after this ISO date (e.g., '2023-01-01T00:00:00.000Z').",
+    )
+    endCrawlDate: None | str = Field(
+        default=None,
+        description="Include results crawled before this ISO date.",
+    )
+    startPublishedDate: None | str = Field(
+        default=None,
+        description="Only return results published after this ISO date.",
+    )
+    endPublishedDate: None | str = Field(
+        default=None,
+        description="Only return results published before this ISO date.",
+    )
+    includeText: None | list[str] = Field(
+        default=None,
+        description="Strings that must appear in the webpage text. Only a single string up to "
+        "5 words is currently supported.",
+    )
+    excludeText: None | list[str] = Field(
+        default=None,
+        description="Strings that must NOT appear in the webpage text. Only a single string up to "
+        "5 words is currently supported.",
+    )
+    contents: None | Contents = Field(
+        default=None,
+        description="Dict defining the different ways you want to retrieve webpage contents, "
+        "including text, highlights, or summaries.",
+    )
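
Likewise, a hedged sketch of assembling an Exa search request from the models in exa_models.py (same assumption about the import path; the query, category, and option values are illustrative only):

from lionagi.service.third_party.exa_models import (
    Contents,
    ContentsSummary,
    ExaSearchRequest,
    SearchCategory,
    SearchType,
)

request = ExaSearchRequest(
    query="open-source agent frameworks for LLM orchestration",
    category=SearchCategory.github,
    type=SearchType.auto,
    numResults=5,  # field names stay camelCase to mirror the Exa API payload
    contents=Contents(
        summary=ContentsSummary(query="What does this project do?"),
    ),
)

# mode="json" renders enums as their string values; exclude_none drops unset fields.
print(request.model_dump(mode="json", exclude_none=True))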