beekeeper-core 1.0.9.tar.gz → 1.0.11.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/PKG-INFO +2 -2
  2. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/llms/base.py +15 -10
  3. beekeeper_core-1.0.11/beekeeper/core/llms/decorators.py +83 -0
  4. beekeeper_core-1.0.11/beekeeper/core/llms/types.py +57 -0
  5. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/pyproject.toml +2 -2
  6. beekeeper_core-1.0.9/beekeeper/core/llms/decorators.py +0 -162
  7. beekeeper_core-1.0.9/beekeeper/core/llms/types.py +0 -33
  8. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/.gitignore +0 -0
  9. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/README.md +0 -0
  10. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/__init__.py +0 -0
  11. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/document/__init__.py +0 -0
  12. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/document/base.py +0 -0
  13. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/embeddings/__init__.py +0 -0
  14. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/embeddings/base.py +0 -0
  15. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/evaluation/__init__.py +0 -0
  16. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/evaluation/context_similarity.py +0 -0
  17. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/flows/__init__.py +0 -0
  18. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/flows/ingestion_flow.py +0 -0
  19. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/guardrails/__init__.py +0 -0
  20. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/guardrails/base.py +0 -0
  21. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/guardrails/types.py +0 -0
  22. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/llms/__init__.py +0 -0
  23. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/monitors/__init__.py +0 -0
  24. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/monitors/base.py +0 -0
  25. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/monitors/types.py +0 -0
  26. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/observers/__init__.py +0 -0
  27. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/observers/base.py +0 -0
  28. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/observers/types.py +0 -0
  29. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/prompts/__init__.py +0 -0
  30. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/prompts/base.py +0 -0
  31. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/prompts/utils.py +0 -0
  32. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/readers/__init__.py +0 -0
  33. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/readers/base.py +0 -0
  34. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/readers/directory.py +0 -0
  35. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/schema.py +0 -0
  36. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/text_chunkers/__init__.py +0 -0
  37. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/text_chunkers/base.py +0 -0
  38. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/text_chunkers/semantic.py +0 -0
  39. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/text_chunkers/sentence.py +0 -0
  40. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/text_chunkers/token.py +0 -0
  41. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/text_chunkers/utils.py +0 -0
  42. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/tools/__init__.py +0 -0
  43. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/tools/base.py +0 -0
  44. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/utils/pairwise.py +0 -0
  45. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/vector_stores/__init__.py +0 -0
  46. {beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/vector_stores/base.py +0 -0

{beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: beekeeper-core
-Version: 1.0.9
+Version: 1.0.11
 Summary: Load any data in one line of code and connect with AI applications
 Project-URL: Repository, https://github.com/beekeeper-ai/beekeeper
 Author-email: Leonardo Furnielis <leonardofurnielis@outlook.com>
@@ -9,7 +9,7 @@ Keywords: AI,LLM,QA,RAG,data,observability,retrieval,semantic-search
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: Topic :: Software Development :: Libraries :: Application Frameworks
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Requires-Python: <4.0,>=3.11
+Requires-Python: <3.14,>=3.11
 Requires-Dist: deprecated<2.0.0,>=1.3.1
 Requires-Dist: nltk<4.0.0,>=3.9.2
 Requires-Dist: numpy<1.27.0,>=1.26.4

{beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/beekeeper/core/llms/base.py
@@ -1,35 +1,40 @@
 from abc import ABC, abstractmethod
-from typing import Any, List, Optional
+from typing import Any, Optional
 
 from beekeeper.core.llms.types import ChatMessage, ChatResponse, GenerateResponse
 from beekeeper.core.monitors import BaseMonitor
 from pydantic import BaseModel
 
 
-class BaseLLM(ABC, BaseModel):
+class BaseLLM(BaseModel, ABC):
     """Abstract base class defining the interface for LLMs."""
 
     model_config = {"arbitrary_types_allowed": True}
+    model: str
     callback_manager: Optional[BaseMonitor] = None
 
     @classmethod
     def class_name(cls) -> str:
         return "BaseLLM"
 
-    def convert_chat_messages(self, messages: List[ChatMessage]) -> List[dict]:
-        """Convert ChatMessage to LLM message dict format."""
-        return [message.model_dump() for message in messages]
+    def text_completion(self, prompt: str, **kwargs: Any) -> str:
+        """
+        Generates a text completion for the LLM using OpenAI's standard endpoint (/completions).
+
+        Args:
+            prompt (str): The input prompt to generate a completion for.
+            **kwargs (Any): Additional keyword arguments to customize the LLM completion request.
+        """
+        response = self.completion(prompt=prompt, **kwargs)
+
+        return response.text
 
     @abstractmethod
     def completion(self, prompt: str, **kwargs: Any) -> GenerateResponse:
         """Generates a completion for LLM."""
 
-    @abstractmethod
-    def text_completion(self, prompt: str, **kwargs: Any) -> str:
-        """Generates a text completion for LLM."""
-
     @abstractmethod
     def chat_completion(
-        self, messages: List[ChatMessage], **kwargs: Any
+        self, messages: list[ChatMessage | dict], **kwargs: Any
     ) -> ChatResponse:
         """Generates a chat completion for LLM."""

beekeeper_core-1.0.11/beekeeper/core/llms/decorators.py
@@ -0,0 +1,83 @@
+import asyncio
+import threading
+import time
+from logging import getLogger
+from typing import Callable
+
+from beekeeper.core.llms.types import ChatMessage
+from beekeeper.core.monitors.types import PayloadRecord
+
+logger = getLogger(__name__)
+
+
+def llm_chat_monitor() -> Callable:
+    """
+    Decorator to wrap a method with LLM handler logic.
+    Looks for observability instances in `self.callback_manager`.
+    """
+
+    def decorator(f: Callable) -> Callable:
+        def async_wrapper(self, *args, **kwargs):
+            callback_manager_fns = getattr(self, "callback_manager", None)
+
+            start_time = time.time()
+            llm_return_val = f(self, *args, **kwargs)
+            response_time = int((time.time() - start_time) * 1000)
+
+            if callback_manager_fns:
+
+                def async_callback_thread():
+                    try:
+                        # Extract input messages
+                        if len(args) > 0 and isinstance(args[0], ChatMessage):
+                            input_chat_messages = args[0]
+                        elif "messages" in kwargs:
+                            input_chat_messages = kwargs["messages"]
+                        else:
+                            raise ValueError(
+                                "No messages provided in positional or keyword arguments"
+                            )
+
+                        # Get the user's latest message after each interaction for chat observability.
+                        user_messages = [
+                            msg for msg in input_chat_messages if msg.role == "user"
+                        ]
+                        last_user_message = (
+                            user_messages[-1].content if user_messages else None
+                        )
+
+                        # Get the system/instruct (first) message for chat observability.
+                        system_messages = [
+                            msg for msg in input_chat_messages if msg.role == "system"
+                        ]
+                        system_message = (
+                            system_messages[0].content if system_messages else None
+                        )
+
+                        callback = callback_manager_fns(
+                            payload=PayloadRecord(
+                                input_text=(system_message or "") + last_user_message,
+                                generated_text=llm_return_val.message.content,
+                                generated_token_count=llm_return_val.raw["usage"][
+                                    "completion_tokens"
+                                ],
+                                input_token_count=llm_return_val.raw["usage"][
+                                    "prompt_tokens"
+                                ],
+                                response_time=response_time,
+                            )
+                        )
+
+                        if asyncio.iscoroutine(callback):
+                            asyncio.run(callback)
+
+                    except Exception as e:
+                        logger.error(f"Observability callback error: {e}")
+
+                threading.Thread(target=async_callback_thread).start()
+
+            return llm_return_val
+
+        return async_wrapper
+
+    return decorator
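
Nothing in the diff shows the decorator being applied, but its shape implies it wraps a provider's chat_completion: it forwards the call, measures wall-clock latency in milliseconds, and, if self.callback_manager is set, reports a PayloadRecord from a background thread. A sketch of the expected integration — the MonitoredLLM class and its canned response are illustrative assumptions, not part of the package:

from typing import Any

from beekeeper.core.llms.base import BaseLLM
from beekeeper.core.llms.decorators import llm_chat_monitor
from beekeeper.core.llms.types import ChatMessage, ChatResponse, GenerateResponse


class MonitoredLLM(BaseLLM):
    """Hypothetical provider showing where llm_chat_monitor() hooks in."""

    def completion(self, prompt: str, **kwargs: Any) -> GenerateResponse:
        return GenerateResponse(text="stub", raw=None)

    @llm_chat_monitor()
    def chat_completion(
        self, messages: list[ChatMessage | dict], **kwargs: Any
    ) -> ChatResponse:
        # The monitor reads .message.content and .raw["usage"][...] from this
        # return value, so `raw` must carry an OpenAI-style usage block.
        return ChatResponse(
            message=ChatMessage(role="assistant", content="hi!"),
            raw={"usage": {"prompt_tokens": 12, "completion_tokens": 3}},
        )


llm = MonitoredLLM(model="demo")  # no callback_manager set, so no monitor thread starts
resp = llm.chat_completion(messages=[ChatMessage(role="user", content="hello")])
print(resp.message.content)

Two details worth noting: the callback runs on a plain threading.Thread, so the caller gets the LLM response without waiting on observability; and the message inspection uses msg.role/msg.content attribute access, so it assumes ChatMessage objects rather than the plain dicts the widened chat_completion signature now also accepts.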

beekeeper_core-1.0.11/beekeeper/core/llms/types.py
@@ -0,0 +1,57 @@
+from enum import Enum
+from typing import Any, Optional
+
+from pydantic import BaseModel, Field
+
+
+class MessageRole(str, Enum):
+    ASSISTANT = "assistant"
+    SYSTEM = "system"
+    USER = "user"
+    TOOL = "tool"
+
+
+class ChatMessage(BaseModel):
+    """Chat message."""
+
+    model_config = {"use_enum_values": True}
+    role: MessageRole = Field(default=MessageRole.USER)
+    content: Optional[str] = Field(default=None)
+
+    def to_dict(self) -> dict:
+        """Convert ChatMessage to dict."""
+        return self.model_dump(exclude_none=True)
+
+    @classmethod
+    def from_value(cls, value: dict) -> "ChatMessage":
+        if value is None:
+            raise ValueError("Invalid 'ChatMessage', cannot be None")
+
+        if isinstance(value, cls):
+            return value
+
+        if isinstance(value, dict):
+            try:
+                return cls.model_validate(value)
+            except Exception as e:
+                raise ValueError(
+                    "Invalid 'ChatMessage' dict. Received: '{}'.".format(e)
+                )
+
+        raise TypeError(
+            f"Invalid 'ChatMessage' type. Expected dict or ChatMessage, but received {type(value).__name__}."
+        )
+
+
+class GenerateResponse(BaseModel):
+    """Generate response."""
+
+    text: str = Field(..., description="Generated text response")
+    raw: Optional[Any] = Field(default=None)
+
+
+class ChatResponse(BaseModel):
+    """Chat completion response."""
+
+    message: ChatMessage
+    raw: Optional[Any] = None
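
The new to_dict and from_value helpers take over from the convert_chat_messages method removed from BaseLLM. A quick sketch of their behavior, inferred directly from the code above:

from beekeeper.core.llms.types import ChatMessage

# from_value normalizes dicts and passes existing instances through unchanged.
msg = ChatMessage.from_value({"role": "user", "content": "What is RAG?"})
assert ChatMessage.from_value(msg) is msg

# to_dict drops None fields, so a content-less message serializes to its role alone.
print(msg.to_dict())                       # {'role': 'user', 'content': 'What is RAG?'}
print(ChatMessage(role="tool").to_dict())  # {'role': 'tool'}

# Anything that is neither dict nor ChatMessage fails fast with a TypeError.
try:
    ChatMessage.from_value("not a message")
except TypeError as e:
    print(e)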

{beekeeper_core-1.0.9 → beekeeper_core-1.0.11}/pyproject.toml
@@ -4,12 +4,12 @@ build-backend = "hatchling.build"
 
 [project]
 name = "beekeeper-core"
-version = "1.0.9"
+version = "1.0.11"
 description = "Load any data in one line of code and connect with AI applications"
 authors = [{ name = "Leonardo Furnielis", email = "leonardofurnielis@outlook.com" }]
 license = { text = "Apache-2.0" }
 readme = "README.md"
-requires-python = ">=3.11,<4.0"
+requires-python = ">=3.11,<3.14"
 keywords = ["AI", "LLM", "QA", "RAG", "data", "observability", "retrieval", "semantic-search"]
 classifiers = [
     "Topic :: Scientific/Engineering :: Artificial Intelligence",

beekeeper_core-1.0.9/beekeeper/core/llms/decorators.py
@@ -1,162 +0,0 @@
-import asyncio
-import threading
-import time
-from logging import getLogger
-from typing import Callable
-
-from beekeeper.core.llms.types import ChatMessage
-from beekeeper.core.monitors.types import PayloadRecord
-from deprecated import deprecated
-
-logger = getLogger(__name__)
-
-
-@deprecated(
-    reason="'llm_chat_observer()' is deprecated and will be removed in a future version. Use 'llm_chat_monitor()'.",
-    version="1.0.8",
-    action="always",
-)
-def llm_chat_observer() -> Callable:
-    """
-    Decorator to wrap a method with llm handler logic.
-    Looks for observability instances in `self.callback_manager`.
-    """
-
-    def decorator(f: Callable) -> Callable:
-        def async_wrapper(self, *args, **kwargs):
-            callback_manager_fns = getattr(self, "callback_manager", None)
-
-            start_time = time.time()
-            llm_return_val = f(self, *args, **kwargs)
-            response_time = int((time.time() - start_time) * 1000)
-
-            if callback_manager_fns:
-
-                def async_callback_thread():
-                    try:
-                        # Extract input messages
-                        if len(args) > 0 and isinstance(args[0], ChatMessage):
-                            input_chat_messages = args[0]
-                        elif "messages" in kwargs:
-                            input_chat_messages = kwargs["messages"]
-                        else:
-                            raise ValueError(
-                                "No messages provided in positional or keyword arguments"
-                            )
-
-                        # Get the user's latest message after each interaction to chat observability.
-                        user_messages = [
-                            msg for msg in input_chat_messages if msg.role == "user"
-                        ]
-                        last_user_message = (
-                            user_messages[-1].content if user_messages else None
-                        )
-
-                        # Get the system/instruct (first) message to chat observability.
-                        system_messages = [
-                            msg for msg in input_chat_messages if msg.role == "system"
-                        ]
-                        system_message = (
-                            system_messages[0].content if system_messages else None
-                        )
-
-                        callback = callback_manager_fns(
-                            payload=PayloadRecord(
-                                input_text=(system_message or "") + last_user_message,
-                                generated_text=llm_return_val.message.content,
-                                generated_token_count=llm_return_val.raw["usage"][
-                                    "completion_tokens"
-                                ],
-                                input_token_count=llm_return_val.raw["usage"][
-                                    "prompt_tokens"
-                                ],
-                                response_time=response_time,
-                            )
-                        )
-
-                        if asyncio.iscoroutine(callback):
-                            asyncio.run(callback)
-
-                    except Exception as e:
-                        logger.error(f"Observability callback error: {e}")
-
-                threading.Thread(target=async_callback_thread).start()
-
-            return llm_return_val
-
-        return async_wrapper
-
-    return decorator
-
-
-def llm_chat_monitor() -> Callable:
-    """
-    Decorator to wrap a method with llm handler logic.
-    Looks for observability instances in `self.callback_manager`.
-    """
-
-    def decorator(f: Callable) -> Callable:
-        def async_wrapper(self, *args, **kwargs):
-            callback_manager_fns = getattr(self, "callback_manager", None)
-
-            start_time = time.time()
-            llm_return_val = f(self, *args, **kwargs)
-            response_time = int((time.time() - start_time) * 1000)
-
-            if callback_manager_fns:
-
-                def async_callback_thread():
-                    try:
-                        # Extract input messages
-                        if len(args) > 0 and isinstance(args[0], ChatMessage):
-                            input_chat_messages = args[0]
-                        elif "messages" in kwargs:
-                            input_chat_messages = kwargs["messages"]
-                        else:
-                            raise ValueError(
-                                "No messages provided in positional or keyword arguments"
-                            )
-
-                        # Get the user's latest message after each interaction to chat observability.
-                        user_messages = [
-                            msg for msg in input_chat_messages if msg.role == "user"
-                        ]
-                        last_user_message = (
-                            user_messages[-1].content if user_messages else None
-                        )
-
-                        # Get the system/instruct (first) message to chat observability.
-                        system_messages = [
-                            msg for msg in input_chat_messages if msg.role == "system"
-                        ]
-                        system_message = (
-                            system_messages[0].content if system_messages else None
-                        )
-
-                        callback = callback_manager_fns(
-                            payload=PayloadRecord(
-                                input_text=(system_message or "") + last_user_message,
-                                generated_text=llm_return_val.message.content,
-                                generated_token_count=llm_return_val.raw["usage"][
-                                    "completion_tokens"
-                                ],
-                                input_token_count=llm_return_val.raw["usage"][
-                                    "prompt_tokens"
-                                ],
-                                response_time=response_time,
-                            )
-                        )
-
-                        if asyncio.iscoroutine(callback):
-                            asyncio.run(callback)
-
-                    except Exception as e:
-                        logger.error(f"Observability callback error: {e}")
-
-                threading.Thread(target=async_callback_thread).start()
-
-            return llm_return_val
-
-        return async_wrapper
-
-    return decorator

beekeeper_core-1.0.9/beekeeper/core/llms/types.py
@@ -1,33 +0,0 @@
-from enum import Enum
-from typing import Any, Optional
-
-from pydantic import BaseModel, Field
-
-
-class MessageRole(str, Enum):
-    ASSISTANT = "assistant"
-    SYSTEM = "system"
-    USER = "user"
-    TOOL = "tool"
-
-
-class ChatMessage(BaseModel):
-    """Chat message."""
-
-    model_config = {"use_enum_values": True}
-    role: MessageRole = Field(default=MessageRole.USER)
-    content: Optional[str] = Field(default=None)
-
-
-class GenerateResponse(BaseModel):
-    """Generate response."""
-
-    text: str = Field(..., description="Generated text response")
-    raw: Optional[Any] = Field(default=None)
-
-
-class ChatResponse(BaseModel):
-    """Chat completion response."""
-
-    message: ChatMessage
-    raw: Optional[Any] = None