beekeeper-core 1.0.6__tar.gz → 1.0.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/PKG-INFO +1 -1
  2. beekeeper_core-1.0.8/beekeeper/core/guardrails/__init__.py +4 -0
  3. beekeeper_core-1.0.8/beekeeper/core/guardrails/base.py +15 -0
  4. beekeeper_core-1.0.8/beekeeper/core/guardrails/types.py +13 -0
  5. beekeeper_core-1.0.8/beekeeper/core/llms/decorators.py +162 -0
  6. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/pyproject.toml +1 -1
  7. beekeeper_core-1.0.6/beekeeper/core/llms/decorators.py +0 -83
  8. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/.gitignore +0 -0
  9. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/README.md +0 -0
  10. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/__init__.py +0 -0
  11. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/document/__init__.py +0 -0
  12. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/document/base.py +0 -0
  13. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/embeddings/__init__.py +0 -0
  14. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/embeddings/base.py +0 -0
  15. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/evaluation/__init__.py +0 -0
  16. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/evaluation/context_similarity.py +0 -0
  17. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/flows/__init__.py +0 -0
  18. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/flows/ingestion_flow.py +0 -0
  19. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/llms/__init__.py +0 -0
  20. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/llms/base.py +0 -0
  21. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/llms/types.py +0 -0
  22. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/monitors/__init__.py +0 -0
  23. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/monitors/base.py +0 -0
  24. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/monitors/types.py +0 -0
  25. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/observers/__init__.py +0 -0
  26. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/observers/base.py +0 -0
  27. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/observers/types.py +0 -0
  28. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/prompts/__init__.py +0 -0
  29. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/prompts/base.py +0 -0
  30. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/prompts/utils.py +0 -0
  31. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/readers/__init__.py +0 -0
  32. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/readers/base.py +0 -0
  33. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/readers/directory.py +0 -0
  34. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/schema.py +0 -0
  35. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/text_chunkers/__init__.py +0 -0
  36. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/text_chunkers/base.py +0 -0
  37. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/text_chunkers/semantic.py +0 -0
  38. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/text_chunkers/sentence.py +0 -0
  39. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/text_chunkers/token.py +0 -0
  40. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/text_chunkers/utils.py +0 -0
  41. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/tools/__init__.py +0 -0
  42. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/tools/base.py +0 -0
  43. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/utils/pairwise.py +0 -0
  44. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/vector_stores/__init__.py +0 -0
  45. {beekeeper_core-1.0.6 → beekeeper_core-1.0.8}/beekeeper/core/vector_stores/base.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: beekeeper-core
3
- Version: 1.0.6
3
+ Version: 1.0.8
4
4
  Summary: Load any data in one line of code and connect with AI applications
5
5
  Project-URL: Repository, https://github.com/beekeeper-ai/beekeeper
6
6
  Author-email: Leonardo Furnielis <leonardofurnielis@outlook.com>
@@ -0,0 +1,4 @@
1
+ from beekeeper.core.guardrails.base import BaseGuardrail
2
+ from beekeeper.core.guardrails.types import GuardrailResponse
3
+
4
+ __all__ = ["BaseGuardrail", "GuardrailResponse"]
@@ -0,0 +1,15 @@
1
+ from abc import ABC, abstractmethod
2
+
3
+ from beekeeper.core.guardrails.types import GuardrailResponse
4
+
5
+
6
class BaseGuardrail(ABC):
    """Abstract base class defining the interface for guardrails.

    Concrete subclasses implement ``enforce`` to apply their policy checks
    to a piece of text flowing into or out of an LLM.
    """

    @classmethod
    def class_name(cls) -> str:
        # Stable identifier, independent of ``cls.__name__`` (survives renames).
        return "BaseGuardrail"

    @abstractmethod
    def enforce(self, text: str, direction: str) -> GuardrailResponse:
        """Run policy enforcement for this guardrail.

        Args:
            text: Text to evaluate against the guardrail's policies.
            direction: Flow direction of the text — presumably input vs.
                output of the LLM; TODO confirm the expected values.

        Returns:
            GuardrailResponse: The (possibly modified) text together with
            the action taken, if any, and the raw provider payload.
        """
@@ -0,0 +1,13 @@
1
+ from typing import Any, Optional
2
+
3
+ from pydantic import BaseModel, Field
4
+
5
+
6
class GuardrailResponse(BaseModel):
    """Guardrail response.

    Carries the text produced after a guardrail ran, the action it took
    (if any), and the provider's raw response payload.
    """

    # Text returned by the guardrail.
    text: str = Field(..., description="Generated text response")
    # Action the guardrail applied, when one was triggered.
    action: Optional[str] = Field(default=None, description="Action taken by the guardrail")
    # Unmodified provider response, kept for debugging/inspection.
    raw: Optional[Any] = Field(default=None)
@@ -0,0 +1,162 @@
1
+ import asyncio
2
+ import threading
3
+ import time
4
+ from logging import getLogger
5
+ from typing import Callable
6
+
7
+ from beekeeper.core.llms.types import ChatMessage
8
+ from beekeeper.core.monitors.types import PayloadRecord
9
+ from deprecated import deprecated
10
+
11
+ logger = getLogger(__name__)
12
+
13
+
14
@deprecated(
    reason="'llm_chat_observer()' is deprecated and will be removed in a future version. Use 'llm_chat_monitor()'.",
    version="1.0.8",
    action="always",
)
def llm_chat_observer() -> Callable:
    """Deprecated alias of ``llm_chat_monitor()``.

    Returns the exact decorator produced by ``llm_chat_monitor()``.
    Kept only for backward compatibility; scheduled for removal.
    """
    # Delegate instead of duplicating the full monitor implementation
    # (the previous body was a line-for-line copy), so future fixes land
    # in a single place. Resolved at call time, after the module loaded.
    return llm_chat_monitor()
90
+
91
+
92
def llm_chat_monitor() -> Callable:
    """
    Decorator factory wrapping an LLM chat method with monitoring logic.

    Looks for an observability callback in ``self.callback_manager`` on the
    decorated method's instance. When present, a ``PayloadRecord`` describing
    the chat exchange (input/output text, token counts, response time) is
    reported on a background thread so the caller is never blocked. The
    wrapped method's return value is passed through unchanged.
    """

    def decorator(f: Callable) -> Callable:
        def wrapper(self, *args, **kwargs):
            callback_manager_fns = getattr(self, "callback_manager", None)

            # Time the wrapped LLM call in milliseconds.
            start_time = time.time()
            llm_return_val = f(self, *args, **kwargs)
            response_time = int((time.time() - start_time) * 1000)

            if callback_manager_fns:

                def callback_thread():
                    try:
                        # Extract the chat messages: either the first positional
                        # argument (a sequence of ChatMessage) or the `messages`
                        # keyword argument.
                        # NOTE(review): the previous check compared the list
                        # itself against ChatMessage, so positionally passed
                        # messages were never recognized and always raised.
                        if args and isinstance(args[0], (list, tuple)):
                            input_chat_messages = args[0]
                        elif "messages" in kwargs:
                            input_chat_messages = kwargs["messages"]
                        else:
                            raise ValueError(
                                "No messages provided in positional or keyword arguments"
                            )

                        # Latest user message of the conversation.
                        user_messages = [
                            msg for msg in input_chat_messages if msg.role == "user"
                        ]
                        last_user_message = (
                            user_messages[-1].content if user_messages else None
                        )

                        # First system/instruction message of the conversation.
                        system_messages = [
                            msg for msg in input_chat_messages if msg.role == "system"
                        ]
                        system_message = (
                            system_messages[0].content if system_messages else None
                        )

                        # Both parts are guarded with `or ""` so a missing
                        # system or user message cannot raise a TypeError on
                        # string concatenation.
                        callback = callback_manager_fns(
                            payload=PayloadRecord(
                                input_text=(system_message or "")
                                + (last_user_message or ""),
                                generated_text=llm_return_val.message.content,
                                generated_token_count=llm_return_val.raw["usage"][
                                    "completion_tokens"
                                ],
                                input_token_count=llm_return_val.raw["usage"][
                                    "prompt_tokens"
                                ],
                                response_time=response_time,
                            )
                        )

                        # Support async callback managers.
                        if asyncio.iscoroutine(callback):
                            asyncio.run(callback)

                    except Exception as e:
                        logger.error(f"Observability callback error: {e}")

                # Fire-and-forget so monitoring never blocks the chat call.
                threading.Thread(target=callback_thread).start()

            return llm_return_val

        return wrapper

    return decorator
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
4
4
 
5
5
  [project]
6
6
  name = "beekeeper-core"
7
- version = "1.0.6"
7
+ version = "1.0.8"
8
8
  description = "Load any data in one line of code and connect with AI applications"
9
9
  authors = [{ name = "Leonardo Furnielis", email = "leonardofurnielis@outlook.com" }]
10
10
  license = { text = "Apache-2.0" }
@@ -1,83 +0,0 @@
1
- import asyncio
2
- import threading
3
- import time
4
- from logging import getLogger
5
- from typing import Callable
6
-
7
- from beekeeper.core.llms.types import ChatMessage
8
- from beekeeper.core.observers.types import PayloadRecord
9
-
10
- logger = getLogger(__name__)
11
-
12
-
13
- def llm_chat_observer() -> Callable:
14
- """
15
- Decorator to wrap a method with llm handler logic.
16
- Looks for observability instances in `self.callback_manager`.
17
- """
18
-
19
- def decorator(f: Callable) -> Callable:
20
- def async_wrapper(self, *args, **kwargs):
21
- callback_manager_fns = getattr(self, "callback_manager", None)
22
-
23
- start_time = time.time()
24
- llm_return_val = f(self, *args, **kwargs)
25
- response_time = int((time.time() - start_time) * 1000)
26
-
27
- if callback_manager_fns:
28
-
29
- def async_callback_thread():
30
- try:
31
- # Extract input messages
32
- if len(args) > 0 and isinstance(args[0], ChatMessage):
33
- input_chat_messages = args[0]
34
- elif "messages" in kwargs:
35
- input_chat_messages = kwargs["messages"]
36
- else:
37
- raise ValueError(
38
- "No messages provided in positional or keyword arguments"
39
- )
40
-
41
- # Get the user's latest message after each interaction to chat observability.
42
- user_messages = [
43
- msg for msg in input_chat_messages if msg.role == "user"
44
- ]
45
- last_user_message = (
46
- user_messages[-1].content if user_messages else None
47
- )
48
-
49
- # Get the system/instruct (first) message to chat observability.
50
- system_messages = [
51
- msg for msg in input_chat_messages if msg.role == "system"
52
- ]
53
- system_message = (
54
- system_messages[0].content if system_messages else None
55
- )
56
-
57
- callback = callback_manager_fns(
58
- payload=PayloadRecord(
59
- input_text=(system_message or "") + last_user_message,
60
- generated_text=llm_return_val.message.content,
61
- generated_token_count=llm_return_val.raw["usage"][
62
- "completion_tokens"
63
- ],
64
- input_token_count=llm_return_val.raw["usage"][
65
- "prompt_tokens"
66
- ],
67
- response_time=response_time,
68
- )
69
- )
70
-
71
- if asyncio.iscoroutine(callback):
72
- asyncio.run(callback)
73
-
74
- except Exception as e:
75
- logger.error(f"Observability callback error: {e}")
76
-
77
- threading.Thread(target=async_callback_thread).start()
78
-
79
- return llm_return_val
80
-
81
- return async_wrapper
82
-
83
- return decorator
File without changes