beekeeper-core 1.0.4__tar.gz → 1.0.12__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/PKG-INFO +6 -6
  2. beekeeper_core-1.0.12/beekeeper/core/guardrails/__init__.py +4 -0
  3. beekeeper_core-1.0.12/beekeeper/core/guardrails/base.py +15 -0
  4. beekeeper_core-1.0.12/beekeeper/core/guardrails/types.py +13 -0
  5. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/llms/base.py +15 -10
  6. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/llms/decorators.py +21 -7
  7. beekeeper_core-1.0.12/beekeeper/core/llms/types.py +57 -0
  8. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/monitors/base.py +1 -1
  9. beekeeper_core-1.0.12/beekeeper/core/monitors/types.py +14 -0
  10. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/prompts/base.py +12 -0
  11. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/pyproject.toml +6 -6
  12. beekeeper_core-1.0.4/beekeeper/core/llms/types.py +0 -33
  13. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/.gitignore +0 -0
  14. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/README.md +0 -0
  15. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/__init__.py +0 -0
  16. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/document/__init__.py +0 -0
  17. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/document/base.py +0 -0
  18. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/embeddings/__init__.py +0 -0
  19. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/embeddings/base.py +0 -0
  20. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/evaluation/__init__.py +0 -0
  21. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/evaluation/context_similarity.py +0 -0
  22. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/flows/__init__.py +0 -0
  23. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/flows/ingestion_flow.py +0 -0
  24. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/llms/__init__.py +0 -0
  25. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/monitors/__init__.py +0 -0
  26. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/observers/__init__.py +0 -0
  27. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/observers/base.py +0 -0
  28. {beekeeper_core-1.0.4/beekeeper/core/monitors → beekeeper_core-1.0.12/beekeeper/core/observers}/types.py +0 -0
  29. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/prompts/__init__.py +0 -0
  30. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/prompts/utils.py +0 -0
  31. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/readers/__init__.py +0 -0
  32. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/readers/base.py +0 -0
  33. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/readers/directory.py +0 -0
  34. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/schema.py +0 -0
  35. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/text_chunkers/__init__.py +0 -0
  36. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/text_chunkers/base.py +0 -0
  37. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/text_chunkers/semantic.py +0 -0
  38. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/text_chunkers/sentence.py +0 -0
  39. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/text_chunkers/token.py +0 -0
  40. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/text_chunkers/utils.py +0 -0
  41. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/tools/__init__.py +0 -0
  42. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/tools/base.py +0 -0
  43. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/utils/pairwise.py +0 -0
  44. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/vector_stores/__init__.py +0 -0
  45. {beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/vector_stores/base.py +0 -0

{beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: beekeeper-core
- Version: 1.0.4
+ Version: 1.0.12
  Summary: Load any data in one line of code and connect with AI applications
  Project-URL: Repository, https://github.com/beekeeper-ai/beekeeper
  Author-email: Leonardo Furnielis <leonardofurnielis@outlook.com>
@@ -9,14 +9,14 @@ Keywords: AI,LLM,QA,RAG,data,observability,retrieval,semantic-search
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
  Classifier: Topic :: Software Development :: Libraries :: Application Frameworks
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
- Requires-Python: <4.0,>=3.10
- Requires-Dist: deprecated<2.0.0,>=1.2.18
+ Requires-Python: <3.14,>=3.11
+ Requires-Dist: deprecated<2.0.0,>=1.3.1
  Requires-Dist: nltk<4.0.0,>=3.9.2
  Requires-Dist: numpy<1.27.0,>=1.26.4
- Requires-Dist: pydantic<3.0.0,>=2.11.10
- Requires-Dist: tiktoken<0.10.0,>=0.9.0
+ Requires-Dist: pydantic<3.0.0,>=2.12.5
+ Requires-Dist: tiktoken<0.13.0,>=0.12.0
  Provides-Extra: dev
- Requires-Dist: ruff>=0.11.13; extra == 'dev'
+ Requires-Dist: ruff>=0.14.9; extra == 'dev'
  Description-Content-Type: text/markdown
 
  # Beekeeper Core

beekeeper_core-1.0.12/beekeeper/core/guardrails/__init__.py
@@ -0,0 +1,4 @@
+ from beekeeper.core.guardrails.base import BaseGuardrail
+ from beekeeper.core.guardrails.types import GuardrailResponse
+
+ __all__ = ["BaseGuardrail", "GuardrailResponse"]

beekeeper_core-1.0.12/beekeeper/core/guardrails/base.py
@@ -0,0 +1,15 @@
+ from abc import ABC, abstractmethod
+
+ from beekeeper.core.guardrails.types import GuardrailResponse
+
+
+ class BaseGuardrail(ABC):
+     """Abstract base class defining the interface for guardrails."""
+
+     @classmethod
+     def class_name(cls) -> str:
+         return "BaseGuardrail"
+
+     @abstractmethod
+     def enforce(self, text: str, direction: str) -> GuardrailResponse:
+         """Runs policy enforcement for the specified guardrail."""

beekeeper_core-1.0.12/beekeeper/core/guardrails/types.py
@@ -0,0 +1,13 @@
+ from typing import Any, Optional
+
+ from pydantic import BaseModel, Field
+
+
+ class GuardrailResponse(BaseModel):
+     """Guardrail response."""
+
+     text: str = Field(..., description="Generated text response")
+     action: Optional[str] = Field(
+         default=None, description="Action taken by the guardrail"
+     )
+     raw: Optional[Any] = Field(default=None)
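
The three new files above make up the guardrails module: BaseGuardrail defines the interface and GuardrailResponse the return shape. A minimal sketch of how a subclass might look; the KeywordGuardrail below and its blocked-word list are hypothetical, not part of the package:

    from beekeeper.core.guardrails import BaseGuardrail, GuardrailResponse

    class KeywordGuardrail(BaseGuardrail):
        """Hypothetical guardrail that redacts blocked keywords."""

        BLOCKED = {"secret"}

        def enforce(self, text: str, direction: str) -> GuardrailResponse:
            # `direction` presumably distinguishes input vs. output checks.
            cleaned = " ".join(
                "***" if word.lower() in self.BLOCKED else word
                for word in text.split()
            )
            action = "redacted" if cleaned != text else None
            return GuardrailResponse(text=cleaned, action=action)

    guard = KeywordGuardrail()
    print(guard.enforce("the secret plan", direction="input").action)  # redacted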

{beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/llms/base.py
@@ -1,35 +1,40 @@
  from abc import ABC, abstractmethod
- from typing import Any, List, Optional
+ from typing import Any, Optional
 
  from beekeeper.core.llms.types import ChatMessage, ChatResponse, GenerateResponse
  from beekeeper.core.monitors import BaseMonitor
  from pydantic import BaseModel
 
 
- class BaseLLM(ABC, BaseModel):
+ class BaseLLM(BaseModel, ABC):
      """Abstract base class defining the interface for LLMs."""
 
      model_config = {"arbitrary_types_allowed": True}
+     model: str
      callback_manager: Optional[BaseMonitor] = None
 
      @classmethod
      def class_name(cls) -> str:
          return "BaseLLM"
 
-     def convert_chat_messages(self, messages: List[ChatMessage]) -> List[dict]:
-         """Convert ChatMessage to LLM message dict format."""
-         return [message.model_dump() for message in messages]
+     def text_completion(self, prompt: str, **kwargs: Any) -> str:
+         """
+         Generates a text completion for the LLM, using OpenAI's standard endpoint (/completions).
+
+         Args:
+             prompt (str): The input prompt to generate a completion for.
+             **kwargs (Any): Additional keyword arguments to customize the LLM completion request.
+         """
+         response = self.completion(prompt=prompt, **kwargs)
+
+         return response.text
 
      @abstractmethod
      def completion(self, prompt: str, **kwargs: Any) -> GenerateResponse:
          """Generates a completion for LLM."""
 
-     @abstractmethod
-     def text_completion(self, prompt: str, **kwargs: Any) -> str:
-         """Generates a text completion for LLM."""
-
      @abstractmethod
      def chat_completion(
-         self, messages: List[ChatMessage], **kwargs: Any
+         self, messages: list[ChatMessage | dict], **kwargs: Any
      ) -> ChatResponse:
          """Generates a chat completion for LLM."""

{beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/llms/decorators.py
@@ -5,12 +5,13 @@ from logging import getLogger
  from typing import Callable
 
  from beekeeper.core.llms.types import ChatMessage
- from beekeeper.core.observers.types import PayloadRecord
+ from beekeeper.core.monitors.types import PayloadRecord
+ from beekeeper.core.prompts.utils import extract_template_vars
 
  logger = getLogger(__name__)
 
 
- def llm_chat_observer() -> Callable:
+ def llm_chat_monitor() -> Callable:
      """
      Decorator to wrap a method with llm handler logic.
      Looks for observability instances in `self.callback_manager`.
@@ -54,16 +55,29 @@ def llm_chat_observer() -> Callable:
                      system_messages[0].content if system_messages else None
                  )
 
+                 # Extract template variable values from the prompt template if available
+                 template_var_values = (
+                     extract_template_vars(
+                         callback_manager_fns.prompt_template.template,
+                         (system_message or ""),
+                     )
+                     if callback_manager_fns.prompt_template
+                     else {}
+                 )
+
                  callback = callback_manager_fns(
                      payload=PayloadRecord(
-                         input_text=(system_message or "") + last_user_message,
+                         system_prompt=(system_message or ""),
+                         input_text=last_user_message,
+                         prompt_variables=list(template_var_values.keys()),
+                         prompt_variable_values=template_var_values,
                          generated_text=llm_return_val.message.content,
-                         generated_token_count=llm_return_val.raw["usage"][
-                             "completion_tokens"
-                         ],
                          input_token_count=llm_return_val.raw["usage"][
                              "prompt_tokens"
                          ],
+                         generated_token_count=llm_return_val.raw["usage"][
+                             "completion_tokens"
+                         ],
                          response_time=response_time,
                      )
                  )
@@ -72,7 +86,7 @@ def llm_chat_observer() -> Callable:
                      asyncio.run(callback)
 
              except Exception as e:
-                 logger.error(f"Observability callback error: {e}")
+                 logger.error(f"Observability callback: {e}")
 
          threading.Thread(target=async_callback_thread).start()
 
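
The monitor callback now records the system prompt separately and captures prompt-template variables via extract_template_vars from beekeeper/core/prompts/utils.py, a file unchanged in this diff, so its implementation is not shown. Judging only from the call site, a plausible sketch of its behavior (an assumption, not the package's actual code) is a template-to-regex match that recovers each {variable}'s value from the rendered prompt:

    import re

    def extract_template_vars(template: str, text: str) -> dict[str, str]:
        """Hypothetical sketch: map each {var} in `template` to its value in `text`."""
        names = re.findall(r"{(\w+)}", template)
        pattern = re.escape(template)
        for name in names:
            # Replace the escaped "{name}" placeholder with a named capture group.
            pattern = pattern.replace(re.escape("{" + name + "}"), f"(?P<{name}>.*?)", 1)
        match = re.fullmatch(pattern, text, flags=re.DOTALL)
        return dict(match.groupdict()) if match else {}

    print(extract_template_vars("You are {name}.", "You are Beekeeper."))
    # {'name': 'Beekeeper'}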

beekeeper_core-1.0.12/beekeeper/core/llms/types.py
@@ -0,0 +1,57 @@
+ from enum import Enum
+ from typing import Any, Optional
+
+ from pydantic import BaseModel, Field
+
+
+ class MessageRole(str, Enum):
+     ASSISTANT = "assistant"
+     SYSTEM = "system"
+     USER = "user"
+     TOOL = "tool"
+
+
+ class ChatMessage(BaseModel):
+     """Chat message."""
+
+     model_config = {"use_enum_values": True}
+     role: MessageRole = Field(default=MessageRole.USER)
+     content: Optional[str] = Field(default=None)
+
+     def to_dict(self) -> dict:
+         """Convert ChatMessage to dict."""
+         return self.model_dump(exclude_none=True)
+
+     @classmethod
+     def from_value(cls, value: dict) -> "ChatMessage":
+         if value is None:
+             raise ValueError("Invalid 'ChatMessage', cannot be None")
+
+         if isinstance(value, cls):
+             return value
+
+         if isinstance(value, dict):
+             try:
+                 return cls.model_validate(value)
+             except Exception as e:
+                 raise ValueError(
+                     "Invalid 'ChatMessage' dict. Received: '{}'.".format(e)
+                 )
+
+         raise TypeError(
+             f"Invalid 'ChatMessage' type. Expected dict or ChatMessage, but received {type(value).__name__}."
+         )
+
+
+ class GenerateResponse(BaseModel):
+     """Generate response."""
+
+     text: str = Field(..., description="Generated text response")
+     raw: Optional[Any] = Field(default=None)
+
+
+ class ChatResponse(BaseModel):
+     """Chat completion response."""
+
+     message: ChatMessage
+     raw: Optional[Any] = None
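
Compared with the deleted 1.0.4 module (the last file in this diff), the rebuilt types.py adds the to_dict/from_value round-trip helpers that let chat_completion accept either form. A quick illustration of the intended use:

    from beekeeper.core.llms.types import ChatMessage

    msg = ChatMessage.from_value({"role": "user", "content": "Hi"})
    print(msg.role)       # 'user' (a plain str, since use_enum_values is set)
    print(msg.to_dict())  # {'role': 'user', 'content': 'Hi'}

    ChatMessage.from_value({"role": "narrator", "content": "Hi"})  # ValueError: invalid role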

{beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/monitors/base.py
@@ -17,7 +17,7 @@ class PromptMonitor(BaseMonitor):
      """Abstract base class defining the interface for prompt observability."""
 
      def __init__(self, prompt_template: Optional[PromptTemplate] = None) -> None:
-         self.prompt_template = prompt_template
+         self.prompt_template = PromptTemplate.from_value(prompt_template)
 
      @classmethod
      def class_name(cls) -> str:

beekeeper_core-1.0.12/beekeeper/core/monitors/types.py
@@ -0,0 +1,14 @@
+ from pydantic import BaseModel
+
+
+ class PayloadRecord(BaseModel):
+     """Payload record."""
+
+     system_prompt: str | None = None
+     input_text: str
+     prompt_variables: list[str] | None = None
+     prompt_variable_values: dict[str, str] | None = None
+     generated_text: str
+     input_token_count: int
+     generated_token_count: int
+     response_time: int
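
For reference, a record built by the new decorator code would look roughly like this; all field values below are invented for illustration, and the unit of response_time is not documented in the diff:

    from beekeeper.core.monitors.types import PayloadRecord

    record = PayloadRecord(
        system_prompt="You are Beekeeper.",
        input_text="What is retrieval-augmented generation?",
        prompt_variables=["name"],
        prompt_variable_values={"name": "Beekeeper"},
        generated_text="RAG combines retrieval with generation...",
        input_token_count=24,
        generated_token_count=58,
        response_time=850,  # presumably milliseconds
    )
    print(record.model_dump())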

{beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/beekeeper/core/prompts/base.py
@@ -22,6 +22,18 @@ class PromptTemplate(BaseModel):
      def __init__(self, template: str):
          super().__init__(template=template)
 
+     @classmethod
+     def from_value(cls, value: str) -> "PromptTemplate":
+         if isinstance(value, cls):
+             return value
+
+         if isinstance(value, str):
+             return cls(value)
+
+         raise TypeError(
+             f"Invalid type for parameter 'prompt_template'. Expected str or PromptTemplate, but received {type(value).__name__}."
+         )
+
      def format(self, **kwargs):
          """
          Formats the template using the provided dynamic variables.
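
This coercion is what PromptMonitor.__init__ now relies on (see the monitors/base.py hunk above). Note that from_value raises on anything that is not a str or PromptTemplate, including None, so the Optional default in PromptMonitor appears to assume a template is always supplied. A short illustration:

    from beekeeper.core.prompts.base import PromptTemplate

    t1 = PromptTemplate.from_value("You are {name}.")  # str -> PromptTemplate
    t2 = PromptTemplate.from_value(t1)                 # instances pass through
    assert t1.template == t2.template
    PromptTemplate.from_value(None)  # TypeError: Expected str or PromptTemplate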

{beekeeper_core-1.0.4 → beekeeper_core-1.0.12}/pyproject.toml
@@ -4,12 +4,12 @@ build-backend = "hatchling.build"
 
  [project]
  name = "beekeeper-core"
- version = "1.0.4"
+ version = "1.0.12"
  description = "Load any data in one line of code and connect with AI applications"
  authors = [{ name = "Leonardo Furnielis", email = "leonardofurnielis@outlook.com" }]
  license = { text = "Apache-2.0" }
  readme = "README.md"
- requires-python = ">=3.10,<4.0"
+ requires-python = ">=3.11,<3.14"
  keywords = ["AI", "LLM", "QA", "RAG", "data", "observability", "retrieval", "semantic-search"]
  classifiers = [
      "Topic :: Scientific/Engineering :: Artificial Intelligence",
@@ -17,16 +17,16 @@ classifiers = [
      "Topic :: Software Development :: Libraries :: Application Frameworks",
  ]
  dependencies = [
-     "deprecated>=1.2.18,<2.0.0",
+     "deprecated>=1.3.1,<2.0.0",
      "nltk>=3.9.2,<4.0.0",
      "numpy>=1.26.4,<1.27.0",
-     "pydantic>=2.11.10,<3.0.0",
-     "tiktoken>=0.9.0,<0.10.0",
+     "pydantic>=2.12.5,<3.0.0",
+     "tiktoken>=0.12.0,<0.13.0",
  ]
 
  [project.optional-dependencies]
  dev = [
-     "ruff>=0.11.13",
+     "ruff>=0.14.9",
  ]
 
  [project.urls]

beekeeper_core-1.0.4/beekeeper/core/llms/types.py (removed)
@@ -1,33 +0,0 @@
- from enum import Enum
- from typing import Any, Optional
-
- from pydantic import BaseModel, Field
-
-
- class MessageRole(str, Enum):
-     ASSISTANT = "assistant"
-     SYSTEM = "system"
-     USER = "user"
-     TOOL = "tool"
-
-
- class ChatMessage(BaseModel):
-     """Chat message."""
-
-     model_config = {"use_enum_values": True}
-     role: MessageRole = Field(default=MessageRole.USER)
-     content: Optional[str] = Field(default=None)
-
-
- class GenerateResponse(BaseModel):
-     """Generate response."""
-
-     text: str = Field(..., description="Generated text response")
-     raw: Optional[Any] = Field(default=None)
-
-
- class ChatResponse(BaseModel):
-     """Chat completion response."""
-
-     message: ChatMessage
-     raw: Optional[Any] = None