crewplus 0.2.19__tar.gz → 0.2.21__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crewplus might be problematic; review the release details below before upgrading.

Files changed (22):
  1. {crewplus-0.2.19 → crewplus-0.2.21}/PKG-INFO +1 -1
  2. crewplus-0.2.21/crewplus/services/__init__.py +13 -0
  3. crewplus-0.2.21/crewplus/services/azure_chat_model.py +201 -0
  4. {crewplus-0.2.19 → crewplus-0.2.21}/crewplus/services/model_load_balancer.py +4 -3
  5. {crewplus-0.2.19 → crewplus-0.2.21}/pyproject.toml +1 -1
  6. crewplus-0.2.19/crewplus/services/__init__.py +0 -6
  7. {crewplus-0.2.19 → crewplus-0.2.21}/LICENSE +0 -0
  8. {crewplus-0.2.19 → crewplus-0.2.21}/README.md +0 -0
  9. {crewplus-0.2.19 → crewplus-0.2.21}/crewplus/__init__.py +0 -0
  10. {crewplus-0.2.19 → crewplus-0.2.21}/crewplus/services/gemini_chat_model.py +0 -0
  11. {crewplus-0.2.19 → crewplus-0.2.21}/crewplus/services/init_services.py +0 -0
  12. {crewplus-0.2.19 → crewplus-0.2.21}/crewplus/utils/__init__.py +0 -0
  13. {crewplus-0.2.19 → crewplus-0.2.21}/crewplus/utils/schema_action.py +0 -0
  14. {crewplus-0.2.19 → crewplus-0.2.21}/crewplus/utils/schema_document_updater.py +0 -0
  15. {crewplus-0.2.19 → crewplus-0.2.21}/crewplus/vectorstores/milvus/__init__.py +0 -0
  16. {crewplus-0.2.19 → crewplus-0.2.21}/crewplus/vectorstores/milvus/milvus_schema_manager.py +0 -0
  17. {crewplus-0.2.19 → crewplus-0.2.21}/crewplus/vectorstores/milvus/schema_milvus.py +0 -0
  18. {crewplus-0.2.19 → crewplus-0.2.21}/crewplus/vectorstores/milvus/vdb_service.py +0 -0
  19. {crewplus-0.2.19 → crewplus-0.2.21}/docs/GeminiChatModel.md +0 -0
  20. {crewplus-0.2.19 → crewplus-0.2.21}/docs/ModelLoadBalancer.md +0 -0
  21. {crewplus-0.2.19 → crewplus-0.2.21}/docs/VDBService.md +0 -0
  22. {crewplus-0.2.19 → crewplus-0.2.21}/docs/index.md +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: crewplus
3
- Version: 0.2.19
3
+ Version: 0.2.21
4
4
  Summary: Base services for CrewPlus AI applications
5
5
  Author-Email: Tim Liu <tim@opsmateai.com>
6
6
  License: MIT
@@ -0,0 +1,13 @@
1
"""Public API of the ``crewplus.services`` package."""

from .gemini_chat_model import GeminiChatModel
from .init_services import init_load_balancer, get_model_balancer
from .model_load_balancer import ModelLoadBalancer
from .azure_chat_model import TracedAzureChatOpenAI

# NOTE: "init_services" was dropped from __all__ — no object of that name is
# imported above, so listing it made `from crewplus.services import *` raise
# AttributeError. Only names actually bound in this module are exported.
__all__ = [
    "GeminiChatModel",
    "init_load_balancer",
    "get_model_balancer",
    "ModelLoadBalancer",
    "TracedAzureChatOpenAI",
]
@@ -0,0 +1,201 @@
1
+ import os
2
+ import logging
3
+ from typing import Any, Optional
4
+
5
+ from langchain_openai.chat_models.azure import AzureChatOpenAI
6
+ from pydantic import Field
7
+
8
+ # Langfuse imports with graceful fallback
9
+ try:
10
+ from langfuse.langchain import CallbackHandler as LangfuseCallbackHandler
11
+ LANGFUSE_AVAILABLE = True
12
+ except ImportError:
13
+ LANGFUSE_AVAILABLE = False
14
+ LangfuseCallbackHandler = None
15
+
16
class TracedAzureChatOpenAI(AzureChatOpenAI):
    """AzureChatOpenAI wrapper that transparently attaches Langfuse tracing.

    Langfuse tracing is auto-enabled when the ``LANGFUSE_PUBLIC_KEY`` and
    ``LANGFUSE_SECRET_KEY`` environment variables are set (``LANGFUSE_HOST``
    is optional and defaults to https://cloud.langfuse.com). Tracing can be
    forced on/off via ``enable_langfuse``, and disabled for a single call
    with ``config={"metadata": {"langfuse_disabled": True}}``. Session and
    user tracking are supplied per call through
    ``config["metadata"]["langfuse_session_id"]`` / ``["langfuse_user_id"]``.

    Attributes:
        logger (Optional[logging.Logger]): Optional logger; a default
            stream-handler logger is created when not provided.
        enable_langfuse (Optional[bool]): Enable Langfuse tracing
            (auto-detected from env vars when None).
        langfuse_handler: Runtime Langfuse callback handler, or ``None``
            when tracing is unavailable, disabled, or failed to initialize.

    Example:
        .. code-block:: python

            import os
            os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-lf-..."
            os.environ["LANGFUSE_SECRET_KEY"] = "sk-lf-..."

            from crewplus.services.azure_chat_model import TracedAzureChatOpenAI

            model = TracedAzureChatOpenAI(
                azure_deployment="your-deployment",
                api_version="2024-05-01-preview",
            )

            # Automatically traced when the env vars are set.
            response = model.invoke("Hello, how are you?")

            # Session/user tracking per call.
            response = model.invoke(
                "What is AI?",
                config={"metadata": {
                    "langfuse_session_id": "chat-session-123",
                    "langfuse_user_id": "user-456",
                }},
            )

            # Opt out of tracing for a single call.
            response = model.invoke(
                "Hello without tracing",
                config={"metadata": {"langfuse_disabled": True}},
            )

            # Async streaming works the same way:
            #   async for chunk in model.astream(messages): ...
    """
    logger: Optional[logging.Logger] = Field(default=None, description="Optional logger instance", exclude=True)
    enable_langfuse: Optional[bool] = Field(default=None, description="Enable Langfuse tracing (auto-detect if None)")

    # Runtime callback handler; excluded from serialization. None when
    # langfuse is not installed, tracing is disabled, or init failed.
    langfuse_handler: Optional[LangfuseCallbackHandler] = Field(default=None, exclude=True)

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)

        # Fall back to a per-class logger when the caller did not supply one.
        if self.logger is None:
            self.logger = logging.getLogger(f"{self.__class__.__module__}.{self.__class__.__name__}")
            if not self.logger.handlers:
                self.logger.addHandler(logging.StreamHandler())
                self.logger.setLevel(logging.INFO)

        self._initialize_langfuse()

    def _initialize_langfuse(self) -> None:
        """Create the Langfuse handler when tracing is enabled and available."""
        if not LANGFUSE_AVAILABLE:
            # Only warn when the user explicitly asked for tracing.
            if self.enable_langfuse is True:
                self.logger.warning("Langfuse is not installed. Install with: pip install langfuse")
            return

        # Auto-detect: enable only when Langfuse credentials are present.
        if self.enable_langfuse is None:
            langfuse_env_vars = ["LANGFUSE_PUBLIC_KEY", "LANGFUSE_SECRET_KEY"]
            self.enable_langfuse = any(os.getenv(var) for var in langfuse_env_vars)

        if not self.enable_langfuse:
            return

        try:
            self.langfuse_handler = LangfuseCallbackHandler()
            self.logger.info(f"Langfuse tracing enabled for TracedAzureChatOpenAI with deployment: {self.deployment_name}")
        except Exception as e:
            self.logger.warning(f"Failed to initialize Langfuse: {e}")
            self.langfuse_handler = None

    def _inject_langfuse(self, config: Optional[dict]) -> dict:
        """Return a config dict with the Langfuse callback attached.

        Always returns a shallow copy so the caller's ``config`` (and its
        ``callbacks`` list) are never mutated — the previous ``stream`` /
        ``astream`` implementations wrote ``config["callbacks"]`` into the
        caller's dict in place. The handler is skipped when tracing is off,
        already registered, or disabled per call via
        ``metadata["langfuse_disabled"]``.
        """
        config = dict(config) if config else {}
        if not self.langfuse_handler:
            return config
        if config.get("metadata", {}).get("langfuse_disabled"):
            return config
        callbacks = config.get("callbacks", [])
        if not any(isinstance(cb, LangfuseCallbackHandler) for cb in callbacks):
            config["callbacks"] = list(callbacks) + [self.langfuse_handler]
        return config

    @staticmethod
    def _with_usage(kwargs: dict) -> dict:
        """Return a copy of ``kwargs`` with ``stream_options.include_usage`` set.

        Usage data is needed for Langfuse token/cost reporting on streamed
        responses. Copies both dicts instead of mutating the caller's.
        """
        stream_options = dict(kwargs.get("stream_options") or {})
        stream_options["include_usage"] = True
        return {**kwargs, "stream_options": stream_options}

    def invoke(self, input, config=None, **kwargs):
        """Synchronous invoke with the Langfuse callback added automatically."""
        return super().invoke(input, config=self._inject_langfuse(config), **kwargs)

    async def ainvoke(self, input, config=None, **kwargs):
        """Asynchronous invoke with the Langfuse callback added automatically."""
        return await super().ainvoke(input, config=self._inject_langfuse(config), **kwargs)

    def stream(self, input, config=None, **kwargs):
        """Synchronous streaming with Langfuse callback and usage metadata."""
        # include_usage is requested unconditionally (as before), even when
        # tracing is disabled for this particular call.
        yield from super().stream(input, config=self._inject_langfuse(config), **self._with_usage(kwargs))

    async def astream(self, input, config=None, **kwargs):
        """Asynchronous streaming with Langfuse callback and usage metadata."""
        async for chunk in super().astream(input, config=self._inject_langfuse(config), **self._with_usage(kwargs)):
            yield chunk
@@ -3,8 +3,9 @@ import random
3
3
  import logging
4
4
  from typing import Dict, List, Optional, Union
5
5
  from collections import defaultdict
6
- from langchain_openai import AzureChatOpenAI, ChatOpenAI, AzureOpenAIEmbeddings
6
+ from langchain_openai import ChatOpenAI, AzureOpenAIEmbeddings
7
7
  from .gemini_chat_model import GeminiChatModel
8
+ from .azure_chat_model import TracedAzureChatOpenAI
8
9
 
9
10
 
10
11
  class ModelLoadBalancer:
@@ -30,7 +31,7 @@ class ModelLoadBalancer:
30
31
  self.config_data = config_data
31
32
  self.logger = logger or logging.getLogger(__name__)
32
33
  self.models_config: List[Dict] = []
33
- self.models: Dict[int, Union[AzureChatOpenAI, ChatOpenAI, AzureOpenAIEmbeddings, GeminiChatModel]] = {}
34
+ self.models: Dict[int, Union[TracedAzureChatOpenAI, ChatOpenAI, AzureOpenAIEmbeddings, GeminiChatModel]] = {}
34
35
  self._initialize_state()
35
36
  self._config_loaded = False # Flag to check if config is loaded
36
37
 
@@ -131,7 +132,7 @@ class ModelLoadBalancer:
131
132
  kwargs['temperature'] = model_config['temperature']
132
133
  if model_config.get('deployment_name') == 'o1-mini':
133
134
  kwargs['disable_streaming'] = True
134
- return AzureChatOpenAI(**kwargs)
135
+ return TracedAzureChatOpenAI(**kwargs)
135
136
  elif provider == 'openai':
136
137
  kwargs = {
137
138
  'openai_api_key': model_config['api_key']
@@ -6,7 +6,7 @@ build-backend = "pdm.backend"
6
6
 
7
7
  [project]
8
8
  name = "crewplus"
9
- version = "0.2.19"
9
+ version = "0.2.21"
10
10
  description = "Base services for CrewPlus AI applications"
11
11
  authors = [
12
12
  { name = "Tim Liu", email = "tim@opsmateai.com" },
@@ -1,6 +0,0 @@
1
- from .gemini_chat_model import GeminiChatModel
2
- from .init_services import init_load_balancer, get_model_balancer
3
- from .model_load_balancer import ModelLoadBalancer
4
-
5
-
6
- __all__ = ["GeminiChatModel", "init_load_balancer", "get_model_balancer", "ModelLoadBalancer"]
File without changes
File without changes
File without changes
File without changes