crewplus 0.2.40__tar.gz → 0.2.42__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crewplus might be problematic; review the file changes listed below for details.

Files changed (24) hide show
  1. {crewplus-0.2.40 → crewplus-0.2.42}/PKG-INFO +1 -1
  2. crewplus-0.2.42/crewplus/callbacks/__init__.py +1 -0
  3. crewplus-0.2.42/crewplus/callbacks/async_langfuse_handler.py +99 -0
  4. {crewplus-0.2.40 → crewplus-0.2.42}/crewplus/services/azure_chat_model.py +8 -5
  5. {crewplus-0.2.40 → crewplus-0.2.42}/crewplus/services/tracing_manager.py +22 -7
  6. {crewplus-0.2.40 → crewplus-0.2.42}/pyproject.toml +1 -1
  7. {crewplus-0.2.40 → crewplus-0.2.42}/LICENSE +0 -0
  8. {crewplus-0.2.40 → crewplus-0.2.42}/README.md +0 -0
  9. {crewplus-0.2.40 → crewplus-0.2.42}/crewplus/__init__.py +0 -0
  10. {crewplus-0.2.40 → crewplus-0.2.42}/crewplus/services/__init__.py +0 -0
  11. {crewplus-0.2.40 → crewplus-0.2.42}/crewplus/services/gemini_chat_model.py +0 -0
  12. {crewplus-0.2.40 → crewplus-0.2.42}/crewplus/services/init_services.py +0 -0
  13. {crewplus-0.2.40 → crewplus-0.2.42}/crewplus/services/model_load_balancer.py +0 -0
  14. {crewplus-0.2.40 → crewplus-0.2.42}/crewplus/utils/__init__.py +0 -0
  15. {crewplus-0.2.40 → crewplus-0.2.42}/crewplus/utils/schema_action.py +0 -0
  16. {crewplus-0.2.40 → crewplus-0.2.42}/crewplus/utils/schema_document_updater.py +0 -0
  17. {crewplus-0.2.40 → crewplus-0.2.42}/crewplus/vectorstores/milvus/__init__.py +0 -0
  18. {crewplus-0.2.40 → crewplus-0.2.42}/crewplus/vectorstores/milvus/milvus_schema_manager.py +0 -0
  19. {crewplus-0.2.40 → crewplus-0.2.42}/crewplus/vectorstores/milvus/schema_milvus.py +0 -0
  20. {crewplus-0.2.40 → crewplus-0.2.42}/crewplus/vectorstores/milvus/vdb_service.py +0 -0
  21. {crewplus-0.2.40 → crewplus-0.2.42}/docs/GeminiChatModel.md +0 -0
  22. {crewplus-0.2.40 → crewplus-0.2.42}/docs/ModelLoadBalancer.md +0 -0
  23. {crewplus-0.2.40 → crewplus-0.2.42}/docs/VDBService.md +0 -0
  24. {crewplus-0.2.40 → crewplus-0.2.42}/docs/index.md +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: crewplus
3
- Version: 0.2.40
3
+ Version: 0.2.42
4
4
  Summary: Base services for CrewPlus AI applications
5
5
  Author-Email: Tim Liu <tim@opsmateai.com>
6
6
  License: MIT
@@ -0,0 +1 @@
1
+ # This file makes the 'callbacks' directory a Python package.
@@ -0,0 +1,99 @@
1
+ # File: crewplus/callbacks/async_langfuse_handler.py
2
+ import asyncio
3
+ import contextvars
4
+ from contextlib import contextmanager
5
+ from typing import Any, Dict, List, Union
6
+
7
try:
    from langfuse.langchain import CallbackHandler as LangfuseCallbackHandler
    from langchain_core.callbacks import AsyncCallbackHandler
    from langchain_core.outputs import LLMResult
    LANGFUSE_AVAILABLE = True
except ImportError:
    # Optional-dependency fallbacks: keep this module importable (and the
    # handler class definable) when langfuse/langchain are not installed.
    LANGFUSE_AVAILABLE = False
    LangfuseCallbackHandler = None
    AsyncCallbackHandler = object
    # BUG FIX: LLMResult also needs a fallback. Method annotations such as
    # `response: LLMResult` are evaluated at class-definition time (there is
    # no `from __future__ import annotations` in this file), so without this
    # binding, merely importing the module raised NameError whenever
    # langchain was absent -- the exact situation this guard exists for.
    LLMResult = Any
16
+
17
+ # This token is a simple flag to indicate that we are in an async context.
18
+ # We use a context variable to make it available only within the async task.
19
+ _ASYNC_CONTEXT_TOKEN = "in_async_context"
20
+ in_async_context = contextvars.ContextVar(_ASYNC_CONTEXT_TOKEN, default=False)
21
+
22
+ @contextmanager
23
+ def async_context():
24
+ """A context manager to signal that we are in an async execution context."""
25
+ token = in_async_context.set(True)
26
+ try:
27
+ yield
28
+ finally:
29
+ in_async_context.reset(token)
30
+
31
class AsyncLangfuseCallbackHandler(AsyncCallbackHandler):
    """
    Wraps the synchronous LangfuseCallbackHandler to make it compatible with
    LangChain's async methods.

    Each async callback runs the synchronous handler's method in a worker
    thread via `asyncio.to_thread`, which copies the current `contextvars`
    context into that thread. This propagation is what avoids the
    `ValueError: <Token ...> was created in a different Context` raised by
    OpenTelemetry when tokens are reset from a different context.
    """

    def __init__(self, *args: Any, **kwargs: Any):
        """Construct the wrapped sync handler; raises ImportError if langfuse is missing."""
        if not LANGFUSE_AVAILABLE:
            raise ImportError("Langfuse is not available. Please install it with 'pip install langfuse'")
        self.sync_handler = LangfuseCallbackHandler(*args, **kwargs)

    def __getattr__(self, name: str) -> Any:
        # Delegate any other attribute access (handler config, trace state,
        # callbacks not wrapped below) to the sync handler.
        return getattr(self.sync_handler, name)

    async def on_chat_model_start(
        self, serialized: Dict[str, Any], messages: Any, **kwargs: Any
    ) -> None:
        # BUG FIX: the AsyncCallbackHandler base raises NotImplementedError
        # for on_chat_model_start, which made LangChain fall back to
        # on_llm_start with stringified prompts -- bypassing the sync
        # handler's structured chat-message recording. Delegate directly.
        await asyncio.to_thread(
            self.sync_handler.on_chat_model_start, serialized, messages, **kwargs
        )

    async def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        # WORKAROUND: LangChain's async implementation can sometimes pass a raw
        # string for prompts instead of a list. We wrap it in a list to ensure
        # compatibility with the synchronous handler.
        corrected_prompts = prompts if isinstance(prompts, list) else [prompts]
        await asyncio.to_thread(
            self.sync_handler.on_llm_start, serialized, corrected_prompts, **kwargs
        )

    # NOTE: the LLMResult annotation is quoted so the class can still be
    # defined when langchain is not installed (annotations are otherwise
    # evaluated at definition time in this file).
    async def on_llm_end(self, response: "LLMResult", **kwargs: Any) -> None:
        await asyncio.to_thread(
            self.sync_handler.on_llm_end, response, **kwargs
        )

    async def on_llm_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
        await asyncio.to_thread(
            self.sync_handler.on_llm_error, error, **kwargs
        )

    async def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **kwargs: Any) -> Any:
        await asyncio.to_thread(
            self.sync_handler.on_tool_start, serialized, input_str, **kwargs
        )

    async def on_tool_end(self, output: str, **kwargs: Any) -> Any:
        await asyncio.to_thread(
            self.sync_handler.on_tool_end, output, **kwargs
        )

    async def on_tool_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> Any:
        await asyncio.to_thread(
            self.sync_handler.on_tool_error, error, **kwargs
        )

    async def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any) -> Any:
        await asyncio.to_thread(
            self.sync_handler.on_chain_start, serialized, inputs, **kwargs
        )

    async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> Any:
        await asyncio.to_thread(
            self.sync_handler.on_chain_end, outputs, **kwargs
        )

    async def on_chain_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> Any:
        await asyncio.to_thread(
            self.sync_handler.on_chain_error, error, **kwargs
        )
@@ -5,6 +5,7 @@ from typing import Any, Optional
5
5
  from langchain_openai.chat_models.azure import AzureChatOpenAI
6
6
  from pydantic import Field
7
7
  from .tracing_manager import TracingManager, TracingContext
8
+ from ..callbacks.async_langfuse_handler import async_context
8
9
 
9
10
  class TracedAzureChatOpenAI(AzureChatOpenAI):
10
11
  """
@@ -106,8 +107,9 @@ class TracedAzureChatOpenAI(AzureChatOpenAI):
106
107
  return super().invoke(input, config=config, **kwargs)
107
108
 
108
109
  async def ainvoke(self, input, config=None, **kwargs):
109
- config = self._tracing_manager.add_callbacks_to_config(config)
110
- return await super().ainvoke(input, config=config, **kwargs)
110
+ with async_context():
111
+ config = self._tracing_manager.add_callbacks_to_config(config)
112
+ return await super().ainvoke(input, config=config, **kwargs)
111
113
 
112
114
  def stream(self, input, config=None, **kwargs):
113
115
  # Add stream_options to get usage data for Langfuse
@@ -124,6 +126,7 @@ class TracedAzureChatOpenAI(AzureChatOpenAI):
124
126
  stream_options["include_usage"] = True
125
127
  kwargs["stream_options"] = stream_options
126
128
 
127
- config = self._tracing_manager.add_callbacks_to_config(config)
128
- async for chunk in super().astream(input, config=config, **kwargs):
129
- yield chunk
129
+ with async_context():
130
+ config = self._tracing_manager.add_callbacks_to_config(config)
131
+ async for chunk in super().astream(input, config=config, **kwargs):
132
+ yield chunk
@@ -8,10 +8,13 @@ import logging
8
8
  # even if the langfuse library is not installed.
9
9
  try:
10
10
  from langfuse.langchain import CallbackHandler as LangfuseCallbackHandler
11
+ from ..callbacks.async_langfuse_handler import AsyncLangfuseCallbackHandler, in_async_context
11
12
  LANGFUSE_AVAILABLE = True
12
13
  except ImportError:
13
14
  LANGFUSE_AVAILABLE = False
14
15
  LangfuseCallbackHandler = None
16
+ AsyncLangfuseCallbackHandler = None
17
+ in_async_context = None
15
18
 
16
19
  class TracingContext(Protocol):
17
20
  """
@@ -65,7 +68,8 @@ class TracingManager:
65
68
  to the TracingContext protocol.
66
69
  """
67
70
  self.context = context
68
- self._handlers: List[Any] = []
71
+ self._sync_handlers: List[Any] = []
72
+ self._async_handlers: List[Any] = []
69
73
  self._initialize_handlers()
70
74
 
71
75
  def _initialize_handlers(self):
@@ -73,7 +77,8 @@ class TracingManager:
73
77
  Initializes all supported tracing handlers. This is the central point
74
78
  for adding new observability tools.
75
79
  """
76
- self._handlers = []
80
+ self._sync_handlers = []
81
+ self._async_handlers = []
77
82
  self._initialize_langfuse()
78
83
  # To add a new handler (e.g., Helicone), you would add a call to
79
84
  # self._initialize_helicone() here.
@@ -94,8 +99,14 @@ class TracingManager:
94
99
 
95
100
  if enable_langfuse:
96
101
  try:
97
- handler = LangfuseCallbackHandler()
98
- self._handlers.append(handler)
102
+ # Create both sync and async handlers. We'll pick one at runtime.
103
+ sync_handler = LangfuseCallbackHandler()
104
+ self._sync_handlers.append(sync_handler)
105
+
106
+ if AsyncLangfuseCallbackHandler:
107
+ async_handler = AsyncLangfuseCallbackHandler()
108
+ self._async_handlers.append(async_handler)
109
+
99
110
  self.context.logger.info(f"Langfuse tracing enabled for {self.context.get_model_identifier()}")
100
111
  except Exception as e:
101
112
  self.context.logger.warning(f"Failed to initialize Langfuse: {e}")
@@ -118,15 +129,19 @@ class TracingManager:
118
129
  if config is None:
119
130
  config = {}
120
131
 
132
+ # Decide which handlers to use based on the async context flag.
133
+ is_async = in_async_context.get() if in_async_context else False
134
+ handlers = self._async_handlers if is_async else self._sync_handlers
135
+
121
136
  # Respect a global disable flag for this specific call.
122
- if not self._handlers or config.get("metadata", {}).get("tracing_disabled"):
137
+ if not handlers or config.get("metadata", {}).get("tracing_disabled"):
123
138
  return config
124
139
 
125
140
  callbacks = config.get("callbacks")
126
141
 
127
142
  # Case 1: The 'callbacks' key holds a CallbackManager instance
128
143
  if hasattr(callbacks, 'add_handler') and hasattr(callbacks, 'handlers'):
129
- for handler in self._handlers:
144
+ for handler in handlers:
130
145
  if not any(isinstance(cb, type(handler)) for cb in callbacks.handlers):
131
146
  callbacks.add_handler(handler, inherit=True)
132
147
  return config # Return the original, now-mutated config
@@ -135,7 +150,7 @@ class TracingManager:
135
150
  current_callbacks = callbacks or []
136
151
  new_callbacks = list(current_callbacks)
137
152
 
138
- for handler in self._handlers:
153
+ for handler in handlers:
139
154
  if not any(isinstance(cb, type(handler)) for cb in new_callbacks):
140
155
  new_callbacks.append(handler)
141
156
 
@@ -6,7 +6,7 @@ build-backend = "pdm.backend"
6
6
 
7
7
  [project]
8
8
  name = "crewplus"
9
- version = "0.2.40"
9
+ version = "0.2.42"
10
10
  description = "Base services for CrewPlus AI applications"
11
11
  authors = [
12
12
  { name = "Tim Liu", email = "tim@opsmateai.com" },
File without changes
File without changes
File without changes
File without changes