crewplus 0.2.44__py3-none-any.whl → 0.2.47__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.

Potentially problematic release: this version of crewplus might be problematic.

@@ -2,20 +2,29 @@
  import asyncio
  import contextvars
  from contextlib import contextmanager
- from typing import Any, Dict, List, Union
+ from typing import Any, Dict, List, Union, Optional, Sequence
+ from uuid import UUID
 
  try:
      from langfuse.langchain import CallbackHandler as LangfuseCallbackHandler
      from langchain_core.callbacks import AsyncCallbackHandler
-     from langchain_core.outputs import LLMResult
+     from langchain_core.outputs import LLMResult, ChatGeneration
+     from langchain_core.messages import BaseMessage
+     from langchain.schema.agent import AgentAction, AgentFinish
+     from langchain.schema.document import Document
      LANGFUSE_AVAILABLE = True
  except ImportError:
      LANGFUSE_AVAILABLE = False
      LangfuseCallbackHandler = None
      AsyncCallbackHandler = object
+     # Define dummy types if langchain is not available
+     LLMResult = object
+     BaseMessage = object
+     AgentAction = object
+     AgentFinish = object
+     Document = object
+
 
- # This token is a simple flag to indicate that we are in an async context.
- # We use a context variable to make it available only within the async task.
  _ASYNC_CONTEXT_TOKEN = "in_async_context"
  in_async_context = contextvars.ContextVar(_ASYNC_CONTEXT_TOKEN, default=False)
 
@@ -30,13 +39,8 @@ def async_context():
 
  class AsyncLangfuseCallbackHandler(AsyncCallbackHandler):
      """
-     Wraps the synchronous LangfuseCallbackHandler to make it compatible with
-     LangChain's async methods.
-
-     This works by running the synchronous handler's methods in a separate thread
-     using `asyncio.to_thread`. This is crucial because `asyncio`'s default
-     executor can correctly propagate `contextvars`, which solves the
-     `ValueError: <Token ...> was created in a different Context` from OpenTelemetry.
+     Wraps the synchronous LangfuseCallbackHandler to make it fully compatible with
+     LangChain's async methods by handling all relevant events.
      """
      def __init__(self, *args: Any, **kwargs: Any):
          if not LANGFUSE_AVAILABLE:
@@ -44,64 +48,115 @@ class AsyncLangfuseCallbackHandler(AsyncCallbackHandler):
          self.sync_handler = LangfuseCallbackHandler(*args, **kwargs)
 
      def __getattr__(self, name: str) -> Any:
-         # Delegate any other attribute access to the sync handler
          return getattr(self.sync_handler, name)
 
+     # LLM Events
      async def on_llm_start(
-         self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
+         self, serialized: Dict[str, Any], prompts: List[str], *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any
      ) -> None:
-         # --- DEBUGGING: Inspect the arguments from LangChain ---
-         print("--- [DEBUG] AsyncLangfuseCallbackHandler.on_llm_start ---")
-         print(f"Received prompts type: {type(prompts)}")
-         print(f"Received prompts value: {prompts!r}") # Using !r to see quotes if it's a string
-         print("----------------------------------------------------------")
-         # --- END DEBUGGING ---
-
-         # WORKAROUND: LangChain's async implementation can sometimes pass a raw
-         # string for prompts instead of a list. We wrap it in a list to ensure
-         # compatibility with the synchronous handler.
          corrected_prompts = prompts if isinstance(prompts, list) else [prompts]
-
          await asyncio.to_thread(
-             self.sync_handler.on_llm_start, serialized, corrected_prompts, **kwargs
+             self.sync_handler.on_llm_start, serialized, corrected_prompts, run_id=run_id, parent_run_id=parent_run_id, **kwargs
+         )
+
+     async def on_chat_model_start(
+         self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any
+     ) -> Any:
+         await asyncio.to_thread(
+             self.sync_handler.on_chat_model_start, serialized, messages, run_id=run_id, parent_run_id=parent_run_id, **kwargs
+         )
+
+     async def on_llm_end(
+         self, response: LLMResult, *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any
+     ) -> None:
+         await asyncio.to_thread(
+             self.sync_handler.on_llm_end, response, run_id=run_id, parent_run_id=parent_run_id, **kwargs
+         )
+
+     async def on_llm_error(
+         self, error: Union[Exception, KeyboardInterrupt], *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any
+     ) -> None:
+         await asyncio.to_thread(
+             self.sync_handler.on_llm_error, error, run_id=run_id, parent_run_id=parent_run_id, **kwargs
+         )
+
+     # Chain Events
+     async def on_chain_start(
+         self, serialized: Dict[str, Any], inputs: Dict[str, Any], *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any
+     ) -> Any:
+         await asyncio.to_thread(
+             self.sync_handler.on_chain_start, serialized, inputs, run_id=run_id, parent_run_id=parent_run_id, **kwargs
          )
 
-     async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
+     async def on_chain_end(
+         self, outputs: Dict[str, Any], *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any
+     ) -> Any:
          await asyncio.to_thread(
-             self.sync_handler.on_llm_end, response, **kwargs
+             self.sync_handler.on_chain_end, outputs, run_id=run_id, parent_run_id=parent_run_id, **kwargs
          )
-
-     async def on_llm_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
+
+     async def on_chain_error(
+         self, error: Union[Exception, KeyboardInterrupt], *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any
+     ) -> Any:
          await asyncio.to_thread(
-             self.sync_handler.on_llm_error, error, **kwargs
+             self.sync_handler.on_chain_error, error, run_id=run_id, parent_run_id=parent_run_id, **kwargs
          )
-
-     async def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **kwargs: Any) -> Any:
+
+     # Tool Events
+     async def on_tool_start(
+         self, serialized: Dict[str, Any], input_str: str, *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any
+     ) -> Any:
          await asyncio.to_thread(
-             self.sync_handler.on_tool_start, serialized, input_str, **kwargs
+             self.sync_handler.on_tool_start, serialized, input_str, run_id=run_id, parent_run_id=parent_run_id, **kwargs
          )
 
-     async def on_tool_end(self, output: str, **kwargs: Any) -> Any:
+     async def on_tool_end(
+         self, output: str, *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any
+     ) -> Any:
          await asyncio.to_thread(
-             self.sync_handler.on_tool_end, output, **kwargs
+             self.sync_handler.on_tool_end, output, run_id=run_id, parent_run_id=parent_run_id, **kwargs
          )
 
-     async def on_tool_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> Any:
+     async def on_tool_error(
+         self, error: Union[Exception, KeyboardInterrupt], *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any
+     ) -> Any:
          await asyncio.to_thread(
-             self.sync_handler.on_tool_error, error, **kwargs
+             self.sync_handler.on_tool_error, error, run_id=run_id, parent_run_id=parent_run_id, **kwargs
+         )
+
+     # Retriever Events
+     async def on_retriever_start(
+         self, serialized: Dict[str, Any], query: str, *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any
+     ) -> Any:
+         await asyncio.to_thread(
+             self.sync_handler.on_retriever_start, serialized, query, run_id=run_id, parent_run_id=parent_run_id, **kwargs
          )
 
-     async def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any) -> Any:
+     async def on_retriever_end(
+         self, documents: Sequence[Document], *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any
+     ) -> Any:
          await asyncio.to_thread(
-             self.sync_handler.on_chain_start, serialized, inputs, **kwargs
+             self.sync_handler.on_retriever_end, documents, run_id=run_id, parent_run_id=parent_run_id, **kwargs
          )
 
-     async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> Any:
+     async def on_retriever_error(
+         self, error: Union[Exception, KeyboardInterrupt], *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any
+     ) -> Any:
+         await asyncio.to_thread(
+             self.sync_handler.on_retriever_error, error, run_id=run_id, parent_run_id=parent_run_id, **kwargs
+         )
+
+     # Agent Events
+     async def on_agent_action(
+         self, action: AgentAction, *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any
+     ) -> Any:
          await asyncio.to_thread(
-             self.sync_handler.on_chain_end, outputs, **kwargs
+             self.sync_handler.on_agent_action, action, run_id=run_id, parent_run_id=parent_run_id, **kwargs
          )
 
-     async def on_chain_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> Any:
+     async def on_agent_finish(
+         self, finish: AgentFinish, *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any
+     ) -> Any:
          await asyncio.to_thread(
-             self.sync_handler.on_chain_error, error, **kwargs
+             self.sync_handler.on_agent_finish, finish, run_id=run_id, parent_run_id=parent_run_id, **kwargs
          )
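
Every method added above follows the same delegation pattern: the async callback forwards its keyword-only run_id and parent_run_id straight to the synchronous Langfuse handler inside asyncio.to_thread, which copies the current contextvars context into the worker thread, so context-bound tracing state survives the hop off the event loop. A minimal, self-contained sketch of that pattern, using a stand-in SyncHandler instead of the real Langfuse handler (all names below are illustrative, not crewplus code):

import asyncio
import contextvars
from typing import Any, Optional
from uuid import UUID, uuid4

trace_ctx = contextvars.ContextVar("trace_ctx", default="no-trace")

class SyncHandler:
    """Stand-in for the synchronous handler: just records what it receives."""
    def on_llm_end(self, response: Any, *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any) -> None:
        # Runs in a worker thread, yet still sees the caller's contextvars.
        print(f"run_id={run_id} parent={parent_run_id} ctx={trace_ctx.get()}")

class AsyncWrapper:
    def __init__(self) -> None:
        self.sync_handler = SyncHandler()

    async def on_llm_end(self, response: Any, *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any) -> None:
        # asyncio.to_thread copies the current context into the worker thread.
        await asyncio.to_thread(
            self.sync_handler.on_llm_end, response, run_id=run_id, parent_run_id=parent_run_id, **kwargs
        )

async def main() -> None:
    trace_ctx.set("request-42")
    await AsyncWrapper().on_llm_end({"text": "done"}, run_id=uuid4())

asyncio.run(main())

Running this prints the run_id together with the context value set before the await, confirming that the worker thread sees the caller's context.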
@@ -330,22 +330,22 @@ class GeminiChatModel(BaseChatModel):
 
      def invoke(self, input, config=None, **kwargs):
          """Override invoke to add tracing callbacks automatically."""
-         config = self._tracing_manager.add_callbacks_to_config(config)
+         config = self._tracing_manager.add_sync_callbacks_to_config(config)
          return super().invoke(input, config=config, **kwargs)
 
      async def ainvoke(self, input, config=None, **kwargs):
          """Override ainvoke to add tracing callbacks automatically."""
-         config = self._tracing_manager.add_callbacks_to_config(config)
+         config = self._tracing_manager.add_async_callbacks_to_config(config)
          return await super().ainvoke(input, config=config, **kwargs)
 
      def stream(self, input, config=None, **kwargs):
          """Override stream to add tracing callbacks automatically."""
-         config = self._tracing_manager.add_callbacks_to_config(config)
+         config = self._tracing_manager.add_sync_callbacks_to_config(config)
          return super().stream(input, config=config, **kwargs)
 
      async def astream(self, input, config=None, **kwargs):
          """Override astream to add tracing callbacks automatically."""
-         config = self._tracing_manager.add_callbacks_to_config(config)
+         config = self._tracing_manager.add_async_callbacks_to_config(config)
          # We must call an async generator,
          async for chunk in super().astream(input, config=config, **kwargs):
              yield chunk
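
The change above splits callback injection by entry point: invoke() and stream() keep a synchronous tracing handler, while ainvoke() and astream() receive the async wrapper so Langfuse calls no longer block the event loop. The tracing manager itself is not part of this diff, so the following is only a hypothetical sketch of what the two config helpers could look like; the class body and handler fields are assumptions, not crewplus code:

# Hypothetical sketch only: crewplus/services/tracing_manager.py is not shown in
# this diff, so the body below is an assumption, not the package's implementation.
from typing import Optional

from langchain_core.runnables import RunnableConfig


class TracingManager:
    def __init__(self, sync_handler: object, async_handler: object) -> None:
        self._sync_handler = sync_handler    # e.g. the sync LangfuseCallbackHandler
        self._async_handler = async_handler  # e.g. AsyncLangfuseCallbackHandler

    def _add(self, config: Optional[RunnableConfig], handler: object) -> RunnableConfig:
        # Copy the config and append the handler to its callbacks
        # (assumes "callbacks", when present, is a plain list of handlers).
        config = dict(config) if config else {}
        callbacks = list(config.get("callbacks") or [])
        if handler not in callbacks:
            callbacks.append(handler)
        config["callbacks"] = callbacks
        return config

    def add_sync_callbacks_to_config(self, config: Optional[RunnableConfig] = None) -> RunnableConfig:
        # invoke()/stream() run on the caller's thread, so a blocking handler is acceptable.
        return self._add(config, self._sync_handler)

    def add_async_callbacks_to_config(self, config: Optional[RunnableConfig] = None) -> RunnableConfig:
        # ainvoke()/astream() run on the event loop, so use the non-blocking async wrapper.
        return self._add(config, self._async_handler)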
@@ -41,9 +41,9 @@ class VDBService(object):
      >>> # Initialize with a full settings dictionary
      >>> settings = {
      ...     "embedder": {
-     ...         "provider": "azure-openai",
+     ...         "provider": "azure-openai-embeddings",
      ...         "config": {
-     ...             "model": "text-embedding-3-small",
+     ...             "model": "text-embedding-ada-002",
      ...             "api_version": "2023-05-15",
      ...             "api_key": "YOUR_AZURE_OPENAI_KEY",
      ...             "openai_base_url": "YOUR_AZURE_OPENAI_ENDPOINT",
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: crewplus
- Version: 0.2.44
+ Version: 0.2.47
  Summary: Base services for CrewPlus AI applications
  Author-Email: Tim Liu <tim@opsmateai.com>
  License: MIT
@@ -1,13 +1,13 @@
- crewplus-0.2.44.dist-info/METADATA,sha256=KdBsfThzg0wjMW0ydjEsKw_Kp5c-nX76GQ721K4FOHY,5362
- crewplus-0.2.44.dist-info/WHEEL,sha256=9P2ygRxDrTJz3gsagc0Z96ukrxjr-LFBGOgv3AuKlCA,90
- crewplus-0.2.44.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
- crewplus-0.2.44.dist-info/licenses/LICENSE,sha256=2_NHSHRTKB_cTcT_GXgcenOCtIZku8j343mOgAguTfc,1087
+ crewplus-0.2.47.dist-info/METADATA,sha256=ZYdwfWzswjuz_ZN6PqvUrB_JDLRViZ1ou9iC3Ldjfqk,5362
+ crewplus-0.2.47.dist-info/WHEEL,sha256=9P2ygRxDrTJz3gsagc0Z96ukrxjr-LFBGOgv3AuKlCA,90
+ crewplus-0.2.47.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
+ crewplus-0.2.47.dist-info/licenses/LICENSE,sha256=2_NHSHRTKB_cTcT_GXgcenOCtIZku8j343mOgAguTfc,1087
  crewplus/__init__.py,sha256=m46HkZL1Y4toD619NL47Sn2Qe084WFFSFD7e6VoYKZc,284
  crewplus/callbacks/__init__.py,sha256=YG7ieeb91qEjp1zF0-inEN7mjZ7yT_D2yzdWFT8Z1Ws,63
- crewplus/callbacks/async_langfuse_handler.py,sha256=XnHpQ-sqcUCb6BCrE_JgldtTbaV61lH9aJnfy_IUp9Y,4614
+ crewplus/callbacks/async_langfuse_handler.py,sha256=A4uFeLpvOUdc58M7sZoE65_C1V98u0QCvx5jUquM0pM,7006
  crewplus/services/__init__.py,sha256=V1CG8b2NOmRzNgQH7BPl4KVxWSYJH5vfEsW1wVErKNE,375
  crewplus/services/azure_chat_model.py,sha256=iWzJ2GQFSNmwJx-2O5_xKPSB6VVc-7T6bcfFI8_WezA,5521
- crewplus/services/gemini_chat_model.py,sha256=VsOB_st1qRmDkwLXzo-gCShhUsZHpk0V-G-ulQXGN3g,40081
+ crewplus/services/gemini_chat_model.py,sha256=DYqz01H2TIHiCDQesSozVfOsMigno6QGwOtIweg7UHk,40103
  crewplus/services/init_services.py,sha256=7oZ1GmesK32EDB_DYnTzW17MEpXjXK41_U_1pmqu_m4,2183
  crewplus/services/model_load_balancer.py,sha256=Q9Gx3GrbKworU-Ytxeqp0ggHSgZ1Q6brtTk-nCl4sak,12095
  crewplus/services/tracing_manager.py,sha256=0KR-F0BKYEMdADANWofFZH9D9jcWDHzDICUE7nDhzJc,6579
@@ -17,9 +17,9 @@ crewplus/utils/schema_document_updater.py,sha256=frvffxn2vbi71fHFPoGb9hq7gH2azmm
  crewplus/vectorstores/milvus/__init__.py,sha256=OeYv2rdyG7tcREIjBJPyt2TbE54NvyeRoWMe7LwopRE,245
  crewplus/vectorstores/milvus/milvus_schema_manager.py,sha256=2IZT61LVui21Pt5Z3y8YYS2dYcwzkgUKxMq2NA0-lQE,9222
  crewplus/vectorstores/milvus/schema_milvus.py,sha256=DtHP8jHRSpLqt9ixAnJE5R4CId9NLYXxOVqRxPCEyv4,26131
- crewplus/vectorstores/milvus/vdb_service.py,sha256=CaUMLIMeOCm2R4t5EKtAupIddFXQu0NSb8RpTkInGd4,22498
+ crewplus/vectorstores/milvus/vdb_service.py,sha256=-cZmlLNAPBz4vBRWNC_rfNZh1VWKbmgYfCYsQFyeoSM,22509
  docs/GeminiChatModel.md,sha256=zZYyl6RmjZTUsKxxMiC9O4yV70MC4TD-IGUmWhIDBKA,8677
  docs/ModelLoadBalancer.md,sha256=aGHES1dcXPz4c7Y8kB5-vsCNJjriH2SWmjBkSGoYKiI,4398
  docs/VDBService.md,sha256=Dw286Rrf_fsi13jyD3Bo4Sy7nZ_G7tYm7d8MZ2j9hxk,9375
  docs/index.md,sha256=3tlc15uR8lzFNM5WjdoZLw0Y9o1P1gwgbEnOdIBspqc,1643
- crewplus-0.2.44.dist-info/RECORD,,
+ crewplus-0.2.47.dist-info/RECORD,,