vibecore-0.2.0a1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.
Files changed (63)
  1. vibecore/__init__.py +0 -0
  2. vibecore/agents/default.py +79 -0
  3. vibecore/agents/prompts.py +12 -0
  4. vibecore/agents/task_agent.py +66 -0
  5. vibecore/cli.py +131 -0
  6. vibecore/context.py +24 -0
  7. vibecore/handlers/__init__.py +5 -0
  8. vibecore/handlers/stream_handler.py +231 -0
  9. vibecore/main.py +506 -0
  10. vibecore/main.tcss +0 -0
  11. vibecore/mcp/__init__.py +6 -0
  12. vibecore/mcp/manager.py +167 -0
  13. vibecore/mcp/server_wrapper.py +109 -0
  14. vibecore/models/__init__.py +5 -0
  15. vibecore/models/anthropic.py +239 -0
  16. vibecore/prompts/common_system_prompt.txt +64 -0
  17. vibecore/py.typed +0 -0
  18. vibecore/session/__init__.py +5 -0
  19. vibecore/session/file_lock.py +127 -0
  20. vibecore/session/jsonl_session.py +236 -0
  21. vibecore/session/loader.py +193 -0
  22. vibecore/session/path_utils.py +81 -0
  23. vibecore/settings.py +161 -0
  24. vibecore/tools/__init__.py +1 -0
  25. vibecore/tools/base.py +27 -0
  26. vibecore/tools/file/__init__.py +5 -0
  27. vibecore/tools/file/executor.py +282 -0
  28. vibecore/tools/file/tools.py +184 -0
  29. vibecore/tools/file/utils.py +78 -0
  30. vibecore/tools/python/__init__.py +1 -0
  31. vibecore/tools/python/backends/__init__.py +1 -0
  32. vibecore/tools/python/backends/terminal_backend.py +58 -0
  33. vibecore/tools/python/helpers.py +80 -0
  34. vibecore/tools/python/manager.py +208 -0
  35. vibecore/tools/python/tools.py +27 -0
  36. vibecore/tools/shell/__init__.py +5 -0
  37. vibecore/tools/shell/executor.py +223 -0
  38. vibecore/tools/shell/tools.py +156 -0
  39. vibecore/tools/task/__init__.py +5 -0
  40. vibecore/tools/task/executor.py +51 -0
  41. vibecore/tools/task/tools.py +51 -0
  42. vibecore/tools/todo/__init__.py +1 -0
  43. vibecore/tools/todo/manager.py +31 -0
  44. vibecore/tools/todo/models.py +36 -0
  45. vibecore/tools/todo/tools.py +111 -0
  46. vibecore/utils/__init__.py +5 -0
  47. vibecore/utils/text.py +28 -0
  48. vibecore/widgets/core.py +332 -0
  49. vibecore/widgets/core.tcss +63 -0
  50. vibecore/widgets/expandable.py +121 -0
  51. vibecore/widgets/expandable.tcss +69 -0
  52. vibecore/widgets/info.py +25 -0
  53. vibecore/widgets/info.tcss +17 -0
  54. vibecore/widgets/messages.py +232 -0
  55. vibecore/widgets/messages.tcss +85 -0
  56. vibecore/widgets/tool_message_factory.py +121 -0
  57. vibecore/widgets/tool_messages.py +483 -0
  58. vibecore/widgets/tool_messages.tcss +289 -0
  59. vibecore-0.2.0a1.dist-info/METADATA +407 -0
  60. vibecore-0.2.0a1.dist-info/RECORD +63 -0
  61. vibecore-0.2.0a1.dist-info/WHEEL +4 -0
  62. vibecore-0.2.0a1.dist-info/entry_points.txt +2 -0
  63. vibecore-0.2.0a1.dist-info/licenses/LICENSE +21 -0
vibecore/mcp/server_wrapper.py ADDED
@@ -0,0 +1,109 @@
+ """MCP server wrapper for renaming tools with a prefix pattern."""
+
+ from typing import TYPE_CHECKING, Any
+
+ from agents.mcp import MCPServer
+ from agents.run_context import RunContextWrapper
+ from mcp.types import CallToolResult, GetPromptResult, ListPromptsResult
+ from mcp.types import Tool as MCPTool
+
+ if TYPE_CHECKING:
+     from agents import AgentBase
+
+
+ class NameOverridingMCPServer(MCPServer):
+     """Wrapper for MCP servers that renames tools with mcp__servername__toolname pattern."""
+
+     def __init__(self, actual_server: MCPServer, use_structured_content: bool = False):
+         """Initialize the wrapper.
+
+         Args:
+             actual_server: The actual MCP server to wrap.
+             use_structured_content: Whether to use structured content.
+         """
+         super().__init__(use_structured_content=use_structured_content)
+         self.actual_server = actual_server
+         # Store the mapping between renamed and original tool names
+         self._tool_name_mapping: dict[str, str] = {}
+
+     @property
+     def name(self) -> str:
+         """Return the name of the wrapped server."""
+         return self.actual_server.name
+
+     async def connect(self) -> None:
+         """Connect to the wrapped server."""
+         await self.actual_server.connect()
+
+     async def cleanup(self) -> None:
+         """Cleanup the wrapped server."""
+         await self.actual_server.cleanup()
+
+     async def list_tools(
+         self,
+         run_context: RunContextWrapper[Any] | None = None,
+         agent: "AgentBase | None" = None,
+     ) -> list[MCPTool]:
+         """List tools with renamed names.
+
+         Args:
+             run_context: The run context.
+             agent: The agent requesting tools.
+
+         Returns:
+             List of tools with renamed names following mcp__servername__toolname pattern.
+         """
+         # Get tools from the actual server
+         tools = await self.actual_server.list_tools(run_context, agent)
+
+         # Rename each tool
+         renamed_tools = []
+         for tool in tools:
+             # Create the new name with the pattern mcp__servername__toolname
+             original_name = tool.name
+             new_name = f"mcp__{self.name}__{original_name}"
+
+             # Store the mapping for call_tool
+             self._tool_name_mapping[new_name] = original_name
+
+             # Create a new tool with the renamed name
+             renamed_tool = MCPTool(
+                 name=new_name,
+                 description=tool.description,
+                 inputSchema=tool.inputSchema,
+             )
+             renamed_tools.append(renamed_tool)
+
+         return renamed_tools
+
+     async def call_tool(self, tool_name: str, arguments: dict[str, Any] | None) -> CallToolResult:
+         """Call a tool using its original name.
+
+         Args:
+             tool_name: The renamed tool name (mcp__servername__toolname).
+             arguments: The tool arguments.
+
+         Returns:
+             The result from calling the tool.
+         """
+         # Map the renamed tool name back to the original
+         original_name = self._tool_name_mapping.get(tool_name)
+         if original_name is None:
+             # If not in mapping, try to extract from pattern
+             if tool_name.startswith(f"mcp__{self.name}__"):
+                 # Extract original name from pattern
+                 original_name = tool_name[len(f"mcp__{self.name}__") :]
+             else:
+                 # Use as-is if not matching our pattern
+                 original_name = tool_name
+
+         # Call the tool with the original name
+         return await self.actual_server.call_tool(original_name, arguments)
+
+     async def list_prompts(self) -> ListPromptsResult:
+         """List prompts from the wrapped server."""
+         return await self.actual_server.list_prompts()
+
+     async def get_prompt(self, name: str, arguments: dict[str, Any] | None = None) -> GetPromptResult:
+         """Get a prompt from the wrapped server."""
+         return await self.actual_server.get_prompt(name, arguments)
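The wrapper only delegates to methods of the server it wraps, so a minimal usage sketch looks like the following (not part of the diff; the demo function, the "filesystem" server name, and the read_file tool are hypothetical, and any connected MCPServer instance would do):

from agents.mcp import MCPServer
from vibecore.mcp.server_wrapper import NameOverridingMCPServer

async def demo(filesystem_server: MCPServer) -> None:
    # Wrap an already-constructed MCP server; its tools are re-exposed
    # under the mcp__<servername>__<toolname> naming pattern.
    wrapped = NameOverridingMCPServer(filesystem_server)
    await wrapped.connect()
    try:
        tools = await wrapped.list_tools()
        print([t.name for t in tools])  # e.g. ["mcp__filesystem__read_file", ...]
        # Calls made with the prefixed name are forwarded under the original name.
        result = await wrapped.call_tool("mcp__filesystem__read_file", {"path": "README.md"})
        print(result)
    finally:
        await wrapped.cleanup()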
vibecore/models/__init__.py ADDED
@@ -0,0 +1,5 @@
+ """Models package for Vibecore application."""
+
+ from .anthropic import AnthropicModel
+
+ __all__ = ["AnthropicModel"]
vibecore/models/anthropic.py ADDED
@@ -0,0 +1,239 @@
+ """Anthropic model implementation with automatic cache control."""
+
+ import json
+ import logging
+ from typing import Any, Literal, overload
+
+ import litellm
+ from agents.agent_output import AgentOutputSchemaBase
+ from agents.extensions.models.litellm_model import LitellmModel
+ from agents.handoffs import Handoff
+ from agents.items import TResponseInputItem
+ from agents.model_settings import ModelSettings
+ from agents.models.interface import ModelTracing
+ from agents.tool import Tool
+ from agents.tracing.span_data import GenerationSpanData
+ from agents.tracing.spans import Span
+ from openai import AsyncStream
+ from openai.types.chat import ChatCompletionChunk
+ from openai.types.responses import Response
+
+ # Set up debug logging
+ logger = logging.getLogger(__name__)
+
+
+ def _transform_messages_for_cache(messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
+     """Transform messages to add cache_control for Anthropic models.
+
+     Caches up to 4 messages in priority order:
+     1. Last message
+     2. Message before last user message (often tool result)
+     3. Message before second-to-last user message (often tool result)
+     4. Last system message
+
+     Args:
+         messages: List of message dictionaries
+
+     Returns:
+         Transformed messages with cache_control added
+     """
+     if not messages:
+         return []
+
+     indices_to_cache = set()
+
+     # 1. Always cache the last message
+     indices_to_cache.add(len(messages) - 1)
+
+     # 2. Find user messages going backwards
+     user_message_indices = []
+     for i in range(len(messages) - 1, -1, -1):
+         if messages[i].get("role") == "user":
+             user_message_indices.append(i)
+
+     # 3. Cache message before the last user message (if exists)
+     # This is often a tool result which contains important context
+     if len(user_message_indices) >= 1 and user_message_indices[0] > 0:
+         indices_to_cache.add(user_message_indices[0] - 1)
+
+     # 4. Cache message before the second-to-last user message (if exists)
+     # This is also often a tool result
+     if len(user_message_indices) >= 2 and user_message_indices[1] > 0:
+         indices_to_cache.add(user_message_indices[1] - 1)
+
+     # 5. Find and cache the last system message
+     for i in range(len(messages) - 1, -1, -1):
+         if messages[i].get("role") == "system":
+             indices_to_cache.add(i)
+             break
+
+     # Transform messages with cache_control only for selected indices
+     transformed = []
+     for i, msg in enumerate(messages):
+         new_msg = msg.copy()
+
+         if i in indices_to_cache:
+             # Add cache_control to this message
+             content = new_msg.get("content")
+
+             if isinstance(content, str):
+                 # Only add cache_control if text is not empty
+                 if content:
+                     # Convert string content to list format with cache_control
+                     new_msg["content"] = [{"type": "text", "text": content, "cache_control": {"type": "ephemeral"}}]
+                 # else: keep empty string as is, don't convert to list format
+             elif isinstance(content, list):
+                 # Add cache_control to first text item if not already present
+                 new_content = []
+                 cache_added = False
+
+                 for item in content:
+                     if isinstance(item, dict) and item.get("type") == "text" and not cache_added:
+                         # Only add cache_control if text is not empty
+                         text_content = item.get("text", "")
+                         if text_content and "cache_control" not in item:
+                             # Add cache_control to the first non-empty text item without cache_control
+                             new_item = item.copy()
+                             new_item["cache_control"] = {"type": "ephemeral"}
+                             new_content.append(new_item)
+                             cache_added = True
+                         elif text_content and "cache_control" in item:
+                             # Non-empty item already has cache_control
+                             new_content.append(item)
+                             cache_added = True
+                         else:
+                             # Empty text or already has cache_control - keep as is
+                             new_content.append(item)
+                     else:
+                         new_content.append(item)
+
+                 new_msg["content"] = new_content
+
+         transformed.append(new_msg)
+
+     return transformed
+
+
+ class AnthropicModel(LitellmModel):
+     """Anthropic model that automatically adds cache_control to messages.
+
+     This implementation minimally overrides the _fetch_response method to intercept
+     and transform messages before they're sent to the Anthropic API.
+
+     The override approach:
+     1. Temporarily replaces litellm.acompletion with an intercepting function
+     2. The interceptor transforms messages only for this specific model
+     3. Calls the parent's _fetch_response which uses the interceptor
+     4. Always restores the original function, even if an error occurs
+
+     This minimal approach ensures compatibility with upstream LitellmModel changes
+     while adding the necessary cache_control functionality for Anthropic models.
+     """
+
+     def __init__(self, model_name: str, base_url: str | None = None, api_key: str | None = None):
+         """Initialize AnthropicModel."""
+         super().__init__(model_name, base_url, api_key)
+         logger.debug(f"AnthropicModel initialized with model: {model_name}")
+
+     @overload
+     async def _fetch_response(
+         self,
+         system_instructions: str | None,
+         input: str | list[TResponseInputItem],
+         model_settings: ModelSettings,
+         tools: list[Tool],
+         output_schema: AgentOutputSchemaBase | None,
+         handoffs: list[Handoff],
+         span: Span[GenerationSpanData],
+         tracing: ModelTracing,
+         stream: Literal[True],
+         prompt: Any | None = None,
+     ) -> tuple[Response, AsyncStream[ChatCompletionChunk]]: ...
+
+     @overload
+     async def _fetch_response(
+         self,
+         system_instructions: str | None,
+         input: str | list[TResponseInputItem],
+         model_settings: ModelSettings,
+         tools: list[Tool],
+         output_schema: AgentOutputSchemaBase | None,
+         handoffs: list[Handoff],
+         span: Span[GenerationSpanData],
+         tracing: ModelTracing,
+         stream: Literal[False],
+         prompt: Any | None = None,
+     ) -> Any: ...  # litellm.ModelResponse
+
+     async def _fetch_response(
+         self,
+         system_instructions: str | None,
+         input: str | list[TResponseInputItem],
+         model_settings: ModelSettings,
+         tools: list[Tool],
+         output_schema: AgentOutputSchemaBase | None,
+         handoffs: list[Handoff],
+         span: Span[GenerationSpanData],
+         tracing: ModelTracing,
+         stream: bool = False,
+         prompt: Any | None = None,
+     ) -> Any | tuple[Response, AsyncStream[ChatCompletionChunk]]:
+         """Override _fetch_response to add cache_control to messages."""
+         # Store the original litellm.acompletion function
+         original_acompletion = litellm.acompletion
+
+         async def _intercepting_acompletion(*args, **kwargs):
+             """Intercept litellm.acompletion calls to transform messages."""
+             # Only transform messages for this Anthropic model
+             if kwargs.get("model") == self.model and "messages" in kwargs:
+                 messages = kwargs["messages"]
+                 logger.debug(f"Intercepting Anthropic API call for model {self.model} with {len(messages)} messages")
+
+                 # Transform messages to add cache_control
+                 transformed = _transform_messages_for_cache(messages)
+                 kwargs["messages"] = transformed
+
+                 # Log transformation for debugging
+                 if logger.isEnabledFor(logging.DEBUG):
+                     for i, (orig, trans) in enumerate(zip(messages[:2], transformed[:2], strict=False)):
+                         logger.debug(f"Message {i} transformation:")
+                         logger.debug(f"  Original: {json.dumps(orig, indent=2)}")
+                         logger.debug(f"  Transformed: {json.dumps(trans, indent=2)}")
+
+             # Call the original function with potentially transformed kwargs
+             return await original_acompletion(*args, **kwargs)
+
+         try:
+             # Temporarily replace litellm.acompletion with our intercepting version
+             litellm.acompletion = _intercepting_acompletion
+
+             # Call the parent's implementation, which will use our intercepting function
+             if stream:
+                 return await super()._fetch_response(
+                     system_instructions=system_instructions,
+                     input=input,
+                     model_settings=model_settings,
+                     tools=tools,
+                     output_schema=output_schema,
+                     handoffs=handoffs,
+                     span=span,
+                     tracing=tracing,
+                     stream=True,
+                     prompt=prompt,
+                 )
+             else:
+                 return await super()._fetch_response(
+                     system_instructions=system_instructions,
+                     input=input,
+                     model_settings=model_settings,
+                     tools=tools,
+                     output_schema=output_schema,
+                     handoffs=handoffs,
+                     span=span,
+                     tracing=tracing,
+                     stream=False,
+                     prompt=prompt,
+                 )
+         finally:
+             # Always restore the original function
+             litellm.acompletion = original_acompletion
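As a quick illustration of the caching priority implemented by _transform_messages_for_cache, consider this sketch with made-up message contents (not part of the package):

from vibecore.models.anthropic import _transform_messages_for_cache

messages = [
    {"role": "system", "content": "You are helpful."},     # index 0: last system message
    {"role": "user", "content": "Hi"},                      # index 1: left untouched
    {"role": "assistant", "content": "Hello!"},             # index 2: precedes the last user message
    {"role": "user", "content": "Summarize the README."},   # index 3: last message
]
cached = _transform_messages_for_cache(messages)
# Indices 0, 2, and 3 have their string content converted to
# [{"type": "text", "text": ..., "cache_control": {"type": "ephemeral"}}];
# index 1 is copied through unchanged.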
vibecore/prompts/common_system_prompt.txt ADDED
@@ -0,0 +1,64 @@
+ You are an interactive CLI tool that helps users with any task they need assistance with. Use the instructions below and the tools available to you to assist the user.
+
+ # Tone and style
+ You should be concise, direct, and to the point. When you run a non-trivial bash command, you should explain what the command does and why you are running it, to make sure the user understands what you are doing (this is especially important when you are running a command that will make changes to the user's system).
+ Remember that your output will be displayed on a command line interface. Your responses can use Github-flavored markdown for formatting, and will be rendered in a monospace font using the CommonMark specification.
+ Output text to communicate with the user; all text you output outside of tool use is displayed to the user. Only use tools to complete tasks. Never use tools like Bash or code comments as means to communicate with the user during the session.
+ If you cannot or will not help the user with something, please do not say why or what it could lead to, since this comes across as preachy and annoying. Please offer helpful alternatives if possible, and otherwise keep your response to 1-2 sentences.
+ Only use emojis if the user explicitly requests it. Avoid using emojis in all communication unless asked.
+ IMPORTANT: You should minimize output tokens as much as possible while maintaining helpfulness, quality, and accuracy. Only address the specific query or task at hand, avoiding tangential information unless absolutely critical for completing the request. If you can answer in 1-3 sentences or a short paragraph, please do.
+ IMPORTANT: You should NOT answer with unnecessary preamble or postamble (such as explaining your code or summarizing your action), unless the user asks you to.
+ IMPORTANT: Keep your responses short, since they will be displayed on a command line interface. You MUST answer concisely with fewer than 4 lines (not including tool use or code generation), unless user asks for detail. Answer the user's question directly, without elaboration, explanation, or details. One word answers are best. Avoid introductions, conclusions, and explanations. You MUST avoid text before/after your response, such as "The answer is <answer>.", "Here is the content of the file..." or "Based on the information provided, the answer is..." or "Here is what I will do next...". Here are some examples to demonstrate appropriate verbosity:
+ <example>
+ user: 2 + 2
+ assistant: 4
+ </example>
+
+ <example>
+ user: what is 2+2?
+ assistant: 4
+ </example>
+
+ <example>
+ user: is 11 a prime number?
+ assistant: Yes
+ </example>
+
+ <example>
+ user: what command should I run to list files in the current directory?
+ assistant: ls
+ </example>
+
+ <example>
+ user: what command should I run to find all Python files?
+ assistant: find . -name "*.py"
+ </example>
+
+ <example>
+ user: How many golf balls fit inside a jetta?
+ assistant: 150000
+ </example>
+
+ <example>
+ user: what files are in the src directory?
+ assistant: [runs ls tool on src/ and sees main.py, utils.py, config.py]
+ user: which file contains the configuration?
+ assistant: src/config.py
+ </example>
+
+ # Proactiveness
+ You are allowed to be proactive, but only when the user asks you to do something. You should strive to strike a balance between:
+ 1. Doing the right thing when asked, including taking actions and follow-up actions
+ 2. Not surprising the user with actions you take without asking
+ For example, if the user asks you how to approach something, you should do your best to answer their question first, and not immediately jump into taking actions.
+ 3. Do not add additional code explanation summary unless requested by the user. After working on a file, just stop, rather than providing an explanation of what you did.
+
+ # Following conventions
+ When making changes to files, first understand the file's code conventions. Mimic code style, use existing libraries and utilities, and follow existing patterns.
+ - NEVER assume that a given library is available, even if it is well known. Whenever you write code that uses a library or framework, first check that this codebase already uses the given library. For example, you might look at neighboring files, or check the package.json (or cargo.toml, and so on depending on the language).
+ - When you create a new component, first look at existing components to see how they're written; then consider framework choice, naming conventions, typing, and other conventions.
+ - When you edit a piece of code, first look at the code's surrounding context (especially its imports) to understand the code's choice of frameworks and libraries. Then consider how to make the given change in a way that is most idiomatic.
+ - Always follow security best practices. Never introduce code that exposes or logs secrets and keys. Never commit secrets or keys to the repository.
+
+ # Code style
+ - IMPORTANT: DO NOT ADD ***ANY*** COMMENTS unless asked
vibecore/py.typed ADDED
File without changes
vibecore/session/__init__.py ADDED
@@ -0,0 +1,5 @@
+ """Session storage implementations for vibecore."""
+
+ from .jsonl_session import JSONLSession
+
+ __all__ = ["JSONLSession"]
vibecore/session/file_lock.py ADDED
@@ -0,0 +1,127 @@
+ """File locking utilities for thread-safe session operations."""
+
+ import asyncio
+ import threading
+ from collections.abc import AsyncIterator
+ from contextlib import asynccontextmanager
+ from pathlib import Path
+
+
+ class FileLockManager:
+     """Manages thread-based file locks for session files.
+
+     This is a basic implementation using threading locks.
+     In Phase 2, this will be enhanced with OS-level file locking
+     using fcntl (Unix) and msvcrt (Windows).
+     """
+
+     def __init__(self):
+         """Initialize the file lock manager."""
+         # Dictionary mapping file paths to their locks
+         self._locks: dict[str, threading.Lock] = {}
+         # Lock to protect the _locks dictionary itself
+         self._locks_lock = threading.Lock()
+
+     def _get_lock(self, file_path: Path) -> threading.Lock:
+         """Get or create a lock for the specified file path.
+
+         Args:
+             file_path: Path to the file to lock
+
+         Returns:
+             Threading lock for the file
+         """
+         path_str = str(file_path.resolve())
+
+         with self._locks_lock:
+             if path_str not in self._locks:
+                 self._locks[path_str] = threading.Lock()
+             return self._locks[path_str]
+
+     @asynccontextmanager
+     async def acquire_lock(
+         self,
+         file_path: Path,
+         timeout: float = 30.0,
+         exclusive: bool = True,
+     ) -> AsyncIterator[None]:
+         """Acquire a lock for the specified file.
+
+         Args:
+             file_path: Path to the file to lock
+             timeout: Maximum time to wait for the lock (in seconds)
+             exclusive: Whether to acquire an exclusive lock (unused in thread-based impl)
+
+         Yields:
+             None when the lock is acquired
+
+         Raises:
+             TimeoutError: If the lock cannot be acquired within the timeout
+         """
+         lock = self._get_lock(file_path)
+
+         # Try to acquire the lock with timeout
+         acquired = await asyncio.to_thread(lock.acquire, timeout=timeout)
+
+         if not acquired:
+             raise TimeoutError(f"Could not acquire lock for {file_path} within {timeout} seconds")
+
+         try:
+             yield
+         finally:
+             # Always release the lock
+             lock.release()
+
+     def cleanup_lock(self, file_path: Path) -> None:
+         """Remove the lock for a file that no longer exists.
+
+         This should be called after deleting a file to avoid memory leaks.
+
+         Args:
+             file_path: Path to the file whose lock should be removed
+         """
+         path_str = str(file_path.resolve())
+
+         with self._locks_lock:
+             if path_str in self._locks:
+                 del self._locks[path_str]
+
+
+ # Global lock manager instance
+ _lock_manager = FileLockManager()
+
+
+ @asynccontextmanager
+ async def acquire_file_lock(
+     file_path: Path,
+     timeout: float = 30.0,
+     exclusive: bool = True,
+ ) -> AsyncIterator[None]:
+     """Acquire a lock for the specified file.
+
+     This is a convenience function that uses the global lock manager.
+
+     Args:
+         file_path: Path to the file to lock
+         timeout: Maximum time to wait for the lock (in seconds)
+         exclusive: Whether to acquire an exclusive lock
+
+     Yields:
+         None when the lock is acquired
+
+     Raises:
+         TimeoutError: If the lock cannot be acquired within the timeout
+     """
+     async with _lock_manager.acquire_lock(file_path, timeout, exclusive):
+         yield
+
+
+ def cleanup_file_lock(file_path: Path) -> None:
+     """Remove the lock for a file that no longer exists.
+
+     This is a convenience function that uses the global lock manager.
+
+     Args:
+         file_path: Path to the file whose lock should be removed
+     """
+     _lock_manager.cleanup_lock(file_path)
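A brief usage sketch of the convenience helpers follows (not part of the package; the session file name and JSON line are hypothetical, and per the class docstring the lock only serializes access within a single process):

import asyncio
from pathlib import Path

from vibecore.session.file_lock import acquire_file_lock, cleanup_file_lock


async def append_event(path: Path, line: str) -> None:
    # Hold the per-file lock while appending so concurrent writers in this
    # process cannot interleave partial lines.
    async with acquire_file_lock(path, timeout=5.0):
        with path.open("a", encoding="utf-8") as f:
            f.write(line + "\n")


async def main() -> None:
    session_file = Path("session.jsonl")  # hypothetical session file
    await append_event(session_file, '{"role": "user", "content": "hi"}')
    session_file.unlink()
    cleanup_file_lock(session_file)  # drop the in-memory lock entry once the file is gone


asyncio.run(main())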