vedana-core 0.1.0.dev6__tar.gz → 0.6.0.dev1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25)
  1. {vedana_core-0.1.0.dev6 → vedana_core-0.6.0.dev1}/CHANGELOG.md +2 -1
  2. {vedana_core-0.1.0.dev6 → vedana_core-0.6.0.dev1}/PKG-INFO +1 -1
  3. {vedana_core-0.1.0.dev6 → vedana_core-0.6.0.dev1}/pyproject.toml +0 -1
  4. vedana_core-0.6.0.dev1/tests/test_llm.py +68 -0
  5. vedana_core-0.1.0.dev6/tests/test_llm.py +0 -42
  6. {vedana_core-0.1.0.dev6 → vedana_core-0.6.0.dev1}/.env.example +0 -0
  7. {vedana_core-0.1.0.dev6 → vedana_core-0.6.0.dev1}/.gitignore +0 -0
  8. {vedana_core-0.1.0.dev6 → vedana_core-0.6.0.dev1}/.python-version +0 -0
  9. {vedana_core-0.1.0.dev6 → vedana_core-0.6.0.dev1}/README.md +0 -0
  10. {vedana_core-0.1.0.dev6 → vedana_core-0.6.0.dev1}/src/vedana_core/__init__.py +0 -0
  11. {vedana_core-0.1.0.dev6 → vedana_core-0.6.0.dev1}/src/vedana_core/app.py +0 -0
  12. {vedana_core-0.1.0.dev6 → vedana_core-0.6.0.dev1}/src/vedana_core/data_model.py +0 -0
  13. {vedana_core-0.1.0.dev6 → vedana_core-0.6.0.dev1}/src/vedana_core/data_provider.py +0 -0
  14. {vedana_core-0.1.0.dev6 → vedana_core-0.6.0.dev1}/src/vedana_core/db.py +0 -0
  15. {vedana_core-0.1.0.dev6 → vedana_core-0.6.0.dev1}/src/vedana_core/graph.py +0 -0
  16. {vedana_core-0.1.0.dev6 → vedana_core-0.6.0.dev1}/src/vedana_core/llm.py +0 -0
  17. {vedana_core-0.1.0.dev6 → vedana_core-0.6.0.dev1}/src/vedana_core/py.typed +0 -0
  18. {vedana_core-0.1.0.dev6 → vedana_core-0.6.0.dev1}/src/vedana_core/rag_agent.py +0 -0
  19. {vedana_core-0.1.0.dev6 → vedana_core-0.6.0.dev1}/src/vedana_core/rag_pipeline.py +0 -0
  20. {vedana_core-0.1.0.dev6 → vedana_core-0.6.0.dev1}/src/vedana_core/settings.py +0 -0
  21. {vedana_core-0.1.0.dev6 → vedana_core-0.6.0.dev1}/src/vedana_core/start_pipeline.py +0 -0
  22. {vedana_core-0.1.0.dev6 → vedana_core-0.6.0.dev1}/src/vedana_core/utils.py +0 -0
  23. {vedana_core-0.1.0.dev6 → vedana_core-0.6.0.dev1}/src/vedana_core/vts.py +0 -0
  24. {vedana_core-0.1.0.dev6 → vedana_core-0.6.0.dev1}/tests/.litellm_cache/.gitattributes +0 -0
  25. {vedana_core-0.1.0.dev6 → vedana_core-0.6.0.dev1}/tests/test_data_model.py +0 -0
@@ -1,5 +1,6 @@
-# WIP
+# 2026.01.29 - 0.6.0
 
+* Republish to PyPI
 * remove DataModel loading/caching from vedana-core, rely on vedana-etl for updates
 * change DataModel attributes structure - split anchor/edge attributes
 * optimize DataModel calls
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: vedana-core
-Version: 0.1.0.dev6
+Version: 0.6.0.dev1
 Summary: Semantic Graph RAG App
 Author-email: Andrey Tatarinov <a@tatarinov.co>, Timur Sheydaev <tsheyd@epoch8.co>
 Requires-Python: >=3.12
@@ -81,7 +81,6 @@ custom_steps = """
 MEMGRAPH_USER: ""
 MEMGRAPH_PWD: ""
 EMBEDDINGS_DIM: "1024"
-OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
 run: |
   uv run pytest libs/vedana-core/
 """
@@ -0,0 +1,68 @@
+from unittest.mock import AsyncMock, MagicMock
+
+import pytest
+from jims_core.llms.llm_provider import LLMProvider
+from pydantic import BaseModel
+
+from vedana_core.llm import LLM, Tool
+
+
+def mock_msg(content: str | None = None, tool_calls: list | None = None):
+    """Create a mock message with to_dict() method."""
+    m = MagicMock(content=content, tool_calls=tool_calls, role="assistant")
+    m.to_dict.return_value = {"role": "assistant", "content": content}
+    return m
+
+
+def mock_tool_call(name: str, args: str, id: str = "call_1"):
+    """Create a mock tool call."""
+    tc = MagicMock(id=id)
+    tc.function.name = name
+    tc.function.arguments = args
+    return tc
+
+
+@pytest.mark.asyncio
+async def test_llm_completion_with_tools() -> None:
+    llm_provider = LLMProvider()
+
+    tc = mock_tool_call("hello_world", '{"name": "Alice"}')
+    llm_provider.chat_completion_with_tools = AsyncMock(
+        side_effect=[(mock_msg(tool_calls=[tc]), [tc]), (mock_msg("Done!"), [])]
+    )
+
+    llm = LLM(llm_provider=llm_provider, prompt_templates={})
+
+    class HelloWorldArgs(BaseModel):
+        name: str
+
+    res_messages, res_content = await llm.create_completion_with_tools(
+        messages=[{"role": "system", "content": "Call hello_world with name='Alice'."}],
+        tools=[
+            Tool(
+                name="hello_world", description="Says hello.", args_cls=HelloWorldArgs, fn=lambda a: f"Hello, {a.name}!"
+            )
+        ],
+    )
+
+    tool_msg = next(m for m in res_messages if m.get("role") == "tool")
+    assert tool_msg.get("content") == "Hello, Alice!"
+
+
+@pytest.mark.asyncio
+async def test_llm_completion_no_tool_calls() -> None:
+    llm_provider = LLMProvider()
+    llm_provider.chat_completion_with_tools = AsyncMock(return_value=(mock_msg("No tools needed."), []))
+
+    llm = LLM(llm_provider=llm_provider, prompt_templates={})
+
+    class DummyArgs(BaseModel):
+        value: str
+
+    res_messages, res_content = await llm.create_completion_with_tools(
+        messages=[{"role": "user", "content": "Hello"}],
+        tools=[Tool(name="dummy", description="Dummy.", args_cls=DummyArgs, fn=lambda a: a.value)],
+    )
+
+    assert res_content == "No tools needed."
+    assert not any(m.get("role") == "tool" for m in res_messages)
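
The two tests above pin down the tool-execution contract without network access. For orientation, below is a minimal sketch of the loop shape those mocks imply. This is inferred from the test doubles only, not taken from `vedana_core.llm` (whose implementation is not part of this diff); the function name `run_tools_loop` and the exact `chat_completion_with_tools` call signature are assumptions.

# Hypothetical sketch inferred from the mocks above; the real
# vedana_core.llm.LLM may differ in names and signatures.
async def run_tools_loop(provider, messages, tools):
    tools_by_name = {t.name: t for t in tools}
    while True:
        # Assumed to return (assistant_message, tool_calls), as the
        # AsyncMock side_effect tuples suggest.
        msg, tool_calls = await provider.chat_completion_with_tools(messages, tools)
        messages.append(msg.to_dict())
        if not tool_calls:
            # Matches the (res_messages, res_content) return in the tests.
            return messages, msg.content
        for tc in tool_calls:
            tool = tools_by_name[tc.function.name]
            # Arguments arrive as a JSON string; validate with the pydantic args_cls.
            args = tool.args_cls.model_validate_json(tc.function.arguments)
            messages.append({"role": "tool", "tool_call_id": tc.id, "content": tool.fn(args)})

Under this reading, the first test drives two provider calls (one tool round-trip, then a final answer) and the second terminates immediately with no "tool" messages appended, which is exactly what the assertions check.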
@@ -1,42 +0,0 @@
-import litellm
-import pytest
-from jims_core.llms.llm_provider import LLMProvider
-from pydantic import BaseModel
-
-from vedana_core.llm import LLM, Tool
-
-litellm._turn_on_debug()
-
-
-@pytest.fixture(scope="module")
-def vcr_config():
-    return {"filter_headers": ["authorization"], "ignore_hosts": ["test"]}
-
-
-@pytest.mark.asyncio
-async def test_llm_completion_with_tools() -> None:
-    llm_provider = LLMProvider()
-    llm = LLM(llm_provider=llm_provider, prompt_templates={})
-
-    class HelloWorldArgs(BaseModel):
-        name: str
-
-    res_messages, res_content = await llm.create_completion_with_tools(
-        messages=[
-            {
-                "role": "system",
-                "content": "Call a tool 'hell_world' with argument 'name' set to 'Alice'.",
-            },
-        ],
-        tools=[
-            Tool(
-                name="hello_world",
-                description="A tool that says hello to the world.",
-                args_cls=HelloWorldArgs,
-                fn=lambda args: f"Hello, {args.name}!",
-            ),
-        ],
-    )
-
-    tool_call_msg = [msg for msg in res_messages if msg.get("role") == "tool"][0]
-    assert tool_call_msg.get("content") == "Hello, Alice!"
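
The deleted test relied on litellm debug logging and what appears to be a pytest-recording/pytest-vcr style `vcr_config` fixture (filtering the `authorization` header), i.e. on recorded or live API traffic; its replacement above exercises the same tool-calling path against in-process mocks and adds a no-tool-call case.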