graphiti-core 0.14.0__tar.gz → 0.15.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of graphiti-core might be problematic. Click here for more details.

Files changed (182) hide show
  1. graphiti_core-0.15.0/.github/workflows/typecheck.yml +42 -0
  2. graphiti_core-0.15.0/.github/workflows/unit_tests.yml +51 -0
  3. {graphiti_core-0.14.0/server → graphiti_core-0.15.0}/Makefile +2 -2
  4. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/PKG-INFO +44 -21
  5. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/README.md +42 -19
  6. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/examples/quickstart/quickstart_falkordb.py +16 -10
  7. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/cross_encoder/__init__.py +2 -1
  8. graphiti_core-0.15.0/graphiti_core/cross_encoder/gemini_reranker_client.py +146 -0
  9. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/driver/__init__.py +4 -1
  10. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/driver/falkordb_driver.py +47 -21
  11. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/driver/neo4j_driver.py +5 -3
  12. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/embedder/voyage.py +1 -1
  13. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/graphiti.py +7 -2
  14. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/helpers.py +3 -2
  15. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/llm_client/gemini_client.py +135 -23
  16. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/nodes.py +10 -2
  17. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/search/search_filters.py +4 -5
  18. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/search/search_utils.py +1 -7
  19. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/utils/maintenance/community_operations.py +1 -1
  20. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/utils/maintenance/edge_operations.py +1 -1
  21. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/utils/maintenance/graph_data_operations.py +3 -5
  22. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/mcp_server/Dockerfile +1 -1
  23. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/mcp_server/pyproject.toml +1 -1
  24. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/mcp_server/uv.lock +43 -4
  25. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/poetry.lock +1508 -1327
  26. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/pyproject.toml +9 -11
  27. graphiti_core-0.15.0/pytest.ini +4 -0
  28. {graphiti_core-0.14.0 → graphiti_core-0.15.0/server}/Makefile +2 -2
  29. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/server/graph_service/config.py +1 -1
  30. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/server/pyproject.toml +6 -1
  31. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/server/uv.lock +24 -59
  32. graphiti_core-0.15.0/tests/cross_encoder/test_gemini_reranker_client.py +353 -0
  33. graphiti_core-0.15.0/tests/driver/__init__.py +1 -0
  34. graphiti_core-0.15.0/tests/driver/test_falkordb_driver.py +421 -0
  35. graphiti_core-0.15.0/tests/embedder/test_gemini.py +381 -0
  36. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/tests/helpers_test.py +1 -1
  37. graphiti_core-0.15.0/tests/llm_client/test_gemini_client.py +393 -0
  38. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/tests/test_entity_exclusion_int.py +105 -97
  39. graphiti_core-0.15.0/tests/test_graphiti_falkordb_int.py +170 -0
  40. graphiti_core-0.15.0/tests/test_node_falkordb_int.py +148 -0
  41. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/uv.lock +25 -60
  42. graphiti_core-0.14.0/.github/workflows/typecheck.yml +0 -47
  43. graphiti_core-0.14.0/.github/workflows/unit_tests.yml +0 -30
  44. graphiti_core-0.14.0/pytest.ini +0 -3
  45. graphiti_core-0.14.0/tests/embedder/test_gemini.py +0 -127
  46. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/.env.example +0 -0
  47. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/.github/dependabot.yml +0 -0
  48. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/.github/secret_scanning.yml +0 -0
  49. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/.github/workflows/cla.yml +0 -0
  50. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/.github/workflows/codeql.yml +0 -0
  51. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/.github/workflows/lint.yml +0 -0
  52. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/.github/workflows/release-graphiti-core.yml +0 -0
  53. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/.gitignore +0 -0
  54. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/CLAUDE.md +0 -0
  55. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/CODE_OF_CONDUCT.md +0 -0
  56. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/CONTRIBUTING.md +0 -0
  57. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/Dockerfile +0 -0
  58. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/LICENSE +0 -0
  59. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/SECURITY.md +0 -0
  60. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/Zep-CLA.md +0 -0
  61. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/conftest.py +0 -0
  62. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/depot.json +0 -0
  63. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/docker-compose.test.yml +0 -0
  64. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/docker-compose.yml +0 -0
  65. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/ellipsis.yaml +0 -0
  66. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/examples/data/manybirds_products.json +0 -0
  67. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/examples/ecommerce/runner.ipynb +0 -0
  68. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/examples/ecommerce/runner.py +0 -0
  69. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/examples/langgraph-agent/agent.ipynb +0 -0
  70. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/examples/langgraph-agent/tinybirds-jess.png +0 -0
  71. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/examples/podcast/podcast_runner.py +0 -0
  72. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/examples/podcast/podcast_transcript.txt +0 -0
  73. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/examples/podcast/transcript_parser.py +0 -0
  74. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/examples/quickstart/README.md +0 -0
  75. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/examples/quickstart/quickstart_neo4j.py +0 -0
  76. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/examples/quickstart/requirements.txt +0 -0
  77. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/examples/wizard_of_oz/parser.py +0 -0
  78. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/examples/wizard_of_oz/runner.py +0 -0
  79. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/examples/wizard_of_oz/woo.txt +0 -0
  80. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/__init__.py +0 -0
  81. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/cross_encoder/bge_reranker_client.py +0 -0
  82. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/cross_encoder/client.py +0 -0
  83. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/cross_encoder/openai_reranker_client.py +0 -0
  84. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/driver/driver.py +0 -0
  85. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/edges.py +0 -0
  86. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/embedder/__init__.py +0 -0
  87. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/embedder/azure_openai.py +0 -0
  88. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/embedder/client.py +0 -0
  89. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/embedder/gemini.py +0 -0
  90. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/embedder/openai.py +0 -0
  91. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/errors.py +0 -0
  92. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/graph_queries.py +0 -0
  93. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/graphiti_types.py +0 -0
  94. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/llm_client/__init__.py +0 -0
  95. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/llm_client/anthropic_client.py +0 -0
  96. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/llm_client/azure_openai_client.py +0 -0
  97. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/llm_client/client.py +0 -0
  98. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/llm_client/config.py +0 -0
  99. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/llm_client/errors.py +0 -0
  100. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/llm_client/groq_client.py +0 -0
  101. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/llm_client/openai_base_client.py +0 -0
  102. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/llm_client/openai_client.py +0 -0
  103. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/llm_client/openai_generic_client.py +0 -0
  104. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/llm_client/utils.py +0 -0
  105. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/models/__init__.py +0 -0
  106. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/models/edges/__init__.py +0 -0
  107. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/models/edges/edge_db_queries.py +0 -0
  108. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/models/nodes/__init__.py +0 -0
  109. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/models/nodes/node_db_queries.py +0 -0
  110. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/prompts/__init__.py +0 -0
  111. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/prompts/dedupe_edges.py +0 -0
  112. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/prompts/dedupe_nodes.py +0 -0
  113. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/prompts/eval.py +0 -0
  114. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/prompts/extract_edge_dates.py +0 -0
  115. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/prompts/extract_edges.py +0 -0
  116. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/prompts/extract_nodes.py +0 -0
  117. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/prompts/invalidate_edges.py +0 -0
  118. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/prompts/lib.py +0 -0
  119. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/prompts/models.py +0 -0
  120. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/prompts/prompt_helpers.py +0 -0
  121. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/prompts/summarize_nodes.py +0 -0
  122. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/py.typed +0 -0
  123. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/search/__init__.py +0 -0
  124. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/search/search.py +0 -0
  125. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/search/search_config.py +0 -0
  126. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/search/search_config_recipes.py +0 -0
  127. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/search/search_helpers.py +0 -0
  128. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/telemetry/__init__.py +0 -0
  129. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/telemetry/telemetry.py +0 -0
  130. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/utils/__init__.py +0 -0
  131. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/utils/bulk_utils.py +0 -0
  132. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/utils/datetime_utils.py +0 -0
  133. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/utils/maintenance/__init__.py +0 -0
  134. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/utils/maintenance/node_operations.py +0 -0
  135. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/utils/maintenance/temporal_operations.py +0 -0
  136. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/utils/maintenance/utils.py +0 -0
  137. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/graphiti_core/utils/ontology_utils/entity_types_utils.py +0 -0
  138. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/images/arxiv-screenshot.png +0 -0
  139. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/images/graphiti-graph-intro.gif +0 -0
  140. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/images/graphiti-intro-slides-stock-2.gif +0 -0
  141. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/images/simple_graph.svg +0 -0
  142. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/mcp_server/.env.example +0 -0
  143. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/mcp_server/.python-version +0 -0
  144. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/mcp_server/README.md +0 -0
  145. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/mcp_server/cursor_rules.md +0 -0
  146. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/mcp_server/docker-compose.yml +0 -0
  147. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/mcp_server/graphiti_mcp_server.py +0 -0
  148. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/mcp_server/mcp_config_sse_example.json +0 -0
  149. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/mcp_server/mcp_config_stdio_example.json +0 -0
  150. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/py.typed +0 -0
  151. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/server/.env.example +0 -0
  152. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/server/README.md +0 -0
  153. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/server/graph_service/__init__.py +0 -0
  154. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/server/graph_service/dto/__init__.py +0 -0
  155. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/server/graph_service/dto/common.py +0 -0
  156. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/server/graph_service/dto/ingest.py +0 -0
  157. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/server/graph_service/dto/retrieve.py +0 -0
  158. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/server/graph_service/main.py +0 -0
  159. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/server/graph_service/routers/__init__.py +0 -0
  160. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/server/graph_service/routers/ingest.py +0 -0
  161. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/server/graph_service/routers/retrieve.py +0 -0
  162. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/server/graph_service/zep_graphiti.py +0 -0
  163. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/signatures/version1/cla.json +0 -0
  164. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/tests/cross_encoder/test_bge_reranker_client.py +0 -0
  165. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/tests/embedder/embedder_fixtures.py +0 -0
  166. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/tests/embedder/test_openai.py +0 -0
  167. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/tests/embedder/test_voyage.py +0 -0
  168. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/tests/evals/data/longmemeval_data/README.md +0 -0
  169. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/tests/evals/data/longmemeval_data/longmemeval_oracle.json +0 -0
  170. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/tests/evals/eval_cli.py +0 -0
  171. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/tests/evals/eval_e2e_graph_building.py +0 -0
  172. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/tests/evals/pytest.ini +0 -0
  173. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/tests/evals/utils.py +0 -0
  174. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/tests/llm_client/test_anthropic_client.py +0 -0
  175. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/tests/llm_client/test_anthropic_client_int.py +0 -0
  176. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/tests/llm_client/test_client.py +0 -0
  177. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/tests/llm_client/test_errors.py +0 -0
  178. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/tests/test_graphiti_int.py +0 -0
  179. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/tests/test_node_int.py +0 -0
  180. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/tests/utils/maintenance/test_edge_operations.py +0 -0
  181. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/tests/utils/maintenance/test_temporal_operations_int.py +0 -0
  182. {graphiti_core-0.14.0 → graphiti_core-0.15.0}/tests/utils/search/search_utils_test.py +0 -0
@@ -0,0 +1,42 @@
1
+ name: Pyright Type Check
2
+
3
+ permissions:
4
+ contents: read
5
+
6
+ on:
7
+ push:
8
+ branches: ["main"]
9
+ pull_request:
10
+ branches: ["main"]
11
+
12
+ jobs:
13
+ pyright:
14
+ runs-on: depot-ubuntu-22.04
15
+ environment: development
16
+ steps:
17
+ - uses: actions/checkout@v4
18
+ - name: Set up Python
19
+ id: setup-python
20
+ uses: actions/setup-python@v5
21
+ with:
22
+ python-version: "3.10"
23
+ - name: Install uv
24
+ uses: astral-sh/setup-uv@v3
25
+ with:
26
+ version: "latest"
27
+ - name: Install dependencies
28
+ run: uv sync --all-extras
29
+ - name: Run Pyright for graphiti-core
30
+ shell: bash
31
+ run: |
32
+ uv run pyright ./graphiti_core
33
+ - name: Install graph-service dependencies
34
+ shell: bash
35
+ run: |
36
+ cd server
37
+ uv sync --all-extras
38
+ - name: Run Pyright for graph-service
39
+ shell: bash
40
+ run: |
41
+ cd server
42
+ uv run pyright .
@@ -0,0 +1,51 @@
1
+ name: Unit Tests
2
+
3
+ on:
4
+ push:
5
+ branches: [main]
6
+ pull_request:
7
+ branches: [main]
8
+
9
+ permissions:
10
+ contents: read
11
+
12
+ jobs:
13
+ test:
14
+ runs-on: depot-ubuntu-22.04
15
+ environment:
16
+ name: development
17
+ services:
18
+ falkordb:
19
+ image: falkordb/falkordb:latest
20
+ ports:
21
+ - 6379:6379
22
+ options: --health-cmd "redis-cli ping" --health-interval 10s --health-timeout 5s --health-retries 5
23
+ steps:
24
+ - uses: actions/checkout@v4
25
+ - name: Set up Python
26
+ uses: actions/setup-python@v5
27
+ with:
28
+ python-version: "3.10"
29
+ - name: Install uv
30
+ uses: astral-sh/setup-uv@v3
31
+ with:
32
+ version: "latest"
33
+ - name: Install redis-cli for FalkorDB health check
34
+ run: sudo apt-get update && sudo apt-get install -y redis-tools
35
+ - name: Install dependencies
36
+ run: uv sync --all-extras
37
+ - name: Run non-integration tests
38
+ env:
39
+ PYTHONPATH: ${{ github.workspace }}
40
+ run: |
41
+ uv run pytest -m "not integration"
42
+ - name: Wait for FalkorDB
43
+ run: |
44
+ timeout 60 bash -c 'until redis-cli -h localhost -p 6379 ping; do sleep 1; done'
45
+ - name: Run FalkorDB integration tests
46
+ env:
47
+ PYTHONPATH: ${{ github.workspace }}
48
+ FALKORDB_HOST: localhost
49
+ FALKORDB_PORT: 6379
50
+ run: |
51
+ uv run pytest tests/driver/test_falkordb_driver.py
@@ -5,7 +5,7 @@ PYTHON = python3
5
5
  UV = uv
6
6
  PYTEST = $(UV) run pytest
7
7
  RUFF = $(UV) run ruff
8
- MYPY = $(UV) run mypy
8
+ PYRIGHT = $(UV) run pyright
9
9
 
10
10
  # Default target
11
11
  all: format lint test
@@ -22,7 +22,7 @@ format:
22
22
  # Lint code
23
23
  lint:
24
24
  $(RUFF) check
25
- $(MYPY) . --show-column-numbers --show-error-codes --pretty
25
+ $(PYRIGHT) ./graphiti_core
26
26
 
27
27
  # Run tests
28
28
  test:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: graphiti-core
3
- Version: 0.14.0
3
+ Version: 0.15.0
4
4
  Summary: A temporal graph building library
5
5
  Project-URL: Homepage, https://help.getzep.com/graphiti/graphiti/overview
6
6
  Project-URL: Repository, https://github.com/getzep/graphiti
@@ -29,7 +29,7 @@ Requires-Dist: langchain-anthropic>=0.2.4; extra == 'dev'
29
29
  Requires-Dist: langchain-openai>=0.2.6; extra == 'dev'
30
30
  Requires-Dist: langgraph>=0.2.15; extra == 'dev'
31
31
  Requires-Dist: langsmith>=0.1.108; extra == 'dev'
32
- Requires-Dist: mypy>=1.11.1; extra == 'dev'
32
+ Requires-Dist: pyright>=1.1.380; extra == 'dev'
33
33
  Requires-Dist: pytest-asyncio>=0.24.0; extra == 'dev'
34
34
  Requires-Dist: pytest-xdist>=3.6.1; extra == 'dev'
35
35
  Requires-Dist: pytest>=8.3.3; extra == 'dev'
@@ -153,7 +153,7 @@ Requirements:
153
153
 
154
154
  - Python 3.10 or higher
155
155
  - Neo4j 5.26 / FalkorDB 1.1.2 or higher (serves as the embeddings storage backend)
156
- - OpenAI API key (for LLM inference and embedding)
156
+ - OpenAI API key (Graphiti defaults to OpenAI for LLM inference and embedding)
157
157
 
158
158
  > [!IMPORTANT]
159
159
  > Graphiti works best with LLM services that support Structured Output (such as OpenAI and Gemini).
@@ -167,6 +167,12 @@ Optional:
167
167
  > [!TIP]
168
168
  > The simplest way to install Neo4j is via [Neo4j Desktop](https://neo4j.com/download/). It provides a user-friendly
169
169
  > interface to manage Neo4j instances and databases.
170
+ > Alternatively, you can use FalkorDB on-premises via Docker and instantly start with the quickstart example:
171
+
172
+ ```bash
173
+ docker run -p 6379:6379 -p 3000:3000 -it --rm falkordb/falkordb:latest
174
+
175
+ ```
170
176
 
171
177
  ```bash
172
178
  pip install graphiti-core
@@ -197,13 +203,13 @@ pip install graphiti-core[anthropic,groq,google-genai]
197
203
  ## Quick Start
198
204
 
199
205
  > [!IMPORTANT]
200
- > Graphiti uses OpenAI for LLM inference and embedding. Ensure that an `OPENAI_API_KEY` is set in your environment.
206
+ > Graphiti defaults to using OpenAI for LLM inference and embedding. Ensure that an `OPENAI_API_KEY` is set in your environment.
201
207
  > Support for Anthropic and Groq LLM inferences is available, too. Other LLM providers may be supported via OpenAI
202
208
  > compatible APIs.
203
209
 
204
210
  For a complete working example, see the [Quickstart Example](./examples/quickstart/README.md) in the examples directory. The quickstart demonstrates:
205
211
 
206
- 1. Connecting to a Neo4j database
212
+ 1. Connecting to a Neo4j or FalkorDB database
207
213
  2. Initializing Graphiti indices and constraints
208
214
  3. Adding episodes to the graph (both text and structured JSON)
209
215
  4. Searching for relationships (edges) using hybrid search
@@ -247,7 +253,7 @@ as such this feature is off by default.
247
253
 
248
254
  ## Using Graphiti with Azure OpenAI
249
255
 
250
- Graphiti supports Azure OpenAI for both LLM inference and embeddings. To use Azure OpenAI, you'll need to configure both the LLM client and embedder with your Azure OpenAI credentials.
256
+ Graphiti supports Azure OpenAI for both LLM inference and embeddings. Azure deployments often require different endpoints for LLM and embedding services, and separate deployments for default and small models.
251
257
 
252
258
  ```python
253
259
  from openai import AsyncAzureOpenAI
@@ -256,19 +262,26 @@ from graphiti_core.llm_client import LLMConfig, OpenAIClient
256
262
  from graphiti_core.embedder.openai import OpenAIEmbedder, OpenAIEmbedderConfig
257
263
  from graphiti_core.cross_encoder.openai_reranker_client import OpenAIRerankerClient
258
264
 
259
- # Azure OpenAI configuration
265
+ # Azure OpenAI configuration - use separate endpoints for different services
260
266
  api_key = "<your-api-key>"
261
267
  api_version = "<your-api-version>"
262
- azure_endpoint = "<your-azure-endpoint>"
268
+ llm_endpoint = "<your-llm-endpoint>" # e.g., "https://your-llm-resource.openai.azure.com/"
269
+ embedding_endpoint = "<your-embedding-endpoint>" # e.g., "https://your-embedding-resource.openai.azure.com/"
270
+
271
+ # Create separate Azure OpenAI clients for different services
272
+ llm_client_azure = AsyncAzureOpenAI(
273
+ api_key=api_key,
274
+ api_version=api_version,
275
+ azure_endpoint=llm_endpoint
276
+ )
263
277
 
264
- # Create Azure OpenAI client for LLM
265
- azure_openai_client = AsyncAzureOpenAI(
278
+ embedding_client_azure = AsyncAzureOpenAI(
266
279
  api_key=api_key,
267
280
  api_version=api_version,
268
- azure_endpoint=azure_endpoint
281
+ azure_endpoint=embedding_endpoint
269
282
  )
270
283
 
271
- # Create LLM Config with your Azure deployed model names
284
+ # Create LLM Config with your Azure deployment names
272
285
  azure_llm_config = LLMConfig(
273
286
  small_model="gpt-4.1-nano",
274
287
  model="gpt-4.1-mini",
@@ -281,29 +294,30 @@ graphiti = Graphiti(
281
294
  "password",
282
295
  llm_client=OpenAIClient(
283
296
  llm_config=azure_llm_config,
284
- client=azure_openai_client
297
+ client=llm_client_azure
285
298
  ),
286
299
  embedder=OpenAIEmbedder(
287
300
  config=OpenAIEmbedderConfig(
288
- embedding_model="text-embedding-3-small" # Use your Azure deployed embedding model name
301
+ embedding_model="text-embedding-3-small-deployment" # Your Azure embedding deployment name
289
302
  ),
290
- client=azure_openai_client
303
+ client=embedding_client_azure
291
304
  ),
292
- # Optional: Configure the OpenAI cross encoder with Azure OpenAI
293
305
  cross_encoder=OpenAIRerankerClient(
294
- llm_config=azure_llm_config,
295
- client=azure_openai_client
306
+ llm_config=LLMConfig(
307
+ model=azure_llm_config.small_model # Use small model for reranking
308
+ ),
309
+ client=llm_client_azure
296
310
  )
297
311
  )
298
312
 
299
313
  # Now you can use Graphiti with Azure OpenAI
300
314
  ```
301
315
 
302
- Make sure to replace the placeholder values with your actual Azure OpenAI credentials and specify the correct embedding model name that's deployed in your Azure OpenAI service.
316
+ Make sure to replace the placeholder values with your actual Azure OpenAI credentials and deployment names that match your Azure OpenAI service configuration.
303
317
 
304
318
  ## Using Graphiti with Google Gemini
305
319
 
306
- Graphiti supports Google's Gemini models for both LLM inference and embeddings. To use Gemini, you'll need to configure both the LLM client and embedder with your Google API key.
320
+ Graphiti supports Google's Gemini models for LLM inference, embeddings, and cross-encoding/reranking. To use Gemini, you'll need to configure the LLM client, embedder, and the cross-encoder with your Google API key.
307
321
 
308
322
  Install Graphiti:
309
323
 
@@ -319,6 +333,7 @@ pip install "graphiti-core[google-genai]"
319
333
  from graphiti_core import Graphiti
320
334
  from graphiti_core.llm_client.gemini_client import GeminiClient, LLMConfig
321
335
  from graphiti_core.embedder.gemini import GeminiEmbedder, GeminiEmbedderConfig
336
+ from graphiti_core.cross_encoder.gemini_reranker_client import GeminiRerankerClient
322
337
 
323
338
  # Google API key configuration
324
339
  api_key = "<your-google-api-key>"
@@ -339,12 +354,20 @@ graphiti = Graphiti(
339
354
  api_key=api_key,
340
355
  embedding_model="embedding-001"
341
356
  )
357
+ ),
358
+ cross_encoder=GeminiRerankerClient(
359
+ config=LLMConfig(
360
+ api_key=api_key,
361
+ model="gemini-2.5-flash-lite-preview-06-17"
362
+ )
342
363
  )
343
364
  )
344
365
 
345
- # Now you can use Graphiti with Google Gemini
366
+ # Now you can use Graphiti with Google Gemini for all components
346
367
  ```
347
368
 
369
+ The Gemini reranker uses the `gemini-2.5-flash-lite-preview-06-17` model by default, which is optimized for cost-effective and low-latency classification tasks. It uses the same boolean classification approach as the OpenAI reranker, leveraging Gemini's log probabilities feature to rank passage relevance.
370
+
348
371
  ## Using Graphiti with Ollama (Local LLM)
349
372
 
350
373
  Graphiti supports Ollama for running local LLMs and embedding models via Ollama's OpenAI-compatible API. This is ideal for privacy-focused applications or when you want to avoid API costs.
@@ -106,7 +106,7 @@ Requirements:
106
106
 
107
107
  - Python 3.10 or higher
108
108
  - Neo4j 5.26 / FalkorDB 1.1.2 or higher (serves as the embeddings storage backend)
109
- - OpenAI API key (for LLM inference and embedding)
109
+ - OpenAI API key (Graphiti defaults to OpenAI for LLM inference and embedding)
110
110
 
111
111
  > [!IMPORTANT]
112
112
  > Graphiti works best with LLM services that support Structured Output (such as OpenAI and Gemini).
@@ -120,6 +120,12 @@ Optional:
120
120
  > [!TIP]
121
121
  > The simplest way to install Neo4j is via [Neo4j Desktop](https://neo4j.com/download/). It provides a user-friendly
122
122
  > interface to manage Neo4j instances and databases.
123
+ > Alternatively, you can use FalkorDB on-premises via Docker and instantly start with the quickstart example:
124
+
125
+ ```bash
126
+ docker run -p 6379:6379 -p 3000:3000 -it --rm falkordb/falkordb:latest
127
+
128
+ ```
123
129
 
124
130
  ```bash
125
131
  pip install graphiti-core
@@ -150,13 +156,13 @@ pip install graphiti-core[anthropic,groq,google-genai]
150
156
  ## Quick Start
151
157
 
152
158
  > [!IMPORTANT]
153
- > Graphiti uses OpenAI for LLM inference and embedding. Ensure that an `OPENAI_API_KEY` is set in your environment.
159
+ > Graphiti defaults to using OpenAI for LLM inference and embedding. Ensure that an `OPENAI_API_KEY` is set in your environment.
154
160
  > Support for Anthropic and Groq LLM inferences is available, too. Other LLM providers may be supported via OpenAI
155
161
  > compatible APIs.
156
162
 
157
163
  For a complete working example, see the [Quickstart Example](./examples/quickstart/README.md) in the examples directory. The quickstart demonstrates:
158
164
 
159
- 1. Connecting to a Neo4j database
165
+ 1. Connecting to a Neo4j or FalkorDB database
160
166
  2. Initializing Graphiti indices and constraints
161
167
  3. Adding episodes to the graph (both text and structured JSON)
162
168
  4. Searching for relationships (edges) using hybrid search
@@ -200,7 +206,7 @@ as such this feature is off by default.
200
206
 
201
207
  ## Using Graphiti with Azure OpenAI
202
208
 
203
- Graphiti supports Azure OpenAI for both LLM inference and embeddings. To use Azure OpenAI, you'll need to configure both the LLM client and embedder with your Azure OpenAI credentials.
209
+ Graphiti supports Azure OpenAI for both LLM inference and embeddings. Azure deployments often require different endpoints for LLM and embedding services, and separate deployments for default and small models.
204
210
 
205
211
  ```python
206
212
  from openai import AsyncAzureOpenAI
@@ -209,19 +215,26 @@ from graphiti_core.llm_client import LLMConfig, OpenAIClient
209
215
  from graphiti_core.embedder.openai import OpenAIEmbedder, OpenAIEmbedderConfig
210
216
  from graphiti_core.cross_encoder.openai_reranker_client import OpenAIRerankerClient
211
217
 
212
- # Azure OpenAI configuration
218
+ # Azure OpenAI configuration - use separate endpoints for different services
213
219
  api_key = "<your-api-key>"
214
220
  api_version = "<your-api-version>"
215
- azure_endpoint = "<your-azure-endpoint>"
221
+ llm_endpoint = "<your-llm-endpoint>" # e.g., "https://your-llm-resource.openai.azure.com/"
222
+ embedding_endpoint = "<your-embedding-endpoint>" # e.g., "https://your-embedding-resource.openai.azure.com/"
223
+
224
+ # Create separate Azure OpenAI clients for different services
225
+ llm_client_azure = AsyncAzureOpenAI(
226
+ api_key=api_key,
227
+ api_version=api_version,
228
+ azure_endpoint=llm_endpoint
229
+ )
216
230
 
217
- # Create Azure OpenAI client for LLM
218
- azure_openai_client = AsyncAzureOpenAI(
231
+ embedding_client_azure = AsyncAzureOpenAI(
219
232
  api_key=api_key,
220
233
  api_version=api_version,
221
- azure_endpoint=azure_endpoint
234
+ azure_endpoint=embedding_endpoint
222
235
  )
223
236
 
224
- # Create LLM Config with your Azure deployed model names
237
+ # Create LLM Config with your Azure deployment names
225
238
  azure_llm_config = LLMConfig(
226
239
  small_model="gpt-4.1-nano",
227
240
  model="gpt-4.1-mini",
@@ -234,29 +247,30 @@ graphiti = Graphiti(
234
247
  "password",
235
248
  llm_client=OpenAIClient(
236
249
  llm_config=azure_llm_config,
237
- client=azure_openai_client
250
+ client=llm_client_azure
238
251
  ),
239
252
  embedder=OpenAIEmbedder(
240
253
  config=OpenAIEmbedderConfig(
241
- embedding_model="text-embedding-3-small" # Use your Azure deployed embedding model name
254
+ embedding_model="text-embedding-3-small-deployment" # Your Azure embedding deployment name
242
255
  ),
243
- client=azure_openai_client
256
+ client=embedding_client_azure
244
257
  ),
245
- # Optional: Configure the OpenAI cross encoder with Azure OpenAI
246
258
  cross_encoder=OpenAIRerankerClient(
247
- llm_config=azure_llm_config,
248
- client=azure_openai_client
259
+ llm_config=LLMConfig(
260
+ model=azure_llm_config.small_model # Use small model for reranking
261
+ ),
262
+ client=llm_client_azure
249
263
  )
250
264
  )
251
265
 
252
266
  # Now you can use Graphiti with Azure OpenAI
253
267
  ```
254
268
 
255
- Make sure to replace the placeholder values with your actual Azure OpenAI credentials and specify the correct embedding model name that's deployed in your Azure OpenAI service.
269
+ Make sure to replace the placeholder values with your actual Azure OpenAI credentials and deployment names that match your Azure OpenAI service configuration.
256
270
 
257
271
  ## Using Graphiti with Google Gemini
258
272
 
259
- Graphiti supports Google's Gemini models for both LLM inference and embeddings. To use Gemini, you'll need to configure both the LLM client and embedder with your Google API key.
273
+ Graphiti supports Google's Gemini models for LLM inference, embeddings, and cross-encoding/reranking. To use Gemini, you'll need to configure the LLM client, embedder, and the cross-encoder with your Google API key.
260
274
 
261
275
  Install Graphiti:
262
276
 
@@ -272,6 +286,7 @@ pip install "graphiti-core[google-genai]"
272
286
  from graphiti_core import Graphiti
273
287
  from graphiti_core.llm_client.gemini_client import GeminiClient, LLMConfig
274
288
  from graphiti_core.embedder.gemini import GeminiEmbedder, GeminiEmbedderConfig
289
+ from graphiti_core.cross_encoder.gemini_reranker_client import GeminiRerankerClient
275
290
 
276
291
  # Google API key configuration
277
292
  api_key = "<your-google-api-key>"
@@ -292,12 +307,20 @@ graphiti = Graphiti(
292
307
  api_key=api_key,
293
308
  embedding_model="embedding-001"
294
309
  )
310
+ ),
311
+ cross_encoder=GeminiRerankerClient(
312
+ config=LLMConfig(
313
+ api_key=api_key,
314
+ model="gemini-2.5-flash-lite-preview-06-17"
315
+ )
295
316
  )
296
317
  )
297
318
 
298
- # Now you can use Graphiti with Google Gemini
319
+ # Now you can use Graphiti with Google Gemini for all components
299
320
  ```
300
321
 
322
+ The Gemini reranker uses the `gemini-2.5-flash-lite-preview-06-17` model by default, which is optimized for cost-effective and low-latency classification tasks. Unlike the OpenAI reranker, it does not rely on log probabilities (which the Gemini API does not expose); instead, it scores each passage's relevance directly on a 0-100 scale and normalizes the result to rank passages.
323
+
301
324
  ## Using Graphiti with Ollama (Local LLM)
302
325
 
303
326
  Graphiti supports Ollama for running local LLMs and embedding models via Ollama's OpenAI-compatible API. This is ideal for privacy-focused applications or when you want to avoid API costs.
@@ -46,14 +46,20 @@ logger = logging.getLogger(__name__)
46
46
  load_dotenv()
47
47
 
48
48
  # FalkorDB connection parameters
49
- # Make sure FalkorDB on premises is running, see https://docs.falkordb.com/
50
- falkor_uri = os.environ.get('FALKORDB_URI', 'falkor://localhost:6379')
51
- falkor_user = os.environ.get('FALKORDB_USER', 'falkor')
52
- falkor_password = os.environ.get('FALKORDB_PASSWORD', '')
53
-
54
- if not falkor_uri:
55
- raise ValueError('FALKORDB_URI must be set')
56
-
49
+ # Make sure FalkorDB (on-premises) is running; see https://docs.falkordb.com/
50
+ # By default, FalkorDB does not require a username or password,
51
+ # but you can set them via environment variables for added security.
52
+ #
53
+ # If you're using FalkorDB Cloud, set the environment variables accordingly.
54
+ # For on-premises use, you can leave them as None or set them to your preferred values.
55
+ #
56
+ # The default host and port are 'localhost' and '6379', respectively.
57
+ # You can override these values in your environment variables or directly in the code.
58
+
59
+ falkor_username = os.environ.get('FALKORDB_USERNAME', None)
60
+ falkor_password = os.environ.get('FALKORDB_PASSWORD', None)
61
+ falkor_host = os.environ.get('FALKORDB_HOST', 'localhost')
62
+ falkor_port = os.environ.get('FALKORDB_PORT', '6379')
57
63
 
58
64
  async def main():
59
65
  #################################################
@@ -65,8 +71,8 @@ async def main():
65
71
  #################################################
66
72
 
67
73
  # Initialize Graphiti with FalkorDB connection
68
- falkor_driver = FalkorDriver(uri=falkor_uri, user=falkor_user, password=falkor_password)
69
- graphiti = Graphiti(uri=falkor_uri, graph_driver=falkor_driver)
74
+ falkor_driver = FalkorDriver(host=falkor_host, port=falkor_port, username=falkor_username, password=falkor_password)
75
+ graphiti = Graphiti(graph_driver=falkor_driver)
70
76
 
71
77
  try:
72
78
  # Initialize the graph database with graphiti's indices. This only needs to be done once.
@@ -15,6 +15,7 @@ limitations under the License.
15
15
  """
16
16
 
17
17
  from .client import CrossEncoderClient
18
+ from .gemini_reranker_client import GeminiRerankerClient
18
19
  from .openai_reranker_client import OpenAIRerankerClient
19
20
 
20
- __all__ = ['CrossEncoderClient', 'OpenAIRerankerClient']
21
+ __all__ = ['CrossEncoderClient', 'GeminiRerankerClient', 'OpenAIRerankerClient']
@@ -0,0 +1,146 @@
1
+ """
2
+ Copyright 2024, Zep Software, Inc.
3
+
4
+ Licensed under the Apache License, Version 2.0 (the "License");
5
+ you may not use this file except in compliance with the License.
6
+ You may obtain a copy of the License at
7
+
8
+ http://www.apache.org/licenses/LICENSE-2.0
9
+
10
+ Unless required by applicable law or agreed to in writing, software
11
+ distributed under the License is distributed on an "AS IS" BASIS,
12
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ See the License for the specific language governing permissions and
14
+ limitations under the License.
15
+ """
16
+
17
+ import logging
18
+ import re
19
+
20
+ from google import genai # type: ignore
21
+ from google.genai import types # type: ignore
22
+
23
+ from ..helpers import semaphore_gather
24
+ from ..llm_client import LLMConfig, RateLimitError
25
+ from .client import CrossEncoderClient
26
+
27
+ logger = logging.getLogger(__name__)
28
+
29
+ DEFAULT_MODEL = 'gemini-2.5-flash-lite-preview-06-17'
30
+
31
+
32
class GeminiRerankerClient(CrossEncoderClient):
    """Cross-encoder client that ranks passages by asking Gemini for a per-passage relevance score."""

    def __init__(
        self,
        config: LLMConfig | None = None,
        client: genai.Client | None = None,
    ):
        """
        Initialize the GeminiRerankerClient with the provided configuration and client.

        The Gemini Developer API does not yet support logprobs. Unlike the OpenAI reranker,
        this reranker uses the Gemini API to perform direct relevance scoring of passages.
        Each passage is scored individually on a 0-100 scale.

        Args:
            config (LLMConfig | None): The configuration for the LLM client, including API key, model, base URL, temperature, and max tokens.
            client (genai.Client | None): An optional async client instance to use. If not provided, a new genai.Client is created.
        """
        self.config = LLMConfig() if config is None else config
        # Reuse a caller-supplied client when given; otherwise build one from the config's API key.
        self.client = genai.Client(api_key=self.config.api_key) if client is None else client

    async def rank(self, query: str, passages: list[str]) -> list[tuple[str, float]]:
        """
        Rank passages based on their relevance to the query using direct scoring.

        Each passage is scored individually on a 0-100 scale, then normalized to [0,1].
        """
        # Zero or one passage needs no ranking; a lone passage is trivially top-ranked.
        if len(passages) <= 1:
            return [(passage, 1.0) for passage in passages]

        # Build one single-turn scoring conversation per passage.
        conversations: list[list[types.Content]] = []
        for passage in passages:
            prompt = f"""Rate how well this passage answers or relates to the query. Use a scale from 0 to 100.

Query: {query}

Passage: {passage}

Provide only a number between 0 and 100 (no explanation, just the number):"""

            conversations.append(
                [
                    types.Content(
                        role='user',
                        parts=[types.Part.from_text(text=prompt)],
                    ),
                ]
            )

        try:
            # Score every passage concurrently - O(n) API calls, bounded by the shared semaphore.
            # NOTE(review): max_output_tokens=3 assumes the model emits only the bare number —
            # confirm this budget is sufficient for thinking-enabled Gemini models.
            responses = await semaphore_gather(
                *[
                    self.client.aio.models.generate_content(
                        model=self.config.model or DEFAULT_MODEL,
                        contents=conversation,  # type: ignore
                        config=types.GenerateContentConfig(
                            system_instruction='You are an expert at rating passage relevance. Respond with only a number from 0-100.',
                            temperature=0.0,
                            max_output_tokens=3,
                        ),
                    )
                    for conversation in conversations
                ]
            )

            # Pair each passage with its parsed score, then order by descending relevance.
            ranked = [
                (passage, self._parse_score(response))
                for passage, response in zip(passages, responses, strict=True)
            ]
            ranked.sort(key=lambda item: item[1], reverse=True)
            return ranked

        except Exception as e:
            # Check if it's a rate limit error based on Gemini API error codes
            error_message = str(e).lower()
            if (
                'rate limit' in error_message
                or 'quota' in error_message
                or 'resource_exhausted' in error_message
                or '429' in str(e)
            ):
                raise RateLimitError from e

            logger.error(f'Error in generating LLM response: {e}')
            raise

    @staticmethod
    def _parse_score(response) -> float:
        """Extract a 0-100 score from one Gemini response and normalize it to [0, 1]; 0.0 on any failure."""
        try:
            if hasattr(response, 'text') and response.text:
                score_text = response.text.strip()
                # Handle cases where model might return non-numeric text
                match = re.search(r'\b(\d{1,3})\b', score_text)
                if match:
                    # Clamp so malformed answers such as '999' cannot leave the valid range.
                    return max(0.0, min(1.0, float(match.group(1)) / 100.0))
                logger.warning(f'Could not extract numeric score from response: {score_text}')
            else:
                logger.warning('Empty response from Gemini for passage scoring')
        except (ValueError, AttributeError) as e:
            logger.warning(f'Error parsing score from Gemini response: {e}')
        return 0.0
@@ -14,4 +14,7 @@ See the License for the specific language governing permissions and
14
14
  limitations under the License.
15
15
  """
16
16
 
17
# Re-export this package's own driver wrappers, not the third-party client classes.
# The previous revision imported `FalkorDB` from the falkordb package and `Neo4jDriver`
# from the neo4j package, which dropped graphiti's FalkorDriver wrapper from the public
# API and bound the advertised `Neo4jDriver` name to neo4j's raw driver class.
from .falkordb_driver import FalkorDriver
from .neo4j_driver import Neo4jDriver

# NOTE(review): `GraphDriver` was exported here in 0.14.0 — confirm its module path and
# restore it to __all__ if it is still part of the public API.
__all__ = ['FalkorDriver', 'Neo4jDriver']