graphiti-core 0.18.3.tar.gz → 0.18.5.tar.gz

This diff compares the contents of the two package versions as published to their public registry and is provided for informational purposes only.

Potentially problematic release.



Files changed (183)
  1. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/PKG-INFO +3 -3
  2. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/README.md +2 -2
  3. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/graphiti.py +12 -1
  4. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/graphiti_types.py +1 -0
  5. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/nodes.py +13 -8
  6. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/prompts/dedupe_edges.py +4 -4
  7. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/prompts/dedupe_nodes.py +9 -9
  8. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/prompts/eval.py +4 -4
  9. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/prompts/extract_edges.py +4 -4
  10. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/prompts/extract_nodes.py +8 -8
  11. graphiti_core-0.18.5/graphiti_core/prompts/prompt_helpers.py +24 -0
  12. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/prompts/summarize_nodes.py +6 -6
  13. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/search/search_filters.py +1 -1
  14. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/search/search_helpers.py +8 -7
  15. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/utils/bulk_utils.py +7 -1
  16. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/utils/maintenance/community_operations.py +33 -12
  17. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/utils/maintenance/edge_operations.py +5 -0
  18. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/utils/maintenance/node_operations.py +9 -0
  19. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/utils/maintenance/temporal_operations.py +11 -2
  20. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/pyproject.toml +1 -1
  21. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/signatures/version1/cla.json +32 -0
  22. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/uv.lock +2 -2
  23. graphiti_core-0.18.3/graphiti_core/prompts/prompt_helpers.py +0 -1
  24. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/.env.example +0 -0
  25. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/.github/ISSUE_TEMPLATE/bug_report.md +0 -0
  26. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/.github/dependabot.yml +0 -0
  27. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/.github/pull_request_template.md +0 -0
  28. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/.github/secret_scanning.yml +0 -0
  29. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/.github/workflows/cla.yml +0 -0
  30. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/.github/workflows/claude-code-review.yml +0 -0
  31. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/.github/workflows/claude.yml +0 -0
  32. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/.github/workflows/codeql.yml +0 -0
  33. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/.github/workflows/lint.yml +0 -0
  34. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/.github/workflows/mcp-server-docker.yml +0 -0
  35. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/.github/workflows/release-graphiti-core.yml +0 -0
  36. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/.github/workflows/typecheck.yml +0 -0
  37. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/.github/workflows/unit_tests.yml +0 -0
  38. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/.gitignore +0 -0
  39. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/CLAUDE.md +0 -0
  40. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/CODE_OF_CONDUCT.md +0 -0
  41. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/CONTRIBUTING.md +0 -0
  42. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/Dockerfile +0 -0
  43. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/LICENSE +0 -0
  44. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/Makefile +0 -0
  45. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/SECURITY.md +0 -0
  46. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/Zep-CLA.md +0 -0
  47. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/conftest.py +0 -0
  48. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/depot.json +0 -0
  49. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/docker-compose.test.yml +0 -0
  50. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/docker-compose.yml +0 -0
  51. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/ellipsis.yaml +0 -0
  52. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/examples/data/manybirds_products.json +0 -0
  53. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/examples/ecommerce/runner.ipynb +0 -0
  54. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/examples/ecommerce/runner.py +0 -0
  55. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/examples/langgraph-agent/agent.ipynb +0 -0
  56. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/examples/langgraph-agent/tinybirds-jess.png +0 -0
  57. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/examples/podcast/podcast_runner.py +0 -0
  58. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/examples/podcast/podcast_transcript.txt +0 -0
  59. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/examples/podcast/transcript_parser.py +0 -0
  60. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/examples/quickstart/README.md +0 -0
  61. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/examples/quickstart/quickstart_falkordb.py +0 -0
  62. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/examples/quickstart/quickstart_neo4j.py +0 -0
  63. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/examples/quickstart/requirements.txt +0 -0
  64. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/examples/wizard_of_oz/parser.py +0 -0
  65. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/examples/wizard_of_oz/runner.py +0 -0
  66. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/examples/wizard_of_oz/woo.txt +0 -0
  67. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/__init__.py +0 -0
  68. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/cross_encoder/__init__.py +0 -0
  69. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/cross_encoder/bge_reranker_client.py +0 -0
  70. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/cross_encoder/client.py +0 -0
  71. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/cross_encoder/gemini_reranker_client.py +0 -0
  72. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/cross_encoder/openai_reranker_client.py +0 -0
  73. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/driver/__init__.py +0 -0
  74. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/driver/driver.py +0 -0
  75. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/driver/falkordb_driver.py +0 -0
  76. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/driver/neo4j_driver.py +0 -0
  77. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/edges.py +0 -0
  78. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/embedder/__init__.py +0 -0
  79. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/embedder/azure_openai.py +0 -0
  80. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/embedder/client.py +0 -0
  81. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/embedder/gemini.py +0 -0
  82. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/embedder/openai.py +0 -0
  83. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/embedder/voyage.py +0 -0
  84. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/errors.py +0 -0
  85. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/graph_queries.py +0 -0
  86. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/helpers.py +0 -0
  87. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/llm_client/__init__.py +0 -0
  88. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/llm_client/anthropic_client.py +0 -0
  89. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/llm_client/azure_openai_client.py +0 -0
  90. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/llm_client/client.py +0 -0
  91. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/llm_client/config.py +0 -0
  92. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/llm_client/errors.py +0 -0
  93. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/llm_client/gemini_client.py +0 -0
  94. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/llm_client/groq_client.py +0 -0
  95. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/llm_client/openai_base_client.py +0 -0
  96. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/llm_client/openai_client.py +0 -0
  97. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/llm_client/openai_generic_client.py +0 -0
  98. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/llm_client/utils.py +0 -0
  99. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/models/__init__.py +0 -0
  100. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/models/edges/__init__.py +0 -0
  101. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/models/edges/edge_db_queries.py +0 -0
  102. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/models/nodes/__init__.py +0 -0
  103. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/models/nodes/node_db_queries.py +0 -0
  104. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/prompts/__init__.py +0 -0
  105. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/prompts/extract_edge_dates.py +0 -0
  106. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/prompts/invalidate_edges.py +0 -0
  107. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/prompts/lib.py +0 -0
  108. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/prompts/models.py +0 -0
  109. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/py.typed +0 -0
  110. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/search/__init__.py +0 -0
  111. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/search/search.py +0 -0
  112. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/search/search_config.py +0 -0
  113. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/search/search_config_recipes.py +0 -0
  114. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/search/search_utils.py +0 -0
  115. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/telemetry/__init__.py +0 -0
  116. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/telemetry/telemetry.py +0 -0
  117. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/utils/__init__.py +0 -0
  118. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/utils/datetime_utils.py +0 -0
  119. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/utils/maintenance/__init__.py +0 -0
  120. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/utils/maintenance/graph_data_operations.py +0 -0
  121. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/utils/maintenance/utils.py +0 -0
  122. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/graphiti_core/utils/ontology_utils/entity_types_utils.py +0 -0
  123. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/images/arxiv-screenshot.png +0 -0
  124. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/images/graphiti-graph-intro.gif +0 -0
  125. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/images/graphiti-intro-slides-stock-2.gif +0 -0
  126. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/images/simple_graph.svg +0 -0
  127. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/mcp_server/.env.example +0 -0
  128. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/mcp_server/.python-version +0 -0
  129. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/mcp_server/Dockerfile +0 -0
  130. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/mcp_server/README.md +0 -0
  131. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/mcp_server/cursor_rules.md +0 -0
  132. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/mcp_server/docker-compose.yml +0 -0
  133. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/mcp_server/graphiti_mcp_server.py +0 -0
  134. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/mcp_server/mcp_config_sse_example.json +0 -0
  135. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/mcp_server/mcp_config_stdio_example.json +0 -0
  136. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/mcp_server/pyproject.toml +0 -0
  137. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/mcp_server/uv.lock +0 -0
  138. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/poetry.lock +0 -0
  139. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/py.typed +0 -0
  140. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/pytest.ini +0 -0
  141. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/server/.env.example +0 -0
  142. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/server/Makefile +0 -0
  143. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/server/README.md +0 -0
  144. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/server/graph_service/__init__.py +0 -0
  145. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/server/graph_service/config.py +0 -0
  146. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/server/graph_service/dto/__init__.py +0 -0
  147. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/server/graph_service/dto/common.py +0 -0
  148. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/server/graph_service/dto/ingest.py +0 -0
  149. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/server/graph_service/dto/retrieve.py +0 -0
  150. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/server/graph_service/main.py +0 -0
  151. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/server/graph_service/routers/__init__.py +0 -0
  152. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/server/graph_service/routers/ingest.py +0 -0
  153. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/server/graph_service/routers/retrieve.py +0 -0
  154. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/server/graph_service/zep_graphiti.py +0 -0
  155. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/server/pyproject.toml +0 -0
  156. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/server/uv.lock +0 -0
  157. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/cross_encoder/test_bge_reranker_client.py +0 -0
  158. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/cross_encoder/test_gemini_reranker_client.py +0 -0
  159. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/driver/__init__.py +0 -0
  160. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/driver/test_falkordb_driver.py +0 -0
  161. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/embedder/embedder_fixtures.py +0 -0
  162. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/embedder/test_gemini.py +0 -0
  163. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/embedder/test_openai.py +0 -0
  164. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/embedder/test_voyage.py +0 -0
  165. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/evals/data/longmemeval_data/README.md +0 -0
  166. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/evals/data/longmemeval_data/longmemeval_oracle.json +0 -0
  167. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/evals/eval_cli.py +0 -0
  168. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/evals/eval_e2e_graph_building.py +0 -0
  169. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/evals/pytest.ini +0 -0
  170. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/evals/utils.py +0 -0
  171. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/helpers_test.py +0 -0
  172. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/llm_client/test_anthropic_client.py +0 -0
  173. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/llm_client/test_anthropic_client_int.py +0 -0
  174. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/llm_client/test_client.py +0 -0
  175. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/llm_client/test_errors.py +0 -0
  176. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/llm_client/test_gemini_client.py +0 -0
  177. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/test_edge_int.py +0 -0
  178. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/test_entity_exclusion_int.py +0 -0
  179. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/test_graphiti_int.py +0 -0
  180. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/test_node_int.py +0 -0
  181. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/utils/maintenance/test_edge_operations.py +0 -0
  182. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/utils/maintenance/test_temporal_operations_int.py +0 -0
  183. {graphiti_core-0.18.3 → graphiti_core-0.18.5}/tests/utils/search/search_utils_test.py +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: graphiti-core
-Version: 0.18.3
+Version: 0.18.5
 Summary: A temporal graph building library
 Project-URL: Homepage, https://help.getzep.com/graphiti/graphiti/overview
 Project-URL: Repository, https://github.com/getzep/graphiti
@@ -369,7 +369,7 @@ graphiti = Graphiti(
     "neo4j",
     "password",
     llm_client=OpenAIClient(
-        llm_config=azure_llm_config,
+        config=azure_llm_config,
         client=llm_client_azure
     ),
     embedder=OpenAIEmbedder(
@@ -379,7 +379,7 @@ graphiti = Graphiti(
         client=embedding_client_azure
     ),
     cross_encoder=OpenAIRerankerClient(
-        llm_config=LLMConfig(
+        config=LLMConfig(
            model=azure_llm_config.small_model  # Use small model for reranking
        ),
        client=llm_client_azure
README.md

@@ -317,7 +317,7 @@ graphiti = Graphiti(
     "neo4j",
     "password",
     llm_client=OpenAIClient(
-        llm_config=azure_llm_config,
+        config=azure_llm_config,
         client=llm_client_azure
     ),
     embedder=OpenAIEmbedder(
@@ -327,7 +327,7 @@ graphiti = Graphiti(
         client=embedding_client_azure
     ),
     cross_encoder=OpenAIRerankerClient(
-        llm_config=LLMConfig(
+        config=LLMConfig(
            model=azure_llm_config.small_model  # Use small model for reranking
        ),
        client=llm_client_azure
graphiti_core/graphiti.py

@@ -123,6 +123,7 @@ class Graphiti:
         store_raw_episode_content: bool = True,
         graph_driver: GraphDriver | None = None,
         max_coroutines: int | None = None,
+        ensure_ascii: bool = False,
     ):
         """
         Initialize a Graphiti instance.
@@ -155,6 +156,10 @@
         max_coroutines : int | None, optional
             The maximum number of concurrent operations allowed. Overrides SEMAPHORE_LIMIT set in the environment.
             If not set, the Graphiti default is used.
+        ensure_ascii : bool, optional
+            Whether to escape non-ASCII characters in JSON serialization for prompts. Defaults to False.
+            Set to False to preserve non-ASCII characters (e.g., Korean, Japanese, Chinese) in their
+            original form, making them readable in LLM logs and improving model understanding.

         Returns
         -------
@@ -184,6 +189,7 @@

         self.store_raw_episode_content = store_raw_episode_content
         self.max_coroutines = max_coroutines
+        self.ensure_ascii = ensure_ascii
         if llm_client:
             self.llm_client = llm_client
         else:
@@ -202,6 +208,7 @@
             llm_client=self.llm_client,
             embedder=self.embedder,
             cross_encoder=self.cross_encoder,
+            ensure_ascii=self.ensure_ascii,
         )

         # Capture telemetry event
@@ -541,7 +548,9 @@
         if update_communities:
             communities, community_edges = await semaphore_gather(
                 *[
-                    update_community(self.driver, self.llm_client, self.embedder, node)
+                    update_community(
+                        self.driver, self.llm_client, self.embedder, node, self.ensure_ascii
+                    )
                     for node in nodes
                 ],
                 max_coroutines=self.max_coroutines,
@@ -1021,6 +1030,8 @@
                 entity_edges=[],
                 group_id=edge.group_id,
             ),
+            None,
+            self.ensure_ascii,
         )

         edges: list[EntityEdge] = [resolved_edge] + invalidated_edges
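
Taken together, the graphiti.py hunks thread a new ensure_ascii flag from the Graphiti constructor into GraphitiClients and the community-update path. A minimal usage sketch (the connection details are placeholders; only the ensure_ascii keyword comes from this release):

from graphiti_core import Graphiti

# ensure_ascii defaults to False in 0.18.5, so non-ASCII episode content
# (e.g. Korean or Japanese text) reaches prompts unescaped; pass True to
# restore \uXXXX-escaped JSON in prompts.
graphiti = Graphiti(
    "bolt://localhost:7687",  # placeholder Neo4j URI
    "neo4j",                  # placeholder user
    "password",               # placeholder password
    ensure_ascii=False,
)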
graphiti_core/graphiti_types.py

@@ -27,5 +27,6 @@ class GraphitiClients(BaseModel):
     llm_client: LLMClient
     embedder: EmbedderClient
     cross_encoder: CrossEncoderClient
+    ensure_ascii: bool = False

     model_config = ConfigDict(arbitrary_types_allowed=True)
graphiti_core/nodes.py

@@ -118,7 +118,7 @@ class Node(BaseModel, ABC):
         return False

     @classmethod
-    async def delete_by_group_id(cls, driver: GraphDriver, group_id: str):
+    async def delete_by_group_id(cls, driver: GraphDriver, group_id: str, batch_size: int = 100):
         if driver.provider == GraphProvider.FALKORDB:
             for label in ['Entity', 'Episodic', 'Community']:
                 await driver.execute_query(
@@ -129,13 +129,18 @@ class Node(BaseModel, ABC):
                     group_id=group_id,
                 )
         else:
-            await driver.execute_query(
-                """
-                MATCH (n:Entity|Episodic|Community {group_id: $group_id})
-                DETACH DELETE n
-                """,
-                group_id=group_id,
-            )
+            async with driver.session() as session:
+                await session.run(
+                    """
+                    MATCH (n:Entity|Episodic|Community {group_id: $group_id})
+                    CALL {
+                        WITH n
+                        DETACH DELETE n
+                    } IN TRANSACTIONS OF $batch_size ROWS
+                    """,
+                    group_id=group_id,
+                    batch_size=batch_size,
+                )

     @classmethod
     async def get_by_uuid(cls, driver: GraphDriver, uuid: str): ...
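
On the Neo4j path, delete_by_group_id now deletes in batches via CALL { ... } IN TRANSACTIONS instead of a single unbounded DETACH DELETE, so purging a large group_id no longer builds one huge transaction. A hedged sketch of calling it with the new keyword (the helper function and driver argument are illustrative; only batch_size comes from the diff):

from graphiti_core.driver.driver import GraphDriver
from graphiti_core.nodes import EntityNode

async def purge_group(driver: GraphDriver, group_id: str) -> None:
    # batch_size is the new parameter in 0.18.5; it defaults to 100 rows
    # per inner transaction when omitted.
    await EntityNode.delete_by_group_id(driver, group_id, batch_size=500)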
graphiti_core/prompts/dedupe_edges.py

@@ -14,12 +14,12 @@ See the License for the specific language governing permissions and
 limitations under the License.
 """

-import json
 from typing import Any, Protocol, TypedDict

 from pydantic import BaseModel, Field

 from .models import Message, PromptFunction, PromptVersion
+from .prompt_helpers import to_prompt_json


 class EdgeDuplicate(BaseModel):
@@ -67,11 +67,11 @@ def edge(context: dict[str, Any]) -> list[Message]:
        Given the following context, determine whether the New Edge represents any of the edges in the list of Existing Edges.

        <EXISTING EDGES>
-        {json.dumps(context['related_edges'], indent=2)}
+        {to_prompt_json(context['related_edges'], ensure_ascii=context.get('ensure_ascii', False), indent=2)}
        </EXISTING EDGES>

        <NEW EDGE>
-        {json.dumps(context['extracted_edges'], indent=2)}
+        {to_prompt_json(context['extracted_edges'], ensure_ascii=context.get('ensure_ascii', False), indent=2)}
        </NEW EDGE>

        Task:
@@ -98,7 +98,7 @@ def edge_list(context: dict[str, Any]) -> list[Message]:
        Given the following context, find all of the duplicates in a list of facts:

        Facts:
-        {json.dumps(context['edges'], indent=2)}
+        {to_prompt_json(context['edges'], ensure_ascii=context.get('ensure_ascii', False), indent=2)}

        Task:
        If any facts in Facts is a duplicate of another fact, return a new fact with one of their uuid's.
graphiti_core/prompts/dedupe_nodes.py

@@ -14,12 +14,12 @@ See the License for the specific language governing permissions and
 limitations under the License.
 """

-import json
 from typing import Any, Protocol, TypedDict

 from pydantic import BaseModel, Field

 from .models import Message, PromptFunction, PromptVersion
+from .prompt_helpers import to_prompt_json


 class NodeDuplicate(BaseModel):
@@ -64,20 +64,20 @@ def node(context: dict[str, Any]) -> list[Message]:
            role='user',
            content=f"""
        <PREVIOUS MESSAGES>
-        {json.dumps([ep for ep in context['previous_episodes']], indent=2)}
+        {to_prompt_json([ep for ep in context['previous_episodes']], ensure_ascii=context.get('ensure_ascii', False), indent=2)}
        </PREVIOUS MESSAGES>
        <CURRENT MESSAGE>
        {context['episode_content']}
        </CURRENT MESSAGE>
        <NEW ENTITY>
-        {json.dumps(context['extracted_node'], indent=2)}
+        {to_prompt_json(context['extracted_node'], ensure_ascii=context.get('ensure_ascii', False), indent=2)}
        </NEW ENTITY>
        <ENTITY TYPE DESCRIPTION>
-        {json.dumps(context['entity_type_description'], indent=2)}
+        {to_prompt_json(context['entity_type_description'], ensure_ascii=context.get('ensure_ascii', False), indent=2)}
        </ENTITY TYPE DESCRIPTION>

        <EXISTING ENTITIES>
-        {json.dumps(context['existing_nodes'], indent=2)}
+        {to_prompt_json(context['existing_nodes'], ensure_ascii=context.get('ensure_ascii', False), indent=2)}
        </EXISTING ENTITIES>

        Given the above EXISTING ENTITIES and their attributes, MESSAGE, and PREVIOUS MESSAGES; Determine if the NEW ENTITY extracted from the conversation
@@ -114,7 +114,7 @@ def nodes(context: dict[str, Any]) -> list[Message]:
            role='user',
            content=f"""
        <PREVIOUS MESSAGES>
-        {json.dumps([ep for ep in context['previous_episodes']], indent=2)}
+        {to_prompt_json([ep for ep in context['previous_episodes']], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
        </PREVIOUS MESSAGES>
        <CURRENT MESSAGE>
        {context['episode_content']}
@@ -139,11 +139,11 @@ def nodes(context: dict[str, Any]) -> list[Message]:
        }}

        <ENTITIES>
-        {json.dumps(context['extracted_nodes'], indent=2)}
+        {to_prompt_json(context['extracted_nodes'], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
        </ENTITIES>

        <EXISTING ENTITIES>
-        {json.dumps(context['existing_nodes'], indent=2)}
+        {to_prompt_json(context['existing_nodes'], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
        </EXISTING ENTITIES>

        For each of the above ENTITIES, determine if the entity is a duplicate of any of the EXISTING ENTITIES.
@@ -180,7 +180,7 @@ def node_list(context: dict[str, Any]) -> list[Message]:
        Given the following context, deduplicate a list of nodes:

        Nodes:
-        {json.dumps(context['nodes'], indent=2)}
+        {to_prompt_json(context['nodes'], ensure_ascii=context.get('ensure_ascii', True), indent=2)}

        Task:
        1. Group nodes together such that all duplicate nodes are in the same list of uuids
graphiti_core/prompts/eval.py

@@ -14,12 +14,12 @@ See the License for the specific language governing permissions and
 limitations under the License.
 """

-import json
 from typing import Any, Protocol, TypedDict

 from pydantic import BaseModel, Field

 from .models import Message, PromptFunction, PromptVersion
+from .prompt_helpers import to_prompt_json


 class QueryExpansion(BaseModel):
@@ -68,7 +68,7 @@ def query_expansion(context: dict[str, Any]) -> list[Message]:
        Bob is asking Alice a question, are you able to rephrase the question into a simpler one about Alice in the third person
        that maintains the relevant context?
        <QUESTION>
-        {json.dumps(context['query'])}
+        {to_prompt_json(context['query'], ensure_ascii=context.get('ensure_ascii', False))}
        </QUESTION>
        """
    return [
@@ -84,10 +84,10 @@ def qa_prompt(context: dict[str, Any]) -> list[Message]:
        Your task is to briefly answer the question in the way that you think Alice would answer the question.
        You are given the following entity summaries and facts to help you determine the answer to your question.
        <ENTITY_SUMMARIES>
-        {json.dumps(context['entity_summaries'])}
+        {to_prompt_json(context['entity_summaries'], ensure_ascii=context.get('ensure_ascii', False))}
        </ENTITY_SUMMARIES>
        <FACTS>
-        {json.dumps(context['facts'])}
+        {to_prompt_json(context['facts'], ensure_ascii=context.get('ensure_ascii', False))}
        </FACTS>
        <QUESTION>
        {context['query']}
graphiti_core/prompts/extract_edges.py

@@ -14,12 +14,12 @@ See the License for the specific language governing permissions and
 limitations under the License.
 """

-import json
 from typing import Any, Protocol, TypedDict

 from pydantic import BaseModel, Field

 from .models import Message, PromptFunction, PromptVersion
+from .prompt_helpers import to_prompt_json


 class Edge(BaseModel):
@@ -73,7 +73,7 @@ def edge(context: dict[str, Any]) -> list[Message]:
        </FACT TYPES>

        <PREVIOUS_MESSAGES>
-        {json.dumps([ep for ep in context['previous_episodes']], indent=2)}
+        {to_prompt_json([ep for ep in context['previous_episodes']], ensure_ascii=context.get('ensure_ascii', False), indent=2)}
        </PREVIOUS_MESSAGES>

        <CURRENT_MESSAGE>
@@ -132,7 +132,7 @@ def reflexion(context: dict[str, Any]) -> list[Message]:

    user_prompt = f"""
    <PREVIOUS MESSAGES>
-    {json.dumps([ep for ep in context['previous_episodes']], indent=2)}
+    {to_prompt_json([ep for ep in context['previous_episodes']], ensure_ascii=context.get('ensure_ascii', False), indent=2)}
    </PREVIOUS MESSAGES>
    <CURRENT MESSAGE>
    {context['episode_content']}
@@ -166,7 +166,7 @@ def extract_attributes(context: dict[str, Any]) -> list[Message]:
            content=f"""

        <MESSAGE>
-        {json.dumps(context['episode_content'], indent=2)}
+        {to_prompt_json(context['episode_content'], ensure_ascii=context.get('ensure_ascii', False), indent=2)}
        </MESSAGE>
        <REFERENCE TIME>
        {context['reference_time']}
graphiti_core/prompts/extract_nodes.py

@@ -14,12 +14,12 @@ See the License for the specific language governing permissions and
 limitations under the License.
 """

-import json
 from typing import Any, Protocol, TypedDict

 from pydantic import BaseModel, Field

 from .models import Message, PromptFunction, PromptVersion
+from .prompt_helpers import to_prompt_json


 class ExtractedEntity(BaseModel):
@@ -89,7 +89,7 @@ def extract_message(context: dict[str, Any]) -> list[Message]:
        </ENTITY TYPES>

        <PREVIOUS MESSAGES>
-        {json.dumps([ep for ep in context['previous_episodes']], indent=2)}
+        {to_prompt_json([ep for ep in context['previous_episodes']], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
        </PREVIOUS MESSAGES>

        <CURRENT MESSAGE>
@@ -196,7 +196,7 @@ def reflexion(context: dict[str, Any]) -> list[Message]:

    user_prompt = f"""
    <PREVIOUS MESSAGES>
-    {json.dumps([ep for ep in context['previous_episodes']], indent=2)}
+    {to_prompt_json([ep for ep in context['previous_episodes']], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
    </PREVIOUS MESSAGES>
    <CURRENT MESSAGE>
    {context['episode_content']}
@@ -220,7 +220,7 @@ def classify_nodes(context: dict[str, Any]) -> list[Message]:

    user_prompt = f"""
    <PREVIOUS MESSAGES>
-    {json.dumps([ep for ep in context['previous_episodes']], indent=2)}
+    {to_prompt_json([ep for ep in context['previous_episodes']], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
    </PREVIOUS MESSAGES>
    <CURRENT MESSAGE>
    {context['episode_content']}
@@ -258,8 +258,8 @@ def extract_attributes(context: dict[str, Any]) -> list[Message]:
            content=f"""

        <MESSAGES>
-        {json.dumps(context['previous_episodes'], indent=2)}
-        {json.dumps(context['episode_content'], indent=2)}
+        {to_prompt_json(context['previous_episodes'], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
+        {to_prompt_json(context['episode_content'], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
        </MESSAGES>

        Given the above MESSAGES and the following ENTITY, update any of its attributes based on the information provided
@@ -288,8 +288,8 @@ def extract_summary(context: dict[str, Any]) -> list[Message]:
            content=f"""

        <MESSAGES>
-        {json.dumps(context['previous_episodes'], indent=2)}
-        {json.dumps(context['episode_content'], indent=2)}
+        {to_prompt_json(context['previous_episodes'], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
+        {to_prompt_json(context['episode_content'], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
        </MESSAGES>

        Given the above MESSAGES and the following ENTITY, update the summary that combines relevant information about the entity
graphiti_core/prompts/prompt_helpers.py (new file)

@@ -0,0 +1,24 @@
+import json
+from typing import Any
+
+DO_NOT_ESCAPE_UNICODE = '\nDo not escape unicode characters.\n'
+
+
+def to_prompt_json(data: Any, ensure_ascii: bool = True, indent: int = 2) -> str:
+    """
+    Serialize data to JSON for use in prompts.
+
+    Args:
+        data: The data to serialize
+        ensure_ascii: If True, escape non-ASCII characters. If False, preserve them.
+        indent: Number of spaces for indentation
+
+    Returns:
+        JSON string representation of the data
+
+    Notes:
+        When ensure_ascii=False, non-ASCII characters (e.g., Korean, Japanese, Chinese)
+        are preserved in their original form in the prompt, making them readable
+        in LLM logs and improving model understanding.
+    """
+    return json.dumps(data, ensure_ascii=ensure_ascii, indent=indent)
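
A quick, self-contained illustration of what the new helper changes in practice (the sample entity is made up):

import json

from graphiti_core.prompts.prompt_helpers import to_prompt_json

entity = {'name': '김철수', 'summary': 'Seoul-based engineer'}

# Plain json.dumps escapes the Korean name into \uXXXX sequences:
print(json.dumps(entity, indent=2))                          # "name": "\uae40\ucca0\uc218"

# With ensure_ascii=False the text stays readable in prompts and LLM logs:
print(to_prompt_json(entity, ensure_ascii=False, indent=2))  # "name": "김철수"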
graphiti_core/prompts/summarize_nodes.py

@@ -14,12 +14,12 @@ See the License for the specific language governing permissions and
 limitations under the License.
 """

-import json
 from typing import Any, Protocol, TypedDict

 from pydantic import BaseModel, Field

 from .models import Message, PromptFunction, PromptVersion
+from .prompt_helpers import to_prompt_json


 class Summary(BaseModel):
@@ -59,7 +59,7 @@ def summarize_pair(context: dict[str, Any]) -> list[Message]:
        Summaries must be under 250 words.

        Summaries:
-        {json.dumps(context['node_summaries'], indent=2)}
+        {to_prompt_json(context['node_summaries'], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
        """,
        ),
    ]
@@ -76,8 +76,8 @@ def summarize_context(context: dict[str, Any]) -> list[Message]:
            content=f"""

        <MESSAGES>
-        {json.dumps(context['previous_episodes'], indent=2)}
-        {json.dumps(context['episode_content'], indent=2)}
+        {to_prompt_json(context['previous_episodes'], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
+        {to_prompt_json(context['episode_content'], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
        </MESSAGES>

        Given the above MESSAGES and the following ENTITY name, create a summary for the ENTITY. Your summary must only use
@@ -100,7 +100,7 @@ def summarize_context(context: dict[str, Any]) -> list[Message]:
        </ENTITY CONTEXT>

        <ATTRIBUTES>
-        {json.dumps(context['attributes'], indent=2)}
+        {to_prompt_json(context['attributes'], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
        </ATTRIBUTES>
        """,
        ),
@@ -120,7 +120,7 @@ def summary_description(context: dict[str, Any]) -> list[Message]:
        Summaries must be under 250 words.

        Summary:
-        {json.dumps(context['summary'], indent=2)}
+        {to_prompt_json(context['summary'], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
        """,
        ),
    ]
graphiti_core/search/search_filters.py

@@ -31,7 +31,7 @@ class ComparisonOperator(Enum):


 class DateFilter(BaseModel):
-    date: datetime = Field(description='A datetime to filter on')
+    date: datetime | None = Field(description='A datetime to filter on')
     comparison_operator: ComparisonOperator = Field(
         description='Comparison operator for date filter'
     )
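
With this one-line change a DateFilter now tolerates a missing date, which structured LLM output can legitimately produce. A minimal check, picking an operator generically because the enum's member names are not shown in this hunk:

from graphiti_core.search.search_filters import ComparisonOperator, DateFilter

op = list(ComparisonOperator)[0]  # any member; concrete names are outside this hunk
f = DateFilter(date=None, comparison_operator=op)  # accepted in 0.18.5, rejected in 0.18.3
print(f.date is None)  # True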
graphiti_core/search/search_helpers.py

@@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
 """

-import json
-
 from graphiti_core.edges import EntityEdge
+from graphiti_core.prompts.prompt_helpers import to_prompt_json
 from graphiti_core.search.search_config import SearchResults


@@ -25,7 +24,9 @@ def format_edge_date_range(edge: EntityEdge) -> str:
     return f'{edge.valid_at if edge.valid_at else "date unknown"} - {(edge.invalid_at if edge.invalid_at else "present")}'


-def search_results_to_context_string(search_results: SearchResults) -> str:
+def search_results_to_context_string(
+    search_results: SearchResults, ensure_ascii: bool = False
+) -> str:
     """Reformats a set of SearchResults into a single string to pass directly to an LLM as context"""
     fact_json = [
         {
@@ -57,16 +58,16 @@ def search_results_to_context_string(search_results: SearchResults) -> str:
    These are the most relevant facts and their valid and invalid dates. Facts are considered valid
    between their valid_at and invalid_at dates. Facts with an invalid_at date of "Present" are considered valid.
    <FACTS>
-    {json.dumps(fact_json, indent=12)}
+    {to_prompt_json(fact_json, ensure_ascii=ensure_ascii, indent=12)}
    </FACTS>
    <ENTITIES>
-    {json.dumps(entity_json, indent=12)}
+    {to_prompt_json(entity_json, ensure_ascii=ensure_ascii, indent=12)}
    </ENTITIES>
    <EPISODES>
-    {json.dumps(episode_json, indent=12)}
+    {to_prompt_json(episode_json, ensure_ascii=ensure_ascii, indent=12)}
    </EPISODES>
    <COMMUNITIES>
-    {json.dumps(community_json, indent=12)}
+    {to_prompt_json(community_json, ensure_ascii=ensure_ascii, indent=12)}
    </COMMUNITIES>
    """

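search_results_to_context_string gains an ensure_ascii keyword (default False) and forwards it to every to_prompt_json call, so the <FACTS>, <ENTITIES>, <EPISODES>, and <COMMUNITIES> blocks keep non-ASCII text readable. A hedged sketch of the call site, where results is assumed to be a SearchResults value returned by a prior graphiti search:

from graphiti_core.search.search_config import SearchResults
from graphiti_core.search.search_helpers import search_results_to_context_string

def build_llm_context(results: SearchResults) -> str:
    # ensure_ascii=False is the new default; shown explicitly for clarity.
    return search_results_to_context_string(results, ensure_ascii=False)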
graphiti_core/utils/bulk_utils.py

@@ -343,7 +343,13 @@ async def dedupe_edges_bulk(
     ] = await semaphore_gather(
         *[
             resolve_extracted_edge(
-                clients.llm_client, edge, candidates, candidates, episode, edge_types
+                clients.llm_client,
+                edge,
+                candidates,
+                candidates,
+                episode,
+                edge_types,
+                clients.ensure_ascii,
             )
             for episode, edge, candidates in dedupe_tuples
         ]
graphiti_core/utils/maintenance/community_operations.py

@@ -122,9 +122,14 @@ def label_propagation(projection: dict[str, list[Neighbor]]) -> list[list[str]]:
     return clusters


-async def summarize_pair(llm_client: LLMClient, summary_pair: tuple[str, str]) -> str:
+async def summarize_pair(
+    llm_client: LLMClient, summary_pair: tuple[str, str], ensure_ascii: bool = True
+) -> str:
     # Prepare context for LLM
-    context = {'node_summaries': [{'summary': summary} for summary in summary_pair]}
+    context = {
+        'node_summaries': [{'summary': summary} for summary in summary_pair],
+        'ensure_ascii': ensure_ascii,
+    }

     llm_response = await llm_client.generate_response(
         prompt_library.summarize_nodes.summarize_pair(context), response_model=Summary
@@ -135,8 +140,13 @@ async def summarize_pair(llm_client: LLMClient, summary_pair: tuple[str, str]) -
     return pair_summary


-async def generate_summary_description(llm_client: LLMClient, summary: str) -> str:
-    context = {'summary': summary}
+async def generate_summary_description(
+    llm_client: LLMClient, summary: str, ensure_ascii: bool = True
+) -> str:
+    context = {
+        'summary': summary,
+        'ensure_ascii': ensure_ascii,
+    }

     llm_response = await llm_client.generate_response(
         prompt_library.summarize_nodes.summary_description(context),
@@ -149,7 +159,7 @@ async def generate_summary_description(llm_client: LLMClient, summary: str) -> s


 async def build_community(
-    llm_client: LLMClient, community_cluster: list[EntityNode]
+    llm_client: LLMClient, community_cluster: list[EntityNode], ensure_ascii: bool = True
 ) -> tuple[CommunityNode, list[CommunityEdge]]:
     summaries = [entity.summary for entity in community_cluster]
     length = len(summaries)
@@ -161,7 +171,9 @@
         new_summaries: list[str] = list(
             await semaphore_gather(
                 *[
-                    summarize_pair(llm_client, (str(left_summary), str(right_summary)))
+                    summarize_pair(
+                        llm_client, (str(left_summary), str(right_summary)), ensure_ascii
+                    )
                     for left_summary, right_summary in zip(
                         summaries[: int(length / 2)], summaries[int(length / 2) :], strict=False
                     )
@@ -174,7 +186,7 @@
         length = len(summaries)

     summary = summaries[0]
-    name = await generate_summary_description(llm_client, summary)
+    name = await generate_summary_description(llm_client, summary, ensure_ascii)
     now = utc_now()
     community_node = CommunityNode(
         name=name,
@@ -191,7 +203,10 @@


 async def build_communities(
-    driver: GraphDriver, llm_client: LLMClient, group_ids: list[str] | None
+    driver: GraphDriver,
+    llm_client: LLMClient,
+    group_ids: list[str] | None,
+    ensure_ascii: bool = True,
 ) -> tuple[list[CommunityNode], list[CommunityEdge]]:
     community_clusters = await get_community_clusters(driver, group_ids)

@@ -199,7 +214,7 @@

     async def limited_build_community(cluster):
         async with semaphore:
-            return await build_community(llm_client, cluster)
+            return await build_community(llm_client, cluster, ensure_ascii)

     communities: list[tuple[CommunityNode, list[CommunityEdge]]] = list(
         await semaphore_gather(
@@ -285,15 +300,21 @@ async def determine_entity_community(


 async def update_community(
-    driver: GraphDriver, llm_client: LLMClient, embedder: EmbedderClient, entity: EntityNode
+    driver: GraphDriver,
+    llm_client: LLMClient,
+    embedder: EmbedderClient,
+    entity: EntityNode,
+    ensure_ascii: bool = True,
 ) -> tuple[list[CommunityNode], list[CommunityEdge]]:
     community, is_new = await determine_entity_community(driver, entity)

     if community is None:
         return [], []

-    new_summary = await summarize_pair(llm_client, (entity.summary, community.summary))
-    new_name = await generate_summary_description(llm_client, new_summary)
+    new_summary = await summarize_pair(
+        llm_client, (entity.summary, community.summary), ensure_ascii
+    )
+    new_name = await generate_summary_description(llm_client, new_summary, ensure_ascii)

     community.summary = new_summary
     community.name = new_name
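
All of these community helpers gain an ensure_ascii parameter (defaulting to True) that is forwarded down into summarize_pair and generate_summary_description, so a single flag controls prompt escaping during community building. A hedged sketch of calling the module-level entry point directly (the driver and LLM client are assumed to be already configured; only the ensure_ascii keyword is new in this release):

from graphiti_core.utils.maintenance.community_operations import build_communities

async def rebuild_communities(driver, llm_client):
    community_nodes, community_edges = await build_communities(
        driver,
        llm_client,
        group_ids=None,       # cluster across all groups
        ensure_ascii=False,   # keep non-ASCII community summaries unescaped in prompts
    )
    return community_nodes, community_edges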