graphiti-core 0.17.0__tar.gz → 0.17.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of graphiti-core might be problematic.
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/PKG-INFO +35 -4
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/README.md +34 -3
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/examples/podcast/podcast_runner.py +1 -1
- graphiti_core-0.17.2/graphiti_core/embedder/gemini.py +177 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/llm_client/client.py +15 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/llm_client/gemini_client.py +122 -25
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/search/search_filters.py +1 -1
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/mcp_server/docker-compose.yml +1 -1
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/pyproject.toml +1 -1
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/signatures/version1/cla.json +8 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/embedder/test_gemini.py +24 -10
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/llm_client/test_gemini_client.py +114 -21
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/uv.lock +1 -1
- graphiti_core-0.17.0/graphiti_core/embedder/gemini.py +0 -113
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/.env.example +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/.github/ISSUE_TEMPLATE/bug_report.md +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/.github/dependabot.yml +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/.github/pull_request_template.md +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/.github/secret_scanning.yml +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/.github/workflows/cla.yml +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/.github/workflows/claude-code-review.yml +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/.github/workflows/claude.yml +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/.github/workflows/codeql.yml +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/.github/workflows/lint.yml +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/.github/workflows/mcp-server-docker.yml +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/.github/workflows/release-graphiti-core.yml +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/.github/workflows/typecheck.yml +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/.github/workflows/unit_tests.yml +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/.gitignore +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/CLAUDE.md +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/CODE_OF_CONDUCT.md +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/CONTRIBUTING.md +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/Dockerfile +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/LICENSE +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/Makefile +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/SECURITY.md +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/Zep-CLA.md +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/conftest.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/depot.json +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/docker-compose.test.yml +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/docker-compose.yml +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/ellipsis.yaml +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/examples/data/manybirds_products.json +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/examples/ecommerce/runner.ipynb +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/examples/ecommerce/runner.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/examples/langgraph-agent/agent.ipynb +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/examples/langgraph-agent/tinybirds-jess.png +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/examples/podcast/podcast_transcript.txt +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/examples/podcast/transcript_parser.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/examples/quickstart/README.md +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/examples/quickstart/quickstart_falkordb.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/examples/quickstart/quickstart_neo4j.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/examples/quickstart/requirements.txt +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/examples/wizard_of_oz/parser.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/examples/wizard_of_oz/runner.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/examples/wizard_of_oz/woo.txt +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/__init__.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/cross_encoder/__init__.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/cross_encoder/bge_reranker_client.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/cross_encoder/client.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/cross_encoder/gemini_reranker_client.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/cross_encoder/openai_reranker_client.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/driver/__init__.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/driver/driver.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/driver/falkordb_driver.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/driver/neo4j_driver.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/edges.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/embedder/__init__.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/embedder/azure_openai.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/embedder/client.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/embedder/openai.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/embedder/voyage.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/errors.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/graph_queries.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/graphiti.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/graphiti_types.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/helpers.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/llm_client/__init__.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/llm_client/anthropic_client.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/llm_client/azure_openai_client.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/llm_client/config.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/llm_client/errors.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/llm_client/groq_client.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/llm_client/openai_base_client.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/llm_client/openai_client.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/llm_client/openai_generic_client.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/llm_client/utils.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/models/__init__.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/models/edges/__init__.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/models/edges/edge_db_queries.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/models/nodes/__init__.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/models/nodes/node_db_queries.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/nodes.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/prompts/__init__.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/prompts/dedupe_edges.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/prompts/dedupe_nodes.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/prompts/eval.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/prompts/extract_edge_dates.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/prompts/extract_edges.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/prompts/extract_nodes.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/prompts/invalidate_edges.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/prompts/lib.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/prompts/models.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/prompts/prompt_helpers.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/prompts/summarize_nodes.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/py.typed +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/search/__init__.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/search/search.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/search/search_config.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/search/search_config_recipes.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/search/search_helpers.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/search/search_utils.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/telemetry/__init__.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/telemetry/telemetry.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/utils/__init__.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/utils/bulk_utils.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/utils/datetime_utils.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/utils/maintenance/__init__.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/utils/maintenance/community_operations.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/utils/maintenance/edge_operations.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/utils/maintenance/graph_data_operations.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/utils/maintenance/node_operations.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/utils/maintenance/temporal_operations.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/utils/maintenance/utils.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/utils/ontology_utils/entity_types_utils.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/images/arxiv-screenshot.png +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/images/graphiti-graph-intro.gif +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/images/graphiti-intro-slides-stock-2.gif +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/images/simple_graph.svg +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/mcp_server/.env.example +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/mcp_server/.python-version +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/mcp_server/Dockerfile +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/mcp_server/README.md +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/mcp_server/cursor_rules.md +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/mcp_server/graphiti_mcp_server.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/mcp_server/mcp_config_sse_example.json +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/mcp_server/mcp_config_stdio_example.json +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/mcp_server/pyproject.toml +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/mcp_server/uv.lock +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/poetry.lock +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/py.typed +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/pytest.ini +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/server/.env.example +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/server/Makefile +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/server/README.md +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/server/graph_service/__init__.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/server/graph_service/config.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/server/graph_service/dto/__init__.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/server/graph_service/dto/common.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/server/graph_service/dto/ingest.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/server/graph_service/dto/retrieve.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/server/graph_service/main.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/server/graph_service/routers/__init__.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/server/graph_service/routers/ingest.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/server/graph_service/routers/retrieve.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/server/graph_service/zep_graphiti.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/server/pyproject.toml +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/server/uv.lock +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/cross_encoder/test_bge_reranker_client.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/cross_encoder/test_gemini_reranker_client.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/driver/__init__.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/driver/test_falkordb_driver.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/embedder/embedder_fixtures.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/embedder/test_openai.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/embedder/test_voyage.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/evals/data/longmemeval_data/README.md +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/evals/data/longmemeval_data/longmemeval_oracle.json +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/evals/eval_cli.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/evals/eval_e2e_graph_building.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/evals/pytest.ini +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/evals/utils.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/helpers_test.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/llm_client/test_anthropic_client.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/llm_client/test_anthropic_client_int.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/llm_client/test_client.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/llm_client/test_errors.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/test_entity_exclusion_int.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/test_graphiti_falkordb_int.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/test_graphiti_int.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/test_node_falkordb_int.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/test_node_int.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/utils/maintenance/test_edge_operations.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/utils/maintenance/test_temporal_operations_int.py +0 -0
- {graphiti_core-0.17.0 → graphiti_core-0.17.2}/tests/utils/search/search_utils_test.py +0 -0
{graphiti_core-0.17.0 → graphiti_core-0.17.2}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: graphiti-core
-Version: 0.17.0
+Version: 0.17.2
 Summary: A temporal graph building library
 Project-URL: Homepage, https://help.getzep.com/graphiti/graphiti/overview
 Project-URL: Repository, https://github.com/getzep/graphiti
@@ -272,15 +272,46 @@ Database names are configured directly in the driver constructors:
 - **Neo4j**: Database name defaults to `neo4j` (hardcoded in Neo4jDriver)
 - **FalkorDB**: Database name defaults to `default_db` (hardcoded in FalkorDriver)
 
-
+As of v0.17.0, if you need to customize your database configuration, you can instantiate a database driver and pass it to the Graphiti constructor using the `graph_driver` parameter.
+
+#### Neo4j with Custom Database Name
 
 ```python
+from graphiti_core import Graphiti
 from graphiti_core.driver.neo4j_driver import Neo4jDriver
 
-#
-driver = Neo4jDriver(
+# Create a Neo4j driver with custom database name
+driver = Neo4jDriver(
+    uri="bolt://localhost:7687",
+    user="neo4j",
+    password="password",
+    database="my_custom_database" # Custom database name
+)
+
+# Pass the driver to Graphiti
+graphiti = Graphiti(graph_driver=driver)
+```
+
+#### FalkorDB with Custom Database Name
+
+```python
+from graphiti_core import Graphiti
+from graphiti_core.driver.falkordb_driver import FalkorDriver
+
+# Create a FalkorDB driver with custom database name
+driver = FalkorDriver(
+    host="localhost",
+    port=6379,
+    username="falkor_user", # Optional
+    password="falkor_password", # Optional
+    database="my_custom_graph" # Custom database name
+)
+
+# Pass the driver to Graphiti
+graphiti = Graphiti(graph_driver=driver)
 ```
 
+
 ### Performance Configuration
 
 `USE_PARALLEL_RUNTIME` is an optional boolean variable that can be set to true if you wish
{graphiti_core-0.17.0 → graphiti_core-0.17.2}/README.md

@@ -220,15 +220,46 @@ Database names are configured directly in the driver constructors:
 - **Neo4j**: Database name defaults to `neo4j` (hardcoded in Neo4jDriver)
 - **FalkorDB**: Database name defaults to `default_db` (hardcoded in FalkorDriver)
 
-
+As of v0.17.0, if you need to customize your database configuration, you can instantiate a database driver and pass it to the Graphiti constructor using the `graph_driver` parameter.
+
+#### Neo4j with Custom Database Name
 
 ```python
+from graphiti_core import Graphiti
 from graphiti_core.driver.neo4j_driver import Neo4jDriver
 
-#
-driver = Neo4jDriver(
+# Create a Neo4j driver with custom database name
+driver = Neo4jDriver(
+    uri="bolt://localhost:7687",
+    user="neo4j",
+    password="password",
+    database="my_custom_database" # Custom database name
+)
+
+# Pass the driver to Graphiti
+graphiti = Graphiti(graph_driver=driver)
+```
+
+#### FalkorDB with Custom Database Name
+
+```python
+from graphiti_core import Graphiti
+from graphiti_core.driver.falkordb_driver import FalkorDriver
+
+# Create a FalkorDB driver with custom database name
+driver = FalkorDriver(
+    host="localhost",
+    port=6379,
+    username="falkor_user", # Optional
+    password="falkor_password", # Optional
+    database="my_custom_graph" # Custom database name
+)
+
+# Pass the driver to Graphiti
+graphiti = Graphiti(graph_driver=driver)
 ```
 
+
 ### Performance Configuration
 
 `USE_PARALLEL_RUNTIME` is an optional boolean variable that can be set to true if you wish
graphiti_core-0.17.2/graphiti_core/embedder/gemini.py (new file)

@@ -0,0 +1,177 @@
+"""
+Copyright 2024, Zep Software, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import logging
+from collections.abc import Iterable
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from google import genai
+    from google.genai import types
+else:
+    try:
+        from google import genai
+        from google.genai import types
+    except ImportError:
+        raise ImportError(
+            'google-genai is required for GeminiEmbedder. '
+            'Install it with: pip install graphiti-core[google-genai]'
+        ) from None
+
+from pydantic import Field
+
+from .client import EmbedderClient, EmbedderConfig
+
+logger = logging.getLogger(__name__)
+
+DEFAULT_EMBEDDING_MODEL = 'text-embedding-001' # gemini-embedding-001 or text-embedding-005
+
+DEFAULT_BATCH_SIZE = 100
+
+
+class GeminiEmbedderConfig(EmbedderConfig):
+    embedding_model: str = Field(default=DEFAULT_EMBEDDING_MODEL)
+    api_key: str | None = None
+
+
+class GeminiEmbedder(EmbedderClient):
+    """
+    Google Gemini Embedder Client
+    """
+
+    def __init__(
+        self,
+        config: GeminiEmbedderConfig | None = None,
+        client: 'genai.Client | None' = None,
+        batch_size: int | None = None,
+    ):
+        """
+        Initialize the GeminiEmbedder with the provided configuration and client.
+
+        Args:
+            config (GeminiEmbedderConfig | None): The configuration for the GeminiEmbedder, including API key, model, base URL, temperature, and max tokens.
+            client (genai.Client | None): An optional async client instance to use. If not provided, a new genai.Client is created.
+            batch_size (int | None): An optional batch size to use. If not provided, the default batch size will be used.
+        """
+        if config is None:
+            config = GeminiEmbedderConfig()
+
+        self.config = config
+
+        if client is None:
+            self.client = genai.Client(api_key=config.api_key)
+        else:
+            self.client = client
+
+        if batch_size is None and self.config.embedding_model == 'gemini-embedding-001':
+            # Gemini API has a limit on the number of instances per request
+            #https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/text-embeddings-api
+            self.batch_size = 1
+        elif batch_size is None:
+            self.batch_size = DEFAULT_BATCH_SIZE
+        else:
+            self.batch_size = batch_size
+
+    async def create(
+        self, input_data: str | list[str] | Iterable[int] | Iterable[Iterable[int]]
+    ) -> list[float]:
+        """
+        Create embeddings for the given input data using Google's Gemini embedding model.
+
+        Args:
+            input_data: The input data to create embeddings for. Can be a string, list of strings,
+                or an iterable of integers or iterables of integers.
+
+        Returns:
+            A list of floats representing the embedding vector.
+        """
+        # Generate embeddings
+        result = await self.client.aio.models.embed_content(
+            model=self.config.embedding_model or DEFAULT_EMBEDDING_MODEL,
+            contents=[input_data], # type: ignore[arg-type] # mypy fails on broad union type
+            config=types.EmbedContentConfig(output_dimensionality=self.config.embedding_dim),
+        )
+
+        if not result.embeddings or len(result.embeddings) == 0 or not result.embeddings[0].values:
+            raise ValueError('No embeddings returned from Gemini API in create()')
+
+        return result.embeddings[0].values
+
+    async def create_batch(self, input_data_list: list[str]) -> list[list[float]]:
+        """
+        Create embeddings for a batch of input data using Google's Gemini embedding model.
+
+        This method handles batching to respect the Gemini API's limits on the number
+        of instances that can be processed in a single request.
+
+        Args:
+            input_data_list: A list of strings to create embeddings for.
+
+        Returns:
+            A list of embedding vectors (each vector is a list of floats).
+        """
+        if not input_data_list:
+            return []
+
+        batch_size = self.batch_size
+        all_embeddings = []
+
+        # Process inputs in batches
+        for i in range(0, len(input_data_list), batch_size):
+            batch = input_data_list[i:i + batch_size]
+
+            try:
+                # Generate embeddings for this batch
+                result = await self.client.aio.models.embed_content(
+                    model=self.config.embedding_model or DEFAULT_EMBEDDING_MODEL,
+                    contents=batch, # type: ignore[arg-type] # mypy fails on broad union type
+                    config=types.EmbedContentConfig(output_dimensionality=self.config.embedding_dim),
+                )
+
+                if not result.embeddings or len(result.embeddings) == 0:
+                    raise Exception('No embeddings returned')
+
+                # Process embeddings from this batch
+                for embedding in result.embeddings:
+                    if not embedding.values:
+                        raise ValueError('Empty embedding values returned')
+                    all_embeddings.append(embedding.values)
+
+            except Exception as e:
+                # If batch processing fails, fall back to individual processing
+                logger.warning(f"Batch embedding failed for batch {i//batch_size + 1}, falling back to individual processing: {e}")
+
+                for item in batch:
+                    try:
+                        # Process each item individually
+                        result = await self.client.aio.models.embed_content(
+                            model=self.config.embedding_model or DEFAULT_EMBEDDING_MODEL,
+                            contents=[item], # type: ignore[arg-type] # mypy fails on broad union type
+                            config=types.EmbedContentConfig(output_dimensionality=self.config.embedding_dim),
+                        )
+
+                        if not result.embeddings or len(result.embeddings) == 0:
+                            raise ValueError('No embeddings returned from Gemini API')
+                        if not result.embeddings[0].values:
+                            raise ValueError('Empty embedding values returned')
+
+                        all_embeddings.append(result.embeddings[0].values)
+
+                    except Exception as individual_error:
+                        logger.error(f"Failed to embed individual item: {individual_error}")
+                        raise individual_error
+
+        return all_embeddings
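For orientation, here is a minimal usage sketch of the new embedder (not part of the diff; it assumes a GOOGLE_API_KEY environment variable, and the model name and inputs are illustrative):

import asyncio
import os

from graphiti_core.embedder.gemini import GeminiEmbedder, GeminiEmbedderConfig


async def main() -> None:
    # Assumption: the API key comes from the environment; gemini-embedding-001
    # is the model that __init__ above forces to a batch size of 1.
    embedder = GeminiEmbedder(
        config=GeminiEmbedderConfig(
            api_key=os.environ['GOOGLE_API_KEY'],
            embedding_model='gemini-embedding-001',
        )
    )

    # One input -> one embedding vector (a list of floats).
    vector = await embedder.create('Graphiti builds temporal knowledge graphs.')

    # Many inputs -> create_batch() chunks them by self.batch_size and falls back
    # to per-item requests if a batch call fails.
    vectors = await embedder.create_batch(['first episode', 'second episode'])
    print(len(vector), len(vectors))


asyncio.run(main())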
{graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/llm_client/client.py

@@ -167,3 +167,18 @@ class LLMClient(ABC):
             self.cache_dir.set(cache_key, response)
 
         return response
+
+    def _get_failed_generation_log(self, messages: list[Message], output: str | None) -> str:
+        """
+        Log the full input messages, the raw output (if any), and the exception for debugging failed generations.
+        """
+        log = ""
+        log += f"Input messages: {json.dumps([m.model_dump() for m in messages], indent=2)}\n"
+        if output is not None:
+            if len(output) > 4000:
+                log += f"Raw output: {output[:2000]}... (truncated) ...{output[-2000:]}\n"
+            else:
+                log += f"Raw output: {output}\n"
+        else:
+            log += "No raw output available"
+        return log
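The detail worth noting in `_get_failed_generation_log` is the truncation rule: raw outputs longer than 4000 characters are logged as their first and last 2000 characters. A standalone sketch of just that rule (not library code):

# Sketch of the truncation applied above; `output` stands in for a raw LLM response.
output = '{' + 'x' * 5000

if len(output) > 4000:
    logged = f'Raw output: {output[:2000]}... (truncated) ...{output[-2000:]}'
else:
    logged = f'Raw output: {output}'

print(len(logged))  # about 4,000 characters of payload plus the fixed framing text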
{graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/llm_client/gemini_client.py

@@ -16,6 +16,7 @@ limitations under the License.
 
 import json
 import logging
+import re
 import typing
 from typing import TYPE_CHECKING, ClassVar
 
@@ -23,7 +24,7 @@ from pydantic import BaseModel
 
 from ..prompts.models import Message
 from .client import MULTILINGUAL_EXTRACTION_RESPONSES, LLMClient
-from .config import
+from .config import LLMConfig, ModelSize
 from .errors import RateLimitError
 
 if TYPE_CHECKING:
@@ -44,7 +45,26 @@ else:
 logger = logging.getLogger(__name__)
 
 DEFAULT_MODEL = 'gemini-2.5-flash'
-DEFAULT_SMALL_MODEL = '
+DEFAULT_SMALL_MODEL = 'gemini-2.5-flash-lite-preview-06-17'
+
+# Maximum output tokens for different Gemini models
+GEMINI_MODEL_MAX_TOKENS = {
+    # Gemini 2.5 models
+    'gemini-2.5-pro': 65536,
+    'gemini-2.5-flash': 65536,
+    'gemini-2.5-flash-lite': 64000,
+    'models/gemini-2.5-flash-lite-preview-06-17': 64000,
+    # Gemini 2.0 models
+    'gemini-2.0-flash': 8192,
+    'gemini-2.0-flash-lite': 8192,
+    # Gemini 1.5 models
+    'gemini-1.5-pro': 8192,
+    'gemini-1.5-flash': 8192,
+    'gemini-1.5-flash-8b': 8192,
+}
+
+# Default max tokens for models not in the mapping
+DEFAULT_GEMINI_MAX_TOKENS = 8192
 
 
 class GeminiClient(LLMClient):
@@ -74,7 +94,7 @@ class GeminiClient(LLMClient):
         self,
         config: LLMConfig | None = None,
         cache: bool = False,
-        max_tokens: int =
+        max_tokens: int | None = None,
         thinking_config: types.ThinkingConfig | None = None,
         client: 'genai.Client | None' = None,
     ):
@@ -146,11 +166,76 @@ class GeminiClient(LLMClient):
         else:
             return self.model or DEFAULT_MODEL
 
+    def _get_max_tokens_for_model(self, model: str) -> int:
+        """Get the maximum output tokens for a specific Gemini model."""
+        return GEMINI_MODEL_MAX_TOKENS.get(model, DEFAULT_GEMINI_MAX_TOKENS)
+
+    def _resolve_max_tokens(self, requested_max_tokens: int | None, model: str) -> int:
+        """
+        Resolve the maximum output tokens to use based on precedence rules.
+
+        Precedence order (highest to lowest):
+        1. Explicit max_tokens parameter passed to generate_response()
+        2. Instance max_tokens set during client initialization
+        3. Model-specific maximum tokens from GEMINI_MODEL_MAX_TOKENS mapping
+        4. DEFAULT_MAX_TOKENS as final fallback
+
+        Args:
+            requested_max_tokens: The max_tokens parameter passed to generate_response()
+            model: The model name to look up model-specific limits
+
+        Returns:
+            int: The resolved maximum tokens to use
+        """
+        # 1. Use explicit parameter if provided
+        if requested_max_tokens is not None:
+            return requested_max_tokens
+
+        # 2. Use instance max_tokens if set during initialization
+        if self.max_tokens is not None:
+            return self.max_tokens
+
+        # 3. Use model-specific maximum or return DEFAULT_GEMINI_MAX_TOKENS
+        return self._get_max_tokens_for_model(model)
+
+    def salvage_json(self, raw_output: str) -> dict[str, typing.Any] | None:
+        """
+        Attempt to salvage a JSON object if the raw output is truncated.
+
+        This is accomplished by looking for the last closing bracket for an array or object.
+        If found, it will try to load the JSON object from the raw output.
+        If the JSON object is not valid, it will return None.
+
+        Args:
+            raw_output (str): The raw output from the LLM.
+
+        Returns:
+            dict[str, typing.Any]: The salvaged JSON object.
+            None: If no salvage is possible.
+        """
+        if not raw_output:
+            return None
+        # Try to salvage a JSON array
+        array_match = re.search(r'\]\s*$', raw_output)
+        if array_match:
+            try:
+                return json.loads(raw_output[:array_match.end()])
+            except Exception:
+                pass
+        # Try to salvage a JSON object
+        obj_match = re.search(r'\}\s*$', raw_output)
+        if obj_match:
+            try:
+                return json.loads(raw_output[:obj_match.end()])
+            except Exception:
+                pass
+        return None
+
     async def _generate_response(
         self,
         messages: list[Message],
         response_model: type[BaseModel] | None = None,
-        max_tokens: int =
+        max_tokens: int | None = None,
         model_size: ModelSize = ModelSize.medium,
     ) -> dict[str, typing.Any]:
         """
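To make the salvage behaviour concrete, here is a standalone sketch of the same idea as `salvage_json` (it mirrors the regex-plus-json.loads approach above rather than calling the client):

import json
import re


def salvage_json_sketch(raw_output: str):
    # Only output that still ends with a closing bracket or brace is attempted;
    # anything cut off mid-value cannot be recovered this way.
    for pattern in (r'\]\s*$', r'\}\s*$'):
        match = re.search(pattern, raw_output)
        if match:
            try:
                return json.loads(raw_output[: match.end()])
            except Exception:
                pass
    return None


print(salvage_json_sketch('{"entities": ["a", "b"]}'))  # complete JSON -> parsed dict
print(salvage_json_sketch('{"entities": ["a", "b'))     # truncated mid-string -> None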
@@ -159,7 +244,7 @@ class GeminiClient(LLMClient):
         Args:
             messages (list[Message]): A list of messages to send to the language model.
             response_model (type[BaseModel] | None): An optional Pydantic model to parse the response into.
-            max_tokens (int): The maximum number of tokens to generate in the response.
+            max_tokens (int | None): The maximum number of tokens to generate in the response. If None, uses precedence rules.
             model_size (ModelSize): The size of the model to use (small or medium).
 
         Returns:
@@ -199,10 +284,13 @@ class GeminiClient(LLMClient):
             # Get the appropriate model for the requested size
             model = self._get_model_for_size(model_size)
 
+            # Resolve max_tokens using precedence rules (see _resolve_max_tokens for details)
+            resolved_max_tokens = self._resolve_max_tokens(max_tokens, model)
+
             # Create generation config
             generation_config = types.GenerateContentConfig(
                 temperature=self.temperature,
-                max_output_tokens=
+                max_output_tokens=resolved_max_tokens,
                 response_mime_type='application/json' if response_model else None,
                 response_schema=response_model if response_model else None,
                 system_instruction=system_prompt,
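The precedence behind `resolved_max_tokens` can be restated as a small standalone sketch (illustrative limits; the real lookup table is GEMINI_MODEL_MAX_TOKENS above):

# Sketch of the max-token precedence implemented by _resolve_max_tokens.
MODEL_LIMITS = {'gemini-2.5-flash': 65536, 'gemini-2.0-flash': 8192}
DEFAULT_LIMIT = 8192


def resolve_max_tokens(requested: int | None, instance_default: int | None, model: str) -> int:
    if requested is not None:         # 1. explicit argument to generate_response()
        return requested
    if instance_default is not None:  # 2. max_tokens set when the client was constructed
        return instance_default
    return MODEL_LIMITS.get(model, DEFAULT_LIMIT)  # 3. model table, else 4. default


print(resolve_max_tokens(1024, None, 'gemini-2.5-flash'))  # 1024
print(resolve_max_tokens(None, 2048, 'gemini-2.5-flash'))  # 2048
print(resolve_max_tokens(None, None, 'gemini-2.5-flash'))  # 65536
print(resolve_max_tokens(None, None, 'unknown-model'))     # 8192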
@@ -216,6 +304,9 @@ class GeminiClient(LLMClient):
                 config=generation_config,
             )
 
+            # Always capture the raw output for debugging
+            raw_output = getattr(response, 'text', None)
+
             # Check for safety and prompt blocks
             self._check_safety_blocks(response)
             self._check_prompt_blocks(response)
@@ -223,18 +314,26 @@ class GeminiClient(LLMClient):
             # If this was a structured output request, parse the response into the Pydantic model
             if response_model is not None:
                 try:
-                    if not
+                    if not raw_output:
                         raise ValueError('No response text')
 
-                    validated_model = response_model.model_validate(json.loads(
+                    validated_model = response_model.model_validate(json.loads(raw_output))
 
                     # Return as a dictionary for API consistency
                     return validated_model.model_dump()
                 except Exception as e:
+                    if raw_output:
+                        logger.error("🦀 LLM generation failed parsing as JSON, will try to salvage.")
+                        logger.error(self._get_failed_generation_log(gemini_messages, raw_output))
+                        # Try to salvage
+                        salvaged = self.salvage_json(raw_output)
+                        if salvaged is not None:
+                            logger.warning("Salvaged partial JSON from truncated/malformed output.")
+                            return salvaged
                     raise Exception(f'Failed to parse structured response: {e}') from e
 
             # Otherwise, return the response text as a dictionary
-            return {'content':
+            return {'content': raw_output}
 
         except Exception as e:
             # Check if it's a rate limit error based on Gemini API error codes
@@ -248,7 +347,7 @@ class GeminiClient(LLMClient):
                 raise RateLimitError from e
 
             logger.error(f'Error in generating LLM response: {e}')
-            raise
+            raise Exception from e
 
     async def generate_response(
         self,
@@ -270,16 +369,14 @@ class GeminiClient(LLMClient):
         Returns:
             dict[str, typing.Any]: The response from the language model.
         """
-        if max_tokens is None:
-            max_tokens = self.max_tokens
-
         retry_count = 0
         last_error = None
+        last_output = None
 
         # Add multilingual extraction instructions
         messages[0].content += MULTILINGUAL_EXTRACTION_RESPONSES
 
-        while retry_count
+        while retry_count < self.MAX_RETRIES:
             try:
                 response = await self._generate_response(
                     messages=messages,
@@ -287,22 +384,19 @@ class GeminiClient(LLMClient):
                     max_tokens=max_tokens,
                     model_size=model_size,
                 )
+                last_output = response.get('content') if isinstance(response, dict) and 'content' in response else None
                 return response
-            except RateLimitError:
+            except RateLimitError as e:
                 # Rate limit errors should not trigger retries (fail fast)
-                raise
+                raise e
             except Exception as e:
                 last_error = e
 
                 # Check if this is a safety block - these typically shouldn't be retried
-
+                error_text = str(e) or (str(e.__cause__) if e.__cause__ else '')
+                if 'safety' in error_text.lower() or 'blocked' in error_text.lower():
                     logger.warning(f'Content blocked by safety filters: {e}')
-                    raise
-
-                # Don't retry if we've hit the max retries
-                if retry_count >= self.MAX_RETRIES:
-                    logger.error(f'Max retries ({self.MAX_RETRIES}) exceeded. Last error: {e}')
-                    raise
+                    raise Exception(f'Content blocked by safety filters: {e}') from e
 
                 retry_count += 1
 
@@ -321,5 +415,8 @@ class GeminiClient(LLMClient):
                     f'Retrying after application error (attempt {retry_count}/{self.MAX_RETRIES}): {e}'
                 )
 
-        # If we
-
+        # If we exit the loop without returning, all retries are exhausted
+        logger.error("🦀 LLM generation failed and retries are exhausted.")
+        logger.error(self._get_failed_generation_log(messages, last_output))
+        logger.error(f'Max retries ({self.MAX_RETRIES}) exceeded. Last error: {last_error}')
+        raise last_error or Exception("Max retries exceeded")
{graphiti_core-0.17.0 → graphiti_core-0.17.2}/graphiti_core/search/search_filters.py

@@ -157,7 +157,7 @@ def edge_search_filter_query_constructor(
         filter_query += created_at_filter
 
     if filters.expired_at is not None:
-        expired_at_filter = 'AND ('
+        expired_at_filter = ' AND ('
         for i, or_list in enumerate(filters.expired_at):
             for j, date_filter in enumerate(or_list):
                 filter_params['expired_at_' + str(j)] = date_filter.date
{graphiti_core-0.17.0 → graphiti_core-0.17.2}/mcp_server/docker-compose.yml

@@ -5,7 +5,7 @@ services:
       - "7474:7474" # HTTP
       - "7687:7687" # Bolt
     environment:
-      - NEO4J_AUTH
+      - NEO4J_AUTH=${NEO4J_USER:-neo4j}/${NEO4J_PASSWORD:-demodemo}
       - NEO4J_server_memory_heap_initial__size=512m
       - NEO4J_server_memory_heap_max__size=1G
       - NEO4J_server_memory_pagecache_size=512m
{graphiti_core-0.17.0 → graphiti_core-0.17.2}/pyproject.toml

@@ -1,7 +1,7 @@
 [project]
 name = "graphiti-core"
 description = "A temporal graph building library"
-version = "0.17.0"
+version = "0.17.2"
 authors = [
     { "name" = "Paul Paliychuk", "email" = "paul@getzep.com" },
     { "name" = "Preston Rasmussen", "email" = "preston@getzep.com" },
{graphiti_core-0.17.0 → graphiti_core-0.17.2}/signatures/version1/cla.json

@@ -199,6 +199,14 @@
       "created_at": "2025-07-06T03:41:19Z",
       "repoId": 840056306,
       "pullRequestNo": 679
+    },
+    {
+      "name": "charlesmcchan",
+      "id": 425857,
+      "comment_id": 3066732289,
+      "created_at": "2025-07-13T08:54:26Z",
+      "repoId": 840056306,
+      "pullRequestNo": 711
     }
   ]
 }