usecortex-ai 0.3.6__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. usecortex_ai/__init__.py +82 -70
  2. usecortex_ai/client.py +25 -23
  3. usecortex_ai/dashboard/client.py +448 -0
  4. usecortex_ai/{user_memory → dashboard}/raw_client.py +371 -530
  5. usecortex_ai/embeddings/client.py +229 -102
  6. usecortex_ai/embeddings/raw_client.py +323 -211
  7. usecortex_ai/errors/__init__.py +2 -0
  8. usecortex_ai/errors/bad_request_error.py +1 -2
  9. usecortex_ai/errors/forbidden_error.py +1 -2
  10. usecortex_ai/errors/internal_server_error.py +1 -2
  11. usecortex_ai/errors/not_found_error.py +1 -2
  12. usecortex_ai/errors/service_unavailable_error.py +1 -2
  13. usecortex_ai/errors/too_many_requests_error.py +11 -0
  14. usecortex_ai/errors/unauthorized_error.py +1 -2
  15. usecortex_ai/fetch/client.py +350 -29
  16. usecortex_ai/fetch/raw_client.py +919 -65
  17. usecortex_ai/raw_client.py +8 -2
  18. usecortex_ai/search/client.py +313 -257
  19. usecortex_ai/search/raw_client.py +463 -344
  20. usecortex_ai/search/types/alpha.py +1 -1
  21. usecortex_ai/sources/client.py +29 -216
  22. usecortex_ai/sources/raw_client.py +51 -589
  23. usecortex_ai/tenant/client.py +155 -118
  24. usecortex_ai/tenant/raw_client.py +227 -350
  25. usecortex_ai/types/__init__.py +76 -66
  26. usecortex_ai/types/add_memory_response.py +39 -0
  27. usecortex_ai/types/{scored_triplet_response.py → api_key_info.py} +16 -12
  28. usecortex_ai/types/app_sources_upload_data.py +15 -6
  29. usecortex_ai/types/{file_upload_result.py → collection_stats.py} +5 -5
  30. usecortex_ai/types/custom_property_definition.py +75 -0
  31. usecortex_ai/types/dashboard_apis_response.py +33 -0
  32. usecortex_ai/types/dashboard_sources_response.py +33 -0
  33. usecortex_ai/types/dashboard_tenants_response.py +33 -0
  34. usecortex_ai/types/{list_sources_response.py → delete_result.py} +10 -7
  35. usecortex_ai/types/delete_user_memory_response.py +1 -1
  36. usecortex_ai/types/entity.py +4 -4
  37. usecortex_ai/types/fetch_mode.py +5 -0
  38. usecortex_ai/types/{relations.py → forceful_relations_payload.py} +4 -4
  39. usecortex_ai/types/graph_context.py +26 -0
  40. usecortex_ai/types/{delete_sources.py → infra.py} +4 -3
  41. usecortex_ai/types/{fetch_content_data.py → insert_result.py} +12 -8
  42. usecortex_ai/types/memory_item.py +88 -0
  43. usecortex_ai/types/memory_result_item.py +47 -0
  44. usecortex_ai/types/milvus_data_type.py +21 -0
  45. usecortex_ai/types/path_triplet.py +3 -18
  46. usecortex_ai/types/processing_status.py +3 -2
  47. usecortex_ai/types/processing_status_indexing_status.py +7 -0
  48. usecortex_ai/types/qn_a_search_response.py +49 -0
  49. usecortex_ai/types/{retrieve_response.py → raw_embedding_document.py} +11 -8
  50. usecortex_ai/types/raw_embedding_search_result.py +47 -0
  51. usecortex_ai/types/{user_memory.py → raw_embedding_vector.py} +6 -6
  52. usecortex_ai/types/relation_evidence.py +24 -5
  53. usecortex_ai/types/retrieval_result.py +30 -0
  54. usecortex_ai/types/scored_path_response.py +5 -19
  55. usecortex_ai/types/search_mode.py +5 -0
  56. usecortex_ai/types/{batch_upload_data.py → source_delete_response.py} +8 -8
  57. usecortex_ai/types/{list_user_memories_response.py → source_delete_result_item.py} +11 -7
  58. usecortex_ai/types/source_fetch_response.py +70 -0
  59. usecortex_ai/types/{graph_relations_response.py → source_graph_relations_response.py} +3 -3
  60. usecortex_ai/types/{single_upload_data.py → source_list_response.py} +7 -10
  61. usecortex_ai/types/source_model.py +11 -1
  62. usecortex_ai/types/source_status.py +5 -0
  63. usecortex_ai/types/source_upload_response.py +35 -0
  64. usecortex_ai/types/source_upload_result_item.py +38 -0
  65. usecortex_ai/types/supported_llm_providers.py +5 -0
  66. usecortex_ai/types/{embeddings_create_collection_data.py → tenant_create_response.py} +9 -7
  67. usecortex_ai/types/{webpage_scrape_request.py → tenant_info.py} +10 -5
  68. usecortex_ai/types/tenant_metadata_schema_info.py +36 -0
  69. usecortex_ai/types/{tenant_create_data.py → tenant_stats_response.py} +9 -8
  70. usecortex_ai/types/{triple_with_evidence.py → triplet_with_evidence.py} +5 -1
  71. usecortex_ai/types/user_assistant_pair.py +4 -0
  72. usecortex_ai/types/{search_chunk.py → vector_store_chunk.py} +5 -11
  73. usecortex_ai/upload/__init__.py +3 -0
  74. usecortex_ai/upload/client.py +233 -1937
  75. usecortex_ai/upload/raw_client.py +364 -4401
  76. usecortex_ai/upload/types/__init__.py +7 -0
  77. usecortex_ai/upload/types/body_upload_app_ingestion_upload_app_post_app_sources.py +7 -0
  78. {usecortex_ai-0.3.6.dist-info → usecortex_ai-0.5.0.dist-info}/METADATA +2 -2
  79. usecortex_ai-0.5.0.dist-info/RECORD +114 -0
  80. {usecortex_ai-0.3.6.dist-info → usecortex_ai-0.5.0.dist-info}/WHEEL +1 -1
  81. {usecortex_ai-0.3.6.dist-info → usecortex_ai-0.5.0.dist-info}/licenses/LICENSE +21 -21
  82. {usecortex_ai-0.3.6.dist-info → usecortex_ai-0.5.0.dist-info}/top_level.txt +0 -0
  83. usecortex_ai/document/client.py +0 -139
  84. usecortex_ai/document/raw_client.py +0 -312
  85. usecortex_ai/types/add_user_memory_response.py +0 -41
  86. usecortex_ai/types/body_scrape_webpage_upload_scrape_webpage_post.py +0 -17
  87. usecortex_ai/types/body_update_scrape_job_upload_update_webpage_patch.py +0 -17
  88. usecortex_ai/types/chunk_graph_relations_response.py +0 -33
  89. usecortex_ai/types/delete_memory_request.py +0 -32
  90. usecortex_ai/types/delete_sub_tenant_data.py +0 -42
  91. usecortex_ai/types/embeddings_delete_data.py +0 -37
  92. usecortex_ai/types/embeddings_get_data.py +0 -37
  93. usecortex_ai/types/embeddings_search_data.py +0 -37
  94. usecortex_ai/types/extended_context.py +0 -17
  95. usecortex_ai/types/markdown_upload_request.py +0 -41
  96. usecortex_ai/types/related_chunk.py +0 -22
  97. usecortex_ai/types/retrieve_user_memory_response.py +0 -38
  98. usecortex_ai/types/source.py +0 -52
  99. usecortex_ai/types/sub_tenant_ids_data.py +0 -47
  100. usecortex_ai/types/tenant_stats.py +0 -42
  101. usecortex_ai/user/__init__.py +0 -4
  102. usecortex_ai/user/client.py +0 -145
  103. usecortex_ai/user/raw_client.py +0 -316
  104. usecortex_ai/user_memory/__init__.py +0 -4
  105. usecortex_ai/user_memory/client.py +0 -515
  106. usecortex_ai-0.3.6.dist-info/RECORD +0 -112
  107. /usecortex_ai/{document → dashboard}/__init__.py +0 -0

usecortex_ai/types/{fetch_content_data.py → insert_result.py}
@@ -6,25 +6,29 @@ import pydantic
 from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
 
 
-class FetchContentData(UniversalBaseModel):
-    file_id: str = pydantic.Field()
+class InsertResult(UniversalBaseModel):
     """
-    Unique identifier for the file being fetched
+    Result of an insert operation.
     """
 
-    file_content: typing.Optional[str] = pydantic.Field(default=None)
+    insert_count: int = pydantic.Field()
     """
-    Content of the fetched file
+    Number of entities inserted
+    """
+
+    ids: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+    """
+    Inserted entity IDs
     """
 
     success: typing.Optional[bool] = pydantic.Field(default=None)
     """
-    Indicates whether the fetch operation completed successfully
+    Whether insert succeeded
     """
 
-    message: typing.Optional[str] = pydantic.Field(default=None)
+    error: typing.Optional[str] = pydantic.Field(default=None)
     """
-    Status message about the fetch operation
+    Error message if failed
     """
 
     if IS_PYDANTIC_V2:
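
The reshaped model above reports insert results rather than fetched file content. A minimal sketch of consuming it (hypothetical values; only fields defined in the model are used):

from usecortex_ai.types.insert_result import InsertResult

# Hypothetical values for illustration; field names come from the InsertResult model above.
result = InsertResult(insert_count=2, ids=["chunk_1", "chunk_2"], success=True)
if result.success:
    print(f"inserted {result.insert_count} entities: {result.ids}")
else:
    print(f"insert failed: {result.error}")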

usecortex_ai/types/memory_item.py
@@ -0,0 +1,88 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .forceful_relations_payload import ForcefulRelationsPayload
+from .user_assistant_pair import UserAssistantPair
+
+
+class MemoryItem(UniversalBaseModel):
+    """
+    Represents a single memory item for ingestion.
+    Supports raw text, markdown, and user/assistant conversation pairs.
+    """
+
+    source_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Optional unique identifier. Auto-generated if not provided.
+    """
+
+    title: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Display title for this memory item.
+    """
+
+    text: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Raw text or markdown content to be indexed.
+    """
+
+    user_assistant_pairs: typing.Optional[typing.List[UserAssistantPair]] = pydantic.Field(default=None)
+    """
+    Array of user/assistant conversation pairs to store as memory.
+    """
+
+    is_markdown: typing.Optional[bool] = pydantic.Field(default=None)
+    """
+    Whether the text is markdown formatted.
+    """
+
+    infer: typing.Optional[bool] = pydantic.Field(default=None)
+    """
+    If true, process and extract additional insights/inferences from the content before indexing. Useful for extracting implicit information from conversations.
+    """
+
+    custom_instructions: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Custom instructions to guide inference processing.
+    """
+
+    user_name: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    User's name for personalization in conversation pairs.
+    """
+
+    expiry_time: typing.Optional[int] = pydantic.Field(default=None)
+    """
+    Optional TTL in seconds for this memory.
+    """
+
+    tenant_metadata: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    JSON string containing tenant-level document metadata (e.g., department, compliance_tag)
+
+    Example: > "{"department":"Finance","compliance_tag":"GDPR"}"
+    """
+
+    document_metadata: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    JSON string containing document-specific metadata (e.g., title, author, file_id). If file_id is not provided, the system will generate an ID automatically.
+
+    Example: > "{"title":"Q1 Report.pdf","author":"Alice Smith","file_id":"custom_file_123"}"
+    """
+
+    relations: typing.Optional[ForcefulRelationsPayload] = pydantic.Field(default=None)
+    """
+    Forcefully connect 2 sources based on cortex source ids or common properties.
+    """
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
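
A sketch of building a MemoryItem for ingestion, assuming the type is importable from the module generated above. Values are illustrative; note that tenant_metadata and document_metadata are JSON strings, per the field docstrings:

import json

from usecortex_ai.types.memory_item import MemoryItem

# Illustrative values only; the JSON-string metadata format follows the docstrings above.
memory = MemoryItem(
    title="Onboarding call notes",
    text="# Notes\nCustomer prefers weekly summaries.",
    is_markdown=True,
    infer=True,
    custom_instructions="Focus on preferences and commitments.",
    expiry_time=60 * 60 * 24 * 30,  # 30-day TTL in seconds
    tenant_metadata=json.dumps({"department": "Finance", "compliance_tag": "GDPR"}),
    document_metadata=json.dumps({"title": "Q1 Report.pdf", "author": "Alice Smith"}),
)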

usecortex_ai/types/memory_result_item.py
@@ -0,0 +1,47 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .source_status import SourceStatus
+
+
+class MemoryResultItem(UniversalBaseModel):
+    """
+    Result for a single ingested memory item.
+    """
+
+    source_id: str = pydantic.Field()
+    """
+    Unique identifier for the ingested source.
+    """
+
+    title: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Title of the memory if provided.
+    """
+
+    status: typing.Optional[SourceStatus] = pydantic.Field(default=None)
+    """
+    Initial processing status.
+    """
+
+    infer: typing.Optional[bool] = pydantic.Field(default=None)
+    """
+    Whether inference was requested for this memory.
+    """
+
+    error: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Error message if ingestion failed.
+    """
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
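
A sketch of reading per-item ingestion results; `results` is assumed to be a list of MemoryResultItem returned by the memory-ingestion endpoint (the client method that returns these is not expanded in this excerpt):

import typing

from usecortex_ai.types.memory_result_item import MemoryResultItem

def summarize(results: typing.List[MemoryResultItem]) -> None:
    # Only fields declared on MemoryResultItem are accessed here.
    for item in results:
        if item.error:
            print(f"{item.source_id}: failed ({item.error})")
        else:
            print(f"{item.source_id}: status={item.status}, infer={item.infer}")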

usecortex_ai/types/milvus_data_type.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+MilvusDataType = typing.Union[
+    typing.Literal[
+        "BOOL",
+        "INT8",
+        "INT16",
+        "INT32",
+        "INT64",
+        "FLOAT",
+        "DOUBLE",
+        "VARCHAR",
+        "JSON",
+        "ARRAY",
+        "FLOAT_VECTOR",
+        "SPARSE_FLOAT_VECTOR",
+    ],
+    typing.Any,
+]
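
MilvusDataType is a plain string union; a minimal, hypothetical illustration of annotating schema field types with it (where the SDK consumes these values is not shown in this excerpt):

from usecortex_ai.types.milvus_data_type import MilvusDataType

# The literals mirror Milvus column types; the typing.Any branch keeps the alias open to new values.
vector_field: MilvusDataType = "FLOAT_VECTOR"
metadata_field: MilvusDataType = "VARCHAR"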

usecortex_ai/types/path_triplet.py
@@ -9,24 +9,9 @@ from .relation_evidence import RelationEvidence
 
 
 class PathTriplet(UniversalBaseModel):
-    """
-    Single triplet within a path (without score, as the path is scored as a whole)
-    """
-
-    source: Entity = pydantic.Field()
-    """
-    Source entity
-    """
-
-    target: Entity = pydantic.Field()
-    """
-    Target entity
-    """
-
-    relation: RelationEvidence = pydantic.Field()
-    """
-    Relation between entities
-    """
+    source: Entity
+    relation: RelationEvidence
+    target: Entity
 
     if IS_PYDANTIC_V2:
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2

usecortex_ai/types/processing_status.py
@@ -4,6 +4,7 @@ import typing
 
 import pydantic
 from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .processing_status_indexing_status import ProcessingStatusIndexingStatus
 
 
 class ProcessingStatus(UniversalBaseModel):
@@ -12,9 +13,9 @@ class ProcessingStatus(UniversalBaseModel):
     Identifier for the file whose status is being retrieved
     """
 
-    indexing_status: str = pydantic.Field()
+    indexing_status: ProcessingStatusIndexingStatus = pydantic.Field()
     """
-    Current status of the file. Possible values are 'queued', 'processing', 'completed', 'errored'
+    Current status of the file. Possible values are 'queued', 'processing', 'completed', 'errored', 'graph_creation', 'success'. 'graph_creation' indicates the file is indexed but the knowledge graph is still being created. 'success' is an alias for 'completed'.
     """
 
     error_code: typing.Optional[str] = pydantic.Field(default=None)

usecortex_ai/types/processing_status_indexing_status.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ProcessingStatusIndexingStatus = typing.Union[
+    typing.Literal["queued", "processing", "completed", "errored", "graph_creation", "success"], typing.Any
+]
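
The two added statuses change how completion should be read; a sketch of helpers that follow the ProcessingStatus docstring above ('success' is an alias for 'completed', 'graph_creation' means the file is indexed but the knowledge graph is still being built):

from usecortex_ai.types.processing_status import ProcessingStatus

def is_indexed(status: ProcessingStatus) -> bool:
    # Per the docstring, 'graph_creation' already implies the file is indexed.
    return status.indexing_status in ("completed", "success", "graph_creation")

def is_failed(status: ProcessingStatus) -> bool:
    return status.indexing_status == "errored"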

usecortex_ai/types/qn_a_search_response.py
@@ -0,0 +1,49 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .graph_context import GraphContext
+from .vector_store_chunk import VectorStoreChunk
+
+
+class QnASearchResponse(UniversalBaseModel):
+    """
+    Response model for the QnA search API.
+    """
+
+    success: typing.Optional[bool] = None
+    answer: str = pydantic.Field()
+    """
+    The AI-generated answer based on retrieved context
+    """
+
+    chunks: typing.Optional[typing.List[VectorStoreChunk]] = pydantic.Field(default=None)
+    """
+    Retrieved context chunks used to generate the answer
+    """
+
+    graph_context: typing.Optional[GraphContext] = pydantic.Field(default=None)
+    """
+    Knowledge graph context (entity paths and chunk relations)
+    """
+
+    model_used: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    The LLM model used for answer generation
+    """
+
+    timing: typing.Optional[typing.Dict[str, float]] = pydantic.Field(default=None)
+    """
+    Timing information (retrieval_ms, answer_generation_ms, total_ms)
+    """
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
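
A sketch of consuming a QnASearchResponse; it assumes `response` was returned by the QnA search endpoint (the client method producing it is not expanded in this excerpt):

from usecortex_ai.types.qn_a_search_response import QnASearchResponse

def print_answer(response: QnASearchResponse) -> None:
    print(response.answer)
    print(f"model: {response.model_used}, context chunks: {len(response.chunks or [])}")
    if response.timing:
        # Keys documented above: retrieval_ms, answer_generation_ms, total_ms
        print(f"total: {response.timing.get('total_ms', 0.0):.1f} ms")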

usecortex_ai/types/{retrieve_response.py → raw_embedding_document.py}
@@ -4,24 +4,27 @@ import typing
 
 import pydantic
 from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .chunk_graph_relations_response import ChunkGraphRelationsResponse
-from .search_chunk import SearchChunk
+from .raw_embedding_vector import RawEmbeddingVector
 
 
-class RetrieveResponse(UniversalBaseModel):
-    chunks: typing.Optional[typing.List[SearchChunk]] = pydantic.Field(default=None)
+class RawEmbeddingDocument(UniversalBaseModel):
     """
-    Retrieved content chunks
+    A raw embedding document for direct insert/upsert operations.
     """
 
-    graph_relations: typing.Optional[ChunkGraphRelationsResponse] = pydantic.Field(default=None)
+    source_id: str = pydantic.Field()
     """
-    Graph relations with chunk_relations (by chunk_id) and entity_relations (top entity matches)
+    Source identifier for the embedding
     """
 
     metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
     """
-    Additional metadata about the retrieval run
+    Metadata to store
+    """
+
+    embeddings: typing.List[RawEmbeddingVector] = pydantic.Field()
+    """
+    Embedding payloads containing ids and vectors
     """
 
     if IS_PYDANTIC_V2:

usecortex_ai/types/raw_embedding_search_result.py
@@ -0,0 +1,47 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .raw_embedding_vector import RawEmbeddingVector
+
+
+class RawEmbeddingSearchResult(UniversalBaseModel):
+    """
+    Search result for raw embedding collections.
+    """
+
+    source_id: str = pydantic.Field()
+    """
+    Source identifier
+    """
+
+    embedding: typing.Optional[RawEmbeddingVector] = pydantic.Field(default=None)
+    """
+    Embedding payload with chunk id and vector (if set)
+    """
+
+    score: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Similarity score
+    """
+
+    distance: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Vector distance
+    """
+
+    metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    Metadata associated with the embedding
+    """
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow

usecortex_ai/types/{user_memory.py → raw_embedding_vector.py}
@@ -6,19 +6,19 @@ import pydantic
 from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
 
 
-class UserMemory(UniversalBaseModel):
+class RawEmbeddingVector(UniversalBaseModel):
     """
-    Represents a user memory stored in the system.
+    Embedding payload containing the chunk identifier and vector.
     """
 
-    memory_id: str = pydantic.Field()
+    chunk_id: str = pydantic.Field()
     """
-    Unique identifier for the user memory
+    Primary key / chunk identifier
     """
 
-    memory_content: str = pydantic.Field()
+    embedding: typing.List[float] = pydantic.Field()
     """
-    The actual memory content text that was stored
+    Embedding vector
     """
 
     if IS_PYDANTIC_V2:
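
Together, RawEmbeddingDocument and RawEmbeddingVector describe the payload for the direct insert/upsert path in the embeddings API; a sketch with made-up ids and a toy 3-dimensional vector:

from usecortex_ai.types.raw_embedding_document import RawEmbeddingDocument
from usecortex_ai.types.raw_embedding_vector import RawEmbeddingVector

# Hypothetical ids and vectors; real embeddings would match the collection's dimensionality.
doc = RawEmbeddingDocument(
    source_id="doc_001",
    metadata={"title": "Q1 Report.pdf"},
    embeddings=[
        RawEmbeddingVector(chunk_id="doc_001_chunk_0", embedding=[0.12, -0.03, 0.88]),
        RawEmbeddingVector(chunk_id="doc_001_chunk_1", embedding=[0.05, 0.41, -0.27]),
    ],
)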

usecortex_ai/types/relation_evidence.py
@@ -8,9 +8,8 @@ from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
 
 
 class RelationEvidence(UniversalBaseModel):
-    relationship_id: typing.Optional[str] = pydantic.Field(default=None)
     """
-    Unique identifier for this specific relationship edge in the graph. The combination of source entity, target entity, and relationship_id allows disambiguation between multiple relations between the same source and target entities.
+    Single piece of evidence for a relationship between two entities
     """
 
     canonical_predicate: str = pydantic.Field()
@@ -28,19 +27,39 @@ class RelationEvidence(UniversalBaseModel):
     Rich contextual description of the relationship with surrounding information, details about how/why/when, and any relevant background. Should be comprehensive enough to understand the relationship without referring back to source.
     """
 
+    confidence: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Confidence score
+    """
+
     temporal_details: typing.Optional[str] = pydantic.Field(default=None)
     """
     Temporal timing information extracted from text (e.g., 'last week', 'in 2023', 'yesterday')
     """
 
+    timestamp: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+    """
+    Timestamp when this relation was introduced
+    """
+
+    relationship_id: str = pydantic.Field()
+    """
+    Unique ID for this relationship from graph database
+    """
+
     chunk_id: typing.Optional[str] = pydantic.Field(default=None)
     """
-    The chunk_id this relation came from
+    ID of the chunk this relation was extracted from
     """
 
-    timestamp: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+    source_entity_id: typing.Optional[str] = pydantic.Field(default=None)
     """
-    Timestamp when this relation was introduced
+    The entity ID of source node
+    """
+
+    target_entity_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    The entity ID of target node
     """
 
     if IS_PYDANTIC_V2:

usecortex_ai/types/retrieval_result.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .graph_context import GraphContext
+from .vector_store_chunk import VectorStoreChunk
+
+
+class RetrievalResult(UniversalBaseModel):
+    """
+    Result of a hybrid search retrieval operation.
+    """
+
+    chunks: typing.Optional[typing.List[VectorStoreChunk]] = None
+    graph_context: typing.Optional[GraphContext] = None
+    extra_context: typing.Optional[typing.Dict[str, VectorStoreChunk]] = pydantic.Field(default=None)
+    """
+    Map of chunk_uuid to VectorStoreChunk for extra context from forcefully related sources. Use chunk.extra_context_ids to look up chunks: extra_context[id] for id in chunk.extra_context_ids.
+    """
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
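
The extra_context docstring spells out the lookup pattern; a sketch of it, assuming (as that docstring states) that each VectorStoreChunk carries an extra_context_ids list:

from usecortex_ai.types.retrieval_result import RetrievalResult

def expand_chunks(result: RetrievalResult):
    # Yields each chunk together with the forcefully related chunks it points to.
    extra = result.extra_context or {}
    for chunk in result.chunks or []:
        ids = getattr(chunk, "extra_context_ids", None) or []
        yield chunk, [extra[i] for i in ids if i in extra]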

usecortex_ai/types/scored_path_response.py
@@ -8,26 +8,12 @@ from .path_triplet import PathTriplet
 
 
 class ScoredPathResponse(UniversalBaseModel):
+    triplets: typing.List[PathTriplet]
+    relevancy_score: float
+    combined_context: typing.Optional[str] = None
+    group_id: typing.Optional[str] = pydantic.Field(default=None)
     """
-    A multi-hop path (chain of triplets) with a relevancy score.
-
-    Represents connected paths like: A --rel1--> B --rel2--> C
-    The triplets list preserves the chain order.
-    """
-
-    combined_context: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    Merged context from all triplets in the path
-    """
-
-    triplets: typing.List[PathTriplet] = pydantic.Field()
-    """
-    Ordered list of triplets forming the path chain
-    """
-
-    relevancy_score: typing.Optional[float] = pydantic.Field(default=None)
-    """
-    Relevancy score for the entire path
+    Path group identifier (e.g., 'p_0') for chunk mapping
     """
 
     if IS_PYDANTIC_V2:
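
With the simplified PathTriplet and ScoredPathResponse above, a path can be rendered using only fields visible in these diffs (canonical_predicate comes from RelationEvidence):

from usecortex_ai.types.scored_path_response import ScoredPathResponse

def describe_path(path: ScoredPathResponse) -> str:
    # Joins the canonical predicates of the ordered triplets into one chain.
    hops = " -> ".join(t.relation.canonical_predicate for t in path.triplets)
    return f"[{path.group_id or 'ungrouped'}] score={path.relevancy_score:.2f}: {hops}"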

usecortex_ai/types/search_mode.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SearchMode = typing.Union[typing.Literal["sources", "memories"], typing.Any]

usecortex_ai/types/{batch_upload_data.py → source_delete_response.py}
@@ -4,22 +4,22 @@ import typing
 
 import pydantic
 from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .file_upload_result import FileUploadResult
+from .source_delete_result_item import SourceDeleteResultItem
 
 
-class BatchUploadData(UniversalBaseModel):
-    uploaded: typing.List[FileUploadResult] = pydantic.Field()
+class SourceDeleteResponse(UniversalBaseModel):
     """
-    List of successfully uploaded files for processing
+    Response for delete request.
     """
 
-    message: str = pydantic.Field()
+    success: typing.Optional[bool] = None
+    message: typing.Optional[str] = None
+    results: typing.Optional[typing.List[SourceDeleteResultItem]] = None
+    deleted_count: typing.Optional[int] = pydantic.Field(default=None)
     """
-    Status message indicating batch document parsing scheduled
+    Number of sources deleted.
     """
 
-    success: typing.Optional[bool] = None
-
     if IS_PYDANTIC_V2:
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:

usecortex_ai/types/{list_user_memories_response.py → source_delete_result_item.py}
@@ -4,22 +4,26 @@ import typing
 
 import pydantic
 from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .user_memory import UserMemory
 
 
-class ListUserMemoriesResponse(UniversalBaseModel):
+class SourceDeleteResultItem(UniversalBaseModel):
     """
-    Response model for listing all user memories.
+    Result for a single source deletion.
     """
 
-    success: bool = pydantic.Field()
+    source_id: str = pydantic.Field()
     """
-    Indicates whether the memory listing operation was successful
+    ID of the source.
     """
 
-    user_memories: typing.Optional[typing.List[UserMemory]] = pydantic.Field(default=None)
+    deleted: typing.Optional[bool] = pydantic.Field(default=None)
     """
-    Array of all user memories associated with your tenant
+    Whether deletion succeeded.
+    """
+
+    error: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Error message if deletion failed.
     """
 
     if IS_PYDANTIC_V2:
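
A sketch of handling the new delete response shape; `resp` is assumed to be a SourceDeleteResponse returned by the sources delete endpoint (the client wrapper is not expanded in this excerpt):

from usecortex_ai.types.source_delete_response import SourceDeleteResponse

def report_deletions(resp: SourceDeleteResponse) -> None:
    print(f"deleted {resp.deleted_count or 0} source(s): {resp.message}")
    for item in resp.results or []:
        if not item.deleted:
            print(f"  {item.source_id}: {item.error or 'not deleted'}")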