usecortex-ai 0.3.5__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (103)
  1. usecortex_ai/__init__.py +84 -66
  2. usecortex_ai/client.py +25 -23
  3. usecortex_ai/dashboard/client.py +448 -0
  4. usecortex_ai/{user_memory → dashboard}/raw_client.py +371 -530
  5. usecortex_ai/embeddings/client.py +229 -102
  6. usecortex_ai/embeddings/raw_client.py +323 -211
  7. usecortex_ai/errors/__init__.py +2 -0
  8. usecortex_ai/errors/bad_request_error.py +1 -2
  9. usecortex_ai/errors/forbidden_error.py +1 -2
  10. usecortex_ai/errors/internal_server_error.py +1 -2
  11. usecortex_ai/errors/not_found_error.py +1 -2
  12. usecortex_ai/errors/service_unavailable_error.py +1 -2
  13. usecortex_ai/errors/too_many_requests_error.py +11 -0
  14. usecortex_ai/errors/unauthorized_error.py +1 -2
  15. usecortex_ai/fetch/client.py +350 -29
  16. usecortex_ai/fetch/raw_client.py +919 -65
  17. usecortex_ai/raw_client.py +8 -2
  18. usecortex_ai/search/client.py +293 -257
  19. usecortex_ai/search/raw_client.py +445 -346
  20. usecortex_ai/search/types/alpha.py +1 -1
  21. usecortex_ai/sources/client.py +29 -216
  22. usecortex_ai/sources/raw_client.py +51 -589
  23. usecortex_ai/tenant/client.py +155 -118
  24. usecortex_ai/tenant/raw_client.py +227 -350
  25. usecortex_ai/types/__init__.py +78 -62
  26. usecortex_ai/types/add_memory_response.py +39 -0
  27. usecortex_ai/types/{relations.py → api_key_info.py} +25 -5
  28. usecortex_ai/types/app_sources_upload_data.py +15 -6
  29. usecortex_ai/types/{file_upload_result.py → collection_stats.py} +5 -5
  30. usecortex_ai/types/custom_property_definition.py +75 -0
  31. usecortex_ai/types/dashboard_apis_response.py +33 -0
  32. usecortex_ai/types/dashboard_sources_response.py +33 -0
  33. usecortex_ai/types/dashboard_tenants_response.py +33 -0
  34. usecortex_ai/types/{list_sources_response.py → delete_result.py} +10 -7
  35. usecortex_ai/types/delete_user_memory_response.py +1 -1
  36. usecortex_ai/types/entity.py +4 -4
  37. usecortex_ai/types/fetch_mode.py +5 -0
  38. usecortex_ai/types/graph_context.py +26 -0
  39. usecortex_ai/types/{delete_sources.py → infra.py} +4 -3
  40. usecortex_ai/types/{fetch_content_data.py → insert_result.py} +12 -8
  41. usecortex_ai/types/memory_item.py +82 -0
  42. usecortex_ai/types/memory_result_item.py +47 -0
  43. usecortex_ai/types/milvus_data_type.py +21 -0
  44. usecortex_ai/types/{related_chunk.py → path_triplet.py} +6 -5
  45. usecortex_ai/types/processing_status.py +3 -2
  46. usecortex_ai/types/processing_status_indexing_status.py +7 -0
  47. usecortex_ai/types/qn_a_search_response.py +49 -0
  48. usecortex_ai/types/{retrieve_response.py → raw_embedding_document.py} +11 -8
  49. usecortex_ai/types/raw_embedding_search_result.py +47 -0
  50. usecortex_ai/types/{user_memory.py → raw_embedding_vector.py} +6 -6
  51. usecortex_ai/types/relation_evidence.py +20 -0
  52. usecortex_ai/types/retrieval_result.py +26 -0
  53. usecortex_ai/types/scored_path_response.py +26 -0
  54. usecortex_ai/types/search_mode.py +5 -0
  55. usecortex_ai/types/{batch_upload_data.py → source_delete_response.py} +8 -8
  56. usecortex_ai/types/{list_user_memories_response.py → source_delete_result_item.py} +11 -7
  57. usecortex_ai/types/source_fetch_response.py +70 -0
  58. usecortex_ai/types/{graph_relations_response.py → source_graph_relations_response.py} +3 -3
  59. usecortex_ai/types/{single_upload_data.py → source_list_response.py} +7 -10
  60. usecortex_ai/types/source_model.py +11 -1
  61. usecortex_ai/types/source_status.py +5 -0
  62. usecortex_ai/types/source_upload_response.py +35 -0
  63. usecortex_ai/types/source_upload_result_item.py +38 -0
  64. usecortex_ai/types/supported_llm_providers.py +5 -0
  65. usecortex_ai/types/{embeddings_create_collection_data.py → tenant_create_response.py} +9 -7
  66. usecortex_ai/types/{extended_context.py → tenant_info.py} +13 -4
  67. usecortex_ai/types/{embeddings_search_data.py → tenant_metadata_schema_info.py} +8 -9
  68. usecortex_ai/types/{tenant_create_data.py → tenant_stats_response.py} +9 -8
  69. usecortex_ai/types/{triple_with_evidence.py → triplet_with_evidence.py} +1 -1
  70. usecortex_ai/types/user_assistant_pair.py +4 -0
  71. usecortex_ai/types/{search_chunk.py → vector_store_chunk.py} +3 -9
  72. usecortex_ai/upload/__init__.py +3 -0
  73. usecortex_ai/upload/client.py +233 -1937
  74. usecortex_ai/upload/raw_client.py +364 -4401
  75. usecortex_ai/upload/types/__init__.py +7 -0
  76. usecortex_ai/upload/types/body_upload_app_ingestion_upload_app_post_app_sources.py +7 -0
  77. {usecortex_ai-0.3.5.dist-info → usecortex_ai-0.4.0.dist-info}/METADATA +2 -2
  78. usecortex_ai-0.4.0.dist-info/RECORD +113 -0
  79. {usecortex_ai-0.3.5.dist-info → usecortex_ai-0.4.0.dist-info}/WHEEL +1 -1
  80. usecortex_ai/document/client.py +0 -139
  81. usecortex_ai/document/raw_client.py +0 -312
  82. usecortex_ai/types/add_user_memory_response.py +0 -41
  83. usecortex_ai/types/body_scrape_webpage_upload_scrape_webpage_post.py +0 -17
  84. usecortex_ai/types/body_update_scrape_job_upload_update_webpage_patch.py +0 -17
  85. usecortex_ai/types/delete_memory_request.py +0 -32
  86. usecortex_ai/types/delete_sub_tenant_data.py +0 -42
  87. usecortex_ai/types/embeddings_delete_data.py +0 -37
  88. usecortex_ai/types/embeddings_get_data.py +0 -37
  89. usecortex_ai/types/markdown_upload_request.py +0 -41
  90. usecortex_ai/types/retrieve_user_memory_response.py +0 -38
  91. usecortex_ai/types/source.py +0 -52
  92. usecortex_ai/types/sub_tenant_ids_data.py +0 -47
  93. usecortex_ai/types/tenant_stats.py +0 -42
  94. usecortex_ai/types/webpage_scrape_request.py +0 -27
  95. usecortex_ai/user/__init__.py +0 -4
  96. usecortex_ai/user/client.py +0 -145
  97. usecortex_ai/user/raw_client.py +0 -316
  98. usecortex_ai/user_memory/__init__.py +0 -4
  99. usecortex_ai/user_memory/client.py +0 -515
  100. usecortex_ai-0.3.5.dist-info/RECORD +0 -108
  101. /usecortex_ai/{document → dashboard}/__init__.py +0 -0
  102. {usecortex_ai-0.3.5.dist-info → usecortex_ai-0.4.0.dist-info}/licenses/LICENSE +0 -0
  103. {usecortex_ai-0.3.5.dist-info → usecortex_ai-0.4.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,82 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ import pydantic
+ from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+ from .user_assistant_pair import UserAssistantPair
+
+
+ class MemoryItem(UniversalBaseModel):
+     """
+     Represents a single memory item for ingestion.
+     Supports raw text, markdown, and user/assistant conversation pairs.
+     """
+
+     source_id: typing.Optional[str] = pydantic.Field(default=None)
+     """
+     Optional unique identifier. Auto-generated if not provided.
+     """
+
+     title: typing.Optional[str] = pydantic.Field(default=None)
+     """
+     Display title for this memory item.
+     """
+
+     text: typing.Optional[str] = pydantic.Field(default=None)
+     """
+     Raw text or markdown content to be indexed.
+     """
+
+     user_assistant_pairs: typing.Optional[typing.List[UserAssistantPair]] = pydantic.Field(default=None)
+     """
+     Array of user/assistant conversation pairs to store as memory.
+     """
+
+     is_markdown: typing.Optional[bool] = pydantic.Field(default=None)
+     """
+     Whether the text is markdown formatted.
+     """
+
+     infer: typing.Optional[bool] = pydantic.Field(default=None)
+     """
+     If true, process and extract additional insights/inferences from the content before indexing. Useful for extracting implicit information from conversations.
+     """
+
+     custom_instructions: typing.Optional[str] = pydantic.Field(default=None)
+     """
+     Custom instructions to guide inference processing.
+     """
+
+     user_name: typing.Optional[str] = pydantic.Field(default=None)
+     """
+     User's name for personalization in conversation pairs.
+     """
+
+     expiry_time: typing.Optional[int] = pydantic.Field(default=None)
+     """
+     Optional TTL in seconds for this memory.
+     """
+
+     tenant_metadata: typing.Optional[str] = pydantic.Field(default=None)
+     """
+     JSON string containing tenant-level document metadata (e.g., department, compliance_tag)
+
+     Example: > "{"department":"Finance","compliance_tag":"GDPR"}"
+     """
+
+     document_metadata: typing.Optional[str] = pydantic.Field(default=None)
+     """
+     JSON string containing document-specific metadata (e.g., title, author, file_id). If file_id is not provided, the system will generate an ID automatically.
+
+     Example: > "{"title":"Q1 Report.pdf","author":"Alice Smith","file_id":"custom_file_123"}"
+     """
+
+     if IS_PYDANTIC_V2:
+         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+     else:
+
+         class Config:
+             frozen = True
+             smart_union = True
+             extra = pydantic.Extra.allow
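
The new MemoryItem model accepts raw text, markdown, or user/assistant pairs in one request shape. A minimal construction sketch, assuming the generated types are re-exported from usecortex_ai.types (the values are illustrative):

    from usecortex_ai.types import MemoryItem

    # Raw markdown memory; source_id is optional and is auto-generated when omitted.
    memory = MemoryItem(
        title="Q1 planning notes",
        text="## Decisions\n- Ship the search revamp in April.",
        is_markdown=True,
        infer=True,
        tenant_metadata='{"department":"Finance","compliance_tag":"GDPR"}',
    )
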
@@ -0,0 +1,47 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ import pydantic
+ from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+ from .source_status import SourceStatus
+
+
+ class MemoryResultItem(UniversalBaseModel):
+     """
+     Result for a single ingested memory item.
+     """
+
+     source_id: str = pydantic.Field()
+     """
+     Unique identifier for the ingested source.
+     """
+
+     title: typing.Optional[str] = pydantic.Field(default=None)
+     """
+     Title of the memory if provided.
+     """
+
+     status: typing.Optional[SourceStatus] = pydantic.Field(default=None)
+     """
+     Initial processing status.
+     """
+
+     infer: typing.Optional[bool] = pydantic.Field(default=None)
+     """
+     Whether inference was requested for this memory.
+     """
+
+     error: typing.Optional[str] = pydantic.Field(default=None)
+     """
+     Error message if ingestion failed.
+     """
+
+     if IS_PYDANTIC_V2:
+         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+     else:
+
+         class Config:
+             frozen = True
+             smart_union = True
+             extra = pydantic.Extra.allow
@@ -0,0 +1,21 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ MilvusDataType = typing.Union[
+     typing.Literal[
+         "BOOL",
+         "INT8",
+         "INT16",
+         "INT32",
+         "INT64",
+         "FLOAT",
+         "DOUBLE",
+         "VARCHAR",
+         "JSON",
+         "ARRAY",
+         "FLOAT_VECTOR",
+         "SPARSE_FLOAT_VECTOR",
+     ],
+     typing.Any,
+ ]
@@ -4,13 +4,14 @@ import typing

  import pydantic
  from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+ from .entity import Entity
+ from .relation_evidence import RelationEvidence


- class RelatedChunk(UniversalBaseModel):
-     source_id: str
-     chunk_uuid: str
-     chunk_content: str
-     source_title: typing.Optional[str] = None
+ class PathTriplet(UniversalBaseModel):
+     source: Entity
+     relation: RelationEvidence
+     target: Entity

      if IS_PYDANTIC_V2:
          model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
@@ -4,6 +4,7 @@ import typing

  import pydantic
  from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+ from .processing_status_indexing_status import ProcessingStatusIndexingStatus


  class ProcessingStatus(UniversalBaseModel):
@@ -12,9 +13,9 @@ class ProcessingStatus(UniversalBaseModel):
      Identifier for the file whose status is being retrieved
      """

-     indexing_status: str = pydantic.Field()
+     indexing_status: ProcessingStatusIndexingStatus = pydantic.Field()
      """
-     Current status of the file. Possible values are 'queued', 'processing', 'completed', 'errored'
+     Current status of the file. Possible values are 'queued', 'processing', 'completed', 'errored', 'graph_creation', 'success'. 'graph_creation' indicates the file is indexed but the knowledge graph is still being created. 'success' is an alias for 'completed'.
      """

      error_code: typing.Optional[str] = pydantic.Field(default=None)
@@ -0,0 +1,7 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ ProcessingStatusIndexingStatus = typing.Union[
+     typing.Literal["queued", "processing", "completed", "errored", "graph_creation", "success"], typing.Any
+ ]
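
With indexing_status now typed as a literal union, callers can branch on the documented states. A small sketch, assuming the types are importable from usecortex_ai.types (the helper itself is illustrative):

    from usecortex_ai.types import ProcessingStatus

    def is_searchable(status: ProcessingStatus) -> bool:
        # Per the docstring above: 'success' is an alias for 'completed', and
        # 'graph_creation' means the file is indexed while the knowledge graph
        # is still being built.
        return status.indexing_status in ("completed", "success", "graph_creation")
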
@@ -0,0 +1,49 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ import pydantic
+ from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+ from .graph_context import GraphContext
+ from .vector_store_chunk import VectorStoreChunk
+
+
+ class QnASearchResponse(UniversalBaseModel):
+     """
+     Response model for the QnA search API.
+     """
+
+     success: typing.Optional[bool] = None
+     answer: str = pydantic.Field()
+     """
+     The AI-generated answer based on retrieved context
+     """
+
+     chunks: typing.Optional[typing.List[VectorStoreChunk]] = pydantic.Field(default=None)
+     """
+     Retrieved context chunks used to generate the answer
+     """
+
+     graph_context: typing.Optional[GraphContext] = pydantic.Field(default=None)
+     """
+     Knowledge graph context (entity paths and chunk relations)
+     """
+
+     model_used: typing.Optional[str] = pydantic.Field(default=None)
+     """
+     The LLM model used for answer generation
+     """
+
+     timing: typing.Optional[typing.Dict[str, float]] = pydantic.Field(default=None)
+     """
+     Timing information (retrieval_ms, answer_generation_ms, total_ms)
+     """
+
+     if IS_PYDANTIC_V2:
+         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+     else:
+
+         class Config:
+             frozen = True
+             smart_union = True
+             extra = pydantic.Extra.allow
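
A short sketch of consuming a QnASearchResponse as defined above; the import path follows the package layout and the handling is illustrative:

    from usecortex_ai.types import QnASearchResponse

    def print_answer(resp: QnASearchResponse) -> None:
        # answer is the only required field; chunks, graph_context and timing are optional.
        print(resp.answer)
        print(f"{len(resp.chunks or [])} context chunks, model: {resp.model_used}")
        if resp.timing:
            print(f"total: {resp.timing.get('total_ms')} ms")
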
@@ -4,24 +4,27 @@ import typing

  import pydantic
  from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
- from .search_chunk import SearchChunk
- from .triple_with_evidence import TripleWithEvidence
+ from .raw_embedding_vector import RawEmbeddingVector


- class RetrieveResponse(UniversalBaseModel):
-     chunks: typing.Optional[typing.List[SearchChunk]] = pydantic.Field(default=None)
+ class RawEmbeddingDocument(UniversalBaseModel):
      """
-     Retrieved content chunks
+     A raw embedding document for direct insert/upsert operations.
      """

-     extra_graph_context: typing.Optional[typing.List[TripleWithEvidence]] = pydantic.Field(default=None)
+     source_id: str = pydantic.Field()
      """
-     Extra graph context which will help you agent get better understanding of the entities involved in query.
+     Source identifier for the embedding
      """

      metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
      """
-     Additional metadata about the retrieval run
+     Metadata to store
+     """
+
+     embeddings: typing.List[RawEmbeddingVector] = pydantic.Field()
+     """
+     Embedding payloads containing ids and vectors
      """

      if IS_PYDANTIC_V2:
@@ -0,0 +1,47 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ import pydantic
+ from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+ from .raw_embedding_vector import RawEmbeddingVector
+
+
+ class RawEmbeddingSearchResult(UniversalBaseModel):
+     """
+     Search result for raw embedding collections.
+     """
+
+     source_id: str = pydantic.Field()
+     """
+     Source identifier
+     """
+
+     embedding: typing.Optional[RawEmbeddingVector] = pydantic.Field(default=None)
+     """
+     Embedding payload with chunk id and vector (if set)
+     """
+
+     score: typing.Optional[float] = pydantic.Field(default=None)
+     """
+     Similarity score
+     """
+
+     distance: typing.Optional[float] = pydantic.Field(default=None)
+     """
+     Vector distance
+     """
+
+     metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+     """
+     Metadata associated with the embedding
+     """
+
+     if IS_PYDANTIC_V2:
+         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+     else:
+
+         class Config:
+             frozen = True
+             smart_union = True
+             extra = pydantic.Extra.allow
@@ -6,19 +6,19 @@ import pydantic
  from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel


- class UserMemory(UniversalBaseModel):
+ class RawEmbeddingVector(UniversalBaseModel):
      """
-     Represents a user memory stored in the system.
+     Embedding payload containing the chunk identifier and vector.
      """

-     memory_id: str = pydantic.Field()
+     chunk_id: str = pydantic.Field()
      """
-     Unique identifier for the user memory
+     Primary key / chunk identifier
      """

-     memory_content: str = pydantic.Field()
+     embedding: typing.List[float] = pydantic.Field()
      """
-     The actual memory content text that was stored
+     Embedding vector
      """

      if IS_PYDANTIC_V2:
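
RawEmbeddingDocument and RawEmbeddingVector together describe pre-computed vectors for direct insert/upsert. A minimal sketch, with placeholder ids and vectors (the embedding dimensionality is not prescribed by these models):

    from usecortex_ai.types import RawEmbeddingDocument, RawEmbeddingVector

    doc = RawEmbeddingDocument(
        source_id="doc_42",
        metadata={"title": "Q1 Report.pdf"},
        embeddings=[
            RawEmbeddingVector(chunk_id="doc_42_0", embedding=[0.12, -0.03, 0.88]),
            RawEmbeddingVector(chunk_id="doc_42_1", embedding=[0.05, 0.41, -0.27]),
        ],
    )
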
@@ -42,6 +42,26 @@ class RelationEvidence(UniversalBaseModel):
      Timestamp when this relation was introduced
      """

+     relationship_id: str = pydantic.Field()
+     """
+     Unique ID for this relationship from graph database
+     """
+
+     chunk_id: typing.Optional[str] = pydantic.Field(default=None)
+     """
+     ID of the chunk this relation was extracted from
+     """
+
+     source_entity_id: typing.Optional[str] = pydantic.Field(default=None)
+     """
+     The entity ID of source node
+     """
+
+     target_entity_id: typing.Optional[str] = pydantic.Field(default=None)
+     """
+     The entity ID of target node
+     """
+
      if IS_PYDANTIC_V2:
          model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
      else:
@@ -0,0 +1,26 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ import pydantic
+ from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+ from .graph_context import GraphContext
+ from .vector_store_chunk import VectorStoreChunk
+
+
+ class RetrievalResult(UniversalBaseModel):
+     """
+     Result of a hybrid search retrieval operation.
+     """
+
+     chunks: typing.Optional[typing.List[VectorStoreChunk]] = None
+     graph_context: typing.Optional[GraphContext] = None
+
+     if IS_PYDANTIC_V2:
+         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+     else:
+
+         class Config:
+             frozen = True
+             smart_union = True
+             extra = pydantic.Extra.allow
@@ -0,0 +1,26 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ import pydantic
+ from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+ from .path_triplet import PathTriplet
+
+
+ class ScoredPathResponse(UniversalBaseModel):
+     triplets: typing.List[PathTriplet]
+     relevancy_score: float
+     combined_context: typing.Optional[str] = None
+     group_id: typing.Optional[str] = pydantic.Field(default=None)
+     """
+     Path group identifier (e.g., 'p_0') for chunk mapping
+     """
+
+     if IS_PYDANTIC_V2:
+         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+     else:
+
+         class Config:
+             frozen = True
+             smart_union = True
+             extra = pydantic.Extra.allow
@@ -0,0 +1,5 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ SearchMode = typing.Union[typing.Literal["sources", "memories"], typing.Any]
@@ -4,22 +4,22 @@ import typing

  import pydantic
  from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
- from .file_upload_result import FileUploadResult
+ from .source_delete_result_item import SourceDeleteResultItem


- class BatchUploadData(UniversalBaseModel):
-     uploaded: typing.List[FileUploadResult] = pydantic.Field()
+ class SourceDeleteResponse(UniversalBaseModel):
      """
-     List of successfully uploaded files for processing
+     Response for delete request.
      """

-     message: str = pydantic.Field()
+     success: typing.Optional[bool] = None
+     message: typing.Optional[str] = None
+     results: typing.Optional[typing.List[SourceDeleteResultItem]] = None
+     deleted_count: typing.Optional[int] = pydantic.Field(default=None)
      """
-     Status message indicating batch document parsing scheduled
+     Number of sources deleted.
      """

-     success: typing.Optional[bool] = None
-
      if IS_PYDANTIC_V2:
          model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
      else:
@@ -4,22 +4,26 @@ import typing

  import pydantic
  from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
- from .user_memory import UserMemory


- class ListUserMemoriesResponse(UniversalBaseModel):
+ class SourceDeleteResultItem(UniversalBaseModel):
      """
-     Response model for listing all user memories.
+     Result for a single source deletion.
      """

-     success: bool = pydantic.Field()
+     source_id: str = pydantic.Field()
      """
-     Indicates whether the memory listing operation was successful
+     ID of the source.
      """

-     user_memories: typing.Optional[typing.List[UserMemory]] = pydantic.Field(default=None)
+     deleted: typing.Optional[bool] = pydantic.Field(default=None)
      """
-     Array of all user memories associated with your tenant
+     Whether deletion succeeded.
+     """
+
+     error: typing.Optional[str] = pydantic.Field(default=None)
+     """
+     Error message if deletion failed.
      """

      if IS_PYDANTIC_V2:
@@ -0,0 +1,70 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ import pydantic
+ import typing_extensions
+ from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+ from ..core.serialization import FieldMetadata
+
+
+ class SourceFetchResponse(UniversalBaseModel):
+     """
+     Response for file fetch request.
+     """
+
+     success: typing.Optional[bool] = pydantic.Field(default=None)
+     """
+     Whether the fetch was successful
+     """
+
+     source_id: str = pydantic.Field()
+     """
+     Source ID of the fetched file
+     """
+
+     content: typing.Optional[str] = pydantic.Field(default=None)
+     """
+     File content as string (if mode includes 'content')
+     """
+
+     content_base_64: typing_extensions.Annotated[typing.Optional[str], FieldMetadata(alias="content_base64")] = (
+         pydantic.Field(default=None)
+     )
+     """
+     File content as base64 encoded string (for binary files, if mode includes 'content')
+     """
+
+     presigned_url: typing.Optional[str] = pydantic.Field(default=None)
+     """
+     Presigned URL to access the file (if mode includes 'url')
+     """
+
+     content_type: typing.Optional[str] = pydantic.Field(default=None)
+     """
+     Content type of the file
+     """
+
+     size_bytes: typing.Optional[int] = pydantic.Field(default=None)
+     """
+     Size of the file in bytes
+     """
+
+     message: typing.Optional[str] = pydantic.Field(default=None)
+     """
+     Response message
+     """
+
+     error: typing.Optional[str] = pydantic.Field(default=None)
+     """
+     Error message if fetch failed
+     """
+
+     if IS_PYDANTIC_V2:
+         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+     else:
+
+         class Config:
+             frozen = True
+             smart_union = True
+             extra = pydantic.Extra.allow
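
SourceFetchResponse can carry the payload in three forms depending on the requested fetch mode. A sketch of handling them (the helper and file handling are illustrative, not part of the SDK):

    import base64

    from usecortex_ai.types import SourceFetchResponse

    def save_fetched(resp: SourceFetchResponse, path: str) -> None:
        if resp.content is not None:            # text content
            with open(path, "w", encoding="utf-8") as f:
                f.write(resp.content)
        elif resp.content_base_64 is not None:  # binary content, base64 encoded
            with open(path, "wb") as f:
                f.write(base64.b64decode(resp.content_base_64))
        elif resp.presigned_url:                # URL mode: download separately
            print(f"presigned URL: {resp.presigned_url}")
        elif resp.error:
            raise RuntimeError(resp.error)
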
@@ -4,11 +4,11 @@ import typing

  import pydantic
  from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
- from .triple_with_evidence import TripleWithEvidence
+ from .triplet_with_evidence import TripletWithEvidence


- class GraphRelationsResponse(UniversalBaseModel):
-     relations: typing.List[typing.Optional[TripleWithEvidence]] = pydantic.Field()
+ class SourceGraphRelationsResponse(UniversalBaseModel):
+     relations: typing.List[typing.Optional[TripletWithEvidence]] = pydantic.Field()
      """
      List of relations retrieved
      """
@@ -4,21 +4,18 @@ import typing

  import pydantic
  from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+ from .source_model import SourceModel


- class SingleUploadData(UniversalBaseModel):
-     file_id: str = pydantic.Field()
-     """
-     Unique identifier for the file being processed
-     """
-
-     message: str = pydantic.Field()
+ class SourceListResponse(UniversalBaseModel):
+     success: typing.Optional[bool] = None
+     message: typing.Optional[str] = None
+     sources: typing.Optional[typing.List[SourceModel]] = None
+     total: int = pydantic.Field()
      """
-     Status message indicating document parsing scheduled or update completed
+     Total number of sources matching the query.
      """

-     success: typing.Optional[bool] = None
-
      if IS_PYDANTIC_V2:
          model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
      else:
@@ -9,11 +9,21 @@ from .content_model import ContentModel


  class SourceModel(UniversalBaseModel):
-     id: typing.Optional[str] = pydantic.Field(default=None)
+     id: str = pydantic.Field()
      """
      Stable, unique identifier for the source. If omitted, one may be generated upstream.
      """

+     tenant_id: str = pydantic.Field()
+     """
+     Unique identifier for the tenant/organization
+     """
+
+     sub_tenant_id: str = pydantic.Field()
+     """
+     Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
+     """
+
      title: typing.Optional[str] = pydantic.Field(default=None)
      """
      Short human-readable title for the source.
@@ -0,0 +1,5 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ SourceStatus = typing.Union[typing.Literal["queued", "processing", "completed", "failed"], typing.Any]