seekrai 0.5.16__tar.gz → 0.5.17__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. {seekrai-0.5.16 → seekrai-0.5.17}/PKG-INFO +3 -4
  2. {seekrai-0.5.16 → seekrai-0.5.17}/pyproject.toml +1 -1
  3. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/client.py +4 -0
  4. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/resources/explainability.py +19 -11
  5. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/resources/ingestion.py +5 -7
  6. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/resources/vectordb.py +6 -2
  7. seekrai-0.5.17/src/seekrai/types/explainability.py +26 -0
  8. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/vectordb.py +6 -1
  9. seekrai-0.5.16/src/seekrai/types/explainability.py +0 -57
  10. {seekrai-0.5.16 → seekrai-0.5.17}/LICENSE +0 -0
  11. {seekrai-0.5.16 → seekrai-0.5.17}/README.md +0 -0
  12. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/__init__.py +0 -0
  13. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/abstract/__init__.py +0 -0
  14. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/abstract/api_requestor.py +0 -0
  15. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/abstract/response_parsing.py +0 -0
  16. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/constants.py +0 -0
  17. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/error.py +0 -0
  18. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/filemanager.py +0 -0
  19. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/resources/__init__.py +0 -0
  20. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/resources/agents/__init__.py +0 -0
  21. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/resources/agents/agent_inference.py +0 -0
  22. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/resources/agents/agent_observability.py +0 -0
  23. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/resources/agents/agents.py +0 -0
  24. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/resources/agents/python_functions.py +0 -0
  25. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/resources/agents/threads.py +0 -0
  26. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/resources/alignment.py +0 -0
  27. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/resources/chat/__init__.py +0 -0
  28. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/resources/chat/completions.py +0 -0
  29. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/resources/completions.py +0 -0
  30. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/resources/deployments.py +0 -0
  31. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/resources/embeddings.py +0 -0
  32. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/resources/files.py +0 -0
  33. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/resources/finetune.py +0 -0
  34. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/resources/images.py +0 -0
  35. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/resources/models.py +0 -0
  36. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/resources/projects.py +0 -0
  37. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/resources/resource_base.py +0 -0
  38. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/seekrflow_response.py +0 -0
  39. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/__init__.py +0 -0
  40. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/abstract.py +0 -0
  41. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/agents/__init__.py +0 -0
  42. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/agents/agent.py +0 -0
  43. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/agents/observability.py +0 -0
  44. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/agents/python_functions.py +0 -0
  45. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/agents/runs.py +0 -0
  46. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/agents/threads.py +0 -0
  47. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/agents/tools/__init__.py +0 -0
  48. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/agents/tools/env_model_config.py +0 -0
  49. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/agents/tools/schemas/__init__.py +0 -0
  50. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/agents/tools/schemas/file_search.py +0 -0
  51. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/agents/tools/schemas/file_search_env.py +0 -0
  52. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/agents/tools/schemas/run_python.py +0 -0
  53. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/agents/tools/schemas/run_python_env.py +0 -0
  54. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/agents/tools/schemas/web_search.py +0 -0
  55. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/agents/tools/schemas/web_search_env.py +0 -0
  56. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/agents/tools/tool.py +0 -0
  57. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/agents/tools/tool_types.py +0 -0
  58. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/alignment.py +0 -0
  59. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/chat_completions.py +0 -0
  60. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/common.py +0 -0
  61. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/completions.py +0 -0
  62. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/deployments.py +0 -0
  63. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/embeddings.py +0 -0
  64. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/error.py +0 -0
  65. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/files.py +0 -0
  66. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/finetune.py +0 -0
  67. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/images.py +0 -0
  68. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/ingestion.py +0 -0
  69. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/models.py +0 -0
  70. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/projects.py +0 -0
  71. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/utils/__init__.py +0 -0
  72. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/utils/_log.py +0 -0
  73. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/utils/api_helpers.py +0 -0
  74. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/utils/files.py +0 -0
  75. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/utils/tools.py +0 -0
  76. {seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/version.py +0 -0
{seekrai-0.5.16 → seekrai-0.5.17}/PKG-INFO

@@ -1,7 +1,8 @@
-Metadata-Version: 2.3
+Metadata-Version: 2.1
 Name: seekrai
-Version: 0.5.16
+Version: 0.5.17
 Summary: Python client for SeekrAI
+Home-page: https://gitlab.cb.ntent.com/ml/seekr-py
 License: Apache-2.0
 Author: SeekrFlow
 Author-email: support@seekr.com
@@ -12,8 +13,6 @@ Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
-Classifier: Programming Language :: Python :: 3.12
-Classifier: Programming Language :: Python :: 3.13
 Requires-Dist: click (>=8.1.7,<9.0.0)
 Requires-Dist: eval-type-backport (>=0.1.3,<0.3.0)
 Requires-Dist: filelock (>=3.13.1,<4.0.0)
{seekrai-0.5.16 → seekrai-0.5.17}/pyproject.toml

@@ -14,7 +14,7 @@ build-backend = "poetry.core.masonry.api"

 [tool.poetry]
 name = "seekrai"
-version = "0.5.16"
+version = "0.5.17"
 authors = [
     "SeekrFlow <support@seekr.com>"
 ]
{seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/client.py

@@ -25,6 +25,7 @@ class SeekrFlow:
     vector_database: resources.VectorDatabase
     agents: resources.Agents
     observability: resources.AgentObservability
+    explainability: resources.Explainability

     # client options
     client: SeekrFlowClient
@@ -91,6 +92,7 @@ class SeekrFlow:
         self.vector_database = resources.VectorDatabase(self.client)
         self.agents = resources.Agents(self.client)
         self.observability = resources.AgentObservability(self.client)
+        self.explainability = resources.Explainability(self.client)


 class AsyncSeekrFlow:
@@ -108,6 +110,7 @@ class AsyncSeekrFlow:
     vector_database: resources.AsyncVectorDatabase
     agents: resources.AsyncAgents
     observability: resources.AsyncAgentObservability
+    explainability: resources.AsyncExplainability

     # client options
     client: SeekrFlowClient
@@ -174,6 +177,7 @@ class AsyncSeekrFlow:
         self.vector_database = resources.AsyncVectorDatabase(self.client)
         self.agents = resources.AsyncAgents(self.client)
         self.observability = resources.AsyncAgentObservability(self.client)
+        self.explainability = resources.AsyncExplainability(self.client)


 Client = SeekrFlow
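With this change the sync and async clients expose the new explainability resource alongside agents and observability. A minimal sketch of how the attribute would be reached in 0.5.17; the api_key keyword is an assumption for illustration and is not shown in this diff:

from seekrai import SeekrFlow

client = SeekrFlow(api_key="sk-...")  # constructor arguments assumed; check the SDK docs

# New in 0.5.17: the Explainability resource is wired onto the client.
print(type(client.explainability))   # resources.Explainability
# AsyncSeekrFlow gains the matching `explainability` attribute of type AsyncExplainability.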
{seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/resources/explainability.py

@@ -2,7 +2,6 @@ from typing import Optional

 from seekrai.abstract import api_requestor
 from seekrai.resources.resource_base import ResourceBase
-from seekrai.seekrflow_response import SeekrFlowResponse
 from seekrai.types import (
     SeekrFlowRequest,
 )
@@ -14,15 +13,19 @@ from seekrai.types.explainability import (

 class Explainability(ResourceBase):
     def get_influential_finetuning_data(
-        self, model_id: str, question: str, answer: Optional[str], k: int = 5
+        self,
+        model_id: str,
+        question: str,
+        system_prompt: Optional[str] = None,
+        answer: Optional[str] = None,
     ) -> InfluentialFinetuningDataResponse:
         """
         Retrieve influential QA pair fine tuning data for a specific model.
         Args:
         - model_id (str): ID of the model to explain.
         - question (str): question from user,
+        - system_prompt (str | None): System prompt for the user's question.
         - answer (str | None): answer of the finetuned model to the question; if None, the answer is retrieved from the finetuned model specified by model_id,
-        - k (int): the number of results to be retrieved (5 by default)
         Returns:
             InfluentialFinetuningDataResponse: Object containing the influential fine tuning data.
         """
@@ -31,7 +34,7 @@ class Explainability(ResourceBase):
         )
         # Create query parameters dictionary
         parameter_payload = InfluentialFinetuningDataRequest(
-            question=question, answer=answer, k=k
+            question=question, system_prompt=system_prompt, answer=answer
         ).model_dump()

         # if limit is not None:
@@ -41,26 +44,29 @@ class Explainability(ResourceBase):
         response, _, _ = requestor.request(
             options=SeekrFlowRequest(
                 method="GET",
-                url=f"v1/flow/explain/models/{model_id}/influential-finetuning-data",
+                url=f"flow/explain/models/{model_id}/influential-finetuning-data",
                 params=parameter_payload,
             ),
             stream=False,
         )
-        assert isinstance(response, SeekrFlowResponse)
         return InfluentialFinetuningDataResponse(**response.data)


 class AsyncExplainability(ResourceBase):
     async def get_influential_finetuning_data(
-        self, model_id: str, question: str, answer: Optional[str], k: int = 5
+        self,
+        model_id: str,
+        question: str,
+        system_prompt: Optional[str] = None,
+        answer: Optional[str] = None,
     ) -> InfluentialFinetuningDataResponse:
         """
         Retrieve influential QA pair finetuning data for a specific model asynchronously.
         Args:
         - model_id (str): ID of the model to explain.
         - question (str): question from user,
+        - system_prompt (str | None): System prompt for the user's question.
         - answer (str | None): answer of the finetuned model to the question; if None, the answer is retrieved from the finetuned model specified by model_id,
-        - k (int): the number of results to be retrieved (5 by default),
         Returns:
             InfluentialFinetuningDataResponse: Object containing the influential finetuning data.
         """
@@ -69,16 +75,18 @@ class AsyncExplainability(ResourceBase):
         )
         # Create query parameters dictionary
         parameter_payload = InfluentialFinetuningDataRequest(
-            model_id=model_id, question=question, answer=answer, k=k
+            model_id=model_id,
+            question=question,
+            system_prompt=system_prompt,
+            answer=answer,
         ).model_dump()

         response, _, _ = await requestor.arequest(
             options=SeekrFlowRequest(
                 method="GET",
-                url=f"v1/flow/explain/models/{model_id}/influential-finetuning-data",
+                url=f"flow/explain/models/{model_id}/influential-finetuning-data",
                 params=parameter_payload,
             ),
             stream=False,
         )
-        assert isinstance(response, SeekrFlowResponse)
         return InfluentialFinetuningDataResponse(**response.data)
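Net effect for callers: the k parameter is gone, system_prompt is new, and answer now defaults to None. A sketch of the updated call, assuming the client is constructed as in the earlier example and using placeholder IDs and strings:

from seekrai import SeekrFlow

client = SeekrFlow(api_key="sk-...")  # assumed constructor

# 0.5.16: get_influential_finetuning_data(model_id, question, answer, k=5)
# 0.5.17: k removed, system_prompt added, answer optional.
response = client.explainability.get_influential_finetuning_data(
    model_id="ft-example-model",                       # hypothetical model ID
    question="How do I rotate my API key?",
    system_prompt="You are a concise support agent.",  # new optional parameter
    answer=None,  # None -> the service derives the answer from the fine-tuned model
)
for item in response.results:
    print(item)
print(response.version)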
{seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/resources/ingestion.py

@@ -1,4 +1,4 @@
-from typing import List
+from typing import List, Optional

 from seekrai.abstract import api_requestor
 from seekrai.resources.resource_base import ResourceBase
@@ -13,6 +13,7 @@ class Ingestion(ResourceBase):
     def ingest(
         self,
         files: List[str],
+        method: Optional[str] = "accuracy-optimized",
     ) -> IngestionResponse:
         """
         Start an ingestion job for the specified files.
@@ -27,9 +28,7 @@ class Ingestion(ResourceBase):
             client=self._client,
         )

-        parameter_payload = IngestionRequest(
-            files=files,
-        ).model_dump()
+        parameter_payload = IngestionRequest(files=files, method=method).model_dump()

         response, _, _ = requestor.request(
             options=SeekrFlowRequest(
@@ -95,6 +94,7 @@ class AsyncIngestion(ResourceBase):
     async def ingest(
         self,
         files: List[str],
+        method: Optional[str] = "accuracy-optimized",
     ) -> IngestionResponse:
         """
         Start an ingestion job for the specified files asynchronously.
@@ -109,9 +109,7 @@ class AsyncIngestion(ResourceBase):
             client=self._client,
         )

-        parameter_payload = IngestionRequest(
-            files=files,
-        ).model_dump()
+        parameter_payload = IngestionRequest(files=files, method=method).model_dump()

         response, _, _ = await requestor.arequest(
             options=SeekrFlowRequest(
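Both the sync and async ingest methods now take an optional method argument, forwarded to IngestionRequest and defaulting to "accuracy-optimized". A sketch; the ingestion attribute name on the client and the file ID are assumptions, since this diff shows only the resource classes:

from seekrai import SeekrFlow

client = SeekrFlow(api_key="sk-...")  # assumed constructor

# `client.ingestion` is an assumed attribute name; the diff does not show the client wiring.
job = client.ingestion.ingest(
    files=["file-abc123"],        # hypothetical file ID
    method="accuracy-optimized",  # new optional parameter; omitting it yields the same value
)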
{seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/resources/vectordb.py

@@ -107,7 +107,8 @@ class VectorDatabase(ResourceBase):
         self,
         database_id: str,
         files: List[str],
-        method: str,
+        method: Optional[str] = "accuracy-optimized",
+        chunking_method: Optional[str] = "markdown",
         token_count: int = 800,
         overlap_tokens: int = 100,
     ) -> VectorDatabaseIngestionResponse:
@@ -129,6 +130,7 @@ class VectorDatabase(ResourceBase):
         parameter_payload = VectorDatabaseIngestionRequest(
             file_ids=files,
             method=method,
+            chunking_method=chunking_method,
             token_count=token_count,
             overlap_tokens=overlap_tokens,
         ).model_dump()
@@ -338,7 +340,8 @@ class AsyncVectorDatabase(ResourceBase):
         self,
         database_id: str,
         files: List[str],
-        method: str,
+        method: Optional[str] = "accuracy-optimized",
+        chunking_method: Optional[str] = "markdown",
         token_count: int = 800,
         overlap_tokens: int = 100,
     ) -> VectorDatabaseIngestionResponse:
@@ -360,6 +363,7 @@ class AsyncVectorDatabase(ResourceBase):
         parameter_payload = VectorDatabaseIngestionRequest(
             file_ids=files,
             method=method,
+            chunking_method=chunking_method,
             token_count=token_count,
             overlap_tokens=overlap_tokens,
         ).model_dump()
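For vector database ingestion, method is no longer required (it now defaults to "accuracy-optimized") and a new chunking_method (default "markdown") is forwarded in the request payload. A sketch of the call; the method name `ingest` is an assumption since the hunk shows only its parameter list, and the IDs are placeholders:

from seekrai import SeekrFlow

client = SeekrFlow(api_key="sk-...")  # assumed constructor

resp = client.vector_database.ingest(   # `ingest` is an assumed method name
    database_id="vdb-123",              # hypothetical database ID
    files=["file-abc123"],              # hypothetical file ID
    # method and chunking_method can now be omitted; defaults shown explicitly here.
    method="accuracy-optimized",
    chunking_method="markdown",
    token_count=800,
    overlap_tokens=100,
)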
seekrai-0.5.17/src/seekrai/types/explainability.py (new file)

@@ -0,0 +1,26 @@
+from __future__ import annotations
+
+from typing import Any, Dict, List, Optional
+
+from pydantic import Field
+
+from seekrai.types.abstract import BaseModel
+
+
+class InfluentialFinetuningDataResponse(BaseModel):
+    results: List[Dict[str, Any]] = Field(
+        ..., description="List of influential training data results"
+    )
+    version: str = Field(..., description="Version of the explainability service")
+
+
+class InfluentialFinetuningDataRequest(BaseModel):
+    question: str = Field(..., description="Question from user")
+    system_prompt: Optional[str] = Field(
+        None,
+        description="System prompt for the user's question.",
+    )
+    answer: Optional[str] = Field(
+        None,
+        description="Answer of the finetuned model to the question; if None, the answer is retrieved from the finetuned model",
+    )
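The resource classes serialize this request with model_dump() and send it as GET query parameters, so the new shape can be inspected locally. A sketch with placeholder values:

from seekrai.types.explainability import InfluentialFinetuningDataRequest

req = InfluentialFinetuningDataRequest(question="What is the refund policy?")
# system_prompt and answer default to None and can be omitted.
print(req.model_dump())
# {'question': 'What is the refund policy?', 'system_prompt': None, 'answer': None}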
{seekrai-0.5.16 → seekrai-0.5.17}/src/seekrai/types/vectordb.py

@@ -37,7 +37,12 @@ class VectorDatabaseIngestionRequest(BaseModel):
     """Request model for creating a new vector database ingestion job."""

     file_ids: List[str] = Field(..., description="List of file IDs to ingest")
-    method: str = Field(..., description="Method to use for ingestion")
+    method: Optional[str] = Field(
+        default="accuracy-optimized", description="Method to use for ingestion"
+    )
+    chunking_method: Optional[str] = Field(
+        default="markdown", description="Configure how your content will be segmented"
+    )
     token_count: int = Field(default=800, description="Token count for ingestion")
     overlap_tokens: int = Field(default=100, description="Overlap tokens for ingestion")

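With the loosened request model, only file_ids remains required; method and chunking_method fall back to their new defaults. A sketch (the printed dict assumes no fields beyond those shown in this hunk):

from seekrai.types.vectordb import VectorDatabaseIngestionRequest

req = VectorDatabaseIngestionRequest(file_ids=["file-abc123"])  # hypothetical file ID
print(req.model_dump())
# {'file_ids': ['file-abc123'], 'method': 'accuracy-optimized',
#  'chunking_method': 'markdown', 'token_count': 800, 'overlap_tokens': 100}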
seekrai-0.5.16/src/seekrai/types/explainability.py (removed)

@@ -1,57 +0,0 @@
-from __future__ import annotations
-
-from datetime import datetime
-from enum import Enum
-from typing import Any, Dict, List, Literal
-
-from pydantic import Field
-
-from seekrai.types.abstract import BaseModel
-
-
-class InfluentialFinetuningDataResponse(BaseModel):
-    results: List[Dict[str, Any]]
-    version: str
-
-
-class InfluentialFinetuningDataRequest(BaseModel):
-    question: str
-    answer: str = Field(
-        default="",
-        description="Response could be generated or given",
-    )
-    k: int
-
-
-class ExplainabilityJobStatus(Enum):
-    QUEUED = "queued"
-    RUNNING = "running"
-    COMPLETED = "completed"
-    FAILED = "failed"
-
-    # TODO should titles along the following get added:
-    # create_index
-    # populate_index
-    # delete_index
-    # influential-finetuning-data
-
-
-class ExplainabilityRequest(BaseModel):
-    files: List[str] = Field(
-        default=..., description="List of file ids to use for fine tuning"
-    )
-    method: str = Field(default="best", description="Method to use for explainability")
-
-
-class ExplainabilityResponse(BaseModel):
-    id: str = Field(default=..., description="Explainability job ID")
-    created_at: datetime
-    status: ExplainabilityJobStatus
-    output_files: List[str]
-
-
-class ExplainabilityList(BaseModel):
-    # object type
-    object: Literal["list"] | None = None
-    # list of fine-tune job objects
-    data: List[ExplainabilityResponse] | None = None