noesium-0.1.0-py3-none-any.whl
This diff shows the content of a publicly available package version as released to one of the supported registries, and is provided for informational purposes only.
- noesium/core/__init__.py +4 -0
- noesium/core/agent/__init__.py +14 -0
- noesium/core/agent/base.py +227 -0
- noesium/core/consts.py +6 -0
- noesium/core/goalith/conflict/conflict.py +104 -0
- noesium/core/goalith/conflict/detector.py +53 -0
- noesium/core/goalith/decomposer/__init__.py +6 -0
- noesium/core/goalith/decomposer/base.py +46 -0
- noesium/core/goalith/decomposer/callable_decomposer.py +65 -0
- noesium/core/goalith/decomposer/llm_decomposer.py +326 -0
- noesium/core/goalith/decomposer/prompts.py +140 -0
- noesium/core/goalith/decomposer/simple_decomposer.py +61 -0
- noesium/core/goalith/errors.py +22 -0
- noesium/core/goalith/goalgraph/graph.py +526 -0
- noesium/core/goalith/goalgraph/node.py +179 -0
- noesium/core/goalith/replanner/base.py +31 -0
- noesium/core/goalith/replanner/replanner.py +36 -0
- noesium/core/goalith/service.py +26 -0
- noesium/core/llm/__init__.py +154 -0
- noesium/core/llm/base.py +152 -0
- noesium/core/llm/litellm.py +528 -0
- noesium/core/llm/llamacpp.py +487 -0
- noesium/core/llm/message.py +184 -0
- noesium/core/llm/ollama.py +459 -0
- noesium/core/llm/openai.py +520 -0
- noesium/core/llm/openrouter.py +89 -0
- noesium/core/llm/prompt.py +551 -0
- noesium/core/memory/__init__.py +11 -0
- noesium/core/memory/base.py +464 -0
- noesium/core/memory/memu/__init__.py +24 -0
- noesium/core/memory/memu/config/__init__.py +26 -0
- noesium/core/memory/memu/config/activity/config.py +46 -0
- noesium/core/memory/memu/config/event/config.py +46 -0
- noesium/core/memory/memu/config/markdown_config.py +241 -0
- noesium/core/memory/memu/config/profile/config.py +48 -0
- noesium/core/memory/memu/llm_adapter.py +129 -0
- noesium/core/memory/memu/memory/__init__.py +31 -0
- noesium/core/memory/memu/memory/actions/__init__.py +40 -0
- noesium/core/memory/memu/memory/actions/add_activity_memory.py +299 -0
- noesium/core/memory/memu/memory/actions/base_action.py +342 -0
- noesium/core/memory/memu/memory/actions/cluster_memories.py +262 -0
- noesium/core/memory/memu/memory/actions/generate_suggestions.py +198 -0
- noesium/core/memory/memu/memory/actions/get_available_categories.py +66 -0
- noesium/core/memory/memu/memory/actions/link_related_memories.py +515 -0
- noesium/core/memory/memu/memory/actions/run_theory_of_mind.py +254 -0
- noesium/core/memory/memu/memory/actions/update_memory_with_suggestions.py +514 -0
- noesium/core/memory/memu/memory/embeddings.py +130 -0
- noesium/core/memory/memu/memory/file_manager.py +306 -0
- noesium/core/memory/memu/memory/memory_agent.py +578 -0
- noesium/core/memory/memu/memory/recall_agent.py +376 -0
- noesium/core/memory/memu/memory_store.py +628 -0
- noesium/core/memory/models.py +149 -0
- noesium/core/msgbus/__init__.py +12 -0
- noesium/core/msgbus/base.py +395 -0
- noesium/core/orchestrix/__init__.py +0 -0
- noesium/core/py.typed +0 -0
- noesium/core/routing/__init__.py +20 -0
- noesium/core/routing/base.py +66 -0
- noesium/core/routing/router.py +241 -0
- noesium/core/routing/strategies/__init__.py +9 -0
- noesium/core/routing/strategies/dynamic_complexity.py +361 -0
- noesium/core/routing/strategies/self_assessment.py +147 -0
- noesium/core/routing/types.py +38 -0
- noesium/core/toolify/__init__.py +39 -0
- noesium/core/toolify/base.py +360 -0
- noesium/core/toolify/config.py +138 -0
- noesium/core/toolify/mcp_integration.py +275 -0
- noesium/core/toolify/registry.py +214 -0
- noesium/core/toolify/toolkits/__init__.py +1 -0
- noesium/core/tracing/__init__.py +37 -0
- noesium/core/tracing/langgraph_hooks.py +308 -0
- noesium/core/tracing/opik_tracing.py +144 -0
- noesium/core/tracing/token_tracker.py +166 -0
- noesium/core/utils/__init__.py +10 -0
- noesium/core/utils/logging.py +172 -0
- noesium/core/utils/statistics.py +12 -0
- noesium/core/utils/typing.py +17 -0
- noesium/core/vector_store/__init__.py +79 -0
- noesium/core/vector_store/base.py +94 -0
- noesium/core/vector_store/pgvector.py +304 -0
- noesium/core/vector_store/weaviate.py +383 -0
- noesium-0.1.0.dist-info/METADATA +525 -0
- noesium-0.1.0.dist-info/RECORD +86 -0
- noesium-0.1.0.dist-info/WHEEL +5 -0
- noesium-0.1.0.dist-info/licenses/LICENSE +21 -0
- noesium-0.1.0.dist-info/top_level.txt +1 -0
noesium/core/goalith/replanner/base.py
ADDED
@@ -0,0 +1,31 @@
+from abc import ABC, abstractmethod
+
+from noesium.core.goalith.decomposer.base import GoalDecomposer
+from noesium.core.goalith.goalgraph.graph import GoalGraph
+from noesium.core.goalith.goalgraph.node import GoalNode
+
+
+class BaseReplanner(ABC):
+    """
+    Base class for replanners.
+    """
+
+    def __init__(self, graph: GoalGraph, decomposer: GoalDecomposer):
+        self._goal_graph = graph
+        self._decomposer = decomposer
+
+    @property
+    def goal_graph(self):
+        """Get the graph."""
+        return self._goal_graph
+
+    @property
+    def decomposer(self):
+        """Get the decomposer."""
+        return self._decomposer
+
+    @abstractmethod
+    def replan(self, node: GoalNode, **kwargs) -> bool:
+        """
+        Replan a node in the graph.
+        """
noesium/core/goalith/replanner/replanner.py
ADDED
@@ -0,0 +1,36 @@
+from noesium.core.goalith.decomposer.base import GoalDecomposer
+from noesium.core.goalith.goalgraph.graph import GoalGraph
+from noesium.core.goalith.goalgraph.node import GoalNode
+
+from .base import BaseReplanner
+
+
+class Replanner(BaseReplanner):
+    """
+    Handles replanning operations when triggers fire.
+
+    Hooks into decomposition and scheduling modules to adjust plans.
+    """
+
+    def __init__(self, graph: GoalGraph, decomposer: GoalDecomposer, **kwargs):
+        """
+        Initialize replanner.
+
+        Args:
+            graph: The goal graph
+            decomposer: The decomposer to use
+        """
+        super().__init__(graph, decomposer, **kwargs)
+
+    def replan(self, node: GoalNode, **kwargs) -> bool:
+        """
+        Replan a node in the graph store.
+
+        Args:
+            node: The node to replan
+            **kwargs: Additional arguments
+
+        Returns:
+            True if replanning was successful, False otherwise
+        """
+        raise NotImplementedError("Replanner.replan() must be implemented by a subclass.")
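As shipped, `Replanner.replan` only raises `NotImplementedError`, so concrete behavior comes from subclassing `BaseReplanner`. The sketch below is hypothetical: the `RetryReplanner` name and the `decompose`/`add_node` calls are assumptions about the decomposer and graph APIs, not methods confirmed by this diff.

```python
from noesium.core.goalith.goalgraph.node import GoalNode
from noesium.core.goalith.replanner.base import BaseReplanner


class RetryReplanner(BaseReplanner):
    """Hypothetical replanner that re-decomposes a failed node into fresh subgoals."""

    def replan(self, node: GoalNode, **kwargs) -> bool:
        try:
            # Assumed API: GoalDecomposer.decompose(node) -> list[GoalNode].
            subgoals = self.decomposer.decompose(node)
        except Exception:
            return False  # leave the graph unchanged if decomposition fails
        for subgoal in subgoals:
            # Assumed API: GoalGraph.add_node(subgoal) attaches the new subgoal.
            self.goal_graph.add_node(subgoal)
        return True
```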
noesium/core/goalith/service.py
ADDED
@@ -0,0 +1,26 @@
+from typing import Any, Dict, List, Optional
+
+
+class GoalithService:
+    def __init__(self):
+        pass
+
+    def create_goal(
+        self,
+        description: str,
+        priority: float = 1.0,
+        context: Optional[Dict[str, Any]] = None,
+        tags: Optional[List[str]] = None,
+    ) -> str:
+        """
+        Create a new goal in the system.
+
+        Args:
+            description: Description of the goal
+            priority: Priority level
+            context: Additional context data
+            tags: Tags for categorization
+
+        Returns:
+            ID of the created goal
+        """
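In 0.1.0, `create_goal` carries a docstring but no body, so it returns `None` rather than the documented goal ID; the call shape is still clear from the signature. A minimal usage sketch (argument values are illustrative):

```python
from noesium.core.goalith.service import GoalithService

service = GoalithService()

# Keyword arguments mirror the signature above.
goal_id = service.create_goal(
    description="Summarize the quarterly report",
    priority=2.0,
    context={"requested_by": "analyst"},
    tags=["reporting", "finance"],
)
```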
noesium/core/llm/__init__.py
ADDED
@@ -0,0 +1,154 @@
+import os
+from typing import Optional
+
+from .base import BaseLLMClient
+
+# Deprecated imports
+from .litellm import LLMClient as LitellmClient
+from .litellm import LLMClient as LitellmLLMClient
+from .openai import LLMClient as OpenAIClient
+from .openai import LLMClient as OpenAILLMClient
+from .openrouter import LLMClient as OpenRouterClient
+from .openrouter import LLMClient as OpenRouterLLMClient
+
+# Optional import - llamacpp might not be available
+try:
+    from .llamacpp import LLMClient as LlamaCppClient
+    from .llamacpp import LLMClient as LlamaCppLLMClient
+
+    LLAMACPP_AVAILABLE = True
+except ImportError:
+    LlamaCppLLMClient = None
+    LLAMACPP_AVAILABLE = False
+
+# Optional import - ollama might not be available
+try:
+    from .ollama import LLMClient as OllamaClient
+    from .ollama import LLMClient as OllamaLLMClient
+
+    OLLAMA_AVAILABLE = True
+except ImportError:
+    OllamaLLMClient = None
+    OLLAMA_AVAILABLE = False
+
+
+__all__ = [
+    "get_llm_client",
+    "BaseLLMClient",
+    # -- Deprecated imports --
+    "LitellmLLMClient",
+    "LlamaCppLLMClient",
+    "OpenRouterLLMClient",
+    "OllamaLLMClient",
+    "OpenAILLMClient",
+    # -- New imports --
+    "LitellmClient",
+    "LlamaCppClient",
+    "OllamaClient",
+    "OpenAIClient",
+    "OpenRouterClient",
+]
+
+#############################
+# Common LLM helper functions
+#############################
+
+
+def get_llm_client(
+    provider: str = os.getenv("COGENTS_LLM_PROVIDER", "openai"),
+    base_url: Optional[str] = None,
+    api_key: Optional[str] = None,
+    structured_output: bool = True,
+    instructor: Optional[bool] = None,
+    chat_model: Optional[str] = None,
+    vision_model: Optional[str] = None,
+    embed_model: Optional[str] = None,
+    **kwargs,
+):
+    """
+    Get an LLM client instance based on the specified provider.
+
+    Args:
+        provider: LLM provider to use ("openrouter", "openai", "litellm" always available; "ollama", "llamacpp" require optional dependencies)
+        base_url: Base URL for API (used by openai and ollama providers)
+        api_key: API key for authentication (used by openai and openrouter providers)
+        structured_output: Whether to enable structured output (default: True)
+        instructor: Whether to enable instructor for structured output (deprecated, use structured_output instead)
+        chat_model: Model to use for chat completions
+        vision_model: Model to use for vision tasks
+        embed_model: Model to use for embeddings
+        **kwargs: Additional provider-specific arguments:
+            - llamacpp: model_path, n_ctx, n_gpu_layers, etc.
+            - others: depends on provider
+        Note: 'instructor' in kwargs will be removed to avoid conflicts
+
+    Returns:
+        LLMClient instance for the specified provider
+
+    Raises:
+        ValueError: If provider is not supported or dependencies are missing
+    """
+    # Handle backward compatibility: enable instructor if either parameter is True
+    # If instructor is explicitly provided (True/False), use it OR structured_output
+    # If instructor is None, use structured_output
+    enable_instructor = (instructor if instructor is not None else False) or structured_output
+
+    # Remove 'instructor' from kwargs to avoid duplicate argument errors
+    kwargs.pop("instructor", None)
+
+    if provider == "openrouter":
+        return OpenRouterLLMClient(
+            base_url=base_url,
+            api_key=api_key,
+            instructor=enable_instructor,
+            chat_model=chat_model,
+            vision_model=vision_model,
+            embed_model=embed_model,
+            **kwargs,
+        )
+    elif provider == "openai":
+        return OpenAILLMClient(
+            base_url=base_url,
+            api_key=api_key,
+            instructor=enable_instructor,
+            chat_model=chat_model,
+            vision_model=vision_model,
+            embed_model=embed_model,
+            **kwargs,
+        )
+    elif provider == "ollama":
+        if not OLLAMA_AVAILABLE:
+            raise ValueError("ollama provider is not available. Please install the required dependencies.")
+        return OllamaLLMClient(
+            base_url=base_url,
+            api_key=api_key,
+            instructor=enable_instructor,
+            chat_model=chat_model,
+            vision_model=vision_model,
+            embed_model=embed_model,
+            **kwargs,
+        )
+    elif provider == "llamacpp":
+        if not LLAMACPP_AVAILABLE:
+            raise ValueError("llamacpp provider is not available. Please install the required dependencies.")
+        return LlamaCppLLMClient(
+            instructor=enable_instructor,
+            chat_model=chat_model,
+            vision_model=vision_model,
+            embed_model=embed_model,
+            **kwargs,
+        )
+    elif provider == "litellm":
+        return LitellmLLMClient(
+            base_url=base_url,
+            api_key=api_key,
+            instructor=enable_instructor,
+            chat_model=chat_model,
+            vision_model=vision_model,
+            embed_model=embed_model,
+            **kwargs,
+        )
+    else:
+        raise ValueError(
+            f"Unsupported provider: {provider}. Supported providers: openrouter, openai, ollama, llamacpp, litellm"
+        )
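Note that the `provider` default is resolved once, at import time, from `COGENTS_LLM_PROVIDER` (falling back to `"openai"`). A usage sketch; the model identifier is a placeholder, not a value tested against this package:

```python
import os

from noesium.core.llm import get_llm_client

client = get_llm_client(
    provider="openrouter",
    api_key=os.environ["OPENROUTER_API_KEY"],
    chat_model="openai/gpt-4o-mini",  # placeholder model identifier
)

reply = client.completion(
    messages=[{"role": "user", "content": "Say hello."}],
    temperature=0.2,
)
print(reply)
```

One caveat visible in the code above: when an optional provider fails to import, only the deprecated `*LLMClient` alias is reset to `None`, leaving `LlamaCppClient`/`OllamaClient` undefined even though they appear in `__all__`; guarding on `LLAMACPP_AVAILABLE`/`OLLAMA_AVAILABLE` is the safer path.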
noesium/core/llm/base.py
ADDED
@@ -0,0 +1,152 @@
+import os
+from abc import ABC, abstractmethod
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Tuple, Type, TypeVar, Union
+
+from noesium.core.consts import DEFAULT_EMBEDDING_DIMS
+
+T = TypeVar("T")
+
+
+class BaseLLMClient(ABC):
+    """Abstract base class for LLM clients."""
+
+    def __init__(self, **kwargs):
+        """
+        Initialize the LLM client.
+
+        Args:
+            **kwargs: Additional arguments to pass to the LLM client
+        """
+        self.debug = os.getenv("COGENTS_DEBUG", "false").lower() == "true"
+
+    @abstractmethod
+    def completion(
+        self,
+        messages: List[Dict[str, str]],
+        temperature: float = 0.7,
+        max_tokens: Optional[int] = None,
+        stream: bool = False,
+        **kwargs,
+    ) -> Union[str, Dict[str, Any]]:
+        """
+        Generate chat completion using the configured model.
+
+        Args:
+            messages: List of message dictionaries with 'role' and 'content' keys
+            temperature: Sampling temperature (0.0 to 2.0)
+            max_tokens: Maximum tokens to generate
+            stream: Whether to stream the response
+            **kwargs: Additional arguments to pass to the underlying provider API
+
+        Returns:
+            Generated text response or streaming response
+        """
+
+    @abstractmethod
+    def structured_completion(
+        self,
+        messages: List[Dict[str, str]],
+        response_model: Type[T],
+        temperature: float = 0.7,
+        max_tokens: Optional[int] = None,
+        attempts: int = 2,
+        backoff: float = 0.5,
+        **kwargs,
+    ) -> T:
+        """
+        Generate structured completion using instructor.
+
+        Args:
+            messages: List of message dictionaries with 'role' and 'content' keys
+            response_model: Pydantic model class for structured output
+            temperature: Sampling temperature (0.0 to 2.0)
+            max_tokens: Maximum tokens to generate
+            attempts: Number of attempts to make
+            backoff: Backoff factor for exponential backoff
+            **kwargs: Additional arguments to pass to instructor
+
+        Returns:
+            Structured response as the specified model type
+        """
+
+    @abstractmethod
+    def understand_image(
+        self,
+        image_path: Union[str, Path],
+        prompt: str,
+        temperature: float = 0.7,
+        max_tokens: Optional[int] = None,
+        **kwargs,
+    ) -> str:
+        """
+        Analyze an image using the configured vision model.
+
+        Args:
+            image_path: Path to the image file
+            prompt: Text prompt describing what to analyze in the image
+            temperature: Sampling temperature
+            max_tokens: Maximum tokens to generate
+            **kwargs: Additional arguments
+
+        Returns:
+            Analysis of the image
+        """
+
+    @abstractmethod
+    def understand_image_from_url(
+        self,
+        image_url: str,
+        prompt: str,
+        temperature: float = 0.7,
+        max_tokens: Optional[int] = None,
+        **kwargs,
+    ) -> str:
+        """
+        Analyze an image from URL using the configured vision model.
+
+        Args:
+            image_url: URL of the image
+            prompt: Text prompt describing what to analyze in the image
+            temperature: Sampling temperature
+            max_tokens: Maximum tokens to generate
+            **kwargs: Additional arguments
+
+        Returns:
+            Analysis of the image
+        """
+
+    @abstractmethod
+    def embed(self, text: str) -> List[float]:
+        """Generate embeddings for input text"""
+
+    @abstractmethod
+    def embed_batch(self, chunks: List[str]) -> List[List[float]]:
+        """Generate embeddings for a batch of text chunks"""
+
+    @abstractmethod
+    def rerank(
+        self,
+        query: str,
+        chunks: List[str],
+    ) -> List[Tuple[float, int, str]]:
+        """
+        Rerank chunks based on their relevance to the query.
+
+        Args:
+            query: The query to rank against
+            chunks: List of text chunks to rerank
+
+        Returns:
+            List of tuples (similarity_score, original_index, chunk_text)
+            sorted by similarity score in descending order
+        """
+
+    def get_embedding_dimensions(self) -> int:
+        """
+        Get the expected dimensions for embeddings from this provider.
+
+        Returns:
+            int: Expected embedding dimensions
+        """
+        return int(os.getenv("COGENTS_EMBEDDING_DIMS", str(DEFAULT_EMBEDDING_DIMS)))
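A provider must implement all seven abstract methods; `get_embedding_dimensions` is the only concrete one. The echo stub below is a hypothetical test double, not a shipped client:

```python
from pathlib import Path
from typing import List, Tuple, Union

from noesium.core.llm.base import BaseLLMClient


class EchoLLMClient(BaseLLMClient):
    """Hypothetical stub that satisfies the abstract surface without calling a model."""

    def completion(self, messages, temperature=0.7, max_tokens=None, stream=False, **kwargs):
        return messages[-1]["content"]  # echo the last message back

    def structured_completion(self, messages, response_model, temperature=0.7,
                              max_tokens=None, attempts=2, backoff=0.5, **kwargs):
        return response_model()  # assumes the model defines defaults for every field

    def understand_image(self, image_path: Union[str, Path], prompt, temperature=0.7,
                         max_tokens=None, **kwargs) -> str:
        return f"stub analysis of {image_path}"

    def understand_image_from_url(self, image_url: str, prompt, temperature=0.7,
                                  max_tokens=None, **kwargs) -> str:
        return f"stub analysis of {image_url}"

    def embed(self, text: str) -> List[float]:
        # Dimensionality follows COGENTS_EMBEDDING_DIMS / DEFAULT_EMBEDDING_DIMS.
        return [0.0] * self.get_embedding_dimensions()

    def embed_batch(self, chunks: List[str]) -> List[List[float]]:
        return [self.embed(chunk) for chunk in chunks]

    def rerank(self, query: str, chunks: List[str]) -> List[Tuple[float, int, str]]:
        # No real scoring: keep input order with zero similarity.
        return [(0.0, i, chunk) for i, chunk in enumerate(chunks)]
```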