causaliq-knowledge 0.2.0__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff compares the contents of two package versions publicly released to a supported registry. It is provided for informational purposes only and reflects the versions exactly as they appear in that registry.
Files changed (31)
  1. causaliq_knowledge/__init__.py +6 -3
  2. causaliq_knowledge/action.py +480 -0
  3. causaliq_knowledge/cache/__init__.py +18 -0
  4. causaliq_knowledge/cache/encoders/__init__.py +13 -0
  5. causaliq_knowledge/cache/encoders/base.py +90 -0
  6. causaliq_knowledge/cache/encoders/json_encoder.py +430 -0
  7. causaliq_knowledge/cache/token_cache.py +666 -0
  8. causaliq_knowledge/cli/__init__.py +15 -0
  9. causaliq_knowledge/cli/cache.py +478 -0
  10. causaliq_knowledge/cli/generate.py +410 -0
  11. causaliq_knowledge/cli/main.py +172 -0
  12. causaliq_knowledge/cli/models.py +309 -0
  13. causaliq_knowledge/graph/__init__.py +78 -0
  14. causaliq_knowledge/graph/generator.py +457 -0
  15. causaliq_knowledge/graph/loader.py +222 -0
  16. causaliq_knowledge/graph/models.py +426 -0
  17. causaliq_knowledge/graph/params.py +175 -0
  18. causaliq_knowledge/graph/prompts.py +445 -0
  19. causaliq_knowledge/graph/response.py +392 -0
  20. causaliq_knowledge/graph/view_filter.py +154 -0
  21. causaliq_knowledge/llm/base_client.py +147 -1
  22. causaliq_knowledge/llm/cache.py +443 -0
  23. causaliq_knowledge/py.typed +0 -0
  24. {causaliq_knowledge-0.2.0.dist-info → causaliq_knowledge-0.4.0.dist-info}/METADATA +10 -6
  25. causaliq_knowledge-0.4.0.dist-info/RECORD +42 -0
  26. {causaliq_knowledge-0.2.0.dist-info → causaliq_knowledge-0.4.0.dist-info}/WHEEL +1 -1
  27. {causaliq_knowledge-0.2.0.dist-info → causaliq_knowledge-0.4.0.dist-info}/entry_points.txt +3 -0
  28. causaliq_knowledge/cli.py +0 -414
  29. causaliq_knowledge-0.2.0.dist-info/RECORD +0 -22
  30. {causaliq_knowledge-0.2.0.dist-info → causaliq_knowledge-0.4.0.dist-info}/licenses/LICENSE +0 -0
  31. {causaliq_knowledge-0.2.0.dist-info → causaliq_knowledge-0.4.0.dist-info}/top_level.txt +0 -0
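The headline structural change is the removal of the old single-module CLI (`causaliq_knowledge/cli.py`, entry 28, -414 lines) in favour of the `causaliq_knowledge/cli/` package added in entries 8-12, together with a modified `entry_points.txt` (entry 27, +3 lines). Only the `models` subcommand source appears in this diff; as a hedged sketch, the new `cli/main.py` presumably wires subcommands into a click group along the following lines (the group name and registration are assumptions, not taken from the diff):

    # Sketch only: the actual cli/main.py is not shown in this diff.
    import click

    from causaliq_knowledge.cli.models import list_models  # shown below

    @click.group()
    def cli() -> None:
        """causaliq-knowledge command-line interface."""

    cli.add_command(list_models)  # exposed as `cqknow models`

    if __name__ == "__main__":
        cli()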
causaliq_knowledge/cli/models.py (new file)
@@ -0,0 +1,309 @@
+ """Model listing CLI command.
+
+ This module provides the command for listing available LLM models
+ from each provider.
+ """
+
+ from __future__ import annotations
+
+ import sys
+ from typing import Callable, List, Optional, Tuple, TypedDict
+
+ import click
+
+
+ class ProviderInfo(TypedDict):
+     """Type definition for provider information."""
+
+     name: str
+     prefix: str
+     env_var: Optional[str]
+     url: str
+     get_models: Callable[[], Tuple[bool, List[str], Optional[str]]]
+
+
+ def get_groq_models() -> Tuple[bool, List[str], Optional[str]]:
+     """Get available Groq models.
+
+     Returns:
+         Tuple of (available, models, error_message).
+     """
+     from causaliq_knowledge.llm import GroqClient, GroqConfig
+
+     try:
+         client = GroqClient(GroqConfig())
+         if not client.is_available():
+             return False, [], "GROQ_API_KEY not set"
+         models = [f"groq/{m}" for m in client.list_models()]
+         return True, models, None
+     except ValueError as e:
+         return False, [], str(e)
+
+
+ def get_anthropic_models() -> Tuple[bool, List[str], Optional[str]]:
+     """Get available Anthropic models.
+
+     Returns:
+         Tuple of (available, models, error_message).
+     """
+     from causaliq_knowledge.llm import AnthropicClient, AnthropicConfig
+
+     try:
+         client = AnthropicClient(AnthropicConfig())
+         if not client.is_available():
+             return False, [], "ANTHROPIC_API_KEY not set"
+         models = [f"anthropic/{m}" for m in client.list_models()]
+         return True, models, None
+     except ValueError as e:
+         return False, [], str(e)
+
+
+ def get_gemini_models() -> Tuple[bool, List[str], Optional[str]]:
+     """Get available Gemini models.
+
+     Returns:
+         Tuple of (available, models, error_message).
+     """
+     from causaliq_knowledge.llm import GeminiClient, GeminiConfig
+
+     try:
+         client = GeminiClient(GeminiConfig())
+         if not client.is_available():
+             return False, [], "GEMINI_API_KEY not set"
+         models = [f"gemini/{m}" for m in client.list_models()]
+         return True, models, None
+     except ValueError as e:
+         return False, [], str(e)
+
+
+ def get_ollama_models() -> Tuple[bool, List[str], Optional[str]]:
+     """Get available Ollama models.
+
+     Returns:
+         Tuple of (available, models, error_message).
+     """
+     from causaliq_knowledge.llm import OllamaClient, OllamaConfig
+
+     try:
+         client = OllamaClient(OllamaConfig())
+         models = [f"ollama/{m}" for m in client.list_models()]
+         if not models:
+             msg = "No models installed. Run: ollama pull <model>"
+             return True, [], msg
+         return True, models, None
+     except ValueError as e:
+         return False, [], str(e)
+
+
+ def get_openai_models() -> Tuple[bool, List[str], Optional[str]]:
+     """Get available OpenAI models.
+
+     Returns:
+         Tuple of (available, models, error_message).
+     """
+     from causaliq_knowledge.llm import OpenAIClient, OpenAIConfig
+
+     try:
+         client = OpenAIClient(OpenAIConfig())
+         if not client.is_available():
+             return False, [], "OPENAI_API_KEY not set"
+         models = [f"openai/{m}" for m in client.list_models()]
+         return True, models, None
+     except ValueError as e:
+         return False, [], str(e)
+
+
+ def get_deepseek_models() -> Tuple[bool, List[str], Optional[str]]:
+     """Get available DeepSeek models.
+
+     Returns:
+         Tuple of (available, models, error_message).
+     """
+     from causaliq_knowledge.llm import DeepSeekClient, DeepSeekConfig
+
+     try:
+         client = DeepSeekClient(DeepSeekConfig())
+         if not client.is_available():
+             return False, [], "DEEPSEEK_API_KEY not set"
+         models = [f"deepseek/{m}" for m in client.list_models()]
+         return True, models, None
+     except ValueError as e:
+         return False, [], str(e)
+
+
+ def get_mistral_models() -> Tuple[bool, List[str], Optional[str]]:
+     """Get available Mistral models.
+
+     Returns:
+         Tuple of (available, models, error_message).
+     """
+     from causaliq_knowledge.llm import MistralClient, MistralConfig
+
+     try:
+         client = MistralClient(MistralConfig())
+         if not client.is_available():
+             return False, [], "MISTRAL_API_KEY not set"
+         models = [f"mistral/{m}" for m in client.list_models()]
+         return True, models, None
+     except ValueError as e:
+         return False, [], str(e)
+
+
+ def get_all_providers() -> List[ProviderInfo]:
+     """Get list of all provider configurations.
+
+     Returns:
+         List of ProviderInfo dictionaries.
+     """
+     return [
+         {
+             "name": "Groq",
+             "prefix": "groq/",
+             "env_var": "GROQ_API_KEY",
+             "url": "https://console.groq.com",
+             "get_models": get_groq_models,
+         },
+         {
+             "name": "Anthropic",
+             "prefix": "anthropic/",
+             "env_var": "ANTHROPIC_API_KEY",
+             "url": "https://console.anthropic.com",
+             "get_models": get_anthropic_models,
+         },
+         {
+             "name": "Gemini",
+             "prefix": "gemini/",
+             "env_var": "GEMINI_API_KEY",
+             "url": "https://aistudio.google.com",
+             "get_models": get_gemini_models,
+         },
+         {
+             "name": "Ollama (Local)",
+             "prefix": "ollama/",
+             "env_var": None,
+             "url": "https://ollama.ai",
+             "get_models": get_ollama_models,
+         },
+         {
+             "name": "OpenAI",
+             "prefix": "openai/",
+             "env_var": "OPENAI_API_KEY",
+             "url": "https://platform.openai.com",
+             "get_models": get_openai_models,
+         },
+         {
+             "name": "DeepSeek",
+             "prefix": "deepseek/",
+             "env_var": "DEEPSEEK_API_KEY",
+             "url": "https://platform.deepseek.com",
+             "get_models": get_deepseek_models,
+         },
+         {
+             "name": "Mistral",
+             "prefix": "mistral/",
+             "env_var": "MISTRAL_API_KEY",
+             "url": "https://console.mistral.ai",
+             "get_models": get_mistral_models,
+         },
+     ]
+
+
+ VALID_PROVIDER_NAMES = [
+     "groq",
+     "anthropic",
+     "gemini",
+     "ollama",
+     "openai",
+     "deepseek",
+     "mistral",
+ ]
+
+
+ @click.command("models")
+ @click.argument("provider", required=False, default=None)
+ def list_models(provider: Optional[str]) -> None:
+     """List available LLM models from each provider.
+
+     Queries each provider's API to show models accessible with your
+     current configuration. Results are filtered by your API key's
+     access level or locally installed models.
+
+     Optionally specify PROVIDER to list models from a single provider:
+     groq, anthropic, gemini, ollama, openai, deepseek, or mistral.
+
+     Examples:
+
+         cqknow models            # List all providers
+
+         cqknow models groq       # List only Groq models
+
+         cqknow models mistral    # List only Mistral models
+     """
+     providers = get_all_providers()
+
+     # Filter providers if a specific one is requested
+     if provider:
+         provider_lower = provider.lower()
+         if provider_lower not in VALID_PROVIDER_NAMES:
+             click.echo(
+                 f"Unknown provider: {provider}. "
+                 f"Valid options: {', '.join(VALID_PROVIDER_NAMES)}",
+                 err=True,
+             )
+             sys.exit(1)
+         providers = [
+             p for p in providers if p["prefix"].rstrip("/") == provider_lower
+         ]
+
+     click.echo("\nAvailable LLM Models:\n")
+
+     any_available = False
+     for prov in providers:
+         available, models, error = prov["get_models"]()
+
+         if available and models:
+             any_available = True
+             status = click.style("[OK]", fg="green")
+             count = len(models)
+             click.echo(f"  {status} {prov['name']} ({count} models):")
+             for m in models:
+                 click.echo(f"      {m}")
+         elif available and not models:
+             status = click.style("[!]", fg="yellow")
+             click.echo(f"  {status} {prov['name']}:")
+             click.echo(f"      {error}")
+         else:
+             status = click.style("[X]", fg="red")
+             click.echo(f"  {status} {prov['name']}:")
+             click.echo(f"      {error}")
+
+         click.echo()
+
+     click.echo("Provider Setup:")
+     for prov in providers:
+         available, _, _ = prov["get_models"]()
+         if prov["env_var"]:
+             status = "configured" if available else "not set"
+             color = "green" if available else "yellow"
+             click.echo(
+                 f"  {prov['env_var']}: "
+                 f"{click.style(status, fg=color)} - {prov['url']}"
+             )
+         else:
+             status = "running" if available else "not running"
+             color = "green" if available else "yellow"
+             click.echo(
+                 f"  Ollama server: "
+                 f"{click.style(status, fg=color)} - {prov['url']}"
+             )
+
+     click.echo()
+     click.echo(
+         click.style("Note: ", fg="yellow")
+         + "Some models may require a paid plan. "
+         + "Free tier availability varies by provider."
+     )
+     click.echo()
+     if any_available:
+         click.echo("Default model: groq/llama-3.1-8b-instant")
+     click.echo()
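Each provider helper returns the same `(available, models, error_message)` triple, which the command maps onto three display states: reachable with models ([OK]), reachable but empty, as with an Ollama server that has no models pulled ([!]), and unavailable ([X]). Since the helpers are plain functions, they can be consumed outside the CLI; a minimal hypothetical consumer using only the functions defined above:

    # Hypothetical consumer script; not part of the package.
    from causaliq_knowledge.cli.models import get_all_providers

    for prov in get_all_providers():
        available, models, error = prov["get_models"]()
        if available and models:
            print(f"{prov['name']}: {len(models)} models")
        elif available:
            print(f"{prov['name']}: reachable, but {error}")
        else:
            print(f"{prov['name']}: unavailable ({error})")

Note that `list_models` invokes each provider's `get_models` twice, once for the model listing and once for the setup summary, so every provider API is queried twice per run.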
causaliq_knowledge/graph/__init__.py (new file)
@@ -0,0 +1,78 @@
+ """Graph generation module for causaliq-knowledge.
+
+ This module provides functionality for LLM-based causal graph generation
+ from variable specifications.
+ """
+
+ from causaliq_knowledge.graph.generator import (
+     GraphGenerator,
+     GraphGeneratorConfig,
+ )
+ from causaliq_knowledge.graph.loader import ModelLoader, ModelLoadError
+ from causaliq_knowledge.graph.models import (
+     CausalPrinciple,
+     Constraints,
+     GroundTruth,
+     LLMGuidance,
+     ModelSpec,
+     PromptDetails,
+     Provenance,
+     VariableRole,
+     VariableSpec,
+     VariableType,
+     ViewDefinition,
+ )
+ from causaliq_knowledge.graph.params import GenerateGraphParams
+ from causaliq_knowledge.graph.prompts import (
+     ADJACENCY_MATRIX_RESPONSE_SCHEMA,
+     EDGE_LIST_RESPONSE_SCHEMA,
+     GraphQueryPrompt,
+     OutputFormat,
+ )
+ from causaliq_knowledge.graph.response import (
+     GeneratedGraph,
+     GenerationMetadata,
+     ProposedEdge,
+     parse_adjacency_matrix_response,
+     parse_edge_list_response,
+     parse_graph_response,
+ )
+ from causaliq_knowledge.graph.view_filter import PromptDetail, ViewFilter
+
+ __all__ = [
+     # Models
+     "ModelSpec",
+     "Provenance",
+     "LLMGuidance",
+     "ViewDefinition",
+     "PromptDetails",
+     "VariableSpec",
+     "VariableRole",
+     "VariableType",
+     "Constraints",
+     "CausalPrinciple",
+     "GroundTruth",
+     # Params (shared validation)
+     "GenerateGraphParams",
+     # Loader
+     "ModelLoader",
+     "ModelLoadError",
+     # Filtering
+     "ViewFilter",
+     "PromptDetail",
+     # Prompts
+     "GraphQueryPrompt",
+     "OutputFormat",
+     "EDGE_LIST_RESPONSE_SCHEMA",
+     "ADJACENCY_MATRIX_RESPONSE_SCHEMA",
+     # Response models
+     "ProposedEdge",
+     "GeneratedGraph",
+     "GenerationMetadata",
+     "parse_edge_list_response",
+     "parse_adjacency_matrix_response",
+     "parse_graph_response",
+     # Generator
+     "GraphGenerator",
+     "GraphGeneratorConfig",
+ ]
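Because `graph/__init__.py` re-exports the package's full public surface, downstream code can import from `causaliq_knowledge.graph` directly rather than reaching into submodules. The names below are confirmed by `__all__`; the method calls are illustrative assumptions only, since this diff does not show the generator or loader internals:

    # Import paths follow the re-exports above; the calls are assumed,
    # not confirmed by this diff.
    from causaliq_knowledge.graph import (
        GraphGenerator,
        GraphGeneratorConfig,
        ModelLoader,
        ModelLoadError,
    )

    try:
        spec = ModelLoader().load("model.json")      # hypothetical signature
        generator = GraphGenerator(GraphGeneratorConfig())
        graph = generator.generate(spec)             # hypothetical signature
    except ModelLoadError as exc:
        print(f"Could not load model spec: {exc}")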