causaliq_knowledge-0.2.0-py3-none-any.whl → causaliq_knowledge-0.4.0-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the registry.
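A comparison like this can be reproduced locally. Below is a minimal sketch in Python's standard library, assuming both wheels have already been downloaded into the current directory (for example via pip download causaliq-knowledge==0.2.0 --no-deps):

import difflib
import zipfile

OLD = "causaliq_knowledge-0.2.0-py3-none-any.whl"
NEW = "causaliq_knowledge-0.4.0-py3-none-any.whl"

# A wheel is a zip archive, so zipfile can list and read its members.
with zipfile.ZipFile(OLD) as old, zipfile.ZipFile(NEW) as new:
    old_names, new_names = set(old.namelist()), set(new.namelist())
    print("added:", sorted(new_names - old_names))
    print("removed:", sorted(old_names - new_names))
    # Unified diff of every file present in both wheels
    for name in sorted(old_names & new_names):
        a = old.read(name).decode("utf-8", errors="replace").splitlines()
        b = new.read(name).decode("utf-8", errors="replace").splitlines()
        for line in difflib.unified_diff(a, b, name, name, lineterm=""):
            print(line)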
- causaliq_knowledge/__init__.py +6 -3
- causaliq_knowledge/action.py +480 -0
- causaliq_knowledge/cache/__init__.py +18 -0
- causaliq_knowledge/cache/encoders/__init__.py +13 -0
- causaliq_knowledge/cache/encoders/base.py +90 -0
- causaliq_knowledge/cache/encoders/json_encoder.py +430 -0
- causaliq_knowledge/cache/token_cache.py +666 -0
- causaliq_knowledge/cli/__init__.py +15 -0
- causaliq_knowledge/cli/cache.py +478 -0
- causaliq_knowledge/cli/generate.py +410 -0
- causaliq_knowledge/cli/main.py +172 -0
- causaliq_knowledge/cli/models.py +309 -0
- causaliq_knowledge/graph/__init__.py +78 -0
- causaliq_knowledge/graph/generator.py +457 -0
- causaliq_knowledge/graph/loader.py +222 -0
- causaliq_knowledge/graph/models.py +426 -0
- causaliq_knowledge/graph/params.py +175 -0
- causaliq_knowledge/graph/prompts.py +445 -0
- causaliq_knowledge/graph/response.py +392 -0
- causaliq_knowledge/graph/view_filter.py +154 -0
- causaliq_knowledge/llm/base_client.py +147 -1
- causaliq_knowledge/llm/cache.py +443 -0
- causaliq_knowledge/py.typed +0 -0
- {causaliq_knowledge-0.2.0.dist-info → causaliq_knowledge-0.4.0.dist-info}/METADATA +10 -6
- causaliq_knowledge-0.4.0.dist-info/RECORD +42 -0
- {causaliq_knowledge-0.2.0.dist-info → causaliq_knowledge-0.4.0.dist-info}/WHEEL +1 -1
- {causaliq_knowledge-0.2.0.dist-info → causaliq_knowledge-0.4.0.dist-info}/entry_points.txt +3 -0
- causaliq_knowledge/cli.py +0 -414
- causaliq_knowledge-0.2.0.dist-info/RECORD +0 -22
- {causaliq_knowledge-0.2.0.dist-info → causaliq_knowledge-0.4.0.dist-info}/licenses/LICENSE +0 -0
- {causaliq_knowledge-0.2.0.dist-info → causaliq_knowledge-0.4.0.dist-info}/top_level.txt +0 -0
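Taken together, the listing shows 0.4.0 replacing the single-module CLI (causaliq_knowledge/cli.py, deleted below) with a causaliq_knowledge/cli/ package (main, cache, generate, models), adding new cache/ and graph/ subpackages plus a token cache and graph-generation pipeline, and shipping a py.typed marker for type-checker support.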
causaliq_knowledge/cli.py
DELETED
@@ -1,414 +0,0 @@
"""Command-line interface for causaliq-knowledge."""

from __future__ import annotations

import json
import sys
from typing import Optional

import click

from causaliq_knowledge import __version__


@click.group()
@click.version_option(version=__version__)
def cli() -> None:
    """CausalIQ Knowledge - LLM knowledge for causal discovery.

    Query LLMs about causal relationships between variables.
    """
    pass


@cli.command("query")
@click.argument("node_a")
@click.argument("node_b")
@click.option(
    "--model",
    "-m",
    multiple=True,
    default=["groq/llama-3.1-8b-instant"],
    help="LLM model(s) to query. Can be specified multiple times.",
)
@click.option(
    "--domain",
    "-d",
    default=None,
    help="Domain context (e.g., 'medicine', 'economics').",
)
@click.option(
    "--strategy",
    "-s",
    type=click.Choice(["weighted_vote", "highest_confidence"]),
    default="weighted_vote",
    help="Consensus strategy for multi-model queries.",
)
@click.option(
    "--json",
    "output_json",
    is_flag=True,
    help="Output result as JSON.",
)
@click.option(
    "--temperature",
    "-t",
    type=float,
    default=0.1,
    help="LLM temperature (0.0-1.0).",
)
def query_edge(
    node_a: str,
    node_b: str,
    model: tuple[str, ...],
    domain: Optional[str],
    strategy: str,
    output_json: bool,
    temperature: float,
) -> None:
    """Query LLMs about a causal relationship between two variables.

    NODE_A and NODE_B are the variable names to query about.

    Examples:

        cqknow query smoking lung_cancer

        cqknow query smoking lung_cancer --domain medicine

        cqknow query X Y --model groq/llama-3.1-8b-instant \
            --model gemini/gemini-2.5-flash
    """
    # Import here to avoid slow startup for --help
    from causaliq_knowledge.llm import LLMKnowledge

    # Build context
    context = None
    if domain:
        context = {"domain": domain}

    # Create provider
    try:
        provider = LLMKnowledge(
            models=list(model),
            consensus_strategy=strategy,
            temperature=temperature,
        )
    except Exception as e:
        click.echo(f"Error creating provider: {e}", err=True)
        sys.exit(1)

    # Query
    click.echo(
        f"Querying {len(model)} model(s) about: {node_a} -> {node_b}",
        err=True,
    )

    try:
        result = provider.query_edge(node_a, node_b, context=context)
    except Exception as e:
        click.echo(f"Error querying LLM: {e}", err=True)
        sys.exit(1)

    # Output
    if output_json:
        output = {
            "node_a": node_a,
            "node_b": node_b,
            "exists": result.exists,
            "direction": result.direction.value if result.direction else None,
            "confidence": result.confidence,
            "reasoning": result.reasoning,
            "model": result.model,
        }
        click.echo(json.dumps(output, indent=2))
    else:
        # Human-readable output
        exists_map = {True: "Yes", False: "No", None: "Uncertain"}
        exists_str = exists_map[result.exists]
        direction_str = result.direction.value if result.direction else "N/A"

        click.echo(f"\n{'='*60}")
        click.echo(f"Query: Does '{node_a}' cause '{node_b}'?")
        click.echo("=" * 60)
        click.echo(f"Exists: {exists_str}")
        click.echo(f"Direction: {direction_str}")
        click.echo(f"Confidence: {result.confidence:.2f}")
        click.echo(f"Model(s): {result.model or 'unknown'}")
        click.echo(f"{'='*60}")
        click.echo(f"Reasoning: {result.reasoning}")
        click.echo()

    # Show stats
    stats = provider.get_stats()
    if stats["total_cost"] > 0:
        click.echo(
            f"Cost: ${stats['total_cost']:.6f} "
            f"({stats['total_calls']} call(s))",
            err=True,
        )


@cli.command("models")
@click.argument("provider", required=False, default=None)
def list_models(provider: Optional[str]) -> None:
    """List available LLM models from each provider.

    Queries each provider's API to show models accessible with your
    current configuration. Results are filtered by your API key's
    access level or locally installed models.

    Optionally specify PROVIDER to list models from a single provider:
    groq, anthropic, gemini, ollama, openai, deepseek, or mistral.

    Examples:

        cqknow models  # List all providers

        cqknow models groq  # List only Groq models

        cqknow models mistral  # List only Mistral models
    """
    from typing import Callable, List, Optional, Tuple, TypedDict

    from causaliq_knowledge.llm import (
        AnthropicClient,
        AnthropicConfig,
        DeepSeekClient,
        DeepSeekConfig,
        GeminiClient,
        GeminiConfig,
        GroqClient,
        GroqConfig,
        MistralClient,
        MistralConfig,
        OllamaClient,
        OllamaConfig,
        OpenAIClient,
        OpenAIConfig,
    )

    # Type for get_models functions
    GetModelsFunc = Callable[[], Tuple[bool, List[str], Optional[str]]]

    class ProviderInfo(TypedDict):
        name: str
        prefix: str
        env_var: Optional[str]
        url: str
        get_models: GetModelsFunc

    def get_groq_models() -> Tuple[bool, List[str], Optional[str]]:
        """Returns (available, models, error_msg)."""
        try:
            client = GroqClient(GroqConfig())
            if not client.is_available():
                return False, [], "GROQ_API_KEY not set"
            models = [f"groq/{m}" for m in client.list_models()]
            return True, models, None
        except ValueError as e:
            return False, [], str(e)

    def get_anthropic_models() -> Tuple[bool, List[str], Optional[str]]:
        """Returns (available, models, error_msg)."""
        try:
            client = AnthropicClient(AnthropicConfig())
            if not client.is_available():
                return False, [], "ANTHROPIC_API_KEY not set"
            models = [f"anthropic/{m}" for m in client.list_models()]
            return True, models, None
        except ValueError as e:
            return False, [], str(e)

    def get_gemini_models() -> Tuple[bool, List[str], Optional[str]]:
        """Returns (available, models, error_msg)."""
        try:
            client = GeminiClient(GeminiConfig())
            if not client.is_available():
                return False, [], "GEMINI_API_KEY not set"
            models = [f"gemini/{m}" for m in client.list_models()]
            return True, models, None
        except ValueError as e:
            return False, [], str(e)

    def get_ollama_models() -> Tuple[bool, List[str], Optional[str]]:
        """Returns (available, models, error_msg)."""
        try:
            client = OllamaClient(OllamaConfig())
            models = [f"ollama/{m}" for m in client.list_models()]
            if not models:
                msg = "No models installed. Run: ollama pull <model>"
                return True, [], msg
            return True, models, None
        except ValueError as e:
            return False, [], str(e)

    def get_openai_models() -> Tuple[bool, List[str], Optional[str]]:
        """Returns (available, models, error_msg)."""
        try:
            client = OpenAIClient(OpenAIConfig())
            if not client.is_available():
                return False, [], "OPENAI_API_KEY not set"
            models = [f"openai/{m}" for m in client.list_models()]
            return True, models, None
        except ValueError as e:
            return False, [], str(e)

    def get_deepseek_models() -> Tuple[bool, List[str], Optional[str]]:
        """Returns (available, models, error_msg)."""
        try:
            client = DeepSeekClient(DeepSeekConfig())
            if not client.is_available():
                return False, [], "DEEPSEEK_API_KEY not set"
            models = [f"deepseek/{m}" for m in client.list_models()]
            return True, models, None
        except ValueError as e:
            return False, [], str(e)

    def get_mistral_models() -> Tuple[bool, List[str], Optional[str]]:
        """Returns (available, models, error_msg)."""
        try:
            client = MistralClient(MistralConfig())
            if not client.is_available():
                return False, [], "MISTRAL_API_KEY not set"
            models = [f"mistral/{m}" for m in client.list_models()]
            return True, models, None
        except ValueError as e:
            return False, [], str(e)

    providers: List[ProviderInfo] = [
        {
            "name": "Groq",
            "prefix": "groq/",
            "env_var": "GROQ_API_KEY",
            "url": "https://console.groq.com",
            "get_models": get_groq_models,
        },
        {
            "name": "Anthropic",
            "prefix": "anthropic/",
            "env_var": "ANTHROPIC_API_KEY",
            "url": "https://console.anthropic.com",
            "get_models": get_anthropic_models,
        },
        {
            "name": "Gemini",
            "prefix": "gemini/",
            "env_var": "GEMINI_API_KEY",
            "url": "https://aistudio.google.com",
            "get_models": get_gemini_models,
        },
        {
            "name": "Ollama (Local)",
            "prefix": "ollama/",
            "env_var": None,
            "url": "https://ollama.ai",
            "get_models": get_ollama_models,
        },
        {
            "name": "OpenAI",
            "prefix": "openai/",
            "env_var": "OPENAI_API_KEY",
            "url": "https://platform.openai.com",
            "get_models": get_openai_models,
        },
        {
            "name": "DeepSeek",
            "prefix": "deepseek/",
            "env_var": "DEEPSEEK_API_KEY",
            "url": "https://platform.deepseek.com",
            "get_models": get_deepseek_models,
        },
        {
            "name": "Mistral",
            "prefix": "mistral/",
            "env_var": "MISTRAL_API_KEY",
            "url": "https://console.mistral.ai",
            "get_models": get_mistral_models,
        },
    ]

    # Filter providers if a specific one is requested
    valid_provider_names = [
        "groq",
        "anthropic",
        "gemini",
        "ollama",
        "openai",
        "deepseek",
        "mistral",
    ]
    if provider:
        provider_lower = provider.lower()
        if provider_lower not in valid_provider_names:
            click.echo(
                f"Unknown provider: {provider}. "
                f"Valid options: {', '.join(valid_provider_names)}",
                err=True,
            )
            sys.exit(1)
        providers = [
            p for p in providers if p["prefix"].rstrip("/") == provider_lower
        ]

    click.echo("\nAvailable LLM Models:\n")

    any_available = False
    for prov in providers:
        available, models, error = prov["get_models"]()

        if available and models:
            any_available = True
            status = click.style("[OK]", fg="green")
            count = len(models)
            click.echo(f" {status} {prov['name']} ({count} models):")
            for m in models:
                click.echo(f" {m}")
        elif available and not models:
            status = click.style("[!]", fg="yellow")
            click.echo(f" {status} {prov['name']}:")
            click.echo(f" {error}")
        else:
            status = click.style("[X]", fg="red")
            click.echo(f" {status} {prov['name']}:")
            click.echo(f" {error}")

        click.echo()

    click.echo("Provider Setup:")
    for prov in providers:
        available, _, _ = prov["get_models"]()
        if prov["env_var"]:
            status = "configured" if available else "not set"
            color = "green" if available else "yellow"
            click.echo(
                f" {prov['env_var']}: "
                f"{click.style(status, fg=color)} - {prov['url']}"
            )
        else:
            status = "running" if available else "not running"
            color = "green" if available else "yellow"
            click.echo(
                f" Ollama server: "
                f"{click.style(status, fg=color)} - {prov['url']}"
            )

    click.echo()
    click.echo(
        click.style("Note: ", fg="yellow")
        + "Some models may require a paid plan. "
        + "Free tier availability varies by provider."
    )
    click.echo()
    if any_available:
        click.echo("Default model: groq/llama-3.1-8b-instant")
        click.echo()


def main() -> None:
    """Entry point for the CLI."""
    cli()


if __name__ == "__main__":  # pragma: no cover
    main()
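Since 0.4.0 removes this module in favour of the causaliq_knowledge/cli/ package, the quickest way to exercise the old command surface in-process is click's test runner. A minimal sketch, assuming causaliq-knowledge==0.2.0 is installed and a provider key (e.g. GROQ_API_KEY) is configured; the command and option names are taken from the code above:

from click.testing import CliRunner

from causaliq_knowledge.cli import cli

# Invoke `cqknow query smoking lung_cancer --domain medicine --json`
# without spawning a subprocess.
runner = CliRunner()
result = runner.invoke(
    cli,
    ["query", "smoking", "lung_cancer", "--domain", "medicine", "--json"],
)
print(result.exit_code)
print(result.output)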
causaliq_knowledge-0.2.0.dist-info/RECORD
DELETED

@@ -1,22 +0,0 @@
causaliq_knowledge/__init__.py,sha256=IcoxZ6fjiN6VrniikCUZhHkxf2D1eGixtLWNrvKevN0,851
causaliq_knowledge/base.py,sha256=GBG-sftOKkmUoQzTpm6anDTjP-2nInRZN_36dxoYhvk,2917
causaliq_knowledge/cli.py,sha256=2c8WYxF4T_-R8hDIo9JiZCx59fVbXHCCln66UGLqirs,13169
causaliq_knowledge/models.py,sha256=tWGf186ASwO8NHiN97pEOLuBJmJI6Q9jvpU0mYZNdS0,4058
causaliq_knowledge/llm/__init__.py,sha256=30AL0h64zIkXoiqhMY7gjaf7mrtwtwMW38vzhns0My4,1663
causaliq_knowledge/llm/anthropic_client.py,sha256=dPFHYGWL4xwQCtmQuGwGY4DBKSINOgOS-11ekznaiXo,8719
causaliq_knowledge/llm/base_client.py,sha256=Dg5s9FqtTScliEK9MJ2_B0atTNwRRMNscv9gai6sEB4,7090
causaliq_knowledge/llm/deepseek_client.py,sha256=ZcOpgnYa66XHjiTaF5ekR_BtosRYvVmzlIafp_Gsx_A,3543
causaliq_knowledge/llm/gemini_client.py,sha256=XJMq9sPo7zExrALSr2rIRHLheSPqKo8ENG0KtdJ1cjw,9924
causaliq_knowledge/llm/groq_client.py,sha256=PnTXqtMF1Km9DY4HiCZXQ6LeOzdjZtQJaeuGe1GbeME,7531
causaliq_knowledge/llm/mistral_client.py,sha256=dTAOtymffCM1AJp5-JcfizofYrUA-jhKfHWrhZe2DDI,4187
causaliq_knowledge/llm/ollama_client.py,sha256=PPU3g-nD8D546zcYB3uGxZ9yVbU4Gngo3snM2tRFeTc,8612
causaliq_knowledge/llm/openai_client.py,sha256=MJmB6P32TZESMlXhn9d0-b3vFWXmf7ojHQ5CY8mCENI,3835
causaliq_knowledge/llm/openai_compat_client.py,sha256=L8ZW5csuhUePq4mt3EGOUqhR3tleFmM72UlhPBsgIMQ,9518
causaliq_knowledge/llm/prompts.py,sha256=bJ9iVGKUfTfLi2eWh-FFM4cNzk5Ux4Z0x8R6Ia27Dbo,6598
causaliq_knowledge/llm/provider.py,sha256=VDEv-1esT_EgJk_Gwlfl4423ojglOxzPCBCFbOFE4DQ,15184
causaliq_knowledge-0.2.0.dist-info/licenses/LICENSE,sha256=vUFUzQnti-D-MLSi9NxFlsFYOKwU25sxxH7WgJOQFIs,1084
causaliq_knowledge-0.2.0.dist-info/METADATA,sha256=NxnJJjL6hED91fu0DlclGSoeiji8litmEsy1sS_lt_0,8726
causaliq_knowledge-0.2.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
causaliq_knowledge-0.2.0.dist-info/entry_points.txt,sha256=8iQjiMgFxZszRWwSTGHvoOBb_OBUkMmwvH3PzgsH-Cc,104
causaliq_knowledge-0.2.0.dist-info/top_level.txt,sha256=GcxQf4BQAGa38i2-j8ylk2FmnBHtEZ9-8bSt-7Uka7k,19
causaliq_knowledge-0.2.0.dist-info/RECORD,,
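Each RECORD row has the form path,sha256=<digest>,size, where the digest is the urlsafe-base64-encoded SHA-256 of the file with trailing "=" padding stripped (per the wheel spec); the RECORD file itself carries no hash or size, hence the trailing ",,". A minimal verification sketch, assuming the 0.2.0 wheel has been unpacked into the current directory:

import base64
import hashlib
from pathlib import Path

def record_digest(path: str) -> str:
    """Compute a RECORD-style digest: urlsafe base64, '=' padding stripped."""
    raw = hashlib.sha256(Path(path).read_bytes()).digest()
    return base64.urlsafe_b64encode(raw).rstrip(b"=").decode("ascii")

# Expected digest copied from the RECORD entry for cli.py above.
expected = "2c8WYxF4T_-R8hDIo9JiZCx59fVbXHCCln66UGLqirs"
print(record_digest("causaliq_knowledge/cli.py") == expected)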