causaliq-knowledge 0.1.0__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
@@ -0,0 +1,287 @@
+ """Base class for OpenAI-compatible API clients.
+
+ This module provides a shared base class for LLM providers that implement
+ the OpenAI API format (OpenAI, DeepSeek, Mistral, etc.).
+ """
+
+ import logging
+ from abc import abstractmethod
+ from dataclasses import dataclass
+ from typing import Any, Dict, List, Optional
+
+ import httpx
+
+ from causaliq_knowledge.llm.base_client import (
+     BaseLLMClient,
+     LLMConfig,
+     LLMResponse,
+ )
+
+ logger = logging.getLogger(__name__)
+
+
+ @dataclass
+ class OpenAICompatConfig(LLMConfig):
+     """Base configuration for OpenAI-compatible API clients.
+
+     Attributes:
+         model: Model identifier.
+         temperature: Sampling temperature (default: 0.1).
+         max_tokens: Maximum response tokens (default: 500).
+         timeout: Request timeout in seconds (default: 30.0).
+         api_key: API key (provider-specific env var fallback).
+     """
+
+     model: str = "gpt-4o-mini"
+     temperature: float = 0.1
+     max_tokens: int = 500
+     timeout: float = 30.0
+     api_key: Optional[str] = None
+
+
+ class OpenAICompatClient(BaseLLMClient):
+     """Base class for OpenAI-compatible API clients.
+
+     Provides shared implementation for providers that use the OpenAI API
+     format (chat/completions endpoint, same request/response structure).
+
+     Subclasses must implement:
+     - BASE_URL: The API base URL
+     - PROVIDER_NAME: Name for logging
+     - ENV_VAR: Environment variable for API key
+     - _get_pricing(): Return pricing dict for cost calculation
+     - _filter_models(): Optional model list filtering
+
+     Example:
+         >>> class MyClient(OpenAICompatClient):
+         ...     BASE_URL = "https://api.example.com/v1"
+         ...     PROVIDER_NAME = "example"
+         ...     ENV_VAR = "EXAMPLE_API_KEY"
+     """
+
+     # Subclasses must override these
+     BASE_URL: str = ""
+     PROVIDER_NAME: str = "openai-compat"
+     ENV_VAR: str = "API_KEY"
+
+     def __init__(self, config: Optional[OpenAICompatConfig] = None) -> None:
+         """Initialize the client.
+
+         Args:
+             config: Client configuration. If None, uses defaults with
+                 API key from environment variable.
+         """
+         self.config = config or self._default_config()
+         self._total_calls = 0
+
+     @abstractmethod
+     def _default_config(self) -> OpenAICompatConfig:
+         """Return default configuration for this provider."""
+         pass
+
+     @property
+     def provider_name(self) -> str:
+         """Return the provider name."""
+         return self.PROVIDER_NAME
+
+     def completion(
+         self, messages: List[Dict[str, str]], **kwargs: Any
+     ) -> LLMResponse:
+         """Make a chat completion request.
+
+         Args:
+             messages: List of message dicts with "role" and "content" keys.
+             **kwargs: Override config options (temperature, max_tokens).
+
+         Returns:
+             LLMResponse with the generated content and metadata.
+
+         Raises:
+             ValueError: If the API request fails.
+         """
+         # Build request payload
+         payload = {
+             "model": self.config.model,
+             "messages": messages,
+             "temperature": kwargs.get("temperature", self.config.temperature),
+             "max_tokens": kwargs.get("max_tokens", self.config.max_tokens),
+             "stream": False,
+         }
+
+         headers = {
+             "Authorization": f"Bearer {self.config.api_key}",
+             "Content-Type": "application/json",
+         }
+
+         logger.debug(
+             f"Calling {self.PROVIDER_NAME} API with model: {payload['model']}"
+         )
+
+         try:
+             with httpx.Client(timeout=self.config.timeout) as client:
+                 response = client.post(
+                     f"{self.BASE_URL}/chat/completions",
+                     json=payload,
+                     headers=headers,
+                 )
+                 response.raise_for_status()
+
+                 data = response.json()
+
+                 # Extract response data
+                 content = data["choices"][0]["message"]["content"] or ""
+                 usage = data.get("usage", {})
+                 input_tokens = usage.get("prompt_tokens", 0)
+                 output_tokens = usage.get("completion_tokens", 0)
+
+                 self._total_calls += 1
+
+                 logger.debug(
+                     f"{self.PROVIDER_NAME} response: "
+                     f"{input_tokens} in, {output_tokens} out"
+                 )
+
+                 # Calculate cost
+                 cost = self._calculate_cost(
+                     self.config.model, input_tokens, output_tokens
+                 )
+
+                 return LLMResponse(
+                     content=content,
+                     model=data.get("model", self.config.model),
+                     input_tokens=input_tokens,
+                     output_tokens=output_tokens,
+                     cost=cost,
+                     raw_response=data,
+                 )
+
+         except httpx.HTTPStatusError as e:
+             msg = f"{self.PROVIDER_NAME} API error: {e.response.status_code}"
+             logger.error(f"{msg} - {e.response.text}")
+             raise ValueError(f"{msg} - {e.response.text}")
+         except httpx.TimeoutException:
+             raise ValueError(f"{self.PROVIDER_NAME} API request timed out")
+         except Exception as e:
+             logger.error(f"{self.PROVIDER_NAME} API unexpected error: {e}")
+             raise ValueError(f"{self.PROVIDER_NAME} API error: {str(e)}")
+
+     def _calculate_cost(
+         self, model: str, input_tokens: int, output_tokens: int
+     ) -> float:
+         """Calculate approximate cost for API call.
+
+         Args:
+             model: Model identifier.
+             input_tokens: Number of input tokens.
+             output_tokens: Number of output tokens.
+
+         Returns:
+             Estimated cost in USD.
+         """
+         pricing = self._get_pricing()
+
+         # Find matching pricing (check if model starts with known prefix)
+         model_pricing = None
+         for key in pricing:
+             if model.startswith(key):
+                 model_pricing = pricing[key]
+                 break
+
+         if not model_pricing:
+             return 0.0
+
+         input_cost = (input_tokens / 1_000_000) * model_pricing["input"]
+         output_cost = (output_tokens / 1_000_000) * model_pricing["output"]
+
+         return input_cost + output_cost
+
+     @abstractmethod
+     def _get_pricing(self) -> Dict[str, Dict[str, float]]:
+         """Return pricing dict for this provider.
+
+         Returns:
+             Dict mapping model prefixes to input/output costs per 1M tokens.
+             Example: {"gpt-4o": {"input": 2.50, "output": 10.00}}
+         """
+         pass
+
+     def complete_json(
+         self, messages: List[Dict[str, str]], **kwargs: Any
+     ) -> tuple[Optional[Dict[str, Any]], LLMResponse]:
+         """Make a completion request and parse response as JSON.
+
+         Args:
+             messages: List of message dicts with "role" and "content" keys.
+             **kwargs: Override config options passed to completion().
+
+         Returns:
+             Tuple of (parsed JSON dict or None, raw LLMResponse).
+         """
+         response = self.completion(messages, **kwargs)
+         parsed = response.parse_json()
+         return parsed, response
+
+     @property
+     def call_count(self) -> int:
+         """Return the number of API calls made."""
+         return self._total_calls
+
+     def is_available(self) -> bool:
+         """Check if the API is available.
+
+         Returns:
+             True if API key is configured.
+         """
+         return bool(self.config.api_key)
+
+     def list_models(self) -> List[str]:
+         """List available models from the API.
+
+         Queries the API to get models accessible with the current
+         API key, then filters using _filter_models().
+
+         Returns:
+             List of model identifiers.
+
+         Raises:
+             ValueError: If the API request fails.
+         """
+         try:
+             with httpx.Client(timeout=self.config.timeout) as client:
+                 response = client.get(
+                     f"{self.BASE_URL}/models",
+                     headers={"Authorization": f"Bearer {self.config.api_key}"},
+                 )
+                 response.raise_for_status()
+                 data = response.json()
+
+                 # Get model IDs from response
+                 all_models = [
+                     model.get("id", "") for model in data.get("data", [])
+                 ]
+
+                 # Filter using provider-specific logic
+                 models = self._filter_models(all_models)
+
+                 return sorted(models)
+
+         except httpx.HTTPStatusError as e:
+             msg = f"{self.PROVIDER_NAME} API error: {e.response.status_code}"
+             raise ValueError(f"{msg} - {e.response.text}")
+         except Exception as e:
+             raise ValueError(
+                 f"Failed to list {self.PROVIDER_NAME} models: {e}"
+             )
+
+     def _filter_models(self, models: List[str]) -> List[str]:
+         """Filter model list to relevant models.
+
+         Override in subclasses to customize filtering.
+
+         Args:
+             models: List of all model IDs from API.
+
+         Returns:
+             Filtered list of relevant model IDs.
+         """
+         return models
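The new module above fixes the whole OpenAI-compatible workflow in the base class, so a concrete provider only has to supply its constants, a default configuration, and a pricing table. A minimal, hypothetical subclass sketch follows (the provider name, endpoint, model name, and prices are invented; only the attribute and method names come from the file above, and it assumes `BaseLLMClient` adds no further abstract methods beyond those implemented there):

```python
import os
from typing import Dict

from causaliq_knowledge.llm.openai_compat_client import (
    OpenAICompatClient,
    OpenAICompatConfig,
)


class AcmeClient(OpenAICompatClient):
    """Hypothetical client for an imaginary OpenAI-compatible provider."""

    BASE_URL = "https://api.example.com/v1"  # invented endpoint
    PROVIDER_NAME = "acme"
    ENV_VAR = "ACME_API_KEY"

    def _default_config(self) -> OpenAICompatConfig:
        # Default config falls back to the provider-specific env var.
        return OpenAICompatConfig(
            model="acme-small",
            api_key=os.getenv(self.ENV_VAR),
        )

    def _get_pricing(self) -> Dict[str, Dict[str, float]]:
        # USD per 1M tokens, keyed by model-name prefix (illustrative figures).
        return {"acme-small": {"input": 0.10, "output": 0.30}}
```

With only this, `AcmeClient().completion(messages)` would POST to `https://api.example.com/v1/chat/completions`, and `_calculate_cost()` would look up the invented pricing by model prefix.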
@@ -4,8 +4,19 @@ import logging
  from typing import Any, Dict, Optional, Union

  from causaliq_knowledge.base import KnowledgeProvider
+ from causaliq_knowledge.llm.anthropic_client import (
+     AnthropicClient,
+     AnthropicConfig,
+ )
+ from causaliq_knowledge.llm.deepseek_client import (
+     DeepSeekClient,
+     DeepSeekConfig,
+ )
  from causaliq_knowledge.llm.gemini_client import GeminiClient, GeminiConfig
  from causaliq_knowledge.llm.groq_client import GroqClient, GroqConfig
+ from causaliq_knowledge.llm.mistral_client import MistralClient, MistralConfig
+ from causaliq_knowledge.llm.ollama_client import OllamaClient, OllamaConfig
+ from causaliq_knowledge.llm.openai_client import OpenAIClient, OpenAIConfig
  from causaliq_knowledge.llm.prompts import EdgeQueryPrompt, parse_edge_response
  from causaliq_knowledge.models import EdgeDirection, EdgeKnowledge

@@ -164,6 +175,7 @@ class LLMKnowledge(KnowledgeProvider):
              models: List of model identifiers. Supported formats:
                  - "groq/llama-3.1-8b-instant" (Groq API)
                  - "gemini/gemini-2.5-flash" (Google Gemini API)
+                 - "ollama/llama3.2:1b" (Local Ollama server)
                  Defaults to ["groq/llama-3.1-8b-instant"].
              consensus_strategy: How to combine multi-model responses.
                  Options: "weighted_vote", "highest_confidence".
@@ -190,9 +202,30 @@ class LLMKnowledge(KnowledgeProvider):
          self._consensus_fn = CONSENSUS_STRATEGIES[consensus_strategy]

          # Create a client for each model - use direct APIs only
-         self._clients: dict[str, Union[GroqClient, GeminiClient]] = {}
+         self._clients: dict[
+             str,
+             Union[
+                 AnthropicClient,
+                 DeepSeekClient,
+                 GeminiClient,
+                 GroqClient,
+                 MistralClient,
+                 OllamaClient,
+                 OpenAIClient,
+             ],
+         ] = {}
          for model in models:
-             if model.startswith("groq/"):
+             if model.startswith("anthropic/"):
+                 # Use direct Anthropic client for Claude models
+                 anthropic_model = model.split("/", 1)[1]  # Extract model name
+                 anthropic_config = AnthropicConfig(
+                     model=anthropic_model,
+                     temperature=temperature,
+                     max_tokens=max_tokens,
+                     timeout=timeout,
+                 )
+                 self._clients[model] = AnthropicClient(config=anthropic_config)
+             elif model.startswith("groq/"):
                  # Use direct Groq client - more reliable than litellm
                  groq_model = model.split("/", 1)[1]  # Extract model name
                  config = GroqConfig(
@@ -212,9 +245,57 @@ class LLMKnowledge(KnowledgeProvider):
                      timeout=timeout,
                  )
                  self._clients[model] = GeminiClient(config=gemini_config)
+             elif model.startswith("ollama/"):
+                 # Use local Ollama client for running models locally
+                 ollama_model = model.split("/", 1)[1]  # Extract model name
+                 ollama_config = OllamaConfig(
+                     model=ollama_model,
+                     temperature=temperature,
+                     max_tokens=max_tokens,
+                     timeout=timeout,
+                 )
+                 self._clients[model] = OllamaClient(config=ollama_config)
+             elif model.startswith("openai/"):
+                 # Use direct OpenAI client for GPT models
+                 openai_model = model.split("/", 1)[1]  # Extract model name
+                 openai_config = OpenAIConfig(
+                     model=openai_model,
+                     temperature=temperature,
+                     max_tokens=max_tokens,
+                     timeout=timeout,
+                 )
+                 self._clients[model] = OpenAIClient(config=openai_config)
+             elif model.startswith("deepseek/"):
+                 # Use direct DeepSeek client for DeepSeek models
+                 deepseek_model = model.split("/", 1)[1]  # Extract model name
+                 deepseek_config = DeepSeekConfig(
+                     model=deepseek_model,
+                     temperature=temperature,
+                     max_tokens=max_tokens,
+                     timeout=timeout,
+                 )
+                 self._clients[model] = DeepSeekClient(config=deepseek_config)
+             elif model.startswith("mistral/"):
+                 # Use direct Mistral client for Mistral models
+                 mistral_model = model.split("/", 1)[1]  # Extract model name
+                 mistral_config = MistralConfig(
+                     model=mistral_model,
+                     temperature=temperature,
+                     max_tokens=max_tokens,
+                     timeout=timeout,
+                 )
+                 self._clients[model] = MistralClient(config=mistral_config)
              else:
                  # Only direct API clients are supported
-                 supported_prefixes = ["groq/", "gemini/"]
+                 supported_prefixes = [
+                     "anthropic/",
+                     "deepseek/",
+                     "gemini/",
+                     "groq/",
+                     "mistral/",
+                     "ollama/",
+                     "openai/",
+                 ]
                  raise ValueError(
                      f"Model '{model}' not supported. "
                      f"Supported prefixes: {supported_prefixes}."
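Together with the import and docstring hunks above, this routing means a single LLMKnowledge instance can mix providers purely through the "provider/model" prefix of each identifier. A usage sketch, assuming the constructor arguments shown in this diff (the import path is assumed from the provider.py module these hunks modify, and the model strings are illustrative):

```python
from causaliq_knowledge.llm.provider import LLMKnowledge

provider = LLMKnowledge(
    models=[
        "groq/llama-3.1-8b-instant",  # default free-tier Groq model
        "openai/gpt-4o-mini",         # routed to OpenAIClient
        "ollama/llama3.2:1b",         # local Ollama server
    ],
    consensus_strategy="weighted_vote",  # or "highest_confidence"
)

# An identifier with an unrecognised prefix, e.g. "foo/bar", raises
# ValueError listing the supported prefixes, per the else branch above.
```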
@@ -262,35 +343,17 @@ class LLMKnowledge(KnowledgeProvider):
          responses: list[EdgeKnowledge] = []
          for model, client in self._clients.items():
              try:
-                 if isinstance(client, GroqClient):
-                     # Direct Groq API call
-                     messages = []
-                     if system_prompt:
-                         messages.append(
-                             {"role": "system", "content": system_prompt}
-                         )
-                     messages.append({"role": "user", "content": user_prompt})
-
-                     json_data, _ = client.complete_json(messages)
-                     knowledge = parse_edge_response(json_data, model=model)
-                     responses.append(knowledge)
-                 elif isinstance(client, GeminiClient):
-                     # Direct Gemini API call
-                     messages = []
-                     if system_prompt:
-                         messages.append(
-                             {"role": "system", "content": system_prompt}
-                         )
-                     messages.append({"role": "user", "content": user_prompt})
-
-                     json_data, _ = client.complete_json(messages)
-                     knowledge = parse_edge_response(json_data, model=model)
-                     responses.append(knowledge)
-                 else:
-                     # Should never reach here due to constructor validation
-                     raise ValueError(
-                         f"Unsupported client type for model {model}"
+                 # All clients implement BaseLLMClient interface
+                 messages = []
+                 if system_prompt:
+                     messages.append(
+                         {"role": "system", "content": system_prompt}
                      )
+                 messages.append({"role": "user", "content": user_prompt})
+
+                 json_data, _ = client.complete_json(messages)
+                 knowledge = parse_edge_response(json_data, model=model)
+                 responses.append(knowledge)
              except Exception as e:
                  # On error, add uncertain response
                  responses.append(
@@ -314,21 +377,11 @@ class LLMKnowledge(KnowledgeProvider):
          per_model: Dict[str, Dict[str, Any]] = {}

          for model, client in self._clients.items():
-             if isinstance(client, GroqClient):
-                 # Direct Groq client stats
-                 stats: Dict[str, Any] = {
-                     "call_count": client.call_count,
-                     "total_cost": 0.0,  # Free tier
-                 }
-             elif isinstance(client, GeminiClient):
-                 # Direct Gemini client stats
-                 stats = {
-                     "call_count": client.call_count,
-                     "total_cost": 0.0,  # Free tier
-                 }
-             else:
-                 # Should never reach here due to constructor validation
-                 raise ValueError(f"Unsupported client type for model {model}")
+             # All clients implement BaseLLMClient with call_count property
+             stats: Dict[str, Any] = {
+                 "call_count": client.call_count,
+                 "total_cost": 0.0,  # All current providers are free tier
+             }

              total_calls += stats["call_count"]
              total_cost += stats["total_cost"]
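For reference, the simplified loop above yields one stats entry per configured model. A sketch of the resulting shape, based only on the keys visible in this hunk (the enclosing method and any other fields are not shown in the diff, and the values are illustrative):

```python
per_model = {
    "groq/llama-3.1-8b-instant": {"call_count": 2, "total_cost": 0.0},
    "openai/gpt-4o-mini": {"call_count": 2, "total_cost": 0.0},
}
# Totals are accumulated across models, as in the context lines above.
total_calls = sum(s["call_count"] for s in per_model.values())
total_cost = sum(s["total_cost"] for s in per_model.values())
```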
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: causaliq-knowledge
- Version: 0.1.0
+ Version: 0.3.0
  Summary: Incorporating LLM and human knowledge into causal discovery
  Author-email: CausalIQ <info@causaliq.com>
  Maintainer-email: CausalIQ <info@causaliq.com>
@@ -85,18 +85,14 @@ print(f"Reasoning: {result.reasoning}")

  ## Features

- Under development:
-
- - **Release v0.1.0 - Foundation LLM**: Simple LLM queries to 1 or 2 LLMs about edge existence and orientation to support graph averaging
-
  Currently implemented releases:

- - None
+ - **Release v0.1.0 - Foundation LLM**: Simple LLM queries to 1 or 2 LLMs about edge existence and orientation to support graph averaging
+ - **Release v0.2.0 - Additional LLMs**: Support for 7 LLM providers (Groq, Gemini, OpenAI, Anthropic, DeepSeek, Mistral, Ollama)
+ - **Release v0.3.0 - LLM Caching** *(in development)*: SQLite-based response caching with CLI tools for cache management

  Planned:

- - **Release v0.2.0 - Additional LLMs**: Support for more LLM providers (OpenAI, Anthropic)
- - **Release v0.3.0 - LLM Caching**: Caching of LLM queries and responses
  - **Release v0.4.0 - LLM Context**: Variable/role/literature etc context
  - **Release v0.5.0 - Algorithm integration**: Integration into structure learning algorithms
  - **Release v0.6.0 - Legacy Reference**: Support for legacy approaches of deriving knowledge from reference networks
@@ -128,8 +124,11 @@ This approach keeps the package lightweight, reliable, and easy to debug.
  |----------|--------|--------|-----------|
  | **Groq** | `GroqClient` | llama-3.1-8b-instant | ✅ Generous |
  | **Google Gemini** | `GeminiClient` | gemini-2.5-flash | ✅ Generous |
-
- Additional providers (OpenAI, Anthropic) can be added in future releases.
+ | **OpenAI** | `OpenAIClient` | gpt-4o-mini | ❌ Paid |
+ | **Anthropic** | `AnthropicClient` | claude-sonnet-4-20250514 | ❌ Paid |
+ | **DeepSeek** | `DeepSeekClient` | deepseek-chat | ✅ Low cost |
+ | **Mistral** | `MistralClient` | mistral-small-latest | ❌ Paid |
+ | **Ollama** | `OllamaClient` | llama3 | ✅ Free (local) |

  ## Upcoming Key Innovations

@@ -0,0 +1,28 @@
+ causaliq_knowledge/__init__.py,sha256=3m-1i0_giGiTzvJj_8lDrMrvpDvnPD3IBOGlU3ZmxfM,843
+ causaliq_knowledge/base.py,sha256=GBG-sftOKkmUoQzTpm6anDTjP-2nInRZN_36dxoYhvk,2917
+ causaliq_knowledge/cli.py,sha256=FjdlpQ62Mm4SjWGLAaXnPdv8hYh73-IUweLQAhrBw9k,25010
+ causaliq_knowledge/models.py,sha256=tWGf186ASwO8NHiN97pEOLuBJmJI6Q9jvpU0mYZNdS0,4058
+ causaliq_knowledge/cache/__init__.py,sha256=Av92YdCdVTRt9TmB2edRsIFDxq3f1Qi0daq0sFV1rp0,549
+ causaliq_knowledge/cache/token_cache.py,sha256=dURih1jr0csVBxU1pCtmcjV48GnQeCnVGi3j1E0KY7Q,21845
+ causaliq_knowledge/cache/encoders/__init__.py,sha256=gZ7gw96paFDbnJuc4v1aJsEJfVinI4zc03tXyFvfZxo,461
+ causaliq_knowledge/cache/encoders/base.py,sha256=jK7--Or3lVp1UkKghKYFo_gKJp0HsMxosL_8eYL7RQQ,2679
+ causaliq_knowledge/cache/encoders/json_encoder.py,sha256=44mcYpT6vJaJT9ZwtnWwdxCvTXIFyoeolqyiAXrgH1o,15110
+ causaliq_knowledge/llm/__init__.py,sha256=30AL0h64zIkXoiqhMY7gjaf7mrtwtwMW38vzhns0My4,1663
+ causaliq_knowledge/llm/anthropic_client.py,sha256=dPFHYGWL4xwQCtmQuGwGY4DBKSINOgOS-11ekznaiXo,8719
+ causaliq_knowledge/llm/base_client.py,sha256=o2qWu2_ttKMHT4isdkY4VUjma3B3jtdx1vhOLXVFLX4,12249
+ causaliq_knowledge/llm/cache.py,sha256=gBjZaYNJZ8HF54Hk25RWGVOvdBFwVPAv78_GYaanRTc,12723
+ causaliq_knowledge/llm/deepseek_client.py,sha256=ZcOpgnYa66XHjiTaF5ekR_BtosRYvVmzlIafp_Gsx_A,3543
+ causaliq_knowledge/llm/gemini_client.py,sha256=XJMq9sPo7zExrALSr2rIRHLheSPqKo8ENG0KtdJ1cjw,9924
+ causaliq_knowledge/llm/groq_client.py,sha256=PnTXqtMF1Km9DY4HiCZXQ6LeOzdjZtQJaeuGe1GbeME,7531
+ causaliq_knowledge/llm/mistral_client.py,sha256=dTAOtymffCM1AJp5-JcfizofYrUA-jhKfHWrhZe2DDI,4187
+ causaliq_knowledge/llm/ollama_client.py,sha256=PPU3g-nD8D546zcYB3uGxZ9yVbU4Gngo3snM2tRFeTc,8612
+ causaliq_knowledge/llm/openai_client.py,sha256=MJmB6P32TZESMlXhn9d0-b3vFWXmf7ojHQ5CY8mCENI,3835
+ causaliq_knowledge/llm/openai_compat_client.py,sha256=L8ZW5csuhUePq4mt3EGOUqhR3tleFmM72UlhPBsgIMQ,9518
+ causaliq_knowledge/llm/prompts.py,sha256=bJ9iVGKUfTfLi2eWh-FFM4cNzk5Ux4Z0x8R6Ia27Dbo,6598
+ causaliq_knowledge/llm/provider.py,sha256=VDEv-1esT_EgJk_Gwlfl4423ojglOxzPCBCFbOFE4DQ,15184
+ causaliq_knowledge-0.3.0.dist-info/licenses/LICENSE,sha256=vUFUzQnti-D-MLSi9NxFlsFYOKwU25sxxH7WgJOQFIs,1084
+ causaliq_knowledge-0.3.0.dist-info/METADATA,sha256=MIE-z6VqrnzuhHpU8j0DzxB48zwyDIobseO2SltVe-0,8774
+ causaliq_knowledge-0.3.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ causaliq_knowledge-0.3.0.dist-info/entry_points.txt,sha256=8iQjiMgFxZszRWwSTGHvoOBb_OBUkMmwvH3PzgsH-Cc,104
+ causaliq_knowledge-0.3.0.dist-info/top_level.txt,sha256=GcxQf4BQAGa38i2-j8ylk2FmnBHtEZ9-8bSt-7Uka7k,19
+ causaliq_knowledge-0.3.0.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (80.9.0)
+ Generator: setuptools (80.10.2)
  Root-Is-Purelib: true
  Tag: py3-none-any

@@ -1,15 +0,0 @@
- causaliq_knowledge/__init__.py,sha256=E0QHb_Tu2lwzRVmyqbR357eFuusUeZ226SbD9y5UxCU,851
- causaliq_knowledge/base.py,sha256=GBG-sftOKkmUoQzTpm6anDTjP-2nInRZN_36dxoYhvk,2917
- causaliq_knowledge/cli.py,sha256=CFw6nhULlR6x3Wbu5uQNLd-mLpYu8TKQWA9xpeSRx6E,5695
- causaliq_knowledge/models.py,sha256=tWGf186ASwO8NHiN97pEOLuBJmJI6Q9jvpU0mYZNdS0,4058
- causaliq_knowledge/llm/__init__.py,sha256=hrb0AHRc3lRWFpcEtJmtZwA9QK3k6bxPymp08gXaiZk,791
- causaliq_knowledge/llm/gemini_client.py,sha256=fP9D6M0AFCtRnVbQrvaT-0EruQG0OWchdNaFS1Ph4z8,7046
- causaliq_knowledge/llm/groq_client.py,sha256=5-xfLvvSN8sd_T6niQE8ZLimG9sPCz_n5_nl0YNAqI4,4888
- causaliq_knowledge/llm/prompts.py,sha256=bJ9iVGKUfTfLi2eWh-FFM4cNzk5Ux4Z0x8R6Ia27Dbo,6598
- causaliq_knowledge/llm/provider.py,sha256=f9taEmbN0tJ4I04OHd6I-lSLAKEe3px3i0FJFj0N0EI,12995
- causaliq_knowledge-0.1.0.dist-info/licenses/LICENSE,sha256=vUFUzQnti-D-MLSi9NxFlsFYOKwU25sxxH7WgJOQFIs,1084
- causaliq_knowledge-0.1.0.dist-info/METADATA,sha256=dKZxJtIIFzkkLKefzwKvUp-9fYj4BqumnqWbNWVczws,8464
- causaliq_knowledge-0.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- causaliq_knowledge-0.1.0.dist-info/entry_points.txt,sha256=8iQjiMgFxZszRWwSTGHvoOBb_OBUkMmwvH3PzgsH-Cc,104
- causaliq_knowledge-0.1.0.dist-info/top_level.txt,sha256=GcxQf4BQAGa38i2-j8ylk2FmnBHtEZ9-8bSt-7Uka7k,19
- causaliq_knowledge-0.1.0.dist-info/RECORD,,