causaliq-knowledge 0.2.0__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. causaliq_knowledge/__init__.py +6 -3
  2. causaliq_knowledge/action.py +480 -0
  3. causaliq_knowledge/cache/__init__.py +18 -0
  4. causaliq_knowledge/cache/encoders/__init__.py +13 -0
  5. causaliq_knowledge/cache/encoders/base.py +90 -0
  6. causaliq_knowledge/cache/encoders/json_encoder.py +430 -0
  7. causaliq_knowledge/cache/token_cache.py +666 -0
  8. causaliq_knowledge/cli/__init__.py +15 -0
  9. causaliq_knowledge/cli/cache.py +478 -0
  10. causaliq_knowledge/cli/generate.py +410 -0
  11. causaliq_knowledge/cli/main.py +172 -0
  12. causaliq_knowledge/cli/models.py +309 -0
  13. causaliq_knowledge/graph/__init__.py +78 -0
  14. causaliq_knowledge/graph/generator.py +457 -0
  15. causaliq_knowledge/graph/loader.py +222 -0
  16. causaliq_knowledge/graph/models.py +426 -0
  17. causaliq_knowledge/graph/params.py +175 -0
  18. causaliq_knowledge/graph/prompts.py +445 -0
  19. causaliq_knowledge/graph/response.py +392 -0
  20. causaliq_knowledge/graph/view_filter.py +154 -0
  21. causaliq_knowledge/llm/base_client.py +147 -1
  22. causaliq_knowledge/llm/cache.py +443 -0
  23. causaliq_knowledge/py.typed +0 -0
  24. {causaliq_knowledge-0.2.0.dist-info → causaliq_knowledge-0.4.0.dist-info}/METADATA +10 -6
  25. causaliq_knowledge-0.4.0.dist-info/RECORD +42 -0
  26. {causaliq_knowledge-0.2.0.dist-info → causaliq_knowledge-0.4.0.dist-info}/WHEEL +1 -1
  27. {causaliq_knowledge-0.2.0.dist-info → causaliq_knowledge-0.4.0.dist-info}/entry_points.txt +3 -0
  28. causaliq_knowledge/cli.py +0 -414
  29. causaliq_knowledge-0.2.0.dist-info/RECORD +0 -22
  30. {causaliq_knowledge-0.2.0.dist-info → causaliq_knowledge-0.4.0.dist-info}/licenses/LICENSE +0 -0
  31. {causaliq_knowledge-0.2.0.dist-info → causaliq_knowledge-0.4.0.dist-info}/top_level.txt +0 -0
causaliq_knowledge/llm/cache.py
@@ -0,0 +1,443 @@
+"""
+LLM-specific cache encoder and data structures.
+
+This module provides the LLMEntryEncoder for caching LLM requests and
+responses with rich metadata for analysis.
+
+Note: This module stays in causaliq-knowledge (LLM-specific).
+The base cache infrastructure will migrate to causaliq-core.
+"""
+
+from __future__ import annotations
+
+import json
+from dataclasses import asdict, dataclass, field
+from datetime import datetime, timezone
+from pathlib import Path
+from typing import TYPE_CHECKING, Any
+
+from causaliq_knowledge.cache.encoders import JsonEncoder
+
+if TYPE_CHECKING:  # pragma: no cover
+    from causaliq_knowledge.cache.token_cache import TokenCache
+
+
+@dataclass
+class LLMTokenUsage:
+    """Token usage statistics for an LLM request.
+
+    Attributes:
+        input: Number of tokens in the prompt.
+        output: Number of tokens in the completion.
+        total: Total tokens (input + output).
+    """
+
+    input: int = 0
+    output: int = 0
+    total: int = 0
+
+
+@dataclass
+class LLMMetadata:
+    """Metadata for a cached LLM response.
+
+    Attributes:
+        provider: LLM provider name (openai, anthropic, etc.).
+        timestamp: When the original request was made (ISO format).
+        latency_ms: Response time in milliseconds.
+        tokens: Token usage statistics.
+        cost_usd: Estimated cost of the request in USD.
+        cache_hit: Whether this was served from cache.
+        request_id: Optional identifier for the request (not in cache key).
+    """
+
+    provider: str = ""
+    timestamp: str = ""
+    latency_ms: int = 0
+    tokens: LLMTokenUsage = field(default_factory=LLMTokenUsage)
+    cost_usd: float = 0.0
+    cache_hit: bool = False
+    request_id: str = ""
+
+    def to_dict(self) -> dict[str, Any]:
+        """Convert to dictionary for JSON serialisation."""
+        return {
+            "provider": self.provider,
+            "timestamp": self.timestamp,
+            "latency_ms": self.latency_ms,
+            "tokens": asdict(self.tokens),
+            "cost_usd": self.cost_usd,
+            "cache_hit": self.cache_hit,
+            "request_id": self.request_id,
+        }
+
+    @classmethod
+    def from_dict(cls, data: dict[str, Any]) -> LLMMetadata:
+        """Create from dictionary."""
+        tokens_data = data.get("tokens", {})
+        return cls(
+            provider=data.get("provider", ""),
+            timestamp=data.get("timestamp", ""),
+            latency_ms=data.get("latency_ms", 0),
+            tokens=LLMTokenUsage(
+                input=tokens_data.get("input", 0),
+                output=tokens_data.get("output", 0),
+                total=tokens_data.get("total", 0),
+            ),
+            cost_usd=data.get("cost_usd", 0.0),
+            cache_hit=data.get("cache_hit", False),
+            request_id=data.get("request_id", ""),
+        )
+
+
+@dataclass
+class LLMResponse:
+    """LLM response data for caching.
+
+    Attributes:
+        content: The full text response from the LLM.
+        finish_reason: Why generation stopped (stop, length, etc.).
+        model_version: Actual model version used.
+    """
+
+    content: str = ""
+    finish_reason: str = "stop"
+    model_version: str = ""
+
+    def to_dict(self) -> dict[str, Any]:
+        """Convert to dictionary for JSON serialisation."""
+        return {
+            "content": self.content,
+            "finish_reason": self.finish_reason,
+            "model_version": self.model_version,
+        }
+
+    def to_export_dict(self) -> dict[str, Any]:
+        """Convert to dictionary for export, parsing JSON content if valid.
+
+        Unlike to_dict(), this attempts to parse the content as JSON
+        for more readable exported files.
+        """
+        # Try to parse content as JSON for cleaner export
+        try:
+            parsed_content = json.loads(self.content)
+        except (json.JSONDecodeError, TypeError):
+            parsed_content = self.content
+
+        return {
+            "content": parsed_content,
+            "finish_reason": self.finish_reason,
+            "model_version": self.model_version,
+        }
+
+    @classmethod
+    def from_dict(cls, data: dict[str, Any]) -> LLMResponse:
+        """Create from dictionary."""
+        content = data.get("content", "")
+        # Handle both string and parsed JSON content (from export files)
+        if isinstance(content, dict):
+            content = json.dumps(content)
+        return cls(
+            content=content,
+            finish_reason=data.get("finish_reason", "stop"),
+            model_version=data.get("model_version", ""),
+        )
+
+
+@dataclass
+class LLMCacheEntry:
+    """Complete LLM cache entry with request, response, and metadata.
+
+    Attributes:
+        model: The model name requested.
+        messages: The conversation messages.
+        temperature: Sampling temperature.
+        max_tokens: Maximum tokens in response.
+        response: The LLM response data.
+        metadata: Rich metadata for analysis.
+    """
+
+    model: str = ""
+    messages: list[dict[str, Any]] = field(default_factory=list)
+    temperature: float = 0.0
+    max_tokens: int | None = None
+    response: LLMResponse = field(default_factory=LLMResponse)
+    metadata: LLMMetadata = field(default_factory=LLMMetadata)
+
+    @staticmethod
+    def _split_message_content(messages: list[dict[str, Any]]) -> list[Any]:
+        """Convert message content with newlines into arrays of lines."""
+        result = []
+        for msg in messages:
+            new_msg = dict(msg)
+            content = new_msg.get("content", "")
+            if isinstance(content, str) and "\n" in content:
+                new_msg["content"] = content.split("\n")
+            result.append(new_msg)
+        return result
+
+    @staticmethod
+    def _join_message_content(messages: list[Any]) -> list[dict[str, Any]]:
+        """Convert message content arrays back into strings with newlines."""
+        result = []
+        for msg in messages:
+            new_msg = dict(msg)
+            content = new_msg.get("content", "")
+            if isinstance(content, list):
+                new_msg["content"] = "\n".join(content)
+            result.append(new_msg)
+        return result
+
+    def to_dict(self) -> dict[str, Any]:
+        """Convert to dictionary for JSON serialisation."""
+        return {
+            "cache_key": {
+                "model": self.model,
+                "messages": self.messages,
+                "temperature": self.temperature,
+                "max_tokens": self.max_tokens,
+            },
+            "response": self.response.to_dict(),
+            "metadata": self.metadata.to_dict(),
+        }
+
+    def to_export_dict(self) -> dict[str, Any]:
+        """Convert to dictionary for export with readable formatting.
+
+        - Message content with newlines is split into arrays of lines
+        - Response JSON content is parsed into a proper JSON structure
+        """
+        return {
+            "cache_key": {
+                "model": self.model,
+                "messages": self._split_message_content(self.messages),
+                "temperature": self.temperature,
+                "max_tokens": self.max_tokens,
+            },
+            "response": self.response.to_export_dict(),
+            "metadata": self.metadata.to_dict(),
+        }
+
+    @classmethod
+    def from_dict(cls, data: dict[str, Any]) -> LLMCacheEntry:
+        """Create from dictionary.
+
+        Handles both internal format (string content) and export format
+        (array of lines for content).
+        """
+        cache_key = data.get("cache_key", {})
+        messages = cache_key.get("messages", [])
+        # Handle export format where content is array of lines
+        messages = cls._join_message_content(messages)
+        return cls(
+            model=cache_key.get("model", ""),
+            messages=messages,
+            temperature=cache_key.get("temperature", 0.0),
+            max_tokens=cache_key.get("max_tokens"),
+            response=LLMResponse.from_dict(data.get("response", {})),
+            metadata=LLMMetadata.from_dict(data.get("metadata", {})),
+        )
+
+    @classmethod
+    def create(
+        cls,
+        model: str,
+        messages: list[dict[str, Any]],
+        content: str,
+        *,
+        temperature: float = 0.0,
+        max_tokens: int | None = None,
+        finish_reason: str = "stop",
+        model_version: str = "",
+        provider: str = "",
+        latency_ms: int = 0,
+        input_tokens: int = 0,
+        output_tokens: int = 0,
+        cost_usd: float = 0.0,
+        request_id: str = "",
+    ) -> LLMCacheEntry:
+        """Create a cache entry with common parameters.
+
+        Args:
+            model: The model name requested.
+            messages: The conversation messages.
+            content: The response content.
+            temperature: Sampling temperature.
+            max_tokens: Maximum tokens in response.
+            finish_reason: Why generation stopped.
+            model_version: Actual model version.
+            provider: LLM provider name.
+            latency_ms: Response time in milliseconds.
+            input_tokens: Number of input tokens.
+            output_tokens: Number of output tokens.
+            cost_usd: Estimated cost in USD.
+            request_id: Optional identifier for the request (not part of hash).
+
+        Returns:
+            Configured LLMCacheEntry.
+        """
+        return cls(
+            model=model,
+            messages=messages,
+            temperature=temperature,
+            max_tokens=max_tokens,
+            response=LLMResponse(
+                content=content,
+                finish_reason=finish_reason,
+                model_version=model_version or model,
+            ),
+            metadata=LLMMetadata(
+                provider=provider,
+                timestamp=datetime.now(timezone.utc).isoformat(),
+                latency_ms=latency_ms,
+                tokens=LLMTokenUsage(
+                    input=input_tokens,
+                    output=output_tokens,
+                    total=input_tokens + output_tokens,
+                ),
+                cost_usd=cost_usd,
+                cache_hit=False,
+                request_id=request_id,
+            ),
+        )
+
+
+class LLMEntryEncoder(JsonEncoder):
+    """Encoder for LLM cache entries.
+
+    Extends JsonEncoder with LLM-specific convenience methods for
+    encoding/decoding LLMCacheEntry objects.
+
+    The encoder stores data in the standard JSON tokenised format,
+    achieving 50-70% compression through the shared token dictionary.
+
+    Example:
+        >>> from causaliq_knowledge.cache import TokenCache
+        >>> from causaliq_knowledge.llm.cache import (
+        ...     LLMEntryEncoder, LLMCacheEntry,
+        ... )
+        >>> with TokenCache(":memory:") as cache:
+        ...     encoder = LLMEntryEncoder()
+        ...     entry = LLMCacheEntry.create(
+        ...         model="gpt-4",
+        ...         messages=[{"role": "user", "content": "Hello"}],
+        ...         content="Hi there!",
+        ...         provider="openai",
+        ...     )
+        ...     blob = encoder.encode(entry.to_dict(), cache)
+        ...     data = encoder.decode(blob, cache)
+        ...     restored = LLMCacheEntry.from_dict(data)
+    """
+
+    def encode_entry(self, entry: LLMCacheEntry, cache: TokenCache) -> bytes:
+        """Encode an LLMCacheEntry to bytes.
+
+        Convenience method that handles to_dict conversion.
+
+        Args:
+            entry: The cache entry to encode.
+            cache: TokenCache for token dictionary.
+
+        Returns:
+            Encoded bytes.
+        """
+        return self.encode(entry.to_dict(), cache)
+
+    def decode_entry(self, blob: bytes, cache: TokenCache) -> LLMCacheEntry:
+        """Decode bytes to an LLMCacheEntry.
+
+        Convenience method that handles from_dict conversion.
+
+        Args:
+            blob: Encoded bytes.
+            cache: TokenCache for token dictionary.
+
+        Returns:
+            Decoded LLMCacheEntry.
+        """
+        data = self.decode(blob, cache)
+        return LLMCacheEntry.from_dict(data)
+
+    def generate_export_filename(
+        self, entry: LLMCacheEntry, cache_key: str
+    ) -> str:
+        """Generate a human-readable filename for export.
+
+        Creates a filename using request_id, timestamp, and provider:
+        {request_id}_{yyyy-mm-dd-hhmmss}_{provider}.json
+
+        If request_id is not set, falls back to a short hash prefix.
+
+        Args:
+            entry: The cache entry to generate filename for.
+            cache_key: The cache key (hash) for fallback uniqueness.
+
+        Returns:
+            Human-readable filename with .json extension.
+
+        Example:
+            >>> encoder = LLMEntryEncoder()
+            >>> entry = LLMCacheEntry.create(
+            ...     model="gpt-4",
+            ...     messages=[{"role": "user", "content": "test"}],
+            ...     content="Response",
+            ...     provider="openai",
+            ...     request_id="expt23",
+            ... )
+            >>> # Returns something like: expt23_2026-01-29-143052_openai.json
+        """
+        import re
+        from datetime import datetime
+
+        # Get request_id or use hash prefix as fallback
+        request_id = entry.metadata.request_id or cache_key[:8]
+        # Sanitise request_id (alphanumeric, hyphens, underscores only)
+        request_id = re.sub(r"[^a-zA-Z0-9_-]", "", request_id)
+        if not request_id:
+            request_id = cache_key[:8] if cache_key else "unknown"
+
+        # Parse timestamp and format as yyyy-mm-dd-hhmmss
+        timestamp_str = entry.metadata.timestamp
+        if timestamp_str:
+            try:
+                # Parse ISO format timestamp
+                dt = datetime.fromisoformat(
+                    timestamp_str.replace("Z", "+00:00")
+                )
+                formatted_ts = dt.strftime("%Y-%m-%d-%H%M%S")
+            except ValueError:
+                formatted_ts = "unknown"
+        else:
+            formatted_ts = "unknown"
+
+        # Get provider, sanitised
+        provider = entry.metadata.provider or "unknown"
+        provider = re.sub(r"[^a-z0-9]", "", provider.lower())
+        if not provider:
+            provider = "unknown"

+        # Build filename: id_timestamp_provider.json
+        return f"{request_id}_{formatted_ts}_{provider}.json"
+
+    def export_entry(self, entry: LLMCacheEntry, path: Path) -> None:
+        """Export an LLMCacheEntry to a JSON file.
+
+        Uses to_export_dict() to parse JSON content for readability.
+
+        Args:
+            entry: The cache entry to export.
+            path: Destination file path.
+        """
+        self.export(entry.to_export_dict(), path)
+
+    def import_entry(self, path: Path) -> LLMCacheEntry:
+        """Import an LLMCacheEntry from a JSON file.
+
+        Args:
+            path: Source file path.
+
+        Returns:
+            Imported LLMCacheEntry.
+        """
+        data = self.import_(path)
+        return LLMCacheEntry.from_dict(data)
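The class docstring above already sketches the round-trip through the encoder; for orientation, the same flow is shown standalone below. This is a minimal sketch based solely on that docstring example: the TokenCache ":memory:" constructor argument and the exact encoder behaviour beyond what the docstring shows are assumptions, not verified API documentation.

from causaliq_knowledge.cache import TokenCache
from causaliq_knowledge.llm.cache import LLMCacheEntry, LLMEntryEncoder

# Open an in-memory token cache, as in the docstring example above.
with TokenCache(":memory:") as cache:
    encoder = LLMEntryEncoder()
    entry = LLMCacheEntry.create(
        model="gpt-4",
        messages=[{"role": "user", "content": "Hello"}],
        content="Hi there!",
        provider="openai",
    )
    # encode_entry/decode_entry wrap encode()/decode() with the
    # to_dict()/from_dict() conversions defined earlier in this file.
    blob = encoder.encode_entry(entry, cache)
    restored = encoder.decode_entry(blob, cache)
    assert restored.response.content == "Hi there!"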
causaliq_knowledge/py.typed (file without changes)
{causaliq_knowledge-0.2.0.dist-info → causaliq_knowledge-0.4.0.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: causaliq-knowledge
-Version: 0.2.0
+Version: 0.4.0
 Summary: Incorporating LLM and human knowledge into causal discovery
 Author-email: CausalIQ <info@causaliq.com>
 Maintainer-email: CausalIQ <info@causaliq.com>
@@ -24,6 +24,7 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.9
 Description-Content-Type: text/markdown
 License-File: LICENSE
+Requires-Dist: causaliq-workflow>=0.1.1.dev3
 Requires-Dist: click>=8.0.0
 Requires-Dist: httpx>=0.24.0
 Requires-Dist: pydantic>=2.0.0
@@ -32,7 +33,7 @@ Requires-Dist: causaliq-core>=0.3.0; extra == "dev"
 Requires-Dist: pytest>=7.0.0; extra == "dev"
 Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
 Requires-Dist: pytest-mock>=3.10.0; extra == "dev"
-Requires-Dist: black>=22.0.0; extra == "dev"
+Requires-Dist: black<26.0.0,>=25.0.0; extra == "dev"
 Requires-Dist: isort>=5.10.0; extra == "dev"
 Requires-Dist: flake8>=5.0.0; extra == "dev"
 Requires-Dist: mypy>=1.0.0; extra == "dev"
@@ -42,6 +43,7 @@ Requires-Dist: build>=0.8.0; extra == "dev"
 Requires-Dist: twine>=4.0.0; extra == "dev"
 Provides-Extra: test
 Requires-Dist: causaliq-core>=0.3.0; extra == "test"
+Requires-Dist: causaliq-workflow>=0.1.1.dev3; extra == "test"
 Requires-Dist: pytest>=7.0.0; extra == "test"
 Requires-Dist: pytest-cov>=4.0.0; extra == "test"
 Requires-Dist: pytest-mock>=3.10.0; extra == "test"
@@ -89,13 +91,15 @@ Currently implemented releases:
 
 - **Release v0.1.0 - Foundation LLM**: Simple LLM queries to 1 or 2 LLMs about edge existence and orientation to support graph averaging
 - **Release v0.2.0 - Additional LLMs**: Support for 7 LLM providers (Groq, Gemini, OpenAI, Anthropic, DeepSeek, Mistral, Ollama)
+- **Release v0.3.0 - LLM Caching**: SQLite-based response caching with CLI tools for cache management
+- **Release v0.4.0 - Graph Generation**: CLI and CausalIQ workflow action for LLM-generated causal graphs
 
 Planned:
 
-- **Release v0.3.0 - LLM Caching**: Caching of LLM queries and responses
-- **Release v0.4.0 - LLM Context**: Variable/role/literature etc context
-- **Release v0.5.0 - Algorithm integration**: Integration into structure learning algorithms
-- **Release v0.6.0 - Legacy Reference**: Support for legacy approaches of deriving knowledge from reference networks
+- **Release v0.5.0 - Graph Caching**: save generated graphs to Workflow caches
+- **Release v0.6.0 - LLM Cost Tracking**: Query LLM provider APIs for usage and cost statistics
+- **Release v0.7.0 - LLM Context**: Variable/role/literature etc context
+- **Release v0.8.0 - Algorithm integration**: Integration into structure learning algorithms
 
 ## Implementation Approach
 
causaliq_knowledge-0.4.0.dist-info/RECORD
@@ -0,0 +1,42 @@
+causaliq_knowledge/__init__.py,sha256=IVlm0G1g-xxJS13SFeC0h1D6LL7rfzX96F7rjfU-wqA,982
+causaliq_knowledge/action.py,sha256=X7EGSTV7IiwnO8cTcz5-ExXpRuwamSaC1jWpAc86i6I,16416
+causaliq_knowledge/base.py,sha256=GBG-sftOKkmUoQzTpm6anDTjP-2nInRZN_36dxoYhvk,2917
+causaliq_knowledge/models.py,sha256=tWGf186ASwO8NHiN97pEOLuBJmJI6Q9jvpU0mYZNdS0,4058
+causaliq_knowledge/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+causaliq_knowledge/cache/__init__.py,sha256=Av92YdCdVTRt9TmB2edRsIFDxq3f1Qi0daq0sFV1rp0,549
+causaliq_knowledge/cache/token_cache.py,sha256=o3qYGnc1e7nSJm9BsM6pmp6cbsPzhaCnEM1utCY682E,23085
+causaliq_knowledge/cache/encoders/__init__.py,sha256=gZ7gw96paFDbnJuc4v1aJsEJfVinI4zc03tXyFvfZxo,461
+causaliq_knowledge/cache/encoders/base.py,sha256=jK7--Or3lVp1UkKghKYFo_gKJp0HsMxosL_8eYL7RQQ,2679
+causaliq_knowledge/cache/encoders/json_encoder.py,sha256=7zN0qRdpVa8EZS08F22buyAYoIpnx5lngK5p1wK-3WI,15689
+causaliq_knowledge/cli/__init__.py,sha256=worFcurYE_T5_uYvnM4oL3aP3v_fSWLUhggbCP9TZMc,434
+causaliq_knowledge/cli/cache.py,sha256=w_fF3e8Ru7Sxl3FMgab_x2UWOfPMVq7BDvTUD-2Kayg,17863
+causaliq_knowledge/cli/generate.py,sha256=78c1GUv4w42qtPjy9NV0X1q5kw9ATl2yKcdl-KBTxhI,13187
+causaliq_knowledge/cli/main.py,sha256=MwUmokX9x3bL7E7pZMquWvVnKg3b-qdVk8VMR0ejK5o,4665
+causaliq_knowledge/cli/models.py,sha256=2ga5PWhOOo2vE6e3A3oxvO2FB88zztuRoUMPGlhyE6M,9587
+causaliq_knowledge/graph/__init__.py,sha256=920si3oBsuYIBW8gzHBYQnHCt9KupDdkPqVxTsj_py0,1952
+causaliq_knowledge/graph/generator.py,sha256=tM1KKKgpsiLLziCUKKnAiH9n1yO8zUnSFZ-QbFZKdJU,15971
+causaliq_knowledge/graph/loader.py,sha256=EO5Yj02qRrPY22rvfVk-LfXSZMVNEn37-H4u5kHCY0M,6615
+causaliq_knowledge/graph/models.py,sha256=4f9kaHHs9J_ma95EgV0GItliY-G4BLNNyIwBq8yTiVk,14924
+causaliq_knowledge/graph/params.py,sha256=RPviCO3ZOsOrm_rsysST4Y4hhWDN6jcJt46ajDvSY0M,5828
+causaliq_knowledge/graph/prompts.py,sha256=C29w5LQDf2tF9JeFADRrKSjkP6dVzjsa1FNX_6ndt70,15399
+causaliq_knowledge/graph/response.py,sha256=UaYbnVpfkWDZWMS9wQbEU4QP5In1YAqId2EuJ1V2kho,12437
+causaliq_knowledge/graph/view_filter.py,sha256=-ebhj8cXxgLimAeAZ023YgW6kI-c8jTp_LDKjYf1Kow,5297
+causaliq_knowledge/llm/__init__.py,sha256=30AL0h64zIkXoiqhMY7gjaf7mrtwtwMW38vzhns0My4,1663
+causaliq_knowledge/llm/anthropic_client.py,sha256=dPFHYGWL4xwQCtmQuGwGY4DBKSINOgOS-11ekznaiXo,8719
+causaliq_knowledge/llm/base_client.py,sha256=FJGX5QYawcelc3UScSMwvBJnKrUVR3PrBIY72KYthTU,12544
+causaliq_knowledge/llm/cache.py,sha256=6bpCyBv_bUorKceYc5qpgXi30A0tDRwAtlhxS3TQklE,15404
+causaliq_knowledge/llm/deepseek_client.py,sha256=ZcOpgnYa66XHjiTaF5ekR_BtosRYvVmzlIafp_Gsx_A,3543
+causaliq_knowledge/llm/gemini_client.py,sha256=XJMq9sPo7zExrALSr2rIRHLheSPqKo8ENG0KtdJ1cjw,9924
+causaliq_knowledge/llm/groq_client.py,sha256=PnTXqtMF1Km9DY4HiCZXQ6LeOzdjZtQJaeuGe1GbeME,7531
+causaliq_knowledge/llm/mistral_client.py,sha256=dTAOtymffCM1AJp5-JcfizofYrUA-jhKfHWrhZe2DDI,4187
+causaliq_knowledge/llm/ollama_client.py,sha256=PPU3g-nD8D546zcYB3uGxZ9yVbU4Gngo3snM2tRFeTc,8612
+causaliq_knowledge/llm/openai_client.py,sha256=MJmB6P32TZESMlXhn9d0-b3vFWXmf7ojHQ5CY8mCENI,3835
+causaliq_knowledge/llm/openai_compat_client.py,sha256=L8ZW5csuhUePq4mt3EGOUqhR3tleFmM72UlhPBsgIMQ,9518
+causaliq_knowledge/llm/prompts.py,sha256=bJ9iVGKUfTfLi2eWh-FFM4cNzk5Ux4Z0x8R6Ia27Dbo,6598
+causaliq_knowledge/llm/provider.py,sha256=VDEv-1esT_EgJk_Gwlfl4423ojglOxzPCBCFbOFE4DQ,15184
+causaliq_knowledge-0.4.0.dist-info/licenses/LICENSE,sha256=vUFUzQnti-D-MLSi9NxFlsFYOKwU25sxxH7WgJOQFIs,1084
+causaliq_knowledge-0.4.0.dist-info/METADATA,sha256=ZWEguAYGAWwk73VlIvb2KFXEyf37pTctjYZNlUgHWZM,9038
+causaliq_knowledge-0.4.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+causaliq_knowledge-0.4.0.dist-info/entry_points.txt,sha256=tuHaj0XNw9KySBpHOZeAC5Q08G96ftxocOy2POV1DdA,179
+causaliq_knowledge-0.4.0.dist-info/top_level.txt,sha256=GcxQf4BQAGa38i2-j8ylk2FmnBHtEZ9-8bSt-7Uka7k,19
+causaliq_knowledge-0.4.0.dist-info/RECORD,,
{causaliq_knowledge-0.2.0.dist-info → causaliq_knowledge-0.4.0.dist-info}/WHEEL
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (80.9.0)
+Generator: setuptools (80.10.2)
 Root-Is-Purelib: true
 Tag: py3-none-any
 
{causaliq_knowledge-0.2.0.dist-info → causaliq_knowledge-0.4.0.dist-info}/entry_points.txt
@@ -1,3 +1,6 @@
+[causaliq.actions]
+causaliq-knowledge = causaliq_knowledge:CausalIQAction
+
 [console_scripts]
 causaliq-knowledge = causaliq_knowledge.cli:main
 cqknow = causaliq_knowledge.cli:main
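The new [causaliq.actions] group registers causaliq_knowledge:CausalIQAction as a workflow action plugin alongside the existing console scripts. How causaliq-workflow itself consumes this group is not shown in this diff, but entry points in a named group can be discovered with the standard library along these lines (a hypothetical sketch; entry_points(group=...) needs Python 3.10+ or the importlib_metadata backport on 3.9):

from importlib.metadata import entry_points

# Discover any installed actions registered under the new group.
for ep in entry_points(group="causaliq.actions"):
    action_cls = ep.load()  # e.g. causaliq_knowledge:CausalIQAction
    print(ep.name, action_cls)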