civiclens-0.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
civiclens/__init__.py ADDED
@@ -0,0 +1 @@
+ __version__ = "0.3.0"
civiclens/cli.py ADDED
@@ -0,0 +1,151 @@
+ import argparse
+ import os
+ from enum import Enum
+ from civiclens.utils import add_ingest_cmd, add_query_cmd, add_info_cmd
+ from civiclens import __version__
+ from civiclens.llm import add_prompt_cmd
+ from civiclens.demo import PromptCommand
+
+
+ class Audience(str, Enum):
+     youth = "youth"
+     executive = "executive"
+     child = "child"
+
+
+ def positive_int(value):
+     """
+     Convert a string to a positive integer.
+
+     Args:
+         value (str): The string to convert to an integer
+
+     Returns:
+         int: The converted integer
+
+     Raises:
+         argparse.ArgumentTypeError: If the value is not a positive integer
+     """
+     try:
+         ivalue = int(value)
+     except ValueError:
+         raise argparse.ArgumentTypeError(f"{value!r} is not an integer")
+     if ivalue <= 0:
+         raise argparse.ArgumentTypeError("Value must be > 0")
+     return ivalue
+
+
+ EPILOG = """
+ Examples:
+ ==================================
+ Ingest documents from an S3 bucket:
+ civiclens ingest \\
+     --source s3 \\
+     --path s3://eo-archive \\
+     --chunk-size 1200
+
+ Query the system with audience adaptation:
+ civiclens query "How does this EO affect education?" --audience teen
+
+ Query with retrieval depth and source transparency:
+ civiclens query "How does this executive order affect education?" \\
+     --audience teen \\
+     --top-k 5 \\
+     --show-sources
+
+ Evaluate RAG system performance using benchmark data:
+ civiclens eval \\
+     --dataset benchmarks/eo_questions.json \\
+     --metrics faithfulness answer_relevance context_recall context_precision \\
+     --top-k 5 \\
+     --save-results
+
+ Copyright © 2026 CivicLens AI
+ """
+
+
+ def main():
+     """
+     Main entry point for the CivicLens AI CLI.
+
+     Parses command-line arguments with argparse and dispatches to the
+     sub-command function selected by the 'command' argument.
+
+     Available sub-commands:
+     - ingest: ingest documents into the vector database
+     - query: generate a civic summary for a given question
+     - prompt: send a question directly to the model
+     - info: display information about CivicLens AI
+     """
+     parser = argparse.ArgumentParser(
+         prog="civiclens",
+         epilog=EPILOG,
+         description="CivicLens AI – Document analysis and civic summarization CLI",
+         formatter_class=argparse.RawDescriptionHelpFormatter
+     )
+
+     parser.add_argument(
+         "--config",
+         "-c",
+         type=str,
+         help="Path to YAML or JSON config file"
+     )
+
+     parser.add_argument(
+         "--openai-key",
+         "--api-key",
+         default=os.getenv("OPENAI_API_KEY"),
+         help="OpenAI API key",
+         required=False
+     )
+
+     parser.add_argument(
+         "--audience",
+         "-a",
+         choices=[a.value for a in Audience],
+         default=Audience.youth.value
+     )
+
+     parser.add_argument(
+         "--top-k",
+         "-k",
+         type=positive_int,
+         default=5
+     )
+
+     parser.add_argument(
+         "--version",
+         "-v",
+         action="version",
+         version=f"%(prog)s {__version__}",
+         help="Show the version number and exit"
+     )
+
+     subparsers = parser.add_subparsers(
+         title="Available Commands",
+         dest="command",
+         required=True
+     )
+
+     # Register sub-commands
+     add_ingest_cmd(subparsers)
+     add_query_cmd(subparsers)
+     add_info_cmd(subparsers)
+     add_prompt_cmd(subparsers)
+
+     # Alternative class-based registration:
+     # PromptCommand.add_to_subparsers(subparsers)
+
+     try:
+         args = parser.parse_args()
+         args.func(args)
+     except SystemExit as exc:
+         # Only show the hint on error exits, not after --help/--version.
+         if exc.code not in (0, None):
+             print("Use --help to see available commands.")
+         raise
+
+
+ if __name__ == "__main__":
+     main()
civiclens/demo.py ADDED
@@ -0,0 +1,168 @@
+ import json
+ import requests
+ from rich.console import Console
+ from rich.panel import Panel
+ from rich.markdown import Markdown
+
+
+ class LLMConsoleRenderer:
+     """Handles rendering LLM output to the terminal using Rich."""
+
+     def __init__(self):
+         self.console = Console()
+
+     def print_response(self, response: str) -> None:
+         """
+         Print a formatted panel containing the LLM response.
+         """
+         self.console.print(
+             Panel(
+                 Markdown(response),
+                 title="🤖 LLM Response",
+                 border_style="cyan",
+                 padding=(1, 2),
+             )
+         )
+
+     def stream_response(self, chunks) -> None:
+         """
+         Stream a sequence of chunks from an LLM response.
+         """
+         self.console.print("\n🤖 LLM Response\n", style="bold cyan")
+         with self.console.status("Thinking...", spinner="dots"):
+             for chunk in chunks:
+                 self.console.print(chunk, end="", style="green")
+
+
+ class LLMClient:
+     """Client for querying a local or remote LLM endpoint."""
+
+     def __init__(
+         self,
+         url: str = "http://localhost:11434/api/chat",
+         model: str = "gpt-oss:20b",
+         temperature: float = 0.0,
+         max_tokens: int = 2048,
+         renderer: LLMConsoleRenderer | None = None,
+         timeout: int = 60,
+     ):
+         self.url = url
+         self.model = model
+         self.temperature = temperature
+         self.max_tokens = max_tokens
+         self.timeout = timeout
+         self.renderer = renderer or LLMConsoleRenderer()
+
+     def query(self, prompt: str) -> str:
+         """
+         Query the model and return the full response text.
+         """
+         payload = {
+             "model": self.model,
+             "messages": [
+                 {
+                     "role": "user",
+                     "content": prompt.strip(),
+                 }
+             ],
+             "options": {
+                 # A fixed seed plus temperature 0 makes responses deterministic.
+                 "seed": 123,
+                 "temperature": self.temperature,
+                 # num_predict caps generated tokens; num_ctx (used previously)
+                 # sets the context window, which is a different knob in Ollama.
+                 "num_predict": self.max_tokens,
+             },
+         }
+
+         response_text = ""
+
+         with requests.post(
+             self.url,
+             json=payload,
+             stream=True,
+             timeout=self.timeout,
+         ) as response:
+             response.raise_for_status()
+
+             # The endpoint streams one JSON object per line (NDJSON).
+             for line in response.iter_lines(decode_unicode=True):
+                 if not line:
+                     continue
+
+                 data = json.loads(line)
+                 if "message" in data:
+                     response_text += data["message"]["content"]
+
+         return response_text
+
+     def query_and_render(self, prompt: str) -> None:
+         """
+         Query the model and print the formatted response.
+         """
+         response = self.query(prompt)
+         self.renderer.print_response(response)
+
+
+ class PromptCommand:
+     """CLI command registration and handler."""
+
+     @staticmethod
+     def add_to_subparsers(subparsers):
+         """
+         Add the `prompt` command to the CLI.
+         """
+         parser = subparsers.add_parser(
+             "prompt",
+             help="Ask a question to the model",
+         )
+
+         parser.add_argument(
+             "question",
+             type=str,
+             nargs="?",  # optional, so the default below can actually apply
+             default="Hello, how can I help you?",
+             help="The question to ask the model",
+         )
+
+         parser.add_argument(
+             "--temperature",
+             "-t",
+             type=float,
+             default=0.0,
+             help="The temperature value for the model (default: 0.0)",
+         )
+
+         parser.add_argument(
+             "--max-tokens",
+             "-m",
+             type=int,
+             default=2048,
+             help="The maximum number of tokens to generate (default: 2048)",
+         )
+
+         parser.add_argument(
+             "--model",
+             type=str,
+             default="gpt-oss:20b",
+             help="The model to query (default: gpt-oss:20b)",
+         )
+
+         parser.add_argument(
+             "--url",
+             "-u",
+             type=str,
+             default="http://localhost:11434/api/chat",
+             help="The URL of the model to query",
+         )
+
+         parser.set_defaults(func=PromptCommand.run)
+
+     @staticmethod
+     def run(args):
+         """
+         CLI entrypoint for the prompt command.
+         """
+         client = LLMClient(
+             url=args.url,
+             model=args.model,
+             temperature=float(args.temperature),
+             max_tokens=int(args.max_tokens),
+         )
+
+         client.query_and_render(args.question)
civiclens/llm.py ADDED
@@ -0,0 +1,151 @@
+ import json
+ import requests
+ from rich.console import Console
+ from rich.panel import Panel
+ from rich.markdown import Markdown
+
+ console = Console()
+
+
+ def print_llm_response(response: str):
+     """
+     Print a formatted panel containing the LLM response.
+
+     Args:
+         response (str): The LLM response to print.
+     """
+     console.print(
+         Panel(
+             Markdown(response),
+             title="🤖 LLM Response",
+             border_style="cyan",
+             padding=(1, 2),
+         )
+     )
+
+
+ def stream_llm_response(chunks):
+     """
+     Stream a sequence of chunks from an LLM response to the console.
+
+     :param chunks: An iterable of strings, where each string is a chunk of the LLM response.
+     """
+     console.print("\n🤖 LLM Response\n", style="bold cyan")
+     with console.status("Thinking...", spinner="dots"):
+         for chunk in chunks:
+             console.print(chunk, end="", style="green")
+
+
+ def query_model(args):
+     """
+     Query a model using the parsed CLI arguments.
+
+     Args:
+         args: Parsed argparse arguments carrying question, model,
+             temperature, max_tokens, and url (see add_prompt_cmd).
+
+     Returns:
+         None. The accumulated model output is rendered to the console.
+     """
+     url = args.url
+     prompt = args.question.strip()
+     model = args.model
+     temperature = float(args.temperature)
+     max_tokens = int(args.max_tokens)
+
+     # Create the data payload as a dictionary
+     data = {
+         "model": model,
+         "messages": [
+             {
+                 "role": "user",
+                 "content": prompt
+             }
+         ],
+         "options": {
+             # A fixed seed plus temperature 0 makes responses deterministic.
+             "seed": 123,
+             "temperature": temperature,
+             # num_predict caps the number of generated tokens.
+             "num_predict": max_tokens
+         }
+     }
+
+     # Send the POST request and accumulate the streamed response.
+     with requests.post(url, json=data, stream=True, timeout=60) as r:
+         r.raise_for_status()
+         response_data = ""
+         for line in r.iter_lines(decode_unicode=True):
+             if not line:
+                 continue
+             response_json = json.loads(line)
+             if "message" in response_json:
+                 response_data += response_json["message"]["content"]
+
+     # Print the LLM response
+     # if stream:
+     #     stream_llm_response(response_data)
+     # else:
+     print_llm_response(response=response_data)
+
+
+ def add_prompt_cmd(subparsers):
+     """
+     Adds the prompt command to the CLI.
+
+     The prompt command asks a question to the model.
+
+     The available arguments are:
+     - question: The question to ask the model
+     - --temperature or -t: The temperature value for the model (default: 0.0)
+     - --max-tokens or -m: The maximum number of tokens to generate (default: 2048)
+     - --model: The model to query (default: gpt-oss:20b)
+     - --url or -u: The URL of the model to query
+     """
+     query = subparsers.add_parser(
+         "prompt",
+         help="Ask a question to the model")
+
+     query.add_argument(
+         "question",
+         type=str,
+         nargs="?",  # optional, so the default below can actually apply
+         default="Hello, how can I help you?",
+         help="The question to ask the model"
+     )
+
+     query.add_argument(
+         "--temperature",
+         "-t",
+         type=float,
+         default=0.0,
+         help="The temperature value for the model (default: 0.0)"
+     )
+
+     query.add_argument(
+         "--max-tokens",
+         "-m",
+         type=int,
+         default=2048,
+         help="The maximum number of tokens to generate (default: 2048)"
+     )
+
+     query.add_argument(
+         "--model",
+         type=str,
+         default="gpt-oss:20b",
+         help="The model to query (default: gpt-oss:20b)"
+     )
+
+     query.add_argument(
+         "--url",
+         "-u",
+         type=str,
+         default="http://localhost:11434/api/chat",
+         help="The URL of the model to query (default: http://localhost:11434/api/chat)"
+     )
+
+     query.set_defaults(func=query_model)
civiclens/utils.py ADDED
@@ -0,0 +1,402 @@
+ import argparse
+
+
+ def run_ingest(args):
+     """
+     Ingests documents into the vector database.
+
+     Args:
+         args: Parsed argparse arguments containing the ingest command arguments
+     """
+     print(f"Ingesting from {args.source} at {args.path}")
+
+
+ def add_info_cmd(subparsers):
+     info = subparsers.add_parser(
+         "info",
+         help="Display information about CivicLens AI"
+     )
+
+     info.add_argument(
+         "--short",
+         "-s",
+         action="store_true",
+         help="Show a brief description only"
+     )
+
+     info.add_argument(
+         "--json",
+         action="store_true",
+         help="Output system information in JSON format"
+     )
+
+     info.set_defaults(func=run_info)
+
+
+ def add_ingest_cmd(subparsers):
+     """
+     Adds the ingest command to the CLI.
+
+     The ingest command ingests documents into the vector database.
+
+     The available arguments are:
+     - --source: The document source (local or s3)
+     - --path: The directory path or S3 bucket URI of the documents to ingest
+     - --chunk-size: The number of documents to ingest at once (default: 1000)
+     """
+     ingest = subparsers.add_parser(
+         "ingest",
+         aliases=["i"],
+         help="Ingest documents into the vector database"
+     )
+
+     ingest.add_argument(
+         "--source",
+         "-s",
+         required=True,
+         choices=["local", "s3"],
+         help="Document source"
+     )
+
+     ingest.add_argument(
+         "--path",
+         "-p",
+         required=True,
+         help="Directory path or S3 bucket URI"
+     )
+
+     ingest.add_argument(
+         "--chunk-size",
+         "-cs",
+         type=int,
+         default=1000,
+         help="Number of documents to ingest at once (default: 1000)"
+     )
+
+     ingest.set_defaults(func=run_ingest)
+
+
+ def add_query_cmd(subparsers):
+     """
+     Adds the query command to the CLI.
+
+     The query command generates a civic summary for a given question.
+
+     The available arguments are:
+     - question: The question to be answered by the civic summary
+     - --audience: The target audience for the civic summary
+     - --top-k: The number of documents to retrieve
+     - --show-sources: Whether to show the sources behind the summary
+     """
+     query = subparsers.add_parser(
+         "query",
+         aliases=["q"],
+         help="Generate a civic summary for a given question")
+
+     query.add_argument("question", type=str)
+
+     query.add_argument(
+         "--audience",
+         "-a",
+         choices=["child", "teen", "executive"],
+         default="teen"
+     )
+
+     query.add_argument(
+         "--top-k",
+         "-k",
+         type=int,
+         default=5
+     )
+
+     query.add_argument(
+         "--show-sources",
+         "-s",
+         action="store_true"
+     )
+
+     query.set_defaults(func=run_query)
+
+
+ # def run_eval(args):
+ #     """
+ #     Run RAG evaluation on a benchmark dataset.
+ #     """
+ #     print("\n📊 Running CivicLens RAG Evaluation")
+ #     print(f"Dataset: {args.dataset}")
+ #     print(f"Metrics: {', '.join(args.metrics)}")
+ #     print(f"Top-K: {args.top_k}")
+ #     print("-" * 60)
+
+ #     dataset = load_eval_dataset(args.dataset)
+
+ #     results = []
+
+ #     for i, record in enumerate(dataset, 1):
+ #         question = record["question"]
+ #         ground_truth = record.get("ground_truth")
+
+ #         docs = retrieve_documents(
+ #             query=question,
+ #             top_k=args.top_k
+ #         )
+
+ #         answer, _ = generate_answer_and_summary(
+ #             question=question,
+ #             documents=docs,
+ #             audience="executive"
+ #         )
+
+ #         metrics = evaluate_answer(
+ #             question=question,
+ #             answer=answer,
+ #             documents=docs,
+ #             ground_truth=ground_truth,
+ #             metrics=args.metrics
+ #         )
+
+ #         results.append({
+ #             "test_number": i,
+ #             "question": question,
+ #             "answer": answer,
+ #             "ground_truth": ground_truth,
+ #             "metrics": metrics
+ #         })
+
+ #         print(f"✔ Test {i} completed")
+
+ #     summary = aggregate_metrics(results)
+
+ #     print("\n📈 Evaluation Summary")
+ #     for k, v in summary.items():
+ #         print(f"{k}: {v:.4f}")
+
+ #     if args.save_results:
+ #         save_eval_results(results, summary, args.project)
+
+
+ # def add_eval_cmd(subparsers):
+ #     eval_cmd = subparsers.add_parser(
+ #         "eval",
+ #         help="Evaluate RAG system performance on a benchmark dataset"
+ #     )
+
+ #     eval_cmd.add_argument(
+ #         "--dataset",
+ #         required=True,
+ #         help="Path to evaluation dataset (JSON, JSONL, or CSV)"
+ #     )
+
+ #     eval_cmd.add_argument(
+ #         "--project",
+ #         default="civiclens_evaluation",
+ #         help="Evaluation project name"
+ #     )
+
+ #     eval_cmd.add_argument(
+ #         "--metrics",
+ #         nargs="+",
+ #         choices=[
+ #             "faithfulness",
+ #             "answer_relevance",
+ #             "context_recall",
+ #             "context_precision"
+ #         ],
+ #         default=[
+ #             "faithfulness",
+ #             "answer_relevance",
+ #             "context_recall",
+ #             "context_precision"
+ #         ],
+ #         help="Evaluation metrics to compute"
+ #     )
+
+ #     eval_cmd.add_argument(
+ #         "--top-k",
+ #         type=int,
+ #         default=5,
+ #         help="Number of retrieved documents per query"
+ #     )
+
+ #     eval_cmd.add_argument(
+ #         "--save-results",
+ #         action="store_true",
+ #         help="Save evaluation results to disk"
+ #     )
+
+ #     eval_cmd.set_defaults(func=run_eval)
+
+
+ def retrieve_documents(query: str, top_k: int):
+     """
+     Retrieve top-k documents from the vector store.
+     """
+     # Placeholder stub, e.g. vector_db.similarity_search(query, k=top_k)
+     return [f"Placeholder document {i + 1} for: {query}" for i in range(top_k)]
+
+
+ def generate_answer_and_summary(question, documents, audience):
+     """
+     Generate a grounded answer and summary using retrieved documents.
+     """
+     # context = "\n\n".join(doc.page_content for doc in documents)
+
+     # prompt = USER_PROMPT_TEMPLATE.format(
+     #     input=question,
+     #     context=context
+     # )
+
+     # response = llm.invoke(prompt)
+
+     # Expected format:
+     # ANSWER:
+     # ...
+     # SUMMARY:
+     # ...
+     # Placeholder stub until the LLM call is wired in; run_query unpacks
+     # the return value as (answer, summary), so return a pair.
+     answer = f"Answer to {question!r} grounded in {len(documents)} documents"
+     summary = f"Summary tailored for a {audience} audience"
+     return answer, summary  # parse_answer_and_summary(response)
+
+
+ def run_query(args):
+     """
+     Execute a RAG query with audience-aware output.
+     """
+     # 1. Resolve configuration
+     audience = args.audience
+     top_k = args.top_k
+     show_sources = args.show_sources
+     question = args.question
+
+     print("\n🔍 Querying CivicLens AI")
+     print(f"Audience: {audience}")
+     print(f"Top-K Retrieval: {top_k}")
+     print("-" * 50)
+
+     # 2. Retrieve relevant documents
+     docs = retrieve_documents(
+         query=question,
+         top_k=top_k
+     )
+
+     if not docs:
+         print("No relevant documents found.")
+         return
+
+     # 3. Generate grounded answer
+     answer, summary = generate_answer_and_summary(
+         question=question,
+         documents=docs,
+         audience=audience
+     )
+
+     # 4. Display output
+     print("\nANSWER:\n")
+     print(answer)
+
+     print("\nSUMMARY:\n")
+     print(summary)
+
+     # 5. Optional: show sources for transparency
+     if show_sources:
+         print("\nSOURCES:\n")
+         for i, doc in enumerate(docs, 1):
+             # Real documents carry metadata; the placeholder stubs do not.
+             source = getattr(doc, "metadata", {}).get("source", "unknown")
+             print(f"[{i}] {source}")
+
+
+ def run_info(args):
+     """
+     Display CivicLens AI system information.
+     """
+     info_data = {
+         "name": "CivicLens AI",
+         "description": (
+             "A non-partisan AI system that translates complex U.S. government "
+             "documents into clear, age-appropriate explanations."
+         ),
+         "audiences": [
+             "Youth (ages 10-18)",
+             "Policymakers and government leaders"
+         ],
+         "capabilities": [
+             "Retrieval-Augmented Generation (RAG)",
+             "Multi-audience explanations from a single source document",
+             "Transparent source citation",
+             "Document-grounded responses"
+         ],
+         "ethics_and_safeguards": [
+             "No political persuasion or advocacy",
+             "No partisan bias",
+             "Explainable outputs",
+             "Human-in-the-loop oversight"
+         ],
+         "deployment": [
+             "Command-line interface (CLI)",
+             "Chainlit interactive UI",
+             "Local and S3-based document ingestion"
+         ],
+         "mission_alignment": [
+             "AI education",
+             "Civic literacy",
+             "Responsible governance"
+         ]
+     }
+
+     # -------------------------
+     # JSON Output
+     # -------------------------
+     if args.json:
+         import json
+         print(json.dumps(info_data, indent=2))
+         return
+
+     # -------------------------
+     # Short Text Output
+     # -------------------------
+     if args.short:
+         print(
+             "CivicLens AI is a non-partisan AI system that explains U.S. government "
+             "documents in clear, age-appropriate language for youth and policymakers."
+         )
+         return
+
+     # -------------------------
+     # Full Text Output
+     # -------------------------
+     print("""
+ CivicLens AI
+ ============
+
+ Purpose
+ -------
+ CivicLens AI is a non-partisan, responsible AI system designed to improve civic
+ literacy by translating complex U.S. government documents—such as Executive
+ Orders—into clear, accessible explanations.
+
+ Target Audiences
+ ----------------
+ • Youth (ages 10–18)
+ • Policymakers and senior government leaders
+
+ Core Capabilities
+ -----------------
+ • Retrieval-Augmented Generation (RAG)
+ • Multi-audience explanations from a single source document
+ • Transparent source citation and traceability
+ • Faithfulness-focused evaluation
+
+ Ethics & Safeguards
+ -------------------
+ • No political persuasion or advocacy
+ • No partisan bias
+ • Document-grounded responses only
+ • Explainable and auditable outputs
+ • Human oversight for high-impact decisions
+
+ Deployment
+ ----------
+ • CLI (argparse-based)
+ • Chainlit interactive interface
+ • Local and S3 document ingestion
+
+ Mission Alignment
+ -----------------
+ CivicLens AI advances AI education, civic understanding, and responsible use of
+ artificial intelligence in governance.
+ """)
+
@@ -0,0 +1,166 @@
+ Metadata-Version: 2.4
+ Name: civiclens
+ Version: 0.3.0
+ Summary: CivicLens AI – civic education and executive document analysis
+ Author-email: Theophilus Siameh <theodondre@gmail.com>
+ Requires-Python: >=3.10
+ Description-Content-Type: text/markdown
+ Requires-Dist: argparse>=1.4.0
+ Requires-Dist: requests>=2.32.5
+ Requires-Dist: rich>=14.2.0
+
+ A `Command Line Interface` (CLI) is best understood as a simple language you use to tell a computer what to do. Like natural language, it is structured around verbs (actions), nouns (things), and modifiers (details).
+
+ ### CLI Sentence Structure
+ - verb [options] noun [arguments] : do this action, in this way, to this thing, with these details
+ - `cp report.pdf backup/`
+   - Verb: `cp` (copy), Nouns: the source file and the destination directory
+ - `git commit -m "Fix login bug"`
+   - Tool: `git`, Verb: `commit`, Modifier: `-m`, Argument: the commit message
+ - `docker run -p 8080:80 nginx`
+   - Tool: `docker`, Verb: `run`, Modifier + argument: `-p 8080:80`, Noun: the `nginx` image
+   - 📖 English: `“Run an Nginx container, mapping port 8080 to 80.”`
+
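+ This grammar is exactly what Python's argparse implements: verbs become subcommands, nouns become positional arguments, and modifiers become flags. A minimal sketch (the `mycli` tool here is hypothetical, not part of this package):
+
+ ```python
+ import argparse
+
+ # Hypothetical "mycli" tool: verb = subcommand, noun = positional, modifier = flag.
+ parser = argparse.ArgumentParser(prog="mycli")
+ subparsers = parser.add_subparsers(dest="command", required=True)
+
+ # Verb: "copy"
+ copy_cmd = subparsers.add_parser("copy", help="Copy a file")
+ copy_cmd.add_argument("source")             # Noun: the thing acted on
+ copy_cmd.add_argument("destination")        # Noun: where it goes
+ copy_cmd.add_argument("--recursive", "-r",  # Modifier: how to act
+                       action="store_true")
+
+ args = parser.parse_args(["copy", "report.pdf", "backup/", "--recursive"])
+ print(args.command, args.source, args.destination, args.recursive)
+ # -> copy report.pdf backup/ True
+ ```
+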
+ ### Breaking Down CLI Commands
+ ```md
+ When reading or writing a command, ask:
+ - What action do I want? → Verb
+ - On what? → Noun
+ - How exactly? → Options / flags
+ - With what details? → Arguments
+ ```
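+
+ Applying the checklist to a command from the Demo section below: in `civiclens ingest --source s3 --path s3://eo-archive --chunk-size 1200`, the verb is `ingest` (the action), the noun is the S3 bucket `s3://eo-archive` supplied through `--path`, and `--source s3` and `--chunk-size 1200` are modifiers detailing how the ingestion should run.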
+
+ ### Verb (Command)
+ - The verb tells the system what action to perform.
+ ```bash
+ Examples:
+
+ ls → list
+ cp → copy
+ mv → move
+ rm → remove
+ git commit → commit changes
+
+ Think of verbs as actions.
+ ```
+
+ ### Noun (Target / Object)
+ - The noun is what the action is performed on.
+ ```md
+ Examples:
+
+ Files: `file.txt`
+ Directories: `src/`
+ Resources: `origin`, `main`, a container
+ ```
+
+ ### Adjectives / Modifiers (Options & Flags)
+ - Modifiers change how the verb behaves.
+ - They usually start with `-` (short flag) or `--` (long flag).
+ ```bash
+ Examples:
+
+ -l → long format
+ -a → include hidden items
+ --recursive → apply action recursively
+ ```
+
+
+ ## Demo
+ ```bash
+ civiclens --help
+ civiclens --version
+
+ civiclens ingest \
+     --source s3 \
+     --path s3://eo-archive \
+     --chunk-size 1200
+ ```
+
+ ### Example
+ ```bash
+ civiclens query "How does this EO affect education?" --audience teen
+
+ civiclens query "How does this executive order affect education?" \
+     --audience teen \
+     --top-k 5 \
+     --show-sources
+
+ civiclens prompt "How does this executive order affect education?" \
+     --temperature 0.0 \
+     --max-tokens 1024 \
+     --model "gpt-oss:20b" \
+     --url "http://localhost:11434/api/chat"
+ ```
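+
+ Under the hood, `civiclens prompt` posts to an Ollama-style `/api/chat` endpoint and accumulates a streamed, newline-delimited JSON response. A minimal sketch of the client loop, mirroring `civiclens/llm.py` and assuming such a server is running locally:
+
+ ```python
+ import json
+ import requests
+
+ payload = {
+     "model": "gpt-oss:20b",
+     "messages": [{"role": "user", "content": "How does this EO affect education?"}],
+     "options": {"seed": 123, "temperature": 0.0},
+ }
+
+ answer = ""
+ with requests.post("http://localhost:11434/api/chat",
+                    json=payload, stream=True, timeout=60) as r:
+     r.raise_for_status()
+     # The server streams one JSON object per line (NDJSON).
+     for line in r.iter_lines(decode_unicode=True):
+         if not line:
+             continue
+         chunk = json.loads(line)
+         # Each chunk carries a partial assistant message until the stream ends.
+         if "message" in chunk:
+             answer += chunk["message"]["content"]
+ print(answer)
+ ```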
+
+ ### Example
+ ```bash
+ civiclens eval \
+     --dataset benchmarks/eo_questions.json \
+     --metrics faithfulness answer_relevance context_recall context_precision \
+     --top-k 5 \
+     --save-results
+ ```
+
+ ### Add this to pyproject.toml
+ ```toml
+ [project]
+ name = "civiclens"
+ version = "0.3.0"
+ description = "CivicLens AI – civic education and executive document analysis"
+ requires-python = ">=3.10"
+
+ [project.scripts]
+ civiclens = "civiclens.cli:main"
+
+ [tool.uv]
+ package = true
+
+ [build-system]
+ requires = ["setuptools>=61"]
+ build-backend = "setuptools.build_meta"
+ ```
+
+ ### Install
+ ```bash
+ uv pip install -e .
+
+ which civiclens
+
+ civiclens --help
+ civiclens query "How does this EO affect education?"
+ civiclens eval --dataset benchmarks/eo_questions.json
+ ```
+
+ ### Docker deployment
+ ```bash
+ # Dockerfile
+ FROM python:3.11-slim
+
+ WORKDIR /app
+ COPY . .
+ RUN pip install .
+
+ ENTRYPOINT ["civiclens"]
+
+ # To build and run the Docker container
+ docker build -t civiclens-app .
+ docker run -it civiclens-app --help
+
+ docker run civiclens-app eval --dataset benchmarks/eo_questions.json
+ ```
+
+ ### Deploy to PyPI
+ ```bash
+ # Build the package
+ uv build
+
+ # Publish to https://pypi.org/
+ uv publish --username __token__ --password $PYPI_TOKEN
+ ```
+
@@ -0,0 +1,10 @@
+ civiclens/__init__.py,sha256=Pru0BlFBASFCFo7McHdohtKkUtgMPDwbGfyUZlE2_Vw,21
+ civiclens/cli.py,sha256=r7T-vK2rjX-QVqzx_-IoAwKc52j8aKL-bkjn5aSUcnc,3619
+ civiclens/demo.py,sha256=V14-1pFBNaQoSK94lNfPzsQYTauJTPt1SToguemNeX8,4537
+ civiclens/llm.py,sha256=Tj8UOSRQzKLmkK5EcNYzvKCTaKEl3FTu1SnZTcG7WTI,4161
+ civiclens/utils.py,sha256=ByCcREoLznpb83nGPLcBs_OcNczw1-3Cae5umqCQE6k,10210
+ civiclens-0.3.0.dist-info/METADATA,sha256=bz3akPVpnoi1cBvZuLx_FxWkD3N8kTk-sk4akZNzKPo,3619
+ civiclens-0.3.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ civiclens-0.3.0.dist-info/entry_points.txt,sha256=SLVab77DazFUW0bT3GoqKuBMQKHPuCGSxXYej1u7QsE,49
+ civiclens-0.3.0.dist-info/top_level.txt,sha256=R-jxhqjzbvdCMGV1x8UBiziQWRes5XLm5rPYJlzk394,10
+ civiclens-0.3.0.dist-info/RECORD,,
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: setuptools (80.9.0)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
@@ -0,0 +1,2 @@
+ [console_scripts]
+ civiclens = civiclens.cli:main
@@ -0,0 +1 @@
+ civiclens