owlmind-0.1.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
owlmind/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .ollama import Ollama
+
+ __version__ = "0.1.0"
owlmind/cli.py ADDED
@@ -0,0 +1,137 @@
+ import argparse
+ import os
+ import sys
+ from .ollama import Ollama
+
+ class Dispatcher:
+     """Orchestrates the execution of commands and resolves prompt sources."""
+
+     @staticmethod
+     def dispatch(args):
+         """Primary router that handles prompt resolution before execution."""
+         api = Ollama(host=args.host)
+
+         if args.command == "ping":
+             Dispatcher.handle_ping(api)
+         elif args.command == "info":
+             Dispatcher.handle_info(api, args)
+         elif args.command == "query":
+             # Resolve the prompt source (Raw text vs @file vs --input)
+             final_prompt = Dispatcher.resolve_prompt(args)
+             if not final_prompt:
+                 print("Error: No prompt provided. Use raw text, @file, or --input.", file=sys.stderr)
+                 sys.exit(1)
+
+             # Update args with the loaded content for the handler
+             args.prompt = final_prompt
+             Dispatcher.handle_query(api, args)
+
+     @staticmethod
+     def resolve_prompt(args):
+         """Determines the final prompt string based on priority."""
+         # Priority 1: Explicit --input flag
+         if getattr(args, 'input_file', None):
+             return Dispatcher.load_file(args.input_file)
+
+         # Priority 2: Shorthand @file syntax in the positional prompt
+         if args.prompt and args.prompt.startswith("@"):
+             return Dispatcher.load_file(args.prompt[1:])  # Strip '@'
+
+         # Priority 3: Standard raw text
+         return args.prompt
+
+     @staticmethod
+     def load_file(filepath):
+         """Reads and returns file content safely."""
+         try:
+             with open(filepath, 'r', encoding='utf-8') as f:
+                 return f.read().strip()
+         except FileNotFoundError:
+             print(f"Error: Prompt file not found: {filepath}", file=sys.stderr)
+             sys.exit(1)
+         except Exception as e:
+             print(f"Error reading file {filepath}: {e}", file=sys.stderr)
+             sys.exit(1)
+
+     @staticmethod
+     def handle_ping(api):
+         status = "ONLINE" if api.ping() else "OFFLINE"
+         print(f"Status: {status} ({api.host})")
+
+     @staticmethod
+     def handle_info(api, args):
+         print("--- OwlMind Configuration ---")
+         host_src = "ENV" if os.environ.get("OLLAMA_HOST") else "DEFAULT"
+         model_src = "ENV" if os.environ.get("OLLAMA_MODEL") else "DEFAULT"
+         active_model = getattr(args, 'model', os.environ.get("OLLAMA_MODEL", "llama3"))
+
+         print(f"Active Host : {args.host} ({host_src})")
+         print(f"Active Model : {active_model} ({model_src})")
+         print("-" * 30)
+
+         if api.ping():
+             models = api.info()
+             print(f"Remote Models at {api.host}:")
+             for m in models: print(f" - {m}")
+         else:
+             print("Remote Status: OFFLINE (Cannot fetch models)")
+
+         print("-" * 30)
+         print("HELP:")
+         print(" To change model: export OLLAMA_MODEL=model_name")
+         print(" To change host: export OLLAMA_HOST=url")
+         print(" To load prompt: owlmind query @file.txt")
+         print("-" * 30)
+
+     @staticmethod
+     def handle_query(api, args):
+         if not api.ping():
+             print(f"Error: Server {api.host} unreachable.", file=sys.stderr)
+             sys.exit(1)
+
+         stream = api.query(
+             model=args.model,
+             prompt=args.prompt,
+             temperature=args.temperature,
+             top_k=args.top_k,
+             top_p=args.top_p,
+             max_tokens=args.max_tokens,
+             num_ctx=args.num_ctx
+         )
+         for chunk in stream:
+             print(chunk['response'], end='', flush=True)
+         print()
+
+ def get_parser():
+     """Generates the argparse structure."""
+     parser = argparse.ArgumentParser(prog="owlmind")
+     parser.add_argument("--host", default=os.environ.get("OLLAMA_HOST", "http://localhost:11434"))
+
+     subparsers = parser.add_subparsers(dest="command", required=True)
+
+     subparsers.add_parser("ping")
+     subparsers.add_parser("info")
+
+     qp = subparsers.add_parser("query")
+     qp.add_argument("prompt", nargs="?", default=None, help="Prompt text or @filename")
+     qp.add_argument("--input", "-i", dest="input_file", help="Explicit path to a prompt file")
+
+     # Model & Sampling Params
+     qp.add_argument("--model", "-m", default=os.environ.get("OLLAMA_MODEL", "llama3"))
+     qp.add_argument("--temp", "-t", type=float, default=0.8, dest="temperature")
+     qp.add_argument("--top-k", "-k", type=int, default=40, dest="top_k")
+     qp.add_argument("--top-p", "-p", type=float, default=0.9, dest="top_p")
+     qp.add_argument("--max-tokens", "-n", type=int, default=128, dest="max_tokens")
+     qp.add_argument("--ctx-size", "-c", type=int, default=2048, dest="num_ctx")
+
+     return parser
+
+ def main():
+     parser = get_parser()
+     args = parser.parse_args()
+     Dispatcher.dispatch(args)
+
+ if __name__ == "__main__":
+     main()
+
+
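Because `cli.py` exposes `get_parser()` and `Dispatcher` at module level and guards `main()` with `if __name__ == "__main__"`, the command flow can also be exercised directly from Python, e.g. for a quick smoke test. A minimal sketch, assuming the package is installed and an Ollama server is running at the default host:

```python
# Sketch: drive the dispatcher without the console script.
# Assumes `pip install owlmind` and a reachable Ollama server.
from owlmind.cli import Dispatcher, get_parser

args = get_parser().parse_args(["ping"])  # equivalent to: owlmind ping
Dispatcher.dispatch(args)                 # prints "Status: ONLINE (...)" or "Status: OFFLINE (...)"
```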
owlmind/model.py ADDED
@@ -0,0 +1,21 @@
+ from abc import ABC, abstractmethod
+
+ class Model(ABC):
+     def __init__(self, host: str):
+         self.host = host
+
+     @abstractmethod
+     def ping(self) -> bool:
+         """Check if the provider is reachable."""
+         pass
+
+     @abstractmethod
+     def info(self) -> list:
+         """List available models."""
+         pass
+
+     @abstractmethod
+     def query(self, model: str, prompt: str, **options):
+         """Execute a completion request."""
+         pass
+
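This `Model` ABC is the provider-agnostic abstraction layer mentioned in the package README: a backend only has to implement `ping`, `info`, and `query`. A hypothetical sketch of an alternative provider (the `EchoModel` name and behaviour are illustrative, not part of the package):

```python
# Hypothetical example: a second backend satisfying the Model contract.
# EchoModel is not part of owlmind; it only illustrates the interface.
from owlmind.model import Model

class EchoModel(Model):
    def ping(self) -> bool:
        return True  # nothing remote to check

    def info(self) -> list:
        return ["echo"]  # a single pseudo-model

    def query(self, model: str, prompt: str, **options):
        # Yield chunks in the same {'response': ...} shape the CLI expects
        yield {"response": f"[{model}] {prompt}"}
```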
owlmind/ollama.py ADDED
@@ -0,0 +1,62 @@
+
+ import ollama
+ from .model import Model
+
+ class Ollama(Model):
+     """
+     Ollama implementation of the Model interface.
+     Ensures parameters are correctly mapped to the 'options' dictionary.
+     """
+     def __init__(self, host: str):
+         super().__init__(host)
+         # The host is passed directly to the Client constructor
+         self.client = ollama.Client(host=self.host)
+
+     def ping(self) -> bool:
+         """Checks connectivity by attempting to list models."""
+         try:
+             self.client.list()
+             return True
+         except Exception:
+             return False
+
+     def info(self) -> list:
+         """Fetches the list of available model names from the local server."""
+         try:
+             response = self.client.list()
+             # Extracts model names from the 'models' key in the response dictionary
+             return [m['model'] for m in response.get('models', [])]
+         except Exception:
+             return []
+
+     def query(self, model: str, prompt: str, **options):
+         """
+         Executes a generation request.
+         Crucial: Parameters like temperature MUST be inside the 'options' dict.
+         """
+         # Map our generic CLI terms to Ollama API specific keys
+         # num_predict = Max Tokens
+         # num_ctx = Context Window size
+         ollama_params = {
+             'temperature': options.get('temperature'),
+             'top_k': options.get('top_k'),
+             'top_p': options.get('top_p'),
+             'num_predict': options.get('max_tokens'),
+             'num_ctx': options.get('num_ctx'),
+             'seed': options.get('seed')  # Added for reproducibility testing
+         }
+
+         # Filter out None values so the Ollama server uses its internal defaults
+         # for any parameter the user didn't explicitly set via CLI flags.
+         clean_options = {k: v for k, v in ollama_params.items() if v is not None}
+
+         # The generate method takes model and prompt as top-level args,
+         # but all sampling/tuning parameters go into the 'options' keyword argument.
+         return self.client.generate(
+             model=model,
+             prompt=prompt,
+             stream=True,
+             options=clean_options
+         )
+
+
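Since `query()` returns the streaming generator from `ollama.Client.generate(..., stream=True)`, the wrapper can be used programmatically as well as through the CLI. A minimal sketch, assuming a local Ollama server and that the `llama3` model has already been pulled:

```python
# Sketch: programmatic use of the Ollama wrapper.
# Assumes an Ollama server at localhost:11434 with the llama3 model pulled.
from owlmind import Ollama

api = Ollama(host="http://localhost:11434")
if api.ping():
    stream = api.query(model="llama3", prompt="Say hello.", temperature=0.2, max_tokens=32)
    for chunk in stream:
        print(chunk["response"], end="", flush=True)
    print()
```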
owlmind-0.1.2.dist-info/METADATA ADDED
@@ -0,0 +1,109 @@
+ Metadata-Version: 2.4
+ Name: owlmind
+ Version: 0.1.2
+ Summary: A modular CLI for local Ollama control
+ Requires-Python: >=3.8
+ Description-Content-Type: text/markdown
+ Requires-Dist: ollama
+
+
+ <!---
+ <img src="https://generativeintelligencelab.ai/images/owlmind-banner.png" width=800>
+ --->
+
+ # OwlMind
+
+ <div align="left">
+ <img src="https://img.shields.io/badge/Generative_AI-Lab-blueviolet?style=for-the-badge&logo=openai&logoColor=white" alt="Generative AI" />
+ <img src="https://img.shields.io/badge/Ollama-Supported-000000?style=for-the-badge&logo=ollama&logoColor=white" alt="Ollama" />
+ <img src="https://img.shields.io/badge/Python-3.14-3776AB?style=for-the-badge&logo=python&logoColor=white" alt="Python 3.14" />
+ <img src="https://img.shields.io/badge/License-MIT-green?style=for-the-badge" alt="License" />
+ </div>
+
+ ---
+
+ ## Overview
+
+ The OwlMind Platform is an experimentation environment built by The Generative Intelligence Lab. It serves as a pedagogical sandbox where students and researchers can explore how Generative Intelligence works in practice. By providing a standardized CLI over a provider-agnostic abstraction layer, OwlMind lets learners compare LLM behavior under interchangeable sampling parameters in a controlled, observable setup.
+
+ Installation:
+
+ ```bash
+ pip install owlmind
+ ```
+
+ ---
+
+ ## Commands
+
+ #### Configuration
+ Control OwlMind via environment variables.
+
+ ```bash
+ # OLLAMA_HOST -- URL of the Ollama server
+ export OLLAMA_HOST=http://localhost:11434
+
+ # OLLAMA_MODEL -- Default model for queries (default: llama3)
+ export OLLAMA_MODEL=llama3
+ ```
+
+
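These variables only supply defaults: `get_parser()` reads them via `os.environ.get(...)`, so an explicit `--host` or `--model` flag still overrides them for a single invocation (the host and model names below are illustrative):

```bash
# Flags override the environment defaults for one invocation
owlmind --host http://192.168.1.50:11434 ping
owlmind query "Summarize this repo" --model mistral
```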
+ #### System Audit
+ View your current environment configuration.
+
+ ```bash
+ owlmind info
+ ```
+
+ #### Connectivity Check
+ Verify if your model provider is online.
+
+ ```bash
+ owlmind ping
+ ```
+
+
+ #### Generation
+ Run inference with full control over sampling parameters.
+
+ ```bash
+ owlmind query "How do AI-driven organizations scale?" --temp 1.2 --ctx-size 4096
+ ```
+
+ Possible parameters:
+
+ ```bash
+ $ owlmind query --help
+ usage: owlmind query [-h] [--input INPUT_FILE] [--model MODEL] [--temp TEMPERATURE] [--top-k TOP_K]
+                      [--top-p TOP_P] [--max-tokens MAX_TOKENS] [--ctx-size NUM_CTX]
+                      [prompt]
+
+ positional arguments:
+   prompt                Prompt text or @filename
+
+ options:
+   -h, --help            show this help message and exit
+   --input, -i INPUT_FILE
+                         Explicit path to a prompt file
+   --model, -m MODEL
+   --temp, -t TEMPERATURE
+   --top-k, -k TOP_K
+   --top-p, -p TOP_P
+   --max-tokens, -n MAX_TOKENS
+   --ctx-size, -c NUM_CTX
+ ```
+
+
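These flags are forwarded to Ollama's `options` dictionary by `ollama.py`: `--temp`, `--top-k` and `--top-p` pass through under the same names, while `--max-tokens` maps to `num_predict` and `--ctx-size` maps to `num_ctx`. For example:

```bash
# Low-temperature, longer completion from a prompt file
owlmind query @my_prompt.txt --temp 0.2 --max-tokens 512
```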
97
+ #### Prompt Loading (@file syntax)
98
+ OwlMind supports loading prompts directly from files using the @ prefix. This is ideal for long-form instructions or code analysis.
99
+
100
+ ```bash
101
+ owlmind query @my_prompt.txt
102
+ ```
103
+
104
+ Explicit Flag:
105
+
106
+ ```bash
107
+ owlmind query --input research_paper.md
108
+ ```
109
+
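When more than one source is given, `resolve_prompt` applies a fixed priority: `--input` first, then the `@file` shorthand, then raw text. In the following call the file contents are used and the positional text is ignored:

```bash
# --input takes priority over the positional prompt
owlmind query "this text is ignored" --input research_paper.md
```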
owlmind-0.1.2.dist-info/RECORD ADDED
@@ -0,0 +1,9 @@
+ owlmind/__init__.py,sha256=WdAUZIWDqq_7CtIT9yT_Eghr5Ph24Bdgdh7sC7h4WzA,50
+ owlmind/cli.py,sha256=38HbHyCnRe1Sqx79rA4ugZwSu_9hfaZhXrOsQckafto,4943
+ owlmind/model.py,sha256=J1X0gaqdZZhSJgfuhnQaD395O6gnCbr1JD1cGIfN8v0,462
+ owlmind/ollama.py,sha256=hUJja7RAtYg7GWWQHd6l3BUV4xMcAolrqJWwlz_h9fw,2212
+ owlmind-0.1.2.dist-info/METADATA,sha256=KczHGIGqb3Bs3C1BypgKA8eon6I-OZ68ymVBpUMRrpA,2871
+ owlmind-0.1.2.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ owlmind-0.1.2.dist-info/entry_points.txt,sha256=dtbjpXwYC8Nbe3CJ02gflnKOOhpQWk9u5gALeDHOWGk,45
+ owlmind-0.1.2.dist-info/top_level.txt,sha256=hZkLOzK2jV0_OPvcTpeIwlEQi869uqittXNzXF8AajE,8
+ owlmind-0.1.2.dist-info/RECORD,,
owlmind-0.1.2.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: setuptools (80.10.2)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
owlmind-0.1.2.dist-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
+ [console_scripts]
+ owlmind = owlmind.cli:main
owlmind-0.1.2.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ owlmind