owlmind 0.1.2__py3-none-any.whl → 0.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
owlmind/__init__.py CHANGED
@@ -1,3 +1,5 @@
1
- from .ollama import Ollama
1
+ """
2
+ OwlMind Framework - experimentation environment for Generative Intelligence Systems.
3
+ """
2
4
 
3
- __version__ = "0.1.0"
5
+ __version__ = "0.1.5"
owlmind/cli.py CHANGED
@@ -1,137 +1,236 @@
1
+
2
+ #!/usr/bin/env python3
3
+ ##
4
+ ## OwlMind Framework - experimentation environment for Generative Intelligence Systems.
5
+ ## cli.py — Command-line interface.
6
+ ##
7
+ # Copyright (c) 2025, The Generative Intelligence Lab
8
+ # https://github.com/genilab/owlmind
9
+ #
10
+ # Disclosure:
11
+ # This framework was developed using a 'vibe coding' approach. AI-synthesized logic was
12
+ # subjected to human review and manual refinement to guarantee functional
13
+ # integrity and structural clarity.
14
+ #
15
+
1
16
  import argparse
2
17
  import os
3
18
  import sys
4
- from .ollama import Ollama
19
+ import logging
20
+ from owlmind import __version__
21
+ from owlmind.models import Ollama
5
22
 
6
23
  class Dispatcher:
7
- """Orchestrates the execution of commands and resolves prompt sources."""
24
+ """Orchestrates components using the OwlMind Launch standards."""
25
+
26
+ DEFAULT_LOG_LEVEL = Ollama.LOG_CRITICAL
27
+
28
+ @staticmethod
29
+ def load_env(filepath=".env"):
30
+ """Manually parses a .env file and injects into os.environ."""
31
+ if not os.path.exists(filepath):
32
+ return
33
+ try:
34
+ with open(filepath, "r", encoding="utf-8") as f:
35
+ for line in f:
36
+ line = line.strip()
37
+ if not line or line.startswith("#") or "=" not in line:
38
+ continue
39
+ key, value = line.split("=", 1)
40
+ key, value = key.strip(), value.strip().strip("'").strip('"')
41
+ if key not in os.environ:
42
+ os.environ[key] = value
43
+ except Exception as e:
44
+ print(f"Warning: Could not load .env file: {e}", file=sys.stderr)
45
+
46
+ @staticmethod
47
+ def parse_params(raw_params: list) -> dict:
48
+ """
49
+ Parses parameters into a typed dictionary.
50
+ Handles: -p k=v,k2=v2 AND -p k=v -p k2=v2
51
+ """
52
+ params = {}
53
+ if not raw_params:
54
+ return params
55
+
56
+ # 1. Flatten the list and split by commas
57
+ # If raw_params is ['temp=1', 'seed=42,top_k=10'], this flattens it correctly
58
+ tokens = []
59
+ for item in raw_params:
60
+ tokens.extend(item.split(','))
61
+
62
+ for kv in tokens:
63
+ if "=" not in kv:
64
+ continue
65
+
66
+ k, v = kv.split("=", 1)
67
+ k, v = k.strip(), v.strip()
68
+
69
+ # 2. Type Inference (Boolean, Int, Float, String)
70
+ if v.lower() == "true":
71
+ v = True
72
+ elif v.lower() == "false":
73
+ v = False
74
+ else:
75
+ try:
76
+ # Try Integer
77
+ v = int(v)
78
+ except ValueError:
79
+ try:
80
+ # Try Float
81
+ v = float(v)
82
+ except ValueError:
83
+ # Keep as string (e.g., stop sequences or system prompts)
84
+ pass
85
+ params[k] = v
86
+
87
+ return params
88
+
8
89
 
9
90
  @staticmethod
10
91
  def dispatch(args):
11
- """Primary router that handles prompt resolution before execution."""
12
- api = Ollama(host=args.host)
92
+ # 1. Start with context
93
+ context = vars(args).copy()
94
+
95
+ # 2. Logic for Logging / Debug using Component constants
96
+ # We check the debug flag and map it to our integer constants
97
+ target_level = Ollama.LOG_DEBUG if context.get('debug') else Dispatcher.DEFAULT_LOG_LEVEL
98
+
99
+ # Set global logging level
100
+ logging.getLogger().setLevel(target_level)
101
+ logging.getLogger("httpx").setLevel(target_level)
102
+
103
+ # Pass the integer level into the context so it hits the property setter
104
+ context['log_level'] = target_level
105
+
106
+ # 3. Dynamic Parameter Parsing
107
+ dynamic_params = Dispatcher.parse_params(context.pop('params', []))
108
+ context.update(dynamic_params)
109
+
110
+ # 4. Resolve Input Logic
111
+ if args.command == "query":
112
+ user_input = Dispatcher.resolve_prompt(args)
113
+ if not user_input:
114
+ print("Error: No prompt provided.", file=sys.stderr)
115
+ sys.exit(1)
116
+ context['input'] = user_input
117
+
118
+ # 5. Initialize Component
119
+ # At this point, context contains everything (debug, command, url, model, etc.)
120
+ api = Ollama(**context)
121
+
122
+ # 6. Hide the CLI plumbing from the component session
123
+ # obfuscate() keeps these argparse-only keys out of session() exports
124
+ api.obfuscate(['debug', 'command', 'input_file', 'prompt'])
13
125
 
126
+ # 7. Route Command
14
127
  if args.command == "ping":
15
128
  Dispatcher.handle_ping(api)
16
129
  elif args.command == "info":
17
- Dispatcher.handle_info(api, args)
130
+ Dispatcher.handle_info(api)
18
131
  elif args.command == "query":
19
- # Resolve the prompt source (Raw text vs @file vs --input)
20
- final_prompt = Dispatcher.resolve_prompt(args)
21
- if not final_prompt:
22
- print("Error: No prompt provided. Use raw text, @file, or --input.", file=sys.stderr)
23
- sys.exit(1)
24
-
25
- # Update args with the loaded content for the handler
26
- args.prompt = final_prompt
27
- Dispatcher.handle_query(api, args)
132
+ Dispatcher.handle_query(api)
133
+
134
+ return
135
+
136
+
28
137
 
29
138
  @staticmethod
30
139
  def resolve_prompt(args):
31
- """Determines the final prompt string based on priority."""
32
- # Priority 1: Explicit --input flag
33
140
  if getattr(args, 'input_file', None):
34
141
  return Dispatcher.load_file(args.input_file)
35
-
36
- # Priority 2: Shorthand @file syntax in the positional prompt
37
- if args.prompt and args.prompt.startswith("@"):
38
- return Dispatcher.load_file(args.prompt[1:]) # Strip '@'
39
-
40
- # Priority 3: Standard raw text
41
- return args.prompt
142
+ prompt_val = getattr(args, 'prompt', None)
143
+ if prompt_val and prompt_val.startswith("@"):
144
+ return Dispatcher.load_file(prompt_val[1:])
145
+ return prompt_val
42
146
 
43
147
  @staticmethod
44
148
  def load_file(filepath):
45
- """Reads and returns file content safely."""
46
149
  try:
47
150
  with open(filepath, 'r', encoding='utf-8') as f:
48
151
  return f.read().strip()
49
- except FileNotFoundError:
50
- print(f"Error: Prompt file not found: {filepath}", file=sys.stderr)
51
- sys.exit(1)
52
152
  except Exception as e:
53
- print(f"Error reading file {filepath}: {e}", file=sys.stderr)
153
+ print(f"Error loading {filepath}: {e}", file=sys.stderr)
54
154
  sys.exit(1)
55
155
 
56
156
  @staticmethod
57
157
  def handle_ping(api):
58
158
  status = "ONLINE" if api.ping() else "OFFLINE"
59
- print(f"Status: {status} ({api.host})")
159
+ print(f"Status: {status} (Host: {api.url})")
60
160
 
61
161
  @staticmethod
62
- def handle_info(api, args):
63
- print("--- OwlMind Configuration ---")
64
- host_src = "ENV" if os.environ.get("OLLAMA_HOST") else "DEFAULT"
65
- model_src = "ENV" if os.environ.get("OLLAMA_MODEL") else "DEFAULT"
66
- active_model = getattr(args, 'model', os.environ.get("OLLAMA_MODEL", "llama3"))
67
-
68
- print(f"Active Host : {args.host} ({host_src})")
69
- print(f"Active Model : {active_model} ({model_src})")
70
- print("-" * 30)
71
-
72
- if api.ping():
73
- models = api.info()
74
- print(f"Remote Models at {api.host}:")
75
- for m in models: print(f" - {m}")
76
- else:
77
- print("Remote Status: OFFLINE (Cannot fetch models)")
78
-
79
- print("-" * 30)
80
- print("HELP:")
81
- print(" To change model: export OLLAMA_MODEL=model_name")
82
- print(" To change host: export OLLAMA_HOST=url")
83
- print(" To load prompt: owlmind query @file.txt")
84
- print("-" * 30)
162
+ def handle_info(api):
163
+ data = api.info()
164
+
165
+ print("-" * 40)
166
+ print(f"Status : {data['status']}")
167
+ print(f"Host : {api.url}")
168
+ print(f"Model : {api.model}")
169
+
170
+ print("-" * 40)
171
+ models_list = data.get('models', [])
172
+ print(f"Available Models: {len(models_list)}")
173
+ for m in models_list:
174
+ print(f" - {m}")
175
+
176
+ print("-" * 40)
177
+ return
85
178
 
86
179
  @staticmethod
87
- def handle_query(api, args):
180
+ def handle_query(api):
88
181
  if not api.ping():
89
- print(f"Error: Server {api.host} unreachable.", file=sys.stderr)
182
+ print(f"Error: Server {api.url} unreachable.", file=sys.stderr)
90
183
  sys.exit(1)
91
184
 
92
- stream = api.query(
93
- model=args.model,
94
- prompt=args.prompt,
95
- temperature=args.temperature,
96
- top_k=args.top_k,
97
- top_p=args.top_p,
98
- max_tokens=args.max_tokens,
99
- num_ctx=args.num_ctx
100
- )
101
- for chunk in stream:
102
- print(chunk['response'], end='', flush=True)
185
+ # step() yields chunks; we print them in real-time
186
+ if api.log_level == api.LOG_DEBUG:
187
+ print(f"--- Inference: {api.model} ---")
188
+ for chunk in api.step():
189
+ print(chunk, end="", flush=True)
190
+
103
191
  print()
192
+ return
104
193
 
105
194
  def get_parser():
106
- """Generates the argparse structure."""
107
195
  parser = argparse.ArgumentParser(prog="owlmind")
108
- parser.add_argument("--host", default=os.environ.get("OLLAMA_HOST", "http://localhost:11434"))
109
196
 
197
+ # Generate the helpers
198
+ param_list = ", ".join(Ollama.OLLAMA_PARAMS.keys())
199
+ param_help = f"Supports k=v, comma-separated pairs (k1=v1,k2=v2), or repeated -p flags. Options: {param_list}"
200
+
201
+ # Global arguments
202
+ parser.add_argument("--version", action="version", version=f"%(prog)s {__version__}")
203
+ parser.add_argument("--debug", action="store_true", help="Enable verbose telemetry and internal logs")
204
+ parser.add_argument("--url", dest="url", default=os.environ.get("OLLAMA_HOST", Ollama.DEFAULT_SERVER))
110
205
  subparsers = parser.add_subparsers(dest="command", required=True)
111
-
206
+
207
+ # ping / info
112
208
  subparsers.add_parser("ping")
113
209
  subparsers.add_parser("info")
114
210
 
211
+ # query
115
212
  qp = subparsers.add_parser("query")
116
- qp.add_argument("prompt", nargs="?", default=None, help="Prompt text or @filename")
117
- qp.add_argument("--input", "-i", dest="input_file", help="Explicit path to a prompt file")
118
-
119
- # Model & Sampling Params
120
- qp.add_argument("--model", "-m", default=os.environ.get("OLLAMA_MODEL", "llama3"))
121
- qp.add_argument("--temp", "-t", type=float, default=0.8, dest="temperature")
122
- qp.add_argument("--top-k", "-k", type=int, default=40, dest="top_k")
123
- qp.add_argument("--top-p", "-p", type=float, default=0.9, dest="top_p")
124
- qp.add_argument("--max-tokens", "-n", type=int, default=128, dest="max_tokens")
125
- qp.add_argument("--ctx-size", "-c", type=int, default=2048, dest="num_ctx")
213
+ qp.add_argument("prompt", nargs="?", default=None)
214
+ qp.add_argument("--input", "-i", dest="input_file")
215
+ qp.add_argument("--model", "-m", default=os.environ.get("OLLAMA_MODEL", Ollama.DEFAULT_MODEL))
216
+ qp.add_argument("--params", "-p", action="append", dest="params", help=param_help)
126
217
 
127
218
  return parser
128
219
 
220
+
221
+ ##
222
+ ## ENTRY POINT
223
+ ##
224
+
129
225
  def main():
226
+ Dispatcher.load_env()
130
227
  parser = get_parser()
131
228
  args = parser.parse_args()
132
229
  Dispatcher.dispatch(args)
133
230
 
134
- if __name__ == "__main__":
135
- main()
136
-
137
231
 
232
+ ##
233
+ ## EXECUTOR
234
+ ##
235
+ if __name__ == "__main__":
236
+ main()
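For reference, a minimal sketch of how the parameter parsing defined above could behave. The values are illustrative only and assume owlmind 0.1.5 is installed; no Ollama server is needed for this part.

```python
# Illustrative only: exercises Dispatcher.parse_params as defined above.
from owlmind.cli import Dispatcher

# Roughly equivalent to:
#   owlmind query "..." -p temperature=0.2,seed=42 -p num_ctx=4096 -p "system=Answer briefly"
raw = ["temperature=0.2,seed=42", "num_ctx=4096", "system=Answer briefly"]
params = Dispatcher.parse_params(raw)

# Type inference per the implementation above: floats, ints, and booleans are
# converted; everything else stays a string.
assert params == {"temperature": 0.2, "seed": 42, "num_ctx": 4096, "system": "Answer briefly"}
print(params)
```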
owlmind/core/__init__.py ADDED
@@ -0,0 +1,9 @@
1
+ """
2
+ OwlMind Framework - experimentation environment for Generative Intelligence Systems.
3
+ """
4
+
5
+ from .component import Component
6
+
7
+ __all__ = [
8
+ "Component"
9
+ ]
owlmind/core/component.py ADDED
@@ -0,0 +1,87 @@
1
+ ##
2
+ ## OwlMind Framework - experimentation environment for Generative Intelligence Systems.
3
+ ## core/component.py — Abstract base definition for generative workflow components.
4
+ ##
5
+ # Copyright (c) 2025, The Generative Intelligence Lab
6
+ # https://github.com/genilab/owlmind
7
+ #
8
+ # Disclosure:
9
+ # This framework was developed using a 'vibe coding' approach. AI-synthesized logic was
10
+ # subjected to human review and manual refinement to guarantee functional
11
+ # integrity and structural clarity.
12
+ #
13
+
14
+
15
+ import logging
16
+ from typing import Any, Iterator
17
+ from abc import ABC, abstractmethod
18
+ from owlmind.graphk import Node
19
+
20
+
21
+ ##
22
+ ## COMPONENT
23
+ ##
24
+
25
+ class Component(Node, ABC):
26
+ """
27
+ Base class for framework components with managed I/O and logging.
28
+ """
29
+
30
+ LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
31
+
32
+ # Log Levels as Constants
33
+ LOG_DEBUG = logging.DEBUG
34
+ LOG_INFO = logging.INFO
35
+ LOG_WARNING = logging.WARNING
36
+ LOG_ERROR = logging.ERROR
37
+ LOG_CRITICAL = logging.CRITICAL
38
+
39
+ def __init__(self, session: dict = None, log_level: int = LOG_CRITICAL, **kwargs):
40
+ # 1. Setup logging infrastructure
41
+ if not logging.getLogger().hasHandlers():
42
+ logging.basicConfig(level=logging.WARNING, format=self.LOG_FORMAT)
43
+
44
+ # Internal framework storage using _name_ convention
45
+ self._logger_ = logging.getLogger(self.__class__.__name__)
46
+ self._input_ = None
47
+ self._output_ = None
48
+
49
+ # Initialize Node (this handles _condition_, _weight_, etc.)
50
+ super().__init__(session=session, **kwargs)
51
+
52
+ # 2. Set the log level using the property setter
53
+ self.log_level = log_level
54
+
55
+ # 3. Obfuscate framework attributes to keep session() clean
56
+ self.obfuscate(['log_level'])
57
+
58
+ return
59
+
60
+ @property
61
+ def input(self):
62
+ return self._input_
63
+
64
+ @input.setter
65
+ def input(self, value):
66
+ self._input_ = value
67
+
68
+ @property
69
+ def output(self):
70
+ return self._output_
71
+
72
+ @property
73
+ def log_level(self) -> int:
74
+ return self._logger_.level
75
+
76
+ @log_level.setter
77
+ def log_level(self, level: Any):
78
+ val = getattr(logging, level.upper(), None) if isinstance(level, str) else level
79
+ if isinstance(val, int):
80
+ self._logger_.setLevel(val)
81
+ return
82
+
83
+ def log(self, message: str, level: int = LOG_INFO):
84
+ self._logger_.log(level, message)
85
+ return
86
+
87
+
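A minimal sketch of a concrete Component subclass, assuming the ping/info/step contract inherited from Node; the Echo class here is hypothetical and not part of the package.

```python
# Hypothetical example: a trivial Component that echoes its input.
from typing import Any, Iterator
from owlmind.core import Component

class Echo(Component):
    def ping(self) -> bool:
        return True                          # nothing remote to reach

    def info(self) -> dict:
        return {"type": "Echo", "session": self.session()}

    def step(self) -> Iterator[Any]:
        yield self.input                     # pass the input straight through

e = Echo(tag="demo", log_level=Component.LOG_DEBUG)
e.input = "hello"
print(list(e.step()))    # ['hello']
print(e.session())       # {'tag': 'demo'} — underscore-prefixed internals are filtered out
```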
owlmind/graphk/__init__.py ADDED
@@ -0,0 +1,12 @@
1
+ """
2
+ GraphK - Framework for Graph programming.
3
+ """
4
+
5
+ from .node import Node, Gate, BranchNode
6
+ from .pipeline import Pipeline
7
+
8
+ __all__ = [
9
+ "Node",
10
+ "Gate",
11
+ "BranchNode"
12
+ ]
owlmind/graphk/node.py ADDED
@@ -0,0 +1,182 @@
1
+ ##
2
+ ## GraphK - Framework for Graph programming.
3
+ ## node.py — data structures for Nodes in Processing Lines
4
+ # Includes: Gate, Node, BranchNode.
5
+ ##
6
+ # Copyright (c) 2025, Dr. Fernando Koch
7
+ # http://github.com/kochf1/graphk
8
+ #
9
+ # Disclosure:
10
+ # This code was developed through 'vibe coding'. Certain components
11
+ # required manual implementation, and human-in-the-loop review and refinement
12
+ # were applied throughout the project.
13
+ #
14
+
15
+ import random
16
+ from abc import ABC, abstractmethod
17
+ from typing import Union, Callable, List, Any, Iterator, Optional, Dict
18
+
19
+
20
+ ##
21
+ ## GATE
22
+ ##
23
+
24
+ class Gate:
25
+ """
26
+ Validation structure that evaluates a set of checker callables against a session.
27
+ """
28
+ # Strategy Constants
29
+ ALL_MATCH = 1 # Logical AND
30
+ ANY_MATCH = 2 # Logical OR
31
+ ONE_ONE_MATCH = 3 # Exclusive OR / Single match
32
+
33
+ def __init__(self,
34
+ checkers: Union[Callable, List[Callable]],
35
+ strategy: int = ALL_MATCH):
36
+ """
37
+ Initialize the Gate with a set of rules and a matching strategy.
38
+ """
39
+ self.checkers = checkers if isinstance(checkers, list) else [checkers]
40
+ self.strategy = strategy
41
+
42
+ def assess(self, session: Any) -> bool:
43
+ """
44
+ Executes the assessment logic against the provided session (dict)
45
+ """
46
+ # Convert checkers into a generator of booleans
47
+ results = (func(session) for func in self.checkers)
48
+
49
+ if self.strategy == self.ALL_MATCH:
50
+ return all(results)
51
+
52
+ if self.strategy == self.ANY_MATCH:
53
+ return any(results)
54
+
55
+ if self.strategy == self.ONE_ONE_MATCH:
56
+ # Returns True if exactly one checker returns True
57
+ return list(results).count(True) == 1
58
+
59
+ return False
60
+
61
+ ##
62
+ ## NODE
63
+ ##
64
+
65
+ class Node(ABC):
66
+ """
67
+ Base Node.
68
+ """
69
+
70
+ def __init__(self,
71
+ session: Optional[dict] = None,
72
+ *,
73
+ condition: Gate = None,
74
+ validation: Gate = None,
75
+ weight: int = None,
76
+ next: Any = None,
77
+ **kwargs):
78
+
79
+ # Initialize the obfuscation set
80
+ self._obfuscate_ = set()
81
+
82
+ # Map internal attributes only if they are provided
83
+ if condition is not None: self._condition_ = condition
84
+ if validation is not None: self._validation_ = validation
85
+ if weight is not None: self._weight_ = weight
86
+ if next is not None: self._next_ = next
87
+
88
+ # Process session and dynamic kwargs in one pass
89
+ for key, value in ((session or {}) | kwargs).items():
90
+ setattr(self, key, value)
91
+ return
92
+
93
+ def __repr__(self):
94
+ return str(self.session())
95
+
96
+ def obfuscate(self, keys: Any):
97
+ """Adds keys to the obfuscation list to hide them from session() exports."""
98
+ self._obfuscate_.update([keys] if isinstance(keys, str) else keys)
99
+ return
100
+
101
+ def session(self) -> dict:
102
+ """Returns public state; filters internal and obfuscated keys."""
103
+ return {
104
+ k: v for k, v in self.__dict__.items()
105
+ if not k.startswith('_') and k not in self._obfuscate_
106
+ }
107
+
108
+ @abstractmethod
109
+ def ping(self) -> bool:
110
+ """Health check / Connectivity test."""
111
+ pass
112
+
113
+ @abstractmethod
114
+ def info(self) -> dict:
115
+ """Metadata and capability reporting."""
116
+ pass
117
+
118
+ @abstractmethod
119
+ def step(self) -> Iterator[Any]:
120
+ """Execution logic; must return iterator."""
121
+ pass
122
+
123
+
124
+ ##
125
+ ## BRANCH NODE
126
+ ##
127
+
128
+ class BranchNode(Node, ABC):
129
+ """
130
+ Node that selects one of several candidate nodes according to a selection strategy.
131
+ """
132
+
133
+ # Selection strategies
134
+ SELECT_FIRST = 0
135
+ SELECT_RANDOM = 1
136
+ SELECT_BEST = 2
137
+
138
+ def __init__(
139
+ self,
140
+ nodes: List[Node],
141
+ strategy: int = SELECT_FIRST,
142
+ session: dict = None,
143
+ **kwargs
144
+ ):
145
+ super().__init__(session=session, **kwargs)
146
+ self._nodes_ = nodes
147
+ self._strategy_ = strategy
148
+ return
149
+
150
+ def select(self) -> Optional[Node]:
151
+ """
152
+ Filters nodes based on their _condition_ Gate and applies the selection strategy.
153
+ """
154
+ selected_node = None
155
+
156
+ # 1. Filter valid nodes using the _condition_ gate
157
+ valid_nodes = []
158
+ for node in self._nodes_:
159
+ # If a gate exists, we assess it; otherwise, the path is open (default True)
160
+ is_valid = True
161
+ if hasattr(node, '_condition_'):
162
+ is_valid = node._condition_.assess(node.session())
163
+
164
+ if is_valid:
165
+ valid_nodes.append(node)
166
+
167
+ # Fail fast: No valid paths available
168
+ if not valid_nodes:
169
+ return None
170
+
171
+ # 2. Apply Selection Strategy
172
+ if self._strategy_ == self.SELECT_FIRST:
173
+ selected_node = valid_nodes[0]
174
+
175
+ elif self._strategy_ == self.SELECT_RANDOM:
176
+ selected_node = random.choice(valid_nodes)
177
+
178
+ elif self._strategy_ == self.SELECT_BEST:
179
+ # Selection based on the _weight_ attribute
180
+ selected_node = max(valid_nodes, key=lambda n: getattr(n, '_weight_', 0))
181
+
182
+ return selected_node
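A small sketch of how Gate and BranchNode.select() could be combined under the strategies defined above; Stub and StubBranch are hypothetical helper classes, not part of the package.

```python
# Hypothetical example of Gate strategies driving BranchNode.select().
from typing import Any, Iterator
from owlmind.graphk import Node, Gate, BranchNode

class Stub(Node):
    def ping(self) -> bool: return True
    def info(self) -> dict: return self.session()
    def step(self) -> Iterator[Any]: yield None

class StubBranch(BranchNode):
    def ping(self) -> bool: return True
    def info(self) -> dict: return self.session()
    def step(self) -> Iterator[Any]: yield None

# A Gate evaluates its checker callables against the node's session() dict
cheap = Stub(name="cheap", weight=1,
             condition=Gate(lambda s: s.get("name") == "cheap"))
smart = Stub(name="smart", weight=5,
             condition=Gate([lambda s: True, lambda s: "name" in s],
                            strategy=Gate.ALL_MATCH))

branch = StubBranch(nodes=[cheap, smart], strategy=BranchNode.SELECT_BEST)
print(branch.select().name)   # 'smart' — highest _weight_ among the valid nodes
```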
owlmind/graphk/pipeline.py ADDED
@@ -0,0 +1,94 @@
1
+ ##
2
+ ## GraphK - Framework for Graph programming.
3
+ ## pipeline.py — data structure for Processing Lines.
4
+ ##
5
+ # Copyright (c) 2025, Dr. Fernando Koch
6
+ # http://github.com/kochf1/graphk
7
+ #
8
+ # Disclosure:
9
+ # This code was developed through 'vibe coding'. Certain components
10
+ # required manual implementation, and human-in-the-loop review and refinement
11
+ # were applied throughout the project.
12
+ #
13
+
14
+
15
+ from abc import ABC, abstractmethod
16
+ from typing import Union, Callable, List, Any, Iterator, Iterable
17
+ from .node import Node
18
+
19
+ class Pipeline(Node):
20
+ """
21
+ Orchestrator for a sequence of Nodes.
22
+ Compatible with GraphK Node and Gate structures.
23
+ """
24
+ # Execution Policies
25
+ SKIP_ON_FAIL = 0
26
+ STOP_ON_FAIL = 1
27
+
28
+ def __init__(self,
29
+ nodes: Iterable[Node] = None,
30
+ session: dict = None,
31
+ fail_policy: int = STOP_ON_FAIL,
32
+ **kwargs):
33
+ """
34
+ Initialize the pipeline as a Node containing other Nodes.
35
+ """
36
+ super().__init__(session=session, **kwargs)
37
+ self.nodes = list(nodes) if nodes else []
38
+ self.fail_policy = fail_policy
39
+ self.obfuscate(['nodes', 'fail_policy'])
40
+
41
+ # --- Node Access (Pythonic Interface) ---
42
+
43
+ def __len__(self) -> int: return len(self.nodes)
44
+
45
+ def __getitem__(self, index): return self.nodes[index]
46
+
47
+ def add(self, node: Node):
48
+ self.nodes.append(node)
49
+ return self
50
+
51
+ # --- Execution Logic ---
52
+
53
+ def ping(self) -> bool:
54
+ """Pipeline is healthy if all internal nodes are healthy."""
55
+ return all(n.ping() for n in self.nodes)
56
+
57
+ def info(self) -> dict:
58
+ return {
59
+ "type": "Pipeline",
60
+ "node_count": len(self.nodes),
61
+ "nodes": [type(n).__name__ for n in self.nodes]
62
+ }
63
+
64
+ def step(self) -> Iterator[Any]:
65
+ """
66
+ Executes the pipeline sequence.
67
+ Passes output of one node as input to the next.
68
+ """
69
+ current_input = getattr(self, 'input', None)
70
+
71
+ for node in self.nodes:
72
+ # 1. Check the optional can_proceed() hook (if the node defines one)
73
+ if hasattr(node, 'can_proceed') and not node.can_proceed(current_input):
74
+ if self.fail_policy == self.STOP_ON_FAIL: break
75
+ continue
76
+
77
+ # 2. Set input and execute
78
+ node.input = current_input
79
+
80
+ # 3. Collect output (assuming step() returns an iterator)
81
+ node_output = None
82
+ for chunk in node.step():
83
+ node_output = chunk # Capture last chunk as the state
84
+ yield chunk
85
+
86
+ # 4. Check the optional is_valid() hook (if the node defines one)
87
+ if hasattr(node, 'is_valid') and not node.is_valid(node_output):
88
+ if self.fail_policy == self.STOP_ON_FAIL: break
89
+
90
+ # 5. Chain output to next input
91
+ current_input = node_output
92
+
93
+ self._output_ = current_input
94
+
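A minimal sketch of chaining two nodes through a Pipeline; Upper and Exclaim are hypothetical Node subclasses used only for illustration.

```python
# Hypothetical example: each node transforms its input and feeds the next one.
from typing import Any, Iterator
from owlmind.graphk import Node, Pipeline

class Upper(Node):
    def ping(self) -> bool: return True
    def info(self) -> dict: return self.session()
    def step(self) -> Iterator[Any]:
        yield str(self.input).upper()        # transform and emit

class Exclaim(Node):
    def ping(self) -> bool: return True
    def info(self) -> dict: return self.session()
    def step(self) -> Iterator[Any]:
        yield f"{self.input}!"               # receives Upper's output

pipe = Pipeline(nodes=[Upper(), Exclaim()])
pipe.input = "owlmind"                       # seed the chain
print(list(pipe.step()))                     # ['OWLMIND', 'OWLMIND!']
print(pipe.info())                           # {'type': 'Pipeline', 'node_count': 2, ...}
```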
owlmind/models/__init__.py ADDED
@@ -0,0 +1,9 @@
1
+ """
2
+ OwlMind Framework - experimentation environment for Generative Intelligence Systems.
3
+ """
4
+
5
+ from .ollama import Ollama
6
+
7
+ __all__ = [
8
+ "Ollama"
9
+ ]
owlmind/models/ollama.py ADDED
@@ -0,0 +1,121 @@
1
+
2
+ ##
3
+ ## OwlMind Framework - experimentation environment for Generative Intelligence Systems.
4
+ ## models/ollama.py — Ollama-backed implementation of the Component interface.
5
+ ##
6
+ # Copyright (c) 2025, The Generative Intelligence Lab
7
+ # https://github.com/genilab/owlmind
8
+ #
9
+ # Disclosure:
10
+ # This framework was developed using a 'vibe coding' approach. AI-synthesized logic was
11
+ # subjected to human review and manual refinement to guarantee functional
12
+ # integrity and structural clarity.
13
+ #
14
+
15
+ import logging
16
+ import ollama
17
+ from typing import Any, Iterator
18
+ from owlmind.core import Component
19
+
20
+ class Ollama(Component):
21
+ """
22
+ Implementation of Ollama model access component.
23
+ Configuration and parameters are strictly managed via attributes.
24
+ """
25
+
26
+ DEFAULT_SERVER = "http://localhost:11434"
27
+ DEFAULT_MODEL = "llama3"
28
+ DEFAULT_TIMEOUT = 10
29
+ DEFAULT_LOG_LEVEL = logging.INFO
30
+
31
+ OLLAMA_PARAMS = {
32
+ "temperature": None, "top_p": None, "seed": None,
33
+ "num_ctx": None, "num_predict": None, "repeat_penalty": None,
34
+ "top_k": None, "stop": None, "system": None
35
+ }
36
+
37
+ def __init__(self, session: dict = None, **kwargs):
38
+ # Internal framework storage using _name_ convention
39
+ self._models_cache_ = None
40
+ self._client_ = None
41
+
42
+ # Establish logging level
43
+ log_level = kwargs.pop('log_level', self.DEFAULT_LOG_LEVEL)
44
+
45
+ # Initialize Component (which handles _logger_, _input_, etc.)
46
+ super().__init__(session=session, log_level=log_level, **kwargs)
47
+
48
+ # Ensure public config attributes exist in __dict__ for session()
49
+ if not hasattr(self, 'url'): self.url = self.DEFAULT_SERVER
50
+ if not hasattr(self, 'model'): self.model = self.DEFAULT_MODEL
51
+
52
+ # Map parameters; if None, they stay in session() as None unless filtered
53
+ for param, default_value in self.OLLAMA_PARAMS.items():
54
+ if not hasattr(self, param): setattr(self, param, default_value)
55
+
56
+ # Initialize the client on the framework-internal attribute
57
+ self._client_ = ollama.Client(host=self.url, timeout=self.DEFAULT_TIMEOUT)
58
+
59
+ # Obfuscate the client-related keys just in case
60
+ self.obfuscate(['_client_', '_models_cache_'])
61
+ return
62
+
63
+ def ping(self) -> bool:
64
+ """Health check for Ollama server."""
65
+ is_alive = False
66
+ try:
67
+ self._client_.list()
68
+ is_alive = True
69
+ except Exception as e:
70
+ self.log(f"Ping failed at {self.url}: {e}", level=self.LOG_WARNING)
71
+
72
+ return is_alive
73
+
74
+ def info(self) -> dict:
75
+ """Capability reporting including available models."""
76
+ if self._models_cache_ is None:
77
+ try:
78
+ response = self._client_.list()
79
+ # Handle varying response structures from Ollama library
80
+ models_list = response.models if hasattr(response, 'models') else response.get('models', [])
81
+ self._models_cache_ = [
82
+ (m.get('model') or m.get('name')) if isinstance(m, dict) else getattr(m, 'model', None) or getattr(m, 'name', None)
83
+ for m in models_list if m
84
+ ]
85
+ except Exception:
86
+ self._models_cache_ = []
87
+
88
+ # Filter out None values for a cleaner info report
89
+ clean_session = {k: v for k, v in self.session().items() if v is not None}
90
+
91
+ return {
92
+ "status": "online" if self.ping() else "offline",
93
+ "session": clean_session,
94
+ "models": self._models_cache_
95
+ }
96
+
97
+ def step(self) -> Iterator[str]:
98
+ """Execution: Performs LLM inference."""
99
+ # Fail Fast: No input
100
+ if not self.input:
101
+ self.log("Step skipped: No input provided.", level=self.LOG_WARNING)
102
+ yield "No input found."
103
+ return
104
+
105
+ # Prepare parameters for Ollama API
106
+ options = {p: getattr(self, p) for p in self.OLLAMA_PARAMS if getattr(self, p) is not None}
107
+
108
+ self.log(f"Inference: {self.model} @ {self.url}", level=self.LOG_INFO)
109
+ self.log(f"Parameters: {options}", level=self.LOG_DEBUG)
110
+
111
+ result = ""
112
+ try:
113
+ response = self._client_.generate(model=self.model, prompt=self.input, options=options)
114
+ result = response.get('response', '')
115
+ self._output_ = result # Store the result; Component exposes it via the read-only 'output' property
116
+ except Exception as e:
117
+ self.log(f"Ollama Error: {e}", level=self.LOG_ERROR)
118
+ result = f"Error: {str(e)}"
119
+
120
+ yield result
121
+ return
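A sketch of driving the Ollama component directly from Python, assuming owlmind 0.1.5 is installed and an Ollama server is reachable at the default URL with the named model pulled; adjust the url and model attributes for your setup.

```python
# Illustrative only: the model name and prompt are placeholders.
from owlmind.models import Ollama

llm = Ollama(model="llama3", temperature=0.2, num_predict=64,
             input="In one sentence, what is a language model?")

if llm.ping():                    # health check against the configured server
    for chunk in llm.step():      # step() yields the generated text
        print(chunk)
else:
    print(f"Server unreachable at {llm.url}")
```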
{owlmind-0.1.2.dist-info → owlmind-0.1.5.dist-info}/METADATA RENAMED
@@ -1,10 +1,15 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: owlmind
3
- Version: 0.1.2
4
- Summary: A modular CLI for local Ollama control
5
- Requires-Python: >=3.8
3
+ Version: 0.1.5
4
+ Summary: Experimentation environment and pedagogical sandbox for studying generative intelligence systems.
5
+ Author-email: Fernando Koch <your-email@example.com>
6
+ License: MIT
7
+ Requires-Python: >=3.10
6
8
  Description-Content-Type: text/markdown
7
- Requires-Dist: ollama
9
+ License-File: LICENSE.txt
10
+ Requires-Dist: ollama>=0.4.0
11
+ Requires-Dist: python-dotenv>=1.0.0
12
+ Dynamic: license-file
8
13
 
9
14
 
10
15
  <!---
@@ -24,17 +29,20 @@ Requires-Dist: ollama
24
29
 
25
30
  ## Overview
26
31
 
27
- The OwlMind Platform is a foundational experimentation environment engineered by The Generative Intelligence Lab. It serves as a pedagogical sandbox where students and researchers can interrogate the mechanics of Generative Intelligence. By implementing a standardized CLI over a provider-agnostic abstraction layer, OwlMind enables learners to conduct comparative analyses of LLM behaviors through interchangeable parameters within a controlled, observable setup
32
+ The OwlMind Framework provides an experimentation environment and pedagogical sandbox for [Generative Intelligence Systems](https://medium.com/lecture-notes-on-generative-intelligence/generative-intelligence-systems-5b23727acffe). The platform defines a standardized programming structure and command-line interface across multiple architectural layers. This structure enables comparative experiments around the behaviors of Large Language Models (LLMs), AI pipelines, and component-level configurations.
28
33
 
29
- Installation:
34
+ #### Installation:
30
35
 
31
36
  ```bash
32
37
  pip install owlmind
33
38
  ```
34
39
 
35
- ---
40
+ If the installation went well, you should be able to run:
41
+
42
+ ```bash
43
+ owlmind --version
44
+ ```
36
45
 
37
- ## Commands
38
46
 
39
47
  #### Configuration
40
48
  Control OwlMind via environment variables
@@ -47,15 +55,18 @@ export OLLAMA_HOST=http://localhost:11434
47
55
  export OLLAMA_MODEL=llama3
48
56
  ```
49
57
 
50
-
51
- #### System Audit
52
- View your current environment configuration
58
+ A good alternative is to create a ``.env`` file.
53
59
 
54
60
  ```bash
55
- owlmind info
61
+ # File: .env
62
+ # .env files are loaded automatically
63
+
64
+ OLLAMA_HOST=http://localhost:11434
65
+ OLLAMA_MODEL=llama3
56
66
  ```
57
67
 
58
- #### Connectivity Check
68
+
69
+ #### Connectivity
59
70
  Verify if your model provider is online.
60
71
 
61
72
  ```bash
@@ -63,14 +74,21 @@ owlmind ping
63
74
  ```
64
75
 
65
76
 
66
- #### Generation
77
+ #### Information
78
+ View your environment information.
79
+
80
+ ```bash
81
+ owlmind info
82
+ ```
83
+
84
+ #### Generation with Parameters
67
85
  Run inference with full control over sampling parameters.
68
86
 
69
87
  ```bash
70
88
  owlmind query "How do AI-driven organizations scale?" --temp 1.2 --ctx-size 4096
71
89
  ```
72
90
 
73
- Possible parameters:
91
+ Other parameters:
74
92
 
75
93
  ```bash
76
94
  $ owlmind query --help
@@ -94,7 +112,7 @@ options:
94
112
  ```
95
113
 
96
114
 
97
- #### Prompt Loading (@file syntax)
115
+ #### Prompt Loading
98
116
  OwlMind supports loading prompts directly from files using the @ prefix. This is ideal for long-form instructions or code analysis.
99
117
 
100
118
  ```bash
@@ -107,3 +125,4 @@ Explicit Flag:
107
125
  owlmind query --input research_paper.md
108
126
  ```
109
127
 
128
+
owlmind-0.1.5.dist-info/RECORD ADDED
@@ -0,0 +1,15 @@
1
+ owlmind/__init__.py,sha256=jeK2gtlTJnsYnkBZnUWO9SbKcyzyIGOZidZUfOBpSa8,115
2
+ owlmind/cli.py,sha256=UCU7YVyi3hnjmkMp-QmFvaRIo4hBQd9y9VhREag0bj0,7523
3
+ owlmind/core/__init__.py,sha256=FdHsekm-vN68W1lxoqgVGhDGNW8r5VUbvHcWYT8MafQ,158
4
+ owlmind/core/component.py,sha256=FxXcki2I986vgpHQj3iUR8Ocb6zjKPwPvcE59MYjbX8,2452
5
+ owlmind/graphk/__init__.py,sha256=Sm9S-qmlxvgxJB6ZyZ8MEJhWGsDmD-2ctsc9Vmi5ivQ,181
6
+ owlmind/graphk/node.py,sha256=GUPvd0XMzflPKgOzbJBV7igrGVHQOn31JkPlzffTR_Y,5138
7
+ owlmind/graphk/pipeline.py,sha256=otmpg7PDRzbkoHouqukXUzVS3yS5CVwra0O0vPFfw1Y,2936
8
+ owlmind/models/__init__.py,sha256=oDwcewvCenSr3PMpoZPOkq8sSj0MkXlruIAw-Dj8uI4,149
9
+ owlmind/models/ollama.py,sha256=Y-9J5C5ztRgricOJ4Co3AE_QGQbr99bKreJwGJUoZTQ,4514
10
+ owlmind-0.1.5.dist-info/licenses/LICENSE.txt,sha256=N3XP8HnV0oou2kX_RhIixOcH-_3R2EU9t3JZ5a1Xq-4,1084
11
+ owlmind-0.1.5.dist-info/METADATA,sha256=brzEaFFtQwRdhD1dz1dAlo7LqUoNHeCe2h5aSVDqino,3371
12
+ owlmind-0.1.5.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
13
+ owlmind-0.1.5.dist-info/entry_points.txt,sha256=dtbjpXwYC8Nbe3CJ02gflnKOOhpQWk9u5gALeDHOWGk,45
14
+ owlmind-0.1.5.dist-info/top_level.txt,sha256=hZkLOzK2jV0_OPvcTpeIwlEQi869uqittXNzXF8AajE,8
15
+ owlmind-0.1.5.dist-info/RECORD,,
owlmind-0.1.5.dist-info/licenses/LICENSE.txt ADDED
@@ -0,0 +1,21 @@
1
+ The MIT License (MIT)
2
+
3
+ Copyright (c) <year> Adam Veldhousen
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in
13
+ all copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21
+ THE SOFTWARE.
owlmind/model.py DELETED
@@ -1,21 +0,0 @@
1
- from abc import ABC, abstractmethod
2
-
3
- class Model(ABC):
4
- def __init__(self, host: str):
5
- self.host = host
6
-
7
- @abstractmethod
8
- def ping(self) -> bool:
9
- """Check if the provider is reachable."""
10
- pass
11
-
12
- @abstractmethod
13
- def info(self) -> list:
14
- """List available models."""
15
- pass
16
-
17
- @abstractmethod
18
- def query(self, model: str, prompt: str, **options):
19
- """Execute a completion request."""
20
- pass
21
-
owlmind/ollama.py DELETED
@@ -1,62 +0,0 @@
1
-
2
- import ollama
3
- from .model import Model
4
-
5
- class Ollama(Model):
6
- """
7
- Ollama implementation of the Model interface.
8
- Ensures parameters are correctly mapped to the 'options' dictionary.
9
- """
10
- def __init__(self, host: str):
11
- super().__init__(host)
12
- # The host is passed directly to the Client constructor
13
- self.client = ollama.Client(host=self.host)
14
-
15
- def ping(self) -> bool:
16
- """Checks connectivity by attempting to list models."""
17
- try:
18
- self.client.list()
19
- return True
20
- except Exception:
21
- return False
22
-
23
- def info(self) -> list:
24
- """Fetches the list of available model names from the local server."""
25
- try:
26
- response = self.client.list()
27
- # Extracts model names from the 'models' key in the response dictionary
28
- return [m['model'] for m in response.get('models', [])]
29
- except Exception:
30
- return []
31
-
32
- def query(self, model: str, prompt: str, **options):
33
- """
34
- Executes a generation request.
35
- Crucial: Parameters like temperature MUST be inside the 'options' dict.
36
- """
37
- # Map our generic CLI terms to Ollama API specific keys
38
- # num_predict = Max Tokens
39
- # num_ctx = Context Window size
40
- ollama_params = {
41
- 'temperature': options.get('temperature'),
42
- 'top_k': options.get('top_k'),
43
- 'top_p': options.get('top_p'),
44
- 'num_predict': options.get('max_tokens'),
45
- 'num_ctx': options.get('num_ctx'),
46
- 'seed': options.get('seed') # Added for reproducibility testing
47
- }
48
-
49
- # Filter out None values so the Ollama server uses its internal defaults
50
- # for any parameter the user didn't explicitly set via CLI flags.
51
- clean_options = {k: v for k, v in ollama_params.items() if v is not None}
52
-
53
- # The generate method takes model and prompt as top-level args,
54
- # but all sampling/tuning parameters go into the 'options' keyword argument.
55
- return self.client.generate(
56
- model=model,
57
- prompt=prompt,
58
- stream=True,
59
- options=clean_options
60
- )
61
-
62
-
owlmind-0.1.2.dist-info/RECORD DELETED
@@ -1,9 +0,0 @@
1
- owlmind/__init__.py,sha256=WdAUZIWDqq_7CtIT9yT_Eghr5Ph24Bdgdh7sC7h4WzA,50
2
- owlmind/cli.py,sha256=38HbHyCnRe1Sqx79rA4ugZwSu_9hfaZhXrOsQckafto,4943
3
- owlmind/model.py,sha256=J1X0gaqdZZhSJgfuhnQaD395O6gnCbr1JD1cGIfN8v0,462
4
- owlmind/ollama.py,sha256=hUJja7RAtYg7GWWQHd6l3BUV4xMcAolrqJWwlz_h9fw,2212
5
- owlmind-0.1.2.dist-info/METADATA,sha256=KczHGIGqb3Bs3C1BypgKA8eon6I-OZ68ymVBpUMRrpA,2871
6
- owlmind-0.1.2.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
7
- owlmind-0.1.2.dist-info/entry_points.txt,sha256=dtbjpXwYC8Nbe3CJ02gflnKOOhpQWk9u5gALeDHOWGk,45
8
- owlmind-0.1.2.dist-info/top_level.txt,sha256=hZkLOzK2jV0_OPvcTpeIwlEQi869uqittXNzXF8AajE,8
9
- owlmind-0.1.2.dist-info/RECORD,,