owlmind 0.1.2__tar.gz → 0.1.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. owlmind-0.1.5/LICENSE.txt +21 -0
  2. {owlmind-0.1.2/src/owlmind.egg-info → owlmind-0.1.5}/PKG-INFO +35 -16
  3. owlmind-0.1.2/PKG-INFO → owlmind-0.1.5/README.md +26 -20
  4. owlmind-0.1.5/pyproject.toml +35 -0
  5. owlmind-0.1.5/src/owlmind/__init__.py +5 -0
  6. owlmind-0.1.5/src/owlmind/cli.py +236 -0
  7. owlmind-0.1.5/src/owlmind/core/__init__.py +9 -0
  8. owlmind-0.1.5/src/owlmind/core/component.py +87 -0
  9. owlmind-0.1.5/src/owlmind/graphk/__init__.py +12 -0
  10. owlmind-0.1.5/src/owlmind/graphk/node.py +182 -0
  11. owlmind-0.1.5/src/owlmind/graphk/pipeline.py +94 -0
  12. owlmind-0.1.5/src/owlmind/models/__init__.py +9 -0
  13. owlmind-0.1.5/src/owlmind/models/ollama.py +121 -0
  14. owlmind-0.1.2/README.md → owlmind-0.1.5/src/owlmind.egg-info/PKG-INFO +39 -12
  15. owlmind-0.1.5/src/owlmind.egg-info/SOURCES.txt +21 -0
  16. owlmind-0.1.5/src/owlmind.egg-info/requires.txt +2 -0
  17. owlmind-0.1.5/tests/test_core_1.py +179 -0
  18. owlmind-0.1.5/tests/test_models_1.py +208 -0
  19. owlmind-0.1.5/tests/test_pipeline_1.py +150 -0
  20. owlmind-0.1.2/pyproject.toml +0 -20
  21. owlmind-0.1.2/src/owlmind/__init__.py +0 -3
  22. owlmind-0.1.2/src/owlmind/cli.py +0 -137
  23. owlmind-0.1.2/src/owlmind/model.py +0 -21
  24. owlmind-0.1.2/src/owlmind/ollama.py +0 -62
  25. owlmind-0.1.2/src/owlmind.egg-info/SOURCES.txt +0 -12
  26. owlmind-0.1.2/src/owlmind.egg-info/requires.txt +0 -1
  27. {owlmind-0.1.2 → owlmind-0.1.5}/setup.cfg +0 -0
  28. {owlmind-0.1.2 → owlmind-0.1.5}/src/owlmind.egg-info/dependency_links.txt +0 -0
  29. {owlmind-0.1.2 → owlmind-0.1.5}/src/owlmind.egg-info/entry_points.txt +0 -0
  30. {owlmind-0.1.2 → owlmind-0.1.5}/src/owlmind.egg-info/top_level.txt +0 -0
owlmind-0.1.5/LICENSE.txt
@@ -0,0 +1,21 @@
+ The MIT License (MIT)
+
+ Copyright (c) <year> Adam Veldhousen
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
{owlmind-0.1.2/src/owlmind.egg-info → owlmind-0.1.5}/PKG-INFO
@@ -1,10 +1,15 @@
  Metadata-Version: 2.4
  Name: owlmind
- Version: 0.1.2
- Summary: A modular CLI for local Ollama control
- Requires-Python: >=3.8
+ Version: 0.1.5
+ Summary: Experimentation environment and pedagogical sandbox for studying generative intelligence systems.
+ Author-email: Fernando Koch <your-email@example.com>
+ License: MIT
+ Requires-Python: >=3.10
  Description-Content-Type: text/markdown
- Requires-Dist: ollama
+ License-File: LICENSE.txt
+ Requires-Dist: ollama>=0.4.0
+ Requires-Dist: python-dotenv>=1.0.0
+ Dynamic: license-file


  <!---
@@ -24,17 +29,20 @@ Requires-Dist: ollama

  ## Overview

- The OwlMind Platform is a foundational experimentation environment engineered by The Generative Intelligence Lab. It serves as a pedagogical sandbox where students and researchers can interrogate the mechanics of Generative Intelligence. By implementing a standardized CLI over a provider-agnostic abstraction layer, OwlMind enables learners to conduct comparative analyses of LLM behaviors through interchangeable parameters within a controlled, observable setup
+ The OwlMind Framework provides an experimentation environment and pedagogical sandbox for [Generative Intelligence Systems](https://medium.com/lecture-notes-on-generative-intelligence/generative-intelligence-systems-5b23727acffe). The platform defines a standardized programming structure and command-line interface across multiple architectural layers. This structure enables comparative experiments around the behaviors of Large Language Models (LLMs), AI pipelines, and component-level configurations.

- Installation:
+ #### Installation:

  ```bash
  pip install owlmind
  ```

- ---
+ If the installation goes well, you should be able to run the script:
+
+ ```bash
+ owlmind --version
+ ```

- ## Commands

  #### Configuration
  Control OwlMind via environment variables
@@ -47,15 +55,18 @@ export OLLAMA_HOST=http://localhost:11434
  export OLLAMA_MODEL=llama3
  ```

-
- #### System Audit
- View your current environment configuration
+ A good alternative is to create a ``.env`` file.

  ```bash
- owlmind info
+ # File: .env
+ # .env files are loaded automatically
+
+ OLLAMA_HOST=http://localhost:11434
+ OLLAMA_MODEL=llama
  ```

- #### Connectivity Check
+
+ #### Connectivity
  Verify if your model provider is online.

  ```bash
@@ -63,14 +74,21 @@ owlmind ping
  ```


- #### Generation
+ #### Information
+ View your environment information.
+
+ ```bash
+ owlmind info
+ ```
+
+ #### Generation with Parameters
  Run inference with full control over sampling parameters.

  ```bash
  owlmind query "How do AI-driven organizations scale?" --temp 1.2 --ctx-size 4096
  ```

- Possible parameters:
+ Other parameters:

  ```bash
  $ owlmind query --help
@@ -94,7 +112,7 @@ options:
  ```


- #### Prompt Loading (@file syntax)
+ #### Prompt Loading
  OwlMind supports loading prompts directly from files using the @ prefix. This is ideal for long-form instructions or code analysis.

  ```bash
@@ -107,3 +125,4 @@ Explicit Flag:
  owlmind query --input research_paper.md
  ```

+
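The README above states that `.env` files are loaded automatically; in 0.1.5 this is implemented by `Dispatcher.load_env` in cli.py (shown later in this diff), which skips comments and blank lines, strips quotes, and never overrides variables already present in the environment. A minimal sketch of that precedence rule, assuming owlmind 0.1.5 is installed; the file name and values here are illustrative:

```python
import os

from owlmind.cli import Dispatcher  # requires owlmind 0.1.5

# Illustrative .env content: comments are skipped and quotes stripped by load_env()
with open("demo.env", "w", encoding="utf-8") as f:
    f.write('# sample config\nOLLAMA_MODEL="llama3"\nOLLAMA_HOST=http://localhost:11434\n')

os.environ["OLLAMA_MODEL"] = "mistral"   # variables already set are NOT overridden
Dispatcher.load_env("demo.env")

print(os.environ["OLLAMA_MODEL"])  # -> mistral (environment wins)
print(os.environ["OLLAMA_HOST"])   # -> http://localhost:11434 (unless already set)
```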
owlmind-0.1.2/PKG-INFO → owlmind-0.1.5/README.md
@@ -1,11 +1,3 @@
- Metadata-Version: 2.4
- Name: owlmind
- Version: 0.1.2
- Summary: A modular CLI for local Ollama control
- Requires-Python: >=3.8
- Description-Content-Type: text/markdown
- Requires-Dist: ollama
-

  <!---
  <img src="https://generativeintelligencelab.ai/images/owlmind-banner.png" width=800>
@@ -24,17 +16,20 @@ Requires-Dist: ollama

  ## Overview

- The OwlMind Platform is a foundational experimentation environment engineered by The Generative Intelligence Lab. It serves as a pedagogical sandbox where students and researchers can interrogate the mechanics of Generative Intelligence. By implementing a standardized CLI over a provider-agnostic abstraction layer, OwlMind enables learners to conduct comparative analyses of LLM behaviors through interchangeable parameters within a controlled, observable setup
+ The OwlMind Framework provides an experimentation environment and pedagogical sandbox for [Generative Intelligence Systems](https://medium.com/lecture-notes-on-generative-intelligence/generative-intelligence-systems-5b23727acffe). The platform defines a standardized programming structure and command-line interface across multiple architectural layers. This structure enables comparative experiments around the behaviors of Large Language Models (LLMs), AI pipelines, and component-level configurations.

- Installation:
+ #### Installation:

  ```bash
  pip install owlmind
  ```

- ---
+ If the installation goes well, you should be able to run the script:
+
+ ```bash
+ owlmind --version
+ ```

- ## Commands

  #### Configuration
  Control OwlMind via environment variables
@@ -47,15 +42,18 @@ export OLLAMA_HOST=http://localhost:11434
  export OLLAMA_MODEL=llama3
  ```

-
- #### System Audit
- View your current environment configuration
+ A good alternative is to create a ``.env`` file.

  ```bash
- owlmind info
+ # File: .env
+ # .env files are loaded automatically
+
+ OLLAMA_HOST=http://localhost:11434
+ OLLAMA_MODEL=llama
  ```

- #### Connectivity Check
+
+ #### Connectivity
  Verify if your model provider is online.

  ```bash
@@ -63,14 +61,21 @@ owlmind ping
  ```


- #### Generation
+ #### Information
+ View your environment information.
+
+ ```bash
+ owlmind info
+ ```
+
+ #### Generation with Parameters
  Run inference with full control over sampling parameters.

  ```bash
  owlmind query "How do AI-driven organizations scale?" --temp 1.2 --ctx-size 4096
  ```

- Possible parameters:
+ Other parameters:

  ```bash
  $ owlmind query --help
@@ -94,7 +99,7 @@ options:
  ```


- #### Prompt Loading (@file syntax)
+ #### Prompt Loading
  OwlMind supports loading prompts directly from files using the @ prefix. This is ideal for long-form instructions or code analysis.

  ```bash
@@ -107,3 +112,4 @@ Explicit Flag:
  owlmind query --input research_paper.md
  ```

+
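The `@` prefix and `--input` flag documented above are resolved by `Dispatcher.resolve_prompt` in cli.py (later in this diff): `--input` takes precedence, an `@`-prefixed prompt is read from disk, and anything else passes through verbatim. A sketch, assuming owlmind 0.1.5 is installed and using an illustrative local file:

```python
from types import SimpleNamespace

from owlmind.cli import Dispatcher  # requires owlmind 0.1.5

# Illustrative prompt file
with open("prompt.txt", "w", encoding="utf-8") as f:
    f.write("Summarize the attached notes.\n")

# --input takes precedence over the positional prompt
args = SimpleNamespace(prompt=None, input_file="prompt.txt")
print(Dispatcher.resolve_prompt(args))   # file contents, stripped

# @file syntax on the positional prompt
args = SimpleNamespace(prompt="@prompt.txt", input_file=None)
print(Dispatcher.resolve_prompt(args))   # same file contents

# a plain string passes through unchanged
args = SimpleNamespace(prompt="Hello", input_file=None)
print(Dispatcher.resolve_prompt(args))   # Hello
```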
owlmind-0.1.5/pyproject.toml
@@ -0,0 +1,35 @@
+ # =================================================================
+ # Project Configuration (for OwlMind)
+ # =================================================================
+
+ [build-system]
+ requires = ["setuptools>=61.0", "wheel"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "owlmind"
+ dynamic = ["version"]
+ description = "Experimentation environment and pedagogical sandbox for studying generative intelligence systems."
+ readme = "README.md"
+ requires-python = ">=3.10"
+ license = {text = "MIT"}
+ authors = [
+     {name = "Fernando Koch", email = "your-email@example.com"}
+ ]
+ dependencies = [
+     "ollama>=0.4.0",
+     "python-dotenv>=1.0.0",
+ ]
+
+ [project.scripts]
+ owlmind = "owlmind.cli:main"
+
+ [tool.setuptools.dynamic]
+ # This points to src/owlmind/__init__.py -> __version__
+ version = {attr = "owlmind.__version__"}
+
+ [tool.setuptools.packages.find]
+ where = ["src"]
+
+ [tool.setuptools.package-data]
+ "*" = ["*.txt", "*.json", "*.md"]
owlmind-0.1.5/src/owlmind/__init__.py
@@ -0,0 +1,5 @@
+ """
+ OwlMind Framework - experimentation environment for Generative Intelligence Systems.
+ """
+
+ __version__ = "0.1.5"
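Since pyproject.toml declares `dynamic = ["version"]` resolved from `owlmind.__version__` above, the installed distribution metadata and the importable attribute should agree. A quick consistency check, assuming owlmind 0.1.5 is installed:

```python
from importlib.metadata import version

import owlmind

# Both paths read the same value: the module attribute directly, and the
# package metadata produced by setuptools' version = {attr = "owlmind.__version__"}.
assert owlmind.__version__ == version("owlmind") == "0.1.5"
```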
owlmind-0.1.5/src/owlmind/cli.py
@@ -0,0 +1,236 @@
+
+ #!/usr/bin/env python3
+ ##
+ ## OwlMind Framework - experimentation environment for Generative Intelligence Systems.
+ ## cli.py — Command-line interface.
+ ##
+ # Copyright (c) 2025, The Generative Intelligence Lab
+ # https://github.com/genilab/owlmind
+ #
+ # Disclosure:
+ # This framework was developed using a 'vibe coding' approach. AI-synthesized logic was
+ # subjected to human review and manual refinement to guarantee functional
+ # integrity and structural clarity.
+ #
+
+ import argparse
+ import os
+ import sys
+ import logging
+ from owlmind import __version__
+ from owlmind.models import Ollama
+
+ class Dispatcher:
+     """Orchestrates components using the OwlMind Launch standards."""
+
+     DEFAULT_LOG_LEVEL = Ollama.LOG_CRITICAL
+
+     @staticmethod
+     def load_env(filepath=".env"):
+         """Manually parses a .env file and injects into os.environ."""
+         if not os.path.exists(filepath):
+             return
+         try:
+             with open(filepath, "r", encoding="utf-8") as f:
+                 for line in f:
+                     line = line.strip()
+                     if not line or line.startswith("#") or "=" not in line:
+                         continue
+                     key, value = line.split("=", 1)
+                     key, value = key.strip(), value.strip().strip("'").strip('"')
+                     if key not in os.environ:
+                         os.environ[key] = value
+         except Exception as e:
+             print(f"Warning: Could not load .env file: {e}", file=sys.stderr)
+
+     @staticmethod
+     def parse_params(raw_params: list) -> dict:
+         """
+         Parses parameters into a typed dictionary.
+         Handles: -p k=v,k2=v2 AND -p k=v -p k2=v2
+         """
+         params = {}
+         if not raw_params:
+             return params
+
+         # 1. Flatten the list and split by commas
+         #    If raw_params is ['temp=1', 'seed=42,top_k=10'], this flattens it correctly
+         tokens = []
+         for item in raw_params:
+             tokens.extend(item.split(','))
+
+         for kv in tokens:
+             if "=" not in kv:
+                 continue
+
+             k, v = kv.split("=", 1)
+             k, v = k.strip(), v.strip()
+
+             # 2. Type Inference (Boolean, Int, Float, String)
+             if v.lower() == "true":
+                 v = True
+             elif v.lower() == "false":
+                 v = False
+             else:
+                 try:
+                     # Try Integer
+                     v = int(v)
+                 except ValueError:
+                     try:
+                         # Try Float
+                         v = float(v)
+                     except ValueError:
+                         # Keep as string (e.g., stop sequences or system prompts)
+                         pass
+             params[k] = v
+
+         return params
+
+
+     @staticmethod
+     def dispatch(args):
+         # 1. Start with context
+         context = vars(args).copy()
+
+         # 2. Logic for Logging / Debug using Component constants
+         #    We check the debug flag and map it to our integer constants
+         target_level = Ollama.LOG_DEBUG if context.get('debug') else Dispatcher.DEFAULT_LOG_LEVEL
+
+         # Set global logging level
+         logging.getLogger().setLevel(target_level)
+         logging.getLogger("httpx").setLevel(target_level)
+
+         # Pass the integer level into the context so it hits the property setter
+         context['log_level'] = target_level
+
+         # 3. Dynamic Parameter Parsing
+         dynamic_params = Dispatcher.parse_params(context.pop('params', []))
+         context.update(dynamic_params)
+
+         # 4. Resolve Input Logic
+         if args.command == "query":
+             user_input = Dispatcher.resolve_prompt(args)
+             if not user_input:
+                 print("Error: No prompt provided.", file=sys.stderr)
+                 sys.exit(1)
+             context['input'] = user_input
+
+         # 5. Initialize Component
+         #    At this point, context contains everything (debug, command, url, model, etc.)
+         api = Ollama(**context)
+
+         # 6. TELL OLLAMA TO OBFUSCATE THE PLUMBING
+         #    This moves the argparse noise to private _attributes
+         api.obfuscate(['debug', 'command', 'input_file', 'prompt'])
+
+         # 7. Route Command
+         if args.command == "ping":
+             Dispatcher.handle_ping(api)
+         elif args.command == "info":
+             Dispatcher.handle_info(api)
+         elif args.command == "query":
+             Dispatcher.handle_query(api)
+
+         return
+
+
+
+     @staticmethod
+     def resolve_prompt(args):
+         if getattr(args, 'input_file', None):
+             return Dispatcher.load_file(args.input_file)
+         prompt_val = getattr(args, 'prompt', None)
+         if prompt_val and prompt_val.startswith("@"):
+             return Dispatcher.load_file(prompt_val[1:])
+         return prompt_val
+
+     @staticmethod
+     def load_file(filepath):
+         try:
+             with open(filepath, 'r', encoding='utf-8') as f:
+                 return f.read().strip()
+         except Exception as e:
+             print(f"Error loading {filepath}: {e}", file=sys.stderr)
+             sys.exit(1)
+
+     @staticmethod
+     def handle_ping(api):
+         status = "ONLINE" if api.ping() else "OFFLINE"
+         print(f"Status: {status} (Host: {api.url})")
+
+     @staticmethod
+     def handle_info(api):
+         data = api.info()
+
+         print("-" * 40)
+         print(f"Status : {data['status']}")
+         print(f"Host : {api.url}")
+         print(f"Model : {api.model}")
+
+         print("-" * 40)
+         models_list = data.get('models', [])
+         print(f"Available Models: {len(models_list)}")
+         for m in models_list:
+             print(f" - {m}")
+
+         print("-" * 40)
+         return
+
+     @staticmethod
+     def handle_query(api):
+         if not api.ping():
+             print(f"Error: Server {api.url} unreachable.", file=sys.stderr)
+             sys.exit(1)
+
+         # step() yields chunks; we print them in real-time
+         if api.log_level == api.LOG_DEBUG:
+             print(f"--- Inference: {api.model} ---")
+         for chunk in api.step():
+             print(chunk, end="", flush=True)
+
+         print()
+         return
+
+ def get_parser():
+     parser = argparse.ArgumentParser(prog="owlmind")
+
+     # Generate the helpers
+     param_list = ", ".join(Ollama.OLLAMA_PARAMS.keys())
+     param_help = f"Supports k=v; (k1=v1,k2=v2), or multiple flags. Options: {param_list}"
+
+     # Global arguments
+     parser.add_argument("--version", action="version", version=f"%(prog)s {__version__}")
+     parser.add_argument("--debug", action="store_true", help="Enable verbose telemetry and internal logs")
+     parser.add_argument("--url", dest="url", default=os.environ.get("OLLAMA_HOST", Ollama.DEFAULT_SERVER))
+     subparsers = parser.add_subparsers(dest="command", required=True)
+
+     # ping / info
+     subparsers.add_parser("ping")
+     subparsers.add_parser("info")
+
+     # query
+     qp = subparsers.add_parser("query")
+     qp.add_argument("prompt", nargs="?", default=None)
+     qp.add_argument("--input", "-i", dest="input_file")
+     qp.add_argument("--model", "-m", default=os.environ.get("OLLAMA_MODEL", Ollama.DEFAULT_MODEL))
+     qp.add_argument("--params", "-p", action="append", dest="params", help=param_help)
+
+     return parser
+
+
+ ##
+ ## ENTRY POINT
+ ##
+
+ def main():
+     Dispatcher.load_env()
+     parser = get_parser()
+     args = parser.parse_args()
+     Dispatcher.dispatch(args)
+
+
+ ##
+ ## EXECUTOR
+ ##
+ if __name__ == "__main__":
+     main()
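`Dispatcher.parse_params` above accepts repeated `-p` flags and comma-separated lists interchangeably, inferring bool/int/float types and leaving everything else as strings. A minimal sketch of that behavior, assuming owlmind 0.1.5 is installed (the parameter names are illustrative):

```python
from owlmind.cli import Dispatcher  # requires owlmind 0.1.5

# What argparse collects from: -p temperature=0.7,seed=42 -p stream=true -p stop=###
raw = ["temperature=0.7,seed=42", "stream=true", "stop=###"]

print(Dispatcher.parse_params(raw))
# {'temperature': 0.7, 'seed': 42, 'stream': True, 'stop': '###'}
```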
owlmind-0.1.5/src/owlmind/core/__init__.py
@@ -0,0 +1,9 @@
+ """
+ OwlMind Framework - experimentation environment for Generative Intelligence Systems.
+ """
+
+ from .component import Component
+
+ __all__ = [
+     "Component"
+ ]
owlmind-0.1.5/src/owlmind/core/component.py
@@ -0,0 +1,87 @@
+ ##
+ ## OwlMind Framework - experimentation environment for Generative Intelligence Systems.
+ ## core/component.py — Abstract base definition for generative workflow components.
+ ##
+ # Copyright (c) 2025, The Generative Intelligence Lab
+ # https://github.com/genilab/owlmind
+ #
+ # Disclosure:
+ # This framework was developed using a 'vibe coding' approach. AI-synthesized logic was
+ # subjected to human review and manual refinement to guarantee functional
+ # integrity and structural clarity.
+ #
+
+
+ import logging
+ from typing import Any, Iterator
+ from abc import ABC, abstractmethod
+ from owlmind.graphk import Node
+
+
+ ##
+ ## COMPONENT
+ ##
+
+ class Component(Node, ABC):
+     """
+     Base class for framework components with managed I/O and logging.
+     """
+
+     LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+
+     # Log Levels as Constants
+     LOG_DEBUG = logging.DEBUG
+     LOG_INFO = logging.INFO
+     LOG_WARNING = logging.WARNING
+     LOG_ERROR = logging.ERROR
+     LOG_CRITICAL = logging.CRITICAL
+
+     def __init__(self, session: dict = None, log_level: int = LOG_CRITICAL, **kwargs):
+         # 1. Setup logging infrastructure
+         if not logging.getLogger().hasHandlers():
+             logging.basicConfig(level=logging.WARNING, format=self.LOG_FORMAT)
+
+         # Internal framework storage using _name_ convention
+         self._logger_ = logging.getLogger(self.__class__.__name__)
+         self._input_ = None
+         self._output_ = None
+
+         # Initialize Node (this handles _condition_, _weight_, etc.)
+         super().__init__(session=session, **kwargs)
+
+         # 2. Set the log level using the property setter
+         self.log_level = log_level
+
+         # 3. Obfuscate framework attributes to keep session() clean
+         self.obfuscate(['log_level'])
+
+         return
+
+     @property
+     def input(self):
+         return self._input_
+
+     @input.setter
+     def input(self, value):
+         self._input_ = value
+
+     @property
+     def output(self):
+         return self._output_
+
+     @property
+     def log_level(self) -> int:
+         return self._logger_.level
+
+     @log_level.setter
+     def log_level(self, level: Any):
+         val = getattr(logging, level.upper(), None) if isinstance(level, str) else level
+         if isinstance(val, int):
+             self._logger_.setLevel(val)
+         return
+
+     def log(self, message: str, level: int = LOG_INFO):
+         self._logger_.log(level, message)
+         return
+
+
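The `log_level` setter above accepts either an integer logging constant or a level name; strings are resolved through `getattr(logging, level.upper(), None)`, and invalid values are silently ignored. A standalone sketch of that resolution rule, using only the standard `logging` module:

```python
import logging

def resolve_level(level):
    # Mirrors Component.log_level's setter: map "debug"/"INFO"/... or an int
    # to a logging constant; anything unrecognized resolves to None (ignored).
    val = getattr(logging, level.upper(), None) if isinstance(level, str) else level
    return val if isinstance(val, int) else None

assert resolve_level("debug") == logging.DEBUG        # names are case-insensitive
assert resolve_level(logging.ERROR) == logging.ERROR  # ints pass through
assert resolve_level("verbose") is None               # unknown names are ignored
```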
owlmind-0.1.5/src/owlmind/graphk/__init__.py
@@ -0,0 +1,12 @@
+ """
+ GraphK - Framework for Graph programming.
+ """
+
+ from .node import Node, Gate, BranchNode
+ from .pipeline import Pipeline
+
+ __all__ = [
+     "Node",
+     "Gate",
+     "BranchNode",
+     "Pipeline"
+ ]