owlmind 0.1.6__tar.gz → 0.1.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
`PKG-INFO` (the sdist metadata, whose long description embeds the README):

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: owlmind
-Version: 0.1.6
+Version: 0.1.7
 Summary: Experimentation environment and pedagogical sandbox for studying generative intelligence systems.
 Author-email: Fernando Koch <your-email@example.com>
 License: MIT
@@ -73,6 +73,10 @@ Verify if your model provider is online.
 owlmind ping
 ```
 
+It should return:
+```bash
+Status: ONLINE (Host: http://localhost:11434)
+```
 
 #### Information
 View your environment information.
@@ -81,11 +85,29 @@ View your environment information.
 owlmind info
 ```
 
+It should return something like:
+
+```bash
+$ owlmind info
+----------------------------------------
+Status : online
+Host : http://localhost:11434
+Model : llama3
+----------------------------------------
+Available Models: 5
+ - gpt-oss:latest
+ - gemma:latest
+ - tinyllama:latest
+ - llama3:latest
+ - llama3.2:latest
+----------------------------------------
+```
+
 #### Generation with Parameters
 Run inference with full control over sampling parameters.
 
 ```bash
-owlmind query "How do AI-driven organizations scale?" --temp 1.2 --ctx-size 4096
+owlmind query "How do AI-driven organizations scale?" -p temperature=1.2, ctx-size=4096
 ```
 
 Other parameters:
````
`README.md` (the same documentation changes, at the standalone README's line offsets):

````diff
@@ -60,6 +60,10 @@ Verify if your model provider is online.
 owlmind ping
 ```
 
+It should return:
+```bash
+Status: ONLINE (Host: http://localhost:11434)
+```
 
 #### Information
 View your environment information.
@@ -68,11 +72,29 @@ View your environment information.
 owlmind info
 ```
 
+It should return something like:
+
+```bash
+$ owlmind info
+----------------------------------------
+Status : online
+Host : http://localhost:11434
+Model : llama3
+----------------------------------------
+Available Models: 5
+ - gpt-oss:latest
+ - gemma:latest
+ - tinyllama:latest
+ - llama3:latest
+ - llama3.2:latest
+----------------------------------------
+```
+
 #### Generation with Parameters
 Run inference with full control over sampling parameters.
 
 ```bash
-owlmind query "How do AI-driven organizations scale?" --temp 1.2 --ctx-size 4096
+owlmind query "How do AI-driven organizations scale?" -p temperature=1.2, ctx-size=4096
 ```
 
 Other parameters:
````
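The user-visible change in both files is the switch from dedicated sampling flags (`--temp`, `--ctx-size`) to a generic `-p key=value` list. The diff does not show the parser behind the new option, but a minimal sketch of how such a comma-separated `key=value` list can be handled with `argparse` might look as follows (the function and option names are illustrative assumptions, not owlmind's actual code):

```python
import argparse

def parse_params(raw: str) -> dict:
    """Turn 'key=value, key=value' into a dict, casting numeric values
    so they can be handed to the model provider as-is."""
    params = {}
    for pair in raw.split(","):
        key, _, value = pair.strip().partition("=")
        try:
            params[key] = float(value) if "." in value else int(value)
        except ValueError:
            params[key] = value  # non-numeric values stay as strings
    return params

parser = argparse.ArgumentParser(prog="owlmind")
parser.add_argument("prompt")
parser.add_argument("-p", "--params", type=parse_params, default={},
                    help="comma-separated key=value model parameters")

args = parser.parse_args(
    ["How do AI-driven organizations scale?", "-p", "temperature=1.2, ctx-size=4096"]
)
print(args.params)  # {'temperature': 1.2, 'ctx-size': 4096}
```

Note that the pair list contains spaces and commas, so in a shell it must be quoted to reach the program as a single argument.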
The package `__init__.py` (version bump):

````diff
@@ -2,4 +2,4 @@
 OwlMind Framework - experimentation environment for Generative Intelligence Systems.
 """
 
-__version__ = "0.1.6"
+__version__ = "0.1.7"
````
The `Ollama` provider component, where the hard-coded 10-second client timeout is removed:

````diff
@@ -25,7 +25,6 @@ class Ollama(Component):
 
     DEFAULT_SERVER = "http://localhost:11434"
     DEFAULT_MODEL = "llama3"
-    DEFAULT_TIMEOUT = 10
    DEFAULT_LOG_LEVEL = logging.INFO
 
     OLLAMA_PARAMS = {
@@ -54,7 +53,7 @@ class Ollama(Component):
             if not hasattr(self, param): setattr(self, param, default_value)
 
         # Initialize the client using the framework internal
-        self._client_ = ollama.Client(host=self.url, timeout=self.DEFAULT_TIMEOUT)
+        self._client_ = ollama.Client(host=self.url)
 
         # Obfuscate the client-related keys just in case
         self.obfuscate(['_client_', '_models_cache_'])
````
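With `DEFAULT_TIMEOUT` gone, the client is constructed without a request deadline, so slow model loads and long generations are no longer aborted after 10 seconds by the HTTP layer. A caller who still wants a bound can pass one explicitly; the sketch below assumes only what the removed 0.1.6 line already demonstrated, namely that `ollama.Client` accepts a `timeout` keyword alongside `host`:

```python
import ollama

# 0.1.7 behavior: no explicit timeout, the HTTP library's default applies.
client = ollama.Client(host="http://localhost:11434")

# Opting back into a bound, as the removed 0.1.6 line did with its
# 10-second constant (60 seconds here is an arbitrary example value):
bounded = ollama.Client(host="http://localhost:11434", timeout=60)
```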
The second `PKG-INFO` copy in the sdist (under the project's egg-info directory) repeats the same changes:

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: owlmind
-Version: 0.1.6
+Version: 0.1.7
 Summary: Experimentation environment and pedagogical sandbox for studying generative intelligence systems.
 Author-email: Fernando Koch <your-email@example.com>
 License: MIT
@@ -73,6 +73,10 @@ Verify if your model provider is online.
 owlmind ping
 ```
 
+It should return:
+```bash
+Status: ONLINE (Host: http://localhost:11434)
+```
 
 #### Information
 View your environment information.
@@ -81,11 +85,29 @@ View your environment information.
 owlmind info
 ```
 
+It should return something like:
+
+```bash
+$ owlmind info
+----------------------------------------
+Status : online
+Host : http://localhost:11434
+Model : llama3
+----------------------------------------
+Available Models: 5
+ - gpt-oss:latest
+ - gemma:latest
+ - tinyllama:latest
+ - llama3:latest
+ - llama3.2:latest
+----------------------------------------
+```
+
 #### Generation with Parameters
 Run inference with full control over sampling parameters.
 
 ```bash
-owlmind query "How do AI-driven organizations scale?" --temp 1.2 --ctx-size 4096
+owlmind query "How do AI-driven organizations scale?" -p temperature=1.2, ctx-size=4096
 ```
 
 Other parameters:
````
The remaining six files in the distribution are unchanged between the two versions.