sublayer 0.0.5 → 0.0.6

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 05adf0abb1924f82c0e60a66265331c2b9cfead17a39114fc0dd9c395e5e00ff
- data.tar.gz: 20a1dfdfbdec1bb59356736150dfc48e623838fc8807e13e743a08205a8d6105
+ metadata.gz: 7a49a0cdd72c4a8171f246646ac8021f72bcdf35a0591931614cbbf7599c3bad
+ data.tar.gz: 3e0da0784fc6ace719556110c47a4145acb01d11ff90555bc25fd7ecb7c6f43f
  SHA512:
- metadata.gz: 9bc8be69c42f61099bd35163d7b15661cee6244fd5d26a7b1ca0d2cb6b4932c08ed57721a88def4ba2168a27d04080242c19d3c5282d0d9724826de4c1fc9a77
- data.tar.gz: 7b3e0f0948534ac8313568d8318db2fac51a60467fb8d879d648bccd900ca67ff1dcb7757330f9c5348e69ce463e4376684959d154007365095abd290eea9cda
+ metadata.gz: 9fc567c2e6f84886b525ccac9a842b16e6e519b17d46d6eb86cec502eeafb29c110936ad8a11978124b478f8a3271415c8f96473aaebbd3d28f4678a8736340f
+ data.tar.gz: 17a53b8f40146f09f2380f4c25ad86526d6b78dac8562ab1a06f047ab05e16646cdac4bc1453533f313e6c9bdb5a6e1537a7f8efb2ab515286608eec990ee26e
data/README.md CHANGED
@@ -71,26 +71,60 @@ Sublayer.configuration.ai_provider = Sublayer::Providers::Groq
  Sublayer.configuration.ai_model = "mixtral-8x7b-32768"
  ```

- ### Local (Experimental with Hermes2)
+ ### Local

- *Tested against the latest [Hermes 2 Mistral
- Model](https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B-GGUF)*
+ If you've never run a local model before, see the [Local Model Quickstart](#local-model-quickstart) below. Note that local models take several GB of disk space.

- Support for running a local model and serving an API on https://localhost:8080
+ The model you use must expose a ChatML-formatted v1/chat/completions endpoint to work with Sublayer (many models do by default); the snippet at the end of step 3 in the quickstart below shows one way to verify this.

- The simplest way to do this is to download
- [llamafile](https://github.com/Mozilla-Ocho/llamafile) and download one of the
- server llamafiles they provide.
-
- We've also tested with [Hermes 2 Mistral](https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B-GGUF) from
- [Nous
- Research](https://nousresearch.com/).
+ Usage:

+ Run your local model on http://localhost:8080 and then set:
  ```ruby
  Sublayer.configuration.ai_provider = Sublayer::Providers::Local
  Sublayer.configuration.ai_model = "LLaMA_CPP"
  ```
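+
+ For example, once the Local provider is configured, a generator can be defined and called as with any other provider. The sketch below is illustrative rather than taken from this release: the class name, prompt, and adapter arguments are made up, following the generator pattern in the gem's examples/ directory:
+
+ ```ruby
+ class HaikuGenerator < Sublayer::Generators::Base
+   # Ask the LLM for a single string output named "haiku"
+   llm_output_adapter type: :single_string,
+     name: "haiku",
+     description: "A haiku about the given topic"
+
+   def initialize(topic:)
+     @topic = topic
+   end
+
+   def generate
+     super
+   end
+
+   def prompt
+     "Write a haiku about #{@topic}"
+   end
+ end
+
+ puts HaikuGenerator.new(topic: "running models locally").generate
+ ```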

+ #### Local Model Quickstart:
+
+ Instructions for running a local model:
+
+ 1. Setting up Llamafile
+
+ ```bash
+ cd where/you/keep/your/projects
+ git clone git@github.com:Mozilla-Ocho/llamafile.git
+ cd llamafile
+ ```
+
+ Download: https://cosmo.zip/pub/cosmos/bin/make (Windows users also need: https://justine.lol/cosmo3/)
+
+ ```bash
+ # within the llamafile directory
+ chmod +x path/to/the/downloaded/make
+ path/to/the/downloaded/make -j8
+ sudo path/to/the/downloaded/make install PREFIX=/usr/local
+ ```
+ You can now run `llamafile`.
+
+ 2. Downloading a model
+
+ Click [here](https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B-GGUF/resolve/main/Hermes-2-Pro-Mistral-7B.Q5_K_M.gguf?download=true) to download Hermes-2-Pro-Mistral-7B.Q5_K_M (5.13 GB).
+
+ 3. Running Llamafile with a model
+
+ ```bash
+ llamafile -ngl 9999 -m path/to/the/downloaded/Hermes-2-Pro-Mistral-7B.Q5_K_M.gguf --host 0.0.0.0 -c 4096
+ ```
+
+ You are now running a local model on http://localhost:8080.
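+
+ To confirm the server speaks the ChatML-style v1/chat/completions protocol Sublayer expects, you can POST a minimal chat request to it. This is a sketch, assuming the llamafile defaults used above (http://localhost:8080 and the "LLaMA_CPP" model name):
+
+ ```ruby
+ require "net/http"
+ require "json"
+
+ # POST one user message to the OpenAI-compatible chat endpoint
+ uri = URI("http://localhost:8080/v1/chat/completions")
+ body = {
+   model: "LLaMA_CPP",
+   messages: [{ role: "user", content: "Say hello" }]
+ }.to_json
+ response = Net::HTTP.post(uri, body, "Content-Type" => "application/json")
+
+ # A compatible server responds with choices[0].message.content
+ puts JSON.parse(response.body).dig("choices", 0, "message", "content")
+ ```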
+
+ #### Recommended Settings for Apple M1 users:
+ ```bash
+ llamafile -ngl 9999 -m Hermes-2-Pro-Mistral-7B.Q5_K_M.gguf --host 0.0.0.0 --nobrowser -c 2048 --gpu APPLE -t 12
+ ```
+ Run `sysctl -n hw.logicalcpu` to see what number to give the `-t` threads option.
+
  ## Concepts
 
  ### Generators
lib/sublayer/agents/base.rb ADDED
@@ -0,0 +1,6 @@
+ module Sublayer
+   module Agents
+     class Base
+     end
+   end
+ end
lib/sublayer/tasks/base.rb ADDED
@@ -0,0 +1,6 @@
+ module Sublayer
+   module Tasks
+     class Base
+     end
+   end
+ end
lib/sublayer/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module Sublayer
-   VERSION = "0.0.5"
+   VERSION = "0.0.6"
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: sublayer
  version: !ruby/object:Gem::Version
- version: 0.0.5
+ version: 0.0.6
  platform: ruby
  authors:
  - Scott Werner
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2024-04-08 00:00:00.000000000 Z
+ date: 2024-04-14 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: ruby-openai
@@ -153,6 +153,7 @@ files:
  - examples/invalid_to_valid_json_generator.rb
  - lib/sublayer.rb
  - lib/sublayer/actions/base.rb
+ - lib/sublayer/agents/base.rb
  - lib/sublayer/components/output_adapters.rb
  - lib/sublayer/components/output_adapters/single_string.rb
  - lib/sublayer/generators/base.rb
@@ -161,6 +162,7 @@ files:
  - lib/sublayer/providers/groq.rb
  - lib/sublayer/providers/local.rb
  - lib/sublayer/providers/open_ai.rb
+ - lib/sublayer/tasks/base.rb
  - lib/sublayer/version.rb
  - sublayer.gemspec
  homepage: https://docs.sublayer.com