openai_101 1.2.2 → 1.2.4

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: f679172f1d630c59fcc74f1ddf85910da0a33efd75e57a2e92cd24e3864b58f6
-   data.tar.gz: 0ea818befda4557981d7c6b520792a32ad5eeaae77307e1aaf67b6f1c4900d0d
+   metadata.gz: 79f63641cbdb6de624c2114e414d89b19c74ff54cce75f12a7933da1e3bf794c
+   data.tar.gz: aeb198288206c3bb986e5857f5697ee0214d570040380ab6b8aeb00d8f7d2b62
  SHA512:
-   metadata.gz: e763bfda0f34f73fc163a7693532be203c991e7378fa1fe0d0d211993f8d77d25ac5452c232044eb522353c5a1149352163eb2b316c4bd4cd6dc60e48fe4a020
-   data.tar.gz: d16eae6deedf60cc0e66ba26e3626bc868c8a3538de3faf805f7b46362347b96bb4c6c74f5fff3f9646db24e6ecb42de8c127f8e207af5dacf46fe34bbf8ea40
+   metadata.gz: 10f327aa5411a2bacedf94f59ce457e2b7a6ee47e3b92aebbae8ade218ded0c38f65043e18d1500c5a7ebc29292a1fcc4cd4eedb56ae240d717d07b625dcf5db
+   data.tar.gz: d5178938b8faf8b7a2c54d9a1fe04c2b8618264934dc8e9a59650f3b39203ba60bb269b481d20d8de6605bf8b07bf0a6d4a4092a045fd5a9f66f519b98476f9a
data/.rubocop.yml CHANGED
@@ -92,8 +92,12 @@ Layout/SpaceBeforeComma:
  # My Preferences - End

  # RSpec Cops
+ RSpec/MultipleExpectations:
+   Max: 8
+ RSpec/ExampleLength:
+   Max: 25
  RSpec/NestedGroups:
-   Max: 5
+   Max: 8

  RSpec/SpecFilePathFormat:
    Enabled: true
data/CHANGELOG.md CHANGED
@@ -1,3 +1,17 @@
+ ## [1.2.3](https://github.com/appydave/openai_101/compare/v1.2.2...v1.2.3) (2024-06-19)
+
+
+ ### Bug Fixes
+
+ * node update on ci ([3aa4b9d](https://github.com/appydave/openai_101/commit/3aa4b9dc783f5ef1d47c2583c6c988446c472a05))
+
+ ## [1.2.2](https://github.com/appydave/openai_101/compare/v1.2.1...v1.2.2) (2024-06-19)
+
+
+ ### Bug Fixes
+
+ * tools enabled flag and node update on ci ([dc515b6](https://github.com/appydave/openai_101/commit/dc515b6d004d4399f6312ef8355102bd8882d977))
+
  ## [1.2.1](https://github.com/appydave/openai_101/compare/v1.2.0...v1.2.1) (2024-06-19)


data/config/openai-cost.json ADDED
@@ -0,0 +1,285 @@
+ {
+   "groups": [
+     {
+       "name": "GPT-4 Turbo",
+       "description": "With 128k context, fresher knowledge and the broadest set of capabilities, GPT-4 Turbo is more powerful than GPT-4 and offered at a lower price.",
+       "models": [
+         {
+           "name": "gpt-4-0125-preview",
+           "costs": [
+             {
+               "usage_type": "input",
+               "price_per_unit": "$10.00",
+               "unit_type": "1M tokens"
+             },
+             {
+               "usage_type": "output",
+               "price_per_unit": "$30.00",
+               "unit_type": "1M tokens"
+             }
+           ]
+         },
+         {
+           "name": "gpt-4-1106-preview",
+           "costs": [
+             {
+               "usage_type": "input",
+               "price_per_unit": "$10.00",
+               "unit_type": "1M tokens"
+             },
+             {
+               "usage_type": "output",
+               "price_per_unit": "$30.00",
+               "unit_type": "1M tokens"
+             }
+           ]
+         },
+         {
+           "name": "gpt-4-1106-vision-preview",
+           "costs": [
+             {
+               "usage_type": "input",
+               "price_per_unit": "$10.00",
+               "unit_type": "1M tokens"
+             },
+             {
+               "usage_type": "output",
+               "price_per_unit": "$30.00",
+               "unit_type": "1M tokens"
+             }
+           ]
+         }
+       ]
+     },
+     {
+       "name": "GPT-4",
+       "description": "With broad general knowledge and domain expertise, GPT-4 can follow complex instructions in natural language and solve difficult problems with accuracy.",
+       "models": [
+         {
+           "name": "gpt-4",
+           "costs": [
+             {
+               "usage_type": "input",
+               "price_per_unit": "$30.00",
+               "unit_type": "1M tokens"
+             },
+             {
+               "usage_type": "output",
+               "price_per_unit": "$60.00",
+               "unit_type": "1M tokens"
+             }
+           ]
+         },
+         {
+           "name": "gpt-4-32k",
+           "costs": [
+             {
+               "usage_type": "input",
+               "price_per_unit": "$60.00",
+               "unit_type": "1M tokens"
+             },
+             {
+               "usage_type": "output",
+               "price_per_unit": "$120.00",
+               "unit_type": "1M tokens"
+             }
+           ]
+         }
+       ]
+     },
+     {
+       "name": "GPT-3.5 Turbo",
+       "description": "GPT-3.5 Turbo models are capable and cost-effective.",
+       "models": [
+         {
+           "name": "gpt-3.5-turbo-0125",
+           "notes": "gpt-3.5-turbo-0125 is the flagship model of this family, supports a 16K context window and is optimized for dialog.",
+           "costs": [
+             {
+               "usage_type": "input",
+               "price_per_unit": "$0.50",
+               "unit_type": "1M tokens"
+             },
+             {
+               "usage_type": "output",
+               "price_per_unit": "$1.50",
+               "unit_type": "1M tokens"
+             }
+           ]
+         },
+         {
+           "name": "gpt-3.5-turbo-instruct",
+           "notes": "gpt-3.5-turbo-instruct is an Instruct model and only supports a 4K context window.",
+           "costs": [
+             {
+               "usage_type": "input",
+               "price_per_unit": "$1.50",
+               "unit_type": "1M tokens"
+             },
+             {
+               "usage_type": "output",
+               "price_per_unit": "$2.00",
+               "unit_type": "1M tokens"
+             }
+           ]
+         }
+       ]
+     },
+     {
+       "name": "Assistants API",
+       "description": "Assistants API and tools (retrieval, code interpreter) make it easy for developers to build AI assistants within their own applications. Each assistant incurs its own retrieval file storage fee based on the files passed to that assistant. The retrieval tool chunks and indexes your files content in our vector database.",
+       "models": [
+         {
+           "name": "code_interpreter",
+           "notes": "The tokens used for the Assistant API are billed at the chosen language model's per-token input/output rates, and the assistant intelligently chooses which context from the thread to include when calling the model.",
+           "costs": [
+             {
+               "usage_type": "input",
+               "price_per_unit": "$0.03",
+               "unit_type": "session"
+             }
+           ]
+         },
+         {
+           "name": "retrieval",
+           "costs": [
+             {
+               "usage_type": "input",
+               "price_per_unit": "$0.20",
+               "unit_type": "GB / assistant / day",
+               "notes": "Free until 04/01/2024"
+             }
+           ]
+         }
+       ]
+     },
+     {
+       "name": "Fine-tuning models",
+       "description": "Create your own custom models by fine-tuning our base models with your training data. Once you fine-tune a model, you’ll be billed only for the tokens you use in requests to that model.",
+       "models": [
+         {
+           "name": "gpt-3.5-turbo",
+           "costs": [
+             {
+               "usage_type": "training",
+               "price_per_unit": "$8.00",
+               "unit_type": "1M tokens"
+             },
+             {
+               "usage_type": "input",
+               "price_per_unit": "$3.00",
+               "unit_type": "1M tokens"
+             },
+             {
+               "usage_type": "output",
+               "price_per_unit": "$6.00",
+               "unit_type": "1M tokens"
+             }
+           ]
+         },
+         {
+           "name": "davinci-002",
+           "costs": [
+             {
+               "usage_type": "training",
+               "price_per_unit": "$6.00",
+               "unit_type": "1M tokens"
+             },
+             {
+               "usage_type": "input",
+               "price_per_unit": "$12.00",
+               "unit_type": "1M tokens"
+             },
+             {
+               "usage_type": "output",
+               "price_per_unit": "$12.00",
+               "unit_type": "1M tokens"
+             }
+           ]
+         },
+         {
+           "name": "babbage-002",
+           "costs": [
+             {
+               "usage_type": "training",
+               "price_per_unit": "$0.40",
+               "unit_type": "1M tokens"
+             },
+             {
+               "usage_type": "input",
+               "price_per_unit": "$1.60",
+               "unit_type": "1M tokens"
+             },
+             {
+               "usage_type": "output",
+               "price_per_unit": "$1.60",
+               "unit_type": "1M tokens"
+             }
+           ]
+         }
+       ]
+     },
+     {
+       "name": "Embedding models",
+       "description": "Build advanced search, clustering, topic modeling, and classification functionality with our embeddings offering.",
+       "models": [
+         {
+           "name": "text-embedding-3-small",
+           "costs": [
+             {
+               "usage_type": "usage",
+               "price_per_unit": "$0.02",
+               "unit_type": "1M tokens"
+             }
+           ]
+         },
+         {
+           "name": "text-embedding-3-large",
+           "costs": [
+             {
+               "usage_type": "usage",
+               "price_per_unit": "$0.13",
+               "unit_type": "1M tokens"
+             }
+           ]
+         },
+         {
+           "name": "ada v2",
+           "costs": [
+             {
+               "usage_type": "usage",
+               "price_per_unit": "$0.10",
+               "unit_type": "1M tokens"
+             }
+           ]
+         }
+       ]
+     },
+     {
+       "name": "Base models",
+       "description": "GPT base models are not optimized for instruction-following and are less capable, but they can be effective when fine-tuned for narrow tasks.",
+       "models": [
+         {
+           "name": "davinci-002",
+           "costs": [
+             {
+               "usage_type": "usage",
+               "price_per_unit": "$2.00",
+               "unit_type": "1M tokens"
+             }
+           ]
+         },
+         {
+           "name": "babbage-002",
+           "costs": [
+             {
+               "usage_type": "usage",
+               "price_per_unit": "$0.40",
+               "unit_type": "1M tokens"
+             }
+           ]
+         }
+       ]
+     }
+   ]
+ }
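
The new `config/openai-cost.json` above is plain data: pricing groups, each holding models whose costs are keyed by `usage_type`, `price_per_unit`, and `unit_type`. Below is a minimal sketch of how the file might be consumed; the script is illustrative and not part of the gem, and only the path and key names are taken from the diff above.

```ruby
# Sketch: walk config/openai-cost.json and print each model's prices.
require 'json'

data = JSON.parse(File.read('config/openai-cost.json'))

data['groups'].each do |group|
  puts group['name']
  group['models'].each do |model|
    model['costs'].each do |cost|
      # e.g. "  gpt-4-0125-preview   input   $10.00 per 1M tokens"
      puts format('  %-28s %-10s %s per %s',
                  model['name'], cost['usage_type'],
                  cost['price_per_unit'], cost['unit_type'])
    end
  end
end
```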
data/lib/openai101/config/openai.rb → data/lib/openai101/initializer.rb RENAMED
@@ -1,15 +1,16 @@
  # frozen_string_literal: true

  require 'dotenv'
- # Dotenv.load(".env.#{ENV['APP_ENV']}") # Loads environment-specific dotenv file.
+
  Dotenv.load('.env')

  OpenAI.configure do |config|
    tools_enabled = ENV.fetch('TOOLS_ENABLED', 'false')

    if tools_enabled == 'true'
-     puts 'OpenAI Tools are enabled'
      config.access_token = ENV.fetch('OPENAI_ACCESS_TOKEN')
      config.organization_id = ENV.fetch('OPENAI_ORGANIZATION_ID', nil)
    end
+
+   puts "Initializing OpenAI with tools #{tools_enabled == 'true' ? 'enabled' : 'disabled'}"
  end
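
The reworked initializer only sets credentials when `TOOLS_ENABLED=true` and now always reports whether tools are enabled. Here is a sketch of the environment and call site a consumer might pair with it; the `.env` keys come from the code above, while the client call follows the ruby-openai gem's documented `chat` interface and is illustrative only.

```ruby
# .env (values are placeholders):
#   TOOLS_ENABLED=true
#   OPENAI_ACCESS_TOKEN=sk-your-key
#   OPENAI_ORGANIZATION_ID=org-your-org   # optional
require 'openai_101' # loads the initializer above; ruby-openai is a gem dependency

client = OpenAI::Client.new # picks up the global OpenAI.configure settings

response = client.chat(
  parameters: {
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'Say hello' }]
  }
)
puts response.dig('choices', 0, 'message', 'content')
```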
data/lib/openai101/models/completion_params.rb ADDED
@@ -0,0 +1,18 @@
+ # frozen_string_literal: true
+
+ module Openai101
+   module Models
+     # Parameters for the OpenAI API
+     class CompletionParams < Openai101::Types::BaseModel
+       # attribute :platform, :string, default: 'openai'
+       attribute :model, :string
+       attribute :prompt, :string
+       attribute :temperature, :float, default: 1.0
+       attribute :max_tokens, :integer, default: 256
+       attribute :top_p, :float, default: 1.0
+       attribute :best_of, :integer, default: 1
+       attribute :frequency_penalty, :float, default: 0.0
+       attribute :presence_penalty, :float, default: 0.0
+     end
+   end
+ end
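
`CompletionParams` wraps the common completion-request settings as typed ActiveModel attributes with defaults. A hypothetical usage sketch (not shipped with the gem) showing the type casting and the `#attributes` hash:

```ruby
require 'openai_101'

params = Openai101::Models::CompletionParams.new(
  model: 'gpt-3.5-turbo-instruct',
  prompt: 'Write a haiku about Ruby',
  temperature: '0.7', # string input is cast to a float by the :float attribute
  max_tokens: 64
)

params.temperature # => 0.7
params.max_tokens  # => 64
params.attributes  # => { "model" => "gpt-3.5-turbo-instruct", "prompt" => "Write a haiku about Ruby", ... }

# One way to build a request body for a completions call:
request = params.attributes.compact.transform_keys(&:to_sym)
```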
data/lib/openai101/types/array_type.rb ADDED
@@ -0,0 +1,25 @@
+ # frozen_string_literal: true
+
+ module Openai101
+   module Types
+     # Used by the ActiveModel attributes API to cast values to arrays
+     class ArrayType < ActiveModel::Type::Value
+       def cast(value)
+         case value
+         when String
+           value.split(',')
+         when Array
+           value
+         else
+           raise ArgumentError, "Cannot cast #{value.class} to Array"
+         end
+       end
+
+       def serialize(value)
+         value.join(',')
+       end
+     end
+   end
+ end
+
+ ActiveModel::Type.register(:array, Openai101::Types::ArrayType)
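
`ArrayType` casts a comma-separated string (or an existing array) and, once registered as `:array`, is available to any `ActiveModel::Attributes` declaration. An assumed usage sketch; the `TagList` class is invented for illustration:

```ruby
class TagList < Openai101::Types::BaseModel
  attribute :tags, :array, default: []
end

TagList.new(tags: 'ruby,openai,cli').tags # => ["ruby", "openai", "cli"]
TagList.new(tags: %w[one two]).tags       # => ["one", "two"]

Openai101::Types::ArrayType.new.serialize(%w[a b c]) # => "a,b,c"
```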
data/lib/openai101/types/base_model.rb ADDED
@@ -0,0 +1,11 @@
+ # frozen_string_literal: true
+
+ module Openai101
+   module Types
+     # Base class for models built on the ActiveModel attributes API
+     class BaseModel
+       include ActiveModel::Model
+       include ActiveModel::Attributes
+     end
+   end
+ end
data/lib/openai101/types/hash_type.rb ADDED
@@ -0,0 +1,25 @@
+ # frozen_string_literal: true
+
+ module Openai101
+   module Types
+     # Used by the ActiveModel attributes API to cast values to hashes
+     class HashType < ActiveModel::Type::Value
+       def cast(value)
+         case value
+         when String
+           JSON.parse(value)
+         when Hash
+           value
+         else
+           raise ArgumentError, "Cannot cast #{value.class} to Hash"
+         end
+       end
+
+       def serialize(value)
+         value.to_json
+       end
+     end
+   end
+ end
+
+ ActiveModel::Type.register(:hash, Openai101::Types::HashType)
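
`HashType` mirrors `ArrayType` but round-trips through JSON. Another assumed sketch; the `ToolCall` class is invented for illustration:

```ruby
class ToolCall < Openai101::Types::BaseModel
  attribute :arguments, :hash, default: {}
end

ToolCall.new(arguments: '{"city":"Brisbane"}').arguments # => {"city"=>"Brisbane"}
ToolCall.new(arguments: { city: 'Brisbane' }).arguments  # => {:city=>"Brisbane"}

Openai101::Types::HashType.new.serialize({ city: 'Brisbane' }) # => "{\"city\":\"Brisbane\"}"
```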
data/lib/openai101/usecases/scrape_model_metrics.rb ADDED
@@ -0,0 +1,5 @@
+ # frozen_string_literal: true
+
+ # Read the model metrics from
+ # https://platform.openai.com/docs/models
+ # https://openai.com/api/pricing
data/lib/openai101/usecases/token_counter.rb ADDED
@@ -0,0 +1,3 @@
+ # frozen_string_literal: true
+
+ # Read: https://platform.openai.com/docs/guides/text-generation/managing-tokens
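
`token_counter.rb` is currently just a placeholder comment pointing at the managing-tokens guide. One possible direction, sketched here with the third-party tiktoken_ruby gem; that gem is not a dependency of openai_101, so treat the API below as an assumption rather than part of this release.

```ruby
require 'tiktoken_ruby'

# Count tokens locally, the way the linked guide describes.
def count_tokens(text, model: 'gpt-3.5-turbo')
  Tiktoken.encoding_for_model(model).encode(text).length
end

count_tokens('How many tokens is this sentence?') # => a small integer
```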
data/lib/openai101/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module Openai101
-   VERSION = '1.2.2'
+   VERSION = '1.2.4'
  end
data/lib/openai_101.rb CHANGED
@@ -7,9 +7,15 @@ require 'csv'
  require 'json'
  require 'table_print'
  require 'pry'
+ require 'active_model'

  require 'openai101/version'
- require 'openai101/sample'
+ require 'openai101/initializer'
+ require 'openai101/types/hash_type'
+ require 'openai101/types/array_type'
+ require 'openai101/types/base_model'
+
+ require 'openai101/models/completion_params'

  module Openai101
    # raise Openai101::Error, 'Sample message'
data/package-lock.json CHANGED
@@ -1,12 +1,12 @@
  {
    "name": "openai_101",
-   "version": "1.2.2",
+   "version": "1.2.4",
    "lockfileVersion": 3,
    "requires": true,
    "packages": {
      "": {
        "name": "openai_101",
-       "version": "1.2.2",
+       "version": "1.2.4",
        "devDependencies": {
          "@klueless-js/semantic-release-rubygem": "github:klueless-js/semantic-release-rubygem",
          "@semantic-release/changelog": "^6.0.3",
data/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "openai_101",
-   "version": "1.2.2",
+   "version": "1.2.4",
    "description": "OpenAI 101 working through the API endpoints",
    "scripts": {
      "release": "semantic-release"
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: openai_101
  version: !ruby/object:Gem::Version
-   version: 1.2.2
+   version: 1.2.4
  platform: ruby
  authors:
  - David Cruwys
@@ -10,6 +10,34 @@ bindir: exe
  cert_chain: []
  date: 2024-06-19 00:00:00.000000000 Z
  dependencies:
+ - !ruby/object:Gem::Dependency
+   name: activemodel
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '7'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '7'
+ - !ruby/object:Gem::Dependency
+   name: bigdecimal
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
  - !ruby/object:Gem::Dependency
    name: clipboard
    requirement: !ruby/object:Gem::Requirement
@@ -24,6 +52,20 @@ dependencies:
      - - "~>"
        - !ruby/object:Gem::Version
          version: '1'
+ - !ruby/object:Gem::Dependency
+   name: csv
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '3'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '3'
  - !ruby/object:Gem::Dependency
    name: dotenv
    requirement: !ruby/object:Gem::Requirement
@@ -52,6 +94,20 @@ dependencies:
      - - "~>"
        - !ruby/object:Gem::Version
          version: '0'
+ - !ruby/object:Gem::Dependency
+   name: mutex_m
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
  - !ruby/object:Gem::Dependency
    name: ruby-openai
    requirement: !ruby/object:Gem::Requirement
@@ -89,8 +145,14 @@ files:
  - Rakefile
  - bin/console
  - bin/setup
- - lib/openai101/config/openai.rb
- - lib/openai101/sample.rb
+ - config/openai-cost.json
+ - lib/openai101/initializer.rb
+ - lib/openai101/models/completion_params.rb
+ - lib/openai101/types/array_type.rb
+ - lib/openai101/types/base_model.rb
+ - lib/openai101/types/hash_type.rb
+ - lib/openai101/usecases/scrape_model_metrics.rb
+ - lib/openai101/usecases/token_counter.rb
  - lib/openai101/version.rb
  - lib/openai_101.rb
  - package-lock.json
data/lib/openai101/sample.rb DELETED
@@ -1,10 +0,0 @@
- # frozen_string_literal: true
-
- module Openai101
-   # Testing the Sample class
-   class Sample
-     def self.hello
-       'Hello, World!'
-     end
-   end
- end