openai_101 1.2.3 → 1.2.5
- checksums.yaml +4 -4
- data/.rubocop.yml +9 -2
- data/CHANGELOG.md +16 -0
- data/config/openai-cost.json +285 -0
- data/course/01-1-completion-code.png +0 -0
- data/course/01-2-completion-before.png +0 -0
- data/course/01-3-completion-after.png +0 -0
- data/lib/openai101/{config/openai.rb → initializer.rb} +4 -2
- data/lib/openai101/models/completion_params.rb +18 -0
- data/lib/openai101/types/array_type.rb +25 -0
- data/lib/openai101/types/base_model.rb +11 -0
- data/lib/openai101/types/hash_type.rb +25 -0
- data/lib/openai101/usecases/scrape_model_metrics.rb +5 -0
- data/lib/openai101/usecases/token_counter.rb +3 -0
- data/lib/openai101/version.rb +1 -1
- data/lib/openai_101.rb +7 -1
- data/models.json +293 -0
- data/package-lock.json +2 -2
- data/package.json +1 -1
- metadata +83 -3
- data/lib/openai101/sample.rb +0 -10
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: eb3493178c55c0286c26dd0c32de1ba55ebb5a8634bc2022da8c07163e5a7c38
+  data.tar.gz: e9e00c37245f0050270f73ef65a59d737412a708147a8d36fa0f29ff29aa8903
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f36ea8ec19ff3e905aaf8417e401c725b03ced9cc5a1d444a0b288694746210de2ee84c0ea3c3dc11975255aa695fe54565d30ea0e239bbe20aac1e18018ccc9
+  data.tar.gz: febb6375a6ffd981c90045a669511c1aadf43ec50bf144344ad36d214b05071e81129552ad16b1edddef2abe46438c6f8e3870e120ae1206006e68792222babd
data/.rubocop.yml
CHANGED
@@ -92,12 +92,19 @@ Layout/SpaceBeforeComma:
 # My Preferences - End
 
 # RSpec Cops
+RSpec/MultipleExpectations:
+  Max: 8
+RSpec/ExampleLength:
+  Max: 25
 RSpec/NestedGroups:
-  Max:
+  Max: 8
 
 RSpec/SpecFilePathFormat:
   Enabled: true
-
+RSpec/DescribeClass:
+  Enabled: false
+RSpec/NoExpectationExample:
+  Enabled: false
 RSpec/SpecFilePathSuffix:
   Enabled: true
 
data/CHANGELOG.md
CHANGED
@@ -1,3 +1,19 @@
+## [1.2.4](https://github.com/appydave/openai_101/compare/v1.2.3...v1.2.4) (2024-06-19)
+
+
+### Bug Fixes
+
+* preparation for the completions API (legacy) ([1d88a79](https://github.com/appydave/openai_101/commit/1d88a799aaa489de107ae22f0033316aabb754f1))
+* preparation for the completions API (legacy) ([c0be6cf](https://github.com/appydave/openai_101/commit/c0be6cf3fdd2431e9ec3b1254c4879ad667efa13))
+* update cops ([e6fb01a](https://github.com/appydave/openai_101/commit/e6fb01a07eb4864f1eeab5cbf9975524274462a6))
+
+## [1.2.3](https://github.com/appydave/openai_101/compare/v1.2.2...v1.2.3) (2024-06-19)
+
+
+### Bug Fixes
+
+* node update on ci ([3aa4b9d](https://github.com/appydave/openai_101/commit/3aa4b9dc783f5ef1d47c2583c6c988446c472a05))
+
 ## [1.2.2](https://github.com/appydave/openai_101/compare/v1.2.1...v1.2.2) (2024-06-19)
 
 
data/config/openai-cost.json
ADDED
@@ -0,0 +1,285 @@
{
  "groups": [
    {
      "name": "GPT-4 Turbo",
      "description": "With 128k context, fresher knowledge and the broadest set of capabilities, GPT-4 Turbo is more powerful than GPT-4 and offered at a lower price.",
      "models": [
        {
          "name": "gpt-4-0125-preview",
          "costs": [
            { "usage_type": "input", "price_per_unit": "$10.00", "unit_type": "1M tokens" },
            { "usage_type": "output", "price_per_unit": "$30.00", "unit_type": "1M tokens" }
          ]
        },
        {
          "name": "gpt-4-1106-preview",
          "costs": [
            { "usage_type": "input", "price_per_unit": "$10.00", "unit_type": "1M tokens" },
            { "usage_type": "output", "price_per_unit": "$30.00", "unit_type": "1M tokens" }
          ]
        },
        {
          "name": "gpt-4-1106-vision-preview",
          "costs": [
            { "usage_type": "input", "price_per_unit": "$10.00", "unit_type": "1M tokens" },
            { "usage_type": "output", "price_per_unit": "$30.00", "unit_type": "1M tokens" }
          ]
        }
      ]
    },
    {
      "name": "GPT-4",
      "description": "With broad general knowledge and domain expertise, GPT-4 can follow complex instructions in natural language and solve difficult problems with accuracy.",
      "models": [
        {
          "name": "gpt-4",
          "costs": [
            { "usage_type": "input", "price_per_unit": "$30.00", "unit_type": "1M tokens" },
            { "usage_type": "output", "price_per_unit": "$60.00", "unit_type": "1M tokens" }
          ]
        },
        {
          "name": "gpt-4-32k",
          "costs": [
            { "usage_type": "input", "price_per_unit": "$60.00", "unit_type": "1M tokens" },
            { "usage_type": "output", "price_per_unit": "$120.00", "unit_type": "1M tokens" }
          ]
        }
      ]
    },
    {
      "name": "GPT-3.5 Turbo",
      "description": "GPT-3.5 Turbo models are capable and cost-effective.",
      "models": [
        {
          "name": "gpt-3.5-turbo-0125",
          "notes": "gpt-3.5-turbo-0125 is the flagship model of this family, supports a 16K context window and is optimized for dialog.",
          "costs": [
            { "usage_type": "input", "price_per_unit": "$0.50", "unit_type": "1M tokens" },
            { "usage_type": "output", "price_per_unit": "$1.50", "unit_type": "1M tokens" }
          ]
        },
        {
          "name": "gpt-3.5-turbo-instruct",
          "notes": "gpt-3.5-turbo-instruct is an Instruct model and only supports a 4K context window.",
          "costs": [
            { "usage_type": "input", "price_per_unit": "$1.50", "unit_type": "1M tokens" },
            { "usage_type": "output", "price_per_unit": "$2.00", "unit_type": "1M tokens" }
          ]
        }
      ]
    },
    {
      "name": "Assistants API",
      "description": "Assistants API and tools (retrieval, code interpreter) make it easy for developers to build AI assistants within their own applications. Each assistant incurs its own retrieval file storage fee based on the files passed to that assistant. The retrieval tool chunks and indexes your files content in our vector database.",
      "models": [
        {
          "name": "code_interpreter",
          "notes": "The tokens used for the Assistant API are billed at the chosen language model's per-token input/output rates, and the assistant intelligently chooses which context from the thread to include when calling the model.",
          "costs": [
            { "usage_type": "input", "price_per_unit": "$0.03", "unit_type": "session" }
          ]
        },
        {
          "name": "retrieval",
          "costs": [
            { "usage_type": "input", "price_per_unit": "$0.20", "unit_type": "GB / assistant / day", "notes": "Free until 04/01/2024" }
          ]
        }
      ]
    },
    {
      "name": "Fine-tuning models",
      "description": "Create your own custom models by fine-tuning our base models with your training data. Once you fine-tune a model, you’ll be billed only for the tokens you use in requests to that model.",
      "models": [
        {
          "name": "gpt-3.5-turbo",
          "costs": [
            { "usage_type": "training", "price_per_unit": "$8.00", "unit_type": "1M tokens" },
            { "usage_type": "input", "price_per_unit": "$3.00", "unit_type": "1M tokens" },
            { "usage_type": "output", "price_per_unit": "$6.00", "unit_type": "1M tokens" }
          ]
        },
        {
          "name": "davinci-002",
          "costs": [
            { "usage_type": "training", "price_per_unit": "$6.00", "unit_type": "1M tokens" },
            { "usage_type": "input", "price_per_unit": "$12.00", "unit_type": "1M tokens" },
            { "usage_type": "output", "price_per_unit": "$12.00", "unit_type": "1M tokens" }
          ]
        },
        {
          "name": "babbage-002",
          "costs": [
            { "usage_type": "training", "price_per_unit": "$0.40", "unit_type": "1M tokens" },
            { "usage_type": "input", "price_per_unit": "$1.60", "unit_type": "1M tokens" },
            { "usage_type": "output", "price_per_unit": "$1.60", "unit_type": "1M tokens" }
          ]
        }
      ]
    },
    {
      "name": "Embedding models",
      "description": "Build advanced search, clustering, topic modeling, and classification functionality with our embeddings offering.",
      "models": [
        {
          "name": "text-embedding-3-small",
          "costs": [
            { "usage_type": "usage", "price_per_unit": "$0.02", "unit_type": "1M tokens" }
          ]
        },
        {
          "name": "text-embedding-3-large",
          "costs": [
            { "usage_type": "usage", "price_per_unit": "$0.13", "unit_type": "1M tokens" }
          ]
        },
        {
          "name": "ada v2",
          "costs": [
            { "usage_type": "usage", "price_per_unit": "$0.10", "unit_type": "1M tokens" }
          ]
        }
      ]
    },
    {
      "name": "Base models",
      "description": "GPT base models are not optimized for instruction-following and are less capable, but they can be effective when fine-tuned for narrow tasks.",
      "models": [
        {
          "name": "davinci-002",
          "costs": [
            { "usage_type": "usage", "price_per_unit": "$2.00", "unit_type": "1M tokens" }
          ]
        },
        {
          "name": "babbage-002",
          "costs": [
            { "usage_type": "usage", "price_per_unit": "$0.40", "unit_type": "1M tokens" }
          ]
        }
      ]
    }
  ]
}
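
The gem also adds lib/openai101/usecases/scrape_model_metrics.rb (listed in the file summary above but collapsed in this diff), which presumably relates to this cost data. As a rough, hypothetical sketch of how config/openai-cost.json could be consumed, assuming only the field names shown above (the price_for helper is illustrative, not part of the gem):

  # Hypothetical sketch: look up a price from config/openai-cost.json.
  require 'json'

  costs = JSON.parse(File.read('config/openai-cost.json'))

  def price_for(costs, model_name, usage_type)
    costs['groups'].each do |group|
      group['models'].each do |model|
        next unless model['name'] == model_name

        cost = model['costs'].find { |c| c['usage_type'] == usage_type }
        return cost && cost['price_per_unit']
      end
    end
    nil
  end

  puts price_for(costs, 'gpt-3.5-turbo-0125', 'input') # => "$0.50"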
data/course/01-1-completion-code.png
ADDED
Binary file
data/course/01-2-completion-before.png
ADDED
Binary file
data/course/01-3-completion-after.png
ADDED
Binary file
data/lib/openai101/{config/openai.rb → initializer.rb}
CHANGED
@@ -1,15 +1,17 @@
 # frozen_string_literal: true
 
 require 'dotenv'
-
+
 Dotenv.load('.env')
 
 OpenAI.configure do |config|
   tools_enabled = ENV.fetch('TOOLS_ENABLED', 'false')
 
   if tools_enabled == 'true'
-    puts 'OpenAI Tools are enabled'
     config.access_token = ENV.fetch('OPENAI_ACCESS_TOKEN')
     config.organization_id = ENV.fetch('OPENAI_ORGANIZATION_ID', nil)
+    config.log_errors = true
   end
+
+  puts "Initializing OpenAI with tools #{tools_enabled == 'true' ? 'enabled' : 'disabled'}"
 end
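
The renamed initializer keeps the Dotenv-based setup: it loads .env and only wires the OpenAI credentials when TOOLS_ENABLED is 'true', now with error logging enabled and a startup message. A matching .env might look like the following; the variable names come from the initializer above, the values are placeholders:

  # .env (placeholder values)
  TOOLS_ENABLED=true
  OPENAI_ACCESS_TOKEN=sk-your-key-here
  # optional; the initializer falls back to nil when this is not set
  OPENAI_ORGANIZATION_ID=org-your-org-id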
data/lib/openai101/models/completion_params.rb
ADDED
@@ -0,0 +1,18 @@
# frozen_string_literal: true

module Openai101
  module Models
    # Parameters for the OpenAI API
    class CompletionParams < Openai101::Types::BaseModel
      # attribute :platform, :string, default: 'openai'
      attribute :model, :string
      attribute :prompt, :string
      attribute :temperature, :float, default: 1.0
      attribute :max_tokens, :integer, default: 256
      attribute :top_p, :float, default: 1.0
      attribute :best_of, :integer, default: 1
      attribute :frequency_penalty, :float, default: 0.0
      attribute :presence_penalty, :float, default: 0.0
    end
  end
end
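
lib/openai101/types/base_model.rb (+11 lines) is not expanded in this diff, so exactly what CompletionParams inherits is not visible here. Assuming BaseModel mixes in ActiveModel::Attributes, these params could feed the legacy completions endpoint through the ruby-openai client along these lines (a sketch, not code from the gem):

  # Hypothetical usage, assuming BaseModel includes ActiveModel::Attributes.
  params = Openai101::Models::CompletionParams.new(
    model: 'gpt-3.5-turbo-instruct',
    prompt: 'Say hello in three languages'
  )

  client = OpenAI::Client.new
  response = client.completions(
    parameters: params.attributes.transform_keys(&:to_sym).compact
  )
  puts response.dig('choices', 0, 'text')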
data/lib/openai101/types/array_type.rb
ADDED
@@ -0,0 +1,25 @@
# frozen_string_literal: true

module Openai101
  module Types
    # Used by the ActiveModel attributes API to cast values to arrays
    class ArrayType < ActiveModel::Type::Value
      def cast(value)
        case value
        when String
          value.split(',')
        when Array
          value
        else
          raise ArgumentError, "Cannot cast #{value.class} to Array"
        end
      end

      def serialize(value)
        value.join(',')
      end
    end
  end
end

ActiveModel::Type.register(:array, Openai101::Types::ArrayType)
data/lib/openai101/types/hash_type.rb
ADDED
@@ -0,0 +1,25 @@
# frozen_string_literal: true

module Openai101
  module Types
    # Used by the ActiveModel attributes API to cast values to hashes
    class HashType < ActiveModel::Type::Value
      def cast(value)
        case value
        when String
          JSON.parse(value)
        when Hash
          value
        else
          raise ArgumentError, "Cannot cast #{value.class} to Hash"
        end
      end

      def serialize(value)
        value.to_json
      end
    end
  end
end

ActiveModel::Type.register(:hash, Openai101::Types::HashType)
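
Both custom types are registered with ActiveModel::Type, so any ActiveModel::Attributes-based class can declare :array and :hash attributes and get the casts above. A small illustrative sketch (the DemoSettings class is hypothetical, not part of the gem):

  # Illustrative only: exercising the registered :array and :hash casts.
  class DemoSettings
    include ActiveModel::Model
    include ActiveModel::Attributes

    attribute :tags, :array
    attribute :options, :hash
  end

  demo = DemoSettings.new(tags: 'alpha,beta', options: '{"retries": 2}')
  demo.tags    # => ["alpha", "beta"]
  demo.options # => {"retries" => 2}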
data/lib/openai101/version.rb
CHANGED
data/lib/openai_101.rb
CHANGED
@@ -7,9 +7,15 @@ require 'csv'
 require 'json'
 require 'table_print'
 require 'pry'
+require 'active_model'
 
 require 'openai101/version'
-require 'openai101/
+require 'openai101/initializer'
+require 'openai101/types/hash_type'
+require 'openai101/types/array_type'
+require 'openai101/types/base_model'
+
+require 'openai101/models/completion_params'
 
 module Openai101
   # raise Openai101::Error, 'Sample message'
data/models.json
ADDED
@@ -0,0 +1,293 @@
{
  "object": "list",
  "data": [
    { "id": "gpt-3.5-turbo-0613", "object": "model", "created": 1686587434, "owned_by": "openai" },
    { "id": "whisper-1", "object": "model", "created": 1677532384, "owned_by": "openai-internal" },
    { "id": "babbage-002", "object": "model", "created": 1692634615, "owned_by": "system" },
    { "id": "dall-e-2", "object": "model", "created": 1698798177, "owned_by": "system" },
    { "id": "gpt-3.5-turbo-16k", "object": "model", "created": 1683758102, "owned_by": "openai-internal" },
    { "id": "tts-1-hd-1106", "object": "model", "created": 1699053533, "owned_by": "system" },
    { "id": "tts-1-hd", "object": "model", "created": 1699046015, "owned_by": "system" },
    { "id": "gpt-4-turbo-2024-04-09", "object": "model", "created": 1712601677, "owned_by": "system" },
    { "id": "gpt-4-0125-preview", "object": "model", "created": 1706037612, "owned_by": "system" },
    { "id": "gpt-3.5-turbo", "object": "model", "created": 1677610602, "owned_by": "openai" },
    { "id": "gpt-4-turbo-preview", "object": "model", "created": 1706037777, "owned_by": "system" },
    { "id": "gpt-4-turbo", "object": "model", "created": 1712361441, "owned_by": "system" },
    { "id": "gpt-3.5-turbo-instruct-0914", "object": "model", "created": 1694122472, "owned_by": "system" },
    { "id": "gpt-4o", "object": "model", "created": 1715367049, "owned_by": "system" },
    { "id": "gpt-3.5-turbo-instruct", "object": "model", "created": 1692901427, "owned_by": "system" },
    { "id": "text-embedding-3-small", "object": "model", "created": 1705948997, "owned_by": "system" },
    { "id": "tts-1", "object": "model", "created": 1681940951, "owned_by": "openai-internal" },
    { "id": "gpt-4", "object": "model", "created": 1687882411, "owned_by": "openai" },
    { "id": "text-embedding-3-large", "object": "model", "created": 1705953180, "owned_by": "system" },
    { "id": "gpt-4-1106-preview", "object": "model", "created": 1698957206, "owned_by": "system" },
    { "id": "gpt-4-0613", "object": "model", "created": 1686588896, "owned_by": "openai" },
    { "id": "gpt-3.5-turbo-0125", "object": "model", "created": 1706048358, "owned_by": "system" },
    { "id": "tts-1-1106", "object": "model", "created": 1699053241, "owned_by": "system" },
    { "id": "dall-e-3", "object": "model", "created": 1698785189, "owned_by": "system" },
    { "id": "text-embedding-ada-002", "object": "model", "created": 1671217299, "owned_by": "openai-internal" },
    { "id": "gpt-4-32k-0314", "object": "model", "created": 1687979321, "owned_by": "openai" },
    { "id": "davinci-002", "object": "model", "created": 1692634301, "owned_by": "system" },
    { "id": "gpt-3.5-turbo-1106", "object": "model", "created": 1698959748, "owned_by": "system" },
    { "id": "gpt-4o-2024-05-13", "object": "model", "created": 1715368132, "owned_by": "system" },
    { "id": "gpt-3.5-turbo-16k-0613", "object": "model", "created": 1685474247, "owned_by": "openai" },
    { "id": "gpt-4-0314", "object": "model", "created": 1687882410, "owned_by": "openai" },
    { "id": "gpt-3.5-turbo-0301", "object": "model", "created": 1677649963, "owned_by": "openai" },
    { "id": "davinci:ft-print-speak-2023-08-20-04-31-59", "object": "model", "created": 1692505920, "owned_by": "print-speak" },
    { "id": "davinci:ft-print-speak-2023-08-20-20-34-58", "object": "model", "created": 1692563698, "owned_by": "print-speak" },
    { "id": "davinci:ft-print-speak-2023-08-26-08-48-11", "object": "model", "created": 1693039691, "owned_by": "print-speak" },
    { "id": "davinci:ft-print-speak-2023-08-26-13-12-53", "object": "model", "created": 1693055573, "owned_by": "print-speak" },
    { "id": "davinci:ft-print-speak-2023-08-26-13-22-02", "object": "model", "created": 1693056122, "owned_by": "print-speak" },
    { "id": "davinci:ft-print-speak-2023-08-26-13-31-20", "object": "model", "created": 1693056680, "owned_by": "print-speak" },
    { "id": "davinci:ft-print-speak-2023-08-26-13-40-58", "object": "model", "created": 1693057258, "owned_by": "print-speak" },
    { "id": "davinci:ft-print-speak-2023-08-26-13-50-55", "object": "model", "created": 1693057855, "owned_by": "print-speak" },
    { "id": "davinci:ft-print-speak-2023-08-26-14-19-45", "object": "model", "created": 1693059585, "owned_by": "print-speak" },
    { "id": "davinci:ft-print-speak-2023-08-27-03-14-18", "object": "model", "created": 1693106058, "owned_by": "print-speak" },
    { "id": "davinci:ft-print-speak-2023-08-27-03-21-24", "object": "model", "created": 1693106484, "owned_by": "print-speak" },
    { "id": "davinci:ft-print-speak-2023-08-27-03-30-25", "object": "model", "created": 1693107026, "owned_by": "print-speak" },
    { "id": "davinci:ft-print-speak-2023-08-27-03-37-34", "object": "model", "created": 1693107455, "owned_by": "print-speak" },
    { "id": "davinci:ft-print-speak-2023-08-27-03-45-56", "object": "model", "created": 1693107956, "owned_by": "print-speak" },
    { "id": "davinci:ft-print-speak-2023-08-27-04-13-40", "object": "model", "created": 1693109620, "owned_by": "print-speak" },
    { "id": "davinci:ft-print-speak-2023-08-27-04-16-52", "object": "model", "created": 1693109812, "owned_by": "print-speak" }
  ]
}
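
models.json looks like a captured response from OpenAI's list-models endpoint. If it was produced with the same ruby-openai client the gem already depends on, a snapshot like this could be regenerated roughly as follows; the file path and the provenance are assumptions, not documented in the diff:

  # Hypothetical sketch: capture the model list to models.json via ruby-openai.
  require 'json'

  client = OpenAI::Client.new
  models = client.models.list
  File.write('models.json', JSON.pretty_generate(models))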
data/package-lock.json
CHANGED
@@ -1,12 +1,12 @@
 {
   "name": "openai_101",
-  "version": "1.2.
+  "version": "1.2.5",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "openai_101",
-      "version": "1.2.
+      "version": "1.2.5",
       "devDependencies": {
         "@klueless-js/semantic-release-rubygem": "github:klueless-js/semantic-release-rubygem",
         "@semantic-release/changelog": "^6.0.3",
data/package.json
CHANGED
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: openai_101
 version: !ruby/object:Gem::Version
-  version: 1.2.
+  version: 1.2.5
 platform: ruby
 authors:
 - David Cruwys
@@ -10,6 +10,34 @@ bindir: exe
 cert_chain: []
 date: 2024-06-19 00:00:00.000000000 Z
 dependencies:
+- !ruby/object:Gem::Dependency
+  name: activemodel
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '7'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '7'
+- !ruby/object:Gem::Dependency
+  name: bigdecimal
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '3'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '3'
 - !ruby/object:Gem::Dependency
   name: clipboard
   requirement: !ruby/object:Gem::Requirement
@@ -24,6 +52,20 @@ dependencies:
     - - "~>"
       - !ruby/object:Gem::Version
         version: '1'
+- !ruby/object:Gem::Dependency
+  name: csv
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '3'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '3'
 - !ruby/object:Gem::Dependency
   name: dotenv
   requirement: !ruby/object:Gem::Requirement
@@ -52,6 +94,20 @@ dependencies:
     - - "~>"
       - !ruby/object:Gem::Version
         version: '0'
+- !ruby/object:Gem::Dependency
+  name: mutex_m
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '0'
 - !ruby/object:Gem::Dependency
   name: ruby-openai
   requirement: !ruby/object:Gem::Requirement
@@ -66,6 +122,20 @@ dependencies:
     - - "~>"
       - !ruby/object:Gem::Version
         version: '7'
+- !ruby/object:Gem::Dependency
+  name: table_print
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '1'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '1'
 description: " OpenAI 101 working through the API endpoints\n"
 email:
 - david@ideasmen.com.au
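
The five new runtime dependencies above (activemodel, bigdecimal, csv, mutex_m, table_print) would normally originate from openai_101.gemspec, which is not shown in this diff. The corresponding declarations are presumably along these lines, reconstructed from the metadata rather than taken from the gemspec:

  # Assumed gemspec declarations behind the metadata above (not shown in this diff).
  spec.add_dependency 'activemodel', '~> 7'
  spec.add_dependency 'bigdecimal',  '~> 3'
  spec.add_dependency 'csv',         '~> 3'
  spec.add_dependency 'mutex_m',     '~> 0'
  spec.add_dependency 'table_print', '~> 1'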
@@ -89,10 +159,20 @@ files:
 - Rakefile
 - bin/console
 - bin/setup
-- lib/openai101/config/openai.rb
-- lib/openai101/sample.rb
+- config/openai-cost.json
+- course/01-1-completion-code.png
+- course/01-2-completion-before.png
+- course/01-3-completion-after.png
+- lib/openai101/initializer.rb
+- lib/openai101/models/completion_params.rb
+- lib/openai101/types/array_type.rb
+- lib/openai101/types/base_model.rb
+- lib/openai101/types/hash_type.rb
+- lib/openai101/usecases/scrape_model_metrics.rb
+- lib/openai101/usecases/token_counter.rb
 - lib/openai101/version.rb
 - lib/openai_101.rb
+- models.json
 - package-lock.json
 - package.json
 - sig/openai_101.rbs