llm_lib 0.1.1 → 0.1.2

Files changed (3):
  1. checksums.yaml +4 -4
  2. data/lib/llm_lib.rb +1 -38
  3. metadata +1 -1
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 74ba99c44eaa6b120986df7ff3bd257dc1fa395c7a6aa3e83287850ba08a1275
-  data.tar.gz: b8d24197c782363f01ab737bb63563197926f665be9ac9d1e154201e9f241076
+  metadata.gz: a7fe9391bf23bcbe47c61ac8e8449536eb3052a19f8150813bf125861492d167
+  data.tar.gz: c2107e7106b73200502730c128fe0220a2a374d8f1ba6cfe5e6db48c8e78b95d
 SHA512:
-  metadata.gz: d5b95c2ae9e63591ee822f8abb7a47227ab34e2ec0abccd30422e3289728b3118dd6e7d33fcce7c644a556115b5436c0b681c132159ca2b2e5835a5996b25b1c
-  data.tar.gz: b76dd9437711b511a15dcd7ca0d7bf1830dc6e22dead75fafa9939dd2763bb442fe15d85585287f2a3bb45b13fa2a0dbfe041bb03fb58b118fb7e52a443bdbc2
+  metadata.gz: cc56b149e4e2e127857528a6c1754cf4e472eab9a09a4e34932b9a63d5d200eba80718b9c87dfa0f2d6d7901e9c95526b28dd348e870cc171a601b245c465830
+  data.tar.gz: d456b665985410472ba8248a517ab0bcc9126532cf11df46bf2e4134e8aafbb3903cdf9215164b280ff8f7b1ce55920f5af41792bb70268b54d8c71ca202300b
data/lib/llm_lib.rb CHANGED
@@ -11,40 +11,10 @@ module LlmLib
     # response = client.chat_gpt_call(prompt, max_tokens)
     # puts response
 
-    # remove
-    attr_reader :apikey
-    # def def initialize(apikey, query)
-    #   @apikey, @query = apikey, query
-    # end
-
     def initialize(apikey)
       @apikey = apikey
     end
 
-    # def chat_gpt_call(prompt, max_tokens, temperature = 0, top_p = 1, n = 1, stream = false, logprobs = nil, stop = "\n")
-    #   model = "text-davinci-003"
-    #   response = self.class.send(@api_key, {
-    #     "model" => model,
-    #     "prompt" => prompt,
-    #     "max_tokens" => max_tokens,
-    #     "temperature" => temperature,
-    #     "top_p" => top_p,
-    #     "n" => n,
-    #     "stream" => stream,
-    #     "logprobs" => logprobs,
-    #     "stop" => stop
-    #   })
-    #   response
-    # end
-
-    # def self.send(apikey, body)
-    #   LlmLib::Restclient.post(
-    #     body: body,
-    #     url: "https://api.openai.com/v1/completions",
-    #     apikey: apikey
-    #   )
-    # end
-
     def chat_gpt_call(prompt, max_tokens, temperature = 0, top_p = 1, n = 1, stream = false, stop = "\n")
       model = "gpt-3.5-turbo"
       response = OpenAI.send(@apikey,
@@ -62,7 +32,7 @@ module LlmLib
 
     def gpt4_call(prompt, max_tokens, temperature = 0, top_p = 1, n = 1, stream = false, stop = "\n")
       model = "gpt-4"
-      response = OpenAI.send(@api_key,
+      response = OpenAI.send(@apikey,
         model,
         prompt,
         max_tokens,
@@ -117,13 +87,6 @@ module LlmLib
       response
     end
 
-    # def self.send(apikey, body, model)
-    #   LlmLib::Restclient.post(
-    #     body: body,
-    #     url: "https://api-inference.huggingface.co/models/#{model}",
-    #     apikey: apikey
-    #   )
-    # end
   end
 
 end
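
The substantive change in data/lib/llm_lib.rb is twofold: the commented-out 0.1.1 code is deleted, and gpt4_call now reads @apikey (the variable set in initialize) instead of the never-assigned @api_key, which evaluated to nil and was passed to OpenAI.send as the key. A minimal usage sketch under that reading; the class name LlmLib::OpenAIClient is a placeholder for illustration, since the hunks do not show the class definition:

    require "llm_lib"

    # Placeholder class name; the diff only shows the instance methods.
    client = LlmLib::OpenAIClient.new(ENV["OPENAI_API_KEY"])

    # Both calls now use the key stored in @apikey by #initialize.
    # In 0.1.1, gpt4_call referenced the undefined @api_key (nil in Ruby),
    # so the GPT-4 request went out without a valid key.
    puts client.chat_gpt_call("Say hello", 50)   # model "gpt-3.5-turbo"
    puts client.gpt4_call("Say hello", 50)       # model "gpt-4"
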
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llm_lib
 version: !ruby/object:Gem::Version
-  version: 0.1.1
+  version: 0.1.2
 platform: ruby
 authors:
 - Chamath Attanayaka