llm_lib 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml ADDED
@@ -0,0 +1,7 @@
1
+ ---
2
+ SHA256:
3
+ metadata.gz: 85f973672c9b0eb31e43f3fbeb04c583a2b92bd5886c3caa320db5f972d614a2
4
+ data.tar.gz: f2fa7bb2bdb6d06a688a0987017c22f9fe55eb4ad6861abe1efd205773df35fc
5
+ SHA512:
6
+ metadata.gz: ab1ea2cbdd89d7af8229195addb8edeb03bacb93b99879de60f78ee4924c410a0b350f5d513bc91282d9ba0a1655c8ea2abde72cf64daec495a1b014750a42f6
7
+ data.tar.gz: 2916daaa07a84273eeffd20c500ba3d27be918f40490b91cd5099eae793311a8c3d08ef4526b3fe731a38099bf6e6b60d4b7b84d8607884a76b5c01250847675
@@ -0,0 +1,16 @@
1
# Thin wrapper around the shared REST client for the Hugging Face
# hosted Inference API.
#
# NOTE(review): this defines `send` on the class, shadowing Object#send
# for LlmLib::HuggingFace — kept as-is for interface compatibility.
class LlmLib::HuggingFace

  # Posts `query` to the hosted inference endpoint for `model`.
  #
  # apikey - Hugging Face API token (sent as a Bearer header)
  # model  - model repo id, e.g. "bigscience/bloom"
  # query  - input text forwarded verbatim as the "inputs" payload
  #
  # Returns whatever LlmLib::Restclient.post returns (a Hash with
  # :code and :response keys).
  def self.send(apikey, model, query)
    payload = { "inputs" => query }
    endpoint = "https://api-inference.huggingface.co/models/#{model}"

    LlmLib::Restclient.post(
      body: payload,
      url: endpoint,
      apikey: apikey
    )
  end
end
15
+
16
+ require 'llm_lib/restclient'
@@ -0,0 +1,25 @@
1
+ require 'llm_lib'
2
+
3
# Thin wrapper around the shared REST client for the OpenAI
# chat completions endpoint.
class LlmLib::OpenAI

  # Posts a single user-message chat completion request.
  #
  # apikey      - OpenAI API key (sent as a Bearer header)
  # model       - model id, e.g. "gpt-3.5-turbo"
  # prompt      - user message content
  # max_tokens  - completion length cap
  # temperature / top_p / n / stream / stop - forwarded verbatim to the API
  #
  # Returns whatever LlmLib::Restclient.post returns (a Hash with
  # :code and :response keys).
  def self.send(apikey, model, prompt, max_tokens, temperature=0, top_p=1, n=1, stream=false, stop="\n")
    payload = {
      "model" => model,
      "messages" => [{ role: "user", content: prompt }],
      "max_tokens" => max_tokens,
      "temperature" => temperature,
      "top_p" => top_p,
      "n" => n,
      "stream" => stream,
      "stop" => stop
    }

    LlmLib::Restclient.post(
      body: payload,
      url: "https://api.openai.com/v1/chat/completions",
      apikey: apikey
    )
  end
end
24
+
25
+ require 'llm_lib/restclient'
@@ -0,0 +1,44 @@
1
class LlmLib::Restclient

  # POSTs `body` as JSON to `url`, authenticating with a Bearer token.
  #
  # url    - full endpoint URL (String)
  # body   - Hash, serialized with JSON.generate into the request body
  # apikey - token placed in the Authorization header
  #
  # Returns a Hash {code:, response:}:
  #   * 2xx           -> {code: 200, response: parsed JSON body}
  #   * other status  -> {code: Integer status, response: raw body}
  #   * transport err -> {code: nil, response: error message}
  def self.post(url:, body:, apikey:)
    uri = URI(url)

    http = Net::HTTP.new(uri.host, uri.port)
    http.use_ssl = true
    # Fixed: the original set VERIFY_NONE, which disables certificate
    # checking and allows man-in-the-middle interception of API keys.
    http.verify_mode = OpenSSL::SSL::VERIFY_PEER

    request = Net::HTTP::Post.new(uri.path)
    request['Content-Type'] = 'application/json'
    request["cache-control"] = 'no-cache'
    request['Authorization'] = "Bearer #{apikey}"
    request.body = JSON.generate(body)

    begin
      response = http.request(request)

      case response
      when Net::HTTPSuccess then
        # Literal 200 kept for backward compatibility with callers that
        # compare against it (covers every 2xx response, as before).
        {code: 200, response: JSON.parse(response.read_body)}
      else
        # Fixed: the original called `response.value`, which raises for
        # any non-2xx status instead of yielding the numeric code.
        {code: response.code.to_i, response: response.read_body}
      end

    rescue StandardError => e
      # Fixed: the original dereferenced `response` here even when the
      # request itself failed (connection refused, DNS error, ...) and
      # `response` was still nil, raising NoMethodError from the rescue.
      if response
        {code: response.code.to_i, response: response.read_body}
      else
        {code: nil, response: e.message}
      end
    end
  end

end
41
+
42
+ require 'uri'
43
+ require 'net/http'
44
+ require 'json'
data/lib/llm_lib.rb ADDED
@@ -0,0 +1,133 @@
1
+ module LlmLib
2
# High-level client for OpenAI chat models.
#
# Example:
#   # Replace 'YOUR_API_KEY_HERE' with your actual OpenAI API key
#   client = LlmLib::OpenAIClient.new('YOUR_API_KEY_HERE')
#   puts client.chat_gpt_call("Once upon a time", 100)
class OpenAIClient

  # apikey - OpenAI API key used for every request made by this client.
  def initialize(apikey)
    @apikey = apikey
  end

  # Sends `prompt` to gpt-3.5-turbo and returns the raw API response hash.
  def chat_gpt_call(prompt, max_tokens, temperature = 0, top_p = 1, n = 1, stream = false, stop = "\n")
    call_model("gpt-3.5-turbo", prompt, max_tokens, temperature, top_p, n, stream, stop)
  end

  # Sends `prompt` to gpt-4 and returns the raw API response hash.
  def gpt4_call(prompt, max_tokens, temperature = 0, top_p = 1, n = 1, stream = false, stop = "\n")
    call_model("gpt-4", prompt, max_tokens, temperature, top_p, n, stream, stop)
  end

  private

  # Shared request path for both public methods.
  def call_model(model, prompt, max_tokens, temperature, top_p, n, stream, stop)
    # Fixed: the original read @api_key, which was never assigned
    # (initialize sets @apikey), so every request was sent with a nil key.
    OpenAI.send(@apikey,
      model,
      prompt,
      max_tokens,
      temperature,
      top_p,
      n,
      stream,
      stop
    )
  end

end
79
+
80
+ require 'llm_lib'
81
+
82
# High-level client for Hugging Face hosted inference models.
#
# Example:
#   # Replace 'YOUR_API_KEY_HERE' with your actual Huggingface API key
#   client = LlmLib::HuggingfaceApiClient.new('YOUR_API_KEY_HERE')
#   puts client.hugging_bloom_call("Tell me a joke")
class HuggingfaceApiClient

  # api_key - Hugging Face API token used for every request.
  def initialize(api_key)
    @api_key = api_key
  end

  # Queries the BLOOM model (default "bigscience/bloom").
  def hugging_bloom_call(query, model = "bigscience/bloom")
    # Fixed: arguments were passed as (key, query, model), but
    # HuggingFace.send is declared (apikey, model, query) — the query
    # text ended up interpolated into the model URL and the model name
    # was sent as the inference input.
    HuggingFace.send(@api_key, model, query)
  end

  # Queries Falcon-40B-Instruct (default "tiiuae/falcon-40b-instruct").
  def hugging_falcon_call(query, model = "tiiuae/falcon-40b-instruct")
    HuggingFace.send(@api_key, model, query)
  end

  # Queries Llama-2-70b-chat (default "meta-llama/Llama-2-70b-chat-hf").
  def hugging_llama2_call(query, model = "meta-llama/Llama-2-70b-chat-hf")
    HuggingFace.send(@api_key, model, query)
  end

end
127
+
128
+ end
129
+
130
+ require 'llm_lib/openai'
131
+ require 'llm_lib/huggingface'
132
+ require 'uri'
133
+ require 'dotenv/load'
metadata ADDED
@@ -0,0 +1,147 @@
1
+ --- !ruby/object:Gem::Specification
2
+ name: llm_lib
3
+ version: !ruby/object:Gem::Version
4
+ version: 0.1.0
5
+ platform: ruby
6
+ authors:
7
+ - Chamath Attanayaka
8
+ autorequire:
9
+ bindir: bin
10
+ cert_chain: []
11
+ date: 2023-07-28 00:00:00.000000000 Z
12
+ dependencies:
13
+ - !ruby/object:Gem::Dependency
14
+ name: uri
15
+ requirement: !ruby/object:Gem::Requirement
16
+ requirements:
17
+ - - "~>"
18
+ - !ruby/object:Gem::Version
19
+ version: 0.12.2
20
+ type: :runtime
21
+ prerelease: false
22
+ version_requirements: !ruby/object:Gem::Requirement
23
+ requirements:
24
+ - - "~>"
25
+ - !ruby/object:Gem::Version
26
+ version: 0.12.2
27
+ - !ruby/object:Gem::Dependency
28
+ name: rake
29
+ requirement: !ruby/object:Gem::Requirement
30
+ requirements:
31
+ - - "~>"
32
+ - !ruby/object:Gem::Version
33
+ version: 13.0.6
34
+ type: :runtime
35
+ prerelease: false
36
+ version_requirements: !ruby/object:Gem::Requirement
37
+ requirements:
38
+ - - "~>"
39
+ - !ruby/object:Gem::Version
40
+ version: 13.0.6
41
+ - !ruby/object:Gem::Dependency
42
+ name: mini_mime
43
+ requirement: !ruby/object:Gem::Requirement
44
+ requirements:
45
+ - - "~>"
46
+ - !ruby/object:Gem::Version
47
+ version: 1.1.2
48
+ type: :runtime
49
+ prerelease: false
50
+ version_requirements: !ruby/object:Gem::Requirement
51
+ requirements:
52
+ - - "~>"
53
+ - !ruby/object:Gem::Version
54
+ version: 1.1.2
55
+ - !ruby/object:Gem::Dependency
56
+ name: multi_xml
57
+ requirement: !ruby/object:Gem::Requirement
58
+ requirements:
59
+ - - "~>"
60
+ - !ruby/object:Gem::Version
61
+ version: 0.6.0
62
+ type: :runtime
63
+ prerelease: false
64
+ version_requirements: !ruby/object:Gem::Requirement
65
+ requirements:
66
+ - - "~>"
67
+ - !ruby/object:Gem::Version
68
+ version: 0.6.0
69
+ - !ruby/object:Gem::Dependency
70
+ name: json
71
+ requirement: !ruby/object:Gem::Requirement
72
+ requirements:
73
+ - - "~>"
74
+ - !ruby/object:Gem::Version
75
+ version: 2.6.3
76
+ type: :runtime
77
+ prerelease: false
78
+ version_requirements: !ruby/object:Gem::Requirement
79
+ requirements:
80
+ - - "~>"
81
+ - !ruby/object:Gem::Version
82
+ version: 2.6.3
83
+ - !ruby/object:Gem::Dependency
84
+ name: dotenv
85
+ requirement: !ruby/object:Gem::Requirement
86
+ requirements:
87
+ - - "~>"
88
+ - !ruby/object:Gem::Version
89
+ version: 2.8.1
90
+ type: :runtime
91
+ prerelease: false
92
+ version_requirements: !ruby/object:Gem::Requirement
93
+ requirements:
94
+ - - "~>"
95
+ - !ruby/object:Gem::Version
96
+ version: 2.8.1
97
+ - !ruby/object:Gem::Dependency
98
+ name: minitest
99
+ requirement: !ruby/object:Gem::Requirement
100
+ requirements:
101
+ - - "~>"
102
+ - !ruby/object:Gem::Version
103
+ version: 5.19.0
104
+ type: :development
105
+ prerelease: false
106
+ version_requirements: !ruby/object:Gem::Requirement
107
+ requirements:
108
+ - - "~>"
109
+ - !ruby/object:Gem::Version
110
+ version: 5.19.0
111
+ description: Gem to invoke API calls to Huggingface and Openai LLMs
112
+ email:
113
+ - kaushalyabandaraatta@gmail.com
114
+ executables: []
115
+ extensions: []
116
+ extra_rdoc_files: []
117
+ files:
118
+ - lib/llm_lib.rb
119
+ - lib/llm_lib/huggingface.rb
120
+ - lib/llm_lib/openai.rb
121
+ - lib/llm_lib/restclient.rb
122
+ homepage: https://rubygems.org/gems/llm_lib
123
+ licenses:
124
+ - MIT
125
+ metadata:
126
+ source_code_uri: https://github.com/ChamathKB/llm-sdk
127
+ changelog_uri: https://github.com/ChamathKB/llm-sdk/blob/main/CHANGELOG.md
128
+ post_install_message:
129
+ rdoc_options: []
130
+ require_paths:
131
+ - lib
132
+ required_ruby_version: !ruby/object:Gem::Requirement
133
+ requirements:
134
+ - - ">="
135
+ - !ruby/object:Gem::Version
136
+ version: 3.0.2
137
+ required_rubygems_version: !ruby/object:Gem::Requirement
138
+ requirements:
139
+ - - ">="
140
+ - !ruby/object:Gem::Version
141
+ version: '0'
142
+ requirements: []
143
+ rubygems_version: 3.2.22
144
+ signing_key:
145
+ specification_version: 4
146
+ summary: Invoke API calls to LLMs
147
+ test_files: []