llm_lib 0.1.1 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/llm_lib.rb +13 -42
- metadata +1 -1
checksums.yaml CHANGED

```diff
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 40b5f225218a674034c99e849437850386407b735cc20b78bd0139fc5be37d91
+  data.tar.gz: 41032e97b353a8a1c1cfe8cee0211be41035688fb2475adbe113fa5064eb7ee1
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 418f302f026fefd27f857a87172fc044847206cb0bbce838315595aa93ec86e4b62edf00d6cdfdd9ab85eb65580284bcc3658f013da4b4f53b9fa081d4d91188
+  data.tar.gz: 55840841a1978aed41e7165ea40c7ac13fdd1da79f38f5b6c55151a1571eba90bfed53b818c2bbe9f5f5b385aee20615e7603f4bf8ce0e919b2e6ef6d5106640
```
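Both digest families cover the metadata.gz and data.tar.gz entries packed inside the .gem archive (which is a plain tar file), so they can be recomputed locally as a sanity check. The sketch below is a minimal example; the filename llm_lib-0.2.0.gem is an assumption.

```ruby
require "digest"
require "rubygems/package"

# Recompute the SHA256 digests of the entries that checksums.yaml covers.
# Assumes a downloaded copy named llm_lib-0.2.0.gem in the working directory.
File.open("llm_lib-0.2.0.gem", "rb") do |gem_file|
  Gem::Package::TarReader.new(gem_file) do |tar|
    tar.each do |entry|
      next unless %w[metadata.gz data.tar.gz].include?(entry.full_name)
      puts "#{entry.full_name}: #{Digest::SHA256.hexdigest(entry.read)}"
    end
  end
end
```

For an untampered 0.2.0 package, the printed values should match the `+` SHA256 lines above.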
data/lib/llm_lib.rb CHANGED

```diff
@@ -11,40 +11,10 @@ module LlmLib
     # response = client.chat_gpt_call(prompt, max_tokens)
     # puts response
 
-    # remove
-    attr_reader :apikey
-    # def def initialize(apikey, query)
-    #     @apikey, @query = apikey, query
-    # end
-
     def initialize(apikey)
       @apikey = apikey
     end
 
-    # def chat_gpt_call(prompt, max_tokens, temperature = 0, top_p = 1, n = 1, stream = false, logprobs = nil, stop = "\n")
-    #     model = "text-davinci-003"
-    #     response = self.class.send(@api_key, {
-    #         "model" => model,
-    #         "prompt" => prompt,
-    #         "max_tokens" => max_tokens,
-    #         "temperature" => temperature,
-    #         "top_p" => top_p,
-    #         "n" => n,
-    #         "stream" => stream,
-    #         "logprobs" => logprobs,
-    #         "stop" => stop
-    #     })
-    #     response
-    # end
-
-    # def self.send(apikey, body)
-    #     LlmLib::Restclient.post(
-    #         body: body,
-    #         url: "https://api.openai.com/v1/completions",
-    #         apikey: apikey
-    #     )
-    # end
-
     def chat_gpt_call(prompt, max_tokens, temperature = 0, top_p = 1, n = 1, stream = false, stop = "\n")
       model = "gpt-3.5-turbo"
       response = OpenAI.send(@apikey,
```
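The net effect of this hunk: 0.2.0 drops the dead 0.1.1 scaffolding (the stray `attr_reader`, the commented-out text-davinci-003 completion call, and the commented-out class-level sender) and keeps a single-argument constructor plus a `chat_gpt_call` that targets gpt-3.5-turbo. The usage comment preserved at the top of the file suggests the intended call pattern; here is a minimal caller sketch, where the class name `LlmLib::OpenAIClient` is an assumption, since the hunk headers only show the enclosing module.

```ruby
require "llm_lib"

# Caller sketch based on the usage comment kept at the top of the file.
# LlmLib::OpenAIClient is an assumed class name; the diff only shows
# `module LlmLib` in the hunk headers.
client = LlmLib::OpenAIClient.new(ENV["OPENAI_API_KEY"])

response = client.chat_gpt_call("Say hello in Ruby.", 128)
puts response
```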
```diff
@@ -62,7 +32,7 @@ module LlmLib
 
     def gpt4_call(prompt, max_tokens, temperature = 0, top_p = 1, n = 1, stream = false, stop = "\n")
       model = "gpt-4"
-      response = OpenAI.send(@
+      response = OpenAI.send(@apikey,
       model,
       prompt,
       max_tokens,
```
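This hunk is a straight bug fix: in 0.1.1 the line was truncated to `response = OpenAI.send(@`, a syntax error, so `gpt4_call` could not have worked at all; 0.2.0 completes it to pass `@apikey`. The diff never shows `OpenAI.send` itself, but the commented-out sender removed in the first hunk (an `LlmLib::Restclient.post` taking `body:`, `url:`, and `apikey:`) hints at the transport. Below is a hedged sketch of what a chat-model sender might look like under that assumption; the /v1/chat/completions URL and the messages-style body are assumptions, since the removed comments targeted the older /v1/completions endpoint.

```ruby
module LlmLib
  class OpenAI
    # Sketch modeled on the commented-out self.send removed in the first
    # hunk. The chat/completions URL and "messages" body shape are
    # assumptions for the gpt-3.5-turbo / gpt-4 chat models; the removed
    # 0.1.1 code posted to /v1/completions instead.
    def self.send(apikey, model, prompt, max_tokens, temperature = 0, top_p = 1, n = 1, stream = false, stop = "\n")
      LlmLib::Restclient.post(
        body: {
          "model" => model,
          "messages" => [{ "role" => "user", "content" => prompt }],
          "max_tokens" => max_tokens,
          "temperature" => temperature,
          "top_p" => top_p,
          "n" => n,
          "stream" => stream,
          "stop" => stop
        },
        url: "https://api.openai.com/v1/chat/completions",
        apikey: apikey
      )
    end
  end
end
```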
```diff
@@ -105,25 +75,26 @@ module LlmLib
 
     def hugging_falcon_call(query, model = "tiiuae/falcon-40b-instruct")
       response = HuggingFace.send(@api_key,
-
-
+      model,
+      query)
       response
     end
 
     def hugging_llama2_call(query, model = "meta-llama/Llama-2-70b-chat-hf")
       response = HuggingFace.send(@api_key,
-
-
+      model,
+      query)
+      response
+    end
+
+    def hugging_dolly2_call(query, model = "databricks/dolly-v2-12b")
+      response = HuggingFace.send(@api_key,
+      model,
+      query
+      )
       response
     end
 
-    # def self.send(apikey, body, model)
-    #     LlmLib::Restclient.post(
-    #         body: body,
-    #         url: "https://api-inference.huggingface.co/models/#{model}",
-    #         apikey: apikey
-    #     )
-    # end
   end
 
 end
```
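Two details are worth noting in this last hunk. First, 0.2.0 passes `model` and `query` explicitly to `HuggingFace.send` in the Falcon and Llama 2 helpers and adds a new `hugging_dolly2_call` for databricks/dolly-v2-12b; the commented-out sender removed at the bottom shows the intended transport, a POST to https://api-inference.huggingface.co/models/#{model}. Second, `initialize` assigns `@apikey` while all three helpers read `@api_key`, so as written these calls would pass a nil key. A caller sketch under the assumption that a client class of the same shape wraps these helpers (the class name is hypothetical):

```ruby
require "llm_lib"

# Hypothetical caller; LlmLib::HuggingFaceApiClient is an assumed name.
# Note the @apikey / @api_key mismatch described above: as diffed, the
# helpers would read an unset @api_key and send a nil key.
client = LlmLib::HuggingFaceApiClient.new(ENV["HF_API_TOKEN"])

puts client.hugging_falcon_call("What is a diffusion model?")

# The other helpers follow the same shape with different default models:
#   client.hugging_llama2_call(query)  # meta-llama/Llama-2-70b-chat-hf
#   client.hugging_dolly2_call(query)  # databricks/dolly-v2-12b
```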