cloudflare-ai 0.9.0 → 0.9.2
- checksums.yaml +4 -4
- data/README.md +30 -0
- data/lib/cloudflare/ai/client.rb +21 -0
- data/lib/cloudflare/ai/models.rb +67 -20
- data/lib/cloudflare/ai/results/image_to_text.rb +5 -0
- data/lib/cloudflare/ai/results/object_detection.rb +3 -0
- data/lib/cloudflare/ai/results/summarization.rb +5 -0
- data/lib/cloudflare/ai/version.rb +1 -1
- metadata +5 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c0dbb7a703cef587e4c132ced954abc6be79bb903fe6e2b1d531ae8a1bd22ad9
+  data.tar.gz: f59a67ae6ff4785994269a65bce16f3bc22d284dfc3fcdac5d4d846b70e5e2b7
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 55ab260777dfdfb4427f20e06c2a3522226db63bd869a627ed0abbd7c83948d33f37585f2249c1417ae590b0267934b5ce9e7444df03df95fad8b4d1334c0a12
+  data.tar.gz: 1716e968bf8f7694de57eadf3b003a5e4cd76cb25b240112a8561830c2fe16cd17a769af02662024edb32efc860675520356431cd2dd8cfc132db58ae7c8a165
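These values are the digests of the `metadata.gz` and `data.tar.gz` archives packed inside the `.gem` file. If you want to compare a downloaded copy against them, here is a minimal sketch, assuming you have already fetched and unpacked the gem (e.g. `gem fetch cloudflare-ai -v 0.9.2` followed by `tar -xf cloudflare-ai-0.9.2.gem`) into the current directory:

```ruby
require "digest"

# Digest each archive from the unpacked gem and compare against checksums.yaml above.
%w[metadata.gz data.tar.gz].each do |file|
  puts "SHA256 #{file}: #{Digest::SHA256.file(file).hexdigest}"
  puts "SHA512 #{file}: #{Digest::SHA512.file(file).hexdigest}"
end
```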
data/README.md
CHANGED
@@ -194,6 +194,36 @@ result = client.transcribe(audio: File.open("/path/to/audio.wav"))
 #### Result object
 All invocations of the `transcribe` method return a `Cloudflare::AI::Results::Transcribe`.
 
+### Summarization
+```ruby
+result = client.summarize(text: "This text should be a lot longer.")
+p result.summary # => {"result":{"summary":"Short text"},"success":true,"errors":[],"messages":[]}
+```
+#### Result object
+All invocations of the `summarize` method return a `Cloudflare::AI::Results::Summarization` object.
+
+### Object detection
+The object detection endpoint accepts either a path to a file or a file stream.
+
+```ruby
+result = client.detect_objects(image: "/path/to/cat.jpg")
+result = client.detect_objects(image: File.open("/path/to/cat.jpg"))
+```
+
+#### Result object
+All invocations of the `detect_objects` method return a `Cloudflare::AI::Results::ObjectDetection` object.
+
+### Image-to-text
+The captioning endpoint accepts either a path to a file or a file stream.
+
+```ruby
+client.caption(image: "/path/to/cat.jpg").description # => "a cat sitting on a couch"
+client.caption(image: File.open("/path/to/cat.jpg")).description # => "a cat sitting on a couch"
+```
+
+#### Result object
+All invocations of the `caption` method return a `Cloudflare::AI::Results::ImageToText` object.
+
 # Logging
 
 This gem uses standard logging mechanisms and defaults to `:warn` level. Most messages are at info level, but we will add debug or warn statements as needed.
data/lib/cloudflare/ai/client.rb
CHANGED
@@ -12,6 +12,13 @@ class Cloudflare::AI::Client
     @api_token = api_token
   end
 
+  def caption(image: nil, model_name: Cloudflare::AI::Models.image_to_text.first)
+    url = service_url_for(account_id: account_id, model_name: model_name)
+
+    image = File.open(image) if image.is_a?(String)
+    Cloudflare::AI::Results::ImageToText.new(post_request_with_binary_file(url, image).body)
+  end
+
   def chat(messages:, model_name: default_text_generation_model_name, max_tokens: default_max_tokens, &block)
     url = service_url_for(account_id: account_id, model_name: model_name)
     stream = block ? true : false
@@ -41,6 +48,13 @@ class Cloudflare::AI::Client
     post_streamable_request(url, payload, &block)
   end
 
+  def detect_objects(image: nil, model_name: Cloudflare::AI::Models.object_detection.first)
+    url = service_url_for(account_id: account_id, model_name: model_name)
+
+    image = File.open(image) if image.is_a?(String)
+    Cloudflare::AI::Results::ObjectDetection.new(post_request_with_binary_file(url, image).body)
+  end
+
   def draw(prompt:, num_steps: 20, model_name: Cloudflare::AI::Models.text_to_image.first)
     url = service_url_for(account_id: account_id, model_name: model_name)
     payload = {prompt: prompt, num_steps: num_steps}.to_json
@@ -59,6 +73,13 @@ class Cloudflare::AI::Client
     Cloudflare::AI::Results::TextEmbedding.new(connection.post(url, payload).body)
   end
 
+  def summarize(text:, model_name: Cloudflare::AI::Models.summarization.first, max_tokens: 1024)
+    url = service_url_for(account_id: account_id, model_name: model_name)
+    payload = {input_text: text, max_tokens: max_tokens}.to_json
+
+    Cloudflare::AI::Results::Summarization.new(connection.post(url, payload).body)
+  end
+
   def transcribe(source_url: nil, audio: nil, model_name: Cloudflare::AI::Models.automatic_speech_recognition.first)
     raise ArgumentError, "Must provide either audio_url or audio" if [source_url, audio].compact.size != 1
 
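The three new methods follow the existing endpoint pattern: build the per-model service URL, then POST either a JSON payload (`summarize`) or a binary file (`caption`, `detect_objects`). A minimal usage sketch, assuming the client is constructed with `account_id:` and `api_token:` keyword arguments as in the gem's README:

```ruby
require "cloudflare/ai"

# Assumed constructor arguments; adjust to however you already build the client.
client = Cloudflare::AI::Client.new(
  account_id: ENV["CLOUDFLARE_ACCOUNT_ID"],
  api_token: ENV["CLOUDFLARE_API_TOKEN"]
)

# summarize posts {input_text:, max_tokens:} as JSON; the default model is
# Cloudflare::AI::Models.summarization.first (@cf/facebook/bart-large-cnn).
summary = client.summarize(text: File.read("article.txt"))

# caption and detect_objects accept a path or an IO; a String path is opened automatically.
caption = client.caption(image: "cat.jpg")
objects = client.detect_objects(image: File.open("cat.jpg"))
```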
data/lib/cloudflare/ai/models.rb
CHANGED
@@ -1,43 +1,90 @@
 class Cloudflare::AI::Models
   class << self
-    def
-
+    def all
+      {
+        automatic_speech_recognition: automatic_speech_recognition,
+        image_classification: image_classification,
+        image_to_text: image_to_text,
+        object_detection: object_detection,
+        summarization: summarization,
+        text_classification: text_classification,
+        text_embeddings: text_embedding,
+        text_generation: text_generation,
+        text_to_image: text_to_image,
+        translation: translation
+      }
     end
 
     def automatic_speech_recognition
       %w[@cf/openai/whisper]
     end
 
-    def
-      %w[@cf/
+    def image_classification
+      %w[@cf/microsoft/resnet-50]
     end
 
-    def
-      %w[@cf/
+    def image_to_text
+      %w[@cf/unum/uform-gen2-qwen-500m]
     end
 
-    def
-      %w[@cf/
+    def object_detection
+      %w[@cf/meta/detr-resnet-50]
     end
 
-    def
-      %w[@cf/
+    def summarization
+      %w[@cf/facebook/bart-large-cnn]
+    end
+
+    def text_classification
+      %w[@cf/huggingface/distilbert-sst-2-int8]
     end
 
     def text_embedding
       %w[@cf/baai/bge-base-en-v1.5 @cf/baai/bge-large-en-v1.5 @cf/baai/bge-small-en-v1.5]
     end
 
-    def
-
-
-
-
-
-
-
-
-
+    def text_generation
+      %w[
+        @hf/thebloke/codellama-7b-instruct-awq
+        @hf/thebloke/deepseek-coder-6.7b-base-awq
+        @hf/thebloke/deepseek-coder-6.7b-instruct-awq
+        @cf/deepseek-ai/deepseek-math-7b-base
+        @cf/deepseek-ai/deepseek-math-7b-instruct
+        @cf/thebloke/discolm-german-7b-v1-awq
+        @cf/tiiuae/falcon-7b-instruct
+        @hf/thebloke/llama-2-13b-chat-awq
+        @cf/meta/llama-2-7b-chat-fp16
+        @cf/meta/llama-2-7b-chat-int8
+        @hf/thebloke/llamaguard-7b-awq
+        @cf/mistral/mistral-7b-instruct-v0.1
+        @hf/thebloke/mistral-7b-instruct-v0.1-awq
+        @hf/thebloke/neural-chat-7b-v3-1-awq
+        @hf/thebloke/openchat_3.5-awq
+        @cf/openchat/openchat-3.5-0106
+        @hf/thebloke/openhermes-2.5-mistral-7b-awq
+        @cf/microsoft/phi-2
+        @cf/qwen/qwen1.5-0.5b-chat
+        @cf/qwen/qwen1.5-1.8b-chat
+        @cf/qwen/qwen1.5-14b-chat-awq
+        @cf/qwen/qwen1.5-7b-chat-awq
+        @cf/defog/sqlcoder-7b-2
+        @cf/tinyllama/tinyllama-1.1b-chat-v1.0
+        @hf/thebloke/zephyr-7b-beta-awq
+      ]
+    end
+
+    def text_to_image
+      %w[
+        @cf/lykon/dreamshaper-8-lcm
+        @cf/runwayml/stable-diffusion-v1-5-img2img
+        @cf/runwayml/stable-diffusion-v1-5-inpainting
+        @cf/stabilityai/stable-diffusion-xl-base-1.0
+        @cf/bytedance/stable-diffusion-xl-lightning
+      ]
+    end
+
+    def translation
+      %w[@cf/meta/m2m100-1.2b]
     end
   end
 end
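With `all` in place, callers can enumerate every task/model pair or pass a specific `model_name:` instead of relying on each client method's default, which is simply the first entry of the relevant list. A short illustrative sketch, reusing the `client` built in the earlier example:

```ruby
# Print every task and the model names registered for it.
Cloudflare::AI::Models.all.each do |task, models|
  puts "#{task}: #{models.join(", ")}"
end

# Override the default model explicitly.
client.summarize(
  text: "A long passage to condense...",
  model_name: Cloudflare::AI::Models.summarization.first # "@cf/facebook/bart-large-cnn"
)
```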
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: cloudflare-ai
 version: !ruby/object:Gem::Version
-  version: 0.9.0
+  version: 0.9.2
 platform: ruby
 authors:
 - Ajay Krishnan
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-02-
+date: 2024-02-29 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: activemodel
@@ -115,6 +115,9 @@ files:
 - lib/cloudflare/ai/result.rb
 - lib/cloudflare/ai/results/automatic_speech_recognition.rb
 - lib/cloudflare/ai/results/image_classification.rb
+- lib/cloudflare/ai/results/image_to_text.rb
+- lib/cloudflare/ai/results/object_detection.rb
+- lib/cloudflare/ai/results/summarization.rb
 - lib/cloudflare/ai/results/text_classification.rb
 - lib/cloudflare/ai/results/text_embedding.rb
 - lib/cloudflare/ai/results/text_generation.rb
|