multiwoven-integrations 0.15.11 → 0.16.1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 47ebb726bc626f15a399cb9f1b94f594efa9b4fb75df06482014f10e37e9f6aa
-  data.tar.gz: 429e1a5d6f20f8445c9ef1fd6ecc9073443d97b9d8572c4224d871d4826c9fb7
+  metadata.gz: 87a9d42fe90c6a5588fb8843d4cf3ce0b2667b498337658fd1339f462678d9ed
+  data.tar.gz: 6ee164cce6f372e010302ab66a95558533173bdcaaa5a3ab9b4ddeb8f7ce73f2
 SHA512:
-  metadata.gz: 9a2757cadcb8754020f92c4a9503eb27418c5408eda08bc90baadf2d055c2d6cdb6a0877a8a0e744ecf0cafa578d1397efcc53f9c8ef13903dde6b77d22f1029
-  data.tar.gz: 054cb615fe6ccb54536648970ae365a7ec6326ed037d9f2fe2592ba0197d8c9dc87fc165bc89790df2d98fed8542cadfb9a98095ad84097fe963e584e8dec137
+  metadata.gz: cc87a1d9c5fc1252a1bc007fb597a5aea1e6a1f2860602fad3095a2586bcac1919b32bfb0b781500ec64639b0a2f3eaa3d6e68e837c79a0cc0c3b200f95ce8bb
+  data.tar.gz: 4aec5ed6beefe6c3913b60ec8727d3e9c78c74efa4bfb8dcae7d36f277244c22550ec0bceb47b7f9e0939405244d4720af0bb1b2da71677e2e70060741729a30
@@ -63,6 +63,8 @@ module Multiwoven
       # google sheets
       GOOGLE_SHEETS_SCOPE = "https://www.googleapis.com/auth/drive"
       GOOGLE_SPREADSHEET_ID_REGEX = %r{/d/([-\w]{20,})/}.freeze
+
+      OPEN_AI_URL = "https://api.openai.com/v1/chat/completions"
     end
   end
 end
@@ -2,7 +2,7 @@
 
 module Multiwoven
   module Integrations
-    VERSION = "0.15.11"
+    VERSION = "0.16.1"
 
     ENABLED_SOURCES = %w[
       Snowflake
@@ -20,6 +20,7 @@ module Multiwoven
       AwsSagemakerModel
       VertexModel
       HttpModel
+      OpenAI
     ].freeze
 
     ENABLED_DESTINATIONS = %w[
@@ -4,7 +4,7 @@
     "title": "HTTP Model Endpoint",
     "connector_type": "source",
     "category": "AI Model",
-    "documentation_url": "https://docs.mutliwoven.com",
+    "documentation_url": "https://docs.mutltiwoven.com",
     "github_issue_label": "source-http-model",
     "icon": "icon.svg",
     "license": "MIT",
@@ -6,7 +6,7 @@
   "$schema": "http://json-schema.org/draft-07/schema#",
   "title": "HTTP Model Endpoint",
   "type": "object",
-  "required": ["url_host"],
+  "required": ["url_host", "http_method"],
   "properties": {
     "http_method": {
       "type": "string",
lib/multiwoven/integrations/source/open_ai/client.rb ADDED
@@ -0,0 +1,117 @@
+# frozen_string_literal: true
+
+module Multiwoven::Integrations::Source
+  module OpenAI
+    include Multiwoven::Integrations::Core
+    class Client < SourceConnector
+      def check_connection(connection_config)
+        connection_config = prepare_config(connection_config)
+        response = send_request(
+          url: OPEN_AI_URL,
+          http_method: HTTP_POST,
+          payload: JSON.parse(connection_config[:request_format]),
+          headers: auth_headers(connection_config[:api_key]),
+          config: connection_config[:config]
+        )
+        success?(response) ? success_status : failure_status(nil)
+      rescue StandardError => e
+        handle_exception(e, { context: "OPEN AI:CHECK_CONNECTION:EXCEPTION", type: "error" })
+        failure_status(e)
+      end
+
+      def discover(_connection_config = nil)
+        catalog_json = read_json(CATALOG_SPEC_PATH)
+        catalog = build_catalog(catalog_json)
+        catalog.to_multiwoven_message
+      rescue StandardError => e
+        handle_exception(e, { context: "OPEN AI:DISCOVER:EXCEPTION", type: "error" })
+      end
+
+      def read(sync_config)
+        connection_config = prepare_config(sync_config.source.connection_specification)
+        stream = connection_config[:is_stream] ||= false
+        # The server checks the ConnectorQueryType.
+        # If it's "ai_ml", the server calculates the payload and passes it as a query in the sync config model protocol.
+        # This query is then sent to the AI/ML model.
+        payload = parse_json(sync_config.model.query)
+
+        if stream
+          run_model_stream(connection_config, payload) { |message| yield message if block_given? }
+        else
+          run_model(connection_config, payload)
+        end
+      rescue StandardError => e
+        handle_exception(e, { context: "OPEN AI:READ:EXCEPTION", type: "error" })
+      end
+
+      private
+
+      def prepare_config(config)
+        config.with_indifferent_access.tap do |conf|
+          conf[:config][:timeout] ||= 30
+        end
+      end
+
+      def parse_json(json_string)
+        JSON.parse(json_string)
+      rescue JSON::ParserError => e
+        handle_exception(e, { context: "OPEN AI:PARSE_JSON:EXCEPTION", type: "error" })
+        {}
+      end
+
+      def run_model(connection_config, payload)
+        response = send_request(
+          url: OPEN_AI_URL,
+          http_method: HTTP_POST,
+          payload: payload,
+          headers: auth_headers(connection_config[:api_key]),
+          config: connection_config[:config]
+        )
+        process_response(response)
+      rescue StandardError => e
+        handle_exception(e, { context: "OPEN AI:RUN_MODEL:EXCEPTION", type: "error" })
+      end
+
+      def run_model_stream(connection_config, payload)
+        send_streaming_request(
+          url: OPEN_AI_URL,
+          http_method: HTTP_POST,
+          payload: payload,
+          headers: auth_headers(connection_config[:api_key]),
+          config: connection_config[:config]
+        ) do |chunk|
+          process_streaming_response(chunk) { |message| yield message if block_given? }
+        end
+      rescue StandardError => e
+        handle_exception(e, { context: "OPEN AI:RUN_STREAM_MODEL:EXCEPTION", type: "error" })
+      end
+
+      def process_response(response)
+        if success?(response)
+          data = JSON.parse(response.body)
+          [RecordMessage.new(data: data, emitted_at: Time.now.to_i).to_multiwoven_message]
+        else
+          create_log_message("OPEN AI:RUN_MODEL", "error", "request failed: #{response.body}")
+        end
+      rescue StandardError => e
+        handle_exception(e, { context: "OPEN AI:PROCESS_RESPONSE:EXCEPTION", type: "error" })
+      end
+
+      def extract_data_entries(chunk)
+        chunk.split(/^data: /).map(&:strip).reject(&:empty?)
+      end
+
+      def process_streaming_response(chunk)
+        data_entries = extract_data_entries(chunk)
+        data_entries.each do |entry|
+          next if entry == "[DONE]"
+
+          data = parse_json(entry)
+          yield [RecordMessage.new(data: data, emitted_at: Time.now.to_i).to_multiwoven_message] if block_given?
+        rescue StandardError => e
+          handle_exception(e, { context: "OPEN AI:PROCESS_STREAMING_RESPONSE:EXCEPTION", type: "error", entry: entry })
+        end
+      end
+    end
+  end
+end
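A note on the streaming branch above: extract_data_entries splits each incoming chunk on its "data: " markers and process_streaming_response skips the terminating "[DONE]" sentinel before parsing, which matches the server-sent-events framing OpenAI uses for streamed chat completions. Below is a minimal standalone sketch of that parsing step; the sample chunk is illustrative, not captured from a real response.

# Sketch of the SSE handling in extract_data_entries / process_streaming_response.
# The chunk below is a made-up example of the "data: ..." framing.
require "json"

chunk = <<~SSE
  data: {"choices":[{"delta":{"content":"Hel"}}]}
  data: {"choices":[{"delta":{"content":"lo"}}]}
  data: [DONE]
SSE

entries = chunk.split(/^data: /).map(&:strip).reject(&:empty?)
entries.each do |entry|
  next if entry == "[DONE]" # sentinel marking the end of the stream

  data = JSON.parse(entry)
  print data.dig("choices", 0, "delta", "content") # prints "Hel" then "lo"
end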
lib/multiwoven/integrations/source/open_ai/config/catalog.json ADDED
@@ -0,0 +1,6 @@
+{
+  "request_rate_limit": 600,
+  "request_rate_limit_unit": "minute",
+  "request_rate_concurrency": 10,
+  "streams": []
+}
lib/multiwoven/integrations/source/open_ai/config/meta.json ADDED
@@ -0,0 +1,15 @@
+{
+  "data": {
+    "name": "OpenAI",
+    "title": "OpenAI Model Endpoint",
+    "connector_type": "source",
+    "category": "AI Model",
+    "documentation_url": "https://docs.mutltiwoven.com",
+    "github_issue_label": "source-open-ai-model",
+    "icon": "icon.svg",
+    "license": "MIT",
+    "release_stage": "alpha",
+    "support_level": "community",
+    "tags": ["language:ruby", "multiwoven"]
+  }
+}
lib/multiwoven/integrations/source/open_ai/config/spec.json ADDED
@@ -0,0 +1,54 @@
+{
+  "documentation_url": "https://docs.multiwoven.com/integrations/source/open-ai-endpoint",
+  "stream_type": "user_defined",
+  "connector_query_type": "ai_ml",
+  "connection_specification": {
+    "$schema": "http://json-schema.org/draft-07/schema#",
+    "title": "Open AI Endpoint",
+    "type": "object",
+    "required": ["api_key", "request_format", "response_format"],
+    "properties": {
+      "api_key": {
+        "type": "string",
+        "multiwoven_secret": true,
+        "title": "API Key",
+        "order": 0
+      },
+      "is_stream": {
+        "type": "boolean",
+        "title": "Streaming Enabled",
+        "description": "Enables data streaming for responses such as chat, when supported by the model. When true, messages and model data are processed in chunks for immediate delivery, enhancing responsiveness. Default is false, processing only after the entire response is received.",
+        "default": false,
+        "order": 1
+      },
+      "config": {
+        "title": "",
+        "type": "object",
+        "properties": {
+          "timeout": {
+            "type": "string",
+            "default": "30",
+            "title": "HTTP Timeout",
+            "description": "The maximum time, in seconds, to wait for a response from the server before the request is canceled.",
+            "order": 0
+          }
+        },
+        "order": 2
+      },
+      "request_format": {
+        "title": "Request Format",
+        "description": "Sample Request Format",
+        "type": "string",
+        "x-request-format": true,
+        "order": 3
+      },
+      "response_format": {
+        "title": "Response Format",
+        "description": "Sample Response Format",
+        "type": "string",
+        "x-response-format": true,
+        "order": 4
+      }
+    }
+  }
+}
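Putting the spec fields together: a hedged sketch of what a connection_specification for this source might look like and how it could be handed to the new client. All values are placeholders (the key, model name, and messages are illustrative), the request_format body follows OpenAI's public chat-completions shape of model plus messages, and the no-argument Client.new instantiation is assumed to match the gem's other source connectors.

# Illustrative only: placeholder credentials and payloads, not a tested setup.
require "multiwoven/integrations"

connection_config = {
  "api_key" => "sk-REPLACE_ME",      # OpenAI API key (placeholder)
  "is_stream" => false,              # buffered reads; true enables the streaming path
  "config" => { "timeout" => "30" }, # HTTP timeout in seconds
  "request_format" => {
    "model" => "gpt-4o-mini",        # placeholder model name
    "messages" => [{ "role" => "user", "content" => "ping" }]
  }.to_json,
  "response_format" => "{}"          # sample response shape, free-form
}

client = Multiwoven::Integrations::Source::OpenAI::Client.new
client.check_connection(connection_config) # POSTs request_format to OPEN_AI_URL
client.discover                            # returns the catalog built from config/catalog.json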
lib/multiwoven/integrations/source/open_ai/icon.svg ADDED
@@ -0,0 +1 @@
+<svg viewBox="0 0 1180 320" xmlns="http://www.w3.org/2000/svg"><path d="m367.44 153.84c0 52.32 33.6 88.8 80.16 88.8s80.16-36.48 80.16-88.8-33.6-88.8-80.16-88.8-80.16 36.48-80.16 88.8zm129.6 0c0 37.44-20.4 61.68-49.44 61.68s-49.44-24.24-49.44-61.68 20.4-61.68 49.44-61.68 49.44 24.24 49.44 61.68z"/><path d="m614.27 242.64c35.28 0 55.44-29.76 55.44-65.52s-20.16-65.52-55.44-65.52c-16.32 0-28.32 6.48-36.24 15.84v-13.44h-28.8v169.2h28.8v-56.4c7.92 9.36 19.92 15.84 36.24 15.84zm-36.96-69.12c0-23.76 13.44-36.72 31.2-36.72 20.88 0 32.16 16.32 32.16 40.32s-11.28 40.32-32.16 40.32c-17.76 0-31.2-13.2-31.2-36.48z"/><path d="m747.65 242.64c25.2 0 45.12-13.2 54-35.28l-24.72-9.36c-3.84 12.96-15.12 20.16-29.28 20.16-18.48 0-31.44-13.2-33.6-34.8h88.32v-9.6c0-34.56-19.44-62.16-55.92-62.16s-60 28.56-60 65.52c0 38.88 25.2 65.52 61.2 65.52zm-1.44-106.8c18.24 0 26.88 12 27.12 25.92h-57.84c4.32-17.04 15.84-25.92 30.72-25.92z"/><path d="m823.98 240h28.8v-73.92c0-18 13.2-27.6 26.16-27.6 15.84 0 22.08 11.28 22.08 26.88v74.64h28.8v-83.04c0-27.12-15.84-45.36-42.24-45.36-16.32 0-27.6 7.44-34.8 15.84v-13.44h-28.8z"/><path d="m1014.17 67.68-65.28 172.32h30.48l14.64-39.36h74.4l14.88 39.36h30.96l-65.28-172.32zm16.8 34.08 27.36 72h-54.24z"/><path d="m1163.69 68.18h-30.72v172.32h30.72z"/><path d="m297.06 130.97c7.26-21.79 4.76-45.66-6.85-65.48-17.46-30.4-52.56-46.04-86.84-38.68-15.25-17.18-37.16-26.95-60.13-26.81-35.04-.08-66.13 22.48-76.91 55.82-22.51 4.61-41.94 18.7-53.31 38.67-17.59 30.32-13.58 68.54 9.92 94.54-7.26 21.79-4.76 45.66 6.85 65.48 17.46 30.4 52.56 46.04 86.84 38.68 15.24 17.18 37.16 26.95 60.13 26.8 35.06.09 66.16-22.49 76.94-55.86 22.51-4.61 41.94-18.7 53.31-38.67 17.57-30.32 13.55-68.51-9.94-94.51zm-120.28 168.11c-14.03.02-27.62-4.89-38.39-13.88.49-.26 1.34-.73 1.89-1.07l63.72-36.8c3.26-1.85 5.26-5.32 5.24-9.07v-89.83l26.93 15.55c.29.14.48.42.52.74v74.39c-.04 33.08-26.83 59.9-59.91 59.97zm-128.84-55.03c-7.03-12.14-9.56-26.37-7.15-40.18.47.28 1.3.79 1.89 1.13l63.72 36.8c3.23 1.89 7.23 1.89 10.47 0l77.79-44.92v31.1c.02.32-.13.63-.38.83l-64.41 37.19c-28.69 16.52-65.33 6.7-81.92-21.95zm-16.77-139.09c7-12.16 18.05-21.46 31.21-26.29 0 .55-.03 1.52-.03 2.2v73.61c-.02 3.74 1.98 7.21 5.23 9.06l77.79 44.91-26.93 15.55c-.27.18-.61.21-.91.08l-64.42-37.22c-28.63-16.58-38.45-53.21-21.95-81.89zm221.26 51.49-77.79-44.92 26.93-15.54c.27-.18.61-.21.91-.08l64.42 37.19c28.68 16.57 38.51 53.26 21.94 81.94-7.01 12.14-18.05 21.44-31.2 26.28v-75.81c.03-3.74-1.96-7.2-5.2-9.06zm26.8-40.34c-.47-.29-1.3-.79-1.89-1.13l-63.72-36.8c-3.23-1.89-7.23-1.89-10.47 0l-77.79 44.92v-31.1c-.02-.32.13-.63.38-.83l64.41-37.16c28.69-16.55 65.37-6.7 81.91 22 6.99 12.12 9.52 26.31 7.15 40.1zm-168.51 55.43-26.94-15.55c-.29-.14-.48-.42-.52-.74v-74.39c.02-33.12 26.89-59.96 60.01-59.94 14.01 0 27.57 4.92 38.34 13.88-.49.26-1.33.73-1.89 1.07l-63.72 36.8c-3.26 1.85-5.26 5.31-5.24 9.06l-.04 89.79zm14.63-31.54 34.65-20.01 34.65 20v40.01l-34.65 20-34.65-20z"/></svg>
@@ -73,6 +73,7 @@ require_relative "integrations/source/databrics_model/client"
 require_relative "integrations/source/aws_sagemaker_model/client"
 require_relative "integrations/source/google_vertex_model/client"
 require_relative "integrations/source/http_model/client"
+require_relative "integrations/source/open_ai/client"
 
 # Destination
 require_relative "integrations/destination/klaviyo/client"
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: multiwoven-integrations
 version: !ruby/object:Gem::Version
-  version: 0.15.11
+  version: 0.16.1
 platform: ruby
 authors:
 - Subin T P
-autorequire:
+autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-12-13 00:00:00.000000000 Z
+date: 2024-12-20 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: activesupport
@@ -735,6 +735,11 @@ files:
 - lib/multiwoven/integrations/source/maria_db/config/meta.json
 - lib/multiwoven/integrations/source/maria_db/config/spec.json
 - lib/multiwoven/integrations/source/maria_db/icon.svg
+- lib/multiwoven/integrations/source/open_ai/client.rb
+- lib/multiwoven/integrations/source/open_ai/config/catalog.json
+- lib/multiwoven/integrations/source/open_ai/config/meta.json
+- lib/multiwoven/integrations/source/open_ai/config/spec.json
+- lib/multiwoven/integrations/source/open_ai/icon.svg
 - lib/multiwoven/integrations/source/oracle_db/client.rb
 - lib/multiwoven/integrations/source/oracle_db/config/meta.json
 - lib/multiwoven/integrations/source/oracle_db/config/spec.json
@@ -767,7 +772,7 @@ metadata:
   homepage_uri: https://www.multiwoven.com/
   source_code_uri: https://github.com/Multiwoven/multiwoven/tree/main/integrations
   changelog_uri: https://github.com/Multiwoven/multiwoven/blob/main/integrations/CHANGELOG.md
-post_install_message:
+post_install_message:
 rdoc_options: []
 require_paths:
 - lib
@@ -783,7 +788,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     version: '0'
 requirements: []
 rubygems_version: 3.4.1
-signing_key:
+signing_key:
 specification_version: 4
 summary: Integration suite for open source reverse ETL platform
 test_files: []