multiwoven-integrations 0.19.3 → 0.21.0

This diff shows the contents of publicly released package versions as published to their respective registries. It is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: cfc51c82e8c79ab8f6b275559a6b15167d23ee1f501c58ca257d3d748b2e9bf8
-  data.tar.gz: 27aaf1554717099d3485a1d230f6804117373ec74382f0faa78d748d16a14d44
+  metadata.gz: 902cb64b94dfc3aca7db9cbf9e010681c8a0f2318ea44fb587a9bb3d04d6dd89
+  data.tar.gz: 58afaf6900a75f7b0f010d151143bef0c19724b17b98cfebd24804975aade58f
 SHA512:
-  metadata.gz: a03defe388d9c0a96b80b070201d34ee9520630c7c1adebf3ca7b0158316ab4af0dae524755c3eed2ecbdbb98dce16316ce3224bea9b874f42cd01c9a0c3c912
-  data.tar.gz: da3a1b3c468090850d9b4732a261dcc82f124325fe8db7f20a9577a8022395ecd30a5c55dddea272b61e5fc6a79acac0d77c1d88419c6960ef901b3a32293c59
+  metadata.gz: 017ea4022436ae277d626ad94b3d1981e46af63b53f3696ec267ebb910856f93acad7d02d2264f981c002678f1d5a48216e4f5e24df0f2f7e84030bb59b6dcf2
+  data.tar.gz: 834f6f6730786310fec4b478dff54b6599ab1379b0642c59f691fa6b28229ca1c7c9e92a0668ce72a730dbc654556f6138b83a8d9437a2452535b5d7d180aa82
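
The two digest pairs above cover the archives packed inside the published .gem, which is itself a plain tar file. As a minimal verification sketch (assuming a locally fetched copy of 0.21.0, e.g. via gem fetch multiwoven-integrations -v 0.21.0), the SHA256 lines can be reproduced with Ruby's standard library:

    require "digest"
    require "rubygems/package"

    # A .gem is a tar archive whose members include metadata.gz and data.tar.gz;
    # hashing those members should reproduce the SHA256 values above.
    File.open("multiwoven-integrations-0.21.0.gem", "rb") do |io|
      Gem::Package::TarReader.new(io).each do |entry|
        next unless %w[metadata.gz data.tar.gz].include?(entry.full_name)

        puts "#{entry.full_name}: #{Digest::SHA256.hexdigest(entry.read)}"
      end
    end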
@@ -55,6 +55,11 @@ module Multiwoven
         GOOGLE_VERTEX_ENDPOINT_SERVICE_URL = "%<region>s-aiplatform.googleapis.com"
         GOOGLE_VERTEX_MODEL_NAME = "projects/%<project_id>s/locations/%<region>s/endpoints/%<endpoint_id>s"
 
+        WATSONX_HEALTH_DEPLOYMENT_URL = "https://%<region>s.ml.cloud.ibm.com/ml/v4/deployments?version=%<version>s"
+        WATSONX_PREDICTION_DEPLOYMENT_URL = "https://%<region>s.ml.cloud.ibm.com/ml/v4/deployments/%<deployment_id>s/predictions?version=%<version>s"
+        WATSONX_GENERATION_DEPLOYMENT_URL = "https://%<region>s.ml.cloud.ibm.com/ml/v1/deployments/%<deployment_id>s/text/generation?version=%<version>s"
+        WATSONX_STREAM_DEPLOYMENT_URL = "https://%<region>s.ml.cloud.ibm.com/ml/v1/deployments/%<deployment_id>s/text/generation_stream?version=%<version>s"
+
         # HTTP
         HTTP_GET = "GET"
         HTTP_POST = "POST"
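
The new WATSONX_* constants are Kernel#format templates using %<name>s named references; the WatsonX AI client added later in this diff expands them with keyword arguments. An illustrative expansion (the region and deployment id are placeholder values, and the constant is assumed to be in scope, as it is inside the connector classes that include the Core module):

    # Illustrative only: "us-south" and "0a1b2c3d" are made-up values.
    url = format(
      WATSONX_PREDICTION_DEPLOYMENT_URL,
      region: "us-south",
      deployment_id: "0a1b2c3d",
      version: "2021-05-01"
    )
    # => "https://us-south.ml.cloud.ibm.com/ml/v4/deployments/0a1b2c3d/predictions?version=2021-05-01"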
@@ -67,6 +72,7 @@ module Multiwoven
         GOOGLE_SPREADSHEET_ID_REGEX = %r{/d/([-\w]{20,})/}.freeze
 
         OPEN_AI_URL = "https://api.openai.com/v1/chat/completions"
+        ANTROPIC_URL = "https://api.anthropic.com/v1/messages"
       end
     end
   end
@@ -2,7 +2,7 @@
 
 module Multiwoven
   module Integrations
-    VERSION = "0.19.3"
+    VERSION = "0.21.0"
 
     ENABLED_SOURCES = %w[
       Snowflake
@@ -22,6 +22,8 @@ module Multiwoven
       HttpModel
       OpenAI
       Sftp
+      WatsonxAi
+      Antropic
     ].freeze
 
     ENABLED_DESTINATIONS = %w[

lib/multiwoven/integrations/source/antropic/client.rb ADDED
@@ -0,0 +1,135 @@
+# frozen_string_literal: true
+
+module Multiwoven::Integrations::Source
+  module Antropic
+    include Multiwoven::Integrations::Core
+    class Client < SourceConnector
+      API_VERSION = "2023-06-01"
+      def check_connection(connection_config)
+        connection_config = prepare_config(connection_config)
+        response = make_request(ANTROPIC_URL, HTTP_POST, connection_config[:request_format], connection_config)
+        success?(response) ? success_status : failure_status(nil)
+      rescue StandardError => e
+        handle_exception(e, { context: "ANTROPIC:CHECK_CONNECTION:EXCEPTION", type: "error" })
+        failure_status(e)
+      end
+
+      def discover(_connection_config = nil)
+        catalog_json = read_json(CATALOG_SPEC_PATH)
+        catalog = build_catalog(catalog_json)
+        catalog.to_multiwoven_message
+      rescue StandardError => e
+        handle_exception(e, { context: "ANTROPIC:DISCOVER:EXCEPTION", type: "error" })
+      end
+
+      def read(sync_config)
+        # The server checks the ConnectorQueryType.
+        # If it's "ai_ml," the server calculates the payload and passes it as a query in the sync config model protocol.
+        # This query is then sent to the AI/ML model.
+        connection_config = prepare_config(sync_config.source.connection_specification)
+        stream = connection_config[:is_stream] ||= false
+        payload = sync_config.model.query
+        if stream
+          run_model_stream(connection_config, payload) { |message| yield message if block_given? }
+        else
+          run_model(connection_config, payload)
+        end
+      rescue StandardError => e
+        handle_exception(e, { context: "ANTROPIC:READ:EXCEPTION", type: "error" })
+      end
+
+      private
+
+      def prepare_config(config)
+        config.with_indifferent_access.tap do |conf|
+          conf[:config][:timeout] ||= 30
+        end
+      end
+
+      def parse_json(json_string)
+        JSON.parse(json_string)
+      rescue JSON::ParserError => e
+        handle_exception(e, { context: "ANTROPIC:PARSE_JSON:EXCEPTION", type: "error" })
+        {}
+      end
+
+      def build_headers(connection_config, streaming: false)
+        {
+          "x-api-key" => connection_config[:api_key],
+          "anthropic-version" => API_VERSION,
+          "content-type" => "application/json"
+        }.tap do |headers|
+          headers["transfer-encoding"] = "chunked" if streaming
+        end
+      end
+
+      def make_request(url, http_method, payload, connection_config)
+        send_request(
+          url: url,
+          http_method: http_method,
+          payload: JSON.parse(payload),
+          headers: build_headers(connection_config, streaming: false),
+          config: connection_config[:config]
+        )
+      end
+
+      def run_model(connection_config, payload)
+        response = make_request(ANTROPIC_URL, HTTP_POST, payload, connection_config)
+        process_response(response)
+      rescue StandardError => e
+        handle_exception(e, { context: "ANTROPIC:RUN_MODEL:EXCEPTION", type: "error" })
+      end
+
+      def run_model_stream(connection_config, payload)
+        send_streaming_request(
+          url: ANTROPIC_URL,
+          http_method: HTTP_POST,
+          payload: JSON.parse(payload),
+          headers: build_headers(connection_config, streaming: true),
+          config: connection_config[:config]
+        ) do |chunk|
+          process_streaming_response(chunk) { |message| yield message if block_given? }
+        end
+      rescue StandardError => e
+        handle_exception(e, { context: "ANTROPIC:RUN_STREAM_MODEL:EXCEPTION", type: "error" })
+      end
+
+      def process_response(response)
+        if success?(response)
+          data = JSON.parse(response.body)
+          [RecordMessage.new(data: data, emitted_at: Time.now.to_i).to_multiwoven_message]
+        else
+          create_log_message("ANTROPIC:RUN_MODEL", "error", "request failed: #{response.body}")
+        end
+      rescue StandardError => e
+        handle_exception(e, { context: "ANTROPIC:PROCESS_RESPONSE:EXCEPTION", type: "error" })
+      end
+
+      def check_chunk_error(chunk)
+        return unless chunk.include?("{\"type\":\"error\"")
+
+        data = JSON.parse(chunk)
+        raise StandardError, "Error: #{data["error"]}" if data["error"] && data["error"]["message"]
+      end
+
+      def extract_content_event(chunk)
+        events = chunk.split("\n\n")
+        events.find { |e| e.include?("event: content_block_delta") }
+      end
+
+      def process_streaming_response(chunk)
+        check_chunk_error(chunk)
+
+        chunk.each_line do |event|
+          next unless event.include?("\"type\":\"content_block_delta\"")
+
+          json_string = event.split("\n").find { |line| line.start_with?("data: ") }&.sub(/^data: /, "")
+          next unless json_string
+
+          parsed_data = JSON.parse(json_string)
+          yield [RecordMessage.new(data: parsed_data, emitted_at: Time.now.to_i).to_multiwoven_message] if block_given?
+        end
+      end
+    end
+  end
+end
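
For orientation, a hedged smoke test of this new source could look like the sketch below. The field names come from the connector's spec.json (a later hunk in this diff); the nested "config" key is included because prepare_config writes into conf[:config], and the request_format payload mirrors the spec's default. This is a sketch, not documented usage.

    require "multiwoven/integrations"

    # Hypothetical connection check against the Anthropic Messages API:
    # check_connection posts the request_format sample and maps the HTTP
    # result to success_status / failure_status.
    client = Multiwoven::Integrations::Source::Antropic::Client.new
    status = client.check_connection(
      "api_key" => ENV.fetch("ANTHROPIC_API_KEY"),
      "request_format" => '{"model":"claude-3-7-sonnet-20250219","max_tokens":256,' \
                          '"messages":[{"role":"user","content":"Hi."}],"stream":false}',
      "config" => { "timeout" => 30 }
    )
    puts status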

lib/multiwoven/integrations/source/antropic/config/catalog.json ADDED
@@ -0,0 +1,6 @@
+{
+  "request_rate_limit": 50,
+  "request_rate_limit_unit": "minute",
+  "request_rate_concurrency": 10,
+  "streams": []
+}

lib/multiwoven/integrations/source/antropic/config/meta.json ADDED
@@ -0,0 +1,15 @@
+{
+  "data": {
+    "name": "Antropic",
+    "title": "Antropic Model Endpoint",
+    "connector_type": "source",
+    "category": "AI Model",
+    "documentation_url": "https://docs.mutltiwoven.com",
+    "github_issue_label": "source-antropic-model",
+    "icon": "icon.svg",
+    "license": "MIT",
+    "release_stage": "alpha",
+    "support_level": "community",
+    "tags": ["language:ruby", "multiwoven"]
+  }
+}

lib/multiwoven/integrations/source/antropic/config/spec.json ADDED
@@ -0,0 +1,56 @@
+{
+  "documentation_url": "https://docs.multiwoven.com/integrations/source/antropic-model",
+  "stream_type": "user_defined",
+  "connector_query_type": "ai_ml",
+  "connection_specification": {
+    "$schema": "http://json-schema.org/draft-07/schema#",
+    "title": "Antropic Endpoint",
+    "type": "object",
+    "required": ["api_key", "request_format", "response_format"],
+    "properties": {
+      "api_key": {
+        "type": "string",
+        "multiwoven_secret": true,
+        "title": "API Key",
+        "order": 0
+      },
+      "is_stream": {
+        "type": "boolean",
+        "title": "Enable streaming",
+        "description": "Enables data streaming for such as chat, when supported by the model. When true, messages and model data are processed in chunks for immediate delivery, enhancing responsiveness. Default is false, processing only after the entire response is received.",
+        "default": false,
+        "order": 1
+      },
+      "config": {
+        "title": "",
+        "type": "object",
+        "properties": {
+          "timeout": {
+            "type": "string",
+            "default": "30",
+            "title": "HTTP Timeout",
+            "description": "The maximum time, in seconds, to wait for a response from the server before the request is canceled.",
+            "order": 0
+          }
+        },
+        "order": 2
+      },
+      "request_format": {
+        "title": "Request Format",
+        "description": "Sample Request Format",
+        "type": "string",
+        "default": "{\"model\":\"claude-3-7-sonnet-20250219\",\"max_tokens\": 256, \"messages\":[{\"role\": \"user\", \"content\": \"Hi.\"}], \"stream\": false}",
+        "x-request-format": true,
+        "order": 3
+      },
+      "response_format": {
+        "title": "Response Format",
+        "description": "Sample Response Format",
+        "type": "string",
+        "default": "{\"id\":\"msg_0123ABC\",\"type\":\"message\",\"role\":\"assistant\",\"model\":\"claude-3-7-sonnet-20250219\",\"content\":[{\"type\":\"text\",\"text\":\"Hello there! How can I assist you today? Whether you have a question, need some information, or just want to chat, I'm here to help. What's on your mind?\"}],\"stop_reason\":\"end_turn\",\"stop_sequence\":null,\"usage\":{\"input_tokens\":10,\"cache_creation_input_tokens\":0,\"cache_read_input_tokens\":0,\"output_tokens\":41}}",
+        "x-response-format": true,
+        "order": 4
+      }
+    }
+  }
+}

lib/multiwoven/integrations/source/antropic/icon.svg ADDED
@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" id="Anthropic-Icon--Streamline-Svg-Logos" height="24" width="24"><desc>Anthropic Icon Streamline Icon: https://streamlinehq.com</desc><path fill="#181818" d="m13.788825 3.932 6.43325 16.136075h3.5279L17.316725 3.932H13.788825Z" stroke-width="0.25"></path><path fill="#181818" d="m6.325375 13.682775 2.20125 -5.67065 2.201275 5.67065H6.325375ZM6.68225 3.932 0.25 20.068075h3.596525l1.3155 -3.3886h6.729425l1.315275 3.3886h3.59655L10.371 3.932H6.68225Z" stroke-width="0.25"></path></svg>

lib/multiwoven/integrations/source/watsonx_ai/client.rb ADDED
@@ -0,0 +1,194 @@
+# frozen_string_literal: true
+
+module Multiwoven::Integrations::Source
+  module WatsonxAi
+    include Multiwoven::Integrations::Core
+    API_VERSION = "2021-05-01"
+    class Client < SourceConnector
+      def check_connection(connection_config)
+        get_access_token(connection_config[:api_key])
+        url = format(
+          WATSONX_HEALTH_DEPLOYMENT_URL,
+          region: connection_config[:region],
+          version: API_VERSION
+        )
+        response = send_request(
+          url: url,
+          http_method: HTTP_GET,
+          payload: {},
+          headers: auth_headers(@access_token),
+          config: connection_config[:config]
+        )
+        evaluate_deployment_status(response, connection_config[:deployment_id])
+      rescue StandardError => e
+        handle_exception(e, { context: "WATSONX AI:CHECK_CONNECTION:EXCEPTION", type: "error" })
+        failure_status(e)
+      end
+
+      def discover(_connection_config)
+        catalog_json = read_json(CATALOG_SPEC_PATH)
+        catalog = build_catalog(catalog_json)
+        catalog.to_multiwoven_message
+      rescue StandardError => e
+        handle_exception(e, { context: "WATSONX AI:DISCOVER:EXCEPTION", type: "error" })
+      end
+
+      def read(sync_config)
+        connection_config, payload = prepare_config_and_payload(sync_config)
+        process_model_request(connection_config, payload) { |message| yield message if block_given? }
+      rescue StandardError => e
+        handle_exception(e, { context: "WATSONX AI:READ:EXCEPTION", type: "error" })
+      end
+
+      private
+
+      def process_model_request(connection_config, payload)
+        if connection_config[:is_stream] && connection_config[:model_type] == "Prompt template"
+          run_model_stream(connection_config, payload) { |message| yield message if block_given? }
+        else
+          run_model(connection_config, payload)
+        end
+      end
+
+      def evaluate_deployment_status(response, deployment_id)
+        response_body = JSON.parse(response.body)
+        deployment_status = response_body["resources"]&.find { |res| res.dig("metadata", "id") == deployment_id }
+
+        return failure_status unless deployment_status
+
+        deployment_status.dig("entity", "status", "state") == "ready" ? success_status : failure_status
+      end
+
+      def prepare_config_and_payload(sync_config)
+        config = sync_config.source.connection_specification
+        connection_config = config.with_indifferent_access.tap do |conf|
+          conf[:config][:timeout] ||= 30
+          conf[:is_stream] ||= false
+        end
+        payload = sync_config.model.query
+        [connection_config, payload]
+      end
+
+      def get_access_token(api_key)
+        cache = defined?(Rails) && Rails.respond_to?(:cache) ? Rails.cache : ActiveSupport::Cache::MemoryStore.new
+        cache_key = "watsonx_ai_#{api_key}"
+        cached_token = cache.read(cache_key)
+        if cached_token
+          @access_token = cached_token
+        else
+          new_token = get_iam_token(api_key)
+          # max expiration is 3 minutes. No way to make it higher
+          cache.write(cache_key, new_token, expires_in: 180)
+          @access_token = new_token
+        end
+      end
+
+      def get_iam_token(api_key)
+        uri = URI("https://iam.cloud.ibm.com/identity/token")
+        request = Net::HTTP::Post.new(uri)
+        request["Content-Type"] = "application/x-www-form-urlencoded"
+        request.body = "grant_type=urn:ibm:params:oauth:grant-type:apikey&apikey=#{api_key}"
+        response = Net::HTTP.start(uri.hostname, uri.port, use_ssl: true) do |http|
+          http.request(request)
+        end
+
+        raise "Failed to get IAM token: #{response.body}" unless response.is_a?(Net::HTTPSuccess)
+
+        JSON.parse(response.body)["access_token"]
+      end
+
+      def parse_json(json_string)
+        JSON.parse(json_string)
+      rescue JSON::ParserError => e
+        handle_exception(e, { context: "OPEN AI:PARSE_JSON:EXCEPTION", type: "error" })
+        {}
+      end
+
+      def run_model(connection_config, payload)
+        get_access_token(connection_config[:api_key])
+        url = format(
+          connection_config[:model_type] == "Machine learning model" ? WATSONX_PREDICTION_DEPLOYMENT_URL : WATSONX_GENERATION_DEPLOYMENT_URL,
+          region: connection_config[:region],
+          deployment_id: connection_config[:deployment_id],
+          version: API_VERSION
+        )
+        response = send_request(
+          url: url,
+          http_method: HTTP_POST,
+          payload: JSON.parse(payload),
+          headers: auth_headers(@access_token),
+          config: connection_config[:config]
+        )
+        process_response(response)
+      rescue StandardError => e
+        handle_exception(e, { context: "WATSONX AI:RUN_MODEL:EXCEPTION", type: "error" })
+      end
+
+      def process_response(response)
+        if success?(response)
+          if response.body.start_with?("{") || response.body.start_with?("[")
+            data = JSON.parse(response.body)
+            [RecordMessage.new(data: data, emitted_at: Time.now.to_i).to_multiwoven_message]
+          else
+            data = format_data(response.body)
+            RecordMessage.new(data: { responses: data }, emitted_at: Time.now.to_i).to_multiwoven_message
+          end
+        else
+          create_log_message("WATSONX AI:RUN_MODEL", "error", "request failed: #{response.body}")
+        end
+      rescue StandardError => e
+        handle_exception(e, { context: "WATSONX AI:PROCESS_RESPONSE:EXCEPTION", type: "error" })
+      end
+
+      def run_model_stream(connection_config, payload)
+        get_access_token(connection_config[:api_key])
+        url = format(
+          WATSONX_STREAM_DEPLOYMENT_URL,
+          region: connection_config[:region],
+          deployment_id: connection_config[:deployment_id],
+          version: API_VERSION
+        )
+        send_streaming_request(
+          url: url,
+          http_method: HTTP_POST,
+          payload: JSON.parse(payload),
+          headers: auth_headers(@access_token),
+          config: connection_config[:config]
+        ) do |chunk|
+          process_streaming_response(chunk) { |message| yield message if block_given? }
+        end
+      rescue StandardError => e
+        handle_exception(e, { context: "WATSONX AI:RUN_STREAM_MODEL:EXCEPTION", type: "error" })
+      end
+
+      def format_data(response_body)
+        messages = response_body.split("\n\n")
+        messages.map do |message|
+          match = message.match(/data:\s*(\{.*\})/)
+          match ? JSON.parse(match[1]) : nil
+        end.compact
+      end
+
+      def extract_data_entries(chunk)
+        chunk.split(/^data: /).map(&:strip).reject(&:empty?)
+      end
+
+      def process_streaming_response(chunk)
+        data_entries = extract_data_entries(chunk)
+        data_entries.each do |entry|
+          data, = entry.split("\n", 2)
+
+          next if data == "id: 1"
+
+          data = parse_json(data)
+
+          raise StandardError, "Error: #{data["errors"][0]["message"]}" if data["errors"] && data["errors"][0]["message"]
+
+          next if data["results"][0]["stop_reason"] != "not_finished"
+
+          yield [RecordMessage.new(data: data, emitted_at: Time.now.to_i).to_multiwoven_message] if block_given?
+        end
+      end
+    end
+  end
+end
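
Analogously, a hedged sketch of exercising the WatsonX AI source (field names per its spec.json in a later hunk; symbol keys are used because check_connection reads the hash directly rather than through with_indifferent_access):

    require "multiwoven/integrations"

    # Hypothetical health check: exchanges the IBM Cloud API key for an IAM
    # token, lists /ml/v4/deployments in the region, and succeeds only when
    # the given deployment reports state "ready". Region and deployment id
    # are placeholder values.
    client = Multiwoven::Integrations::Source::WatsonxAi::Client.new
    status = client.check_connection(
      api_key: ENV.fetch("IBM_CLOUD_API_KEY"),
      region: "us-south",
      deployment_id: "0a1b2c3d",
      config: { timeout: 30 }
    )
    puts status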

lib/multiwoven/integrations/source/watsonx_ai/config/catalog.json ADDED
@@ -0,0 +1,6 @@
+{
+  "request_rate_limit": 120,
+  "request_rate_limit_unit": "minute",
+  "request_rate_concurrency": 10,
+  "streams": []
+}

lib/multiwoven/integrations/source/watsonx_ai/config/meta.json ADDED
@@ -0,0 +1,15 @@
+{
+  "data": {
+    "name": "WatsonxAi",
+    "title": "WatsonX AI Model Endpoint",
+    "connector_type": "source",
+    "category": "AI Model",
+    "documentation_url": "https://docs.mutltiwoven.com",
+    "github_issue_label": "source-watsonx-ai-model",
+    "icon": "icon.svg",
+    "license": "MIT",
+    "release_stage": "alpha",
+    "support_level": "community",
+    "tags": ["language:ruby", "multiwoven"]
+  }
+}

lib/multiwoven/integrations/source/watsonx_ai/config/spec.json ADDED
@@ -0,0 +1,74 @@
+{
+  "documentation_url": "https://docs.multiwoven.com/integrations/source/watsonx-ai-endpoint",
+  "stream_type": "user_defined",
+  "connector_query_type": "ai_ml",
+  "connection_specification": {
+    "$schema": "http://json-schema.org/draft-07/schema#",
+    "title": "WatsonX AI Endpoint",
+    "type": "object",
+    "required": ["api_key", "region", "deployment_id", "request_format", "response_format"],
+    "properties": {
+      "api_key": {
+        "type": "string",
+        "multiwoven_secret": true,
+        "title": "API Key",
+        "order": 0
+      },
+      "is_stream": {
+        "type": "boolean",
+        "title": "Enable streaming",
+        "description": "Enables data streaming for such as chat, when supported by the model. When true, messages and model data are processed in chunks for immediate delivery, enhancing responsiveness. Default is false, processing only after the entire response is received.",
+        "default": false,
+        "order": 1
+      },
+      "region": {
+        "description": "WatsonX AI region",
+        "type": "string",
+        "title": "Region",
+        "order": 2
+      },
+      "deployment_id": {
+        "description": "WatsonX AI online deployment id",
+        "type": "string",
+        "title": "Deployment ID",
+        "multiwoven_secret": true,
+        "order": 3
+      },
+      "config": {
+        "title": "",
+        "type": "object",
+        "properties": {
+          "timeout": {
+            "type": "string",
+            "default": "30",
+            "title": "HTTP Timeout",
+            "description": "The maximum time, in seconds, to wait for a response from the server before the request is canceled.",
+            "order": 0
+          }
+        },
+        "order": 4
+      },
+      "request_format": {
+        "title": "Request Format",
+        "description": "Sample request format",
+        "type": "string",
+        "order": 5
+      },
+      "response_format": {
+        "title": "Response Format",
+        "description": "Sample response format",
+        "type": "string",
+        "x-response-format": true,
+        "order": 6
+      },
+      "model_type": {
+        "title": "Model Type",
+        "type": "string",
+        "description": "Deployed model type.",
+        "enum": ["Machine learning model", "Prompt template"],
+        "default": "Machine learning model",
+        "order": 7
+      }
+    }
+  }
+}

lib/multiwoven/integrations/source/watsonx_ai/icon.svg ADDED
@@ -0,0 +1 @@
+<svg id="Watsonx-Data--Streamline-Carbon" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32" height="16" width="16"><desc>Watsonx Data Streamline Icon: https://streamlinehq.com</desc><defs></defs><path d="M26 24c-1.1046 0-2 .8954-2 2 0 .0764.0142.1488.0225.2229C21.7417 28.0192 18.9433 29 16 29c-2.7746 0-5.3432-.881-7.4566-2.3676.2576.0261.517.0444.7798.0444C13.5561 26.6768 17 23.233 17 19h-2c0 3.1304-2.5464 5.6768-5.6768 5.6768-2.2111 0-4.1977-1.2816-5.1318-3.2725-.1365-.2972-.2595-.6007-.3738-.9094C4.4778 20.8169 5.2174 21 6 21c2.7568 0 5-2.2432 5-5v-2H9v2c0 1.6543-1.3457 3-3 3s-3-1.3457-3-3c0-2.1152.4917-4.1328 1.4619-5.9956l-1.7744-.9238C1.5835 11.2017 1 13.5943 1 16c0 8.271 6.729 15 15 15 3.3744 0 6.5818-1.1193 9.2048-3.1662.244.106.5123.1662.7952.1662 1.1046 0 2-.8954 2-2s-.8954-2-2-2Z" fill="#000000"></path><path transform="rotate(90 22 22)" d="M21 21h2v2h-2Z" fill="#000000"></path><path transform="rotate(90 16 16)" d="M15 15h2v2h-2Z" fill="#000000"></path><path transform="rotate(-90 10 10)" d="M9 9h2v2H9Z" fill="#000000"></path><path d="M16 1c-3.3744 0-6.5818 1.1193-9.2048 3.1662C6.5512 4.0602 6.2829 4 6 4c-1.1046 0-2 .8954-2 2s.8954 2 2 2 2-.8954 2-2c0-.0764-.0142-.1488-.0225-.2229C10.2583 3.9808 13.0567 3 16 3c2.7708 0 5.3363.8784 7.4481 2.3613-.249-.0242-.5005-.038-.7547-.038-4.2329 0-7.6768 3.4438-7.6768 7.6768h2c0-3.1304 2.5464-5.6768 5.6768-5.6768 2.0554 0 3.9068 1.0953 4.9186 2.8651.2153.4283.4053.8701.5729 1.3237C27.5234 11.1887 26.7844 11 26 11c-2.7568 0-5 2.2432-5 5v2h2v-2c0-1.6543 1.3457-3 3-3s3 1.3457 3 3c0 2.1152-.4917 4.1328-1.4619 5.9956l1.7744.9238C30.4165 20.7983 31 18.4057 31 16c0-8.271-6.729-15-15-15Z" fill="#000000"></path><path id="_Transparent_Rectangle_" d="M0 0h32v32H0Z" fill="none"></path></svg>

@@ -75,6 +75,8 @@ require_relative "integrations/source/google_vertex_model/client"
 require_relative "integrations/source/http_model/client"
 require_relative "integrations/source/open_ai/client"
 require_relative "integrations/source/sftp/client"
+require_relative "integrations/source/watsonx_ai/client"
+require_relative "integrations/source/antropic/client"
 
 # Destination
 require_relative "integrations/destination/klaviyo/client"
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: multiwoven-integrations
 version: !ruby/object:Gem::Version
-  version: 0.19.3
+  version: 0.21.0
 platform: ruby
 authors:
 - Subin T P
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2025-02-12 00:00:00.000000000 Z
+date: 2025-03-19 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: activesupport
@@ -705,6 +705,11 @@ files:
 - lib/multiwoven/integrations/source/amazon_s3/config/meta.json
 - lib/multiwoven/integrations/source/amazon_s3/config/spec.json
 - lib/multiwoven/integrations/source/amazon_s3/icon.svg
+- lib/multiwoven/integrations/source/antropic/client.rb
+- lib/multiwoven/integrations/source/antropic/config/catalog.json
+- lib/multiwoven/integrations/source/antropic/config/meta.json
+- lib/multiwoven/integrations/source/antropic/config/spec.json
+- lib/multiwoven/integrations/source/antropic/icon.svg
 - lib/multiwoven/integrations/source/aws_athena/client.rb
 - lib/multiwoven/integrations/source/aws_athena/config/meta.json
 - lib/multiwoven/integrations/source/aws_athena/config/spec.json
@@ -776,6 +781,11 @@ files:
 - lib/multiwoven/integrations/source/snowflake/config/meta.json
 - lib/multiwoven/integrations/source/snowflake/config/spec.json
 - lib/multiwoven/integrations/source/snowflake/icon.svg
+- lib/multiwoven/integrations/source/watsonx_ai/client.rb
+- lib/multiwoven/integrations/source/watsonx_ai/config/catalog.json
+- lib/multiwoven/integrations/source/watsonx_ai/config/meta.json
+- lib/multiwoven/integrations/source/watsonx_ai/config/spec.json
+- lib/multiwoven/integrations/source/watsonx_ai/icon.svg
 - multiwoven-integrations.gemspec
 - sig/multiwoven/integrations.rbs
 homepage: https://www.multiwoven.com/