multiwoven-integrations 0.20.0 → 0.21.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: eb53fac768bc4dc4cf394e1414fa84762041448762d9e48517288ba7cbe9c56c
4
- data.tar.gz: 6e1e7eb31a0174aeb981c1d27c62c30fc29db5b39de9a9dba3b79cb1cdbd6c5b
3
+ metadata.gz: cf7259493817f0cca526b9d359e0da09e1f0eb456754e58146757525b6e6bc9e
4
+ data.tar.gz: 574b93581eadef82ac5dcf39b041e6e8ddecd5cc781a9cef28bd766393d172c2
5
5
  SHA512:
6
- metadata.gz: dd355e29baafc570c76e9165163a5020cbc491ed1aa1afc311f6bff049c24072065aad1c06cd974731c64a846803b144ad66de11e1e0d5c75de4ca0bbb83c931
7
- data.tar.gz: 801eae3f6cb3f1e1ae6f54b1ba75e38a6b264b35680010d3657b299c4e7d97f216db854b824f3210de1dfa7757b46120a62fac3aff1346ba64e185a3f0a426e4
6
+ metadata.gz: 295d86c051f4b723acb2719f0cd14e6f7a7d01425f1246608b76a55385e24293dab399dca7350da5c45d4d8d4a92bcfcb291c0628e3a01fb6283de2b7aef119e
7
+ data.tar.gz: 78a203a13c867d6df522608032bcd4b6bf8f0c318988b5db341d2ef1158c29917acf60023c522c0afb9ef1d5b75257150b0fa4168d32011369aef98a56820049
@@ -72,6 +72,7 @@ module Multiwoven
72
72
  GOOGLE_SPREADSHEET_ID_REGEX = %r{/d/([-\w]{20,})/}.freeze
73
73
 
74
74
  OPEN_AI_URL = "https://api.openai.com/v1/chat/completions"
75
+ ANTHROPIC_URL = "https://api.anthropic.com/v1/messages"
75
76
  end
76
77
  end
77
78
  end
@@ -2,7 +2,7 @@
2
2
 
3
3
  module Multiwoven
4
4
  module Integrations
5
- VERSION = "0.20.0"
5
+ VERSION = "0.21.1"
6
6
 
7
7
  ENABLED_SOURCES = %w[
8
8
  Snowflake
@@ -23,6 +23,7 @@ module Multiwoven
23
23
  OpenAI
24
24
  Sftp
25
25
  WatsonxAi
26
+ Anthropic
26
27
  ].freeze
27
28
 
28
29
  ENABLED_DESTINATIONS = %w[
@@ -0,0 +1,135 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Multiwoven::Integrations::Source
4
+ module Anthropic
5
+ include Multiwoven::Integrations::Core
6
+ class Client < SourceConnector
7
+ API_VERSION = "2023-06-01"
8
+ def check_connection(connection_config)
9
+ connection_config = prepare_config(connection_config)
10
+ response = make_request(ANTHROPIC_URL, HTTP_POST, connection_config[:request_format], connection_config)
11
+ success?(response) ? success_status : failure_status(nil)
12
+ rescue StandardError => e
13
+ handle_exception(e, { context: "ANTHROPIC:CHECK_CONNECTION:EXCEPTION", type: "error" })
14
+ failure_status(e)
15
+ end
16
+
17
+ def discover(_connection_config = nil)
18
+ catalog_json = read_json(CATALOG_SPEC_PATH)
19
+ catalog = build_catalog(catalog_json)
20
+ catalog.to_multiwoven_message
21
+ rescue StandardError => e
22
+ handle_exception(e, { context: "ANTHROPIC:DISCOVER:EXCEPTION", type: "error" })
23
+ end
24
+
25
+ def read(sync_config)
26
+ # The server checks the ConnectorQueryType.
27
+ # If it's "ai_ml," the server calculates the payload and passes it as a query in the sync config model protocol.
28
+ # This query is then sent to the AI/ML model.
29
+ connection_config = prepare_config(sync_config.source.connection_specification)
30
+ stream = connection_config[:is_stream] ||= false
31
+ payload = sync_config.model.query
32
+ if stream
33
+ run_model_stream(connection_config, payload) { |message| yield message if block_given? }
34
+ else
35
+ run_model(connection_config, payload)
36
+ end
37
+ rescue StandardError => e
38
+ handle_exception(e, { context: "ANTHROPIC:READ:EXCEPTION", type: "error" })
39
+ end
40
+
41
+ private
42
+
43
+ def prepare_config(config)
44
+ config.with_indifferent_access.tap do |conf|
45
+ conf[:config][:timeout] ||= 30
46
+ end
47
+ end
48
+
49
+ def parse_json(json_string)
50
+ JSON.parse(json_string)
51
+ rescue JSON::ParserError => e
52
+ handle_exception(e, { context: "ANTHROPIC:PARSE_JSON:EXCEPTION", type: "error" })
53
+ {}
54
+ end
55
+
56
+ def build_headers(connection_config, streaming: false)
57
+ {
58
+ "x-api-key" => connection_config[:api_key],
59
+ "anthropic-version" => API_VERSION,
60
+ "content-type" => "application/json"
61
+ }.tap do |headers|
62
+ headers["transfer-encoding"] = "chunked" if streaming
63
+ end
64
+ end
65
+
66
+ def make_request(url, http_method, payload, connection_config)
67
+ send_request(
68
+ url: url,
69
+ http_method: http_method,
70
+ payload: JSON.parse(payload),
71
+ headers: build_headers(connection_config, streaming: false),
72
+ config: connection_config[:config]
73
+ )
74
+ end
75
+
76
+ def run_model(connection_config, payload)
77
+ response = make_request(ANTHROPIC_URL, HTTP_POST, payload, connection_config)
78
+ process_response(response)
79
+ rescue StandardError => e
80
+ handle_exception(e, { context: "ANTHROPIC:RUN_MODEL:EXCEPTION", type: "error" })
81
+ end
82
+
83
+ def run_model_stream(connection_config, payload)
84
+ send_streaming_request(
85
+ url: ANTHROPIC_URL,
86
+ http_method: HTTP_POST,
87
+ payload: JSON.parse(payload),
88
+ headers: build_headers(connection_config, streaming: true),
89
+ config: connection_config[:config]
90
+ ) do |chunk|
91
+ process_streaming_response(chunk) { |message| yield message if block_given? }
92
+ end
93
+ rescue StandardError => e
94
+ handle_exception(e, { context: "ANTHROPIC:RUN_STREAM_MODEL:EXCEPTION", type: "error" })
95
+ end
96
+
97
+ def process_response(response)
98
+ if success?(response)
99
+ data = JSON.parse(response.body)
100
+ [RecordMessage.new(data: data, emitted_at: Time.now.to_i).to_multiwoven_message]
101
+ else
102
+ create_log_message("ANTHROPIC:RUN_MODEL", "error", "request failed: #{response.body}")
103
+ end
104
+ rescue StandardError => e
105
+ handle_exception(e, { context: "ANTHROPIC:PROCESS_RESPONSE:EXCEPTION", type: "error" })
106
+ end
107
+
108
+ def check_chunk_error(chunk)
109
+ return unless chunk.include?("{\"type\":\"error\"")
110
+
111
+ data = JSON.parse(chunk)
112
+ raise StandardError, "Error: #{data["error"]}" if data["error"] && data["error"]["message"]
113
+ end
114
+
115
+ def extract_content_event(chunk)
116
+ events = chunk.split("\n\n")
117
+ events.find { |e| e.include?("event: content_block_delta") }
118
+ end
119
+
120
+ def process_streaming_response(chunk)
121
+ check_chunk_error(chunk)
122
+
123
+ chunk.each_line do |event|
124
+ next unless event.include?("\"type\":\"content_block_delta\"")
125
+
126
+ json_string = event.split("\n").find { |line| line.start_with?("data: ") }&.sub(/^data: /, "")
127
+ next unless json_string
128
+
129
+ parsed_data = JSON.parse(json_string)
130
+ yield [RecordMessage.new(data: parsed_data, emitted_at: Time.now.to_i).to_multiwoven_message] if block_given?
131
+ end
132
+ end
133
+ end
134
+ end
135
+ end
@@ -0,0 +1,6 @@
1
+ {
2
+ "request_rate_limit": 50,
3
+ "request_rate_limit_unit": "minute",
4
+ "request_rate_concurrency": 10,
5
+ "streams": []
6
+ }
@@ -0,0 +1,15 @@
1
+ {
2
+ "data": {
3
+ "name": "Anthropic",
4
+ "title": "Anthropic Model Endpoint",
5
+ "connector_type": "source",
6
+ "category": "AI Model",
7
+ "documentation_url": "https://docs.multiwoven.com",
8
+ "github_issue_label": "source-anthropic-model",
9
+ "icon": "icon.svg",
10
+ "license": "MIT",
11
+ "release_stage": "alpha",
12
+ "support_level": "community",
13
+ "tags": ["language:ruby", "multiwoven"]
14
+ }
15
+ }
@@ -0,0 +1,56 @@
1
+ {
2
+ "documentation_url": "https://docs.multiwoven.com/integrations/source/anthropic-model",
3
+ "stream_type": "user_defined",
4
+ "connector_query_type": "ai_ml",
5
+ "connection_specification": {
6
+ "$schema": "http://json-schema.org/draft-07/schema#",
7
+ "title": "Anthropic Endpoint",
8
+ "type": "object",
9
+ "required": ["api_key", "request_format", "response_format"],
10
+ "properties": {
11
+ "api_key": {
12
+ "type": "string",
13
+ "multiwoven_secret": true,
14
+ "title": "API Key",
15
+ "order": 0
16
+ },
17
+ "is_stream": {
18
+ "type": "boolean",
19
+ "title": "Enable streaming",
20
+ "description": "Enables data streaming for use cases such as chat, when supported by the model. When true, messages and model data are processed in chunks for immediate delivery, enhancing responsiveness. Default is false, processing only after the entire response is received.",
21
+ "default": false,
22
+ "order": 1
23
+ },
24
+ "config": {
25
+ "title": "",
26
+ "type": "object",
27
+ "properties": {
28
+ "timeout": {
29
+ "type": "string",
30
+ "default": "30",
31
+ "title": "HTTP Timeout",
32
+ "description": "The maximum time, in seconds, to wait for a response from the server before the request is canceled.",
33
+ "order": 0
34
+ }
35
+ },
36
+ "order": 2
37
+ },
38
+ "request_format": {
39
+ "title": "Request Format",
40
+ "description": "Sample Request Format",
41
+ "type": "string",
42
+ "default": "{\"model\":\"claude-3-opus-20240229\",\"max_tokens\": 256, \"messages\":[{\"role\": \"user\", \"content\": \"Hi.\"}], \"stream\": false}",
43
+ "x-request-format": true,
44
+ "order": 3
45
+ },
46
+ "response_format": {
47
+ "title": "Response Format",
48
+ "description": "Sample Response Format",
49
+ "type": "string",
50
+ "default": "{\"id\":\"msg_0123ABC\",\"type\":\"message\",\"role\":\"assistant\",\"model\":\"claude-3-7-sonnet-20250219\",\"content\":[{\"type\":\"text\",\"text\":\"Hello there! How can I assist you today? Whether you have a question, need some information, or just want to chat, I'm here to help. What's on your mind?\"}],\"stop_reason\":\"end_turn\",\"stop_sequence\":null,\"usage\":{\"input_tokens\":10,\"cache_creation_input_tokens\":0,\"cache_read_input_tokens\":0,\"output_tokens\":41}}",
51
+ "x-response-format": true,
52
+ "order": 4
53
+ }
54
+ }
55
+ }
56
+ }
@@ -0,0 +1 @@
1
+ <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" id="Anthropic-Icon--Streamline-Svg-Logos" height="24" width="24"><desc>Anthropic Icon Streamline Icon: https://streamlinehq.com</desc><path fill="#181818" d="m13.788825 3.932 6.43325 16.136075h3.5279L17.316725 3.932H13.788825Z" stroke-width="0.25"></path><path fill="#181818" d="m6.325375 13.682775 2.20125 -5.67065 2.201275 5.67065H6.325375ZM6.68225 3.932 0.25 20.068075h3.596525l1.3155 -3.3886h6.729425l1.315275 3.3886h3.59655L10.371 3.932H6.68225Z" stroke-width="0.25"></path></svg>
@@ -76,6 +76,7 @@ require_relative "integrations/source/http_model/client"
76
76
  require_relative "integrations/source/open_ai/client"
77
77
  require_relative "integrations/source/sftp/client"
78
78
  require_relative "integrations/source/watsonx_ai/client"
79
+ require_relative "integrations/source/anthropic/client"
79
80
 
80
81
  # Destination
81
82
  require_relative "integrations/destination/klaviyo/client"
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: multiwoven-integrations
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.20.0
4
+ version: 0.21.1
5
5
  platform: ruby
6
6
  authors:
7
7
  - Subin T P
8
8
  autorequire:
9
9
  bindir: exe
10
10
  cert_chain: []
11
- date: 2025-03-19 00:00:00.000000000 Z
11
+ date: 2025-03-20 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: activesupport
@@ -705,6 +705,11 @@ files:
705
705
  - lib/multiwoven/integrations/source/amazon_s3/config/meta.json
706
706
  - lib/multiwoven/integrations/source/amazon_s3/config/spec.json
707
707
  - lib/multiwoven/integrations/source/amazon_s3/icon.svg
708
+ - lib/multiwoven/integrations/source/anthropic/client.rb
709
+ - lib/multiwoven/integrations/source/anthropic/config/catalog.json
710
+ - lib/multiwoven/integrations/source/anthropic/config/meta.json
711
+ - lib/multiwoven/integrations/source/anthropic/config/spec.json
712
+ - lib/multiwoven/integrations/source/anthropic/icon.svg
708
713
  - lib/multiwoven/integrations/source/aws_athena/client.rb
709
714
  - lib/multiwoven/integrations/source/aws_athena/config/meta.json
710
715
  - lib/multiwoven/integrations/source/aws_athena/config/spec.json