fog-aws 0.6.0 → 0.7.2

Files changed (32)
  1. checksums.yaml +4 -4
  2. data/lib/fog/aws.rb +3 -1
  3. data/lib/fog/aws/errors.rb +3 -3
  4. data/lib/fog/aws/kinesis.rb +187 -0 (new Kinesis service; a usage sketch follows this list)
  5. data/lib/fog/aws/lambda.rb +10 -2
  6. data/lib/fog/aws/models/dns/zones.rb +2 -2
  7. data/lib/fog/aws/parsers/compute/describe_route_tables.rb +3 -3
  8. data/lib/fog/aws/parsers/compute/describe_vpcs.rb +1 -1
  9. data/lib/fog/aws/requests/compute/describe_route_tables.rb +1 -0
  10. data/lib/fog/aws/requests/dns/change_resource_record_sets.rb +116 -107
  11. data/lib/fog/aws/requests/kinesis/add_tags_to_stream.rb +48 -0
  12. data/lib/fog/aws/requests/kinesis/create_stream.rb +70 -0
  13. data/lib/fog/aws/requests/kinesis/delete_stream.rb +45 -0
  14. data/lib/fog/aws/requests/kinesis/describe_stream.rb +54 -0
  15. data/lib/fog/aws/requests/kinesis/get_records.rb +72 -0
  16. data/lib/fog/aws/requests/kinesis/get_shard_iterator.rb +53 -0
  17. data/lib/fog/aws/requests/kinesis/list_streams.rb +40 -0
  18. data/lib/fog/aws/requests/kinesis/list_tags_for_stream.rb +57 -0
  19. data/lib/fog/aws/requests/kinesis/merge_shards.rb +85 -0
  20. data/lib/fog/aws/requests/kinesis/put_record.rb +70 -0
  21. data/lib/fog/aws/requests/kinesis/put_records.rb +68 -0
  22. data/lib/fog/aws/requests/kinesis/remove_tags_from_stream.rb +49 -0
  23. data/lib/fog/aws/requests/kinesis/split_shard.rb +85 -0
  24. data/lib/fog/aws/storage.rb +4 -8
  25. data/lib/fog/aws/version.rb +1 -1
  26. data/tests/helpers/mock_helper.rb +0 -1
  27. data/tests/requests/compute/route_tests.rb +8 -7
  28. data/tests/requests/dns/change_resource_record_sets_tests.rb +26 -2
  29. data/tests/requests/emr/helper.rb +0 -1
  30. data/tests/requests/kinesis/helper.rb +111 -0
  31. data/tests/requests/kinesis/stream_tests.rb +169 -0
  32. metadata +19 -4
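The headline addition in this release is a Kinesis service (data/lib/fog/aws/kinesis.rb) plus request methods for streams, shards, records, and tags. A minimal sketch of driving it, assuming valid credentials; the region, stream name, and shard count are illustrative, and the request methods take a single options hash keyed by the raw API parameter names, as the diffs below show:

require 'fog/aws'

# Instantiate the new Kinesis service introduced in this release.
kinesis = Fog::AWS::Kinesis.new(
  :aws_access_key_id     => "AKIA...",
  :aws_secret_access_key => "SECRET",
  :region                => "us-east-1"
)

# Create a stream with two shards and list what the account can see;
# response bodies follow the raw Kinesis API shapes.
kinesis.create_stream("StreamName" => "my-stream", "ShardCount" => 2)
kinesis.list_streams.body["StreamNames"]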
data/lib/fog/aws/requests/kinesis/merge_shards.rb
@@ -0,0 +1,85 @@
+module Fog
+  module AWS
+    class Kinesis
+      class Real
+        # Merges two adjacent shards in a stream and combines them into a single shard to reduce the stream's capacity to ingest and transport data.
+        #
+        # ==== Options
+        # * AdjacentShardToMerge<~String>: The shard ID of the adjacent shard for the merge.
+        # * ShardToMerge<~String>: The shard ID of the shard to combine with the adjacent shard for the merge.
+        # * StreamName<~String>: The name of the stream for the merge.
+        # ==== Returns
+        # * response<~Excon::Response>:
+        #
+        # ==== See Also
+        # https://docs.aws.amazon.com/kinesis/latest/APIReference/API_MergeShards.html
+        #
+        def merge_shards(options={})
+          body = {
+            "AdjacentShardToMerge" => options.delete("AdjacentShardToMerge"),
+            "ShardToMerge" => options.delete("ShardToMerge"),
+            "StreamName" => options.delete("StreamName")
+          }.reject{ |_,v| v.nil? }
+
+          request({
+            'X-Amz-Target' => "Kinesis_#{@version}.MergeShards",
+            :body => body,
+          }.merge(options))
+        end
+      end
+
+      class Mock
+        def merge_shards(options={})
+          stream_name = options.delete("StreamName")
+          shard_to_merge_id = options.delete("ShardToMerge")
+          adjacent_shard_to_merge_id = options.delete("AdjacentShardToMerge")
+
+          unless stream = data[:kinesis_streams].detect{ |s| s["StreamName"] == stream_name }
+            raise Fog::AWS::Kinesis::ResourceNotFound.new("Stream #{stream_name} under account #{@account_id} not found.")
+          end
+
+          unless shard_to_merge = stream["Shards"].detect{ |shard| shard["ShardId"] == shard_to_merge_id }
+            raise Fog::AWS::Kinesis::ResourceNotFound.new("Could not find shard #{shard_to_merge_id} in stream #{stream_name} under account #{@account_id}.")
+          end
+
+          unless adjacent_shard_to_merge = stream["Shards"].detect{ |shard| shard["ShardId"] == adjacent_shard_to_merge_id }
+            raise Fog::AWS::Kinesis::ResourceNotFound.new("Could not find shard #{adjacent_shard_to_merge_id} in stream #{stream_name} under account #{@account_id}.")
+          end
+
+          # Close shards (set an EndingSequenceNumber on them)
+          shard_to_merge["SequenceNumberRange"]["EndingSequenceNumber"] = next_sequence_number
+          adjacent_shard_to_merge["SequenceNumberRange"]["EndingSequenceNumber"] = next_sequence_number
+
+          new_starting_hash_key = [
+            shard_to_merge["HashKeyRange"]["StartingHashKey"].to_i,
+            adjacent_shard_to_merge["HashKeyRange"]["StartingHashKey"].to_i
+          ].min.to_s
+
+          new_ending_hash_key = [
+            shard_to_merge["HashKeyRange"]["EndingHashKey"].to_i,
+            adjacent_shard_to_merge["HashKeyRange"]["EndingHashKey"].to_i
+          ].max.to_s
+
+          # create a new shard with ParentShardId and AdjacentParentShardId
+          stream["Shards"] << {
+            "HashKeyRange" => {
+              "EndingHashKey" => new_ending_hash_key,
+              "StartingHashKey" => new_starting_hash_key
+            },
+            "SequenceNumberRange" => {
+              "StartingSequenceNumber" => next_sequence_number
+            },
+            "ShardId" => next_shard_id,
+            "ParentShardId" => shard_to_merge_id,
+            "AdjacentParentShardId" => adjacent_shard_to_merge_id
+          }
+
+          response = Excon::Response.new
+          response.status = 200
+          response.body = ""
+          response
+        end
+      end
+    end
+  end
+end
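As a hedged illustration of the request above, merging two adjacent shards back into a single shard might look like this; the stream and shard IDs are hypothetical and would normally come from describe_stream:

kinesis.merge_shards(
  "StreamName"           => "my-stream",
  "ShardToMerge"         => "shardId-000000000000",
  "AdjacentShardToMerge" => "shardId-000000000001"
)
# Both parent shards are closed (given an EndingSequenceNumber) and a new child
# shard covering the combined hash-key range is created, as the Mock above simulates.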
data/lib/fog/aws/requests/kinesis/put_record.rb
@@ -0,0 +1,70 @@
+module Fog
+  module AWS
+    class Kinesis
+      class Real
+        # Writes a single data record from a producer into an Amazon Kinesis stream.
+        #
+        # ==== Options
+        # * Data<~Blob>: The data blob to put into the record, which is base64-encoded when the blob is serialized.
+        # * ExplicitHashKey<~String>: The hash value used to determine explicitly the shard that the data record is assigned to by overriding the partition key hash.
+        # * PartitionKey<~String>: Determines which shard in the stream the data record is assigned to.
+        # * SequenceNumberForOrdering<~String>: Guarantees strictly increasing sequence numbers, for puts from the same client and to the same partition key.
+        # * StreamName<~String>: The stream name associated with the request.
+        # ==== Returns
+        # * response<~Excon::Response>:
+        #
+        # ==== See Also
+        # https://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecord.html
+        #
+        def put_record(options={})
+          body = {
+            "Data" => options.delete("Data"),
+            "ExplicitHashKey" => options.delete("ExplicitHashKey"),
+            "PartitionKey" => options.delete("PartitionKey"),
+            "SequenceNumberForOrdering" => options.delete("SequenceNumberForOrdering"),
+            "StreamName" => options.delete("StreamName")
+          }.reject{ |_,v| v.nil? }
+
+          response = request({
+            'X-Amz-Target' => "Kinesis_#{@version}.PutRecord",
+            :body => body,
+          }.merge(options))
+          response.body = Fog::JSON.decode(response.body) unless response.body.nil?
+          response
+        end
+      end
+
+      class Mock
+        def put_record(options={})
+          stream_name = options.delete("StreamName")
+
+          unless stream = data[:kinesis_streams].detect{ |s| s["StreamName"] == stream_name }
+            raise Fog::AWS::Kinesis::ResourceNotFound.new("Stream #{stream_name} under account #{@account_id} not found.")
+          end
+
+          sequence_number = next_sequence_number
+          data = options.delete("Data")
+          partition_key = options.delete("PartitionKey")
+
+          sample_method = RUBY_VERSION == "1.8.7" ? :choice : :sample
+          shard_id = stream["Shards"].send(sample_method)["ShardId"]
+          shard = stream["Shards"].detect{ |shard| shard["ShardId"] == shard_id }
+          # store the records on the shard(s)
+          shard["Records"] << {
+            "SequenceNumber" => sequence_number,
+            "Data" => data,
+            "PartitionKey" => partition_key
+          }
+
+          response = Excon::Response.new
+          response.status = 200
+          response.body = {
+            "SequenceNumber" => sequence_number,
+            "ShardId" => shard_id
+          }
+          response
+        end
+      end
+    end
+  end
+end
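A small producer sketch against put_record, reusing the kinesis handle from the earlier example; as the doc comment notes, the Data blob travels Base64-encoded and the request forwards it untouched, so the caller encodes it:

require 'base64'

resp = kinesis.put_record(
  "StreamName"   => "my-stream",
  "PartitionKey" => "user-42",
  "Data"         => Base64.strict_encode64('{"event":"signup"}')
)
resp.body["ShardId"]        # shard the record was routed to
resp.body["SequenceNumber"] # sequence number assigned to the record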
data/lib/fog/aws/requests/kinesis/put_records.rb
@@ -0,0 +1,68 @@
+module Fog
+  module AWS
+    class Kinesis
+      class Real
+        # Writes multiple data records from a producer into an Amazon Kinesis stream in a single call (also referred to as a PutRecords request).
+        #
+        # ==== Options
+        # * Records<~Array>: The records associated with the request.
+        #   * Record<~Hash>: A record.
+        #     * Data<~Blob>: The data blob to put into the record, which is base64-encoded when the blob is serialized.
+        #     * ExplicitHashKey<~String>: The hash value used to determine explicitly the shard that the data record is assigned to by overriding the partition key hash.
+        #     * PartitionKey<~String>: Determines which shard in the stream the data record is assigned to.
+        # * StreamName<~String>: The stream name associated with the request.
+        # ==== Returns
+        # * response<~Excon::Response>:
+        #
+        # ==== See Also
+        # https://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecords.html
+        #
+        def put_records(options={})
+          body = {
+            "Records" => options.delete("Records"),
+            "StreamName" => options.delete("StreamName")
+          }.reject{ |_,v| v.nil? }
+
+          response = request({
+            'X-Amz-Target' => "Kinesis_#{@version}.PutRecords",
+            :body => body,
+          }.merge(options))
+          response.body = Fog::JSON.decode(response.body) unless response.body.nil?
+          response
+        end
+      end
+
+      class Mock
+        def put_records(options={})
+          stream_name = options.delete("StreamName")
+          unless stream = data[:kinesis_streams].detect{ |s| s["StreamName"] == stream_name }
+            raise Fog::AWS::Kinesis::ResourceNotFound.new("Stream #{stream_name} under account #{@account_id} not found.")
+          end
+
+          records = options.delete("Records")
+          record_results = records.map { |r|
+            sequence_number = next_sequence_number
+
+            sample_method = RUBY_VERSION == "1.8.7" ? :choice : :sample
+            shard_id = stream["Shards"].send(sample_method)["ShardId"]
+            shard = stream["Shards"].detect{ |shard| shard["ShardId"] == shard_id }
+            # store the records on the shard(s)
+            shard["Records"] << r.merge("SequenceNumber" => sequence_number)
+            {
+              "SequenceNumber" => sequence_number,
+              "ShardId" => shard_id
+            }
+          }
+
+          response = Excon::Response.new
+          response.status = 200
+          response.body = {
+            "FailedRecordCount" => 0,
+            "Records" => record_results
+          }
+          response
+        end
+      end
+    end
+  end
+end
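Batching several records in one call follows the same pattern; a sketch with hypothetical payloads, Base64-encoded as with put_record:

records = [
  { "PartitionKey" => "user-42", "Data" => Base64.strict_encode64("event-1") },
  { "PartitionKey" => "user-43", "Data" => Base64.strict_encode64("event-2") }
]

resp = kinesis.put_records("StreamName" => "my-stream", "Records" => records)
resp.body["FailedRecordCount"] # 0 when every record was accepted
resp.body["Records"]           # per-record ShardId / SequenceNumber, in request order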
data/lib/fog/aws/requests/kinesis/remove_tags_from_stream.rb
@@ -0,0 +1,49 @@
+module Fog
+  module AWS
+    class Kinesis
+      class Real
+        # Deletes tags from the specified Amazon Kinesis stream.
+        #
+        # ==== Options
+        # * StreamName<~String>: The name of the stream.
+        # * TagKeys<~Array>: A list of tag keys.
+        # ==== Returns
+        # * response<~Excon::Response>:
+        #
+        # ==== See Also
+        # https://docs.aws.amazon.com/kinesis/latest/APIReference/API_RemoveTagsFromStream.html
+        #
+        def remove_tags_from_stream(options={})
+          body = {
+            "StreamName" => options.delete("StreamName"),
+            "TagKeys" => options.delete("TagKeys")
+          }.reject{ |_,v| v.nil? }
+
+          request({
+            'X-Amz-Target' => "Kinesis_#{@version}.RemoveTagsFromStream",
+            :body => body,
+          }.merge(options))
+        end
+      end
+
+      class Mock
+        def remove_tags_from_stream(options={})
+          stream_name = options.delete("StreamName")
+          tags = options.delete("TagKeys")
+
+          unless stream = data[:kinesis_streams].detect{ |s| s["StreamName"] == stream_name }
+            raise Fog::AWS::Kinesis::ResourceNotFound.new("Stream #{stream_name} under account #{@account_id} not found.")
+          end
+
+          stream["Tags"] = stream["Tags"].delete_if { |k,_| tags.include?(k) }
+
+          response = Excon::Response.new
+          response.status = 200
+          response.body = ""
+          response
+        end
+      end
+
+    end
+  end
+end
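Paired with add_tags_to_stream and list_tags_for_stream (both also added in this release, per the file list), tag maintenance might look like the following sketch; the tag names are illustrative and the response shape follows the raw Kinesis API:

kinesis.add_tags_to_stream("StreamName" => "my-stream",
                           "Tags"       => { "env" => "staging", "team" => "data" })

kinesis.list_tags_for_stream("StreamName" => "my-stream").body["Tags"]
# => [{"Key"=>"env", "Value"=>"staging"}, {"Key"=>"team", "Value"=>"data"}]

# TagKeys takes an array of key names to delete.
kinesis.remove_tags_from_stream("StreamName" => "my-stream", "TagKeys" => ["env"])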
data/lib/fog/aws/requests/kinesis/split_shard.rb
@@ -0,0 +1,85 @@
+module Fog
+  module AWS
+    class Kinesis
+      class Real
+        # Splits a shard into two new shards in the stream, to increase the stream's capacity to ingest and transport data.
+        #
+        # ==== Options
+        # * NewStartingHashKey<~String>: A hash key value for the starting hash key of one of the child shards created by the split.
+        # * ShardToSplit<~String>: The shard ID of the shard to split.
+        # * StreamName<~String>: The name of the stream for the shard split.
+        # ==== Returns
+        # * response<~Excon::Response>:
+        #
+        # ==== See Also
+        # https://docs.aws.amazon.com/kinesis/latest/APIReference/API_SplitShard.html
+        #
+        def split_shard(options={})
+          body = {
+            "NewStartingHashKey" => options.delete("NewStartingHashKey"),
+            "ShardToSplit" => options.delete("ShardToSplit"),
+            "StreamName" => options.delete("StreamName")
+          }.reject{ |_,v| v.nil? }
+
+          request({
+            'X-Amz-Target' => "Kinesis_#{@version}.SplitShard",
+            :body => body,
+          }.merge(options))
+        end
+      end
+
+      class Mock
+        def split_shard(options={})
+          stream_name = options.delete("StreamName")
+          shard_id = options.delete("ShardToSplit")
+          stream = data[:kinesis_streams].detect{ |s| s["StreamName"] == stream_name }
+
+          unless stream = data[:kinesis_streams].detect{ |s| s["StreamName"] == stream_name }
+            raise Fog::AWS::Kinesis::ResourceNotFound.new("Stream #{stream_name} under account #{@account_id} not found.")
+          end
+
+          unless shard = stream["Shards"].detect{ |shard| shard["ShardId"] == shard_id }
+            raise Fog::AWS::Kinesis::ResourceNotFound.new("Could not find shard #{shard_id} in stream #{stream_name} under account #{@account_id}.")
+          end
+
+          # Close original shard (set an EndingSequenceNumber on it)
+          shard["SequenceNumberRange"]["EndingSequenceNumber"] = next_sequence_number
+
+          # Calculate new shard ranges
+          parent_starting_hash_key = shard["HashKeyRange"]["StartingHashKey"]
+          parent_ending_hash_key = shard["HashKeyRange"]["EndingHashKey"]
+          new_starting_hash_key = options.delete("NewStartingHashKey")
+
+          # Create two new shards using contiguous hash space based on the original shard
+          stream["Shards"] << {
+            "HashKeyRange" => {
+              "EndingHashKey" => (new_starting_hash_key.to_i - 1).to_s,
+              "StartingHashKey" => parent_starting_hash_key
+            },
+            "SequenceNumberRange" => {
+              "StartingSequenceNumber" => next_sequence_number
+            },
+            "ShardId" => next_shard_id,
+            "ParentShardId" => shard_id
+          }
+          stream["Shards"] << {
+            "HashKeyRange" => {
+              "EndingHashKey" => parent_ending_hash_key,
+              "StartingHashKey" => new_starting_hash_key
+            },
+            "SequenceNumberRange" => {
+              "StartingSequenceNumber" => next_sequence_number
+            },
+            "ShardId" => next_shard_id,
+            "ParentShardId" => shard_id
+          }
+
+          response = Excon::Response.new
+          response.status = 200
+          response.body = ""
+          response
+        end
+      end
+    end
+  end
+end
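Splitting usually starts from describe_stream to pick the parent shard and choose a NewStartingHashKey inside its range; a hedged sketch, assuming the decoded DescribeStream body follows the raw API shape:

description = kinesis.describe_stream("StreamName" => "my-stream").body["StreamDescription"]
parent      = description["Shards"].first
range       = parent["HashKeyRange"]

# Split roughly down the middle of the parent's hash-key range; the API uses
# decimal strings for hash keys, hence the to_i / to_s round trip.
midpoint = ((range["StartingHashKey"].to_i + range["EndingHashKey"].to_i) / 2).to_s

kinesis.split_shard(
  "StreamName"         => "my-stream",
  "ShardToSplit"       => parent["ShardId"],
  "NewStartingHashKey" => midpoint
)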
data/lib/fog/aws/storage.rb
@@ -559,16 +559,12 @@ module Fog
         params[:headers]['x-amz-date'] = date.to_iso8601_basic
         if params[:body].respond_to?(:read)
           # See http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
+          # We ignore the bit about setting the content-encoding to aws-chunked because
+          # this can cause s3 to serve files with a blank content encoding which causes problems with some CDNs
+          # AWS have confirmed that s3 can infer that the content-encoding is aws-chunked from the x-amz-content-sha256 header
+          #
           params[:headers]['x-amz-content-sha256'] = 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD'
           params[:headers]['x-amz-decoded-content-length'] = params[:headers].delete 'Content-Length'
-
-          if params[:headers]['Content-Encoding'] && params[:headers]['Content-Encoding'].to_s.length > 0
-            encoding = "aws-chunked,#{params[:headers]['Content-Encoding']}"
-          else
-            encoding = "aws-chunked"
-          end
-
-          params[:headers]['Content-Encoding'] = encoding
         else
           params[:headers]['x-amz-content-sha256'] ||= Digest::SHA256.hexdigest(params[:body] || '')
         end
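The practical effect of the storage change shows up when uploading with an IO body, which takes the streaming-signature branch above. A minimal sketch, assuming the bucket already exists; with this release the stored object keeps whatever Content-Encoding the caller sets instead of having "aws-chunked" prepended:

storage = Fog::Storage.new(
  :provider              => 'AWS',
  :aws_access_key_id     => "AKIA...",
  :aws_secret_access_key => "SECRET",
  :region                => "us-east-1"
)
directory = storage.directories.get("my-bucket")

# A body that responds to #read (here a File) is signed with the
# STREAMING-AWS4-HMAC-SHA256-PAYLOAD content hash shown in the diff.
File.open("backup.tar.gz", "rb") do |io|
  directory.files.create(
    :key              => "backup.tar.gz",
    :body             => io,
    :content_encoding => "gzip"
  )
end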
data/lib/fog/aws/version.rb
@@ -1,5 +1,5 @@
 module Fog
   module AWS
-    VERSION = "0.6.0"
+    VERSION = "0.7.2"
   end
 end
data/tests/helpers/mock_helper.rb
@@ -97,7 +97,6 @@ if Fog.mock?
   :vsphere_username => 'apiuser',
   :vsphere_password => 'apipassword',
   :vsphere_expected_pubkey_hash => 'abcdef1234567890',
-  :libvirt_uri => 'qemu:///system',
   :libvirt_username => 'root',
   :libvirt_password => 'password',
   :cloudsigma_username => 'csuname',
data/tests/requests/compute/route_tests.rb
@@ -25,13 +25,14 @@ Shindo.tests('Fog::Compute[:aws] | route table requests', ['aws']) do
     }],
     'tagSet' => Hash,
     'routeSet' => [{
-      'destinationCidrBlock' => String,
-      'gatewayId' => Fog::Nullable::String,
-      'instanceId' => Fog::Nullable::String,
-      'instanceOwnerId' => Fog::Nullable::String,
-      'networkInterfaceId' => Fog::Nullable::String,
-      'state' => String,
-      'origin' => String
+      'destinationCidrBlock'   => String,
+      'gatewayId'              => Fog::Nullable::String,
+      'instanceId'             => Fog::Nullable::String,
+      'instanceOwnerId'        => Fog::Nullable::String,
+      'networkInterfaceId'     => Fog::Nullable::String,
+      'vpcPeeringConnectionId' => Fog::Nullable::String,
+      'state'                  => String,
+      'origin'                 => String
     }],
     'routeTableId' => String,
     'vpcId' => String,
data/tests/requests/dns/change_resource_record_sets_tests.rb
@@ -1,10 +1,34 @@
 Shindo.tests('Fog::DNS[:aws] | change_resource_record_sets', ['aws', 'dns']) do
-  @r53_connection = Fog::DNS[:aws]
-
   tests('success') do
     test('#elb_hosted_zone_mapping from DNS name') do
       zone_id = Fog::DNS::AWS.hosted_zone_for_alias_target('arbitrary-sub-domain.eu-west-1.elb.amazonaws.com')
       zone_id == Fog::DNS::AWS.elb_hosted_zone_mapping['eu-west-1']
     end
   end
+
+  tests("#change_resource_record_sets_data formats geolocation properly") do
+    change_batch = [{
+      :action=>"CREATE",
+      :name=>"ark.m.example.net.",
+      :resource_records=>["1.1.1.1"],
+      :ttl=>"300",
+      :type=>"A",
+      :set_identifier=>"ark",
+      :geo_location=>{"CountryCode"=>"US", "SubdivisionCode"=>"AR"},
+    }]
+
+    version = '2013-04-01'
+    result = Fog::DNS::AWS.change_resource_record_sets_data('zone_id123', change_batch, version)
+    doc = Nokogiri::XML(result)
+
+    returns("https://route53.amazonaws.com/doc/#{version}/") { doc.namespaces['xmlns'] }
+    returns(%w[US AR]) {
+      [
+        doc.css("GeoLocation CountryCode").text,
+        doc.css("GeoLocation SubdivisionCode").text
+      ]
+    }
+
+    result
+  end
 end
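For context, the geolocation formatting exercised by this test maps onto a change_resource_record_sets call like the sketch below; the hosted zone ID is hypothetical, and the change-batch keys mirror the ones in the test:

dns = Fog::DNS[:aws]

dns.change_resource_record_sets("Z123EXAMPLE", [{
  :action           => "CREATE",
  :name             => "ark.m.example.net.",
  :type             => "A",
  :ttl              => "300",
  :resource_records => ["1.1.1.1"],
  :set_identifier   => "ark",
  # Serialized into the <GeoLocation> element asserted on above:
  # resolvers in US / Arkansas are routed to this record.
  :geo_location     => { "CountryCode" => "US", "SubdivisionCode" => "AR" }
}])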