gooddata 2.1.11 → 2.1.12

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 3a685dc668e6b4f2143b8abeaa5264d0576144f88f7fc4f3be2b647512efc852
- data.tar.gz: 3fd65d6bc019487c9771f932e650ffc67a5b7366db17c09bf15ed1a3543bd2c6
+ metadata.gz: 21bb1e8b1a4eacb96fbf7aa25e22b54927a881fe3e04648f886b3ba132110c18
+ data.tar.gz: 797c83b8e0c76d07b4ca976023ff58cc82a1fa1bc3e63ad68fee294e24f9ddde
  SHA512:
- metadata.gz: 180103d93ac47c89a24a57119fb64a9fa755d9e97cf01cf3e3f6f998f270024dd8755520b4b03e973f8543c97fb26f3191a69f5448b9b616b60fff6bcd604743
- data.tar.gz: 65dadc4b41397a1dad8cda4d6c92dc3d027d29fa161f81d7a16d2fc194f88cdb5471daa2a3f8499d50c5f65ea3acac5029cfac7d3d3444280990cc206d12adaa
+ metadata.gz: bb2d421d223db058c231ec0d66b3325a97237a5dc050305e17bc27dcab78694c8237241fbca2dab60d45a8257cd5b8bfb875c2db532e38fa075cfc02cfb57b26
+ data.tar.gz: 7ef77abd3d61fe54238b25b00a8d4ea05852fbc3474ae64aa54a770b739f3d64a5cc847e42cc993ac232423204ac9a354a4401882663dd8723a02f95fb1e91ba
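The values above are the SHA256/SHA512 digests rubygems.org records for the files packaged inside the .gem. As a hedged illustration only (not part of the diff, and the file name is assumed to be an extracted data.tar.gz in the current directory), such a digest can be recomputed with Ruby's standard library:

```ruby
require 'digest'

# Hypothetical local check: recompute the SHA256 of an extracted data.tar.gz
# and compare it with the value recorded in checksums.yaml above.
expected = '797c83b8e0c76d07b4ca976023ff58cc82a1fa1bc3e63ad68fee294e24f9ddde'
actual = Digest::SHA256.file('data.tar.gz').hexdigest
puts(actual == expected ? 'checksum OK' : 'checksum mismatch')
```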
@@ -1,4 +1,13 @@
  # GoodData Ruby SDK Changelog
+ ## 2.1.12
+ - FEATURE: MSF-17621 Apply patched version for activesupport to fix vulnerable issue
+ - CONFIG: SETI-4379 Add gdc-fossa configuration for gooddata-ruby
+ - CONFIG: MSF-17345 Set umask 0002 for lcm brick
+ - BUGFIX: TMA-1015 check case sensitivity for group name
+ - BUGFIX: MSF-17219 Fixed incorrect client used in domain.users
+ - BUGFIX: TMA-1022 Add more logs for case roll out without LDM change in master
+ - FEATURE: TMA-1640 Add alert for CPU limit hit
+
  ## 2.1.11
  - FEATURE: TMA-1647 Set VCR version to 5.0.0 due to incompatible license of the latest version
 
@@ -1 +1 @@
- 2.1.11
+ 2.1.12
data/VERSION CHANGED
@@ -1 +1 @@
- 3.7.18
+ 3.7.20
@@ -8,6 +8,9 @@ DEFAULT_BRICK = 'hello_world_brick'
  BRICK_PARAM_PREFIX = 'BRICK_PARAM_'
  HIDDEN_BRICK_PARAMS_PREFIX = 'HIDDEN_BRICK_PARAM_'
 
+ # MSF-17345 Set umask so files are group-writable
+ File.umask(0002)
+
  brick_type = !ARGV.empty? ? ARGV[0] : DEFAULT_BRICK
 
  def get_brick_params(prefix)
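For context on the MSF-17345 change above: `File.umask(0002)` removes only the "other"-write bit from the process umask, so files the brick creates end up group-writable. A minimal sketch of the effect, using only standard Ruby and a temporary directory (independent of this gem):

```ruby
require 'tmpdir'

File.umask(0002)
Dir.mktmpdir do |dir|
  path = File.join(dir, 'example.txt')
  File.write(path, 'hello')
  # With umask 0002, the default create mode 0666 is reduced to 0664,
  # i.e. readable by everyone and writable by owner and group.
  puts format('%o', File.stat(path).mode & 0o777) # => "664"
end
```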
@@ -90,7 +90,11 @@ def clean_up!(client, force, days)
  delete_project_by_title(/LCM spec Client With Conflicting LDM Changes/, projects, days, force)
  delete_project_by_title(/LCM spec master project/, projects, days, force)
  delete_project_by_title(/users brick load test/, projects, days, force)
- delete_project_by_title(/#transfer_processes and #transfer_schedules test/, projects, days, force)
+ delete_project_by_title(/transfer_processes and #transfer_schedules test/, projects, days, force)
+ delete_project_by_title(/DailyUse Project for gooddata-ruby integration tests/, projects, days, force)
+ delete_project_by_title(/^New project$/, projects, days, force)
+ delete_project_by_title(/RubyGem Dev Week test/, projects, days, force)
+ delete_project_by_title(/My project from blueprint/, projects, days, force)
  delete_ads_by_title(/Development ADS/, client, days, force)
  delete_ads_by_title(/Production ADS/, client, days, force)
  delete_ads_by_title(/TEST ADS/, client, days, force)
@@ -112,7 +116,7 @@ dev_client = init_client(username, password, "https://#{config[:dev_server]}")
  prod_client = init_client(username, password, "https://#{config[:prod_server]}")
 
  force = options[:force]
- days = options[:days] || 14
+ days = options[:days] || 3
  clean_up!(dev_client, force, days)
  clean_up!(prod_client, force, days)
 
@@ -0,0 +1,2 @@
+ ---
+ fossa_project: "gooddata-ruby-lcm"
@@ -0,0 +1,4 @@
+ ---
+ fossa_project: "gooddata-ruby-sdk"
+ ignored_paths:
+ - 'ci/.*'
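The two new files above configure GoodData's internal gdc-fossa tooling (SETI-4379); their exact semantics are not part of this gem. Purely as a hedged illustration, an `ignored_paths` entry like the one above reads naturally as a regular expression applied to repository paths:

```ruby
# Hypothetical illustration of how a pattern such as 'ci/.*' could exclude
# files from license scanning; the real gdc-fossa behaviour may differ.
ignored = [Regexp.new('ci/.*')]
paths = ['ci/Dockerfile', 'lib/gooddata.rb']
kept = paths.reject { |p| ignored.any? { |re| re.match?(p) } }
puts kept.inspect # => ["lib/gooddata.rb"]
```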
@@ -55,9 +55,9 @@ Gem::Specification.new do |s|
  s.add_development_dependency 'sqlite3' if RUBY_PLATFORM != 'java'
 
  if RUBY_VERSION >= '2.5'
- s.add_dependency 'activesupport', '> 4.2.9', '< 6.1'
+ s.add_dependency 'activesupport', '>= 6.0.3.1', '< 6.1'
  else
- s.add_dependency 'activesupport', '> 4.2.9', '< 6.0'
+ s.add_dependency 'activesupport', '>= 5.2.4.3', '< 6.0'
  end
 
  s.add_dependency 'aws-sdk-s3', '~> 1.16'
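The MSF-17621 bump above raises the activesupport floor to the patched releases: 6.0.3.1 on Ruby >= 2.5 and 5.2.4.3 on older Rubies. A small sketch, assuming Bundler has already resolved the bundle, to confirm the resolved version satisfies the new constraint:

```ruby
require 'active_support/version'

# Minimum patched activesupport for the running Ruby, mirroring the gemspec above.
floor = RUBY_VERSION >= '2.5' ? '6.0.3.1' : '5.2.4.3'
resolved = Gem::Version.new(ActiveSupport::VERSION::STRING)
puts "activesupport #{resolved} >= #{floor}: #{resolved >= Gem::Version.new(floor)}"
```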
@@ -1,4 +1,4 @@
  apiVersion: v1
  name: lcm-bricks
  description: LCM Bricks
- version: 2.0.1
+ version: 2.0.3
@@ -20,7 +20,7 @@ data:
  expr: container_pod:lcm_pod_container_status_restarts:increase10m >= 1
  labels:
  severity: warning
- team: lcm # switch to msf in production
+ team: lcm
  cluster_id: {{ .Values.clusterId }}
  annotations:
  description: "There is more than 0 restarts of {{`{{ $labels.pod }}`}} pod in the last 10 minutes"
@@ -28,8 +28,8 @@ data:
  - alert: "[LCM] Pod has too many restarts on cluster={{ .Values.clusterId }}"
  expr: container_pod:lcm_pod_container_status_restarts:increase10m >= 2
  labels:
- severity: critical
- team: lcm # switch to msf in production
+ severity: warning
+ team: lcm
  cluster_id: {{ .Values.clusterId }}
  annotations:
  description: "There is more than 1 restart of {{`{{ $labels.pod }}`}} pod in the last 10 minutes"
@@ -40,7 +40,7 @@ data:
  expr: container_pod:lcm_pod_container_status_oomkilled:increase10m >= 1
  labels:
  severity: warning
- team: lcm # switch to msf in production
+ team: lcm
  cluster_id: {{ .Values.clusterId }}
  annotations:
  description: "{{`{{ $labels.pod }}`}} was OOMKilled in the last 30 minutes. Investigate and/or increase memoryRequest or memoryLimit."
@@ -48,8 +48,8 @@ data:
  - alert: "[LCM] OOMKill occured on cluster={{ .Values.clusterId }}"
  expr: container_pod:lcm_pod_container_status_oomkilled:increase10m >= 2
  labels:
- severity: critical
- team: lcm # switch to msf in production
+ severity: warning
+ team: lcm
  cluster_id: {{ .Values.clusterId }}
  annotations:
  description: "{{`{{ $labels.pod }}`}} was OOMKilled in the last 10 minutes. Investigate and/or increase memoryRequest or memoryLimit."
@@ -58,8 +58,8 @@ data:
  expr: rate(container_cpu_cfs_throttled_seconds_total{namespace='{{ .Release.Namespace }}'}[1m]) > 1
  for: 5m
  labels:
- severity: critical
- team: lcm # switch to msf in production
+ severity: warning
+ team: lcm
  cluster_id: {{ .Values.clusterId }}
  annotations:
  description: "{{`{{ $labels.pod_name }}`}} container is beeing throttled and probably hit CPU limit. Investigate root cause and increase limit and/or number of replicas if necessary."
@@ -68,8 +68,8 @@ data:
  expr: rate(jvm_gc_pause_seconds_sum{kubernetes_namespace='{{ .Release.Namespace }}'}[1m]) > 1
  for: 5m
  labels:
- severity: critical
- team: lcm # switch to msf in production
+ severity: warning
+ team: lcm
  cluster_id: {{ .Values.clusterId }}
  annotations:
  description: "{{`{{ $labels.kubernetes_pod_name }}`}} container is spending too much time in pause garbage collector. Investigate root cause and increase heap size and/or number of replicas if necessary."
@@ -77,9 +77,19 @@ data:
  - alert: "[LCM] there is more than 100 jobs on cluster={{ .Values.clusterId }}"
  expr: count(kube_job_info{namespace="lcm"}) > 100
  labels:
- severity: critical
- team: lcm # switch to msf in production
+ severity: warning
+ team: lcm
  cluster_id: {{ .Values.clusterId }}
  annotations:
  description: "There is more than 100 jobs in LCM namespace. They are likely not deleted."
  summary: "There is more than 100 jobs in LCM namespace."
+ - alert: "[LCM] Resource quotas hit CPU limit on cluster={{ .Values.clusterId }}"
+ expr: kube_resourcequota{namespace='{{ .Release.Namespace }}',resource="limits.cpu",type="hard"} - ignoring(type) kube_resourcequota{namespace='{{ .Release.Namespace }}',resource="limits.cpu",type="used"} == 0
+ labels:
+ severity: warning
+ team: lcm
+ cluster_id: {{ .Values.clusterId }}
+ annotations:
+ description: "We are hitting CPU limit in LCM namespace."
+ summary: "We are hitting CPU limit in LCM namespace."
+
@@ -89,6 +89,13 @@ module GoodData
  maql_diff_params << :excludeFactRule if exclude_fact_rule
  maql_diff_params << :includeDeprecated if include_deprecated
  maql_diff = previous_master.maql_diff(blueprint: blueprint, params: maql_diff_params)
+ chunks = maql_diff['projectModelDiff']['updateScripts']
+ if chunks.empty?
+ GoodData.logger.info "Synchronize LDM to clients will not proceed in mode \
+ '#{params[:synchronize_ldm].downcase}' due to no LDM changes in the new master project. \
+ If you had changed LDM of clients manually, please use mode 'diff_against_clients' \
+ to force synchronize LDM to clients"
+ end
  end
 
  segment_info[:to] = segment_info[:to].pmap do |entry|
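The TMA-1022 hunk above only adds logging: when the MAQL diff between the previous master and the new blueprint contains no update scripts, the chosen synchronization mode is reported as a no-op. A minimal sketch of the shape being inspected; the structure below is taken from the hunk itself, not from API documentation:

```ruby
# Simplified stand-in for the maql_diff result used above.
maql_diff = { 'projectModelDiff' => { 'updateScripts' => [] } }
params = { synchronize_ldm: 'diff_against_master' }

chunks = maql_diff['projectModelDiff']['updateScripts']
if chunks.empty?
  puts "Synchronize LDM to clients will not proceed in mode " \
       "'#{params[:synchronize_ldm].downcase}' due to no LDM changes in the new master project."
end
```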
@@ -241,7 +241,7 @@ module GoodData
 
  all_users
  else
- find_user_by_login(domain, id)
+ find_user_by_login(domain, id, opts)
  end
  end
 
@@ -1606,14 +1606,19 @@ module GoodData
  def import_users(new_users, options = {})
  role_list = roles
  users_list = users
- new_users = new_users.map { |x| ((x.is_a?(Hash) && x[:user] && x[:user].to_hash.merge(role: x[:role])) || x.to_hash).tap { |u| u[:login].downcase! } }
 
  GoodData.logger.warn("Importing users to project (#{pid})")
+ new_users = new_users.map { |x| ((x.is_a?(Hash) && x[:user] && x[:user].to_hash.merge(role: x[:role])) || x.to_hash).tap { |u| u[:login].downcase! } }
+ # First check that if groups are provided we have them set up
+ user_groups_cache, change_groups = check_groups(new_users.map(&:to_hash).flat_map { |u| u[:user_group] || [] }.uniq, options[:user_groups_cache], options)
 
- whitelisted_new_users, whitelisted_users = whitelist_users(new_users.map(&:to_hash), users_list, options[:whitelists])
+ unless change_groups.empty?
+ new_users.each do |user|
+ user[:user_group].map! { |e| change_groups[e].nil? ? e : change_groups[e] }
+ end
+ end
 
- # First check that if groups are provided we have them set up
- user_groups_cache = check_groups(new_users.map(&:to_hash).flat_map { |u| u[:user_group] || [] }.uniq, options[:user_groups_cache], options)
+ whitelisted_new_users, whitelisted_users = whitelist_users(new_users.map(&:to_hash), users_list, options[:whitelists])
 
  # conform the role on list of new users so we can diff them with the users coming from the project
  diffable_new_with_default_role = whitelisted_new_users.map do |u|
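To make the remapping step above concrete: `check_groups` now also returns a `change_groups` hash mapping requested group names to the names that already exist in the project, and each user's `:user_group` list is rewritten through it. A self-contained sketch with made-up data:

```ruby
# Hypothetical data; in the gem, change_groups is returned by check_groups.
change_groups = { 'admins' => 'Admins' }
new_users = [{ login: 'jane.doe@example.com', user_group: %w[admins Viewers] }]

unless change_groups.empty?
  new_users.each do |user|
    # Replace each requested group name with its existing, differently-cased counterpart.
    user[:user_group].map! { |e| change_groups[e].nil? ? e : change_groups[e] }
  end
end

puts new_users.inspect
# => [{:login=>"jane.doe@example.com", :user_group=>["Admins", "Viewers"]}]
```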
@@ -1760,7 +1765,20 @@ module GoodData
  def check_groups(specified_groups, user_groups_cache = nil, options = {})
  current_user_groups = user_groups if user_groups_cache.nil? || user_groups_cache.empty?
  groups = current_user_groups.map(&:name)
- missing_groups = specified_groups - groups
+ missing_groups = []
+ change_groups = {}
+ specified_groups.each do |group|
+ found_group = groups.find { |name| name.casecmp(group).zero? }
+ if found_group.nil?
+ missing_groups << group
+ else
+ # Change groups when they have similar group name with difference of case sensitivity
+ if found_group != group
+ change_groups[group] = found_group
+ GoodData.logger.warn("Group with name #{group} is existed in project with name #{found_group}.")
+ end
+ end
+ end
  if options[:create_non_existing_user_groups]
  missing_groups.each do |g|
  GoodData.logger.info("Creating group #{g}")
@@ -1773,7 +1791,7 @@ module GoodData
  "#{groups.join(',')} and you asked for #{missing_groups.join(',')}"
  end
  end
- current_user_groups
+ [current_user_groups, change_groups]
  end
 
  # Update user
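The TMA-1015 change replaces the exact set difference with a case-insensitive lookup: a requested group counts as existing if any project group matches it under `casecmp`, and names that differ only in case are collected into `change_groups` for the remapping shown earlier. A standalone sketch of that logic with illustrative data:

```ruby
existing_groups  = %w[Admins Editors]  # group names already present in the project
specified_groups = %w[admins Viewers]  # group names requested for the imported users

missing_groups = []
change_groups = {}
specified_groups.each do |group|
  # Case-insensitive match against the existing group names.
  found_group = existing_groups.find { |name| name.casecmp(group).zero? }
  if found_group.nil?
    missing_groups << group
  elsif found_group != group
    # Same group, different case: remember the canonical name.
    change_groups[group] = found_group
  end
end

puts missing_groups.inspect # => ["Viewers"]
puts change_groups.inspect  # => {"admins"=>"Admins"}
```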
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: gooddata
  version: !ruby/object:Gem::Version
- version: 2.1.11
+ version: 2.1.12
  platform: ruby
  authors:
  - Pavel Kolesnikov
@@ -14,7 +14,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2020-05-20 00:00:00.000000000 Z
+ date: 2020-06-22 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: license_finder
@@ -286,9 +286,9 @@ dependencies:
  name: activesupport
  requirement: !ruby/object:Gem::Requirement
  requirements:
- - - ">"
+ - - ">="
  - !ruby/object:Gem::Version
- version: 4.2.9
+ version: 5.2.4.3
  - - "<"
  - !ruby/object:Gem::Version
  version: '6.0'
@@ -296,9 +296,9 @@ dependencies:
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
- - - ">"
+ - - ">="
  - !ruby/object:Gem::Version
- version: 4.2.9
+ version: 5.2.4.3
  - - "<"
  - !ruby/object:Gem::Version
  version: '6.0'
@@ -640,6 +640,8 @@ files:
  - dev-gooddata-sso.pub.encrypted
  - docker-compose.lcm.yml
  - docker-compose.yml
+ - gdc_fossa_lcm.yaml
+ - gdc_fossa_ruby_sdk.yaml
  - gooddata
  - gooddata.gemspec
  - k8s/charts/lcm-bricks/Chart.yaml