gooddata 2.1.11-java → 2.1.12-java

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 2838607bcfd0d97d1e1a9b982922188d7f9b1fc0fe73cbf0e48cc997fc9cd11a
- data.tar.gz: 0f3a9cef21252b09c3b1eaa85f3605d533fae91e8ef8f6764eb4bbdd3286921f
+ metadata.gz: 73f3c8fde12dff87986355749de47fbdbbc334416d032f58c18d993fd5f28967
+ data.tar.gz: 73e045492e7cccc765c4ad60c08242c53efaf3a92f9c7adf56af9259a22c14e0
  SHA512:
- metadata.gz: dc28fac94310ffc503746aca0cd9a0c32f70b3d5c4e91d5b5da31011fb69b95d9137454bb85a96e84c643a5e0cdcd77d8e6d45716ac10c964a39573b87cca8b1
- data.tar.gz: 7d013700f2447a9553f553cabc43bd41787ee92f2f91e84c2eec628d14ecdeab6e5956b855a3bfefe44d775f4e848066abf872aa5e61f7b32a258a84387e98f4
+ metadata.gz: 1ab74bd538fecff1536d028993fc62cc287435eeb19163fc3db0b842effd3feca3b746113358c7bd3236c9a9ac15f30ac37361b944cadfad6cfa7161f8219f81
+ data.tar.gz: 18c48a23901266649fc9f4cf1db10d5f289ea4726f0cc5f63ceb919007f44f2dbbd6b9a37589bc87994c4104136237f49f0540ab7a4648506b5d5521f80c150b
@@ -1,4 +1,13 @@
  # GoodData Ruby SDK Changelog
+ ## 2.1.12
+ - FEATURE: MSF-17621 Apply patched version for activesupport to fix vulnerable issue
+ - CONFIG: SETI-4379 Add gdc-fossa configuration for gooddata-ruby
+ - CONFIG: MSF-17345 Set umask 0002 for lcm brick
+ - BUGFIX: TMA-1015 check case sensitivity for group name
+ - BUGFIX: MSF-17219 Fixed incorrect client used in domain.users
+ - BUGFIX: TMA-1022 Add more logs for case roll out without LDM change in master
+ - FEATURE: TMA-1640 Add alert for CPU limit hit
+
  ## 2.1.11
  - FEATURE: TMA-1647 Set VCR version to 5.0.0 due to incompatible license of the latest version
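To pick up the patched activesupport floor from MSF-17621, an application only needs to move to this release; a minimal Gemfile sketch (the pin and the platform restriction are assumptions, shown here because this listing is the `-java` build):

  source 'https://rubygems.org'
  # Hypothetical pin; any constraint resolving to >= 2.1.12 picks up the fix.
  gem 'gooddata', '~> 2.1.12', platforms: :jruby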
@@ -1 +1 @@
- 2.1.11
+ 2.1.12
data/VERSION CHANGED
@@ -1 +1 @@
- 3.7.18
+ 3.7.20
@@ -8,6 +8,9 @@ DEFAULT_BRICK = 'hello_world_brick'
  BRICK_PARAM_PREFIX = 'BRICK_PARAM_'
  HIDDEN_BRICK_PARAMS_PREFIX = 'HIDDEN_BRICK_PARAM_'

+ # MSF-17345 Set umask so files are group-writable
+ File.umask(0002)
+
  brick_type = !ARGV.empty? ? ARGV[0] : DEFAULT_BRICK

  def get_brick_params(prefix)
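For context, the effect of that umask call can be checked in isolation; a minimal Ruby sketch (the temp path is made up) showing that newly created files come out group-writable (0664 instead of the usual 0644):

  File.umask(0002)
  path = '/tmp/umask_demo.txt'                       # hypothetical path
  File.open(path, 'w') { |f| f.write('hello') }      # file must not exist yet
  printf("mode: %o\n", File.stat(path).mode & 0777)  # => mode: 664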
@@ -90,7 +90,11 @@ def clean_up!(client, force, days)
  delete_project_by_title(/LCM spec Client With Conflicting LDM Changes/, projects, days, force)
  delete_project_by_title(/LCM spec master project/, projects, days, force)
  delete_project_by_title(/users brick load test/, projects, days, force)
- delete_project_by_title(/#transfer_processes and #transfer_schedules test/, projects, days, force)
+ delete_project_by_title(/transfer_processes and #transfer_schedules test/, projects, days, force)
+ delete_project_by_title(/DailyUse Project for gooddata-ruby integration tests/, projects, days, force)
+ delete_project_by_title(/^New project$/, projects, days, force)
+ delete_project_by_title(/RubyGem Dev Week test/, projects, days, force)
+ delete_project_by_title(/My project from blueprint/, projects, days, force)
  delete_ads_by_title(/Development ADS/, client, days, force)
  delete_ads_by_title(/Production ADS/, client, days, force)
  delete_ads_by_title(/TEST ADS/, client, days, force)
@@ -112,7 +116,7 @@ dev_client = init_client(username, password, "https://#{config[:dev_server]}")
  prod_client = init_client(username, password, "https://#{config[:prod_server]}")

  force = options[:force]
- days = options[:days] || 14
+ days = options[:days] || 3
  clean_up!(dev_client, force, days)
  clean_up!(prod_client, force, days)
@@ -0,0 +1,2 @@
+ ---
+ fossa_project: "gooddata-ruby-lcm"
@@ -0,0 +1,4 @@
+ ---
+ fossa_project: "gooddata-ruby-sdk"
+ ignored_paths:
+ - 'ci/.*'
@@ -55,9 +55,9 @@ Gem::Specification.new do |s|
  s.add_development_dependency 'sqlite3' if RUBY_PLATFORM != 'java'

  if RUBY_VERSION >= '2.5'
- s.add_dependency 'activesupport', '> 4.2.9', '< 6.1'
+ s.add_dependency 'activesupport', '>= 6.0.3.1', '< 6.1'
  else
- s.add_dependency 'activesupport', '> 4.2.9', '< 6.0'
+ s.add_dependency 'activesupport', '>= 5.2.4.3', '< 6.0'
  end

  s.add_dependency 'aws-sdk-s3', '~> 1.16'
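A quick way to confirm that the new floor is what a bundle actually resolves to; a small Ruby sketch (running it inside an app that depends on this gem is assumed):

  require 'active_support/version'
  floor = RUBY_VERSION >= '2.5' ? '6.0.3.1' : '5.2.4.3'
  current = Gem::Version.new(ActiveSupport::VERSION::STRING)
  # Raise loudly if a vulnerable activesupport slipped through the resolver.
  raise "activesupport #{current} is below #{floor}" if current < Gem::Version.new(floor)
  puts "activesupport #{current} satisfies >= #{floor}"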
@@ -1,4 +1,4 @@
  apiVersion: v1
  name: lcm-bricks
  description: LCM Bricks
- version: 2.0.1
+ version: 2.0.3
@@ -20,7 +20,7 @@ data:
  expr: container_pod:lcm_pod_container_status_restarts:increase10m >= 1
  labels:
  severity: warning
- team: lcm # switch to msf in production
+ team: lcm
  cluster_id: {{ .Values.clusterId }}
  annotations:
  description: "There is more than 0 restarts of {{`{{ $labels.pod }}`}} pod in the last 10 minutes"
@@ -28,8 +28,8 @@ data:
  - alert: "[LCM] Pod has too many restarts on cluster={{ .Values.clusterId }}"
  expr: container_pod:lcm_pod_container_status_restarts:increase10m >= 2
  labels:
- severity: critical
- team: lcm # switch to msf in production
+ severity: warning
+ team: lcm
  cluster_id: {{ .Values.clusterId }}
  annotations:
  description: "There is more than 1 restart of {{`{{ $labels.pod }}`}} pod in the last 10 minutes"
@@ -40,7 +40,7 @@ data:
  expr: container_pod:lcm_pod_container_status_oomkilled:increase10m >= 1
  labels:
  severity: warning
- team: lcm # switch to msf in production
+ team: lcm
  cluster_id: {{ .Values.clusterId }}
  annotations:
  description: "{{`{{ $labels.pod }}`}} was OOMKilled in the last 30 minutes. Investigate and/or increase memoryRequest or memoryLimit."
@@ -48,8 +48,8 @@ data:
  - alert: "[LCM] OOMKill occured on cluster={{ .Values.clusterId }}"
  expr: container_pod:lcm_pod_container_status_oomkilled:increase10m >= 2
  labels:
- severity: critical
- team: lcm # switch to msf in production
+ severity: warning
+ team: lcm
  cluster_id: {{ .Values.clusterId }}
  annotations:
  description: "{{`{{ $labels.pod }}`}} was OOMKilled in the last 10 minutes. Investigate and/or increase memoryRequest or memoryLimit."
@@ -58,8 +58,8 @@ data:
  expr: rate(container_cpu_cfs_throttled_seconds_total{namespace='{{ .Release.Namespace }}'}[1m]) > 1
  for: 5m
  labels:
- severity: critical
- team: lcm # switch to msf in production
+ severity: warning
+ team: lcm
  cluster_id: {{ .Values.clusterId }}
  annotations:
  description: "{{`{{ $labels.pod_name }}`}} container is beeing throttled and probably hit CPU limit. Investigate root cause and increase limit and/or number of replicas if necessary."
@@ -68,8 +68,8 @@ data:
  expr: rate(jvm_gc_pause_seconds_sum{kubernetes_namespace='{{ .Release.Namespace }}'}[1m]) > 1
  for: 5m
  labels:
- severity: critical
- team: lcm # switch to msf in production
+ severity: warning
+ team: lcm
  cluster_id: {{ .Values.clusterId }}
  annotations:
  description: "{{`{{ $labels.kubernetes_pod_name }}`}} container is spending too much time in pause garbage collector. Investigate root cause and increase heap size and/or number of replicas if necessary."
@@ -77,9 +77,19 @@ data:
  - alert: "[LCM] there is more than 100 jobs on cluster={{ .Values.clusterId }}"
  expr: count(kube_job_info{namespace="lcm"}) > 100
  labels:
- severity: critical
- team: lcm # switch to msf in production
+ severity: warning
+ team: lcm
  cluster_id: {{ .Values.clusterId }}
  annotations:
  description: "There is more than 100 jobs in LCM namespace. They are likely not deleted."
  summary: "There is more than 100 jobs in LCM namespace."
+ - alert: "[LCM] Resource quotas hit CPU limit on cluster={{ .Values.clusterId }}"
+ expr: kube_resourcequota{namespace='{{ .Release.Namespace }}',resource="limits.cpu",type="hard"} - ignoring(type) kube_resourcequota{namespace='{{ .Release.Namespace }}',resource="limits.cpu",type="used"} == 0
+ labels:
+ severity: warning
+ team: lcm
+ cluster_id: {{ .Values.clusterId }}
+ annotations:
+ description: "We are hitting CPU limit in LCM namespace."
+ summary: "We are hitting CPU limit in LCM namespace."
+
@@ -89,6 +89,13 @@ module GoodData
  maql_diff_params << :excludeFactRule if exclude_fact_rule
  maql_diff_params << :includeDeprecated if include_deprecated
  maql_diff = previous_master.maql_diff(blueprint: blueprint, params: maql_diff_params)
+ chunks = maql_diff['projectModelDiff']['updateScripts']
+ if chunks.empty?
+ GoodData.logger.info "Synchronize LDM to clients will not proceed in mode \
+ '#{params[:synchronize_ldm].downcase}' due to no LDM changes in the new master project. \
+ If you had changed LDM of clients manually, please use mode 'diff_against_clients' \
+ to force synchronize LDM to clients"
+ end
  end

  segment_info[:to] = segment_info[:to].pmap do |entry|
@@ -241,7 +241,7 @@ module GoodData
  all_users
  else
- find_user_by_login(domain, id)
+ find_user_by_login(domain, id, opts)
  end
  end
@@ -1606,14 +1606,19 @@ module GoodData
  def import_users(new_users, options = {})
  role_list = roles
  users_list = users
- new_users = new_users.map { |x| ((x.is_a?(Hash) && x[:user] && x[:user].to_hash.merge(role: x[:role])) || x.to_hash).tap { |u| u[:login].downcase! } }

  GoodData.logger.warn("Importing users to project (#{pid})")
+ new_users = new_users.map { |x| ((x.is_a?(Hash) && x[:user] && x[:user].to_hash.merge(role: x[:role])) || x.to_hash).tap { |u| u[:login].downcase! } }
+ # First check that if groups are provided we have them set up
+ user_groups_cache, change_groups = check_groups(new_users.map(&:to_hash).flat_map { |u| u[:user_group] || [] }.uniq, options[:user_groups_cache], options)

- whitelisted_new_users, whitelisted_users = whitelist_users(new_users.map(&:to_hash), users_list, options[:whitelists])
+ unless change_groups.empty?
+ new_users.each do |user|
+ user[:user_group].map! { |e| change_groups[e].nil? ? e : change_groups[e] }
+ end
+ end

- # First check that if groups are provided we have them set up
- user_groups_cache = check_groups(new_users.map(&:to_hash).flat_map { |u| u[:user_group] || [] }.uniq, options[:user_groups_cache], options)
+ whitelisted_new_users, whitelisted_users = whitelist_users(new_users.map(&:to_hash), users_list, options[:whitelists])

  # conform the role on list of new users so we can diff them with the users coming from the project
  diffable_new_with_default_role = whitelisted_new_users.map do |u|
@@ -1760,7 +1765,20 @@ module GoodData
  def check_groups(specified_groups, user_groups_cache = nil, options = {})
  current_user_groups = user_groups if user_groups_cache.nil? || user_groups_cache.empty?
  groups = current_user_groups.map(&:name)
- missing_groups = specified_groups - groups
+ missing_groups = []
+ change_groups = {}
+ specified_groups.each do |group|
+ found_group = groups.find { |name| name.casecmp(group).zero? }
+ if found_group.nil?
+ missing_groups << group
+ else
+ # Change groups when they have similar group name with difference of case sensitivity
+ if found_group != group
+ change_groups[group] = found_group
+ GoodData.logger.warn("Group with name #{group} is existed in project with name #{found_group}.")
+ end
+ end
+ end
  if options[:create_non_existing_user_groups]
  missing_groups.each do |g|
  GoodData.logger.info("Creating group #{g}")
@@ -1773,7 +1791,7 @@ module GoodData
  "#{groups.join(',')} and you asked for #{missing_groups.join(',')}"
  end
  end
- current_user_groups
+ [current_user_groups, change_groups]
  end

  # Update user
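For illustration, the TMA-1015 matching rule above can be exercised in isolation; a standalone Ruby sketch with made-up group names (only the casecmp logic mirrors the change):

  existing  = ['Admins', 'Developers']
  requested = ['admins', 'Developers', 'analysts']

  missing = []
  renamed = {}
  requested.each do |group|
    found = existing.find { |name| name.casecmp(group).zero? }
    if found.nil?
      missing << group            # no match at all, e.g. 'analysts'
    elsif found != group
      renamed[group] = found      # same name, different case: 'admins' => 'Admins'
    end
  end
  # missing => ["analysts"], renamed => {"admins" => "Admins"}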
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: gooddata
  version: !ruby/object:Gem::Version
- version: 2.1.11
+ version: 2.1.12
  platform: java
  authors:
  - Pavel Kolesnikov
@@ -14,7 +14,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2020-05-20 00:00:00.000000000 Z
+ date: 2020-06-22 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  requirement: !ruby/object:Gem::Requirement
@@ -201,9 +201,9 @@ dependencies:
  - !ruby/object:Gem::Dependency
  requirement: !ruby/object:Gem::Requirement
  requirements:
- - - ">"
+ - - ">="
  - !ruby/object:Gem::Version
- version: 4.2.9
+ version: 5.2.4.3
  - - "<"
  - !ruby/object:Gem::Version
  version: '6.0'
@@ -212,9 +212,9 @@ dependencies:
  type: :runtime
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
- - - ">"
+ - - ">="
  - !ruby/object:Gem::Version
- version: 4.2.9
+ version: 5.2.4.3
  - - "<"
  - !ruby/object:Gem::Version
  version: '6.0'
@@ -570,6 +570,8 @@ files:
  - dev-gooddata-sso.pub.encrypted
  - docker-compose.lcm.yml
  - docker-compose.yml
+ - gdc_fossa_lcm.yaml
+ - gdc_fossa_ruby_sdk.yaml
  - gooddata
  - gooddata.gemspec
  - k8s/charts/lcm-bricks/Chart.yaml