instana 1.215.1 → 1.216.0
This diff shows the changes between two publicly released versions of the package, as their contents appear in their respective public registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/.circleci/config.yml +83 -3
- data/.tekton/.currency/resources/requirements.txt +0 -1
- data/.tekton/.currency/scripts/generate_report.py +37 -130
- data/.tekton/README.md +278 -0
- data/.tekton/github-pr-eventlistener.yaml +1 -1
- data/.tekton/pipeline.yaml +96 -2
- data/.tekton/pipelinerun.yaml +2 -2
- data/.tekton/ruby-tracer-prepuller.yaml +4 -0
- data/.tekton/run_unittests.sh +27 -6
- data/.tekton/scheduled-eventlistener.yaml +1 -1
- data/.tekton/task.yaml +72 -0
- data/gemfiles/rails_61.gemfile +1 -0
- data/gemfiles/rails_70.gemfile +1 -0
- data/gemfiles/rails_71.gemfile +1 -0
- data/gemfiles/sequel_56.gemfile +16 -0
- data/gemfiles/sequel_57.gemfile +16 -0
- data/gemfiles/sequel_58.gemfile +16 -0
- data/lib/instana/activators/sequel.rb +20 -0
- data/lib/instana/config.rb +1 -0
- data/lib/instana/instrumentation/sequel.rb +42 -0
- data/lib/instana/tracing/span.rb +2 -2
- data/lib/instana/version.rb +1 -1
- data/test/activator_test.rb +1 -1
- data/test/instrumentation/rails_active_record_test.rb +1 -1
- data/test/instrumentation/sequel_test.rb +105 -0
- metadata +11 -8
- data/.tekton/.currency/currency-pipeline.yaml +0 -36
- data/.tekton/.currency/currency-pipelinerun.yaml +0 -20
- data/.tekton/.currency/currency-rbac.yaml +0 -29
- data/.tekton/.currency/currency-scheduled-eventlistener.yaml +0 -56
- data/.tekton/.currency/currency-tasks.yaml +0 -94
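Most of the churn above is CI plumbing, but the functional change is new Sequel instrumentation (`activators/sequel.rb`, `instrumentation/sequel.rb`, three `sequel_5x` gemfiles, and a test file). To inspect such a release diff locally, a generic recipe (not something the registry page itself provides) is:

```bash
# Fetch and unpack both released versions, then compare their contents.
gem fetch instana --version 1.215.1 && gem unpack instana-1.215.1.gem
gem fetch instana --version 1.216.0 && gem unpack instana-1.216.0.gem
diff -r instana-1.215.1/ instana-1.216.0/
```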
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: d537c1bb144d87403ff4f77159269e31e8af893fd31f6c942d5d6c00a2f89a4a
+  data.tar.gz: 81d978a21bd5bd84c1305851a34570a20c69dd794ccaef7081490db78037c5d4
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 423d36b04c2e7b6035b566d0bd9617dfd9c2d7700e8d0c8ae934ce2631703cdc4e875ac73a64b136aed2f511a07bb5cfdd257c881a293a2cb85b66bc1d347bd9
+  data.tar.gz: ef472837810e93803daef7776c0b0e664093ddb72cb44f149be2798c9f8ddc0d26eeefe25a9d01a5114e42fb578d050ddc75e03470d1735d1d34eecd5226baa5
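The hashes above cover the two archives packed inside the published `.gem` file; they can be cross-checked with standard tools (a sketch, independent of this diff page):

```bash
# A .gem file is a plain tar archive containing metadata.gz and data.tar.gz,
# so the published SHA256 values can be recomputed directly:
gem fetch instana --version 1.216.0
tar -xf instana-1.216.0.gem metadata.gz data.tar.gz
sha256sum metadata.gz data.tar.gz   # compare against checksums.yaml
```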
data/.circleci/config.yml
CHANGED
@@ -183,6 +183,43 @@ executors:
         environment:
           POSTGRES_PASSWORD: 'test'
           POSTGRES_DB: 'ci_test'
+  ruby_34:
+    docker:
+      - image: ruby:3.4.0-preview1-bookworm
+        environment:
+          MEMCACHED_HOST: '127.0.0.1:11211'
+          REDIS_URL: 'redis://127.0.0.1:6379'
+          DATABASE_URL: 'sqlite3::memory:'
+      - image: memcached
+      - image: redis
+      - image: amazon/dynamodb-local
+      - image: minio/minio:latest
+        command: ["server", "/data"]
+      - image: s12v/sns
+      - image: softwaremill/elasticmq-native
+      - image: mongo:5-focal
+  ruby_34_mysql2:
+    docker:
+      - image: ruby:3.4.0-preview1-bookworm
+        environment:
+          DATABASE_URL: "mysql2://root@127.0.0.1:3306/ci_test"
+      - image: mariadb
+        environment:
+          MYSQL_DATABASE: 'ci_test'
+          MYSQL_USER: 'root'
+          MYSQL_PASSWORD: ''
+          MYSQL_ALLOW_EMPTY_PASSWORD: 'yes'
+          MYSQL_ROOT_PASSWORD: ''
+          MYSQL_ROOT_HOST: '%'
+  ruby_34_postgres:
+    docker:
+      - image: ruby:3.4.0-preview1-bookworm
+        environment:
+          DATABASE_URL: "postgres://postgres:test@127.0.0.1:5432/ci_test"
+      - image: postgres
+        environment:
+          POSTGRES_PASSWORD: 'test'
+          POSTGRES_DB: 'ci_test'
   ruby_33_lint:
     docker:
       - image: cimg/ruby:3.3-node
@@ -319,12 +356,12 @@ jobs:
             bundle check || bundle install
       - run_rubocop
   report_coverage:
-    executor:
+    executor: ruby_33
     steps:
       - checkout
       - run_sonarqube
   publish:
-    executor:
+    executor: ruby_33
     steps:
       - checkout
       - setup
@@ -350,7 +387,8 @@ workflows:
                 - ruby_31
                 - ruby_32
                 - ruby_33
-
+                - ruby_34
+  libraries_ruby_30_32:
     jobs:
       - test_apprisal:
           matrix:
@@ -359,8 +397,26 @@ workflows:
                 - ruby_30
                 - ruby_31
                 - ruby_32
+              <<: *library_gemfile
+  libraries_ruby_33_34:
+    jobs:
+      - test_apprisal:
+          matrix:
+            parameters:
+              stack:
                 - ruby_33
+                - ruby_34
               <<: *library_gemfile
+          # Currently compiling the native extensions for `grpc`
+          # exceeds an internal RAM and/or CPU limit of CircleCI executor,
+          # and the job gets terminated with:
+          # `Received "killed" signal`
+          # Hence we exclude it here for the time being.
+          # TODO: Remove this exclusion as soon as pre-built binaries are available
+          # on https://rubygems.org/gems/grpc/versions
+          exclude:
+            - stack: ruby_34
+              gemfile: "./gemfiles/grpc_10.gemfile"
   rails_ruby_3:
     jobs:
       - test_apprisal:
@@ -376,12 +432,36 @@ workflows:
                 - ruby_32
                 - ruby_32_postgres
                 - ruby_32_mysql2
+                - ruby_33
                 - ruby_33_postgres
                 - ruby_33_mysql2
+                - ruby_34
+                - ruby_34_postgres
+                - ruby_34_mysql2
               gemfile:
                 - "./gemfiles/rails_61.gemfile"
                 - "./gemfiles/rails_70.gemfile"
                 - "./gemfiles/rails_71.gemfile"
+  sequel:
+    jobs:
+      - test_apprisal:
+          matrix:
+            parameters:
+              stack:
+                - ruby_30
+                - ruby_30_mysql2
+                - ruby_31
+                - ruby_31_mysql2
+                - ruby_32
+                - ruby_32_mysql2
+                - ruby_33
+                - ruby_33_mysql2
+                - ruby_34
+                - ruby_34_mysql2
+              gemfile:
+                - "./gemfiles/sequel_56.gemfile"
+                - "./gemfiles/sequel_57.gemfile"
+                - "./gemfiles/sequel_58.gemfile"
   report_coverage:
     jobs:
       - report_coverage
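The new `sequel` workflow runs the suite against three Sequel gemfile pins on every supported Ruby, with and without MySQL. A rough local equivalent of one matrix cell might look like this; the `BUNDLE_GEMFILE` mechanism is standard Bundler, but the `rake test` target is an assumption about the repo's Rakefile, not something this diff shows:

```bash
# Hypothetical reproduction of one cell of the `sequel` CI matrix.
export BUNDLE_GEMFILE=./gemfiles/sequel_56.gemfile
bundle install
bundle exec rake test   # rake target assumed
```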
data/.tekton/.currency/scripts/generate_report.py
CHANGED
@@ -1,4 +1,5 @@
 # Standard Libraries
+import glob
 import re
 from json import load
 
@@ -6,143 +7,49 @@ from json import load
 from requests import get
 from pandas import DataFrame
 from bs4 import BeautifulSoup
-from kubernetes import client, config
 
 JSON_FILE = "resources/table.json"
 REPORT_FILE = "docs/report.md"
 API_V1_ENDPOINT = "https://rubygems.org/api/v1/versions/"
 
-def
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    taskruns = tektonV1.list_namespaced_custom_object(
-        group,
-        version,
-        namespace,
-        plural,
-        label_selector=f"{group}/task={task_name}, triggers.tekton.dev/trigger=ruby-tracer-scheduled-pipeline-triggger",
-    )["items"]
-
-    return taskruns
-
-
-def process_taskrun_logs(
-    taskruns, core_v1_client, namespace, library, tekton_ci_output
-):
-    for tr in taskruns:
-        pod_name = tr["status"]["podName"]
-        taskrun_name = tr["metadata"]["name"]
-        logs = core_v1_client.read_namespaced_pod_log(
-            pod_name, namespace, container="step-unittest"
-        )
-        if "Installing" not in logs:
-            print(
-                f"Unable to retrieve logs from taskrun pod {pod_name} of taskrun {taskrun_name} for gem {library}."
-            )
-            continue
-
-        print(
-            f"Retrieving logs from taskrun pod {pod_name} of taskrun {taskrun_name} for gem {library}.."
-        )
-
-        match = re.search(f"Installing ({library} [^\s]+)", logs)
-        tekton_ci_output += f"{match[1]}\n"
-        break
-
-    return tekton_ci_output
-
-
-def get_tekton_ci_output():
-    config.load_incluster_config()
-
-    namespace = "default"
-    core_v1_client = client.CoreV1Api()
-
-    ruby_33_prefix = "unittest-default-ruby-33-"
-    ruby_31_prefix = "unittest-default-ruby-31-"
-
-    default_libraries_dict = {
-        "cuba": f"{ruby_33_prefix}1",
-        "excon": f"{ruby_33_prefix}4",
-        "graphql": f"{ruby_33_prefix}6",
-        "grpc": f"{ruby_33_prefix}7",
-        "rack": f"{ruby_33_prefix}10",
-        "rest-client": f"{ruby_33_prefix}11",
-        "roda": f"{ruby_33_prefix}13",
-        "sinatra": f"{ruby_33_prefix}16",
-        "net-http": f"{ruby_31_prefix}8",
+def get_bundle_install_output():
+
+    libraries_from_logs = {
+        "cuba": "cuba_40_ruby_3.3.",
+        "excon": "excon_100_ruby_3.3.",
+        "graphql": "graphql_20_ruby_3.3.",
+        "grpc": "grpc_10_ruby_3.3.",
+        "rack": "rack_30_ruby_3.3.",
+        "rest-client": "rest_client_20_ruby_3.3.",
+        "roda": "roda_30_ruby_3.3.",
+        "sinatra": "sinatra_40_ruby_3.3.",
+        "net-http": "net_http_01_ruby_3.1.",
+        "rails": "rails_71_sqlite3_ruby_3.3.",
+        "dalli": "dalli_32_ruby_3.3.",
+        "resque": "resque_20_ruby_3.3.",
+        "sidekiq": "sidekiq_70_ruby_3.3."
     }
 
-
-    task_name = "ruby-tracer-unittest-default-libraries-task"
-    default_taskruns = get_taskruns(namespace, task_name)
+    bundle_install_output = ""
 
-    for library, pattern in
-
-
-
-
-        filtered_default_taskruns = filter_taskruns(taskrun_filter, default_taskruns)
-
-        tekton_ci_output = process_taskrun_logs(
-            filtered_default_taskruns,
-            core_v1_client,
-            namespace,
-            library,
-            tekton_ci_output,
-        )
+    for library, pattern in libraries_from_logs.items():
+        glob_result = glob.glob(f"../../dep_{pattern}*")
+        if not glob_result:
+            print(f"Could not find bundle install log for gem '{library}'.")
+            continue
 
-
-
-            "pattern": "rails-postgres-11",
-            "task_name": "ruby-tracer-unittest-rails-postgres-task",
-        },
-        "dalli": {
-            "pattern": "memcached-11",
-            "task_name": "ruby-tracer-unittest-memcached-libraries-task",
-        },
-        "resque": {
-            "pattern": "unittest-redis-ruby-32-33-9",
-            "task_name": "ruby-tracer-unittest-redis-libraries-task",
-        },
-        "sidekiq": {
-            "pattern": "unittest-redis-ruby-32-33-18",
-            "task_name": "ruby-tracer-unittest-redis-libraries-task",
-        },
-    }
+        with open(glob_result[0], 'r') as file:
+            logs = file.read().replace('\n', ' ')
 
-
-
-
-
-
-
-
-        other_taskruns = get_taskruns(namespace, task_name)
-        filtered_other_taskruns = filter_taskruns(taskrun_filter, other_taskruns)
-
-        tekton_ci_output = process_taskrun_logs(
-            filtered_other_taskruns,
-            core_v1_client,
-            namespace,
-            library,
-            tekton_ci_output
-        )
+        if "Installing" not in logs:
+            print( f"Unable to retrieve logs from for gem '{library}'.")
+            continue
+
+        print(f"Retrieving currency for gem '{library}'.")
+        match = re.search(f"Installing ({library} [^\s]+)", logs)
+        bundle_install_output += f"{match[1]}\n"
 
-    return
+    return bundle_install_output
 
 
 def get_upstream_version(dependency):
@@ -161,11 +68,11 @@ def get_upstream_version(dependency):
     return latest_version
 
 
-def get_last_supported_version(
+def get_last_supported_version(bundle_install_output, dependency):
     """get up-to-date supported version"""
     pattern = r" ([^\s]+)"
 
-    last_supported_version = re.search(dependency + pattern,
+    last_supported_version = re.search(dependency + pattern, bundle_install_output, flags=re.I | re.M)
 
     return last_supported_version[1]
 
@@ -183,7 +90,7 @@ def main():
     with open(JSON_FILE) as file:
         data = load(file)
 
-
+    bundle_install_output = get_bundle_install_output()
 
     items = data["table"]
 
@@ -194,7 +101,7 @@ def main():
         latest_version = get_upstream_version(package)
 
         if not package in ["rails lts", "rails-api"]:
-            last_supported_version = get_last_supported_version(
+            last_supported_version = get_last_supported_version(bundle_install_output, package)
         else:
             last_supported_version = latest_version
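The rewrite drops the in-cluster Kubernetes log scraping entirely: instead of listing Tekton TaskRuns and reading pod logs, the script now globs for local `dep_<pattern>*` bundle-install logs and extracts versions with the same `Installing (<gem> …)` regex. The effect of that regex is easy to reproduce with plain shell tools (the log file name below is made up for illustration):

```bash
# Flatten the log to one line, as the script does, then pull out the
# "Installing <gem> <version>" token; the file name is hypothetical.
tr '\n' ' ' < dep_cuba_40_ruby_3.3.0.log | grep -oE 'Installing cuba [^ ]+'
```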
data/.tekton/README.md
ADDED
@@ -0,0 +1,278 @@
# Tekton CI for Instana Ruby Tracer

## Basic Tekton setup

### Get a cluster

What you will need:
* Full administrator access
* Enough RAM and CPU on a cluster node to run all the pods of a single `PipelineRun` on a single node.
  Multiple nodes increase the number of parallel `PipelineRun` instances.
  Currently one `PipelineRun` instance is capable of saturating an 8vCPU - 16GB RAM worker node.

### Setup Tekton on your cluster

1. Install the latest stable Tekton Pipeline release
   ```bash
   kubectl apply --filename https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml
   ```

2. Install Tekton Dashboard Full (the normal one is read-only and doesn't allow, for example, re-running).

   ````bash
   kubectl apply --filename https://storage.googleapis.com/tekton-releases/dashboard/latest/release-full.yaml
   ````

3. Access the dashboard

   ```bash
   kubectl proxy
   ```

   Once the proxy is active, navigate your browser to the [dashboard url](http://localhost:8001/api/v1/namespaces/tekton-pipelines/services/tekton-dashboard:http/proxy/)

### Setup the ruby-tracer-ci-pipeline

````bash
kubectl apply --filename task.yaml && kubectl apply --filename pipeline.yaml
````

### Run the pipeline manually

#### From the Dashboard
Navigate your browser to the [pipelineruns section of the dashboard](http://localhost:8001/api/v1/namespaces/tekton-pipelines/services/tekton-dashboard:http/proxy/#/pipelineruns)

1. Click `Create`
2. Select the `Namespace` (where the `Pipeline` resource is created; by default it is `default`)
3. Select the `Pipeline` created in `pipeline.yaml`; right now it is `ruby-tracer-ci-pipeline`
4. Fill in `Params`. The `revision` should be `master` for the `master` branch
5. Select the `ServiceAccount` set to `default`
6. Optionally, enter a `PipelineRun name`, for example `my-master-test-pipeline`;
   if you don't, the Dashboard will generate a unique one for you.
7. As long as [the known issue with Tekton Dashboard Workspace binding](https://github.com/tektoncd/dashboard/issues/1283) is not resolved,
   you have to go to `YAML Mode` and insert the workspace definition at the end of the file,
   with the exact same indentation:

   ````yaml
     workspaces:
       - name: ruby-tracer-ci-pipeline-pvc-$(params.revision)
         volumeClaimTemplate:
           spec:
             accessModes:
               - ReadWriteOnce
             resources:
               requests:
                 storage: 200Mi
   ````
8. Click `Create` at the bottom of the page

#### From kubectl CLI
As an alternative to using the Dashboard, you can manually edit `pipelinerun.yaml` and create it with:
````bash
kubectl apply --filename pipelinerun.yaml
````

### Cleanup PipelineRun and associated PV resources

`PipelineRun` and workspace `PersistentVolume` resources are by default kept indefinitely,
and repeated runs might exhaust the available resources, therefore they need to be cleaned up either
automatically or manually.

#### Manually from the Dashboard

Navigate to `PipelineRuns`, check the checkbox next to the pipelinerun,
and then click `Delete` in the upper right corner.

#### Manually from the CLI

You can use either `kubectl`
````bash
kubectl get pipelinerun
kubectl delete pipelinerun <selected-pipelinerun-here>
````

or the `tkn` CLI
````bash
tkn pipelinerun list
tkn pipelinerun delete <selected-pipelinerun-here>
````

#### Automatic cleanup with a cronjob

Install and configure the resources from https://github.com/3scale-ops/tekton-pipelinerun-cleaner, for example as sketched below.
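A minimal sketch of that installation, assuming the cleaner repo ships plain Kubernetes manifests (the `deploy/` path is a guess; the repo's own README is authoritative):

````bash
# Sketch only: clone the pipelinerun-cleaner and apply its manifests.
git clone https://github.com/3scale-ops/tekton-pipelinerun-cleaner.git
kubectl apply -f tekton-pipelinerun-cleaner/deploy/   # path assumed
````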

## Integrate with GitHub

### GitHub PR Trigger & PR Check API integration

The GitHub integration requires further Tekton Triggers and Interceptors to be installed:
````bash
kubectl apply --filename \
 https://storage.googleapis.com/tekton-releases/triggers/latest/release.yaml
kubectl apply --filename \
 https://storage.googleapis.com/tekton-releases/triggers/latest/interceptors.yaml
````
#### Create a ServiceAccount

Our future GitHub PR event listener needs a service account,
`tekton-triggers-eventlistener-serviceaccount`, which authorizes it to
perform the operations specified in the eventlistener `Role` and `ClusterRole`.
Create the service account with the needed role bindings:

````bash
kubectl apply --filename tekton-triggers-eventlistener-serviceaccount.yaml
````

#### Create the Secret for the GitHub repository webhook

In order to authorize the incoming webhooks into our cluster, we need to share
a secret between our webhook listener and the GitHub repo.
Generate a long, strong, randomly generated token and put it into `github-interceptor-secret.yaml`, as sketched below.
Create the secret resource:
````bash
kubectl apply --filename github-interceptor-secret.yaml
````
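One way to generate such a token (an illustration; the README does not prescribe a tool):

````bash
# 40 hex characters of cryptographically random data for the shared
# webhook secret referenced in github-interceptor-secret.yaml.
openssl rand -hex 20
````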

#### Create the Task and token to report PR Check status to GitHub

The GitHub PR specific Tekton pipeline will want to send data to report the `PR Check Status`.
That [GitHub API](https://docs.github.com/en/rest/commits/statuses?apiVersion=2022-11-28#create-a-commit-status) requires authentication, and therefore we need a token.
The user who generates the token has to have `Write` access in the target repo,
as part of the organisation. Check the repo access for this repo under
https://github.com/instana/ruby-sensor/settings/access.

With the proper user:
1. Navigate to https://github.com/settings/tokens
2. Click on the `Generate new token` dropdown and select `Generate new token (classic)`.
3. Fill in `Note` with, for example, `Tekton commit status`.
4. If you set an expiration, make sure you remember to renew the token after expiry.
5. Under `Select scopes` find `repo` and below that select only the checkbox next to `repo:status` - `Access commit status`,
   then click `Generate token`.
6. Create the kubernetes secret with the token:

   ````bash
   kubectl create secret generic githubtoken --from-literal token="MY_TOKEN"
   ````

We also make an HTTP POST with the status update data to GitHub.
This is done in a `Task` called `github-set-status`; create it as such:
````bash
kubectl apply -f github-set-status-task.yaml
````

#### Create the GitHub PR pipeline

Create the new pipeline, which executes the previously created `ruby-tracer-ci-pipeline`,
removes the currency report tasks and wraps the unittest jobs with GitHub Check status reporting tasks.
As long as [Pipelines in Pipelines](https://tekton.dev/docs/pipelines/pipelines-in-pipelines/) remains an
unimplemented `alpha` feature in Tekton,
we will need [yq](https://github.com/mikefarah/yq) (at least `4.0`)
to pull the tasks from our previous `ruby-tracer-ci-pipeline` into the
new pipeline `github-pr-ruby-tracer-ci-pipeline`.

````bash
(cat github-pr-pipeline.yaml.part && yq '{"a": {"b": .spec.tasks}}' pipeline.yaml | tail --lines=+3 | head --lines=-16) | kubectl apply -f -
````

#### Create the GitHub PR Event Listener, TriggerTemplate and TriggerBinding

Once the new GitHub specific pipeline is created, we need a listener which starts
a new `PipelineRun` based on GitHub events.
For security reasons the listener will only trigger if the PR owner is a
repo owner. PRs from non-repo owners can be allowed in by owners after an initial review
with a comment on the pull request that contains `/ok-to-test`.
More about this feature in the [tekton triggers documentation](https://github.com/tektoncd/triggers/blob/main/docs/interceptors.md#owners-validation-for-pull-requests).

````bash
kubectl apply --filename github-pr-eventlistener.yaml
````

After this, ensure that there is a pod and a service created:

````bash
kubectl get pod | grep -i el-github-pr-eventlistener
kubectl get svc | grep -i el-github-pr-eventlistener
````

Do not continue if any of these is missing.

#### Create the Ingress for the GitHub Webhook to come through

You will need an ingress controller for this.
On IKS you might want to read these resources:
* [managed ingress](https://cloud.ibm.com/docs/containers?topic=containers-managed-ingress-about)
* Or the unmanaged [ingress controller howto](https://github.com/IBM-Cloud/iks-ingress-controller/blob/master/docs/installation.md).

1. Check the available `ingressclass` resources on your cluster

   ````bash
   kubectl get ingressclass
   ````

   * On `IKS` it will be `public-iks-k8s-nginx`.
   * On `EKS` with the `ALB` ingress controller, it might be just `alb`.
   * On a self-hosted [nginx controller](https://kubernetes.github.io/ingress-nginx/deploy/)
     it might just be `nginx`.

   Edit and save the value of `ingressClassName:` in `github-webhook-ingress.yaml`.

2. Find out your Ingress domain or subdomain name.

   * On `IKS`, go to `Clusters`, select your cluster and then click `Overview`.
     The domain name is listed under `Ingress subdomain`.

   and create the resource:

   ````bash
   kubectl apply --filename github-webhook-ingress.yaml
   ````

Make sure that you can use the ingress with the `/hooks` path via `https`:
````bash
curl https://<INGRESS_DOMAIN_NAME>/hooks
````

At this point this should respond with:
```json
{
  "eventListener":"github-pr-eventlistener",
  "namespace":"default",
  "eventListenerUID":"",
  "errorMessage":"Invalid event body format : unexpected end of JSON input"
}
```

#### Setup the webhook on GitHub

In the GitHub repo go to `Settings` -> `Webhooks` and click `Add Webhook`.
The fields we need to set are:
* `Payload URL`: `https://<INGRESS_DOMAIN_NAME>/hooks`
* `Content type`: application/json
* `Secret`: XXXXXXX (the secret token from github-interceptor-secret.yaml)

Under `SSL verification` select the radio button for `Enable SSL verification`.
Under `Which events would you like to trigger this webhook?` select
the radio button for `Let me select individual events.`, tick the checkbox next to
`Pull requests` and ensure that the rest are unticked.

Click `Add webhook`.

If the webhook has been set up correctly, then GitHub sends a ping message.
Ensure that the ping is received from GitHub, and that it is filtered out so
a simple ping event does not trigger any `PipelineRun` unnecessarily.

````bash
eventlistener_pod=$(kubectl get pods -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep el-github-pr-ruby)
kubectl logs "${eventlistener_pod}" | grep 'event type ping is not allowed'
````