ConfigLMM 0.4.0 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +34 -0
- data/CNAME +1 -0
- data/Examples/.lmm.state.yaml +159 -0
- data/Examples/ConfigLMM.mm.yaml +32 -0
- data/Examples/Implemented.mm.yaml +252 -4
- data/Examples/SmallBusiness.mm.yaml +492 -0
- data/Plugins/Apps/Answer/answer.lmm.rb +165 -0
- data/Plugins/Apps/Answer/answer@.service +40 -0
- data/Plugins/Apps/ArchiSteamFarm/ArchiSteamFarm.conf.erb +0 -3
- data/Plugins/Apps/ArchiSteamFarm/ArchiSteamFarm.lmm.rb +0 -1
- data/Plugins/Apps/Authentik/Authentik-ProxyOutpost.container +7 -1
- data/Plugins/Apps/Authentik/Authentik-Server.container +6 -1
- data/Plugins/Apps/Authentik/Authentik-Worker.container +6 -1
- data/Plugins/Apps/Authentik/Authentik.conf.erb +12 -7
- data/Plugins/Apps/Authentik/Authentik.lmm.rb +226 -61
- data/Plugins/Apps/BookStack/BookStack.conf.erb +0 -3
- data/Plugins/Apps/BookStack/BookStack.container +5 -0
- data/Plugins/Apps/BookStack/BookStack.lmm.rb +14 -3
- data/Plugins/Apps/Cassandra/Cassandra.lmm.rb +9 -19
- data/Plugins/Apps/ClickHouse/ClickHouse.container +28 -0
- data/Plugins/Apps/ClickHouse/ClickHouse.lmm.rb +113 -0
- data/Plugins/Apps/ClickHouse/Config/listen.yaml +2 -0
- data/Plugins/Apps/ClickHouse/Config/logger.yaml +8 -0
- data/Plugins/Apps/ClickHouse/Config/zookeepers.yaml +5 -0
- data/Plugins/Apps/ClickHouse/Connection.rb +96 -0
- data/Plugins/Apps/Discourse/Discourse-Sidekiq.container +5 -0
- data/Plugins/Apps/Discourse/Discourse.conf.erb +1 -4
- data/Plugins/Apps/Discourse/Discourse.container +4 -0
- data/Plugins/Apps/Discourse/Discourse.lmm.rb +116 -55
- data/Plugins/Apps/Dovecot/Dovecot.lmm.rb +74 -62
- data/Plugins/Apps/ERPNext/ERPNext-Frontend.container +6 -1
- data/Plugins/Apps/ERPNext/ERPNext-Queue.container +5 -0
- data/Plugins/Apps/ERPNext/ERPNext-Scheduler.container +5 -0
- data/Plugins/Apps/ERPNext/ERPNext-Websocket.container +6 -1
- data/Plugins/Apps/ERPNext/ERPNext.container +6 -1
- data/Plugins/Apps/ERPNext/ERPNext.lmm.rb +138 -127
- data/Plugins/Apps/GitLab/GitLab.container +6 -0
- data/Plugins/Apps/GitLab/GitLab.lmm.rb +43 -49
- data/Plugins/Apps/Homepage/Homepage.conf.erb +86 -0
- data/Plugins/Apps/Homepage/Homepage.container +19 -0
- data/Plugins/Apps/Homepage/Homepage.lmm.rb +54 -0
- data/Plugins/Apps/IPFS/IPFS.conf.erb +0 -3
- data/Plugins/Apps/IPFS/IPFS.lmm.rb +0 -1
- data/Plugins/Apps/InfluxDB/InfluxDB.conf.erb +0 -3
- data/Plugins/Apps/InfluxDB/InfluxDB.lmm.rb +0 -1
- data/Plugins/Apps/Jackett/Jackett.conf.erb +0 -3
- data/Plugins/Apps/Jackett/Jackett.lmm.rb +0 -1
- data/Plugins/Apps/Jellyfin/Jellyfin.conf.erb +0 -3
- data/Plugins/Apps/Jellyfin/Jellyfin.lmm.rb +0 -1
- data/Plugins/Apps/LetsEncrypt/LetsEncrypt.lmm.rb +49 -28
- data/Plugins/Apps/LibreTranslate/LibreTranslate.container +21 -0
- data/Plugins/Apps/LibreTranslate/LibreTranslate.lmm.rb +34 -0
- data/Plugins/Apps/Lobsters/Containerfile +81 -0
- data/Plugins/Apps/Lobsters/Lobsters-Tasks.container +26 -0
- data/Plugins/Apps/Lobsters/Lobsters.conf.erb +99 -0
- data/Plugins/Apps/Lobsters/Lobsters.container +27 -0
- data/Plugins/Apps/Lobsters/Lobsters.lmm.rb +196 -0
- data/Plugins/Apps/Lobsters/crontab +3 -0
- data/Plugins/Apps/Lobsters/database.yml +26 -0
- data/Plugins/Apps/Lobsters/entrypoint.sh +30 -0
- data/Plugins/Apps/Lobsters/generateCredentials.rb +19 -0
- data/Plugins/Apps/Lobsters/lobsters-cron.sh +25 -0
- data/Plugins/Apps/Lobsters/lobsters-daily.sh +23 -0
- data/Plugins/Apps/Lobsters/puma.rb +49 -0
- data/Plugins/Apps/MariaDB/Connection.rb +55 -0
- data/Plugins/Apps/MariaDB/MariaDB.lmm.rb +60 -53
- data/Plugins/Apps/Mastodon/Mastodon-Sidekiq.container +22 -0
- data/Plugins/Apps/Mastodon/Mastodon-Streaming.container +20 -0
- data/Plugins/Apps/Mastodon/Mastodon.conf.erb +34 -45
- data/Plugins/Apps/Mastodon/Mastodon.container +28 -0
- data/Plugins/Apps/Mastodon/Mastodon.lmm.rb +240 -5
- data/Plugins/Apps/Mastodon/configlmm.rake +30 -0
- data/Plugins/Apps/Mastodon/entrypoint.sh +16 -0
- data/Plugins/Apps/Matrix/Element.container +5 -0
- data/Plugins/Apps/Matrix/Matrix.conf.erb +2 -8
- data/Plugins/Apps/Matrix/Matrix.lmm.rb +100 -71
- data/Plugins/Apps/Matrix/Synapse.container +5 -0
- data/Plugins/Apps/Netdata/Netdata.conf.erb +0 -3
- data/Plugins/Apps/Netdata/Netdata.lmm.rb +0 -1
- data/Plugins/Apps/Nextcloud/Nextcloud.conf.erb +3 -4
- data/Plugins/Apps/Nextcloud/Nextcloud.lmm.rb +150 -68
- data/Plugins/Apps/Nextcloud/autoconfig.php +13 -0
- data/Plugins/Apps/Nextcloud/config.php +10 -1
- data/Plugins/Apps/Nextcloud/nextcloudcron.service +8 -0
- data/Plugins/Apps/Nextcloud/nextcloudcron.timer +10 -0
- data/Plugins/Apps/Nginx/Connection.rb +93 -0
- data/Plugins/Apps/Nginx/conf.d/configlmm.conf +50 -9
- data/Plugins/Apps/Nginx/conf.d/languages.conf +21 -0
- data/Plugins/Apps/Nginx/config-lmm/errors.conf +25 -20
- data/Plugins/Apps/Nginx/config-lmm/gateway-errors.conf +20 -0
- data/Plugins/Apps/Nginx/config-lmm/proxy.conf +1 -1
- data/Plugins/Apps/Nginx/main.conf.erb +7 -3
- data/Plugins/Apps/Nginx/nginx.conf +2 -2
- data/Plugins/Apps/Nginx/nginx.lmm.rb +99 -81
- data/Plugins/Apps/Nginx/proxy.conf.erb +11 -3
- data/Plugins/Apps/Odoo/Odoo.conf.erb +0 -3
- data/Plugins/Apps/Odoo/Odoo.container +5 -0
- data/Plugins/Apps/Odoo/Odoo.lmm.rb +4 -5
- data/Plugins/Apps/Ollama/Ollama.container +26 -0
- data/Plugins/Apps/Ollama/Ollama.lmm.rb +73 -0
- data/Plugins/Apps/OpenTelemetry/Config/config.yaml +704 -0
- data/Plugins/Apps/OpenTelemetry/OpenTelemetry.lmm.rb +154 -0
- data/Plugins/Apps/OpenVidu/Ingress.container +5 -0
- data/Plugins/Apps/OpenVidu/OpenVidu.conf.erb +0 -3
- data/Plugins/Apps/OpenVidu/OpenVidu.container +5 -0
- data/Plugins/Apps/OpenVidu/OpenVidu.lmm.rb +7 -3
- data/Plugins/Apps/OpenVidu/OpenViduCall.conf.erb +0 -3
- data/Plugins/Apps/OpenVidu/OpenViduCall.container +5 -0
- data/Plugins/Apps/PHP-FPM/Connection.rb +91 -0
- data/Plugins/Apps/PHP-FPM/PHP-FPM.lmm.rb +31 -4
- data/Plugins/Apps/Peppermint/Peppermint.conf.erb +2 -5
- data/Plugins/Apps/Peppermint/Peppermint.container +5 -0
- data/Plugins/Apps/Peppermint/Peppermint.lmm.rb +29 -33
- data/Plugins/Apps/Perplexica/Perplexica.container +25 -0
- data/Plugins/Apps/Perplexica/Perplexica.lmm.rb +92 -0
- data/Plugins/Apps/Perplexica/config.toml +26 -0
- data/Plugins/Apps/Podman/Connection.rb +24 -0
- data/Plugins/Apps/Podman/Podman.lmm.rb +80 -0
- data/Plugins/Apps/Podman/storage.conf +6 -0
- data/Plugins/Apps/Postfix/Postfix.lmm.rb +242 -164
- data/Plugins/Apps/PostgreSQL/Connection.rb +97 -0
- data/Plugins/Apps/PostgreSQL/PostgreSQL.lmm.rb +184 -148
- data/Plugins/Apps/Pterodactyl/Pterodactyl.conf.erb +0 -3
- data/Plugins/Apps/Pterodactyl/Pterodactyl.lmm.rb +0 -2
- data/Plugins/Apps/Pterodactyl/Wings.conf.erb +0 -3
- data/Plugins/Apps/RVM/RVM.lmm.rb +57 -0
- data/Plugins/Apps/Roundcube/Roundcube.conf.erb +0 -3
- data/Plugins/Apps/Roundcube/Roundcube.lmm.rb +15 -19
- data/Plugins/Apps/SSH/SSH.lmm.rb +9 -15
- data/Plugins/Apps/SearXNG/SearXNG.container +22 -0
- data/Plugins/Apps/SearXNG/SearXNG.lmm.rb +79 -0
- data/Plugins/Apps/SearXNG/limiter.toml +40 -0
- data/Plugins/Apps/SearXNG/settings.yml +2 -0
- data/Plugins/Apps/SigNoz/Config/alerts.yml +11 -0
- data/Plugins/Apps/SigNoz/Config/otel-collector-config.yaml +110 -0
- data/Plugins/Apps/SigNoz/Config/otel-collector-opamp-config.yaml +1 -0
- data/Plugins/Apps/SigNoz/Config/prometheus.yml +18 -0
- data/Plugins/Apps/SigNoz/SigNoz-Collector.container +23 -0
- data/Plugins/Apps/SigNoz/SigNoz-Migrator.container +17 -0
- data/Plugins/Apps/SigNoz/SigNoz.conf.erb +61 -0
- data/Plugins/Apps/SigNoz/SigNoz.container +26 -0
- data/Plugins/Apps/SigNoz/SigNoz.lmm.rb +319 -0
- data/Plugins/Apps/Solr/log4j2.xml +89 -0
- data/Plugins/Apps/Solr/solr.lmm.rb +82 -0
- data/Plugins/Apps/Sunshine/Sunshine.conf.erb +0 -3
- data/Plugins/Apps/Sunshine/Sunshine.lmm.rb +0 -1
- data/Plugins/Apps/Tunnel/tunnel.lmm.rb +33 -37
- data/Plugins/Apps/UVdesk/UVdesk.conf.erb +0 -3
- data/Plugins/Apps/Umami/Umami.container +19 -0
- data/Plugins/Apps/Umami/Umami.lmm.rb +108 -0
- data/Plugins/Apps/Valkey/Valkey.lmm.rb +54 -42
- data/Plugins/Apps/Vaultwarden/Vaultwarden.conf.erb +9 -6
- data/Plugins/Apps/Vaultwarden/Vaultwarden.container +7 -1
- data/Plugins/Apps/Vaultwarden/Vaultwarden.lmm.rb +64 -29
- data/Plugins/Apps/Wiki.js/Wiki.js.conf.erb +1 -4
- data/Plugins/Apps/Wiki.js/Wiki.js.container +5 -0
- data/Plugins/Apps/Wiki.js/Wiki.js.lmm.rb +31 -37
- data/Plugins/Apps/YaCy/YaCy.conf.erb +93 -0
- data/Plugins/Apps/YaCy/YaCy.container +21 -0
- data/Plugins/Apps/YaCy/YaCy.lmm.rb +160 -0
- data/Plugins/Apps/ZooKeeper/ZooKeeper.container +24 -0
- data/Plugins/Apps/ZooKeeper/ZooKeeper.lmm.rb +68 -0
- data/Plugins/Apps/bitmagnet/bitmagnet.conf.erb +0 -3
- data/Plugins/Apps/bitmagnet/bitmagnet.lmm.rb +0 -1
- data/Plugins/Apps/gollum/gollum.conf.erb +2 -4
- data/Plugins/Apps/gollum/gollum.container +6 -0
- data/Plugins/Apps/gollum/gollum.lmm.rb +51 -50
- data/Plugins/Apps/llama.cpp/llama.cpp.container +28 -0
- data/Plugins/Apps/llama.cpp/llama.cpp.lmm.rb +90 -0
- data/Plugins/Apps/vLLM/vLLM.container +32 -0
- data/Plugins/Apps/vLLM/vLLM.lmm.rb +89 -0
- data/Plugins/OS/General/Utils.lmm.rb +26 -0
- data/Plugins/OS/Linux/Connection.rb +472 -0
- data/Plugins/OS/Linux/Debian/preseed.cfg.erb +25 -6
- data/Plugins/OS/Linux/Flavours.yaml +13 -0
- data/Plugins/OS/Linux/Grub/grub.cfg +10 -0
- data/Plugins/OS/Linux/HTTP.rb +32 -0
- data/Plugins/OS/Linux/Linux.lmm.rb +533 -187
- data/Plugins/OS/Linux/Packages.yaml +20 -1
- data/Plugins/OS/Linux/Services.yaml +8 -0
- data/Plugins/OS/Linux/Shell.rb +70 -0
- data/Plugins/OS/Linux/Syslinux/default +8 -0
- data/Plugins/OS/Linux/WireGuard/WireGuard.lmm.rb +83 -59
- data/Plugins/OS/Linux/WireGuard/wg0.conf.erb +3 -0
- data/Plugins/OS/Linux/openSUSE/autoinst.xml.erb +29 -3
- data/Plugins/OS/Linux/systemd/systemd.lmm.rb +13 -11
- data/Plugins/OS/Routers/Aruba/ArubaInstant.lmm.rb +6 -5
- data/Plugins/Platforms/GitHub.lmm.rb +73 -28
- data/Plugins/Platforms/GoDaddy/GoDaddy.lmm.rb +9 -6
- data/Plugins/Platforms/Proxmox/Proxmox.lmm.rb +402 -0
- data/Plugins/Platforms/Proxmox/XTerm.rb +321 -0
- data/Plugins/Platforms/libvirt/libvirt.lmm.rb +38 -13
- data/Plugins/Platforms/porkbun.lmm.rb +12 -2
- data/Plugins/Platforms/porkbun_spec.rb +2 -2
- data/Plugins/Services/DNS/AmberBit.lmm.rb +1 -1
- data/Plugins/Services/DNS/ArubaItDNS.lmm.rb +1 -1
- data/Plugins/Services/DNS/NICLV.lmm.rb +1 -1
- data/Plugins/Services/DNS/PowerDNS.lmm.rb +70 -68
- data/Plugins/Services/DNS/tonic.lmm.rb +22 -12
- data/lib/ConfigLMM/Framework/plugins/dns.rb +4 -3
- data/lib/ConfigLMM/Framework/plugins/linuxApp.rb +145 -184
- data/lib/ConfigLMM/Framework/plugins/nginxApp.rb +34 -17
- data/lib/ConfigLMM/Framework/plugins/plugin.rb +53 -181
- data/lib/ConfigLMM/Framework/plugins/store.rb +4 -4
- data/lib/ConfigLMM/Framework/variables.rb +75 -0
- data/lib/ConfigLMM/Framework.rb +1 -0
- data/lib/ConfigLMM/cli.rb +12 -6
- data/lib/ConfigLMM/commands/configsCommand.rb +37 -6
- data/lib/ConfigLMM/commands/diff.rb +33 -9
- data/lib/ConfigLMM/context.rb +22 -3
- data/lib/ConfigLMM/io/configList.rb +82 -6
- data/lib/ConfigLMM/io/connection.rb +143 -0
- data/lib/ConfigLMM/io/dhcp.rb +330 -0
- data/lib/ConfigLMM/io/http.rb +78 -0
- data/lib/ConfigLMM/io/local.rb +207 -0
- data/lib/ConfigLMM/io/pxe.rb +92 -0
- data/lib/ConfigLMM/io/ssh.rb +156 -0
- data/lib/ConfigLMM/io/tftp.rb +105 -0
- data/lib/ConfigLMM/io.rb +2 -0
- data/lib/ConfigLMM/secrets/envStore.rb +39 -0
- data/lib/ConfigLMM/secrets/fileStore.rb +43 -0
- data/lib/ConfigLMM/state.rb +2 -1
- data/lib/ConfigLMM/version.rb +2 -1
- data/lib/ConfigLMM.rb +1 -0
- data/{Examples → scripts}/configlmmAuth.sh +7 -5
- metadata +205 -8
data/Plugins/Apps/SigNoz/SigNoz.lmm.rb (new file)

```diff
@@ -0,0 +1,319 @@
+require 'yaml'
+require 'uri'
+
+module ConfigLMM
+    module LMM
+        class SigNoz < Framework::Plugin
+
+            VERSION = 'v0.79.1'
+            COLLECTOR_VERSION = 'v0.111.39'
+
+            USER = 'signoz'
+            COLLECTOR_USER = 'signoz-collector'
+            FRONTEND_USER = 'signoz-frontend'
+            HOME_DIR = '/var/lib/signoz'
+            COLLECTOR_HOME_DIR = '/var/lib/signoz-collector'
+            FRONTEND_HOME_DIR = '/var/lib/signoz-frontend'
+            HOST_IP = '10.0.2.2'
+            DB_ANALYTICS = 'signoz_analytics'
+            DB_METADATA = 'signoz_metadata'
+            DB_TRACES = 'signoz_traces'
+            DB_METRICS = 'signoz_metrics'
+            DB_LOGS = 'signoz_logs'
+
+            def actionSigNozDeploy(id, target, activeState, context, options)
+                self.withConnection(target['Location'], target) do |connection|
+                    Linux.withConnection(connection) do |linuxConnection|
+                        if !target.key?('Proxy') || target['Proxy'] == false
+                            deploySigNozService(linuxConnection, target, activeState, context, options)
+                        end
+
+                        deploySigNozProxy(id, linuxConnection, target, activeState, context, options)
+
+                        if !target.key?('Proxy') || target['Proxy'] == false
+                            linuxConnection.reloadUserServices(USER, options)
+                            linuxConnection.restartUserService(USER, 'SigNoz-Migrator', options)
+                            linuxConnection.restartUserService(USER, 'SigNoz', options)
+                        end
+                    end
+                end
+            end
+
+            def deploySigNozService(linuxConnection, target, activeState, context, options)
+                Podman.ensurePresent(linuxConnection, options)
+                username, password = self.configureClickHouseSigNoz(target, linuxConnection, activeState, context, options)
+                Podman.createUser(USER, HOME_DIR, 'SigNoz', linuxConnection, options)
+                linuxConnection.withUserShell(USER) do |shell|
+                    shell.createDirs(options, '~/data', '~/config/dashboards')
+                end
+
+                path = Podman.containersPath(HOME_DIR)
+
+                dbUrl = self.class.buildEndpoint(target['Database']['HostName'],
+                                                 target['Database']['Port'],
+                                                 '',
+                                                 username,
+                                                 password)
+
+                jwtSecret = SecureRandom.alphanumeric(30)
+
+                linuxConnection.fileWrite("#{path}/SigNoz.env", 'TELEMETRY_ENABLED=false', options)
+                linuxConnection.fileAppend("#{path}/SigNoz.env", 'SIGNOZ_JWT_SECRET=' + jwtSecret, options)
+                linuxConnection.fileAppend("#{path}/SigNoz.env", 'SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse', options)
+                linuxConnection.fileAppend("#{path}/SigNoz.env", "SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=#{dbUrl}", options)
+
+                if target['SMTP'] && target['SMTP']['Host']
+                    port = target['SMTP']['Port']
+                    port = 25 unless port
+                    linuxConnection.fileAppend(path + '/SigNoz.env', 'SIGNOZ_ALERTMANAGER_SIGNOZ_GLOBAL_SMTP__SMARTHOST=' + target['SMTP']['Host'] + ':' + port.to_s, options)
+                    if target['SMTP']['Username']
+                        linuxConnection.fileAppend(path + '/SigNoz.env', 'SIGNOZ_ALERTMANAGER_SIGNOZ_GLOBAL_SMTP__AUTH__USERNAME=' + target['SMTP']['Username'], options)
+                    end
+                    if target['SMTP']['SecretId']
+                        smtpPassword = context.secrets.load(target['SMTP']['SecretId'], target['SMTP']['Username'].upcase + '_PASSWORD')
+                        linuxConnection.fileAppend(path + '/SigNoz.env', 'SIGNOZ_ALERTMANAGER_SIGNOZ_GLOBAL_SMTP__AUTH__PASSWORD=' + smtpPassword.to_s, { **options, hide: true })
+                    end
+                    if target['SMTP']['FromAddress']
+                        linuxConnection.fileAppend(path + '/SigNoz.env', 'SIGNOZ_ALERTMANAGER_SIGNOZ_GLOBAL_SMTP__FROM=' + target['SMTP']['Username'], options)
+                    end
+                end
+
+                linuxConnection.setUserGroup("#{path}/SigNoz.env", USER, USER, options)
+                linuxConnection.setPrivate("#{path}/SigNoz.env", options)
+
+                config = YAML.load_file(__dir__ + '/Config/prometheus.yml')
+                config['remote_read'].first['url'] = self.class.buildEndpoint(target['Database']['HostName'],
+                                                                              target['Database']['Port'],
+                                                                              DB_METRICS)
+
+                configFile = options['output'] + '/prometheus.yml'
+                File.write(configFile, config.to_yaml)
+
+                linuxConnection.upload(__dir__ + '/SigNoz.container', path, options)
+                linuxConnection.upload(__dir__ + '/SigNoz-Migrator.container', path, options)
+                linuxConnection.upload(configFile, HOME_DIR + '/config', options)
+                linuxConnection.upload(__dir__ + '/Config/alerts.yml', HOME_DIR + '/config', options)
+
+                linuxConnection.fileReplace("#{path}/SigNoz.container", '\$VERSION', VERSION, options)
+                linuxConnection.fileReplace("#{path}/SigNoz-Migrator.container", '\$VERSION', COLLECTOR_VERSION, options)
+                linuxConnection.fileReplace("#{path}/SigNoz-Migrator.container", '\$DSN', dbUrl, { **options, hide: true })
+            end
+
+            def deploySigNozProxy(id, linuxConnection, target, activeState, context, options)
+                if !target.key?('Proxy') || target['Proxy']
+                    raise Framework::PluginProcessError.new('Domain field must be set!') unless target['Domain']
+                    Nginx.withConnection(linuxConnection) do |nginxConnection|
+                        target['Server'] = '127.0.0.1:18600' unless target['Server']
+                        target['Server'] += ':18600' unless target['Server'].include?(':')
+                        target['ConfigName'] = target['Name']
+                        nginxConnection.provision(__dir__, 'SigNoz', target, activeState, context, options)
+                    end
+                elsif target.key?('Proxy') && target['Proxy'] == false
+                    path = Podman.containersPath(HOME_DIR)
+                    linuxConnection.fileReplace("#{path}/SigNoz.container", 'PublishPort=127.0.0.1:18600:', 'PublishPort=0.0.0.0:18600:', options)
+                    linuxConnection.firewallAddPort('18600/tcp', options)
+                end
+            end
+
+            def actionSigNozCollectorDeploy(id, target, activeState, context, options)
+                self.withConnection(target['Location'], target) do |connection|
+                    Linux.withConnection(connection) do |linuxConnection|
+                        Podman.ensurePresent(linuxConnection, options)
+                        username, password = self.configureClickHouseCollector(target, linuxConnection, activeState, context, options)
+                        Podman.createUser(COLLECTOR_USER, COLLECTOR_HOME_DIR, 'SigNoz Collector', linuxConnection, options)
+
+                        config = YAML.load_file(__dir__ + '/Config/otel-collector-config.yaml')
+
+                        config['exporters']['clickhousetraces']['datasource'] = self.class.buildEndpoint(target['Database']['HostName'],
+                                                                                                         target['Database']['Port'],
+                                                                                                         DB_TRACES,
+                                                                                                         username,
+                                                                                                         password)
+
+                        config['exporters']['clickhousemetricswrite']['endpoint'] = self.class.buildEndpoint(target['Database']['HostName'],
+                                                                                                             target['Database']['Port'],
+                                                                                                             DB_METRICS,
+                                                                                                             username,
+                                                                                                             password)
+
+                        config['exporters']['clickhousemetricswrite/prometheus']['endpoint'] = self.class.buildEndpoint(target['Database']['HostName'],
+                                                                                                                        target['Database']['Port'],
+                                                                                                                        DB_METRICS,
+                                                                                                                        username,
+                                                                                                                        password)
+
+                        config['exporters']['signozclickhousemetrics']['dsn'] = self.class.buildEndpoint(target['Database']['HostName'],
+                                                                                                         target['Database']['Port'],
+                                                                                                         DB_METRICS,
+                                                                                                         username,
+                                                                                                         password)
+
+                        config['exporters']['clickhouselogsexporter']['dsn'] = self.class.buildEndpoint(target['Database']['HostName'],
+                                                                                                        target['Database']['Port'],
+                                                                                                        DB_LOGS,
+                                                                                                        username,
+                                                                                                        password)
+
+                        configFile = options['output'] + '/otel-collector-config.yaml'
+                        File.write(configFile, config.to_yaml)
+
+                        linuxConnection.upload(__dir__ + '/SigNoz-Collector.container', Podman.containersPath(COLLECTOR_HOME_DIR), options)
+                        linuxConnection.upload(configFile, COLLECTOR_HOME_DIR, options)
+                        linuxConnection.upload(__dir__ + '/Config/otel-collector-opamp-config.yaml', COLLECTOR_HOME_DIR, options)
+
+                        path = Podman.containersPath(COLLECTOR_HOME_DIR)
+                        linuxConnection.fileReplace("#{path}/SigNoz-Collector.container", '\$VERSION', COLLECTOR_VERSION, options)
+
+                        if target['Listen']
+                            linuxConnection.fileReplace("#{path}/SigNoz-Collector.container", 'PublishPort=127.0.0.1:', "PublishPort=#{target['Listen']}:", options)
+                            linuxConnection.firewallAddPort('4317/tcp', options)
+                            linuxConnection.firewallAddPort('4318/tcp', options)
+                        end
+
+                        linuxConnection.reloadUserServices(COLLECTOR_USER, options)
+                        linuxConnection.restartUserService(COLLECTOR_USER, 'SigNoz-Collector', options)
+                    end
+                end
+            end
+
+            def configureClickHouseSigNoz(target, linuxConnection, activeState, context, options)
+                target['Database'] ||= {}
+                ClickHouse.defaults(target['Database'])
+                username = target['Database']['Username'] || context.secrets.load(target['SecretId'], 'CLICKHOUSE_USERNAME') || USER
+                context.secrets.store(target['SecretId'], 'CLICKHOUSE_USERNAME', username)
+                password = context.secrets.load(target['SecretId'], 'CLICKHOUSE_PASSWORD')
+                if password.nil?
+                    password = SecureRandom.alphanumeric(20)
+                    context.secrets.store(target['SecretId'], 'CLICKHOUSE_PASSWORD', password)
+                end
+
+                ClickHouse.withConnection(target['Database'], linuxConnection, context.secrets, options) do |connection|
+                    connection.createUser(username, password, nil, options)
+                    connection.createDB(DB_ANALYTICS, ClickHouse::DEFAULT_CLUSTER, options)
+                    connection.createDB(DB_METADATA, ClickHouse::DEFAULT_CLUSTER, options)
+                    connection.grantDB('ALL', username, DB_ANALYTICS, nil, options)
+                    connection.grantDB('ALL', username, DB_METADATA, nil, options)
+                    connection.grantDB('ALL', username, DB_TRACES, nil, options)
+                    connection.grantDB('ALL', username, DB_METRICS, nil, options)
+                    connection.grantDB('ALL', username, DB_LOGS, nil, options)
+                    connection.grant('SELECT', username, 'system.clusters', nil, options)
+                    connection.grant('SELECT', username, 'system.distributed_ddl_queue', nil, options)
+                    connection.grant('SELECT', username, 'system.disks', nil, options)
+                    connection.grantCluster(username, nil, options)
+                    connection.grantRemote(username, nil, options)
+                end
+
+                if target['Database']['HostName'] == 'localhost'
+                    target['Database']['HostName'] = HOST_IP
+                end
+                [username, password]
+            end
+
+            def configureClickHouseCollector(target, linuxConnection, activeState, context, options)
+                target['Database'] ||= {}
+                ClickHouse.defaults(target['Database'])
+                username = target['Database']['Username'] || context.secrets.load(target['SecretId'], 'CLICKHOUSE_USERNAME') || 'otel'
+                context.secrets.store(target['SecretId'], 'CLICKHOUSE_USERNAME', username)
+                password = context.secrets.load(target['SecretId'], 'CLICKHOUSE_PASSWORD')
+                if password.nil?
+                    password = SecureRandom.alphanumeric(20)
+                    context.secrets.store(target['SecretId'], 'CLICKHOUSE_PASSWORD', password)
+                end
+
+                ClickHouse.withConnection(target['Database'], linuxConnection, context.secrets, options) do |connection|
+                    connection.createUser(username, password, nil, options)
+                    connection.createDB(DB_TRACES, ClickHouse::DEFAULT_CLUSTER, options)
+                    connection.createDB(DB_METRICS, ClickHouse::DEFAULT_CLUSTER, options)
+                    connection.createDB(DB_LOGS, ClickHouse::DEFAULT_CLUSTER, options)
+                    connection.grantDB('CREATE DATABASE', username, DB_TRACES, nil, options)
+                    connection.grantDB('INSERT', username, DB_TRACES, nil, options)
+                    connection.grantDB('SELECT', username, DB_TRACES, nil, options)
+                    connection.grantDB('INSERT', username, DB_METRICS, nil, options)
+                    connection.grantDB('SELECT', username, DB_METRICS, nil, options)
+                    connection.grantDB('INSERT', username, DB_LOGS, nil, options)
+                    connection.grantDB('SELECT', username, DB_LOGS, nil, options)
+                    connection.grant('SELECT', username, 'system.clusters', nil, options)
+                    connection.grantCluster(username, nil, options)
+                end
+
+                if target['Database']['HostName'] == 'localhost'
+                    target['Database']['HostName'] = HOST_IP
+                end
+                [username, password]
+            end
+
+            def self.buildEndpoint(hostname, port, database, username = nil, password = nil)
+                query = ''
+                if username
+                    username = URI.encode_uri_component(username)
+                    query = "?username=#{username}"
+                    if password
+                        password = URI.encode_uri_component(password)
+                        query += "&password=#{password}"
+                    end
+                end
+                "tcp://#{hostname}:#{port}/#{database}#{query}"
+            end
+
+            def cleanup(configs, state, context, options)
+                cleanupType(:SigNozCollector, configs, state, context, options) do |item, id, state, context, options, connection|
+                    Linux.withConnection(connection) do |linuxConnection|
+                        linuxConnection.stopUserService(USER, 'signoz-otel-collector', options)
+
+                        path = Podman.containersPath(COLLECTOR_HOME_DIR)
+                        linuxConnection.rm(path + 'signoz-otel-collector.container', options[:dry])
+
+                        state.item(id)['Status'] = State::STATUS_DELETED unless options[:dry]
+
+                        if options[:destroy]
+                            username = context.secrets.load(item['Config']['SecretId'], 'CLICKHOUSE_USERNAME')
+                            if !username.nil?
+                                ClickHouse.withConnection(item['Config']['Database'], connection, context.secrets, options) do |connection|
+                                    connection.dropUser(username)
+                                end
+                            end
+                            linuxConnection.deleteUserAndGroup(COLLECTOR_USER, options)
+                            state.item(id)['Status'] = State::STATUS_DESTROYED unless options[:dry]
+                        end
+                    end
+                end
+                cleanupType(:SigNoz, configs, state, context, options) do |item, id, state, context, options, connection|
+                    Linux.withConnection(connection) do |linuxConnection|
+                        if !item['Config'].key?('Proxy') || item['Config']['Proxy']
+                            Nginx.withConnection(linuxConnection) do |nginxConnection|
+                                nginxConnection.cleanupConfig('SigNoz', context, options)
+                                nginxConnection.reload(options)
+                            end
+                        elsif item['Config'].key?('Proxy') && item['Config']['Proxy'] == false
+                            linuxConnection.firewallRemovePort('3301/tcp', options)
+                        end
+
+                        linuxConnection.stopUserService(USER, 'SigNoz', options)
+                        linuxConnection.stopUserService(USER, 'SigNoz-Migrator', options)
+
+                        path = Podman.containersPath(HOME_DIR)
+                        linuxConnection.rm(path + 'SigNoz.container', options[:dry])
+                        linuxConnection.rm(path + 'SigNoz-Migrator.container', options[:dry])
+
+                        state.item(id)['Status'] = State::STATUS_DELETED unless options[:dry]
+
+                        if options[:destroy]
+                            ClickHouse.withConnection(item['Config']['Database'], connection, context.secrets, options) do |connection|
+                                username = context.secrets.load(item['Config']['SecretId'], 'CLICKHOUSE_USERNAME')
+                                if !username.nil?
+                                    connection.dropUser(username, nil, options)
+                                end
+                                connection.dropDB(DB_TRACES, ClickHouse::DEFAULT_CLUSTER, options)
+                                connection.dropDB(DB_METRICS, ClickHouse::DEFAULT_CLUSTER, options)
+                                connection.dropDB(DB_LOGS, ClickHouse::DEFAULT_CLUSTER, options)
+                            end
+                            linuxConnection.deleteUserAndGroup(USER, connection, options[:dry])
+                            state.item(id)['Status'] = State::STATUS_DESTROYED unless options[:dry]
+                        end
+                    end
+                end
+            end
+
+        end
+    end
+end
```
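For orientation, `SigNoz.buildEndpoint` above assembles the ClickHouse DSN that lands in `SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN` and in the collector exporter settings. A minimal sketch of the resulting string, using made-up placeholder values (the real host, port, and credentials come from the target config and the secrets store):

```ruby
require 'uri'

# Same shape as SigNoz.buildEndpoint: tcp://host:port/db?username=...&password=...
# Credentials are URI-encoded so special characters survive the query string.
hostname = '10.0.2.2'    # placeholder; the plugin swaps 'localhost' for HOST_IP
port     = 9000          # placeholder ClickHouse native-protocol port
user     = 'signoz'
pass     = 'p@ss w0rd'   # placeholder secret

query = "?username=#{URI.encode_uri_component(user)}&password=#{URI.encode_uri_component(pass)}"
puts "tcp://#{hostname}:#{port}/signoz_metrics#{query}"
# => tcp://10.0.2.2:9000/signoz_metrics?username=signoz&password=p%40ss%20w0rd
```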
data/Plugins/Apps/Solr/log4j2.xml (new file)

```diff
@@ -0,0 +1,89 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- Default production configuration is asynchronous logging -->
+<Configuration>
+  <Appenders>
+
+    <Console name="STDOUT" target="SYSTEM_OUT">
+      <JsonTemplateLayout />
+      <!--<PatternLayout>
+        <Pattern>
+          %maxLen{%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%notEmpty{c:%X{collection}}%notEmpty{ s:%X{shard}}%notEmpty{ r:%X{replica}}%notEmpty{ x:%X{core}}%notEmpty{ t:%X{trace_id}}] %c{1.} %m%notEmpty{ =>%ex{short}}}{10240}%n
+        </Pattern>
+      </PatternLayout>-->
+    </Console>
+
+    <RollingRandomAccessFile
+        name="MainLogFile"
+        fileName="${sys:solr.log.dir}/solr.json"
+        filePattern="${sys:solr.log.dir}/solr.json.%i" >
+      <JsonTemplateLayout />
+      <!--<PatternLayout>
+        <Pattern>
+          %maxLen{%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%notEmpty{c:%X{collection}}%notEmpty{ s:%X{shard}}%notEmpty{ r:%X{replica}}%notEmpty{ x:%X{core}}%notEmpty{ t:%X{trace_id}}] %c{1.} %m%notEmpty{ =>%ex{short}}}{10240}%n
+        </Pattern>
+      </PatternLayout>-->
+      <Policies>
+        <OnStartupTriggeringPolicy />
+        <SizeBasedTriggeringPolicy size="32 MB"/>
+      </Policies>
+      <DefaultRolloverStrategy max="10"/>
+    </RollingRandomAccessFile>
+
+    <RollingRandomAccessFile
+        name="SlowLogFile"
+        fileName="${sys:solr.log.dir}/solr_slow_requests.log"
+        filePattern="${sys:solr.log.dir}/solr_slow_requests.log.%i" >
+      <PatternLayout>
+        <Pattern>
+          %maxLen{%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%notEmpty{c:%X{collection}}%notEmpty{ s:%X{shard}}%notEmpty{ r:%X{replica}}%notEmpty{ x:%X{core}}%notEmpty{ t:%X{trace_id}}] %c{1.} %m%notEmpty{ =>%ex{short}}}{10240}%n
+        </Pattern>
+      </PatternLayout>
+      <Policies>
+        <OnStartupTriggeringPolicy />
+        <SizeBasedTriggeringPolicy size="32 MB"/>
+      </Policies>
+      <DefaultRolloverStrategy max="10"/>
+    </RollingRandomAccessFile>
+
+  </Appenders>
+  <Loggers>
+    <!-- Use <AsyncLogger/<AsyncRoot and <Logger/<Root for asynchronous logging or synchronous logging respectively -->
+    <AsyncLogger name="org.apache.hadoop" level="warn"/>
+    <AsyncLogger name="org.apache.solr.update.LoggingInfoStream" level="off"/>
+    <AsyncLogger name="org.apache.zookeeper" level="warn"/>
+    <!-- HttpSolrCall adds markers denoting the handler class to allow fine grained control, metrics are
+         very noisy so by default the metrics handler is turned off to see metrics logging set DENY to ACCEPT -->
+    <AsyncLogger name="org.apache.solr.servlet.HttpSolrCall" level="info">
+      <MarkerFilter marker="org.apache.solr.handler.admin.MetricsHandler" onMatch="DENY" onMismatch="ACCEPT"/>
+    </AsyncLogger>
+    <AsyncLogger name="org.apache.solr.core.SolrCore.SlowRequest" level="info" additivity="false">
+      <AppenderRef ref="SlowLogFile"/>
+    </AsyncLogger>
+    <AsyncLogger name="org.eclipse.jetty.deploy" level="warn"/>
+    <AsyncLogger name="org.eclipse.jetty.webapp" level="warn"/>
+    <AsyncLogger name="org.eclipse.jetty.server.session" level="warn"/>
+
+    <AsyncRoot level="info">
+      <AppenderRef ref="MainLogFile"/>
+      <AppenderRef ref="STDOUT"/>
+    </AsyncRoot>
+  </Loggers>
+</Configuration>
+
```
data/Plugins/Apps/Solr/solr.lmm.rb (new file)

```diff
@@ -0,0 +1,82 @@
+
+module ConfigLMM
+    module LMM
+        class Solr < Framework::Plugin
+
+            USER = 'solr'
+            HOME_DIR = '/var/lib/solr'
+            INSTALL_PATH = '/opt/solr'
+            VERSION = '9.8.1'
+            URL = "https://www.apache.org/dyn/closer.lua/solr/solr/#{VERSION}/solr-#{VERSION}.tgz?action=download"
+
+            # Systemd support will be only with 10.x version but we want to already use it
+            INSTALL_SCRIPT = 'https://raw.githubusercontent.com/apache/solr/ccd5ede68bf0cd19be63cda4e35a320842336e07/solr/bin/install_solr_service.sh'
+            SYSTEMD_SERVICE = 'https://raw.githubusercontent.com/apache/solr/ccd5ede68bf0cd19be63cda4e35a320842336e07/solr/bin/systemd/solr.service'
+
+            def actionSolrDeploy(id, target, activeState, context, options)
+                self.withConnection(target['Location'], target) do |connection|
+                    Linux.withConnection(connection) do |linuxConnection|
+                        if linuxConnection.distroName == Linux::ARCH_NAME
+                            linuxConnection.ensurePackage('solr', options)
+                        else
+                            if !linuxConnection.filePresent?(INSTALL_PATH)
+                                linuxConnection.createServiceUser(USER, HOME_DIR, 'Apache Solr', options)
+
+                                linuxConnection.exec("curl --silent --location --output /tmp/solr-#{VERSION}.tgz '#{URL}'", false, options)
+
+                                # FIXME Once Solr 10.x is released
+                                #linuxConnection.exec("tar --extract --strip-components=2 --directory /tmp --file /tmp/solr-#{VERSION}.tgz --wildcards '*/install_solr_service.sh'", false, options)
+                                # BEGIN HACK
+                                linuxConnection.exec("curl --silent --location --output /tmp/install_solr_service.sh '#{INSTALL_SCRIPT}'", false, options)
+                                linuxConnection.exec("curl --silent --location --output /tmp/solr.service '#{SYSTEMD_SERVICE}'", false, options)
+                                linuxConnection.exec("sed -i 's|$SOLR_INSTALL_DIR/bin/systemd|/tmp|' /tmp/install_solr_service.sh", false, options)
+                                linuxConnection.exec("chmod +x /tmp/install_solr_service.sh", false, options)
+                                # END HACK
+
+                                linuxConnection.exec("/tmp/install_solr_service.sh /tmp/solr-#{VERSION}.tgz -u #{USER} -d #{HOME_DIR}", false, options)
+
+                                linuxConnection.exec('rm -rf solr.service', false, options) # CLEANUP HACK
+
+                                linuxConnection.exec("rm -rf /tmp/install_solr_service.sh /tmp/solr-#{VERSION}.tgz", false, options)
+
+                                activeState['Version'] = VERSION
+
+                                # To use JSON logging
+                                linuxConnection.upload(__dir__ + '/log4j2.xml', HOME_DIR, options)
+                                linuxConnection.restartService("solr.service", options)
+                            end
+                        end
+                    end
+                end
+            end
+
+            def cleanup(configs, state, context, options)
+                cleanupType(:Solr, configs, state, context, options) do |item, id, state, context, options, connection|
+                    Linux.withConnection(connection) do |linuxConnection|
+
+                        linuxConnection.stopService('solr.service', options)
+                        linuxConnection.disableService('solr.service', options)
+
+                        linuxConnection.rm('/etc/systemd/system/solr.service', options[:dry])
+
+                        version = state.item(id)['Version']
+                        if !version.to_s.empty?
+                            linuxConnection.rm(INSTALL_PATH, options[:dry])
+                            linuxConnection.rm(INSTALL_PATH + '-' + version, options[:dry])
+                        end
+
+                        state.item(id)['Status'] = State::STATUS_DELETED unless options[:dry]
+
+                        if options[:destroy]
+                            linuxConnection.deleteUserAndGroup(USER, options)
+                            linuxConnection.rm(HOME_DIR, options[:dry])
+                            linuxConnection.rm('/etc/default/solr.in.sh', options[:dry])
+                            state.item(id)['Status'] = State::STATUS_DESTROYED unless options[:dry]
+                        end
+                    end
+                end
+            end
+        end
+    end
+end
+
```
data/Plugins/Apps/Tunnel/tunnel.lmm.rb

```diff
@@ -4,57 +4,53 @@ module ConfigLMM
         class Tunnel < Framework::NginxApp
 
             def actionTunnelDeploy(id, target, activeState, context, options)
-
-
-
-                raise Framework::PluginProcessError.new("#{id}: Unknown Protocol: #{uri.scheme}!") if uri.scheme != 'ssh'
-
-                self.class.sshStart(uri) do |ssh|
-
-                    Framework::LinuxApp.ensurePackage('socat', ssh)
+                self.withConnection(target['Location'], target) do |connection|
+                    Linux.withConnection(connection) do |linuxConnection|
+                        linuxConnection.ensurePackage('socat', options)
 
                         port = target['Port']
-                        activeState['Port'] = port
-                        activeState['UDP'] = target['UDP']
                         if target['UDP']
                             name = "tunnelUDP-#{port}"
-
-
-
-
-
+                            linuxConnection.upload(__dir__ + '/tunnelUDP.service', "/etc/systemd/system/#{name}.service", options)
+                            linuxConnection.upload(__dir__ + '/tunnelUDP.socket', "/etc/systemd/system/#{name}.socket", options)
+                            linuxConnection.fileReplace("/etc/systemd/system/#{name}.service", '\$PORT', port, options)
+                            linuxConnection.fileReplace("/etc/systemd/system/#{name}.socket", '\$PORT', port, options)
+                            linuxConnection.fileReplace("/etc/systemd/system/#{name}.service", '\$REMOTE', Addressable::IDNA.to_ascii(target['Remote']) , options)
+                            linuxConnection.firewallAddPort("#{port}/udp", options)
                         else
                             name = "tunnelTCP-#{port}"
-
-
-
-
-
+                            linuxConnection.upload(__dir__ + '/tunnelTCP.service', "/etc/systemd/system/#{name}.service", options)
+                            linuxConnection.upload(__dir__ + '/tunnelTCP.socket', "/etc/systemd/system/#{name}.socket", options)
+                            linuxConnection.fileReplace("/etc/systemd/system/#{name}.service", '\$PORT', port, options)
+                            linuxConnection.fileReplace("/etc/systemd/system/#{name}.socket", '\$PORT', port, options)
+                            linuxConnection.fileReplace("/etc/systemd/system/#{name}.service", '\$REMOTE', Addressable::IDNA.to_ascii(target['Remote']), options)
+                            linuxConnection.firewallAddPort("#{port}/tcp", options)
                         end
 
-
-
-
-
+                        linuxConnection.reloadServiceManager(options)
+                        linuxConnection.ensureServiceAutoStart(name + '.socket', options)
+                        linuxConnection.stopService(name + '.service', options)
+                        linuxConnection.startService(name + '.socket', options)
                     end
-                else
-                    # TODO
                 end
-                activeState['Status'] = State::STATUS_DEPLOYED
             end
 
             def cleanup(configs, state, context, options)
-                cleanupType(:Tunnel, configs, state, context, options) do |item, id, state, context, options,
-
-
-
-
+                cleanupType(:Tunnel, configs, state, context, options) do |item, id, state, context, options, connection|
+                    Linux.withConnection(connection) do |linuxConnection|
+                        if item['Config']['UDP']
+                            name = "tunnelUDP-#{item['Config']['Port']}"
+                            linuxConnection.firewallRemovePort("#{item['Config']['Port']}/udp", options)
+                        else
+                            name = "tunnelTCP-#{item['Config']['Port']}"
+                            linuxConnection.firewallRemovePort("#{item['Config']['Port']}/tcp", options)
+                        end
+                        linuxConnection.stopService(name + '.socket', options)
+                        linuxConnection.disableService(name + '.socket', options)
+                        linuxConnection.rm("/etc/systemd/system/#{name}.service", options[:dry])
+                        linuxConnection.rm("/etc/systemd/system/#{name}.socket", options[:dry])
+                        state.item(id)['Status'] = State::STATUS_DESTROYED
                     end
-                    Framework::LinuxApp.stopService(name + '.socket', ssh)
-                    Framework::LinuxApp.disableService(name + '.socket', ssh)
-                    rm("/etc/systemd/system/#{name}.service", options[:dry], ssh)
-                    rm("/etc/systemd/system/#{name}.socket", options[:dry], ssh)
-                    state.item(id)['Status'] = State::STATUS_DESTROYED
                 end
             end
 
```
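In the rewritten deploy above, `$REMOTE` in the socat unit templates is filled with `Addressable::IDNA.to_ascii(target['Remote'])`, i.e. the punycode form of the configured remote host, so the generated unit always carries an ASCII hostname. A quick illustration with made-up hostnames:

```ruby
require 'addressable/idna'  # from the addressable gem

# Internationalized names are converted to their ASCII (punycode) form;
# plain ASCII names pass through unchanged.
puts Addressable::IDNA.to_ascii('bücher.example')  # => xn--bcher-kva.example
puts Addressable::IDNA.to_ascii('tunnel.example')  # => tunnel.example
```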
data/Plugins/Apps/Umami/Umami.container (new file)

```diff
@@ -0,0 +1,19 @@
+
+[Unit]
+Description=Umami container
+After=local-fs.target
+
+[Container]
+ContainerName=Umami
+Image=docker.umami.is/umami-software/umami:postgresql-latest
+EnvironmentFile=/var/lib/umami/.config/containers/systemd/Umami.env
+Network=slirp4netns:allow_host_loopback=true
+PublishPort=127.0.0.1:13300:3000
+LogDriver=journald
+AutoUpdate=registry
+
+[Service]
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target default.target
```