opentelemetry-semantic_conventions 1.11.0 → 1.36.0
This diff covers the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.yardopts +2 -2
- data/CHANGELOG.md +4 -0
- data/lib/opentelemetry/semantic_conventions/resource.rb +258 -4
- data/lib/opentelemetry/semantic_conventions/trace.rb +432 -5
- data/lib/opentelemetry/semantic_conventions/version.rb +1 -1
- data/lib/opentelemetry/semantic_conventions.rb +13 -1
- data/lib/opentelemetry/semconv/aspnetcore/attributes.rb +97 -0
- data/lib/opentelemetry/semconv/aspnetcore/metrics.rb +83 -0
- data/lib/opentelemetry/semconv/aspnetcore.rb +22 -0
- data/lib/opentelemetry/semconv/client/attributes.rb +53 -0
- data/lib/opentelemetry/semconv/client.rb +21 -0
- data/lib/opentelemetry/semconv/code/attributes.rb +92 -0
- data/lib/opentelemetry/semconv/code.rb +21 -0
- data/lib/opentelemetry/semconv/db/attributes.rb +174 -0
- data/lib/opentelemetry/semconv/db/metrics.rb +36 -0
- data/lib/opentelemetry/semconv/db.rb +22 -0
- data/lib/opentelemetry/semconv/dotnet/attributes.rb +40 -0
- data/lib/opentelemetry/semconv/dotnet/metrics.rb +181 -0
- data/lib/opentelemetry/semconv/dotnet.rb +22 -0
- data/lib/opentelemetry/semconv/error/attributes.rb +61 -0
- data/lib/opentelemetry/semconv/error.rb +21 -0
- data/lib/opentelemetry/semconv/exception/attributes.rb +64 -0
- data/lib/opentelemetry/semconv/exception.rb +21 -0
- data/lib/opentelemetry/semconv/http/attributes.rb +164 -0
- data/lib/opentelemetry/semconv/http/metrics.rb +39 -0
- data/lib/opentelemetry/semconv/http.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/android/attributes.rb +58 -0
- data/lib/opentelemetry/semconv/incubating/android.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/app/attributes.rb +103 -0
- data/lib/opentelemetry/semconv/incubating/app.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/artifact/attributes.rb +117 -0
- data/lib/opentelemetry/semconv/incubating/artifact.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/aspnetcore/attributes.rb +113 -0
- data/lib/opentelemetry/semconv/incubating/aspnetcore/metrics.rb +99 -0
- data/lib/opentelemetry/semconv/incubating/aspnetcore.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/aws/attributes.rb +551 -0
- data/lib/opentelemetry/semconv/incubating/aws.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/az/attributes.rb +53 -0
- data/lib/opentelemetry/semconv/incubating/az.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/azure/attributes.rb +115 -0
- data/lib/opentelemetry/semconv/incubating/azure/metrics.rb +41 -0
- data/lib/opentelemetry/semconv/incubating/azure.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/browser/attributes.rb +77 -0
- data/lib/opentelemetry/semconv/incubating/browser.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/cassandra/attributes.rb +78 -0
- data/lib/opentelemetry/semconv/incubating/cassandra.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/cicd/attributes.rb +201 -0
- data/lib/opentelemetry/semconv/incubating/cicd/metrics.rb +61 -0
- data/lib/opentelemetry/semconv/incubating/cicd.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/client/attributes.rb +59 -0
- data/lib/opentelemetry/semconv/incubating/client.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/cloud/attributes.rb +105 -0
- data/lib/opentelemetry/semconv/incubating/cloud.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/cloudevents/attributes.rb +80 -0
- data/lib/opentelemetry/semconv/incubating/cloudevents.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/cloudfoundry/attributes.rb +190 -0
- data/lib/opentelemetry/semconv/incubating/cloudfoundry.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/code/attributes.rb +155 -0
- data/lib/opentelemetry/semconv/incubating/code.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/container/attributes.rb +197 -0
- data/lib/opentelemetry/semconv/incubating/container/metrics.rb +74 -0
- data/lib/opentelemetry/semconv/incubating/container.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/cpu/attributes.rb +50 -0
- data/lib/opentelemetry/semconv/incubating/cpu/metrics.rb +49 -0
- data/lib/opentelemetry/semconv/incubating/cpu.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/cpython/attributes.rb +42 -0
- data/lib/opentelemetry/semconv/incubating/cpython/metrics.rb +52 -0
- data/lib/opentelemetry/semconv/incubating/cpython.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/db/attributes.rb +615 -0
- data/lib/opentelemetry/semconv/incubating/db/metrics.rb +156 -0
- data/lib/opentelemetry/semconv/incubating/db.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/deployment/attributes.rb +84 -0
- data/lib/opentelemetry/semconv/incubating/deployment.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/destination/attributes.rb +54 -0
- data/lib/opentelemetry/semconv/incubating/destination.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/device/attributes.rb +88 -0
- data/lib/opentelemetry/semconv/incubating/device.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/disk/attributes.rb +40 -0
- data/lib/opentelemetry/semconv/incubating/disk.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/dns/attributes.rb +52 -0
- data/lib/opentelemetry/semconv/incubating/dns/metrics.rb +36 -0
- data/lib/opentelemetry/semconv/incubating/dns.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/dotnet/attributes.rb +44 -0
- data/lib/opentelemetry/semconv/incubating/dotnet/metrics.rb +221 -0
- data/lib/opentelemetry/semconv/incubating/dotnet.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/elasticsearch/attributes.rb +40 -0
- data/lib/opentelemetry/semconv/incubating/elasticsearch.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/enduser/attributes.rb +79 -0
- data/lib/opentelemetry/semconv/incubating/enduser.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/error/attributes.rb +81 -0
- data/lib/opentelemetry/semconv/incubating/error.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/event/attributes.rb +42 -0
- data/lib/opentelemetry/semconv/incubating/event.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/exception/attributes.rb +73 -0
- data/lib/opentelemetry/semconv/incubating/exception.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/faas/attributes.rb +200 -0
- data/lib/opentelemetry/semconv/incubating/faas/metrics.rb +76 -0
- data/lib/opentelemetry/semconv/incubating/faas.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/feature_flag/attributes.rb +159 -0
- data/lib/opentelemetry/semconv/incubating/feature_flag.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/file/attributes.rb +207 -0
- data/lib/opentelemetry/semconv/incubating/file.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/gcp/attributes.rb +159 -0
- data/lib/opentelemetry/semconv/incubating/gcp.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/gen_ai/attributes.rb +404 -0
- data/lib/opentelemetry/semconv/incubating/gen_ai/metrics.rb +56 -0
- data/lib/opentelemetry/semconv/incubating/gen_ai.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/geo/attributes.rb +91 -0
- data/lib/opentelemetry/semconv/incubating/geo.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/go/attributes.rb +41 -0
- data/lib/opentelemetry/semconv/incubating/go/metrics.rb +94 -0
- data/lib/opentelemetry/semconv/incubating/go.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/graphql/attributes.rb +62 -0
- data/lib/opentelemetry/semconv/incubating/graphql.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/heroku/attributes.rb +58 -0
- data/lib/opentelemetry/semconv/incubating/heroku.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/host/attributes.rb +172 -0
- data/lib/opentelemetry/semconv/incubating/host.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/http/attributes.rb +366 -0
- data/lib/opentelemetry/semconv/incubating/http/metrics.rb +93 -0
- data/lib/opentelemetry/semconv/incubating/http.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/hw/attributes.rb +70 -0
- data/lib/opentelemetry/semconv/incubating/hw/metrics.rb +79 -0
- data/lib/opentelemetry/semconv/incubating/hw.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/ios/attributes.rb +44 -0
- data/lib/opentelemetry/semconv/incubating/ios.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/jvm/attributes.rb +129 -0
- data/lib/opentelemetry/semconv/incubating/jvm/metrics.rb +162 -0
- data/lib/opentelemetry/semconv/incubating/jvm.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/k8s/attributes.rb +910 -0
- data/lib/opentelemetry/semconv/incubating/k8s/metrics.rb +658 -0
- data/lib/opentelemetry/semconv/incubating/k8s.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/kestrel/metrics.rb +106 -0
- data/lib/opentelemetry/semconv/incubating/kestrel.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/linux/attributes.rb +41 -0
- data/lib/opentelemetry/semconv/incubating/linux.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/log/attributes.rb +96 -0
- data/lib/opentelemetry/semconv/incubating/log.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/mainframe/attributes.rb +40 -0
- data/lib/opentelemetry/semconv/incubating/mainframe.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/message/attributes.rb +55 -0
- data/lib/opentelemetry/semconv/incubating/message.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/messaging/attributes.rb +451 -0
- data/lib/opentelemetry/semconv/incubating/messaging/metrics.rb +96 -0
- data/lib/opentelemetry/semconv/incubating/messaging.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/net/attributes.rb +175 -0
- data/lib/opentelemetry/semconv/incubating/net.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/network/attributes.rb +222 -0
- data/lib/opentelemetry/semconv/incubating/network.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/nodejs/attributes.rb +36 -0
- data/lib/opentelemetry/semconv/incubating/nodejs/metrics.rb +94 -0
- data/lib/opentelemetry/semconv/incubating/nodejs.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/oci/attributes.rb +43 -0
- data/lib/opentelemetry/semconv/incubating/oci.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/opentracing/attributes.rb +38 -0
- data/lib/opentelemetry/semconv/incubating/opentracing.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/os/attributes.rb +78 -0
- data/lib/opentelemetry/semconv/incubating/os.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/otel/attributes.rb +138 -0
- data/lib/opentelemetry/semconv/incubating/otel/metrics.rb +194 -0
- data/lib/opentelemetry/semconv/incubating/otel.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/other/attributes.rb +41 -0
- data/lib/opentelemetry/semconv/incubating/other.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/peer/attributes.rb +40 -0
- data/lib/opentelemetry/semconv/incubating/peer.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/pool/attributes.rb +41 -0
- data/lib/opentelemetry/semconv/incubating/pool.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/process/attributes.rb +367 -0
- data/lib/opentelemetry/semconv/incubating/process/metrics.rb +89 -0
- data/lib/opentelemetry/semconv/incubating/process.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/profile/attributes.rb +40 -0
- data/lib/opentelemetry/semconv/incubating/profile.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/rpc/attributes.rb +215 -0
- data/lib/opentelemetry/semconv/incubating/rpc/metrics.rb +115 -0
- data/lib/opentelemetry/semconv/incubating/rpc.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/security_rule/attributes.rb +106 -0
- data/lib/opentelemetry/semconv/incubating/security_rule.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/server/attributes.rb +61 -0
- data/lib/opentelemetry/semconv/incubating/server.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/service/attributes.rb +103 -0
- data/lib/opentelemetry/semconv/incubating/service.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/session/attributes.rb +49 -0
- data/lib/opentelemetry/semconv/incubating/session.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/signalr/attributes.rb +55 -0
- data/lib/opentelemetry/semconv/incubating/signalr/metrics.rb +49 -0
- data/lib/opentelemetry/semconv/incubating/signalr.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/source/attributes.rb +54 -0
- data/lib/opentelemetry/semconv/incubating/source.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/system/attributes.rb +162 -0
- data/lib/opentelemetry/semconv/incubating/system/metrics.rb +227 -0
- data/lib/opentelemetry/semconv/incubating/system.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/telemetry/attributes.rb +88 -0
- data/lib/opentelemetry/semconv/incubating/telemetry.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/test/attributes.rb +75 -0
- data/lib/opentelemetry/semconv/incubating/test.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/thread/attributes.rb +49 -0
- data/lib/opentelemetry/semconv/incubating/thread.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/tls/attributes.rb +293 -0
- data/lib/opentelemetry/semconv/incubating/tls.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/url/attributes.rb +225 -0
- data/lib/opentelemetry/semconv/incubating/url.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/user/attributes.rb +87 -0
- data/lib/opentelemetry/semconv/incubating/user.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/user_agent/attributes.rb +100 -0
- data/lib/opentelemetry/semconv/incubating/user_agent.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/v8js/attributes.rb +43 -0
- data/lib/opentelemetry/semconv/incubating/v8js/metrics.rb +66 -0
- data/lib/opentelemetry/semconv/incubating/v8js.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/vcs/attributes.rb +303 -0
- data/lib/opentelemetry/semconv/incubating/vcs/metrics.rb +88 -0
- data/lib/opentelemetry/semconv/incubating/vcs.rb +22 -0
- data/lib/opentelemetry/semconv/incubating/webengine/attributes.rb +58 -0
- data/lib/opentelemetry/semconv/incubating/webengine.rb +21 -0
- data/lib/opentelemetry/semconv/incubating/zos/attributes.rb +49 -0
- data/lib/opentelemetry/semconv/incubating/zos.rb +21 -0
- data/lib/opentelemetry/semconv/jvm/attributes.rb +91 -0
- data/lib/opentelemetry/semconv/jvm/metrics.rb +91 -0
- data/lib/opentelemetry/semconv/jvm.rb +22 -0
- data/lib/opentelemetry/semconv/kestrel/metrics.rb +88 -0
- data/lib/opentelemetry/semconv/kestrel.rb +21 -0
- data/lib/opentelemetry/semconv/network/attributes.rb +120 -0
- data/lib/opentelemetry/semconv/network.rb +21 -0
- data/lib/opentelemetry/semconv/otel/attributes.rb +61 -0
- data/lib/opentelemetry/semconv/otel.rb +21 -0
- data/lib/opentelemetry/semconv/server/attributes.rb +55 -0
- data/lib/opentelemetry/semconv/server.rb +21 -0
- data/lib/opentelemetry/semconv/service/attributes.rb +50 -0
- data/lib/opentelemetry/semconv/service.rb +21 -0
- data/lib/opentelemetry/semconv/signalr/attributes.rb +49 -0
- data/lib/opentelemetry/semconv/signalr/metrics.rb +43 -0
- data/lib/opentelemetry/semconv/signalr.rb +22 -0
- data/lib/opentelemetry/semconv/telemetry/attributes.rb +59 -0
- data/lib/opentelemetry/semconv/telemetry.rb +21 -0
- data/lib/opentelemetry/semconv/url/attributes.rb +118 -0
- data/lib/opentelemetry/semconv/url.rb +21 -0
- data/lib/opentelemetry/semconv/user_agent/attributes.rb +40 -0
- data/lib/opentelemetry/semconv/user_agent.rb +21 -0
- metadata +249 -4
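Beyond the additions to the long-standing OpenTelemetry::SemanticConventions::Resource and OpenTelemetry::SemanticConventions::Trace modules, the listing above introduces a parallel OpenTelemetry::SemConv namespace with stable and incubating sub-modules, each split into an attributes.rb file and, where defined, a metrics.rb file. The following is a minimal sketch of how the two namespaces could sit side by side in application code; the SemConv::HTTP::HTTP_REQUEST_METHOD constant is an assumption inferred from the generated naming pattern visible in the K8S module below and is not confirmed by this diff, so it is left commented out.

# frozen_string_literal: true

require 'opentelemetry/semantic_conventions'

# Existing namespace: still shipped and used by current instrumentation.
puts OpenTelemetry::SemanticConventions::Resource::SERVICE_NAME # => "service.name"

# New namespace (constant name assumed by analogy with the generated
# K8S_CONTAINER_CPU_LIMIT = 'k8s.container.cpu.limit' pattern in this release):
# require 'opentelemetry/semconv/http'
# puts OpenTelemetry::SemConv::HTTP::HTTP_REQUEST_METHOD # => "http.request.method"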
@@ -0,0 +1,658 @@
+# frozen_string_literal: true
+
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# This file was autogenerated. Do not edit it by hand.
+
+module OpenTelemetry
+  module SemConv
+    module Incubating
+      module K8S
+        # @!group Metrics Names
+
+        # Maximum CPU resource limit set for the container
+        #
+        # See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core for details.
+        #
+        # @note Stability Level: development
+        K8S_CONTAINER_CPU_LIMIT = 'k8s.container.cpu.limit'
+
+        # CPU resource requested for the container
+        #
+        # See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core for details.
+        #
+        # @note Stability Level: development
+        K8S_CONTAINER_CPU_REQUEST = 'k8s.container.cpu.request'
+
+        # Maximum ephemeral storage resource limit set for the container
+        #
+        # See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core for details.
+        #
+        # @note Stability Level: development
+        K8S_CONTAINER_EPHEMERAL_STORAGE_LIMIT = 'k8s.container.ephemeral_storage.limit'
+
+        # Ephemeral storage resource requested for the container
+        #
+        # See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core for details.
+        #
+        # @note Stability Level: development
+        K8S_CONTAINER_EPHEMERAL_STORAGE_REQUEST = 'k8s.container.ephemeral_storage.request'
+
+        # Maximum memory resource limit set for the container
+        #
+        # See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core for details.
+        #
+        # @note Stability Level: development
+        K8S_CONTAINER_MEMORY_LIMIT = 'k8s.container.memory.limit'
+
+        # Memory resource requested for the container
+        #
+        # See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core for details.
+        #
+        # @note Stability Level: development
+        K8S_CONTAINER_MEMORY_REQUEST = 'k8s.container.memory.request'
+
+        # Indicates whether the container is currently marked as ready to accept traffic, based on its readiness probe (1 = ready, 0 = not ready)
+        #
+        # This metric SHOULD reflect the value of the `ready` field in the
+        # [K8s ContainerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatus-v1-core).
+        #
+        # @note Stability Level: development
+        K8S_CONTAINER_READY = 'k8s.container.ready'
+
+        # Describes how many times the container has restarted (since the last counter reset)
+        #
+        # This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0
+        # at any time depending on how your kubelet is configured to prune dead containers.
+        # It is best to not depend too much on the exact value but rather look at it as
+        # either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case
+        # you can conclude there were restarts in the recent past, and not try and analyze the value beyond that.
+        #
+        # @note Stability Level: development
+        K8S_CONTAINER_RESTART_COUNT = 'k8s.container.restart.count'
+
+        # Describes the number of K8s containers that are currently in a state for a given reason
+        #
+        # All possible container state reasons will be reported at each time interval to avoid missing metrics.
+        # Only the value corresponding to the current state reason will be non-zero.
+        #
+        # @note Stability Level: development
+        K8S_CONTAINER_STATUS_REASON = 'k8s.container.status.reason'
+
+        # Describes the number of K8s containers that are currently in a given state
+        #
+        # All possible container states will be reported at each time interval to avoid missing metrics.
+        # Only the value corresponding to the current state will be non-zero.
+        #
+        # @note Stability Level: development
+        K8S_CONTAINER_STATUS_STATE = 'k8s.container.status.state'
+
+        # Maximum storage resource limit set for the container
+        #
+        # See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core for details.
+        #
+        # @note Stability Level: development
+        K8S_CONTAINER_STORAGE_LIMIT = 'k8s.container.storage.limit'
+
+        # Storage resource requested for the container
+        #
+        # See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core for details.
+        #
+        # @note Stability Level: development
+        K8S_CONTAINER_STORAGE_REQUEST = 'k8s.container.storage.request'
+
+        # The number of actively running jobs for a cronjob
+        #
+        # This metric aligns with the `active` field of the
+        # [K8s CronJobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#cronjobstatus-v1-batch).
+        #
+        # @note Stability Level: development
+        K8S_CRONJOB_ACTIVE_JOBS = 'k8s.cronjob.active_jobs'
+
+        # Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod
+        #
+        # This metric aligns with the `currentNumberScheduled` field of the
+        # [K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps).
+        #
+        # @note Stability Level: development
+        K8S_DAEMONSET_CURRENT_SCHEDULED_NODES = 'k8s.daemonset.current_scheduled_nodes'
+
+        # Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod)
+        #
+        # This metric aligns with the `desiredNumberScheduled` field of the
+        # [K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps).
+        #
+        # @note Stability Level: development
+        K8S_DAEMONSET_DESIRED_SCHEDULED_NODES = 'k8s.daemonset.desired_scheduled_nodes'
+
+        # Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod
+        #
+        # This metric aligns with the `numberMisscheduled` field of the
+        # [K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps).
+        #
+        # @note Stability Level: development
+        K8S_DAEMONSET_MISSCHEDULED_NODES = 'k8s.daemonset.misscheduled_nodes'
+
+        # Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready
+        #
+        # This metric aligns with the `numberReady` field of the
+        # [K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps).
+        #
+        # @note Stability Level: development
+        K8S_DAEMONSET_READY_NODES = 'k8s.daemonset.ready_nodes'
+
+        # Total number of available replica pods (ready for at least minReadySeconds) targeted by this deployment
+        #
+        # This metric aligns with the `availableReplicas` field of the
+        # [K8s DeploymentStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#deploymentstatus-v1-apps).
+        #
+        # @note Stability Level: development
+        K8S_DEPLOYMENT_AVAILABLE_PODS = 'k8s.deployment.available_pods'
+
+        # Number of desired replica pods in this deployment
+        #
+        # This metric aligns with the `replicas` field of the
+        # [K8s DeploymentSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#deploymentspec-v1-apps).
+        #
+        # @note Stability Level: development
+        K8S_DEPLOYMENT_DESIRED_PODS = 'k8s.deployment.desired_pods'
+
+        # Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler
+        #
+        # This metric aligns with the `currentReplicas` field of the
+        # [K8s HorizontalPodAutoscalerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling)
+        #
+        # @note Stability Level: development
+        K8S_HPA_CURRENT_PODS = 'k8s.hpa.current_pods'
+
+        # Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler
+        #
+        # This metric aligns with the `desiredReplicas` field of the
+        # [K8s HorizontalPodAutoscalerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling)
+        #
+        # @note Stability Level: development
+        K8S_HPA_DESIRED_PODS = 'k8s.hpa.desired_pods'
+
+        # The upper limit for the number of replica pods to which the autoscaler can scale up
+        #
+        # This metric aligns with the `maxReplicas` field of the
+        # [K8s HorizontalPodAutoscalerSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerspec-v2-autoscaling)
+        #
+        # @note Stability Level: development
+        K8S_HPA_MAX_PODS = 'k8s.hpa.max_pods'
+
+        # Target average utilization, in percentage, for CPU resource in HPA config.
+        #
+        # This metric aligns with the `averageUtilization` field of the
+        # [K8s HPA MetricTarget](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#metrictarget-v2-autoscaling).
+        # If the type of the metric is [`ContainerResource`](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis),
+        # the `k8s.container.name` attribute MUST be set to identify the specific container within the pod to which the metric applies.
+        #
+        # @note Stability Level: development
+        K8S_HPA_METRIC_TARGET_CPU_AVERAGE_UTILIZATION = 'k8s.hpa.metric.target.cpu.average_utilization'
+
+        # Target average value for CPU resource in HPA config.
+        #
+        # This metric aligns with the `averageValue` field of the
+        # [K8s HPA MetricTarget](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#metrictarget-v2-autoscaling).
+        # If the type of the metric is [`ContainerResource`](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis),
+        # the `k8s.container.name` attribute MUST be set to identify the specific container within the pod to which the metric applies.
+        #
+        # @note Stability Level: development
+        K8S_HPA_METRIC_TARGET_CPU_AVERAGE_VALUE = 'k8s.hpa.metric.target.cpu.average_value'
+
+        # Target value for CPU resource in HPA config.
+        #
+        # This metric aligns with the `value` field of the
+        # [K8s HPA MetricTarget](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#metrictarget-v2-autoscaling).
+        # If the type of the metric is [`ContainerResource`](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis),
+        # the `k8s.container.name` attribute MUST be set to identify the specific container within the pod to which the metric applies.
+        #
+        # @note Stability Level: development
+        K8S_HPA_METRIC_TARGET_CPU_VALUE = 'k8s.hpa.metric.target.cpu.value'
+
+        # The lower limit for the number of replica pods to which the autoscaler can scale down
+        #
+        # This metric aligns with the `minReplicas` field of the
+        # [K8s HorizontalPodAutoscalerSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerspec-v2-autoscaling)
+        #
+        # @note Stability Level: development
+        K8S_HPA_MIN_PODS = 'k8s.hpa.min_pods'
+
+        # The number of pending and actively running pods for a job
+        #
+        # This metric aligns with the `active` field of the
+        # [K8s JobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch).
+        #
+        # @note Stability Level: development
+        K8S_JOB_ACTIVE_PODS = 'k8s.job.active_pods'
+
+        # The desired number of successfully finished pods the job should be run with
+        #
+        # This metric aligns with the `completions` field of the
+        # [K8s JobSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobspec-v1-batch).
+        #
+        # @note Stability Level: development
+        K8S_JOB_DESIRED_SUCCESSFUL_PODS = 'k8s.job.desired_successful_pods'
+
+        # The number of pods which reached phase Failed for a job
+        #
+        # This metric aligns with the `failed` field of the
+        # [K8s JobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch).
+        #
+        # @note Stability Level: development
+        K8S_JOB_FAILED_PODS = 'k8s.job.failed_pods'
+
+        # The max desired number of pods the job should run at any given time
+        #
+        # This metric aligns with the `parallelism` field of the
+        # [K8s JobSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobspec-v1-batch).
+        #
+        # @note Stability Level: development
+        K8S_JOB_MAX_PARALLEL_PODS = 'k8s.job.max_parallel_pods'
+
+        # The number of pods which reached phase Succeeded for a job
+        #
+        # This metric aligns with the `succeeded` field of the
+        # [K8s JobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch).
+        #
+        # @note Stability Level: development
+        K8S_JOB_SUCCESSFUL_PODS = 'k8s.job.successful_pods'
+
+        # Describes number of K8s namespaces that are currently in a given phase.
+        #
+        # @note Stability Level: development
+        K8S_NAMESPACE_PHASE = 'k8s.namespace.phase'
+
+        # Amount of cpu allocatable on the node
+        #
+        # @note Stability Level: development
+        K8S_NODE_ALLOCATABLE_CPU = 'k8s.node.allocatable.cpu'
+
+        # Amount of ephemeral-storage allocatable on the node
+        #
+        # @note Stability Level: development
+        K8S_NODE_ALLOCATABLE_EPHEMERAL_STORAGE = 'k8s.node.allocatable.ephemeral_storage'
+
+        # Amount of memory allocatable on the node
+        #
+        # @note Stability Level: development
+        K8S_NODE_ALLOCATABLE_MEMORY = 'k8s.node.allocatable.memory'
+
+        # Amount of pods allocatable on the node
+        #
+        # @note Stability Level: development
+        K8S_NODE_ALLOCATABLE_PODS = 'k8s.node.allocatable.pods'
+
+        # Describes the condition of a particular Node.
+        #
+        # All possible node condition pairs (type and status) will be reported at each time interval to avoid missing metrics. Condition pairs corresponding to the current conditions' statuses will be non-zero.
+        #
+        # @note Stability Level: development
+        K8S_NODE_CONDITION_STATUS = 'k8s.node.condition.status'
+
+        # Total CPU time consumed
+        #
+        # Total CPU time consumed by the specific Node on all available CPU cores
+        #
+        # @note Stability Level: development
+        K8S_NODE_CPU_TIME = 'k8s.node.cpu.time'
+
+        # Node's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs
+        #
+        # CPU usage of the specific Node on all available CPU cores, averaged over the sample window
+        #
+        # @note Stability Level: development
+        K8S_NODE_CPU_USAGE = 'k8s.node.cpu.usage'
+
+        # Memory usage of the Node
+        #
+        # Total memory usage of the Node
+        #
+        # @note Stability Level: development
+        K8S_NODE_MEMORY_USAGE = 'k8s.node.memory.usage'
+
+        # Node network errors
+        #
+        # @note Stability Level: development
+        K8S_NODE_NETWORK_ERRORS = 'k8s.node.network.errors'
+
+        # Network bytes for the Node
+        #
+        # @note Stability Level: development
+        K8S_NODE_NETWORK_IO = 'k8s.node.network.io'
+
+        # The time the Node has been running
+        #
+        # Instrumentations SHOULD use a gauge with type `double` and measure uptime in seconds as a floating point number with the highest precision available.
+        # The actual accuracy would depend on the instrumentation and operating system.
+        #
+        # @note Stability Level: development
+        K8S_NODE_UPTIME = 'k8s.node.uptime'
+
+        # Total CPU time consumed
+        #
+        # Total CPU time consumed by the specific Pod on all available CPU cores
+        #
+        # @note Stability Level: development
+        K8S_POD_CPU_TIME = 'k8s.pod.cpu.time'
+
+        # Pod's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs
+        #
+        # CPU usage of the specific Pod on all available CPU cores, averaged over the sample window
+        #
+        # @note Stability Level: development
+        K8S_POD_CPU_USAGE = 'k8s.pod.cpu.usage'
+
+        # Memory usage of the Pod
+        #
+        # Total memory usage of the Pod
+        #
+        # @note Stability Level: development
+        K8S_POD_MEMORY_USAGE = 'k8s.pod.memory.usage'
+
+        # Pod network errors
+        #
+        # @note Stability Level: development
+        K8S_POD_NETWORK_ERRORS = 'k8s.pod.network.errors'
+
+        # Network bytes for the Pod
+        #
+        # @note Stability Level: development
+        K8S_POD_NETWORK_IO = 'k8s.pod.network.io'
+
+        # The time the Pod has been running
+        #
+        # Instrumentations SHOULD use a gauge with type `double` and measure uptime in seconds as a floating point number with the highest precision available.
+        # The actual accuracy would depend on the instrumentation and operating system.
+        #
+        # @note Stability Level: development
+        K8S_POD_UPTIME = 'k8s.pod.uptime'
+
+        # Total number of available replica pods (ready for at least minReadySeconds) targeted by this replicaset
+        #
+        # This metric aligns with the `availableReplicas` field of the
+        # [K8s ReplicaSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicasetstatus-v1-apps).
+        #
+        # @note Stability Level: development
+        K8S_REPLICASET_AVAILABLE_PODS = 'k8s.replicaset.available_pods'
+
+        # Number of desired replica pods in this replicaset
+        #
+        # This metric aligns with the `replicas` field of the
+        # [K8s ReplicaSetSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicasetspec-v1-apps).
+        #
+        # @note Stability Level: development
+        K8S_REPLICASET_DESIRED_PODS = 'k8s.replicaset.desired_pods'
+
+        # Deprecated, use `k8s.replicationcontroller.available_pods` instead.
+        #
+        # @note Stability Level: development
+        # @deprecated {"note": "Replaced by `k8s.replicationcontroller.available_pods`.", "reason": "renamed", "renamed_to": "k8s.replicationcontroller.available_pods"}
+        K8S_REPLICATION_CONTROLLER_AVAILABLE_PODS = 'k8s.replication_controller.available_pods'
+
+        # Deprecated, use `k8s.replicationcontroller.desired_pods` instead.
+        #
+        # @note Stability Level: development
+        # @deprecated {"note": "Replaced by `k8s.replicationcontroller.desired_pods`.", "reason": "renamed", "renamed_to": "k8s.replicationcontroller.desired_pods"}
+        K8S_REPLICATION_CONTROLLER_DESIRED_PODS = 'k8s.replication_controller.desired_pods'
+
+        # Total number of available replica pods (ready for at least minReadySeconds) targeted by this replication controller
+        #
+        # This metric aligns with the `availableReplicas` field of the
+        # [K8s ReplicationControllerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicationcontrollerstatus-v1-core)
+        #
+        # @note Stability Level: development
+        K8S_REPLICATIONCONTROLLER_AVAILABLE_PODS = 'k8s.replicationcontroller.available_pods'
+
+        # Number of desired replica pods in this replication controller
+        #
+        # This metric aligns with the `replicas` field of the
+        # [K8s ReplicationControllerSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicationcontrollerspec-v1-core)
+        #
+        # @note Stability Level: development
+        K8S_REPLICATIONCONTROLLER_DESIRED_PODS = 'k8s.replicationcontroller.desired_pods'
+
+        # The CPU limits in a specific namespace.
+        # The value represents the configured quota limit of the resource in the namespace.
+        #
+        # This metric is retrieved from the `hard` field of the
+        # [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core).
+        #
+        # @note Stability Level: development
+        K8S_RESOURCEQUOTA_CPU_LIMIT_HARD = 'k8s.resourcequota.cpu.limit.hard'
+
+        # The CPU limits in a specific namespace.
+        # The value represents the current observed total usage of the resource in the namespace.
+        #
+        # This metric is retrieved from the `used` field of the
+        # [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core).
+        #
+        # @note Stability Level: development
+        K8S_RESOURCEQUOTA_CPU_LIMIT_USED = 'k8s.resourcequota.cpu.limit.used'
+
+        # The CPU requests in a specific namespace.
+        # The value represents the configured quota limit of the resource in the namespace.
+        #
+        # This metric is retrieved from the `hard` field of the
+        # [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core).
+        #
+        # @note Stability Level: development
+        K8S_RESOURCEQUOTA_CPU_REQUEST_HARD = 'k8s.resourcequota.cpu.request.hard'
+
+        # The CPU requests in a specific namespace.
+        # The value represents the current observed total usage of the resource in the namespace.
+        #
+        # This metric is retrieved from the `used` field of the
+        # [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core).
+        #
+        # @note Stability Level: development
+        K8S_RESOURCEQUOTA_CPU_REQUEST_USED = 'k8s.resourcequota.cpu.request.used'
+
+        # The sum of local ephemeral storage limits in the namespace.
+        # The value represents the configured quota limit of the resource in the namespace.
+        #
+        # This metric is retrieved from the `hard` field of the
+        # [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core).
+        #
+        # @note Stability Level: development
+        K8S_RESOURCEQUOTA_EPHEMERAL_STORAGE_LIMIT_HARD = 'k8s.resourcequota.ephemeral_storage.limit.hard'
+
+        # The sum of local ephemeral storage limits in the namespace.
+        # The value represents the current observed total usage of the resource in the namespace.
+        #
+        # This metric is retrieved from the `used` field of the
+        # [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core).
+        #
+        # @note Stability Level: development
+        K8S_RESOURCEQUOTA_EPHEMERAL_STORAGE_LIMIT_USED = 'k8s.resourcequota.ephemeral_storage.limit.used'
+
+        # The sum of local ephemeral storage requests in the namespace.
+        # The value represents the configured quota limit of the resource in the namespace.
+        #
+        # This metric is retrieved from the `hard` field of the
+        # [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core).
+        #
+        # @note Stability Level: development
+        K8S_RESOURCEQUOTA_EPHEMERAL_STORAGE_REQUEST_HARD = 'k8s.resourcequota.ephemeral_storage.request.hard'
+
+        # The sum of local ephemeral storage requests in the namespace.
+        # The value represents the current observed total usage of the resource in the namespace.
+        #
+        # This metric is retrieved from the `used` field of the
+        # [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core).
+        #
+        # @note Stability Level: development
+        K8S_RESOURCEQUOTA_EPHEMERAL_STORAGE_REQUEST_USED = 'k8s.resourcequota.ephemeral_storage.request.used'
+
+        # The huge page requests in a specific namespace.
+        # The value represents the configured quota limit of the resource in the namespace.
+        #
+        # This metric is retrieved from the `hard` field of the
+        # [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core).
+        #
+        # @note Stability Level: development
+        K8S_RESOURCEQUOTA_HUGEPAGE_COUNT_REQUEST_HARD = 'k8s.resourcequota.hugepage_count.request.hard'
+
+        # The huge page requests in a specific namespace.
+        # The value represents the current observed total usage of the resource in the namespace.
+        #
+        # This metric is retrieved from the `used` field of the
+        # [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core).
+        #
+        # @note Stability Level: development
+        K8S_RESOURCEQUOTA_HUGEPAGE_COUNT_REQUEST_USED = 'k8s.resourcequota.hugepage_count.request.used'
+
+        # The memory limits in a specific namespace.
+        # The value represents the configured quota limit of the resource in the namespace.
+        #
+        # This metric is retrieved from the `hard` field of the
+        # [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core).
+        #
+        # @note Stability Level: development
+        K8S_RESOURCEQUOTA_MEMORY_LIMIT_HARD = 'k8s.resourcequota.memory.limit.hard'
+
+        # The memory limits in a specific namespace.
+        # The value represents the current observed total usage of the resource in the namespace.
+        #
+        # This metric is retrieved from the `used` field of the
+        # [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core).
+        #
+        # @note Stability Level: development
+        K8S_RESOURCEQUOTA_MEMORY_LIMIT_USED = 'k8s.resourcequota.memory.limit.used'
+
+        # The memory requests in a specific namespace.
+        # The value represents the configured quota limit of the resource in the namespace.
+        #
+        # This metric is retrieved from the `hard` field of the
+        # [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core).
+        #
+        # @note Stability Level: development
+        K8S_RESOURCEQUOTA_MEMORY_REQUEST_HARD = 'k8s.resourcequota.memory.request.hard'
+
+        # The memory requests in a specific namespace.
+        # The value represents the current observed total usage of the resource in the namespace.
+        #
+        # This metric is retrieved from the `used` field of the
+        # [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core).
+        #
+        # @note Stability Level: development
+        K8S_RESOURCEQUOTA_MEMORY_REQUEST_USED = 'k8s.resourcequota.memory.request.used'
+
+        # The object count limits in a specific namespace.
+        # The value represents the configured quota limit of the resource in the namespace.
+        #
+        # This metric is retrieved from the `hard` field of the
+        # [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core).
+        #
+        # @note Stability Level: development
+        K8S_RESOURCEQUOTA_OBJECT_COUNT_HARD = 'k8s.resourcequota.object_count.hard'
+
+        # The object count limits in a specific namespace.
+        # The value represents the current observed total usage of the resource in the namespace.
+        #
+        # This metric is retrieved from the `used` field of the
+        # [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core).
+        #
+        # @note Stability Level: development
+        K8S_RESOURCEQUOTA_OBJECT_COUNT_USED = 'k8s.resourcequota.object_count.used'
+
+        # The total number of PersistentVolumeClaims that can exist in the namespace.
+        # The value represents the configured quota limit of the resource in the namespace.
+        #
+        # This metric is retrieved from the `hard` field of the
+        # [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core).
+        #
+        # The `k8s.storageclass.name` should be required when a resource quota is defined for a specific
+        # storage class.
+        #
+        # @note Stability Level: development
+        K8S_RESOURCEQUOTA_PERSISTENTVOLUMECLAIM_COUNT_HARD = 'k8s.resourcequota.persistentvolumeclaim_count.hard'
+
+        # The total number of PersistentVolumeClaims that can exist in the namespace.
+        # The value represents the current observed total usage of the resource in the namespace.
+        #
+        # This metric is retrieved from the `used` field of the
+        # [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core).
+        #
+        # The `k8s.storageclass.name` should be required when a resource quota is defined for a specific
+        # storage class.
+        #
+        # @note Stability Level: development
+        K8S_RESOURCEQUOTA_PERSISTENTVOLUMECLAIM_COUNT_USED = 'k8s.resourcequota.persistentvolumeclaim_count.used'
+
+        # The storage requests in a specific namespace.
+        # The value represents the configured quota limit of the resource in the namespace.
+        #
+        # This metric is retrieved from the `hard` field of the
+        # [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core).
+        #
+        # The `k8s.storageclass.name` should be required when a resource quota is defined for a specific
+        # storage class.
+        #
+        # @note Stability Level: development
+        K8S_RESOURCEQUOTA_STORAGE_REQUEST_HARD = 'k8s.resourcequota.storage.request.hard'
+
+        # The storage requests in a specific namespace.
+        # The value represents the current observed total usage of the resource in the namespace.
+        #
+        # This metric is retrieved from the `used` field of the
+        # [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core).
+        #
+        # The `k8s.storageclass.name` should be required when a resource quota is defined for a specific
+        # storage class.
+        #
+        # @note Stability Level: development
+        K8S_RESOURCEQUOTA_STORAGE_REQUEST_USED = 'k8s.resourcequota.storage.request.used'
+
+        # The number of replica pods created by the statefulset controller from the statefulset version indicated by currentRevision
+        #
+        # This metric aligns with the `currentReplicas` field of the
+        # [K8s StatefulSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps).
+        #
+        # @note Stability Level: development
+        K8S_STATEFULSET_CURRENT_PODS = 'k8s.statefulset.current_pods'
+
+        # Number of desired replica pods in this statefulset
+        #
+        # This metric aligns with the `replicas` field of the
+        # [K8s StatefulSetSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetspec-v1-apps).
+        #
+        # @note Stability Level: development
+        K8S_STATEFULSET_DESIRED_PODS = 'k8s.statefulset.desired_pods'
+
+        # The number of replica pods created for this statefulset with a Ready Condition
+        #
+        # This metric aligns with the `readyReplicas` field of the
+        # [K8s StatefulSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps).
+        #
+        # @note Stability Level: development
+        K8S_STATEFULSET_READY_PODS = 'k8s.statefulset.ready_pods'
+
+        # Number of replica pods created by the statefulset controller from the statefulset version indicated by updateRevision
+        #
+        # This metric aligns with the `updatedReplicas` field of the
+        # [K8s StatefulSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps).
+        #
+        # @note Stability Level: development
+        K8S_STATEFULSET_UPDATED_PODS = 'k8s.statefulset.updated_pods'
+
+        # @!endgroup
+      end
+    end
+  end
+end
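The constants in this generated module are plain strings, so they can be passed anywhere a metric name is expected. The following is a minimal sketch, assuming the per-namespace entrypoint (k8s.rb above) requires both attributes.rb and metrics.rb, and assuming the separately shipped OpenTelemetry Metrics API gem; the meter_provider/create_counter calls are illustrative and are not part of this package.

# frozen_string_literal: true

require 'opentelemetry/semconv/incubating/k8s'

# The generated constants are plain strings:
OpenTelemetry::SemConv::Incubating::K8S::K8S_POD_CPU_USAGE
# => "k8s.pod.cpu.usage"

# Illustrative only: using a constant as the instrument name with the
# separately shipped OpenTelemetry Metrics API (assumed to be installed).
meter = OpenTelemetry.meter_provider.meter('k8s-monitor')
network_errors = meter.create_counter(
  OpenTelemetry::SemConv::Incubating::K8S::K8S_NODE_NETWORK_ERRORS,
  unit: '{error}',
  description: 'Node network errors'
)
network_errors.add(1, attributes: { 'k8s.node.name' => 'node-a' })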