Skip to content

Commit 9342065

Browse files
replicated-ci-kurl and laverya
authored Feb 11, 2025
Automated Prometheus version update 0.80.0-69.2.0 (#5518)
Create new Prometheus version Co-authored-by: laverya <[email protected]>
1 parent c153e37 commit 9342065

17 files changed

+74450
-0
lines changed
 
+9
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
image alertmanager quay.io/prometheus/alertmanager:v0.28.0
image grafana docker.io/grafana/grafana:11.5.1
image k8s-sidecar quay.io/kiwigrid/k8s-sidecar:1.28.0
image kube-state-metrics registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.14.0
image node-exporter quay.io/prometheus/node-exporter:v1.8.2
image prometheus quay.io/prometheus/prometheus:v3.1.0
image prometheus-adapter registry.k8s.io/prometheus-adapter/prometheus-adapter:v0.12.0
image prometheus-config-reloader quay.io/prometheus-operator/prometheus-config-reloader:v0.80.0
image prometheus-operator quay.io/prometheus-operator/prometheus-operator:v0.80.0

‎addons/prometheus/0.80.0-69.2.0/crds/crds.yaml

+67,101
Large diffs are not rendered by default.
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
1+
resources:
2+
- crds.yaml
3+
4+
patchesJson6902:
5+
- target:
6+
group: "apiextensions.k8s.io"
7+
version: v1 # apiVersion
8+
kind: CustomResourceDefinition
9+
name: alertmanagers.monitoring.coreos.com
10+
path: preserveUnknown.yaml
11+
- target:
12+
group: "apiextensions.k8s.io"
13+
version: v1 # apiVersion
14+
kind: CustomResourceDefinition
15+
name: prometheuses.monitoring.coreos.com
16+
path: preserveUnknown.yaml
17+
- target:
18+
group: "apiextensions.k8s.io"
19+
version: v1 # apiVersion
20+
kind: CustomResourceDefinition
21+
name: podmonitors.monitoring.coreos.com
22+
path: preserveUnknown.yaml
23+
- target:
24+
group: "apiextensions.k8s.io"
25+
version: v1 # apiVersion
26+
kind: CustomResourceDefinition
27+
name: servicemonitors.monitoring.coreos.com
28+
path: preserveUnknown.yaml
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
- op: add
2+
path: "/spec/preserveUnknownFields"
3+
value: false
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
apiVersion: troubleshoot.sh/v1beta2
2+
kind: HostPreflight
3+
metadata:
4+
name: prometheus
5+
spec:
6+
collectors:
7+
- tcpPortStatus:
8+
collectorName: "Node Exporter Metrics Server TCP Port Status"
9+
port: 9100
10+
exclude: '{{kurl .IsUpgrade }}'
11+
12+
analyzers:
13+
- tcpPortStatus:
14+
checkName: "Node Exporter Metrics Server TCP Port Status"
15+
collectorName: "Node Exporter Metrics Server TCP Port Status"
16+
exclude: '{{kurl .IsUpgrade }}'
17+
outcomes:
18+
- fail:
19+
when: "connection-refused"
20+
message: Connection to port 9100 was refused. This is likely to be a routing problem since this preflight configures a test server to listen on this port.
21+
- warn:
22+
when: "address-in-use"
23+
message: Another process was already listening on port 9100.
24+
- fail:
25+
when: "connection-timeout"
26+
message: Timed out connecting to port 9100. Check your firewall.
27+
- fail:
28+
when: "error"
29+
message: Unexpected port status
30+
- pass:
31+
when: "connected"
32+
message: Port 9100 is available
33+
- warn:
34+
message: Unexpected port status
+155
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,155 @@
1+
# shellcheck disable=SC2148
2+
3+
function prometheus() {
4+
local src="$DIR/addons/prometheus/0.80.0-69.2.0"
5+
local dst="$DIR/kustomize/prometheus"
6+
7+
local operatorsrc="$src/operator"
8+
local operatordst="$dst/operator"
9+
10+
local crdssrc="$src/crds"
11+
local crdsdst="$dst/crds"
12+
13+
cp -r "$operatorsrc/" "$operatordst/"
14+
cp -r "$crdssrc/" "$crdsdst/"
15+
16+
grafana_admin_secret "$src" "$operatordst"
17+
18+
# Server-side apply is needed here because the CRDs are too large to keep in metadata
19+
# https://github.com/prometheus-community/helm-charts/issues/1500
20+
# Also delete any existing last-applied-configuration annotations for pre-122 clusters
21+
kubectl get crd | grep coreos.com | awk '{ print $1 }' | xargs -I {} kubectl patch crd {} --type=json -p='[{"op": "remove", "path": "/metadata/annotations/kubectl.kubernetes.io~1last-applied-configuration"}]' 2>/dev/null || true
22+
kubectl apply --server-side --force-conflicts -k "$crdsdst/"
23+
spinner_until -1 prometheus_crd_ready
24+
25+
prometheus_rook_ceph "$operatordst"
26+
prometheus_longhorn "$operatordst"
27+
28+
# remove deployments and daemonsets that had labelselectors change (as those are immutable)
29+
kubectl delete deployment -n monitoring kube-state-metrics || true
30+
kubectl delete daemonset -n monitoring node-exporter || true
31+
kubectl delete deployment -n monitoring grafana || true
32+
kubectl delete deployment -n monitoring prometheus-adapter || true
33+
34+
# remove things that had names change during upgrades
35+
kubectl delete alertmanager -n monitoring main || true
36+
37+
# remove services that had a clusterip change
38+
kubectl delete service -n monitoring kube-state-metrics || true
39+
kubectl delete service -n monitoring prometheus-operator || true
40+
41+
# remove nodeport services that had names change
42+
kubectl delete service -n monitoring grafana || true
43+
kubectl delete service -n monitoring alertmanager-main || true
44+
kubectl delete service -n monitoring prometheus-k8s || true
45+
46+
# if the prometheus-node-exporter daemonset exists and has a release labelSelector set, delete it
47+
if kubernetes_resource_exists monitoring daemonset prometheus-node-exporter; then
48+
local promNodeExporterLabelSelector=$(kubectl get daemonset -n monitoring prometheus-node-exporter --output="jsonpath={.spec.selector.matchLabels.release}")
49+
if [ -n "$promNodeExporterLabelSelector" ]; then
50+
kubectl delete daemonset -n monitoring prometheus-node-exporter || true
51+
fi
52+
fi
53+
54+
# if the prometheus-operator deployment exists and has the wrong labelSelectors set, delete it
55+
if kubernetes_resource_exists monitoring deployment prometheus-operator; then
56+
local promOperatorLabelSelector=$(kubectl get deployment -n monitoring prometheus-operator --output="jsonpath={.spec.selector.matchLabels.release}") || true
57+
if [ -n "$promOperatorLabelSelector" ]; then
58+
kubectl delete deployment -n monitoring prometheus-operator || true
59+
fi
60+
61+
promOperatorLabelSelector=$(kubectl get deployment -n monitoring prometheus-operator --output="jsonpath={.spec.selector.matchLabels.app\.kubernetes\.io/component}") || true
62+
if [ -n "$promOperatorLabelSelector" ]; then
63+
kubectl delete deployment -n monitoring prometheus-operator || true
64+
fi
65+
fi
66+
67+
# the metrics service has been renamed to v1beta1.custom.metrics.k8s.io, delete the old
68+
if kubectl get --no-headers apiservice v1beta1.metrics.k8s.io 2>/dev/null | grep -q 'monitoring/prometheus-adapter' ; then
69+
kubectl delete apiservice v1beta1.metrics.k8s.io
70+
fi
71+
72+
# change ClusterIP services to NodePorts if required
73+
if [ -z "$PROMETHEUS_SERVICE_TYPE" ] || [ "$PROMETHEUS_SERVICE_TYPE" = "NodePort" ] ; then
74+
cp "$src/nodeport-services.yaml" "$operatordst"
75+
insert_patches_strategic_merge "$operatordst/kustomization.yaml" nodeport-services.yaml
76+
fi
77+
78+
kubectl apply -k "$operatordst/"
79+
}
80+
81+
GRAFANA_ADMIN_USER=
82+
GRAFANA_ADMIN_PASS=
83+
function grafana_admin_secret() {
84+
if kubernetes_resource_exists monitoring secret grafana-admin; then
85+
return 0
86+
fi
87+
88+
local src="$1"
89+
local grafanadst="$2"
90+
91+
GRAFANA_ADMIN_USER=admin
92+
GRAFANA_ADMIN_PASS=$(< /dev/urandom tr -dc A-Za-z0-9 | head -c9)
93+
94+
insert_resources "$grafanadst/kustomization.yaml" grafana-secret.yaml
95+
96+
render_yaml_file "$src/tmpl-grafana-secret.yaml" > "$grafanadst/grafana-secret.yaml"
97+
}
98+
99+
function prometheus_outro() {
100+
printf "\n"
101+
printf "\n"
102+
if [ -z "$PROMETHEUS_SERVICE_TYPE" ] || [ "$PROMETHEUS_SERVICE_TYPE" = "NodePort" ] ; then
103+
printf "The UIs of Prometheus, Grafana and Alertmanager have been exposed on NodePorts ${GREEN}30900${NC}, ${GREEN}30902${NC} and ${GREEN}30903${NC} respectively.\n"
104+
else
105+
printf "The UIs of Prometheus, Grafana and Alertmanager have been exposed on internal ClusterIP services.\n"
106+
fi
107+
if [ -n "$GRAFANA_ADMIN_PASS" ]; then
108+
printf "\n"
109+
printf "To access Grafana use the generated user:password of ${GREEN}${GRAFANA_ADMIN_USER:-admin}:${GRAFANA_ADMIN_PASS} .${NC}\n"
110+
fi
111+
printf "\n"
112+
printf "\n"
113+
}
114+
115+
function prometheus_crd_ready() {
116+
# https://github.com/coreos/kube-prometheus#quickstart
117+
if ! kubectl get customresourcedefinitions servicemonitors.monitoring.coreos.com &>/dev/null; then
118+
return 1
119+
fi
120+
if ! kubectl get customresourcedefinitions servicemonitors.monitoring.coreos.com -o yaml | grep "enableHttp2" &>/dev/null; then
121+
return 1
122+
fi
123+
if ! kubectl get servicemonitors --all-namespaces &>/dev/null; then
124+
return 1
125+
fi
126+
if ! kubectl get customresourcedefinitions prometheuses.monitoring.coreos.com &>/dev/null; then
127+
return 1
128+
fi
129+
if ! kubectl get prometheuses --all-namespaces &>/dev/null; then
130+
return 1
131+
fi
132+
if ! kubectl get customresourcedefinitions prometheusagents.monitoring.coreos.com &>/dev/null; then
133+
return 1
134+
fi
135+
if ! kubectl get prometheusagents --all-namespaces &>/dev/null; then
136+
return 1
137+
fi
138+
return 0
139+
}
140+
141+
function prometheus_rook_ceph() {
142+
local dst="$1"
143+
144+
if kubectl get ns | grep -q rook-ceph; then
145+
insert_resources "$dst/kustomization.yaml" rook-ceph-rolebindings.yaml
146+
fi
147+
}
148+
149+
function prometheus_longhorn() {
150+
local dst="$1"
151+
152+
if kubectl get ns | grep -q longhorn-system; then
153+
insert_resources "$dst/kustomization.yaml" longhorn.yaml
154+
fi
155+
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,37 @@
1+
apiVersion: v1
2+
kind: Service
3+
metadata:
4+
name: prometheus-alertmanager
5+
namespace: monitoring
6+
spec:
7+
ports:
8+
- name: web
9+
port: 9093
10+
protocol: TCP
11+
nodePort: 30903
12+
type: "NodePort"
13+
---
14+
apiVersion: v1
15+
kind: Service
16+
metadata:
17+
name: prometheus-k8s
18+
namespace: monitoring
19+
spec:
20+
ports:
21+
- name: web
22+
port: 9090
23+
nodePort: 30900
24+
type: "NodePort"
25+
---
26+
apiVersion: v1
27+
kind: Service
28+
metadata:
29+
name: grafana
30+
namespace: monitoring
31+
spec:
32+
type: "NodePort"
33+
ports:
34+
- name: service
35+
port: 80
36+
protocol: TCP
37+
nodePort: 30902

0 commit comments

Comments
 (0)