Page MenuHomePhorge

No OneTemporary

Authored By
Unknown
Size
8 KB
Referenced Files
None
Subscribers
None
diff --git a/helm/templates/monitoring-cronjob.yaml b/helm/templates/monitoring-cronjob.yaml
index dc54b46..bd98a49 100644
--- a/helm/templates/monitoring-cronjob.yaml
+++ b/helm/templates/monitoring-cronjob.yaml
@@ -1,148 +1,150 @@
{{- if and .Values.prometheus.enabled .Values.serviceAccounts.monitoring1.user -}}
apiVersion: batch/v1
kind: CronJob
metadata:
  annotations:
    alpha.image.policy.openshift.io/resolve-names: '*'
  labels:
    app: monitoring
    app.kubernetes.io/name: monitoring
    app.kubernetes.io/part-of: kolab-app
  name: monitoring
spec:
  schedule: "*/20 * * * *"
  successfulJobsHistoryLimit: 1
  failedJobsHistoryLimit: 2
  startingDeadlineSeconds: 60
  suspend: false
  # The job runs every 20 minutes; if a previous run is still active,
  # replace it instead of letting overlapping runs accumulate.
  concurrencyPolicy: Replace
  jobTemplate:
    spec:
      ttlSecondsAfterFinished: 172800  # 48 hours
      template:
        metadata:
          labels:
            app: monitoring
          annotations:
            # Roll the pod template whenever the rendered config/secret change.
            checksum/config: {{ include (print $.Template.BasePath "/kolab-configmap.yaml") . | sha256sum }}
            checksum/secret: {{ include (print $.Template.BasePath "/kolab-secret.yaml") . | sha256sum }}
        spec:
          {{- if .Values.image.pullSecret }}
          imagePullSecrets:
          - name: registry-pull-secret
          {{- end }}
          {{- if .Values.monitoring }}
          hostAliases:
          {{- toYaml .Values.monitoring.hostAliases | nindent 12 }}
          {{- end }}
          volumes:
          - name: monitoring-scripts
            configMap:
              name: monitoring-scripts
              # Octal 0777 (YAML 1.1) == 511 decimal; scripts must be executable.
              defaultMode: 0777
          containers:
          - name: mailtransporttest
            image: {{ .Values.image.utilsImage }}
            imagePullPolicy: {{ .Values.image.pullPolicy }}
            command: ["/bin/bash"]
            args: ["/monitoring-scripts/infrastructurecheck.sh"]
            volumeMounts:
            - mountPath: /monitoring-scripts
              name: monitoring-scripts
            env:
            - name: APP_DOMAIN
              valueFrom:
                configMapKeyRef:
                  key: APP_DOMAIN
                  name: kolab-config
            {{- if .Values.prometheus.customDavDomain }}
            - name: CUSTOM_DAV_DOMAIN
              value: "{{ .Values.prometheus.customDavDomain }}"
            {{- end }}
            - name: APP_WEBSITE_DOMAIN
              valueFrom:
                configMapKeyRef:
                  key: APP_WEBSITE_DOMAIN
                  name: kolab-config
            - name: MONITORING1_USER
              valueFrom:
                secretKeyRef:
                  key: MONITORING1_USER
                  name: kolab-config-secret
            - name: MONITORING1_PASSWORD
              valueFrom:
                secretKeyRef:
                  key: MONITORING1_PASSWORD
                  name: kolab-config-secret
            - name: MONITORING2_USER
              valueFrom:
                secretKeyRef:
                  key: MONITORING2_USER
                  name: kolab-config-secret
            - name: MONITORING2_PASSWORD
              valueFrom:
                secretKeyRef:
                  key: MONITORING2_PASSWORD
                  name: kolab-config-secret
          restartPolicy: Never
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: monitoring-scripts
data:
  # Self-test script executed by the monitoring CronJob container; pushes
  # gauge metrics to the pushgateway service on port 9091.
  # NOTE(review): curl uses -k against a plain-http URL — the flag is a no-op
  # there; confirm whether the pushgateway is meant to be reached via https.
  infrastructurecheck.sh: |
    #!/bin/bash
    HOST="monitoring-cronjob"
    function check_success() {
        if [[ "$1" == "0" ]]; then
            echo "1";
        else
            echo "0";
        fi;
    }
    EPOCH=$(date +"%s")
    DOMAIN="${APP_WEBSITE_DOMAIN:-$APP_DOMAIN}"
    DAV_DOMAIN="${CUSTOM_DAV_DOMAIN:-$DOMAIN}"
    TESTUSER1="$MONITORING1_USER"
    PASSWORD1="$MONITORING1_PASSWORD"
    TESTUSER2="$MONITORING2_USER"
    PASSWORD2="$MONITORING2_PASSWORD"
    timeout -s9 1m ./mailtransporttest.py --sender-username "$TESTUSER1" --sender-password "$PASSWORD1" --sender-host {{ .Values.prometheus.submissionHost }} --sender-port {{ .Values.prometheus.submissionPort }} --recipient-username "$TESTUSER2" --recipient-password "$PASSWORD2" --recipient-host {{ .Values.prometheus.imapHost }} --starttls {{ .Values.prometheus.monitoringMailtransporttestExtraFlags }} --verbose
    MAILTRANSPORT_INTERNAL_STATUS=$(check_success $?)
    {{- if .Values.kolab.withMailfilter }}
    timeout -s9 1m ./mailtransporttest.py --sender-username "$TESTUSER1" --sender-password "$PASSWORD1" --sender-host {{ .Values.prometheus.submissionHost }} --sender-port {{ .Values.prometheus.submissionPort }} --recipient-username "$TESTUSER2" --recipient-password "$PASSWORD2" --recipient-host {{ .Values.prometheus.imapHost }} --starttls {{ .Values.prometheus.monitoringMailtransporttestExtraFlags }} --testmessage --verbose
    MAILTRANSPORT_TESTMESSAGE_STATUS=$(check_success $?)
    {{- end }}
    timeout -s9 1m ./kolabendpointtester.py --user "$TESTUSER1" --password "$PASSWORD1" --imap {{ .Values.prometheus.imapHost }} --activesync "$DOMAIN" --smtp {{ .Values.prometheus.submissionHost }} --dav "https://$DAV_DOMAIN/.well-known/caldav" {{ .Values.prometheus.monitoringEndpointtesterExtraFlags }} --verbose
    ENDPOINT_STATUS=$(check_success $?)
    timeout -s9 1m ./activesynccli.py --host $DOMAIN --user "$TESTUSER1" --password "$PASSWORD1" --verbose list
    ACTIVESYNC_STATUS=$(check_success $?)
    METRICS=$(
    cat <<EOF
    kolab_infrastructure_check_timestamp $EPOCH
    # HELP kolab_infrastructure_check Displays whether or not the infrastructure check was a success
    # TYPE kolab_infrastructure_check gauge
    kolab_infrastructure_check{host="$HOST", testname="kolab endpoint tester", instance="$DOMAIN"} $ENDPOINT_STATUS
    kolab_infrastructure_check{host="$HOST", testname="mailtransport", instance="$DOMAIN"} $MAILTRANSPORT_INTERNAL_STATUS
    {{- if .Values.kolab.withMailfilter }}
    kolab_infrastructure_check{host="$HOST", testname="mailtransport testmessage", instance="$DOMAIN"} $MAILTRANSPORT_TESTMESSAGE_STATUS
    {{- end }}
    kolab_infrastructure_check{host="$HOST", testname="activesync", instance="$DOMAIN"} $ACTIVESYNC_STATUS
    EOF
    )
    echo "$METRICS"
    echo "$METRICS" | curl -k --data-binary @- http://pushgateway:9091/metrics/job/selftest/host/$HOST
{{- end }}
diff --git a/helm/templates/scheduler-cronjob.yaml b/helm/templates/scheduler-cronjob.yaml
index 55454f3..2385ebb 100644
--- a/helm/templates/scheduler-cronjob.yaml
+++ b/helm/templates/scheduler-cronjob.yaml
@@ -1,52 +1,54 @@
{{- if .Values.horizon.enabled -}}
apiVersion: batch/v1
kind: CronJob
metadata:
  annotations:
    alpha.image.policy.openshift.io/resolve-names: '*'
  labels:
    app: scheduler
    app.kubernetes.io/name: scheduler
    app.kubernetes.io/part-of: kolab-app
  name: scheduler
spec:
  schedule: "0,30 * * * *"
  successfulJobsHistoryLimit: {{ .Values.horizon.successfulJobsHistoryLimit }}
  failedJobsHistoryLimit: 3
  startingDeadlineSeconds: 60
  suspend: false
  # The job runs twice per hour; replace a still-running previous job
  # rather than letting overlapping scheduler runs pile up.
  concurrencyPolicy: Replace
  jobTemplate:
    spec:
      ttlSecondsAfterFinished: 172800  # 48 hours
      template:
        metadata:
          labels:
            app: scheduler
          annotations:
            # Roll the pod template whenever the rendered config/secret change.
            checksum/config: {{ include (print $.Template.BasePath "/kolab-configmap.yaml") . | sha256sum }}
            checksum/secret: {{ include (print $.Template.BasePath "/kolab-secret.yaml") . | sha256sum }}
        spec:
          {{- if .Values.image.pullSecret }}
          imagePullSecrets:
          - name: registry-pull-secret
          {{- end }}
          containers:
          - name: scheduler
            image: {{ .Values.image.kolabImage }}
            imagePullPolicy: {{ .Values.image.pullPolicy }}
            # Run artisan schedule:run, but wait for all spawned php
            # subprocesses to exit before the container terminates.
            # https://www.jeffgeerling.com/blog/2019/running-php-artisan-schedulerun-laravel-kubernetes-cronjobs
            command: ["/bin/sh"]
            args: ["-c", "php artisan schedule:run 2>&1 && while pgrep php > /dev/null; do sleep 1; done"]
            envFrom:
            - configMapRef:
                name: kolab-config
            - secretRef:
                name: kolab-config-secret
            env:
            {{- if .Values.kolab.tenantId }}
            - name: APP_TENANT_ID
              value: "{{ .Values.kolab.tenantId }}"
            {{- end }}
            {{- include "kolab.env" .Values.kolab | indent 12 }}
          restartPolicy: OnFailure
{{- end }}

File Metadata

Mime Type
text/x-diff
Expires
Sat, Apr 4, 8:48 AM (2 w, 5 d ago)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
18823321
Default Alt Text
(8 KB)

Event Timeline