Page Menu
Home
Phorge
Search
Configure Global Search
Log In
Files
F117754768
No One
Temporary
Actions
View File
Edit File
Delete File
View Transforms
Subscribe
Flag For Later
Award Token
Authored By
Unknown
Size
7 KB
Referenced Files
None
Subscribers
None
View Options
diff --git a/helm/templates/external-syslog-svc.yaml b/helm/templates/external-syslog-svc.yaml
new file mode 100644
index 0000000..e96e136
--- /dev/null
+++ b/helm/templates/external-syslog-svc.yaml
@@ -0,0 +1,42 @@
+{{- if .Values.vector.enabled -}}
+kind: Service
+apiVersion: v1
+metadata:
+ name: external-syslog
+{{- if .Values.metallb.addressPool }}
+ annotations:
+ metallb.universe.tf/address-pool: {{ .Values.metallb.addressPool }}
+ metallb.universe.tf/allow-shared-ip: "{{ .Release.Name }}-shared-ip"
+ labels:
+ IPAddressPool: l2
+{{- end }}
+spec:
+ ipFamilies:
+ - IPv4
+ ports:
+ - name: syslog
+ protocol: TCP
+ port: 514
+ targetPort: 9000
+ {{- if .Values.nodePorts }}
+ nodePort: {{ .Values.nodePorts.syslog }}
+ {{- end }}
+ internalTrafficPolicy: Cluster
+ # Required to preserve the client IP
+ externalTrafficPolicy: Local
+ ipFamilyPolicy: SingleStack
+ {{- if .Values.nodePorts }}
+ type: NodePort
+ {{- else }}
+ type: LoadBalancer
+ {{- end }}
+{{- if .Values.vector.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.vector.loadBalancerIP }}
+{{- end }}
+{{- if .Values.vector.externalIP }}
+ externalIPs:
+ - {{ .Values.vector.externalIP }}
+{{- end }}
+ selector:
+ deployment: vector
+{{- end }}
diff --git a/helm/templates/vector-configmap.yaml b/helm/templates/vector-configmap.yaml
index 194daa9..40edf31 100644
--- a/helm/templates/vector-configmap.yaml
+++ b/helm/templates/vector-configmap.yaml
@@ -1,192 +1,207 @@
{{- if .Values.vector.enabled -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: vector
{{- if not .Values.openshift }}
namespace: kube-system
{{- end }}
labels:
app: vector
data:
vector.yaml: |
data_dir: "/data"
api:
enabled: true
{{- if .Values.openshift }}
sources:
+ syslog:
+ type: syslog
+ address: 0.0.0.0:9000
+ mode: tcp
vector_metrics:
type: internal_metrics
openshift:
type: http_server
address: "0.0.0.0:8000"
encoding: json
transforms:
input:
type: remap
inputs:
- openshift
+ - syslog
source: |
.log = del(.message)
.container_name = del(.kubernetes.container_name)
.container_id = del(.kubernetes.container_id)
.namespace_name = del(.kubernetes.namespace_name)
- .pod_name = del(.kubernetes.pod_name)
+ # Fallback for syslog messages
+ .pod_name = del(.kubernetes.pod_name) || .host
.pod_owner = del(.kubernetes.pod_owner)
.pod_node_name = del(.hostname)
del(.kubernetes)
del(.openshift)
.user = "null"
.component = "null"
+ # For syslog
+ if exists(.appname) && (is_empty(string(.container_name) ?? "")) {
+ if contains!(.appname, "imap") {
+ .container_name = "imap"
+ }
+ if contains!(.appname, "postfix") {
+ .container_name = "postfix"
+ }
+ }
{{- else }}
sources:
vector_metrics:
type: internal_metrics
kubernetes:
type: kubernetes_logs
extra_namespace_label_selector: "kubernetes.io/metadata.name=kolab"
ignore_older_secs: 600
# This is critical on a busy system, so we don't miss files.
# Kubernetes starts to rotate files quickly with a lot of activity (see /var/log/pods/kolab_roundcube-.*/roundcube/ on a worker).
# See also: https://github.com/vectordotdev/vector/issues/7934
glob_minimum_cooldown_ms: 500
rotate_wait_secs: 5
oldest_first: true
node_metrics:
type: host_metrics
# Without cgroups
collectors: [cpu, disk, filesystem, load, host, memory, network]
scrape_interval_secs: 15
# bin_fmt workaround according to https://vector.dev/docs/reference/configuration/sources/host_metrics/#filesystem
filesystem:
mountpoints:
excludes: ["*/proc/sys/fs/binfmt_misc"]
devices:
excludes: [binfmt_misc]
filesystems:
excludes: [binfmt_misc]
transforms:
input:
type: remap
inputs:
- kubernetes
source: |
# In seconds
age = to_float(now()) - to_float(parse_timestamp(.timestamp, "%+") ?? now())
if age > 60 * 60 {
abort
}
.log = del(.message)
.container_name = del(.kubernetes.container_name)
.container_id = del(.kubernetes.container_id)
.namespace_name = del(.kubernetes.pod_namespace)
.pod_name = del(.kubernetes.pod_name)
.pod_owner = del(.kubernetes.pod_owner)
.pod_node_name = del(.kubernetes.pod_node_name)
del(.file)
del(.kubernetes)
.user = "null"
.component = "null"
{{- end }}
sinks:
# file:
# type: file
# encoding:
# codec: logfmt
# inputs:
# - parse_roundcube
# path: /tmp/roundcube-%Y-%m-%d.log
#
# stdout:
# type: console
# inputs:
# - openshift
# # - parse_proxy
# # - parse_roundcube
# # - parse_kolab
# # - parse_imap
# # - parse_postfix
# # - apps._unmatched
# encoding:
# codec: json
# json:
# pretty: true
{{- if not .Values.openshift }}
prometheus:
type: prometheus_exporter
inputs:
- vector_metrics
- node_metrics
{{- end }}
{{- if .Values.loki.enabled }}
loki:
type: loki
buffer:
type: memory
max_events: 500
when_full: drop_newest
inputs:
- parse_proxy
- parse_roundcube
- parse_kolab
- parse_imap
- parse_postfix
- parse_unmatched
{{- if .Values.openshift }}
# Running in the same namespace
endpoint: http://loki:3100
{{- else }}
# Running in a different namespace
endpoint: http://loki.kolab:3100
{{- end }}
compression: "gzip"
healthcheck:
enabled: true
encoding:
codec: "json"
remove_label_fields: true
labels:
# We have to escape the helm templating syntax, because it's the same as the vector template syntax
pod_name: "{{ printf "{{ .pod_name }}" }}"
container_name: "{{ printf "{{ .container_name }}" }}"
namespace_name: "{{ printf "{{ .namespace_name }}" }}"
user: "{{ printf "{{ .user }}" }}"
component: "{{ printf "{{ .component }}" }}"
{{- end }}
{{- if .Values.victorialogs.enabled }}
victorialogs:
inputs:
- parse_proxy
- parse_roundcube
- parse_kolab
- parse_imap
- parse_postfix
- parse_unmatched
type: elasticsearch
endpoints:
{{- if .Values.openshift }}
# Running in the same namespace
- http://victorialogs:9428/insert/elasticsearch/
{{- else }}
# Running in a different namespace
- http://victorialogs.kolab:9428/insert/elasticsearch/
{{- end }}
api_version: v8
compression: gzip
# Concurrency seems problematic.
# We end up with 3k messages in flight (vector_adaptive_concurrency_in_flight_sum),
# and it seems like messages are being lost (though I never found the culprit), and
      # a lack of sorting may have been the problem. Seems to work well without concurrency though.
request:
concurrency: none
healthcheck:
enabled: false
query:
_msg_field: log
_time_field: timestamp
_stream_fields: pod_name,container_name,component,user
{{- end }}
{{- end }}
File Metadata
Details
Attached
Mime Type
text/x-diff
Expires
Sat, Apr 4, 7:19 AM (1 w, 4 d ago)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
18822988
Default Alt Text
(7 KB)
Attached To
Mode
R114 kolab-infrastructure
Attached
Detach File
Event Timeline