Page MenuHomePhorge

vector-configmap.yaml
No OneTemporary

Authored By
Unknown
Size
5 KB
Referenced Files
None
Subscribers
None

vector-configmap.yaml

{{- if .Values.vector.enabled -}}
# ConfigMap holding the Vector agent configuration (vector.yaml).
# On OpenShift, logs arrive via an HTTP server source; elsewhere Vector tails
# pod logs itself (kubernetes_logs) and also scrapes host metrics.
apiVersion: v1
kind: ConfigMap
metadata:
  name: vector
  {{- if not .Values.openshift }}
  namespace: kube-system
  {{- end }}
  labels:
    app: vector
data:
  vector.yaml: |
    data_dir: "/data"
    api:
      enabled: true
    {{- if .Values.openshift }}
    sources:
      vector_metrics:
        type: internal_metrics
      openshift:
        type: http_server
        address: "0.0.0.0:8000"
        encoding: json
    transforms:
      input:
        type: remap
        inputs:
          - openshift
        source: |
          .log = del(.message)
          .container_name = del(.kubernetes.container_name)
          .container_id = del(.kubernetes.container_id)
          .namespace_name = del(.kubernetes.namespace_name)
          .pod_name = del(.kubernetes.pod_name)
          .pod_owner = del(.kubernetes.pod_owner)
          .pod_node_name = del(.hostname)
          del(.kubernetes)
          del(.openshift)
          .user = "null"
          .component = "null"
    {{- else }}
    sources:
      vector_metrics:
        type: internal_metrics
      kubernetes:
        type: kubernetes_logs
        extra_namespace_label_selector: "kubernetes.io/metadata.name=kolab"
        ignore_older_secs: 600
        # This is critical on a busy system, so we don't miss files.
        # Kubernetes starts to rotate files quickly with a lot of activity (see /var/log/pods/kolab_roundcube-.*/roundcube/ on a worker).
        # See also: https://github.com/vectordotdev/vector/issues/7934
        glob_minimum_cooldown_ms: 500
        rotate_wait_secs: 5
        oldest_first: true
      node_metrics:
        type: host_metrics
        # Without cgroups
        collectors: [cpu, disk, filesystem, load, host, memory, network]
        scrape_interval_secs: 15
        # bin_fmt workaround according to https://vector.dev/docs/reference/configuration/sources/host_metrics/#filesystem
        filesystem:
          mountpoints:
            excludes: ["*/proc/sys/fs/binfmt_misc"]
          devices:
            excludes: [binfmt_misc]
          filesystems:
            excludes: [binfmt_misc]
    transforms:
      input:
        type: remap
        inputs:
          - kubernetes
        source: |
          # In seconds
          age = to_float(now()) - to_float(parse_timestamp(.timestamp, "%+") ?? now())
          if age > 60 * 60 {
            abort
          }
          .log = del(.message)
          .container_name = del(.kubernetes.container_name)
          .container_id = del(.kubernetes.container_id)
          .namespace_name = del(.kubernetes.pod_namespace)
          .pod_name = del(.kubernetes.pod_name)
          .pod_owner = del(.kubernetes.pod_owner)
          .pod_node_name = del(.kubernetes.pod_node_name)
          del(.file)
          del(.kubernetes)
          .user = "null"
          .component = "null"
    {{- end }}
    sinks:
      # file:
      #   type: file
      #   encoding:
      #     codec: logfmt
      #   inputs:
      #     - parse_roundcube
      #   path: /tmp/roundcube-%Y-%m-%d.log
      #
      # stdout:
      #   type: console
      #   inputs:
      #     - openshift
      #     # - parse_proxy
      #     # - parse_roundcube
      #     # - parse_kolab
      #     # - parse_imap
      #     # - parse_postfix
      #     # - apps._unmatched
      #   encoding:
      #     codec: json
      #     json:
      #       pretty: true
      {{- if not .Values.openshift }}
      prometheus:
        type: prometheus_exporter
        inputs:
          - vector_metrics
          - node_metrics
      {{- end }}
      {{- if .Values.loki.enabled }}
      loki:
        type: loki
        buffer:
          type: memory
          max_events: 500
          when_full: drop_newest
        # NOTE(review): the parse_* transforms referenced below are not defined
        # in this ConfigMap (only the "input" remap is) — presumably they are
        # rendered in from elsewhere; verify against the full rendered config.
        inputs:
          - parse_proxy
          - parse_roundcube
          - parse_kolab
          - parse_imap
          - parse_postfix
          - parse_unmatched
        {{- if .Values.openshift }}
        # Running in the same namespace
        endpoint: http://loki:3100
        {{- else }}
        # Running in a different namespace
        endpoint: http://loki.kolab:3100
        {{- end }}
        compression: "gzip"
        healthcheck:
          enabled: true
        encoding:
          codec: "json"
        remove_label_fields: true
        labels:
          # We have to escape the helm templating syntax, because it's the same as the vector template syntax
          pod_name: "{{ printf "{{ .pod_name }}" }}"
          container_name: "{{ printf "{{ .container_name }}" }}"
          namespace_name: "{{ printf "{{ .namespace_name }}" }}"
          user: "{{ printf "{{ .user }}" }}"
          component: "{{ printf "{{ .component }}" }}"
      {{- end }}
      {{- if .Values.victorialogs.enabled }}
      victorialogs:
        # NOTE(review): same caveat as the loki sink — the parse_* inputs are
        # not defined in this file.
        inputs:
          - parse_proxy
          - parse_roundcube
          - parse_kolab
          - parse_imap
          - parse_postfix
          - parse_unmatched
        type: elasticsearch
        endpoints:
          {{- if .Values.openshift }}
          # Running in the same namespace
          - http://victorialogs:9428/insert/elasticsearch/
          {{- else }}
          # Running in a different namespace
          - http://victorialogs.kolab:9428/insert/elasticsearch/
          {{- end }}
        api_version: v8
        compression: gzip
        # Concurrency seems problematic.
        # We end up with 3k messages in flight (vector_adaptive_concurrency_in_flight_sum),
        # and it seems like messages are being lost (though I never found the culprit);
        # a lack of sorting may have been the problem. Seems to work well without concurrency though.
        request:
          concurrency: none
        healthcheck:
          enabled: false
        query:
          _msg_field: log
          _time_field: timestamp
          _stream_fields: pod_name,container_name,component,user
      {{- end }}
{{- end }}

File Metadata

Mime Type
text/plain
Expires
Sat, Apr 4, 3:40 AM (1 d, 14 h)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
18776799
Default Alt Text
vector-configmap.yaml (5 KB)

Event Timeline