---
# Source: tidb-drainer/templates/drainer-configmap.yaml
# Drainer TOML config, mounted into the pod as /etc/drainer/drainer.toml
# (see the StatefulSet's "config" volume below).
apiVersion: v1
kind: ConfigMap
metadata:
  name: tidb-cluster-drainer-config
  namespace: tidb
  labels:
    app.kubernetes.io/name: tidb-cluster-drainer
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: tidb-cluster
    app.kubernetes.io/component: drainer
    helm.sh/chart: tidb-drainer-v1.6.0
data:
  # db-type "file" writes binlog to local pb files under /data/pb
  # (the PVC-backed data dir) instead of replicating to a downstream DB.
  config-file: |-
    detect-interval = 10
    compressor = ""
    [syncer]
    worker-count = 16
    disable-dispatch = false
    ignore-schemas = "INFORMATION_SCHEMA,PERFORMANCE_SCHEMA,mysql"
    safe-mode = false
    txn-batch = 20
    db-type = "file"
    [syncer.to]
    dir = "/data/pb"
---
# Source: tidb-drainer/templates/drainer-service.yaml
# Headless service (clusterIP: None) providing the stable per-pod DNS
# name <pod>.tidb-cluster-drainer that the startup script waits on.
apiVersion: v1
kind: Service
metadata:
  name: tidb-cluster-drainer
  namespace: tidb
  labels:
    app.kubernetes.io/name: tidb-cluster-drainer
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: tidb-cluster
    app.kubernetes.io/component: drainer
    helm.sh/chart: tidb-drainer-v1.6.0
spec:
  clusterIP: None
  ports:
    - name: drainer
      port: 8249
  selector:
    app.kubernetes.io/name: tidb-cluster-drainer
    app.kubernetes.io/instance: tidb-cluster
    app.kubernetes.io/component: drainer
---
# Source: tidb-drainer/templates/drainer-statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: tidb-cluster-drainer
  namespace: tidb
  labels:
    app.kubernetes.io/name: tidb-cluster-drainer
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: tidb-cluster
    app.kubernetes.io/component: drainer
    helm.sh/chart: tidb-drainer-v1.6.0
spec:
  # NOTE(review): selector includes app.kubernetes.io/managed-by, which the
  # pod template labels do carry, so it matches — but the selector is
  # immutable after creation, so it must stay as-is.
  selector:
    matchLabels:
      app.kubernetes.io/name: tidb-cluster-drainer
      app.kubernetes.io/instance: tidb-cluster
      app.kubernetes.io/managed-by: Helm
      app.kubernetes.io/component: drainer
  serviceName: tidb-cluster-drainer
  replicas: 1
  template:
    metadata:
      annotations:
        # Drainer serves Prometheus metrics on its main port (8249).
        prometheus.io/scrape: "true"
        prometheus.io/path: "/metrics"
        prometheus.io/port: "8249"
      labels:
        app.kubernetes.io/name: tidb-cluster-drainer
        app.kubernetes.io/instance: tidb-cluster
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: drainer
    spec:
      nodeSelector:
        zone: dell
      containers:
        - name: drainer
          image: harbor.uenpay.com/base/tidb-binlog:v8.1.0
          imagePullPolicy: IfNotPresent
          # Startup script: wait (up to ~30s) for this pod's headless-service
          # DNS record to resolve, then exec drainer pointed at the PD cluster.
          # NOTE(review): the script uses `[[ ]]`, `==` in `[ ]`, and
          # `set -o pipefail` under /bin/sh — these are bashisms; presumably
          # the image's sh accepts them. Verify against the image before
          # changing the interpreter. Also, with `set -e` the `$? == 0` check
          # after /drainer only runs when drainer exited 0, so it is
          # effectively always true there.
          command:
            - /bin/sh
            - -c
            - |-
              set -euo pipefail
              domain=`echo ${HOSTNAME}`.tidb-cluster-drainer
              elapseTime=0
              period=1
              threshold=30
              while true; do
                sleep ${period}
                elapseTime=$(( elapseTime+period ))
                if [[ ${elapseTime} -ge ${threshold} ]]
                then
                  echo "waiting for drainer domain ready timeout" >&2
                  exit 1
                fi
                if nslookup ${domain} 2>/dev/null
                then
                  echo "nslookup domain ${domain} success"
                  break
                else
                  echo "nslookup domain ${domain} failed" >&2
                fi
              done
              /drainer \
                -L=info \
                -pd-urls=http://tidb-cluster-pd:2379 \
                -addr=0.0.0.0:8249 \
                -advertise-addr=`echo ${HOSTNAME}`.tidb-cluster-drainer:8249 \
                -config=/etc/drainer/drainer.toml \
                -disable-detect=false \
                -initial-commit-ts=-1 \
                -data-dir=/data \
                -log-file=""
              if [ $? == 0 ]; then
                echo $(date -u +"[%Y/%m/%d %H:%M:%S.%3N %:z]") "drainer offline, please delete my pod"
                tail -f /dev/null
              fi
          ports:
            - containerPort: 8249
              name: drainer
          volumeMounts:
            # PVC-backed data dir (checkpoint + pb output, see ConfigMap).
            - name: data
              mountPath: /data
            # drainer.toml rendered from the ConfigMap above.
            - name: config
              mountPath: /etc/drainer
          resources: {}
      volumes:
        - name: config
          configMap:
            name: tidb-cluster-drainer-config
            items:
              - key: config-file
                path: drainer.toml
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes: ["ReadWriteOnce"]
        storageClassName: cephrbd-sc
        resources:
          requests:
            storage: 100Gi