# Build HAProxy 2.0.7 from source with OpenSSL, PCRE, zlib and the
# Prometheus exporter, then install it under /usr/local/haproxy.
#
# Note: libssl-dev / libpcre3 are Debian package names and are not available
# from yum; openssl-devel / pcre-devel are the RHEL/CentOS equivalents.
# zlib-devel is required because the build below uses USE_ZLIB=1.
yum install -y openssl openssl-devel readline-devel pcre-devel zlib-devel

wget http://www.haproxy.org/download/2.0/src/haproxy-2.0.7.tar.gz
tar xvf haproxy-2.0.7.tar.gz
cd haproxy-2.0.7
make TARGET=linux-glibc USE_PCRE=1 USE_OPENSSL=1 USE_ZLIB=1 USE_CRYPT_H=1 USE_LIBCRYPT=1 EXTRA_OBJS="contrib/prometheus-exporter/service-prometheus.o" PREFIX=/usr/local/haproxy
make install PREFIX=/usr/local/haproxy
# Verify the freshly built binary is linked against OpenSSL.
ldd haproxy | grep ssl
# Put the binary where the systemd unit below expects it.
cp -rf /usr/local/haproxy/sbin/haproxy /usr/sbin/




# Create the config directory; write the configuration shown below to
# /etc/haproxy/haproxy.cfg, then display it to verify.
mkdir -p /etc/haproxy && cd /etc/haproxy/
cat haproxy.cfg


global
    daemon
    # Admin-level runtime socket (used by tooling / manual inspection).
    stats socket /var/run/haproxy.stat level admin
    pidfile  /var/run/haproxy.pid
    #chroot   /var/lib/haproxy
    maxconn 120000
    # Single process; stats/metrics stay consistent across one process.
    nbproc   1
 
defaults
    mode                    http
    log                     global
    #log        127.0.0.1 local2
    option                  dontlognull
    option http-server-close
    #option forwardfor       except 127.0.0.0/8
    # Re-dispatch to another server if the chosen one is down.
    option                  redispatch
    retries                 3
    maxconn 120000
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    # Long client/server timeouts (15 min) for long-lived API connections.
    timeout client          900s
    timeout server          900s
    timeout http-keep-alive 10s
    timeout check           10s
 
# Stats and Prometheus metrics endpoint on :9000.
listen  admin_stats
        bind 0.0.0.0:9000
        stats enable
        mode http
        # /metrics is served by the prometheus-exporter compiled in via
        # EXTRA_OBJS above; all other paths fall through to the stats page.
        http-request use-service prometheus-exporter if { path /metrics }
        stats refresh 30s
        stats uri /stats
        # The realm is a single argument: the space must be escaped with a
        # backslash, otherwise "Manager" is parsed as a stray extra token.
        stats realm Haproxy\ Manager
        stats auth admin:adminxxxxxx
        #stats admin if TRUE

frontend nginx-ingress-controller-http
        bind :80
        mode http
        option forwardfor
        # The previous hdr_reg(".*duiopen.com") was unanchored and left the
        # dots unescaped, so it matched any Host merely *containing*
        # "duiopen<any-char>com". hdr_end does a literal suffix match on the
        # Host header, covering duiopen.com and all its subdomains.
        acl duiopen hdr_end(host) -i duiopen.com
        # Note: the default_backend is the same, so the ACL currently only
        # documents intent; kept for when other backends are added.
        use_backend nginx-ingress-controller-http-backend if duiopen
        default_backend nginx-ingress-controller-http-backend

backend nginx-ingress-controller-http-backend
        mode http
        balance roundrobin
        # The three nginx-ingress nodes (hostNetwork, port 80); health check
        # every 2s, 3 successes to come up, 3 failures to go down.
        server  d3-nginx-001 10.24.10.79:80 weight 5 check inter 2000 rise 3 fall 3 
        server  d3-nginx-002 10.24.10.80:80 weight 5 check inter 2000 rise 3 fall 3
        server  d3-nginx-003 10.24.10.81:80 weight 5 check inter 2000 rise 3 fall 3


frontend nginx-ingress-controller-https
        # NOTE(review): this bind has no "ssl crt ..." and the frontend is in
        # mode http, so HAProxy does not terminate TLS here. If clients speak
        # TLS to :443 this section would need "mode tcp" passthrough (and the
        # Host ACL would stop working) — confirm against the live setup.
        bind :443
        mode http
        option forwardfor
        # Literal suffix match on Host; the old regex ".*duiopen.com" was
        # unanchored and had unescaped dots.
        acl duiopens hdr_end(host) -i duiopen.com
        use_backend nginx-ingress-controller-https-backend if duiopens
        default_backend nginx-ingress-controller-https-backend

backend nginx-ingress-controller-https-backend
        mode http
        balance roundrobin
        # Same three ingress nodes as the HTTP backend, port 443.
        server  d3-nginx-001 10.24.10.79:443 weight 5 check inter 2000 rise 3 fall 3 
        server  d3-nginx-002 10.24.10.80:443 weight 5 check inter 2000 rise 3 fall 3
        server  d3-nginx-003 10.24.10.81:443 weight 5 check inter 2000 rise 3 fall 3

        # Kept for reference: X-Forwarded-Proto handling, currently disabled.
        #acl h_xfp_exists req.hdr(X-Forwarded-Proto) -m found
        #http-request set-header X-Forwarded-Proto http if !{ ssl_fc } !h_xfp_exists
        #http-request set-header X-Forwarded-Proto https if { ssl_fc } !h_xfp_exists








# systemd unit for the source-built haproxy (master-worker mode via -W).
vim /usr/lib/systemd/system/haproxy.service 
[Unit]
Description=HAProxy Load Balancer
After=network.target

[Service]
# Validate the config before (re)starting so a broken edit never takes the
# running instance down; the second ExecReload line is only reached if the
# -c check succeeds.
ExecStartPre=/usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -c -q
ExecStart=/usr/sbin/haproxy -W -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid
ExecReload=/usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -c -q
# USR2 asks the master process to reload workers gracefully.
ExecReload=/bin/kill -USR2 $MAINPID
KillMode=mixed
Restart=always
SuccessExitStatus=143
# NOTE(review): upstream ships this unit as "Type=notify" with
# "ExecStart=... -Ws"; Type=forking relies on "daemon" being set in the
# global section above — confirm before changing either side.
Type=forking

[Install]
WantedBy=multi-user.target




# Pick up the new unit file, enable haproxy at boot, and (re)start it now.
systemctl daemon-reload
systemctl enable haproxy
systemctl restart haproxy




# keepalived provides VIP failover between the HAProxy nodes (config below).
yum -y install keepalived




! Configuration File for keepalived
 
global_defs {
    router_id haproxy-10.77
    ! Non-default VRRP multicast group for this cluster.
    vrrp_mcast_group4 224.0.0.19
}
 
! Health check: the script must exist and be executable; when it starts
! returning non-zero the node gives up the VIP.
vrrp_script check_haproxy {
        script "/etc/keepalived/check_haproxy.sh"
        interval 2
}
 
vrrp_instance VI_1 {
    ! Both nodes start as BACKUP with nopreempt so the VIP does not flap
    ! back when a recovered node rejoins.
    state BACKUP
    interface bond0
    virtual_router_id 113
    priority 100
    nopreempt
    advert_int 1
    ! NOTE(review): preempt_delay only applies when preemption is enabled;
    ! with nopreempt set above it has no effect — confirm intent.
    preempt_delay   100
    garp_master_delay    3
    authentication {
        auth_type PASS
        ! Placeholder — replace with the real shared VRRP password.
        auth_pass haproxyxxxxx
    }
    track_script {
        check_haproxy
    }
    virtual_ipaddress {
        ! The VIP fronting both HAProxy nodes.
        10.24.10.113  dev bond0  label  bond0:0
    }
}


# Start keepalived now; also run `systemctl enable keepalived` so the VIP
# failover survives a reboot.
systemctl start keepalived

2、部署Nginx Ingress Controller组件

1)添加节点标签

[root@d3-master-001 kubernetes]# kubectl get nodes -o wide|grep -e NAME -e nginx
NAME            STATUS   ROLES    AGE     VERSION   INTERNAL-IP   EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION          CONTAINER-RUNTIME
d3-nginx-001    Ready    <none>   45s     v1.14.6   10.24.10.79   <none>        CentOS Linux 7 (Core)   3.10.0-693.el7.x86_64   docker://18.6.3
d3-nginx-002    Ready    <none>   45s     v1.14.6   10.24.10.80   <none>        CentOS Linux 7 (Core)   3.10.0-693.el7.x86_64   docker://18.6.3
d3-nginx-003    Ready    <none>   45s     v1.14.6   10.24.10.81   <none>        CentOS Linux 7 (Core)   3.10.0-693.el7.x86_64   docker://18.6.3
[root@d3-master-001 kubernetes]# kubectl label nodes d3-gpu-057 role=application
node/d3-gpu-057 labeled
[root@d3-master-001 kubernetes]# kubectl label nodes d3-nginx-001 nginx-ingress-controller=1
node/d3-nginx-001 labeled
[root@d3-master-001 kubernetes]# kubectl label nodes d3-nginx-002 nginx-ingress-controller=1
node/d3-nginx-002 labeled
[root@d3-master-001 kubernetes]# kubectl label nodes d3-nginx-003 nginx-ingress-controller=1
node/d3-nginx-003 labeled

2)创建Nginx Ingress Controller所需的ConfigMap nginx-ingress-controller-161服务需要default-config(ETCD_SERVER_INTERNAL)、nginx-load-balancer-conf(Nginx Ingress Controller全局设置参数,nginx-ingress-controller的--configmap=$(POD_NAMESPACE)/nginx-load-balancer-conf命令行参数指定)和custom-headers(自定义Headers,ConfigMap/nginx-load-balancer-conf的add-headers: default/custom-headers指定)三个default名字空间的ConfigMap。 daoker中集群管理-Ingress管理-configmap,D3-DUI-正式环境(国科)导入Ingress需要的3个:

# The three ConfigMaps required by nginx-ingress-controller-161 (see the
# surrounding text for how each is referenced).
apiVersion: v1
kind: ConfigMap
metadata:
  name: default-config
  namespace: default
data:
  # $MASTER0x_IP are placeholders — substitute the real master node IPs.
  ETCD_SERVER_INTERNAL: '["http://$MASTER01_IP:2381","http://$MASTER02_IP:2381","http://$MASTER03_IP:2381"]'
  RESOLVER: 10.96.0.10
---
# Global controller settings, referenced by the controller's
# --configmap=$(POD_NAMESPACE)/nginx-load-balancer-conf flag.
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-load-balancer-conf
  namespace: default
data:
  # Custom response headers come from the custom-headers ConfigMap below.
  add-headers: default/custom-headers
  client-body-buffer-size: 512K
  error-log-level: notice
  hsts: "false"
  max-worker-connections: "65535"
  proxy-body-size: 1024M
  proxy-read-timeout: "3600"
  proxy-send-timeout: "3600"
  ssl-ciphers: ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA
  # NOTE(review): TLSv1 and TLSv1.1 are deprecated; presumably kept for old
  # clients — confirm before tightening to TLSv1.2-only.
  ssl-protocols: TLSv1.2 TLSv1.1 TLSv1
  use-http2: "false"
  worker-processes: "2"
---
# Intentionally empty data: the Cache-Control header was removed because it
# broke CDN caching of static assets (kubectl below shows DATA = 0).
apiVersion: v1
kind: ConfigMap
metadata:
  name: custom-headers
  namespace: default
data:
  # Cache-Control: no-cache
  • “Cache-Control: no-cache”策略影响cdn缓存静态资源,所有nginx-ingress-controller都计划去掉该http header;
  • 在daoker中创建这3个ConfigMap。

检查导入的ConfigMap:

[root@d3-master-001 kubernetes]# kubectl get configmaps
NAME                       DATA   AGE
custom-headers             0      59s
default-config             2      59s
nginx-load-balancer-conf   12     59s

3)部署Nginx ingress controller服务

  • daoker中添加odcp名字空间,部署自定义404页面的服务odcp/error-404(nginx-ingress-controller的--default-backend-service=odcp/error-404命令行参数指定);
  • daoker中部署nginx-ingress-controller-161服务。

4)检查Nginx ingress controller服务状态 Pods状态:

[root@d3-master-001 kubernetes]# kubectl get pods -n odcp -o wide
NAME                         READY   STATUS    RESTARTS   AGE     IP            NODE         NOMINATED NODE   READINESS GATES
error-404-546b6bc786-94855   1/1     Running   0          2m53s   10.244.23.2   d3-gpu-057   <none>           <none>
[root@d3-master-001 kubernetes]# kubectl get pods -o wide
NAME                                           READY   STATUS    RESTARTS   AGE   IP            NODE           NOMINATED NODE   READINESS GATES
default-http-backend-7885f8f56c-dfjkl          1/1     Running   0          15s   10.244.13.2   d3-gpu-069     <none>           <none>
nginx-ingress-controller-161-7d89756dc-czjg9   1/1     Running   0          15s   10.24.10.81   d3-nginx-003   <none>           <none>
nginx-ingress-controller-161-7d89756dc-fwjhp   1/1     Running   0          15s   10.24.10.80   d3-nginx-002   <none>           <none>
nginx-ingress-controller-161-7d89756dc-vk7k7   1/1     Running   0          15s   10.24.10.79   d3-nginx-001   <none>           <none>

ConfigMap状态:

[root@d3-master-001 kubernetes]# kubectl get configmaps
NAME                              DATA   AGE
custom-headers                    0      19h
default-config                    2      19h
ingress-controller-leader-nginx   0      19h
nginx-load-balancer-conf          12     19h
[root@d3-master-001 kubernetes]# kubectl get configmaps ingress-controller-leader-nginx -o yaml
apiVersion: v1
kind: ConfigMap
metadata:
  annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"nginx-ingress-controller-161-7d89756dc-vk7k7","leaseDurationSeconds":30,"acquireTime":"2019-10-12T09:43:49Z","renewTime":"2019-10-12T23:48:30Z","leaderTransitions":1}'
  creationTimestamp: "2019-10-12T03:53:00Z"
  name: ingress-controller-leader-nginx
  namespace: default
  resourceVersion: "493939"
  selfLink: /api/v1/namespaces/default/configmaps/ingress-controller-leader-nginx
  uid: c8c2a61d-eca3-11e9-9b24-141877685738
  • 其中,configmaps/ingress-controller-leader-nginx是Nginx ingress controller服务创建的ConfigMap,其他3个是手动创建的ConfigMap。

5)测试Nginx ingress controller daoker部署odcp/dui-ai服务,然后创建最简的ConfigMap和Ingress:

[root@d3-master-001 kubernetes]# kubectl create cm -n odcp odcp-config
configmap/odcp-config created
[root@d3-master-001 kubernetes]# kubectl apply -f - <<EOF
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    nginx.ingress.kubernetes.io/configuration-snippet: |
      more_set_headers "X-Frame-Options: ALLOW-FROM https://www.growingio.com";
      more_set_headers "Cache-Control no-cache";
    nginx.ingress.kubernetes.io/server-snippet: |
      error_page 404 = https://duiopen.com/console/assets/errors/index.html;
    nginx.ingress.kubernetes.io/ssl-redirect: "false"
  name: odcp-ingress-v2
  namespace: odcp
spec:
  rules:
  - host: www.duiopen.com
    http:
      paths:
      - backend:
          serviceName: dui-ai
          servicePort: 9000
        path: /
EOF
ingress.extensions/odcp-ingress-v2 created
[root@d3-master-001 kubernetes]# curl -H "Host:www.duiopen.com" 10.24.10.113

3、部署kong

1)kong的数据库部署和初始化 创建PostgreSQL实例:kong-plugin-gray-国科 添加白名单分组d3_gk:10.24.10.0/24 内网地址:pgm-bp15tcf3flbv4pia14830.pg.rds.aliyuncs.com 3433 创建高权限账号:kong_gray/******

[root@d3-kong-001 ~]# telnet pgm-bp15tcf3flbv4pia14830.pg.rds.aliyuncs.com 3433
Trying 10.17.118.173...
Connected to pgm-bp15tcf3flbv4pia14830.pg.rds.aliyuncs.com.
Escape character is '^]'.
^]
telnet> quit
Connection closed.

登录阿里云数据库后台,创建数据库kong_gray。 注意,这里的数据库名和用户名为kong_gray,而之前华东1为kong-gray,因为阿里云新PostgreSQL数据库名和用户名不支持使用中划线。

2)初始化kong-gray使用的数据库

[root@d3-kong-001 ~]# docker run --rm \
     -e "KONG_DATABASE=postgres" \
     -e "KONG_PG_HOST=pgm-bp15tcf3flbv4pia14830.pg.rds.aliyuncs.com" \
     -e "KONG_PG_PORT=3433" \
     -e "KONG_PG_USER=kong_gray" \
     -e "KONG_PG_PASSWORD=******" \
     -e "KONG_PG_DATABASE=kong_gray" \
     docker.v2.aispeech.com/aispeech/kong-plugin-gray:v0.6.20-grace kong migrations up

3)创建kong-gray使用的环境变量 对应ConfigMap/cloud-config: KONG_GRAY_PG_PORT:3433 KONG_GRAY_PG_DB:kong_gray KONG_GRAY_PG_PWD:****** KONG_GRAY_PG_USER:kong_gray KONG_GRAY_PG_HOST:pgm-bp15tcf3flbv4pia14830.pg.rds.aliyuncs.com KONG_GRAY_SERVER_INTERNAL:http://kong-plugin-gray.cloud.svc.cluster.local:8000 KONG_GRAY_ADMIN_INTERNAL:http://kong-plugin-gray.cloud.svc.cluster.local:8001 KONG_GRAY_SERVER_WS:ws://kong-plugin-gray.cloud.svc.cluster.local:8000

4)创建kong-gray使用的ServiceAccount和ClusterRoleBinding ServiceAccount:kong-grace-serviceaccount ClusterRoleBinding:kong-grace-clusterrolebinding

5)部署kong-plugin-gray服务 部署跟华东1一样的版本: docker.v2.aispeech.com/aispeech/kong-plugin-gray:v0.6.20-grace docker.v2.aispeech.com/aispeech/kong-gray-dashboard:v0.0.6

[root@d3-master-001 kubernetes]# kubectl get deployments -n cloud kong-plugin-gray
NAME               READY   UP-TO-DATE   AVAILABLE   AGE
kong-plugin-gray   0/2     2            0           45s
[root@d3-master-001 kubernetes]# kubectl label nodes d3-kong-001 kong-plugin-gray=1
node/d3-kong-001 labeled
[root@d3-master-001 kubernetes]# kubectl label nodes d3-kong-002 kong-plugin-gray=1
node/d3-kong-002 labeled
[root@d3-master-001 kubernetes]# kubectl get pods -n cloud -o wide|grep kong-plugin-gray
kong-plugin-gray-8dc55855f-2j4n6   1/1     Running   0          3m40s   10.244.47.3     d3-kong-001   <none>           <none>
kong-plugin-gray-8dc55855f-6gktw   1/1     Running   0          3m40s   10.244.47.2     d3-kong-001   <none>           <none>
  • 两个Pod被调度到了同一个节点,修改Yaml配置添加Pod非亲和性条件(修改于2019年10月29日,已和金卫核对,华东1线上的配置下次更新kong-plugin-gray服务生效)。

6)修改kong-plugin-gray服务的Yaml配置 修改cloud/kong-plugin-gray的Yaml配置,其中spec.template.spec.affinity字段是新添加的:

# NOTE(review): apps/v1beta1 Deployment is valid on this 1.14 cluster but
# was removed in Kubernetes 1.16 — migrate to apps/v1 before upgrading.
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: kong-plugin-gray
  namespace: cloud
spec:
  minReadySeconds: 5
  replicas: 1
  selector:
    matchLabels:
      app: kong-plugin-gray
      tier: backend
  strategy:
    # Surge up to 50% extra Pods during a rollout; never drop below the
    # desired replica count.
    rollingUpdate:
      maxSurge: 50%
      maxUnavailable: 0
    type: RollingUpdate
  template:
    metadata:
      annotations: {}
      labels:
        app: kong-plugin-gray
        tier: backend
    spec:
      containers:
      ...
      # Only schedule on nodes labelled kong-plugin-gray=1.
      nodeSelector:
        kong-plugin-gray: "1"
      # Soft anti-affinity (newly added): prefer spreading replicas across
      # nodes so both Pods do not land on the same kong node again.
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                    - kong-plugin-gray
              topologyKey: "kubernetes.io/hostname"
  • 通过daoker重启kong-plugin-gray服务、扩容到4个副本、缩容到2个副本,结果均符合预期。

7)创建kong-gray的Ingress 通过daoker创建如下3个Ingress:

a)走专线的域名kong-gk-internal.duiopen.com对应的Ingress

# Leased-line (internal) entry: no IP whitelist is applied here.
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    # Do not force HTTP -> HTTPS redirects at the controller.
    nginx.ingress.kubernetes.io/ssl-redirect: 'false'
  name: kong-plugin-gray-internal-ingress
  namespace: cloud
spec:
  rules:
  - host: kong-gk-internal.duiopen.com
    http:
      paths:
      - backend:
          serviceName: kong-plugin-gray
          servicePort: 8000
        path: /
  tls:
  - hosts:
    - kong-gk-internal.duiopen.com
    secretName: all.duiopen.com
  • 走专线,不设置ip白名单,减少Nginx Ingress Controller的压力。

b)走公网的域名kong-gk-public.duiopen.com对应的Ingress

# Public entry: restricted by source-IP whitelist (office egress, HuaDong1,
# HuaBei3 and in-cluster 10/8 — see the note below the manifest).
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    nginx.ingress.kubernetes.io/ssl-redirect: 'false'
    nginx.ingress.kubernetes.io/whitelist-source-range: 58.210.212.110,47.97.102.70,39.100.96.151,10.0.0.0/8
  name: kong-plugin-gray-public-ingress
  namespace: cloud
spec:
  rules:
  - host: kong-gk-public.duiopen.com
    http:
      paths:
      - backend:
          serviceName: kong-plugin-gray
          servicePort: 8000
        path: /
  tls:
  - hosts:
    - kong-gk-public.duiopen.com
    secretName: all.duiopen.com
  • 走公网,设置白名单,其中:58.210.212.110为苏州办公网出口ip,47.97.102.70为华东1正式环境出口ip,39.100.96.151为华北3灾备环境出口ip,10.0.0.0/8为集群内部ip。

c)Kong Dashboard对应的Ingress

# Dashboard entry: whitelisted to the office egress IP only.
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    nginx.ingress.kubernetes.io/ssl-redirect: 'false'
    nginx.ingress.kubernetes.io/whitelist-source-range: 58.210.212.110
  name: kong-gray-dashboard-ingress
  namespace: cloud
spec:
  rules:
  - host: kgd-gk.duiopen.com
    http:
      paths:
      - backend:
          serviceName: kong-gray-dashboard
          servicePort: 8080
        path: /
  tls:
  - hosts:
    - kgd-gk.duiopen.com
    secretName: all.duiopen.com
  • 走公网,设置白名单,仅限苏州办公网出口ip访问。
  • No labels