Building a ZooKeeper Cluster on Kubernetes

By | July 8, 2021

1. Modify the official image's startup script

When a ZooKeeper cluster is started as a StatefulSet, every replica shares the same pod template, so with the official image the value of myid is fixed by default. The end of the startup script, where myid is written, therefore has to be modified.
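The usual fix, and the one used in the modified entrypoint below, is to derive myid from the ordinal that Kubernetes appends to each StatefulSet pod's hostname. A minimal sketch of the shell expansion involved (the hostname value here is only illustrative):

HOSTNAME=zookeeper-cluster-test-2     # set automatically inside each pod
echo $(( ${HOSTNAME##*-} + 1 ))       # strips everything up to the last "-" and adds 1; prints 3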

https://github.com/xiaohuait/zookeeper-docker/tree/master/3.6.3

The modified file:

# git clone https://github.com/xiaohuait/zookeeper-docker.git
# cd zookeeper-docker/3.6.3/
# cat docker-entrypoint.sh
#!/bin/bash

set -e

# Allow the container to be started with `--user`
if [[ "$1" = 'zkServer.sh' && "$(id -u)" = '0' ]]; then
    chown -R zookeeper "$ZOO_DATA_DIR" "$ZOO_DATA_LOG_DIR" "$ZOO_LOG_DIR"
    exec gosu zookeeper "$0" "$@"
fi

# Generate the config only if it doesn't exist
if [[ ! -f "$ZOO_CONF_DIR/zoo.cfg" ]]; then
    CONFIG="$ZOO_CONF_DIR/zoo.cfg"
    {
        echo "dataDir=$ZOO_DATA_DIR" 
        echo "dataLogDir=$ZOO_DATA_LOG_DIR"

        echo "tickTime=$ZOO_TICK_TIME"
        echo "initLimit=$ZOO_INIT_LIMIT"
        echo "syncLimit=$ZOO_SYNC_LIMIT"

        echo "autopurge.snapRetainCount=$ZOO_AUTOPURGE_SNAPRETAINCOUNT"
        echo "autopurge.purgeInterval=$ZOO_AUTOPURGE_PURGEINTERVAL"
        echo "maxClientCnxns=$ZOO_MAX_CLIENT_CNXNS"
        echo "standaloneEnabled=$ZOO_STANDALONE_ENABLED"
        echo "admin.enableServer=$ZOO_ADMINSERVER_ENABLED"
    } >> "$CONFIG"
    if [[ -z $ZOO_SERVERS ]]; then
      ZOO_SERVERS="server.1=localhost:2888:3888;2181"
    fi

    for server in $ZOO_SERVERS; do
        echo "$server" >> "$CONFIG"
    done

    if [[ -n $ZOO_4LW_COMMANDS_WHITELIST ]]; then
        echo "4lw.commands.whitelist=$ZOO_4LW_COMMANDS_WHITELIST" >> "$CONFIG"
    fi

    for cfg_extra_entry in $ZOO_CFG_EXTRA; do
        echo "$cfg_extra_entry" >> "$CONFIG"
    done
fi

# Write myid only if it doesn't exist; when ZOO_MY_ID is unset, derive it from
# the ordinal in the StatefulSet pod hostname (e.g. ...-0 -> 1, ...-1 -> 2)
if [[ ! -f "$ZOO_DATA_DIR/myid" ]]; then
    echo "${ZOO_MY_ID:-$(( ${HOSTNAME##*-} + 1 ))}" > "$ZOO_DATA_DIR/myid"
fi

exec "$@"
# docker build -t zookeeper:3.6.3-c ./ # build the zookeeper image
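If the Kubernetes nodes pull from a registry rather than sharing a local image cache, tag and push the image after building; the registry address below is a placeholder:

# docker tag zookeeper:3.6.3-c registry.example.com/library/zookeeper:3.6.3-c
# docker push registry.example.com/library/zookeeper:3.6.3-c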
Dockerfile contents:
FROM openjdk:11-jre-slim

ENV ZOO_CONF_DIR=/conf \
    ZOO_DATA_DIR=/data \
    ZOO_DATA_LOG_DIR=/datalog \
    ZOO_LOG_DIR=/logs \
    ZOO_TICK_TIME=2000 \
    ZOO_INIT_LIMIT=5 \
    ZOO_SYNC_LIMIT=2 \
    ZOO_AUTOPURGE_PURGEINTERVAL=0 \
    ZOO_AUTOPURGE_SNAPRETAINCOUNT=3 \
    ZOO_MAX_CLIENT_CNXNS=60 \
    ZOO_STANDALONE_ENABLED=true \
    ZOO_ADMINSERVER_ENABLED=true

# Add a user with an explicit UID/GID and create necessary directories
RUN set -eux; \
    groupadd -r zookeeper --gid=1000; \
    useradd -r -g zookeeper --uid=1000 zookeeper; \
    mkdir -p "$ZOO_DATA_LOG_DIR" "$ZOO_DATA_DIR" "$ZOO_CONF_DIR" "$ZOO_LOG_DIR"; \
    chown zookeeper:zookeeper "$ZOO_DATA_LOG_DIR" "$ZOO_DATA_DIR" "$ZOO_CONF_DIR" "$ZOO_LOG_DIR"

# Install required packages
RUN set -eux; \
    apt-get update; \
    DEBIAN_FRONTEND=noninteractive \
    apt-get install -y --no-install-recommends \
        ca-certificates \
        dirmngr \
        gosu \
        gnupg \
        netcat \
        wget; \
    rm -rf /var/lib/apt/lists/*; \
# Verify that gosu binary works
    gosu nobody true

ARG GPG_KEY=DFF24FB8323ADAC90E3CF36F729E61230EA917E9
ARG SHORT_DISTRO_NAME=zookeeper-3.6.3
ARG DISTRO_NAME=apache-zookeeper-3.6.3-bin

# Download Apache Zookeeper, verify its PGP signature, untar and clean up
RUN set -eux; \
    ddist() { \
        local f="$1"; shift; \
        local distFile="$1"; shift; \
        local success=; \
        local distUrl=; \
        for distUrl in \
            'https://www.apache.org/dyn/closer.cgi?action=download&filename=' \
            https://www-us.apache.org/dist/ \
            https://www.apache.org/dist/ \
            https://archive.apache.org/dist/ \
        ; do \
            if wget -q -O "$f" "$distUrl$distFile" && [ -s "$f" ]; then \
                success=1; \
                break; \
            fi; \
        done; \
        [ -n "$success" ]; \
    }; \
    ddist "$DISTRO_NAME.tar.gz" "zookeeper/$SHORT_DISTRO_NAME/$DISTRO_NAME.tar.gz"; \
    ddist "$DISTRO_NAME.tar.gz.asc" "zookeeper/$SHORT_DISTRO_NAME/$DISTRO_NAME.tar.gz.asc"; \
    export GNUPGHOME="$(mktemp -d)"; \
    gpg --keyserver ha.pool.sks-keyservers.net --recv-key "$GPG_KEY" || \
    gpg --keyserver pgp.mit.edu --recv-keys "$GPG_KEY" || \
    gpg --keyserver keyserver.pgp.com --recv-keys "$GPG_KEY"; \
    gpg --batch --verify "$DISTRO_NAME.tar.gz.asc" "$DISTRO_NAME.tar.gz"; \
    tar -zxf "$DISTRO_NAME.tar.gz"; \
    mv "$DISTRO_NAME/conf/"* "$ZOO_CONF_DIR"; \
    rm -rf "$GNUPGHOME" "$DISTRO_NAME.tar.gz" "$DISTRO_NAME.tar.gz.asc"; \
    chown -R zookeeper:zookeeper "/$DISTRO_NAME"

WORKDIR $DISTRO_NAME
VOLUME ["$ZOO_DATA_DIR", "$ZOO_DATA_LOG_DIR", "$ZOO_LOG_DIR"]

EXPOSE 2181 2888 3888 8080

ENV PATH=$PATH:/$DISTRO_NAME/bin \
    ZOOCFGDIR=$ZOO_CONF_DIR

COPY docker-entrypoint.sh /
ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["zkServer.sh", "start-foreground"]
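The image can be smoke-tested locally before deploying: with ZOO_SERVERS unset, the entrypoint writes a single-node config, so status should report standalone mode.

# docker run -d --name zk-smoke zookeeper:3.6.3-c
# docker exec zk-smoke zkServer.sh status    # expect "Mode: standalone"
# docker rm -f zk-smoke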



2. Create the Service manifests

# cat zookeeper-svc.yaml 
apiVersion: v1
kind: Service
metadata:
  name: zookeeper-cluster-test-svc
  namespace: default
  labels:
    app: zookeeper-cluster-test
spec:
  clusterIP: None
  ports:
  - name: client            # client-facing port; on a public cloud, expose ZooKeeper with type: LoadBalancer
    port: 2181
    targetPort: 2181
    protocol: TCP 
  - name: peer              # intra-cluster communication (the leader listens on this port)
    port: 2888
    targetPort: 2888
    protocol: TCP
  - name: leader-election   # used for leader election
    port: 3888
    targetPort: 3888
    protocol: TCP
  selector:
    app: zookeeper-cluster-test

---
# If running on a public cloud, use a LoadBalancer Service like the one below to
# expose the client port externally. Note that the StatefulSet's per-pod DNS records
# still require the headless Service above, so give this Service a different
# metadata.name if you deploy both.
# cat zookeeper-svc.yaml 
apiVersion: v1
kind: Service
metadata:
  name: zookeeper-cluster-test-svc
  namespace: default
  labels:
    app: zookeeper-cluster-test
  annotations:
    service.kubernetes.io/qcloud-loadbalancer-internal-subnetid: subnet-pqqeergv  # subnet ID
spec:
  externalTrafficPolicy: Cluster
  ports:
  - name: 2181-tcp
    port: 2181
    targetPort: 2181
    protocol: TCP
  - name: peer
    port: 2888
    targetPort: 2888
    protocol: TCP
  - name: leader-election
    port: 3888
    targetPort: 3888
    protocol: TCP
  - name: web
    port: 8080
    targetPort: 8080
  selector:
    app: zookeeper-cluster-test
  type: LoadBalancer
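Apply the manifest and check the result; for the headless variant the CLUSTER-IP column should read None, which is what enables the per-pod DNS records the StatefulSet relies on:

# kubectl apply -f zookeeper-svc.yaml
# kubectl get svc zookeeper-cluster-test-svc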

3. The ZooKeeper StatefulSet manifest

# cat zookeeper.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zookeeper-cluster-test
  namespace: default
  labels:
    app: zookeeper-cluster-test
spec:
  serviceName: zookeeper-cluster-test-svc
  replicas: 3
  selector:
    matchLabels:
      app: zookeeper-cluster-test
  template:
    metadata:
      labels:
        app: zookeeper-cluster-test
    spec:
      containers:
      - name: zookeeper
        imagePullPolicy: Always
        image: zookeeper:3.6.3-c
        resources:
          requests:
            cpu: 200m
            memory: 201Mi
          limits:
            cpu: 500m
            memory: 1024Mi 
        ports:
        - containerPort: 2181
          name: client
        - containerPort: 2888
          name: leader
        - containerPort: 3888
          name: leader-election
          protocol: TCP     
        volumeMounts:
        - name: zookeeper-data
          mountPath: "/data/"
        readinessProbe:
          tcpSocket:
            port: 2181
          initialDelaySeconds: 30
          periodSeconds: 5
        livenessProbe:
          tcpSocket:
            port: 2181
          initialDelaySeconds: 30
          failureThreshold: 30
          periodSeconds: 10
        env: 
        - name: ZOO_STANDALONE_ENABLED
          value: "false"
        - name: ZOO_SERVERS
          value: "server.1=zookeeper-cluster-test-0.zookeeper-cluster-test-svc.default.svc.cluster.local:2888:3888;2181 server.2=zookeeper-cluster-test-1.zookeeper-cluster-test-svc.default.svc.cluster.local:2888:3888;2181 server.3=zookeeper-cluster-test-2.zookeeper-cluster-test-svc.default.svc.cluster.local:2888:3888;2181"

  volumeClaimTemplates:
  - metadata:
      name: zookeeper-data
    spec:
      storageClassName: disk-test
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 10Gi
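Apply the StatefulSet, then confirm each pod rendered the expected configuration: pod N should hold myid N+1, and each entry of ZOO_SERVERS should appear as its own line in zoo.cfg.

# kubectl apply -f zookeeper.yaml
# kubectl get pods -l app=zookeeper-cluster-test
# kubectl exec zookeeper-cluster-test-0 -- cat /data/myid      # expect 1
# kubectl exec zookeeper-cluster-test-0 -- cat /conf/zoo.cfg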

4. Log in to a container and verify the cluster

# kubectl exec -it zookeeper-cluster-test-0 -- bash
root@zookeeper-cluster-test-0:/apache-zookeeper-3.6.3-bin# apt-get update -y && apt-get install procps iputils-ping net-tools -y
root@zookeeper-cluster-test-0:/apache-zookeeper-3.6.3-bin# zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: follower
root@zookeeper-cluster-test-0:/apache-zookeeper-3.6.3-bin# netstat -lntp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 0.0.0.0:2181            0.0.0.0:*               LISTEN      -                   
tcp        0      0 0.0.0.0:38509           0.0.0.0:*               LISTEN      -                   
tcp        0      0 172.16.0.80:3888        0.0.0.0:*               LISTEN      -                   
tcp        0      0 0.0.0.0:8080            0.0.0.0:*               LISTEN      -
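For an end-to-end check, write and read back a znode through the client port, and resolve a peer's per-pod DNS record (the znode name here is arbitrary):

# zkCli.sh -server zookeeper-cluster-test-svc:2181 create /smoke-test hello
# zkCli.sh -server zookeeper-cluster-test-svc:2181 get /smoke-test
# ping -c 1 zookeeper-cluster-test-1.zookeeper-cluster-test-svc.default.svc.cluster.local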
