Host planning
Installation steps
FROM openjdk:8
WORKDIR /usr/local
ADD apache-zookeeper-3.8.0-bin.tar.gz /usr/local/
# Set the container time zone to UTC+8 (Asia/Shanghai) to avoid the application being 8 hours off from Beijing time
ENV TZ=Asia/Shanghai
# The next line can be removed; setting command in the pod spec works as well
CMD /usr/local/apache-zookeeper-3.8.0-bin/bin/zkServer.sh start-foreground
[root@node01 ~]# ls -rlt
总用量 12888
-rw-------. 1 root root 1698 3月 28 16:37 anaconda-ks.cfg
-rw-r--r-- 1 root root 13185104 4月 27 14:34 apache-zookeeper-3.8.0-bin.tar.gz
-rw-r--r-- 1 root root 226 4月 27 18:05 dockerfile
# Build an image named myzk:1.0
[root@node01 ~]# docker build -f dockerfile -t myzk:1.0 .
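The image is only built locally, and the StatefulSet below uses imagePullPolicy: IfNotPresent, so every node that may schedule a zookeeper pod needs its own copy of myzk:1.0. A minimal way to distribute it, assuming the worker nodes are reachable as node02 and node03 (hypothetical hostnames; pushing to a private registry would work equally well):
# Export the image to a tarball, copy it to each node, and load it there
[root@node01 ~]# docker save -o myzk-1.0.tar myzk:1.0
[root@node01 ~]# scp myzk-1.0.tar root@node02:/root/
[root@node01 ~]# scp myzk-1.0.tar root@node03:/root/
[root@node02 ~]# docker load -i myzk-1.0.tar
[root@node03 ~]# docker load -i myzk-1.0.tar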
# Install the NFS service on the master (the NFS server here) and enable it at boot
[root@nfs ~]# yum install nfs-utils -y
[root@nfs ~]# systemctl restart nfs
[root@nfs ~]# systemctl enable nfs
# Install nfs-utils on the nodes as well; note that the service does not need to be started there
[root@k8s-master01 ~]# yum install nfs-utils -y
# Create the shared directories
for x in $(seq 1 3);
> do
> mkdir -p /data/pv/zk${x}
> done
# Export the shared directories; to expose them to a single host instead of the whole subnet, replace "192.168.13.0/24" with that host's IP
[root@master ~]# vim /etc/exports
/data/pv/zk1 192.168.13.0/24(rw,no_root_squash)
/data/pv/zk2 192.168.13.0/24(rw,no_root_squash)
/data/pv/zk3 192.168.13.0/24(rw,no_root_squash)
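After editing /etc/exports, reload the export table and confirm the directories are actually exported (a quick sanity check using the tools that ship with nfs-utils):
# Re-export everything in /etc/exports and list what the server currently exports
[root@master ~]# exportfs -arv
[root@master ~]# showmount -e localhost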
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-zk1
  namespace: zookeeper
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteOnce
  storageClassName: "zookeeper"
  nfs:
    path: /data/pv/zk1
    server: 192.168.13.129
  persistentVolumeReclaimPolicy: Recycle
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-zk2
  namespace: zookeeper
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteOnce
  storageClassName: "zookeeper"
  nfs:
    path: /data/pv/zk2
    server: 192.168.13.129
  persistentVolumeReclaimPolicy: Recycle
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-zk3
  namespace: zookeeper
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteOnce
  storageClassName: "zookeeper"
  nfs:
    path: /data/pv/zk3
    server: 192.168.13.129
  persistentVolumeReclaimPolicy: Recycle
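The PV manifests above are saved as pv-zk.yaml and applied below. All of the manifests in this article also reference a zookeeper namespace; it is not created anywhere in the original steps, so treat this as an assumed prerequisite and create it first if it does not already exist:
[root@master ~]# kubectl create namespace zookeeper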
[root@master ~]# kubectl apply -f pv-zk.yaml
# Once created, the 3 PVs below are visible via kubectl (this output was captured later, after the StatefulSet's PVCs had bound, so STATUS already shows Bound; immediately after creation it would be Available)
[root@master ~]# kubectl get pv -n zookeeper
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
k8s-pv-zk1 2Gi RWO Recycle Bound zookeeper/data-zookeeper-cluster-1 zookeeper 17m
k8s-pv-zk2 2Gi RWO Recycle Bound zookeeper/data-zookeeper-cluster-0 zookeeper 17m
k8s-pv-zk3 2Gi RWO Recycle Bound zookeeper/data-zookeeper-cluster-2 zookeeper 17m
apiVersion: v1
kind: Service  # This Service is used for communication inside the zk cluster
metadata:
  name: zk-hs
  namespace: zookeeper
  labels:
    app: zk
spec:
  ports:
    - port: 2888
      name: server
    - port: 3888
      name: leader-election
  clusterIP: None
  selector:
    app: zk
---
apiVersion: v1
kind: Service  # This Service is used by clients outside the zk cluster
metadata:
  name: zk-cs
  namespace: zookeeper
  labels:
    app: zk
spec:
  type: ClusterIP  # Only reachable from inside the k8s cluster; change the type to NodePort if external machines or a browser need access
  ports:
    - port: 2181
      name: client
  selector:
    app: zk
---
apiVersion: v1
kind: ConfigMap  # Holds the zk configuration file
metadata:
  name: zoo-conf
  namespace: zookeeper
data:
  zoo.cfg: |+
    # The number of milliseconds of each tick
    tickTime=2000
    # The number of ticks that the initial
    # synchronization phase can take
    initLimit=10
    # The number of ticks that can pass between
    # sending a request and getting an acknowledgement
    syncLimit=5
    # the directory where the snapshot is stored.
    # do not use /tmp for storage, /tmp here is just
    # example sakes.
    dataDir=/usr/local/apache-zookeeper-3.8.0-bin/data
    dataLogDir=/usr/local/apache-zookeeper-3.8.0-bin/data/log
    # the port at which the clients will connect
    clientPort=2181
    # the maximum number of client connections.
    # increase this if you need to handle more clients
    #maxClientCnxns=60
    #
    # Be sure to read the maintenance section of the
    # administrator guide before turning on autopurge.
    #
    # https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
    #
    # The number of snapshots to retain in dataDir
    #autopurge.snapRetainCount=3
    # Purge task interval in hours
    # Set to "0" to disable auto purge feature
    #autopurge.purgeInterval=1
    ## Metrics Providers
    #
    # https://prometheus.io Metrics Exporter
    #metricsProvider.className=org.apache.zookeeper.metrics.prometheus.PrometheusMetricsProvider
    #metricsProvider.httpHost=0.0.0.0
    #metricsProvider.httpPort=7000
    #metricsProvider.exportJvmInfo=true
    # Pod IPs change whenever a pod is recreated, so DNS names are used instead of IPs.
    # Format: <pod name>.<headless service name>.<namespace>.svc.cluster.local
    server.1=zookeeper-cluster-0.zk-hs.zookeeper.svc.cluster.local:2888:3888
    server.2=zookeeper-cluster-1.zk-hs.zookeeper.svc.cluster.local:2888:3888
    server.3=zookeeper-cluster-2.zk-hs.zookeeper.svc.cluster.local:2888:3888
---
apiVersion: apps/v1
kind: StatefulSet  # Container IPs change whenever a pod is recreated, so a StatefulSet keeps the pod names stable, which lets DNS names be used instead of IPs
metadata:
  name: zookeeper-cluster
  namespace: zookeeper
spec:
  selector:
    matchLabels:
      app: zk
  serviceName: zk-hs
  replicas: 3
  template:
    metadata:
      labels:
        app: zk
    spec:
      containers:
        - name: zookeeper
          imagePullPolicy: IfNotPresent
          image: "myzk:1.0"
          ports:
            - containerPort: 2181
              name: client
            - containerPort: 2888
              name: server
            - containerPort: 3888
              name: leader-election
          volumeMounts:
            - name: conf
              mountPath: /usr/local/apache-zookeeper-3.8.0-bin/conf
            - name: data
              mountPath: /usr/local/apache-zookeeper-3.8.0-bin/data
          command: [ "/usr/local/apache-zookeeper-3.8.0-bin/bin/zkServer.sh", "start-foreground" ]  # This step could also be done with CMD in the dockerfile
      volumes:
        - name: conf
          configMap:
            name: zoo-conf
            defaultMode: 0755
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 2Gi
        storageClassName: "zookeeper"
[root@master ~]# kubectl apply -f zk-cluster.yaml
# Check the pods; the StatefulSet creates them one at a time, so only the first pod exists at this point
[root@master ~]# kubectl get pods -n zookeeper
NAME READY STATUS RESTARTS AGE
zookeeper-cluster-0 0/1 CrashLoopBackOff 1 17s
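It is also worth confirming that the PVCs created by volumeClaimTemplates have bound to the PVs prepared earlier; their names follow the pattern <volumeClaimTemplate name>-<pod name> (a quick check):
[root@master ~]# kubectl get pvc -n zookeeper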
[root@master ~]# kubectl logs zookeeper-cluster-0 -n zookeeper
21:20:18.807 [main] ERROR org.apache.zookeeper.server.quorum.QuorumPeerMain - Invalid config, exiting abnormally
org.apache.zookeeper.server.quorum.QuorumPeerConfig$ConfigException: Error processing /usr/local/apache-zookeeper-3.8.0-bin/bin/../conf/zoo.cfg
at org.apache.zookeeper.server.quorum.QuorumPeerConfig.parse(QuorumPeerConfig.java:198)
at org.apache.zookeeper.server.quorum.QuorumPeerMain.initializeAndRun(QuorumPeerMain.java:125)
at org.apache.zookeeper.server.quorum.QuorumPeerMain.main(QuorumPeerMain.java:91)
Caused by: java.lang.IllegalArgumentException: myid file is missing
at org.apache.zookeeper.server.quorum.QuorumPeerConfig.checkValidity(QuorumPeerConfig.java:792)
at org.apache.zookeeper.server.quorum.QuorumPeerConfig.setupQuorumPeerConfig(QuorumPeerConfig.java:663)
at org.apache.zookeeper.server.quorum.QuorumPeerConfig.parseProperties(QuorumPeerConfig.java:487)
at org.apache.zookeeper.server.quorum.QuorumPeerConfig.parse(QuorumPeerConfig.java:194)
... 2 common frames omitted
# The log above says the myid file is missing. ZooKeeper reads myid from its dataDir, which in this setup is the NFS-backed PV,
# so check which PV each pod's data PVC is bound to and write the matching server.N id into each PV's directory
[root@master ~]# kubectl get pv -n zookeeper
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
k8s-pv-zk1 2Gi RWO Recycle Bound zookeeper/data-zookeeper-cluster-1 zookeeper 38m
k8s-pv-zk2 2Gi RWO Recycle Bound zookeeper/data-zookeeper-cluster-0 zookeeper 38m
k8s-pv-zk3 2Gi RWO Recycle Bound zookeeper/data-zookeeper-cluster-2 zookeeper 38m
[root@master ~]# echo 1 > /data/pv/zk2/myid   # k8s-pv-zk2 is bound to data-zookeeper-cluster-0, which is server.1
[root@master ~]# echo 2 > /data/pv/zk1/myid   # k8s-pv-zk1 is bound to data-zookeeper-cluster-1, which is server.2
[root@master ~]# echo 3 > /data/pv/zk3/myid   # k8s-pv-zk3 is bound to data-zookeeper-cluster-2, which is server.3
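The crashed pods will be retried automatically once the back-off timer expires; to pick up the myid files immediately, the pods can simply be deleted and the StatefulSet will recreate them (optional):
[root@master ~]# kubectl delete pod -n zookeeper -l app=zk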
[root@master ~]# kubectl get pod -n zookeeper
NAME READY STATUS RESTARTS AGE
zookeeper-cluster-0 1/1 Running 4 41m
zookeeper-cluster-1 1/1 Running 4 40m
zookeeper-cluster-2 1/1 Running 0 39m
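With all three pods Running, the DNS names used in zoo.cfg can be spot-checked from inside one of the pods; this assumes getent is present in the image (openjdk:8 is Debian-based, so it normally is):
# Resolve one of the per-pod DNS names through the headless service zk-hs
[root@master ~]# kubectl exec -it -n zookeeper zookeeper-cluster-0 -- getent hosts zookeeper-cluster-1.zk-hs.zookeeper.svc.cluster.local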
[root@master ~]# kubectl exec -it -n zookeeper zookeeper-cluster-0 -- /usr/local/apache-zookeeper-3.8.0-bin/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/apache-zookeeper-3.8.0-bin/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: follower
[root@master ~]# kubectl exec -it -n zookeeper zookeeper-cluster-1 -- /usr/local/apache-zookeeper-3.8.0-bin/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/apache-zookeeper-3.8.0-bin/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: leader
[root@master ~]# kubectl exec -it -n zookeeper zookeeper-cluster-2 -- /usr/local/apache-zookeeper-3.8.0-bin/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/apache-zookeeper-3.8.0-bin/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: follower
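Finally, a quick end-to-end check through the client Service zk-cs: create a znode from one pod and read it back from another (a minimal sketch; zkCli.sh ships with the ZooKeeper distribution and accepts a single command after -server):
# Write a test znode via the zk-cs Service from pod 0
[root@master ~]# kubectl exec -it -n zookeeper zookeeper-cluster-0 -- /usr/local/apache-zookeeper-3.8.0-bin/bin/zkCli.sh -server zk-cs.zookeeper.svc.cluster.local:2181 create /demo hello
# Read it back from pod 2 to confirm the data is replicated across the cluster
[root@master ~]# kubectl exec -it -n zookeeper zookeeper-cluster-2 -- /usr/local/apache-zookeeper-3.8.0-bin/bin/zkCli.sh -server zk-cs.zookeeper.svc.cluster.local:2181 get /demo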