知识点描述
标准指导操作
主机名 | ip | 担任角色 | k8s版本 |
master | 192.168.158.136 | master | v1.17.4 |
node01 | 192.168.158.137 | node | v1.17.4 |
node02 | 192.168.158.138 | node | v1.17.4 |
1)默认已安装好k8s集群,安装过程就不做叙述;需要准备nfs单机环境,若k8s为多master集群,可以准备nfs集群环境。
# 在master上安装nfs服务,设置开机自启动。
[root@nfs ~]# yum install nfs-utils -y
[root@nfs ~]# systemctl restart nfs
[root@nfs ~]# systemctl enable nfs
# 在node上安装nfs服务,注意不需要启动。
[root@k8s-master01 ~]# yum install nfs-utils -y
2)准备共享目录,将目录以读写权限暴露给192.168.158.0/24网段中的所有主机(在生产环境应该只将共享目录暴露给集群所在机器);必须暴露共享目录,否则在创建pod时会出现报错无法找到pv!
# 创建共享目录。
# Create the six shared directories (pv1..pv6) exported over NFS for the
# Redis cluster PVs. Brace expansion avoids spawning an external `seq`,
# and the path is quoted defensively.
for x in {1..6}; do
  mkdir -p "/data/redis-cluster/pv${x}"
done
# 将共享目录暴露,暴露给指定主机将“192.168.158.0/24”改成主机ip即可。
[root@master ~]# vim /etc/exports
/data/redis-cluster/pv1 192.168.158.0/24(rw,no_root_squash)
/data/redis-cluster/pv2 192.168.158.0/24(rw,no_root_squash)
/data/redis-cluster/pv3 192.168.158.0/24(rw,no_root_squash)
/data/redis-cluster/pv4 192.168.158.0/24(rw,no_root_squash)
/data/redis-cluster/pv5 192.168.158.0/24(rw,no_root_squash)
/data/redis-cluster/pv6 192.168.158.0/24(rw,no_root_squash)
3)创建6个pv,kubectl apply -f redis-pv.yaml,下面为redis-pv.yaml内容
apiVersion: v1
kind: PersistentVolume # define a PersistentVolume
metadata:
name: redis-pv1 # PV name
spec:
capacity:
storage: 3Gi # 3Gi of disk space
accessModes:
- ReadWriteOnce # read-write, mountable by a single node
persistentVolumeReclaimPolicy: Recycle # reclaim policy: scrub data on release
storageClassName: "redis-cluster" # storage class "redis-cluster"; only PVCs with the same class can bind
nfs:
path: /data/redis-cluster/pv1
server: 192.168.158.136
# ...
# pv2 through pv6 omitted; only metadata.name and spec.nfs.path change per PV.
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: redis-pv6
spec:
capacity:
storage: 3Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Recycle
storageClassName: "redis-cluster"
nfs:
path: /data/redis-cluster/pv6
server: 192.168.158.136
[root@master ~]# kubectl apply -f redis-pv.yaml
# 创建成功可以通过kubectl命令看到下列6个pv:
[root@master ~]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
redis-pv1 3Gi RWO Recycle Bound default/data-redis-cluster-0 redis-cluster 115m
redis-pv2 3Gi RWO Recycle Bound default/data-redis-cluster-2 redis-cluster 115m
redis-pv3 3Gi RWO Recycle Bound default/data-redis-cluster-3 redis-cluster 115m
redis-pv4 3Gi RWO Recycle Bound default/data-redis-cluster-4 redis-cluster 115m
redis-pv5 3Gi RWO Recycle Bound default/data-redis-cluster-1 redis-cluster 115m
redis-pv6 3Gi RWO Recycle Bound default/data-redis-cluster-5 redis-cluster 115m
4)创建ConfigMap和StatefulSet:
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: redis-cluster
data:
  # On every container start this script rewrites the pod's own (stale) IP
  # inside nodes.conf with the current POD_IP, so the cluster survives pod
  # rescheduling, then execs the real command (redis-server).
  update-node.sh: |
    #!/bin/sh
    REDIS_NODES="/data/nodes.conf"
    # BRE interval braces and literal dots must be escaped; the unescaped
    # form "[0-9]{1,3}." never matches, so the IP would never be rewritten.
    sed -i -e "/myself/ s/[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}/${POD_IP}/" "${REDIS_NODES}"
    exec "$@"
  redis.conf: |+
    cluster-enabled yes
    cluster-require-full-coverage no
    cluster-node-timeout 15000
    # Must match REDIS_NODES above (/data/nodes.conf); otherwise
    # update-node.sh edits a file redis never reads.
    cluster-config-file /data/nodes.conf
    cluster-migration-barrier 1
    maxmemory 3gb
    port 6379
    appendonly yes
    protected-mode no
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: redis-cluster
spec:
  serviceName: redis-cluster
  replicas: 6
  selector:                      # label selector
    matchLabels:
      app: redis-cluster
  template:                      # pod template
    metadata:
      labels:
        app: redis-cluster
    spec:
      containers:
      - name: redis
        image: redis:5.0.5-alpine
        ports:
        - containerPort: 6379
          name: client
        - containerPort: 16379
          name: gossip
        # Wrapper script fixes nodes.conf before starting redis-server.
        command: ["/conf/update-node.sh", "redis-server", "/conf/redis.conf"]
        env:
        - name: POD_IP           # injected from the pod's runtime IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        volumeMounts:
        - name: conf
          mountPath: /conf
          readOnly: false
        - name: data
          mountPath: /data
          readOnly: false
      volumes:
      - name: conf
        configMap:
          name: redis-cluster
          defaultMode: 0755      # make update-node.sh executable
  volumeClaimTemplates:          # one PVC per replica, bound to the PVs above
  - metadata:
      name: data
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 3Gi
      storageClassName: redis-cluster
[root@master ~]# kubectl apply -f redis-StatefulSets.yaml
# 查看
[root@master ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-58777cc9fd-cwj77 1/1 Running 1 29h
redis-cluster-0 1/1 Running 0 76m
redis-cluster-1 1/1 Running 0 76m
redis-cluster-2 1/1 Running 0 76m
redis-cluster-3 1/1 Running 0 75m
redis-cluster-4 1/1 Running 0 74m
redis-cluster-5 1/1 Running 0 74m
5)创建service对外暴露端口,kubectl apply -f redis-svc.yaml,若需要集群外机器访问,将下列yaml中type值改为NodePort即可。
---
apiVersion: v1
kind: Service
metadata:
name: redis-cluster
spec:
type: ClusterIP # reachable from inside the cluster only; change to NodePort for external access
clusterIP: 10.96.97.97 # if omitted, an IP is allocated automatically
ports:
- port: 6379
targetPort: 6379
name: client
- port: 16379
targetPort: 16379
name: gossip
selector:
app: redis-cluster
[root@master ~]# kubectl apply -f redis-svc.yaml
[root@master ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
redis-cluster ClusterIP 10.96.97.97 6379/TCP,16379/TCP 124m
6)初始化redis,创建redis集群:
kubectl exec -it redis-cluster-0 -- redis-cli --cluster create --cluster-replicas 1 $(kubectl get pods -l app=redis-cluster -o jsonpath='{range .items[*]}{.status.podIP}:6379 {end}')
[root@master ~]# kubectl exec -it redis-cluster-0 -- redis-cli --cluster create --cluster-replicas 1 $(kubectl get pods -l app=redis-cluster -o jsonpath='{range .items[*]}{.status.podIP}:6379 {end}')
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 10.244.2.10:6379 to 10.244.2.8:6379
Adding replica 10.244.1.10:6379 to 10.244.1.8:6379
Adding replica 10.244.1.9:6379 to 10.244.2.9:6379
M: aaf12abf3906e40d7c1084fa7228e99a49fc02df 10.244.2.8:6379
slots:[0-5460] (5461 slots) master
M: 547619c817623c71502e52413e46bf33bfb307bc 10.244.1.8:6379
slots:[5461-10922] (5462 slots) master
M: cd3abc406759315a814820dc0a6ce53b93a919a8 10.244.2.9:6379
slots:[10923-16383] (5461 slots) master
S: b0e40a1d30b397bacefd0f4c4d8584246ce52fc6 10.244.1.9:6379
replicates cd3abc406759315a814820dc0a6ce53b93a919a8
S: d8f1a35fc156598c4d9871a607f2639206072782 10.244.2.10:6379
replicates aaf12abf3906e40d7c1084fa7228e99a49fc02df
S: 372b0086cccdcdea81be571df557b958712529a5 10.244.1.10:6379
replicates 547619c817623c71502e52413e46bf33bfb307bc
Can I set the above configuration? (type yes to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
......
>>> Performing Cluster Check (using node 10.244.2.8:6379)
M: aaf12abf3906e40d7c1084fa7228e99a49fc02df 10.244.2.8:6379
slots:[0-5460] (5461 slots) master
1 additional replica(s)
S: 372b0086cccdcdea81be571df557b958712529a5 10.244.1.10:6379
slots: (0 slots) slave
replicates 547619c817623c71502e52413e46bf33bfb307bc
S: d8f1a35fc156598c4d9871a607f2639206072782 10.244.2.10:6379
slots: (0 slots) slave
replicates aaf12abf3906e40d7c1084fa7228e99a49fc02df
M: 547619c817623c71502e52413e46bf33bfb307bc 10.244.1.8:6379
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
S: b0e40a1d30b397bacefd0f4c4d8584246ce52fc6 10.244.1.9:6379
slots: (0 slots) slave
replicates cd3abc406759315a814820dc0a6ce53b93a919a8
M: cd3abc406759315a814820dc0a6ce53b93a919a8 10.244.2.9:6379
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
7)验证集群 redis-cli cluster info,可以看到集群有6个节点,创建成功。
[root@master ~]# kubectl exec -it redis-cluster-0 -- redis-cli cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_ping_sent:6454
cluster_stats_messages_pong_sent:6664
cluster_stats_messages_sent:13118
cluster_stats_messages_ping_received:6664
cluster_stats_messages_pong_received:6438
cluster_stats_messages_received:13102
8)配置修改,将configmap中的redis.conf修改即可,重启pod可将配置生效。
[root@master ~]# kubectl delete pods redis-cluster-0
[root@master ~]# kubectl delete pods redis-cluster-1
[root@master ~]# kubectl delete pods redis-cluster-2
[root@master ~]# kubectl delete pods redis-cluster-3
[root@master ~]# kubectl delete pods redis-cluster-4
[root@master ~]# kubectl delete pods redis-cluster-5
文章版权归作者所有,未经允许请勿转载,若此文章存在违规行为,您可以联系管理员删除。
转载请注明本文地址:https://www.ucloud.cn/yun/129266.html
摘要:计划通过解决持久化的问题通过带起个实例,它们将有稳定的主机名线上一个部署单元是个实例通过和注入配置文件和敏感信息因为线上系统的特性,我们底层的实例是不需要顺序启动或停止的,将采用创建集群参考文章,快速创建一个集群。 缘起 线上有一个 redis 集群,因为当时 redis 自带的集群还不成熟,而且我们项目上的需求和应用的场景比较简单,redis 集群是通过 twemproxy + re...
摘要:常见的和等都是属于某一个的默认是,而等则不属于任何。其实其的命令和上面都差不多,这里不一一列出了创建查看启动情况是一个定义了一组的策略的抽象,可以理解为抽象到用户层的一个宏观服务。其实这个概念在集群里也有,可以参照理解。 showImg(https://segmentfault.com/img/remote/1460000013229549); 【利用K8S技术栈打造个人私有云系列文...
摘要:常见的和等都是属于某一个的默认是,而等则不属于任何。其实其的命令和上面都差不多,这里不一一列出了创建查看启动情况是一个定义了一组的策略的抽象,可以理解为抽象到用户层的一个宏观服务。其实这个概念在集群里也有,可以参照理解。 showImg(https://segmentfault.com/img/remote/1460000013229549); 【利用K8S技术栈打造个人私有云系列文...
摘要:但此功能目前并不直接可用相关也已经创建。根源在于参数的获取实现上。省略输出可以看到,这个名称可以在这个中重复使用了。比如省略输出支持将推送至镜像仓库中简而言之就是使用镜像仓库同时存储镜像和不过这个功能我暂时还没验证。 经过了长时间的开发,Helm 3 终于在今天发布了第一个 alpha 版本。本文将简单介绍 Helm 3 新特性。 移除 Tiller Helm 2 是 C/S 架构,主...
摘要:部署环境及架构操作系统版本版本版本服务器信息在详细介绍部署集群前,先给大家展示下集群的逻辑架构。其他操作更新删除查看删除除此之外,你可以删除,如删除上的格式为服务名字,不必关心从哪个上删除了。 本文通过实际操作来演示Kubernetes的使用,因为环境有限,集群部署在本地3个ubuntu上,主要包括如下内容: 部署环境介绍,以及Kubernetes集群逻辑架构 安装部署Open v...
阅读 1251·2023-01-11 13:20
阅读 1566·2023-01-11 13:20
阅读 1019·2023-01-11 13:20
阅读 1702·2023-01-11 13:20
阅读 3973·2023-01-11 13:20
阅读 2546·2023-01-11 13:20
阅读 1356·2023-01-11 13:20
阅读 3494·2023-01-11 13:20