
1.6 Deploying heketi + GlusterFS on K8s for a StorageClass

I. Overview

GlusterFS itself is installed directly on dedicated hosts, while heketi runs on k8s. The GlusterFS servers only need the GlusterFS packages installed; heketi takes care of forming the gfs cluster.

II. Installing the GlusterFS packages on the hosts

1. Server plan:

OS          IP             Hostname   Disks (one or more)
centos 7.6  192.168.6.127  vm127      vda:200G, vdb:300G, vdc:300G
centos 7.6  192.168.6.128  vm128      vda:200G, vdb:300G, vdc:300G
centos 7.6  192.168.6.129  vm129      vda:200G, vdb:300G, vdc:300G
centos 7.6  192.168.6.130  vm130      vda:200G, vdb:300G, vdc:300G

vda is the system disk; vdb and vdc are the data disks.
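Before going further, it is worth confirming on each host that the data disks are present and unused; a minimal check with lsblk (disk names per the plan above):

[root@vm127 ~]# lsblk /dev/vdb /dev/vdc   # both should show no partitions or mountpoints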

2. First disable SELinux, set up time synchronization, and load the required kernel modules (all GlusterFS hosts)

# Disable SELinux (persistently in the config file, and immediately with setenforce)
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
setenforce 0

# Enable automatic time synchronization
rpm -qa |grep chrony &>/dev/null || yum install -y chrony
sed -i '2a server ntp1.aliyun.com iburst' /etc/chrony.conf
systemctl restart chronyd
systemctl enable chronyd
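# Optional sanity check: confirm chrony has actually synced (chronyc ships with the chrony package)
chronyc sources -v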

# Load the kernel modules: modprobe dm_snapshot dm_mirror dm_thin_pool; verify with lsmod | grep <name>
cat > /etc/sysconfig/modules/glusterfs.modules <<"EOF"
#!/bin/bash
glusterfs_modules="dm_snapshot dm_mirror dm_thin_pool"
for kernel_module in ${glusterfs_modules}; do
    /sbin/modinfo -F filename ${kernel_module} > /dev/null 2>&1
    if [ $? -eq 0 ]; then
        /sbin/modprobe ${kernel_module}
    fi
done
EOF
chmod 755 /etc/sysconfig/modules/glusterfs.modules && bash /etc/sysconfig/modules/glusterfs.modules && lsmod | grep dm

3. Install the GlusterFS yum repo (all GlusterFS hosts)

[root@vm127 ~]# wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo   # optional
[root@vm127 ~]# yum search centos-release-gluster   # list the available glusterfs repo versions
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * base: mirrors.cqu.edu.cn
 * extras: mirrors.cqu.edu.cn
 * updates: mirrors.cqu.edu.cn
================================================================== N/S matched: centos-release-gluster ==================================================================
centos-release-gluster-legacy.noarch : Disable unmaintained Gluster repositories from the CentOS Storage SIG
centos-release-gluster40.x86_64 : Gluster 4.0 (Short Term Stable) packages from the CentOS Storage SIG repository
centos-release-gluster41.noarch : Gluster 4.1 (Long Term Stable) packages from the CentOS Storage SIG repository
centos-release-gluster5.noarch : Gluster 5 packages from the CentOS Storage SIG repository
centos-release-gluster6.noarch : Gluster 6 packages from the CentOS Storage SIG repository

  Name and summary matches only, use "search all" for everything.

Here we use the Gluster 6 repo:

[root@vm127 ~]# yum install -y centos-release-gluster6.noarch

4. Install GlusterFS (all GlusterFS hosts)

When installing glusterfs, enable the gluster repos explicitly. Since the [centos-gluster6-test] repo ships with enabled=0, pass --enablerepo so it takes effect:

yum -y --enablerepo=centos-gluster*-test install glusterfs-server glusterfs-geo-replication glusterfs-fuse

5. Configure the firewall and start the GlusterFS service (all GlusterFS hosts)

# Firewall settings
systemctl restart firewalld.service 
firewall-cmd --add-service=glusterfs --permanent
firewall-cmd --reload

# Start glusterd and enable it at boot
systemctl start glusterd.service
systemctl enable glusterd.service
systemctl status glusterd.service

At this point the GlusterFS packages are fully installed.
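Optionally, verify on each host that glusterd is up and listening before handing the cluster over to heketi; glusterd's management port defaults to 24007:

gluster --version
ss -lntp | grep glusterd     # expect glusterd listening on *:24007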

III. Deploying heketi on k8s

1. Prepare the deploy script and the gfs topology file

# Clone the repo (master branch, git version 7246eb4); only the deploy directory is used
git clone https://github.com/gluster/gluster-kubernetes.git

# A few files under gluster-kubernetes/deploy need the following changes:
(1) gk-deploy:
-EXECUTOR="kubernetes"
+EXECUTOR="ssh"

(2) kube-templates/heketi-deployment.yaml:
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1

+  selector:
+    matchLabels:
+      glusterfs: heketi-pod
+      heketi: pod


(3) kube-templates/deploy-heketi-deployment.yaml:
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1

+  selector:
+    matchLabels:
+      glusterfs: heketi-pod
+      deploy-heketi: pod
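For orientation, after these edits the top of deploy-heketi-deployment.yaml should look roughly like the sketch below; the matchLabels must mirror the labels the repo's pod template already carries:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-heketi
spec:
  replicas: 1
  selector:
    matchLabels:
      glusterfs: heketi-pod
      deploy-heketi: pod
  template:
    metadata:
      labels:
        glusterfs: heketi-pod
        deploy-heketi: pod
    spec:
      # container spec unchanged from the repo's template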

# Create and edit topology.json
cp topology.json.sample topology.json
vi topology.json

{
  "clusters": [
    {
      "nodes": [
        {
          "node": {
            "hostnames": {
              "manage": [
                "192.168.6.127"
              ],
              "storage": [
                "192.168.6.127"
              ]
            },
            "zone": 1
          },
          "devices": [
            "/dev/vdb",
            "/dev/vdc"
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "192.168.6.128"
              ],
              "storage": [
                "192.168.6.128"
              ]
            },
            "zone": 1
          },
          "devices": [
            "/dev/vdb",
            "/dev/vdc"
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "192.168.6.129"
              ],
              "storage": [
                "192.168.6.129"
              ]
            },
            "zone": 1
          },
          "devices": [
            "/dev/vdb",
            "/dev/vdc"
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "192.168.6.130"
              ],
              "storage": [
                "192.168.6.130"
              ]
            },
            "zone": 1
          },
          "devices": [
            "/dev/vdb",
            "/dev/vdc"
          ]
        }
      ]
    }
  ]
}
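In topology.json, manage is the address heketi uses to reach the node (over SSH in this setup) and storage is the address used for GlusterFS data traffic; here both are the same IPs. A quick syntax check before deploying can catch stray commas:

python -m json.tool topology.json > /dev/null && echo 'topology.json is valid JSON'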

2. Set up passwordless SSH to every GlusterFS node

The ./gk-deploy script's --ssh-keyfile option specifies the SSH private key; the script stores it in a k8s Secret so heketi can log in to the GlusterFS nodes without a password. So first set up passwordless key login from this host to the external GlusterFS cluster:

ssh-keygen
ssh-copy-id 192.168.6.127
ssh-copy-id 192.168.6.128
ssh-copy-id 192.168.6.129
ssh-copy-id 192.168.6.130
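A quick way to confirm key login works everywhere before running gk-deploy (BatchMode makes ssh fail instead of prompting for a password):

for ip in 192.168.6.{127..130}; do
    ssh -o BatchMode=yes root@$ip hostname
done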

3. Install glusterfs-fuse on all k8s nodes

yum install -y centos-release-gluster6
yum install -y glusterfs-fuse socat
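A brief check that the FUSE client landed on each node; installing from the same gluster6 repo keeps the client's major version in line with the servers':

which mount.glusterfs
glusterfs --version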

4. Deploy heketi on k8s

kubectl create ns heketi
bash gk-deploy --namespace heketi --wait 3600 --admin-key admin111111 --user-key admin111111  --ssh-keyfile ~/.ssh/id_rsa --ssh-user root --ssh-port 22 -y topology.json
# Note: the keys must not start with a digit, because they are imported via k8s environment variables, which may not begin with a digit
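When the script completes, the heketi pod should be Running and its Service reachable; heketi answers a plain HTTP /hello. A minimal check (the ClusterIP shown is this cluster's service/heketi address, also used in the StorageClass below; yours will differ):

kubectl -n heketi get pods,svc
curl http://10.96.194.52:8080/hello    # expect: Hello from Heketi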

5. [Addendum] Recovering when gk-deploy fails

Whether or not GlusterFS runs inside k8s: run ./gk-deploy --abort to delete all the k8s resources, empty the /var/lib/glusterd directory on every gluster node (only if this is a brand-new GlusterFS cluster), restart GlusterFS with systemctl restart glusterd.service, and finally delete the disks' VGs and PVs in that order; removing a VG removes its LVs along with it.

[root@vm128 ~]# ./gk-deploy --namespace heketi --wait 3600 --admin-key admin111111 --user-key user111111  --ssh-keyfile ~/.ssh/id_rsa --ssh-user root --ssh-port 22 -y topology.json --abort
Using Kubernetes CLI.
Using namespace "heketi".
No resources found
pod "heketi-6847cb98b6-wm7kz" deleted
service "heketi" deleted
service "heketi-storage-endpoints" deleted
deployment.apps "heketi" deleted
replicaset.apps "heketi-6847cb98b6" deleted
secret "heketi-config-secret" deleted
serviceaccount "heketi-service-account" deleted
clusterrolebinding.rbac.authorization.k8s.io "heketi-sa-view" deleted
Error from server (NotFound): services "heketi-storage-endpoints" not found
No resources found

On all GlusterFS nodes:

[root@vm127 ~]# rm -rf /var/lib/glusterd/*
[root@vm127 ~]# systemctl restart glusterd.service

# Unmount all of heketi's LVs
[root@vm127 ~]# for i in $(mount |grep '/var/lib/heketi/mounts/vg' |awk '{print $1}');do
    umount -l $i
done

# Remove the VG (its LVs are removed with it)
[root@vm127 ~]# vgremove -y vg_1cd98ecd67854b772cf8c9184a5a77f3 
  Logical volume "brick_2808612b0e1cdaf4be170911f37188dc" successfully removed
  Logical volume "tp_2808612b0e1cdaf4be170911f37188dc" successfully removed
  Volume group "vg_1cd98ecd67854b772cf8c9184a5a77f3" successfully removed
[root@vm127 heketi]# vgs
[root@vm127 heketi]# lvs

# Remove the PV
[root@vm127 heketi]# pvs
  PV         VG Fmt  Attr PSize   PFree  
  /dev/vdb      lvm2 ---  200.00g 200.00g
[root@vm127 heketi]# pvremove /dev/vdb
  Labels on physical volume "/dev/vdb" successfully wiped.
[root@vm127 heketi]# pvs
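If pvremove refuses because of leftover signatures, wipefs is a heavier-handed alternative; it is destructive, so run it only on disks being recycled for heketi:

[root@vm127 heketi]# wipefs -a /dev/vdb /dev/vdc   # erases all filesystem and LVM signatures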

IV. Creating the StorageClass on k8s

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: glusterfs
provisioner: kubernetes.io/glusterfs
reclaimPolicy: Delete  # default is Delete
allowVolumeExpansion: true  # allow dynamic PVC resizing
parameters:
  resturl: "http://10.96.194.52:8080"   # heketi endpoint: the ClusterIP of service/heketi; the Service DNS name cannot be used here
  #clusterid: "dae1ab512dfad0001c3911850cecbd61"  # optional
  restauthenabled: "true"
  restuser: "admin"
  secretNamespace: "heketi"
  secretName: "heketi-secret"
  #restuserkey: "adminkey"    # password in plain text; not recommended
  volumetype: "none"    # other options: "replicate:2", "disperse:4:2"; see https://kubernetes.io/docs/concepts/storage/storage-classes/#glusterfs
  
---
apiVersion: v1
kind: Secret
metadata:
  name: heketi-secret   # must match secretName in the StorageClass
  namespace: heketi    # must match secretNamespace in the StorageClass
data:
  key: YWRtaW4xMTExMTE=  # base64 encoded password. E.g.: echo -n "admin111111" | base64
type: kubernetes.io/glusterfs  # type must be "kubernetes.io/glusterfs"
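To verify the whole chain, a PVC against the new StorageClass should get a PV dynamically provisioned and bound within seconds; a minimal test claim (name and size are arbitrary):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gluster-test-pvc
spec:
  storageClassName: glusterfs
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi

Apply it with kubectl apply -f pvc.yaml; kubectl get pvc gluster-test-pvc should then show STATUS Bound.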