glusterfs

  • requires the vagrant-hosts plugin: vagrant plugin install vagrant-hosts
Vagrantfile
VAGRANTFILE_API_VERSION = "2"
 
cluster = {
  "node1" => { :ip => "10.0.0.10", :cpus => 1, :mem => 1024 },
  "node2" => { :ip => "10.0.0.20", :cpus => 1, :mem => 1024 },
  "node3" => { :ip => "10.0.0.30", :cpus => 1, :mem => 1024 }
}
 
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
 
  cluster.each_with_index do |(hostname, info), index|
 
    config.vm.define hostname do |cfg|
 
      cfg.vm.provider :virtualbox do |vb, override|
        override.vm.box = "debian/buster64"
        override.vm.network :private_network, ip: "#{info[:ip]}"
        override.vm.hostname = hostname
        vb.name = hostname
        vb.customize ["modifyvm", :id, "--memory", info[:mem], "--cpus", info[:cpus], "--hwvirtex", "on"]
 
        file_to_disk = 'data/' + hostname + '-disk.vdi'
        unless File.exist?(file_to_disk)
          vb.customize ['createhd', 
                        '--filename', file_to_disk, 
                        '--size', 5 * 1024]
        end
        vb.customize ['storageattach', :id, 
                      '--storagectl', 'SATA Controller', 
                      '--port', 1, 
                      '--device', 0, 
                      '--type', 'hdd', 
                      '--medium', file_to_disk]
 
      end # end provider
 
      cfg.vm.provision 'shell', path: './data/allnodes.sh'
      cfg.vm.provision :hosts, :sync_hosts => true

      # copy the per-node script (node1.sh, node2.sh, ...) into the vagrant home;
      # it is not executed automatically
      node_script = './data/' + hostname + '.sh'
      if File.exist?(node_script)
        cfg.vm.provision "file", source: node_script, destination: "$HOME/"
      else
        cfg.vm.provision "shell", inline: "echo SCRIPT not found!"
      end
 
    end # end config
 
  end # end cluster
end
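
A minimal bring-up sequence, as a sketch assuming the Vagrantfile and the data/ directory above sit in the current working directory (note that the per-node script is only copied into the home directory, not executed automatically):

vagrant plugin install vagrant-hosts   # required plugin (see above)
vagrant up                             # creates and provisions node1..node3
vagrant status
vagrant ssh node1                      # then run node1.sh by hand to create gv0
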
allnodes.sh
#!/bin/bash

# partition, format and register the extra disk only once
[[ ! -f /etc/provision_env_disk_added_date ]] && {
 
  echo 'type=83' | sudo sfdisk /dev/sdb
  sudo mkfs.ext4 /dev/sdb1
  sudo mkdir -p /exports/sdb1
  echo "/dev/sdb1      /exports/sdb1   ext4 user,auto,defaults     0       0" | sudo tee --append /etc/fstab
  date | sudo tee /etc/provision_env_disk_added_date
 
}
 
sudo mount /exports/sdb1
sudo mkdir -p /exports/sdb1/brick1
 
sudo apt-get -y install gnupg
wget -O - https://download.gluster.org/pub/gluster/glusterfs/7/rsa.pub | sudo apt-key add -
# DEBID=$(grep 'VERSION_ID=' /etc/os-release | cut -d '=' -f 2 | tr -d '"')
# DEBVER=$(grep 'VERSION=' /etc/os-release | grep -Eo '[a-z]+')
# DEBARCH=$(dpkg --print-architecture)
# echo "deb https://download.gluster.org/pub/gluster/glusterfs/LATEST/Debian/${DEBID}/${DEBARCH}/apt ${DEBVER} main" | sudo tee /etc/apt/sources.list.d/gluster.list
echo "deb https://download.gluster.org/pub/gluster/glusterfs/LATEST/Debian/10/amd64/apt buster main" | sudo tee /etc/apt/sources.list.d/gluster.list
 
sudo apt-get -y update
 
sudo apt-get -y install glusterfs-server
sudo systemctl start glusterd
sudo systemctl enable glusterd
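
A quick sanity check after provisioning, as a sketch assuming the layout above (/dev/sdb1 mounted on /exports/sdb1, glusterd managed by systemd):

lsblk /dev/sdb                    # the extra disk should show a single partition
findmnt /exports/sdb1             # the brick filesystem should be mounted
sudo systemctl status glusterd    # the daemon should be active and enabled
glusterfs --version
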
node1.sh
#!/bin/bash
 
sudo gluster peer probe node2
sudo gluster peer probe node3
 
sudo gluster volume create gv0 replica 3 \
  node1:/exports/sdb1/brick1 \
  node2:/exports/sdb1/brick1 \
  node3:/exports/sdb1/brick1
 
sudo gluster volume start gv0
 
sudo mount -t glusterfs node1:/gv0 /mnt
node{2,3}.sh
#! /bin/bash
 
sudo mount -t glusterfs node1:/gv0 /mnt
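
A small replication test, as a sketch assuming gv0 is started and mounted on /mnt on every node (the file name is just an example):

# on node1: write a test file through the glusterfs mount
echo "hello from node1" | sudo tee /mnt/replication-test.txt

# on node2 or node3: the same file should be visible immediately
cat /mnt/replication-test.txt

# each brick keeps its own copy; on any node:
ls -l /exports/sdb1/brick1/
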
  • instead of node1, the mount source could be any of the other two nodes (a sketch for making this mount persistent follows this list)
  • service status: /etc/init.d/glusterfs-server status (with systemd: sudo systemctl status glusterd)
  • config files: /var/lib/glusterd
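
To make the client mount survive a reboot, one option (a sketch, not part of the original setup) is an fstab entry of type glusterfs with _netdev so it waits for the network:

echo "node1:/gv0   /mnt   glusterfs   defaults,_netdev   0   0" | sudo tee --append /etc/fstab
sudo mount -a
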
sudo gluster help
glusterfs --version
 
glusterfs 3.4.2 built on Jan 14 2014 18:05:35
Repository revision: git://git.gluster.com/glusterfs.git
Copyright (c) 2006-2013 Red Hat, Inc. <http://www.redhat.com/>
GlusterFS comes with ABSOLUTELY NO WARRANTY.
It is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3
or later), or the GNU General Public License, version 2 (GPLv2),
in all cases as published by the Free Software Foundation.
sudo gluster volume info
 
Volume Name: gv0
Type: Replicate
Volume ID: 39159af2-8a53-44f0-9379-43b86370302a
Status: Started
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: processing1:/exports/sdb1/brick1
Brick2: processing2:/exports/sda1/brick1
sudo gluster vol status
 
Status of volume: gv0
Gluster process						Port	Online	Pid
------------------------------------------------------------------------------
Brick processing1:/exports/sdb1/brick1			49152	Y	2130
Brick processing2:/exports/sda1/brick1			49152	Y	1822
NFS Server on localhost					2049	Y	2119
Self-heal Daemon on localhost				N/A	Y	2125
NFS Server on processing2				2049	Y	1829
Self-heal Daemon on processing2				N/A	Y	1821
 
There are no active volume tasks
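
The status output above lists a Self-heal Daemon on every node; a quick way to check for files still pending repair (a sketch using the gv0 volume from this setup):

sudo gluster volume heal gv0 info                     # entries waiting to be healed, per brick
sudo gluster volume heal gv0 statistics heal-count    # per-brick heal counters
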
gluster volume start gv0
gluster volume list
  • gluster volume log locate <VOLNAME>

    ← does not work even though it appears in the documentation

  • gluster volume log rotate <VOLNAME> <BRICK>
  • add a new node (already configured; see the add-brick sketch after this list):
    gluster peer probe <HOSTNAME>
  • node status:
    gluster peer status
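
After probing a new peer, the replicated volume can be grown with add-brick; a sketch assuming a hypothetical node4 prepared exactly like the other nodes (the hostname and brick path are assumptions):

sudo gluster peer probe node4
# raise the replica count from 3 to 4 and add node4's brick
sudo gluster volume add-brick gv0 replica 4 node4:/exports/sdb1/brick1
sudo gluster volume info gv0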