
IMPORTANT: This is a draft note. Use it as a worked example, not as formal documentation.

Red Hat Storage:

High Availability NAS in AWS
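
This walkthrough builds a replicated GlusterFS (Red Hat Storage) volume across four EC2 storage nodes (10.100.1.11, 10.100.2.12, 10.100.1.13 and 10.100.2.14), mounts it from client instances over the native FUSE protocol, NFS, SMB and the Swift object API, and finally geo-replicates it to a second two-node pool for disaster recovery. First, from node 1, probe the other three nodes to form the trusted storage pool: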

[ec2-user@ip-10-100-0-34 ~]$ ssh root@10.100.1.11
The authenticity of host '10.100.1.11 (10.100.1.11)' can't be established.
RSA key fingerprint is 22:2e:3f:43:ea:76:8e:5c:47:2c:bd:44:b3:d7:6f:ca.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '10.100.1.11' (RSA) to the list of known hosts.
root@10.100.1.11's password: 
Last login: Mon Mar 31 05:48:42 2014 from 50.79.46.14
[root@ip-10-100-1-11 ~]# gluster peer probe 10.100.2.12
peer probe: success. 
[root@ip-10-100-1-11 ~]# gluster peer probe 10.100.1.13
peer probe: success. 
[root@ip-10-100-1-11 ~]# gluster peer probe 10.100.2.14
peer probe: success. 
[root@ip-10-100-1-11 ~]# gluster peer status
Number of Peers: 3

Hostname: 10.100.2.12
Uuid: 5a5a0427-978d-46f7-9347-9666828f635b
State: Peer in Cluster (Connected)

Hostname: 10.100.1.13
Uuid: f44de833-6db5-4249-95d0-b3373e726dbd
State: Peer in Cluster (Connected)

Hostname: 10.100.2.14
Uuid: d973306c-953a-4f46-8cd3-6fbffec73b58
State: Peer in Cluster (Connected)
[root@ip-10-100-1-11 ~]#
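
All three probed peers report Connected; the node the command runs on is implicit and never appears in its own peer status output.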



[root@ip-10-100-1-11 ~]# df -h
Filesystem            Size  Used Avail Use% Mounted on
/dev/xvda1             99G  2.5G   96G   3% /
none                  3.6G     0  3.6G   0% /dev/shm
/dev/md127            100G   33M  100G   1% /export/brick
[root@ip-10-100-1-11 ~]# 
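
Each node carries a 100 GB brick filesystem on /dev/md127 mounted at /export/brick. The lab images come with this pre-built; a minimal sketch of how such a brick is typically prepared (illustrative only; the device name is taken from the df output above, and the XFS inode size follows the usual Red Hat Storage recommendation):

# on each storage node: format the brick device and mount it
mkfs.xfs -i size=512 /dev/md127
mkdir -p /export/brick
mount /dev/md127 /export/brick
# persist the mount across reboots
echo '/dev/md127 /export/brick xfs defaults 0 0' >> /etc/fstab

Create the volume with two-way replication across the four bricks: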


[root@ip-10-100-1-11 ~]# gluster volume create testvol replica 2
Usage: volume create <NEW-VOLNAME> [stripe <COUNT>] [replica <COUNT>] 
[device vg] [transport <tcp|rdma|tcp,rdma>] <NEW-BRICK> ... [force]
[root@ip-10-100-1-11 ~]# gluster volume create testvol replica 2 10.100.1.11:/export/brick/testvol 10.100.2.12:/export/brick/testvol 10.100.1.13:/export/brick/testvol 10.100.2.14:/export/brick/testvol
volume create: testvol: success: please start the volume to access data
[root@ip-10-100-1-11 ~]#
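
With replica 2 and four bricks, consecutive bricks form the replica pairs: 10.100.1.11 and 10.100.2.12 mirror each other, as do 10.100.1.13 and 10.100.2.14, and files are distributed across the two pairs. Start the volume to make it mountable: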


[root@ip-10-100-1-11 ~]# gluster volume start testvol
volume start: testvol: success
[root@ip-10-100-1-11 ~]# gluster volume info testvol

Volume Name: testvol
Type: Distributed-Replicate
Volume ID: 77daccf3-4dee-4388-93fc-f7c7d0aeb510
Status: Started
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: 10.100.1.11:/export/brick/testvol
Brick2: 10.100.2.12:/export/brick/testvol
Brick3: 10.100.1.13:/export/brick/testvol
Brick4: 10.100.2.14:/export/brick/testvol
[root@ip-10-100-1-11 ~]# 
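
The type Distributed-Replicate and the brick count 2 x 2 = 4 confirm the layout: two distribute subvolumes, each a two-way replica.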


[root@ip-10-100-1-11 ~]# gluster peer status
Number of Peers: 3

Hostname: 10.100.2.12
Uuid: 5a5a0427-978d-46f7-9347-9666828f635b
State: Peer in Cluster (Connected)

Hostname: 10.100.1.13
Uuid: f44de833-6db5-4249-95d0-b3373e726dbd
State: Peer in Cluster (Connected)

Hostname: 10.100.2.14
Uuid: d973306c-953a-4f46-8cd3-6fbffec73b58
State: Peer in Cluster (Connected)
[root@ip-10-100-1-11 ~]# 
[root@ip-10-100-1-11 ~]# 




[root@ip-10-100-1-11 ~]# gluster volume info

Volume Name: testvol
Type: Distributed-Replicate
Volume ID: 77daccf3-4dee-4388-93fc-f7c7d0aeb510
Status: Started
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: 10.100.1.11:/export/brick/testvol
Brick2: 10.100.2.12:/export/brick/testvol
Brick3: 10.100.1.13:/export/brick/testvol
Brick4: 10.100.2.14:/export/brick/testvol
[root@ip-10-100-1-11 ~]#




mint@mint ~/Downloads $ ssh -i qwikLABS-L8-1077.pem ec2-user@<client-public-IP>
Last login: Thu Oct 30 21:53:51 2014 from 122.172.135.201
[ec2-user@ip-10-100-0-34 ~]$ sudo -i
[root@ip-10-100-0-34 ~]# 


Mount the test volume (testvol) using the “Native” GlusterFS client protocol
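
The native FUSE client needs the glusterfs and glusterfs-fuse packages. On this RHEL 6 client they come from the RHUI REGION repositories; the RHN registration errors at the top of the yum output are harmless here: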


[root@ip-10-100-0-34 ~]# yum install glusterfs
Loaded plugins: amazon-id, rhnplugin, rhui-lb, security
There was an error communicating with RHN.
RHN Satellite or RHN Classic support will be disabled.

Error Message:
	Please run rhn_register as root on this client
Error Class Code: 9
Error Class Info: Invalid System Credentials.
Explanation: 
     An error has occurred while processing your request. If this problem
     persists please enter a bug report at bugzilla.redhat.com.
     If you choose to submit the bug report, please be sure to include
     details of what you were trying to do when this error occurred and
     details on how to reproduce this problem.

rhui-REGION-client-config-server-6                       | 2.9 kB     00:00     
rhui-REGION-client-config-server-6/primary_db            | 4.0 kB     00:00     
rhui-REGION-rhel-server-releases                         | 3.7 kB     00:00     
rhui-REGION-rhel-server-releases/primary_db              |  29 MB     00:01     
rhui-REGION-rhel-server-releases-optional                | 3.5 kB     00:00     
rhui-REGION-rhel-server-releases-optional/primary_db     | 3.0 MB     00:00     
Setting up Install Process
Resolving Dependencies
--> Running transaction check
---> Package glusterfs.x86_64 0:3.4.0.33rhs-1.el6_4 will be updated
--> Processing Dependency: glusterfs = 3.4.0.33rhs-1.el6_4 for package: glusterfs-fuse-3.4.0.33rhs-1.el6_4.x86_64
---> Package glusterfs.x86_64 0:3.6.0.29-2.el6 will be an update
--> Processing Dependency: glusterfs-libs = 3.6.0.29-2.el6 for package: glusterfs-3.6.0.29-2.el6.x86_64
--> Processing Dependency: libssl.so.10(libssl.so.10)(64bit) for package: glusterfs-3.6.0.29-2.el6.x86_64
--> Processing Dependency: libcrypto.so.10(libcrypto.so.10)(64bit) for package: glusterfs-3.6.0.29-2.el6.x86_64
--> Processing Dependency: libgfapi.so.0()(64bit) for package: glusterfs-3.6.0.29-2.el6.x86_64
--> Running transaction check
---> Package glusterfs-api.x86_64 0:3.6.0.29-2.el6 will be installed
---> Package glusterfs-fuse.x86_64 0:3.4.0.33rhs-1.el6_4 will be updated
---> Package glusterfs-fuse.x86_64 0:3.6.0.29-2.el6 will be an update
---> Package glusterfs-libs.x86_64 0:3.4.0.33rhs-1.el6_4 will be updated
---> Package glusterfs-libs.x86_64 0:3.6.0.29-2.el6 will be an update
---> Package openssl.x86_64 0:1.0.0-27.el6_4.2 will be updated
---> Package openssl.x86_64 0:1.0.1e-30.el6_6.2 will be an update
--> Finished Dependency Resolution

Dependencies Resolved

================================================================================
 Package        Arch   Version           Repository                        Size
================================================================================
Updating:
 glusterfs      x86_64 3.6.0.29-2.el6    rhui-REGION-rhel-server-releases 1.3 M
Installing for dependencies:
 glusterfs-api  x86_64 3.6.0.29-2.el6    rhui-REGION-rhel-server-releases  56 k
Updating for dependencies:
 glusterfs-fuse x86_64 3.6.0.29-2.el6    rhui-REGION-rhel-server-releases  78 k
 glusterfs-libs x86_64 3.6.0.29-2.el6    rhui-REGION-rhel-server-releases 263 k
 openssl        x86_64 1.0.1e-30.el6_6.2 rhui-REGION-rhel-server-releases 1.5 M

Transaction Summary
================================================================================
Install       1 Package(s)
Upgrade       4 Package(s)

Total download size: 3.2 M
Is this ok [y/N]: y
Downloading Packages:
(1/5): glusterfs-3.6.0.29-2.el6.x86_64.rpm               | 1.3 MB     00:00     
(2/5): glusterfs-api-3.6.0.29-2.el6.x86_64.rpm           |  56 kB     00:00     
(3/5): glusterfs-fuse-3.6.0.29-2.el6.x86_64.rpm          |  78 kB     00:00     
(4/5): glusterfs-libs-3.6.0.29-2.el6.x86_64.rpm          | 263 kB     00:00     
(5/5): openssl-1.0.1e-30.el6_6.2.x86_64.rpm              | 1.5 MB     00:00     
--------------------------------------------------------------------------------
Total                                           5.4 MB/s | 3.2 MB     00:00     
Running rpm_check_debug
Running Transaction Test
Transaction Test Succeeded
Running Transaction
  Updating   : openssl-1.0.1e-30.el6_6.2.x86_64                             1/9 
  Updating   : glusterfs-libs-3.6.0.29-2.el6.x86_64                         2/9 
  Updating   : glusterfs-3.6.0.29-2.el6.x86_64                              3/9 
  Installing : glusterfs-api-3.6.0.29-2.el6.x86_64                          4/9 
  Updating   : glusterfs-fuse-3.6.0.29-2.el6.x86_64                         5/9 
  Cleanup    : glusterfs-fuse-3.4.0.33rhs-1.el6_4.x86_64                    6/9 
  Cleanup    : glusterfs-3.4.0.33rhs-1.el6_4.x86_64                         7/9 
  Cleanup    : glusterfs-libs-3.4.0.33rhs-1.el6_4.x86_64                    8/9 
  Cleanup    : openssl-1.0.0-27.el6_4.2.x86_64                              9/9 
  Verifying  : openssl-1.0.1e-30.el6_6.2.x86_64                             1/9 
  Verifying  : glusterfs-3.6.0.29-2.el6.x86_64                              2/9 
  Verifying  : glusterfs-api-3.6.0.29-2.el6.x86_64                          3/9 
  Verifying  : glusterfs-fuse-3.6.0.29-2.el6.x86_64                         4/9 
  Verifying  : glusterfs-libs-3.6.0.29-2.el6.x86_64                         5/9 
  Verifying  : glusterfs-3.4.0.33rhs-1.el6_4.x86_64                         6/9 
  Verifying  : glusterfs-fuse-3.4.0.33rhs-1.el6_4.x86_64                    7/9 
  Verifying  : glusterfs-libs-3.4.0.33rhs-1.el6_4.x86_64                    8/9 
  Verifying  : openssl-1.0.0-27.el6_4.2.x86_64                              9/9 

Dependency Installed:
  glusterfs-api.x86_64 0:3.6.0.29-2.el6                                         

Updated:
  glusterfs.x86_64 0:3.6.0.29-2.el6                                             

Dependency Updated:
  glusterfs-fuse.x86_64 0:3.6.0.29-2.el6 glusterfs-libs.x86_64 0:3.6.0.29-2.el6
  openssl.x86_64 0:1.0.1e-30.el6_6.2    

Complete!
[root@ip-10-100-0-34 ~]#




[root@ip-10-100-0-34 ~]# mount -t glusterfs 10.100.1.11:/export/brick/testvol /mnt
[root@ip-10-100-0-34 ~]# cd /mnt
[root@ip-10-100-0-34 mnt]# ls
[root@ip-10-100-0-34 mnt]# mkdir test
[root@ip-10-100-0-34 mnt]# ls
test
[root@ip-10-100-0-34 mnt]# ls -ld test
drwxr-xr-x 2 root root 4096 Oct 30 23:36 test

[root@ip-10-100-0-34 mnt]# chmod 777 test
[root@ip-10-100-0-34 mnt]# cd test
[root@ip-10-100-0-34 test]# for i in `seq 1 100`
> do 
> echo hello$i > file$i
> done
[root@ip-10-100-0-34 test]# ls
file1    file18  file27  file36  file45  file54  file63  file72  file81  file90
file10   file19  file28  file37  file46  file55  file64  file73  file82  file91
file100  file2   file29  file38  file47  file56  file65  file74  file83  file92
file11   file20  file3   file39  file48  file57  file66  file75  file84  file93
file12   file21  file30  file4   file49  file58  file67  file76  file85  file94
file13   file22  file31  file40  file5   file59  file68  file77  file86  file95
file14   file23  file32  file41  file50  file6   file69  file78  file87  file96
file15   file24  file33  file42  file51  file60  file7   file79  file88  file97
file16   file25  file34  file43  file52  file61  file70  file8   file89  file98
file17   file26  file35  file44  file53  file62  file71  file80  file9   file99
[root@ip-10-100-0-34 test]#
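
Note that the mount above used the brick export path as its source. The native client expects the volume name (server:/VOLNAME), so the volume is remounted below in the correct form, at which point it starts out empty and the test data is recreated: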



[root@ip-10-100-0-34 test]# mount -t glusterfs 10.100.1.11:/testvol /mnt
[root@ip-10-100-0-34 test]# cd /mnt
[root@ip-10-100-0-34 mnt]# ls
[root@ip-10-100-0-34 mnt]# mkdir tst
[root@ip-10-100-0-34 mnt]# mkdir test
[root@ip-10-100-0-34 mnt]# chmod 777 test
[root@ip-10-100-0-34 mnt]# cd test
[root@ip-10-100-0-34 test]# for i in `seq 1 100`
> do 
> echo hello$i > file$i
> done
[root@ip-10-100-0-34 test]# ls -C
file1    file18  file27  file36  file45  file54  file63  file72  file81  file90
file10   file19  file28  file37  file46  file55  file64  file73  file82  file91
file100  file2   file29  file38  file47  file56  file65  file74  file83  file92
file11   file20  file3   file39  file48  file57  file66  file75  file84  file93
file12   file21  file30  file4   file49  file58  file67  file76  file85  file94
file13   file22  file31  file40  file5   file59  file68  file77  file86  file95
file14   file23  file32  file41  file50  file6   file69  file78  file87  file96
file15   file24  file33  file42  file51  file60  file7   file79  file88  file97
file16   file25  file34  file43  file52  file61  file70  file8   file89  file98
file17   file26  file35  file44  file53  file62  file71  file80  file9   file99
[root@ip-10-100-0-34 test]# 



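For reference, the basic network configuration on one of the storage nodes: eth0 is configured via DHCP, and peer names resolve through /etc/hosts: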
[root@ip-10-100-2-12 ~]# date
Fri Oct 31 03:54:54 UTC 2014
[root@ip-10-100-2-12 ~]# ls -l
total 8
drwxr-xr-x 2 root root 4096 Oct 31 01:29 scripts
drwxr-xr-x 2 root root 4096 Feb 27  2014 tools
[root@ip-10-100-2-12 ~]# cat /etc/hosts
127.0.0.1		localhost.localdomain localhost
::1		localhost6.localdomain6 localhost6
10.204.106.196   ip-10-204-106-196
10.181.145.85   ip-10-181-145-85
10.185.77.229   ip-10-185-77-229
10.215.90.239   domU-12-31-39-0C-59-01
10.100.2.12   ip-10-100-2-12
[root@ip-10-100-2-12 ~]# cat /etc/sysconfig/network
NETWORKING=yes
NETWORKING_IPV6=no
HOSTNAME=localhost.localdomain
[root@ip-10-100-2-12 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0 
DEVICE=eth0
BOOTPROTO=dhcp
ONBOOT=on
[root@ip-10-100-2-12 ~]# cat /etc/sysconfig/network-scripts/ifcfg-
ifcfg-eth0  ifcfg-lo    
[root@ip-10-100-2-12 ~]# cat /etc/sysconfig/network-scripts/ifcfg-lo 
DEVICE=lo
IPADDR=127.0.0.1
NETMASK=255.0.0.0
NETWORK=127.0.0.0
# If you're having problems with gated making 127.0.0.0/8 a martian,
# you can change this to something else (255.255.255.255, for example)
BROADCAST=127.255.255.255
ONBOOT=yes
NAME=loopback


mint@mint ~/Downloads $ ssh -i qwikLABS-L8-1077.pem ec2-user@<client-public-IP>
Last login: Fri Oct 31 01:59:27 2014 from 122.172.135.201

       __|  __|_  )
       _|  (     /   Amazon Linux AMI
      ___|\___|___|

https://aws.amazon.com/amazon-linux-ami/2014.09-release-notes/
[ec2-user@ip-10-100-0-216 ~]$
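
Gluster also exports every started volume through its built-in NFS server, which speaks NFSv3 over TCP only. From this second client, mount the volume as an ordinary NFS share: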


[root@ip-10-100-0-216 ~]# mount -o mountproto=tcp -t nfs 10.100.1.11:/testvol /mnt
[root@ip-10-100-0-216 ~]# mount
proc on /proc type proc (rw,relatime)
sysfs on /sys type sysfs (rw,relatime)
devtmpfs on /dev type devtmpfs (rw,relatime,size=836420k,nr_inodes=209105,mode=755)
devpts on /dev/pts type devpts (rw,relatime,gid=5,mode=620,ptmxmode=000)
tmpfs on /dev/shm type tmpfs (rw,relatime)
/dev/xvda1 on / type ext4 (rw,noatime,data=ordered)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw,relatime)
sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw,relatime)
10.100.1.11:/testvol on /mnt type nfs (rw,relatime,vers=3,rsize=1048576,wsize=1048576,namlen=255,hard,proto=tcp,timeo=600,
retrans=2,sec=sys,mountaddr=10.100.1.11,mountvers=3,mountport=38465,mountproto=tcp,local_lock=none,addr=10.100.1.11)
[root@ip-10-100-0-216 ~]#
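
The options the kernel reports (vers=3, proto=tcp, mountport=38465) come from Gluster's NFS server. To make the mount permanent, an /etc/fstab entry along these lines should work (a sketch; the options force NFSv3 over TCP):

10.100.1.11:/testvol  /mnt  nfs  defaults,_netdev,vers=3,mountproto=tcp  0 0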



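The volume can also be shared over SMB. The Samba build used here ships a glusterfs VFS module that talks to the volume through libgfapi, so no FUSE mount is needed; the share definition is appended to smb.conf: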
[root@ip-10-100-1-11 samba]# vi smb.conf 
[root@ip-10-100-1-11 samba]# tail -n 10 smb.conf 

[gluster-testvol]
comment = For samba share of volume testvol
vfs objects = glusterfs
glusterfs:volume = testvol
glusterfs:logfile = /var/log/samba/glusterfs-testvol.%M.log
glusterfs:loglevel = 7
path = /
read only = no
guest ok = yes
[root@ip-10-100-1-11 samba]# useradd samba-user
[root@ip-10-100-1-11 samba]# smbpasswd -a samba-user
New SMB password:
Retype new SMB password:
Added user samba-user.
[root@ip-10-100-1-11 samba]# /sbin/chkconfig smb on
[root@ip-10-100-1-11 samba]# /etc/init.d/smb start
Starting SMB services:                                     [  OK  ]
[root@ip-10-100-1-11 samba]#
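
A quick way to verify the share from any SMB-capable client (a sketch; run it from a machine that can reach node 1):

smbclient //10.100.1.11/gluster-testvol -U samba-user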


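Next, expose the volume through the OpenStack Swift API using gluster-swift, which maps each Gluster volume to a Swift account. The object services expect the volume mounted under /mnt/gluster-object: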
[root@ip-10-100-1-11 samba]# mkdir /mnt/gluster-object
[root@ip-10-100-1-11 samba]# mount -t glusterfs localhost:/testvol /mnt/gluster-object
[root@ip-10-100-1-11 samba]#



[root@ip-10-100-1-11 samba]# cp /etc/swift/*-gluster /etc/swift/*.conf
cp: target `/etc/swift/*.conf' is not a directory

The destination glob matches no existing directory, so cp refuses; each shipped template has to be copied to its .conf name individually:
[root@ip-10-100-1-11 samba]# cd /etc/
[root@ip-10-100-1-11 etc]# cd swift/
[root@ip-10-100-1-11 swift]# ls
account-server.conf-gluster    object-server.conf-gluster
container-server.conf-gluster  proxy-server.conf-gluster
fs.conf-gluster                swift.conf-gluster
[root@ip-10-100-1-11 swift]# cp account-server.conf-gluster account-server.conf
[root@ip-10-100-1-11 swift]# cp object-server.conf-gluster object-server.conf
[root@ip-10-100-1-11 swift]# cp container-server.conf-gluster container-server.conf
[root@ip-10-100-1-11 swift]# cp proxy-server.conf-gluster proxy-server.conf
[root@ip-10-100-1-11 swift]# cp fs.conf-gluster fs.conf
[root@ip-10-100-1-11 swift]# cp swift.conf-gluster swift.conf
[root@ip-10-100-1-11 swift]# ls -l *.conf
-rw-r--r-- 1 root root 1157 Oct 31 04:39 account-server.conf
-rw-r--r-- 1 root root 1167 Oct 31 04:39 container-server.conf
-rw-r--r-- 1 root root  552 Oct 31 04:40 fs.conf
-rw-r--r-- 1 root root 2038 Oct 31 04:39 object-server.conf
-rw-r--r-- 1 root root 2588 Oct 31 04:40 proxy-server.conf
-rw-r--r-- 1 root root 2545 Oct 31 04:40 swift.conf
[root@ip-10-100-1-11 swift]# vi proxy-server.conf
[root@ip-10-100-1-11 swift]# vi proxy-server.conf
[root@ip-10-100-1-11 swift]# cd /etc/swift/
[root@ip-10-100-1-11 swift]# ls
account-server.conf            object-server.conf
account-server.conf-gluster    object-server.conf-gluster
container-server.conf          proxy-server.conf
container-server.conf-gluster  proxy-server.conf-gluster
fs.conf                        swift.conf
fs.conf-gluster                swift.conf-gluster
[root@ip-10-100-1-11 swift]# gluster-swift-gen-builders testvol
Ring files are prepared in /etc/swift. Please restart object store services
[root@ip-10-100-1-11 swift]# 
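
gluster-swift-gen-builders writes the account, container and object ring files into /etc/swift, with the volume mapped in as the storage target. After editing proxy-server.conf to add a tempauth user, start the Swift services: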



[root@ip-10-100-1-11 swift]# vi proxy-server.conf
[root@ip-10-100-1-11 swift]# swift-init main start
Starting proxy-server...(/etc/swift/proxy-server.conf)
container-server running (8592 - /etc/swift/container-server.conf)
container-server already started...
account-server running (8593 - /etc/swift/account-server.conf)
account-server already started...
object-server running (8594 - /etc/swift/object-server.conf)
object-server already started...
[root@ip-10-100-1-11 swift]# tail proxy-server.conf

[filter:cache]
use = egg:swift#memcache
# Update this line to contain a comma separated list of memcache servers
# shared by all nodes running the proxy-server service.
memcache_servers = localhost:11211

[filter:tempauth]
use = egg:swift#tempauth
user_testvol_veda = password .admin
[root@ip-10-100-1-11 swift]#
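
tempauth entries have the form user_<account>_<user> = <key> [role...], so the last line above defines user veda in account testvol with password "password" and the .admin role. Request a token and storage URL from the auth endpoint: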

[root@ip-10-100-1-11 swift]# curl -v -H 'X-Storage-User: testvol:veda' -H 'X-Storage-Pass:password' http://10.100.1.11:8080/auth/v1.0 -k
* About to connect() to 10.100.1.11 port 8080 (#0)
*   Trying 10.100.1.11... connected
* Connected to 10.100.1.11 (10.100.1.11) port 8080 (#0)
> GET /auth/v1.0 HTTP/1.1
> User-Agent: curl/7.19.7 (x86_64-redhat-linux-gnu) libcurl/7.19.7 NSS/3.14.3.0 zlib/1.2.3 libidn/1.18 libssh2/1.4.2
> Host: 10.100.1.11:8080
> Accept: */*
> X-Storage-User: testvol:veda
> X-Storage-Pass:password
> 
< HTTP/1.1 200 OK
< X-Storage-Url: http://10.100.1.11:8080/v1/AUTH_testvol
< X-Auth-Token: AUTH_tk56f43a38cc224169ab5ebb4d77f9dbb6
< Content-Type: text/html; charset=UTF-8
< X-Storage-Token: AUTH_tk56f43a38cc224169ab5ebb4d77f9dbb6
< Content-Length: 0
< X-Trans-Id: txb274343b7f744880bd86b-00545315aa
< Date: Fri, 31 Oct 2014 04:52:58 GMT
< 
* Connection #0 to host 10.100.1.11 left intact
* Closing connection #0
You have new mail in /var/spool/mail/root
[root@ip-10-100-1-11 swift]# 
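
The returned X-Auth-Token can now be used against the X-Storage-Url. A sketch of creating a container and uploading one of the test files (the container name mycontainer is arbitrary):

curl -X PUT -H 'X-Auth-Token: AUTH_tk56f43a38cc224169ab5ebb4d77f9dbb6' http://10.100.1.11:8080/v1/AUTH_testvol/mycontainer
curl -X PUT -H 'X-Auth-Token: AUTH_tk56f43a38cc224169ab5ebb4d77f9dbb6' -T /mnt/gluster-object/test/file1 http://10.100.1.11:8080/v1/AUTH_testvol/mycontainer/file1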



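To test high availability, stop every Gluster daemon on node 1, simulating a node failure: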
[root@ip-10-100-1-11 swift]# cd 
[root@ip-10-100-1-11 ~]# service glusterd stop
Stopping glusterd:                                         [  OK  ]
[root@ip-10-100-1-11 ~]# pkill glusterfs
[root@ip-10-100-1-11 ~]# pkill glusterfsd
[root@ip-10-100-1-11 ~]# gluster volume info
Connection failed. Please check if gluster daemon is operational.
[root@ip-10-100-1-11 ~]# gluster peer status
Connection failed. Please check if gluster daemon is operational.
You have new mail in /var/spool/mail/root
[root@ip-10-100-1-11 ~]# 
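
While node 1 is down its CLI is unreachable, but the data stays available: with a two-way replica, clients keep working against the surviving brick of each pair. Bring the node back and check the volume: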


[root@ip-10-100-1-11 export]# service glusterd start
Starting glusterd:                                         [  OK  ]
[root@ip-10-100-1-11 export]# gluster volume info

Volume Name: testvol
Type: Distributed-Replicate
Volume ID: 77daccf3-4dee-4388-93fc-f7c7d0aeb510
Status: Started
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: 10.100.1.11:/export/brick/testvol
Brick2: 10.100.2.12:/export/brick/testvol
Brick3: 10.100.1.13:/export/brick/testvol
Brick4: 10.100.2.14:/export/brick/testvol
[root@ip-10-100-1-11 export]# 


[root@ip-10-100-1-11 export]# gluster peer status
Number of Peers: 3

Hostname: 10.100.1.13
Uuid: f44de833-6db5-4249-95d0-b3373e726dbd
State: Peer in Cluster (Connected)

Hostname: 10.100.2.14
Uuid: d973306c-953a-4f46-8cd3-6fbffec73b58
State: Peer in Cluster (Connected)

Hostname: 10.100.2.12
Uuid: 5a5a0427-978d-46f7-9347-9666828f635b
State: Peer in Cluster (Connected)
[root@ip-10-100-1-11 export]# 



[root@ip-10-100-1-11 export]# gluster volume heal testvol info
Brick ip-10-100-1-11:/export/brick/testvol/
Number of entries: 0

Brick ip-10-100-2-12:/export/brick/testvol/
Number of entries: 0

Brick ip-10-100-1-13:/export/brick/testvol/
Number of entries: 0

Brick ip-10-100-2-14:/export/brick/testvol/
Number of entries: 0

You have new mail in /var/spool/mail/root
[root@ip-10-100-1-11 export]# 
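
Once glusterd is back, the peers reconnect and heal info reports zero pending entries per brick, i.e. the replicas are in sync and nothing is left to self-heal.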


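For disaster recovery, build a second trusted pool from two fresh nodes (10.100.1.15 and 10.100.2.16) and create a plain distributed volume, drvol, to act as the geo-replication slave: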
[root@ip-10-100-1-15 ~]# gluster peer probe 10.100.2.16
peer probe: success. 
[root@ip-10-100-1-15 ~]# gluster peer status
Number of Peers: 1

Hostname: 10.100.2.16
Uuid: 863e6f17-00ea-4621-903e-e920596a74a8
State: Peer in Cluster (Connected)
[root@ip-10-100-1-15 ~]#

[root@ip-10-100-1-15 ~]# gluster volume create drvol 10.100.1.15:/export/brick/drvol 10.100.2.16:/export/brick/drvol
volume create: drvol: success: please start the volume to access data
[root@ip-10-100-1-15 ~]# gluster volume start drvol
volume start: drvol: success
[root@ip-10-100-1-15 ~]# gluster volume info drvol

Volume Name: drvol
Type: Distribute
Volume ID: 76cbb2ca-207c-4fed-840a-6a76a3f21600
Status: Started
Number of Bricks: 2
Transport-type: tcp
Bricks:
Brick1: 10.100.1.15:/export/brick/drvol
Brick2: 10.100.2.16:/export/brick/drvol
[root@ip-10-100-1-15 ~]# 



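Geo-replication pushes changes over SSH, so a master node needs passwordless root SSH to the slave. On 10.100.2.12, generate a key pair and install the public key on 10.100.1.15: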
Last login: Fri Oct 31 03:54:21 2014 from 10.100.0.34
[root@ip-10-100-2-12 ~]# ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa): 
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
dc:00:b3:b6:92:31:39:e1:81:4d:92:55:ac:92:d1:13 root@ip-10-100-2-12
The key's randomart image is:
+--[ RSA 2048]----+
|  .BE++          |
|  +o++.+         |
|   o*oo .        |
|  o .* o o       |
|   .o . S .      |
|     .           |
|                 |
|                 |
|                 |
+-----------------+
[root@ip-10-100-2-12 ~]# scp /root/.ssh/id_rsa.pub 10.100.1.15:/root/ssh/authorized_keys
The authenticity of host '10.100.1.15 (10.100.1.15)' can't be established.
RSA key fingerprint is 22:2e:3f:43:ea:76:8e:5c:47:2c:bd:44:b3:d7:6f:ca.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '10.100.1.15' (RSA) to the list of known hosts.
root@10.100.1.15's password: 
scp: /root/ssh/authorized_keys: No such file or directory
[root@ip-10-100-2-12 ~]# scp /root/.ssh/id_rsa.pub 10.100.1.15:/root/.ssh/authorized_keys
root@10.100.1.15's password: 
id_rsa.pub                                    100%  401     0.4KB/s   00:00    
[root@ip-10-100-2-12 ~]# ssh 10.100.1.15
Last login: Fri Oct 31 05:04:42 2014 from 10.100.1.11
[root@ip-10-100-1-15 ~]#
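
Note that copying id_rsa.pub straight over authorized_keys clobbers any keys already present on the slave; where that matters, ssh-copy-id appends instead:

ssh-copy-id -i /root/.ssh/id_rsa.pub root@10.100.1.15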


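With SSH in place, generate the common secret pem file, then create and start the geo-replication session from the master volume to the slave volume: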
[root@ip-10-100-2-12 ~]# gluster system:: execute gsec_create
Common secret pub file present at /var/lib/glusterd/geo-replication/common_secret.pem.pub
[root@ip-10-100-2-12 ~]# gluster volume geo-replication testvol 10.100.1.15::drvol create push-pem
Creating geo-replication session between testvol & 10.100.1.15::drvol has been successful
[root@ip-10-100-2-12 ~]# gluster volume geo-replication testvol 10.100.1.15::drvol status

MASTER NODE       MASTER VOL    MASTER BRICK             SLAVE                 STATUS         CHECKPOINT STATUS    CRAWL STATUS        
--------------------------------------------------------------------------------------------------------------------------------
ip-10-100-2-12    testvol       /export/brick/testvol    10.100.1.15::drvol    Not Started    N/A                  N/A                 
ip-10-100-1-13    testvol       /export/brick/testvol    10.100.1.15::drvol    Not Started    N/A                  N/A                 
ip-10-100-2-14    testvol       /export/brick/testvol    10.100.1.15::drvol    Not Started    N/A                  N/A                 
ip-10-100-1-11    testvol       /export/brick/testvol    10.100.1.15::drvol    Not Started    N/A                  N/A                 
[root@ip-10-100-2-12 ~]# 



[root@ip-10-100-2-12 ~]# gluster volume geo-replication testvol 10.100.1.15::drvol start
Starting geo-replication session between testvol & 10.100.1.15::drvol has been successful
[root@ip-10-100-2-12 ~]# gluster volume geo-replication testvol 10.100.1.15::drvol status

MASTER NODE       MASTER VOL    MASTER BRICK             SLAVE                 STATUS             CHECKPOINT STATUS    CRAWL STATUS        
------------------------------------------------------------------------------------------------------------------------------------
ip-10-100-2-12    testvol       /export/brick/testvol    10.100.1.15::drvol    Initializing...    N/A                  N/A                 
ip-10-100-1-13    testvol       /export/brick/testvol    10.100.1.15::drvol    Initializing...    N/A                  N/A                 
ip-10-100-1-11    testvol       /export/brick/testvol    10.100.2.16::drvol    Initializing...    N/A                  N/A                 
ip-10-100-2-14    testvol       /export/brick/testvol    10.100.2.16::drvol    Initializing...    N/A                  N/A                 
[root@ip-10-100-2-12 ~]# 
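
Shortly after starting, the workers leave the Initializing state; with a replicated master, one node of each replica pair typically becomes Active while its partner stays Passive.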

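Finally, mount the slave volume from the client and confirm that the data written to testvol has been replicated: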
[root@ip-10-100-0-34 test]# cd 
[root@ip-10-100-0-34 ~]# mount -t glusterfs 10.100.1.15:/drvol /media
[root@ip-10-100-0-34 ~]# cd /media/
[root@ip-10-100-0-34 media]# ls
test  tst
[root@ip-10-100-0-34 media]# cd test/
[root@ip-10-100-0-34 test]# ls
file1    file18  file27  file36  file45  file54  file63  file72  file81  file90
file10   file19  file28  file37  file46  file55  file64  file73  file82  file91
file100  file2   file29  file38  file47  file56  file65  file74  file83  file92
file11   file20  file3   file39  file48  file57  file66  file75  file84  file93
file12   file21  file30  file4   file49  file58  file67  file76  file85  file94
file13   file22  file31  file40  file5   file59  file68  file77  file86  file95
file14   file23  file32  file41  file50  file6   file69  file78  file87  file96
file15   file24  file33  file42  file51  file60  file7   file79  file88  file97
file16   file25  file34  file43  file52  file61  file70  file8   file89  file98
file17   file26  file35  file44  file53  file62  file71  file80  file9   file99
[root@ip-10-100-0-34 test]# 
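
The test and tst directories and all 100 files created on the master are present on the slave volume, confirming that geo-replication works end to end.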