
6.6. Ceph

http://ceph.com/

6.6.1. Installation on Ubuntu

$ apt-cache search ceph
ceph - distributed storage
ceph-common - common utilities to mount and interact with a ceph filesystem
ceph-common-dbg - debugging symbols for ceph-common
ceph-dbg - debugging symbols for ceph
ceph-fs-common - common utilities to mount and interact with a ceph filesystem
ceph-fs-common-dbg - debugging symbols for ceph-fs-common
ceph-mds-dbg - debugging symbols for ceph
gceph - Graphical ceph cluster status utility
gceph-dbg - debugging symbols for gceph
libcephfs-dev - Ceph distributed file system client library (development files)
libcephfs1 - Ceph distributed file system client library
libcephfs1-dbg - debugging symbols for libcephfs1
librados-dev - RADOS distributed object store client library (development files)
librados2 - RADOS distributed object store client library
librados2-dbg - debugging symbols for librados2
librbd-dev - RADOS block device client library (development files)
librbd1 - RADOS block device client library
librbd1-dbg - debugging symbols for librbd1
ceph-mds - distributed filesystem service
ceph-resource-agents - OCF-compliant resource agents for Ceph
obsync - synchronize data between cloud object storage providers or a local directory
python-ceph - Python libraries for the Ceph distributed filesystem

$ sudo apt-get install ceph
$ sudo apt-get install ceph-mds
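
To confirm the installation you can check the version of the installed binaries (the exact version string depends on the packaged release):

$ ceph --version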
		

Create the configuration file /etc/ceph/ceph.conf

		
$ vim /etc/ceph/ceph.conf
[global]

	# For version 0.55 and beyond, you must explicitly enable
	# or disable authentication with "auth" entries in [global].

	auth cluster required = cephx
	auth service required = cephx
	auth client required = cephx

[osd]
	osd journal size = 1000

	#The following assumes ext4 filesystem.
	filestore xattr use omap = true


	# For Bobtail (v 0.56) and subsequent versions, you may
	# add settings for mkcephfs so that it will create and mount
	# the file system on a particular OSD for you. Remove the comment `#`
	# character for the following settings and replace the values
	# in braces with appropriate values, or leave the following settings
	# commented out to accept the default values. You must specify the
	# --mkfs option with mkcephfs in order for the deployment script to
	# utilize the following settings, and you must define the 'devs'
	# option for each osd instance; see below.

	#osd mkfs type = {fs-type}
	#osd mkfs options {fs-type} = {mkfs options}   # default for xfs is "-f"
	#osd mount options {fs-type} = {mount options} # default mount option is "rw,noatime"

	# For example, for ext4, the mount option might look like this:

	#osd mkfs options ext4 = user_xattr,rw,noatime

	# Execute $ hostname to retrieve the name of your host,
	# and replace ubuntu with the name of your host.
	# For the monitor, replace 192.168.6.2 with the IP
	# address of your host.

[mon.a]

	host = ubuntu
	mon addr = 192.168.6.2:6789

[osd.0]
	host = ubuntu

	# For Bobtail (v 0.56) and subsequent versions, you may
	# add settings for mkcephfs so that it will create and mount
	# the file system on a particular OSD for you. Remove the comment `#`
	# character for the following setting for each OSD and specify
	# a path to the device if you use mkcephfs with the --mkfs option.

	#devs = {path-to-device}

[osd.1]
	host = ubuntu
	#devs = {path-to-device}

[mds.a]
	host = ubuntu
		
		

Create the directories

sudo mkdir -p /var/lib/ceph/osd/ceph-0
sudo mkdir -p /var/lib/ceph/osd/ceph-1
sudo mkdir -p /var/lib/ceph/mon/ceph-a
sudo mkdir -p /var/lib/ceph/mds/ceph-a
		

Create the keyring file

$ cd /etc/ceph
$ sudo mkcephfs -a -c /etc/ceph/ceph.conf -k ceph.keyring
		

The keyring creation process is as follows

$ sudo mkcephfs -a -c /etc/ceph/ceph.conf -k ceph.keyring
temp dir is /tmp/mkcephfs.4rUAn1MJYV
preparing monmap in /tmp/mkcephfs.4rUAn1MJYV/monmap
/usr/bin/monmaptool --create --clobber --add a 192.168.6.2:6789 --print /tmp/mkcephfs.4rUAn1MJYV/monmap
/usr/bin/monmaptool: monmap file /tmp/mkcephfs.4rUAn1MJYV/monmap
/usr/bin/monmaptool: generated fsid a5afe011-bfde-4784-8d3d-e488418897d6
epoch 0
fsid a5afe011-bfde-4784-8d3d-e488418897d6
last_changed 2013-04-10 18:05:46.409761
created 2013-04-10 18:05:46.409761
0: 192.168.6.2:6789/0 mon.a
/usr/bin/monmaptool: writing epoch 0 to /tmp/mkcephfs.4rUAn1MJYV/monmap (1 monitors)
=== osd.0 ===
2013-04-10 18:05:46.899898 7f8b26ec8780 -1 filestore(/var/lib/ceph/osd/ceph-0) limited size xattrs -- filestore_xattr_use_omap enabled
2013-04-10 18:05:47.303918 7f8b26ec8780 -1 filestore(/var/lib/ceph/osd/ceph-0) could not find 23c2fcde/osd_superblock/0//-1 in index: (2) No such file or directory
2013-04-10 18:05:47.658550 7f8b26ec8780 -1 created object store /var/lib/ceph/osd/ceph-0 journal /var/lib/ceph/osd/ceph-0/journal for osd.0 fsid a5afe011-bfde-4784-8d3d-e488418897d6
2013-04-10 18:05:47.659360 7f8b26ec8780 -1 auth: error reading file: /var/lib/ceph/osd/ceph-0/keyring: can't open /var/lib/ceph/osd/ceph-0/keyring: (2) No such file or directory
2013-04-10 18:05:47.659489 7f8b26ec8780 -1 created new key in keyring /var/lib/ceph/osd/ceph-0/keyring
=== osd.1 ===
2013-04-10 18:05:48.039253 7f27289be780 -1 filestore(/var/lib/ceph/osd/ceph-1) limited size xattrs -- filestore_xattr_use_omap enabled
2013-04-10 18:05:48.338222 7f27289be780 -1 filestore(/var/lib/ceph/osd/ceph-1) could not find 23c2fcde/osd_superblock/0//-1 in index: (2) No such file or directory
2013-04-10 18:05:48.734861 7f27289be780 -1 created object store /var/lib/ceph/osd/ceph-1 journal /var/lib/ceph/osd/ceph-1/journal for osd.1 fsid a5afe011-bfde-4784-8d3d-e488418897d6
2013-04-10 18:05:48.734992 7f27289be780 -1 auth: error reading file: /var/lib/ceph/osd/ceph-1/keyring: can't open /var/lib/ceph/osd/ceph-1/keyring: (2) No such file or directory
2013-04-10 18:05:48.735294 7f27289be780 -1 created new key in keyring /var/lib/ceph/osd/ceph-1/keyring
=== mds.a ===
creating private key for mds.a keyring /var/lib/ceph/mds/ceph-a/keyring
creating /var/lib/ceph/mds/ceph-a/keyring
Building generic osdmap from /tmp/mkcephfs.4rUAn1MJYV/conf
/usr/bin/osdmaptool: osdmap file '/tmp/mkcephfs.4rUAn1MJYV/osdmap'
/usr/bin/osdmaptool: writing epoch 1 to /tmp/mkcephfs.4rUAn1MJYV/osdmap
Generating admin key at /tmp/mkcephfs.4rUAn1MJYV/keyring.admin
creating /tmp/mkcephfs.4rUAn1MJYV/keyring.admin
Building initial monitor keyring
added entity mds.a auth auth(auid = 18446744073709551615 key=AQB8OWVR0JMKMhAAZNnl4D2JkWIppS7gkdYkhw== with 0 caps)
added entity osd.0 auth auth(auid = 18446744073709551615 key=AQB7OWVRIFdNJxAAHjgfc+J1uVTMj4uVLtTSaQ== with 0 caps)
added entity osd.1 auth auth(auid = 18446744073709551615 key=AQB8OWVROCLPKxAAJ/Jim86K7Ip1PGnCw3Fb/g== with 0 caps)
=== mon.a ===
/usr/bin/ceph-mon: created monfs at /var/lib/ceph/mon/ceph-a for mon.a
placing client.admin keyring in ceph.keyring

$ ls
ceph.conf  ceph.keyring
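
To see what mkcephfs put into the admin keyring, you can list its entries with ceph-authtool (output omitted here, since the keys are unique to each cluster):

$ sudo ceph-authtool -l /etc/ceph/ceph.keyring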
		

Start Ceph

$ sudo service ceph -a start
$ sudo ceph health
		

The startup process is as follows

$ sudo service ceph -a start
=== mon.a ===
Starting Ceph mon.a on ubuntu...
starting mon.a rank 0 at 192.168.6.2:6789/0 mon_data /var/lib/ceph/mon/ceph-a fsid a5afe011-bfde-4784-8d3d-e488418897d6
=== mds.a ===
Starting Ceph mds.a on ubuntu...
starting mds.a at :/0
=== osd.0 ===
Starting Ceph osd.0 on ubuntu...
starting osd.0 at :/0 osd_data /var/lib/ceph/osd/ceph-0 /var/lib/ceph/osd/ceph-0/journal
=== osd.1 ===
Starting Ceph osd.1 on ubuntu...
starting osd.1 at :/0 osd_data /var/lib/ceph/osd/ceph-1 /var/lib/ceph/osd/ceph-1/journal

$ sudo ceph health
HEALTH_OK
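
ceph health prints a one-line summary; for a slightly more detailed view of the monitors, OSDs and placement groups you can also run:

$ sudo ceph -s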
		

$ sudo mkdir /mnt/ceph
$ sudo mount -t ceph 192.168.6.2:6789:/ /mnt/ceph
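
Note that with cephx authentication enabled, as in the ceph.conf above, the kernel client normally needs the client name and secret passed as mount options. A sketch, assuming the admin key is first extracted to a file (the path /etc/ceph/admin.secret is only an example):

$ sudo ceph-authtool -n client.admin --print-key /etc/ceph/ceph.keyring | sudo tee /etc/ceph/admin.secret
$ sudo mount -t ceph 192.168.6.2:6789:/ /mnt/ceph -o name=admin,secretfile=/etc/ceph/admin.secret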
		

Check the filesystem mount status

$ df -T
Filesystem              Type     1K-blocks     Used Available Use% Mounted on
/dev/mapper/ubuntu-root ext4      49263424  8860876  37900100  19% /
udev                    devtmpfs   2014956        4   2014952   1% /dev
tmpfs                   tmpfs       809808     1612    808196   1% /run
none                    tmpfs         5120        0      5120   0% /run/lock
none                    tmpfs      2024516        0   2024516   0% /run/shm
none                    tmpfs       102400        0    102400   0% /run/user
/dev/vda1               ext2        233191    80600    140150  37% /boot
192.168.6.2:6789:/      ceph      98526208 22726656  75799552  24% /mnt/ceph
		

Try creating a file

$ sudo touch /mnt/ceph/hello
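
The new file should then be visible through the mount:

$ ls -l /mnt/ceph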
		

6.6.2. Installation on CentOS

CentOS 6.4

6.6.2.1. mon

rpm --import 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc'
rpm -Uvh http://ceph.com/rpm-bobtail/el6/x86_64/ceph-release-1-0.el6.noarch.rpm
yum install ceph
			

Create the configuration file. You can refer to /usr/share/doc/ceph/sample.ceph.conf, or copy it and modify it.

			
[global]

	# For version 0.55 and beyond, you must explicitly enable
	# or disable authentication with "auth" entries in [global].

	auth cluster required = cephx
	auth service required = cephx
	auth client required = cephx

[osd]
	osd journal size = 1000

	#The following assumes ext4 filesystem.
	filestore xattr use omap = true


	# For Bobtail (v 0.56) and subsequent versions, you may
	# add settings for mkcephfs so that it will create and mount
	# the file system on a particular OSD for you. Remove the comment `#`
	# character for the following settings and replace the values
	# in braces with appropriate values, or leave the following settings
	# commented out to accept the default values. You must specify the
	# --mkfs option with mkcephfs in order for the deployment script to
	# utilize the following settings, and you must define the 'devs'
	# option for each osd instance; see below.

	#osd mkfs type = {fs-type}
	#osd mkfs options {fs-type} = {mkfs options}   # default for xfs is "-f"
	#osd mount options {fs-type} = {mount options} # default mount option is "rw,noatime"

	# For example, for ext4, the mount option might look like this:

	#osd mkfs options ext4 = user_xattr,rw,noatime

	# Execute $ hostname to retrieve the name of your host,
	# and replace {hostname} with the name of your host.
	# For the monitor, replace {ip-address} with the IP
	# address of your host.

[mon.a]

	host = {hostname}
	mon addr = {ip-address}:6789

[osd.0]
	host = {hostname}

	# For Bobtail (v 0.56) and subsequent versions, you may
	# add settings for mkcephfs so that it will create and mount
	# the file system on a particular OSD for you. Remove the comment `#`
	# character for the following setting for each OSD and specify
	# a path to the device if you use mkcephfs with the --mkfs option.

	#devs = {path-to-device}

[osd.1]
	host = {hostname}
	#devs = {path-to-device}

[mds.a]
	host = {hostname}
			
			
# mkcephfs -a -c /etc/ceph/ceph.conf -k ceph.keyring
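
After mkcephfs finishes, the daemons can be started and the cluster health checked much as on Ubuntu (a sketch, assuming the sysvinit script installed by the ceph package):

# service ceph -a start
# ceph health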
			

6.6.2.2. mds

rpm --import 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc'
rpm -Uvh http://ceph.com/rpm-bobtail/el6/x86_64/ceph-release-1-0.el6.noarch.rpm
yum install ceph
			

6.6.2.3. osd

rpm --import 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc'
rpm -Uvh http://ceph.com/rpm-bobtail/el6/x86_64/ceph-release-1-0.el6.noarch.rpm
yum install ceph
			

6.6.2.4. client

rpm --import 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc'
rpm -Uvh http://ceph.com/rpm-bobtail/el6/x86_64/ceph-release-1-0.el6.noarch.rpm
yum install ceph-fuse
			

Copy ceph.keyring from the server to the client

scp root@ceph-server:/etc/ceph/ceph.keyring /etc/ceph/
			
mkdir /mnt/cephfs/
ceph-fuse -m 192.168.6.2:6789 /mnt/cephfs/
			
mount -t ceph 192.168.6.2:6789:/ /mnt/cephfs
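
As on Ubuntu, the kernel mount needs name/secret options when cephx is enabled. Whichever client is used, the mount can be verified afterwards:

df -hT /mnt/cephfs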
			

6.6.2.5. RADOS Gateway

yum install ceph-radosgw
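
Installing the package alone is not enough; the gateway also needs its own section in /etc/ceph/ceph.conf and a keyring. A minimal sketch for this release series (the section name, keyring path and socket path below are illustrative, not required values):

[client.radosgw.gateway]
	host = {hostname}
	keyring = /etc/ceph/keyring.radosgw.gateway
	rgw socket path = /tmp/radosgw.sock
	log file = /var/log/ceph/radosgw.log

In this release series the gateway was typically fronted by Apache with a FastCGI module; see the Ceph documentation for the web-server side of the setup.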
			

6.6.3. Block Devices