[libvirt] [PATCH] storage backend: Add RBD (RADOS Block Device) support

Wido den Hollander wido at widodh.nl
Mon May 14 09:06:42 UTC 2012


This patch adds a new storage backend for RBD.

RBD is the RADOS Block Device and is part of the Ceph distributed storage system.

RBD comes in two flavours, Qemu-RBD and kernel RBD. This storage backend only supports
Qemu-RBD, which limits the use of this storage driver to Qemu.
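
An RBD volume created by this backend is then referenced from a guest through Qemu's
network disk syntax, roughly like the (hypothetical) example below; see the formatdomain
documentation for the authoritative syntax:

  <disk type='network' device='disk'>
    <driver name='qemu' type='raw'/>
    <source protocol='rbd' name='rbd/myvol'>
      <host name='my.ceph.monitor' port='6789'/>
    </source>
    <target dev='vda' bus='virtio'/>
  </disk>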

To function, this backend relies on librbd and librados being present on the local system.

The backend also supports cephx authentication for secure communication with the Ceph cluster.

Credentials are stored using libvirt's built-in secret mechanism.
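
For example, a pool using cephx authentication with a stored secret is defined as follows
(this mirrors the test case in tests/storagepoolxml2xmlin/pool-rbd.xml added by this patch):

  <pool type='rbd'>
    <name>ceph</name>
    <source>
      <name>rbd</name>
      <host name='localhost' port='6789'/>
      <host name='localhost' port='6790'/>
      <auth username='admin' type='ceph'>
        <secret uuid='2ec115d7-3a88-3ceb-bc12-0ac909a6fd87'/>
      </auth>
    </source>
  </pool>

The secret UUID refers to a libvirt secret holding the cephx key for the given username;
the backend base64-encodes the secret value before passing it to librados.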

Signed-off-by: Wido den Hollander <wido at widodh.nl>
---
 configure.ac                             |   20 ++
 docs/drivers.html.in                     |    1 +
 docs/schemas/storagepool.rng             |   78 ++++-
 docs/storage.html.in                     |   72 ++++
 include/libvirt/libvirt.h.in             |    1 +
 libvirt.spec.in                          |   16 +
 src/Makefile.am                          |    9 +
 src/conf/storage_conf.c                  |  140 ++++++--
 src/conf/storage_conf.h                  |   12 +
 src/storage/storage_backend.c            |    6 +
 src/storage/storage_backend_rbd.c        |  544 ++++++++++++++++++++++++++++++
 src/storage/storage_backend_rbd.h        |   30 ++
 tests/storagepoolxml2xmlin/pool-rbd.xml  |   11 +
 tests/storagepoolxml2xmlout/pool-rbd.xml |   15 +
 tools/virsh.c                            |    7 +
 15 files changed, 917 insertions(+), 45 deletions(-)
 create mode 100644 src/storage/storage_backend_rbd.c
 create mode 100644 src/storage/storage_backend_rbd.h
 create mode 100644 tests/storagepoolxml2xmlin/pool-rbd.xml
 create mode 100644 tests/storagepoolxml2xmlout/pool-rbd.xml

diff --git a/configure.ac b/configure.ac
index a6894ae..1596841 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1810,6 +1810,8 @@ AC_ARG_WITH([storage-mpath],
   AC_HELP_STRING([--with-storage-mpath], [with mpath backend for the storage driver @<:@default=check@:>@]),[],[with_storage_mpath=check])
 AC_ARG_WITH([storage-disk],
   AC_HELP_STRING([--with-storage-disk], [with GPartd Disk backend for the storage driver @<:@default=check@:>@]),[],[with_storage_disk=check])
+AC_ARG_WITH([storage-rbd],
+  AC_HELP_STRING([--with-storage-rbd], [with RADOS Block Device backend for the storage driver @<:@default=check@:>@]),[],[with_storage_rbd=check])
 
 if test "$with_libvirtd" = "no"; then
   with_storage_dir=no
@@ -1819,6 +1821,7 @@ if test "$with_libvirtd" = "no"; then
   with_storage_scsi=no
   with_storage_mpath=no
   with_storage_disk=no
+  with_storage_rbd=no
 fi
 if test "$with_storage_dir" = "yes" ; then
   AC_DEFINE_UNQUOTED([WITH_STORAGE_DIR], 1, [whether directory backend for storage driver is enabled])
@@ -1977,6 +1980,22 @@ if test "$with_storage_mpath" = "check"; then
 fi
 AM_CONDITIONAL([WITH_STORAGE_MPATH], [test "$with_storage_mpath" = "yes"])
 
+if test "$with_storage_rbd" = "yes" || test "$with_storage_rbd" = "check"; then
+    AC_CHECK_HEADER([rbd/librbd.h], [LIBRBD_FOUND=yes])
+
+    LIBRBD_LIBS="-lrbd -lrados -lcrypto"
+
+    if test "$LIBRBD_FOUND" = "yes"; then
+        with_storage_rbd=yes
+        LIBS="$LIBS $LIBRBD_LIBS"
+    else
+        with_storage_rbd=no
+    fi
+
+    AC_DEFINE_UNQUOTED([WITH_STORAGE_RBD], 1, [wether RBD backend for storage driver is enabled])
+fi
+AM_CONDITIONAL([WITH_STORAGE_RBD], [test "$with_storage_rbd" = "yes"])
+
 LIBPARTED_CFLAGS=
 LIBPARTED_LIBS=
 if test "$with_storage_disk" = "yes" ||
@@ -2753,6 +2772,7 @@ AC_MSG_NOTICE([   iSCSI: $with_storage_iscsi])
 AC_MSG_NOTICE([    SCSI: $with_storage_scsi])
 AC_MSG_NOTICE([   mpath: $with_storage_mpath])
 AC_MSG_NOTICE([    Disk: $with_storage_disk])
+AC_MSG_NOTICE([     RBD: $with_storage_rbd])
 AC_MSG_NOTICE([])
 AC_MSG_NOTICE([Security Drivers])
 AC_MSG_NOTICE([])
diff --git a/docs/drivers.html.in b/docs/drivers.html.in
index 75038fc..8ad2c33 100644
--- a/docs/drivers.html.in
+++ b/docs/drivers.html.in
@@ -42,6 +42,7 @@
       <li><strong><a href="storage.html#StorageBackendISCSI">iSCSI backend</a></strong></li>
       <li><strong><a href="storage.html#StorageBackendSCSI">SCSI backend</a></strong></li>
       <li><strong><a href="storage.html#StorageBackendMultipath">Multipath backend</a></strong></li>
+      <li><strong><a href="storage.html#StorageBackendRBD">RBD (RADOS Block Device) backend</a></strong></li>
     </ul>
   </body>
 </html>
diff --git a/docs/schemas/storagepool.rng b/docs/schemas/storagepool.rng
index d4c80da..7753493 100644
--- a/docs/schemas/storagepool.rng
+++ b/docs/schemas/storagepool.rng
@@ -19,6 +19,7 @@
         <ref name='pooliscsi'/>
         <ref name='poolscsi'/>
         <ref name='poolmpath'/>
+        <ref name='poolrbd'/>
       </choice>
     </element>
   </define>
@@ -105,6 +106,15 @@
     <ref name='target'/>
   </define>
 
+  <define name='poolrbd'>
+    <attribute name='type'>
+      <value>rbd</value>
+    </attribute>
+    <ref name='commonmetadata'/>
+    <ref name='sizing'/>
+    <ref name='sourcerbd'/>
+  </define>
+
   <define name='sourceinfovendor'>
     <optional>
       <element name='vendor'>
@@ -184,17 +194,19 @@
   </define>
 
   <define name='sourceinfohost'>
-    <element name='host'>
-      <attribute name='name'>
-        <text/>
-      </attribute>
-      <optional>
-        <attribute name='port'>
-          <ref name="PortNumber"/>
+    <oneOrMore>
+      <element name='host'>
+        <attribute name='name'>
+          <text/>
         </attribute>
-      </optional>
-      <empty/>
-    </element>
+        <optional>
+          <attribute name='port'>
+            <ref name="PortNumber"/>
+          </attribute>
+        </optional>
+        <empty/>
+      </element>
+    </oneOrMore>
   </define>
 
   <define name='sourceinfodev'>
@@ -265,14 +277,38 @@
       <attribute name='type'>
         <choice>
           <value>chap</value>
+          <value>ceph</value>
         </choice>
       </attribute>
-      <attribute name='login'>
-        <text/>
-      </attribute>
-      <attribute name='passwd'>
-        <text/>
-      </attribute>
+      <choice>
+        <attribute name='login'>
+          <text/>
+        </attribute>
+        <attribute name='username'>
+          <text/>
+        </attribute>
+      </choice>
+      <optional>
+        <attribute name='passwd'>
+          <text/>
+        </attribute>
+      </optional>
+      <optional>
+        <ref name='sourceinfoauthsecret'/>
+      </optional>
+    </element>
+  </define>
+
+  <define name='sourceinfoauthsecret'>
+    <element name='secret'>
+      <choice>
+        <attribute name='uuid'>
+          <text/>
+        </attribute>
+        <attribute name='usage'>
+          <text/>
+        </attribute>
+      </choice>
     </element>
   </define>
 
@@ -449,6 +485,16 @@
       <empty/>
     </element>
   </define>
+
+  <define name='sourcerbd'>
+    <element name='source'>
+      <ref name='sourceinfoname'/>
+      <ref name='sourceinfohost'/>
+      <optional>
+        <ref name='sourceinfoauth'/>
+      </optional>
+    </element>
+  </define>
 
   <define name='name'>
     <data type='string'>
diff --git a/docs/storage.html.in b/docs/storage.html.in
index 0e3e289..277ea36 100644
--- a/docs/storage.html.in
+++ b/docs/storage.html.in
@@ -107,6 +107,9 @@
       <li>
         <a href="#StorageBackendMultipath">Multipath backend</a>
       </li>
+      <li>
+        <a href="#StorageBackendRBD">RBD (RADOS Block Device) backend</a>
+      </li>
     </ul>
 
     <h2><a name="StorageBackendDir">Directory pool</a></h2>
@@ -491,6 +494,75 @@
       The Multipath volume pool does not use the volume format type element.
     </p>
 
+    <h2><a name="StorageBackendRBD">RBD pools</a></h2>
+    <p>
+      This storage driver provides a pool which contains all RBD images in a RADOS pool.
+      <br />
+      RBD (RADOS Block Device) is part of the Ceph distributed storage project.
+      <br/>
+      This backend <i>only</i> supports Qemu with RBD support. Kernel RBD, which exposes
+      RBD devices as block devices in /dev, is <i>not</i> supported.
+      <br/>
+      RBD images created with this storage backend can be accessed through kernel RBD if
+      configured manually, but this backend does not map these images itself.
+      <br/>
+      Images created with this backend can be attached to Qemu guests when Qemu is built
+      with RBD support (since Qemu 0.14.0).
+      <br/>
+      The backend supports cephx authentication for communication with the Ceph cluster.
+      <br/>
+      Storing the cephx authentication key is done with the libvirt secret mechanism.
+      The UUID in the example pool input refers to the UUID of the stored secret.
+      <span class="since">Since 0.9.13</span>
+    </p>
+
+    <h3>Example pool input</h3>
+    <pre>
+      <pool type="rbd">
+        <name>myrbdpool</name>
+        <source>
+          <name>rbdpool</name>
+            <host name='1.2.3.4' port='6789'/>
+            <host name='my.ceph.monitor' port='6789'/>
+            <host name='third.ceph.monitor' port='6789'/>
+            <auth username='admin' type='ceph'>
+              <secret uuid='2ec115d7-3a88-3ceb-bc12-0ac909a6fd87'/>
+            </auth>
+        </source>
+      </pool></pre>
+
+    <h3>Example volume output</h3>
+    <pre>
+       <volume>
+         <name>myvol</name>
+         <key>rbd/myvol</key>
+         <source>
+         </source>
+         <capacity unit='bytes'>53687091200</capacity>
+         <allocation unit='bytes'>53687091200</allocation>
+         <target>
+           <path>rbd:rbd/myvol</path>
+           <format type='unknown'/>
+           <permissions>
+             <mode>00</mode>
+             <owner>0</owner>
+             <group>0</group>
+           </permissions>
+         </target>
+       </volume></pre>
+
+    <h3>Example disk attachment</h3>
+    <p>RBD images can be attached to Qemu guests when Qemu is built with RBD support. Information about attaching an RBD image to a guest can be found on the <a href="formatdomain.html#elementsDisks">format domain</a> page.</p>
+
+    <h3>Valid pool format types</h3>
+    <p>
+      The RBD pool does not use the pool format type element.
+    </p>
+
+    <h3>Valid volume format types</h3>
+    <p>
+      The RBD pool does not use the volume format type element.
+    </p>
 
   </body>
 </html>
diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in
index ac5df95..96fd90a 100644
--- a/include/libvirt/libvirt.h.in
+++ b/include/libvirt/libvirt.h.in
@@ -2353,6 +2353,7 @@ typedef enum {
   VIR_STORAGE_VOL_FILE = 0,     /* Regular file based volumes */
   VIR_STORAGE_VOL_BLOCK = 1,    /* Block based volumes */
   VIR_STORAGE_VOL_DIR = 2,      /* Directory-passthrough based volume */
+  VIR_STORAGE_VOL_NETWORK = 3,  /* Network volumes like RBD (RADOS Block Device) */
 
 #ifdef VIR_ENUM_SENTINELS
     VIR_STORAGE_VOL_LAST
diff --git a/libvirt.spec.in b/libvirt.spec.in
index 2e08abb..acbdd2d 100644
--- a/libvirt.spec.in
+++ b/libvirt.spec.in
@@ -73,6 +73,9 @@
 %define with_storage_iscsi 0%{!?_without_storage_iscsi:%{server_drivers}}
 %define with_storage_disk  0%{!?_without_storage_disk:%{server_drivers}}
 %define with_storage_mpath 0%{!?_without_storage_mpath:%{server_drivers}}
+%if 0%{?fedora} >= 16
+%define with_storage_rbd   0%{!?_without_storage_rbd:%{server_drivers}}
+%endif
 %define with_numactl       0%{!?_without_numactl:%{server_drivers}}
 %define with_selinux       0%{!?_without_selinux:%{server_drivers}}
 
@@ -215,6 +218,7 @@
 %define with_storage_lvm 0
 %define with_storage_iscsi 0
 %define with_storage_mpath 0
+%define with_storage_rbd 0
 %define with_storage_disk 0
 %endif
 
@@ -407,6 +411,9 @@ BuildRequires: device-mapper
 %else
 BuildRequires: device-mapper-devel
 %endif
+%if %{with_storage_rbd}
+BuildRequires: ceph-devel
+%endif
 %endif
 %if %{with_numactl}
 # For QEMU/LXC numa info
@@ -558,6 +565,10 @@ Requires: device-mapper
 # For multipath support
 Requires: device-mapper
 %endif
+%if %{with_storage_rbd}
+# For RBD support
+Requires: ceph
+%endif
 %if %{with_cgconfig}
 Requires: libcgroup
 %endif
@@ -838,6 +849,10 @@ of recent versions of Linux (and other OSes).
 %define _without_storage_mpath --without-storage-mpath
 %endif
 
+%if ! %{with_storage_rbd}
+%define _without_storage_rbd --without-storage-rbd
+%endif
+
 %if ! %{with_numactl}
 %define _without_numactl --without-numactl
 %endif
@@ -931,6 +946,7 @@ autoreconf -if
            %{?_without_storage_iscsi} \
            %{?_without_storage_disk} \
            %{?_without_storage_mpath} \
+           %{?_without_storage_rbd} \
            %{?_without_numactl} \
            %{?_without_numad} \
            %{?_without_capng} \
diff --git a/src/Makefile.am b/src/Makefile.am
index 0dadc29..3b1d20e 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -499,6 +499,9 @@ STORAGE_DRIVER_MPATH_SOURCES =					\
 STORAGE_DRIVER_DISK_SOURCES =					\
 		storage/storage_backend_disk.h storage/storage_backend_disk.c
 
+STORAGE_DRIVER_RBD_SOURCES =					\
+		storage/storage_backend_rbd.h storage/storage_backend_rbd.c
+
 STORAGE_HELPER_DISK_SOURCES =					\
 		storage/parthelper.c
 
@@ -1046,6 +1049,11 @@ if WITH_STORAGE_DISK
 libvirt_driver_storage_la_SOURCES += $(STORAGE_DRIVER_DISK_SOURCES)
 endif
 
+if WITH_STORAGE_RBD
+libvirt_driver_storage_la_SOURCES += $(STORAGE_DRIVER_RBD_SOURCES)
+libvirt_la_LIBADD += $(LIBRBD_LIBS)
+endif
+
 if WITH_NODE_DEVICES
 # Needed to keep automake quiet about conditionals
 if WITH_DRIVER_MODULES
@@ -1145,6 +1153,7 @@ EXTRA_DIST +=							\
 		$(STORAGE_DRIVER_SCSI_SOURCES)			\
 		$(STORAGE_DRIVER_MPATH_SOURCES)			\
 		$(STORAGE_DRIVER_DISK_SOURCES)			\
+		$(STORAGE_DRIVER_RBD_SOURCES)			\
 		$(NODE_DEVICE_DRIVER_SOURCES)			\
 		$(NODE_DEVICE_DRIVER_HAL_SOURCES)		\
 		$(NODE_DEVICE_DRIVER_UDEV_SOURCES)		\
diff --git a/src/conf/storage_conf.c b/src/conf/storage_conf.c
index 188af6d..06334c3 100644
--- a/src/conf/storage_conf.c
+++ b/src/conf/storage_conf.c
@@ -52,7 +52,7 @@ VIR_ENUM_IMPL(virStoragePool,
               VIR_STORAGE_POOL_LAST,
               "dir", "fs", "netfs",
               "logical", "disk", "iscsi",
-              "scsi", "mpath")
+              "scsi", "mpath", "rbd")
 
 VIR_ENUM_IMPL(virStoragePoolFormatFileSystem,
               VIR_STORAGE_POOL_FS_LAST,
@@ -110,6 +110,7 @@ enum {
     VIR_STORAGE_POOL_SOURCE_ADAPTER         = (1<<3),
     VIR_STORAGE_POOL_SOURCE_NAME            = (1<<4),
     VIR_STORAGE_POOL_SOURCE_INITIATOR_IQN   = (1<<5),
+    VIR_STORAGE_POOL_SOURCE_NETWORK         = (1<<6),
 };
 
 
@@ -194,6 +195,17 @@ static virStoragePoolTypeInfo poolTypeInfo[] = {
             .formatToString = virStoragePoolFormatDiskTypeToString,
         }
     },
+    { .poolType = VIR_STORAGE_POOL_RBD,
+      .poolOptions = {
+             .flags = (VIR_STORAGE_POOL_SOURCE_HOST |
+                       VIR_STORAGE_POOL_SOURCE_NETWORK |
+                       VIR_STORAGE_POOL_SOURCE_NAME),
+        },
+       .volOptions = {
+            .defaultFormat = VIR_STORAGE_FILE_RAW,
+            .formatToString = virStoragePoolFormatDiskTypeToString,
+        }
+    },
     { .poolType = VIR_STORAGE_POOL_MPATH,
       .volOptions = {
             .formatToString = virStoragePoolFormatDiskTypeToString,
@@ -297,6 +309,11 @@ virStoragePoolSourceClear(virStoragePoolSourcePtr source)
         VIR_FREE(source->auth.chap.login);
         VIR_FREE(source->auth.chap.passwd);
     }
+
+    if (source->authType == VIR_STORAGE_POOL_AUTH_CEPHX) {
+        VIR_FREE(source->auth.cephx.username);
+        VIR_FREE(source->auth.cephx.secret.usage);
+    }
 }
 
 void
@@ -399,6 +416,34 @@ virStoragePoolDefParseAuthChap(xmlXPathContextPtr ctxt,
 }
 
 static int
+virStoragePoolDefParseAuthCephx(xmlXPathContextPtr ctxt,
+                               virStoragePoolAuthCephxPtr auth) {
+    char *uuid = NULL;
+    auth->username = virXPathString("string(./auth/@username)", ctxt);
+    if (auth->username == NULL) {
+        virStorageReportError(VIR_ERR_XML_ERROR,
+                              "%s", _("missing auth username attribute"));
+        return -1;
+    }
+
+    uuid = virXPathString("string(./auth/secret/@uuid)", ctxt);
+    auth->secret.usage = virXPathString("string(./auth/secret/@usage)", ctxt);
+    if (uuid == NULL && auth->secret.usage == NULL) {
+        virStorageReportError(VIR_ERR_XML_ERROR,
+                              "%s", _("missing auth secret uuid or usage attribute"));
+        return -1;
+    }
+
+    if (uuid != NULL && virUUIDParse(uuid, auth->secret.uuid) < 0) {
+        virStorageReportError(VIR_ERR_XML_ERROR,
+                              "%s", _("invalid auth secret uuid"));
+        return -1;
+    }
+
+    return 0;
+}
+
+static int
 virStoragePoolDefParseSource(xmlXPathContextPtr ctxt,
                              virStoragePoolSourcePtr source,
                              int pool_type,
@@ -419,6 +464,12 @@ virStoragePoolDefParseSource(xmlXPathContextPtr ctxt,
     }
 
     source->name = virXPathString("string(./name)", ctxt);
+    if (pool_type == VIR_STORAGE_POOL_RBD && source->name == NULL) {
+        virStorageReportError(VIR_ERR_XML_ERROR,
+                              "%s", _("missing mandatory 'name' field for RBD pool name"));
+        VIR_FREE(source->name);
+        goto cleanup;
+    }
 
     if (options->formatFromString) {
         char *format = virXPathString("string(./format/@type)", ctxt);
@@ -501,6 +552,8 @@ virStoragePoolDefParseSource(xmlXPathContextPtr ctxt,
     } else {
         if (STREQ(authType, "chap")) {
             source->authType = VIR_STORAGE_POOL_AUTH_CHAP;
+        } else if (STREQ(authType, "ceph")) {
+            source->authType = VIR_STORAGE_POOL_AUTH_CEPHX;
         } else {
             virStorageReportError(VIR_ERR_XML_ERROR,
                                   _("unknown auth type '%s'"),
@@ -514,6 +567,11 @@ virStoragePoolDefParseSource(xmlXPathContextPtr ctxt,
             goto cleanup;
     }
 
+    if (source->authType == VIR_STORAGE_POOL_AUTH_CEPHX) {
+        if (virStoragePoolDefParseAuthCephx(ctxt, &source->auth.cephx) < 0)
+            goto cleanup;
+    }
+
     source->vendor = virXPathString("string(./vendor/@name)", ctxt);
     source->product = virXPathString("string(./product/@name)", ctxt);
 
@@ -741,20 +799,22 @@ virStoragePoolDefParseXML(xmlXPathContextPtr ctxt) {
         }
     }
 
-    if ((tmppath = virXPathString("string(./target/path)", ctxt)) == NULL) {
-        virStorageReportError(VIR_ERR_XML_ERROR,
-                              "%s", _("missing storage pool target path"));
-        goto cleanup;
-    }
-    ret->target.path = virFileSanitizePath(tmppath);
-    VIR_FREE(tmppath);
-    if (!ret->target.path)
-        goto cleanup;
-
+    /* When the source is a network resource we can skip the target path and permissions */
+    if (!(options->flags & VIR_STORAGE_POOL_SOURCE_NETWORK)) {
+        if ((tmppath = virXPathString("string(./target/path)", ctxt)) == NULL) {
+            virStorageReportError(VIR_ERR_XML_ERROR,
+                                  "%s", _("missing storage pool target path"));
+            goto cleanup;
+        }
+        ret->target.path = virFileSanitizePath(tmppath);
+        VIR_FREE(tmppath);
+        if (!ret->target.path)
+            goto cleanup;
 
-    if (virStorageDefParsePerms(ctxt, &ret->target.perms,
-                                "./target/permissions", 0700) < 0)
-        goto cleanup;
+        if (virStorageDefParsePerms(ctxt, &ret->target.perms,
+                                    "./target/permissions", 0700) < 0)
+            goto cleanup;
+    }
 
     return ret;
 
@@ -822,6 +882,7 @@ virStoragePoolSourceFormat(virBufferPtr buf,
                            virStoragePoolSourcePtr src)
 {
     int i, j;
+    char uuid[VIR_UUID_STRING_BUFLEN];
 
     virBufferAddLit(buf,"  <source>\n");
     if ((options->flags & VIR_STORAGE_POOL_SOURCE_HOST) && src->nhost) {
@@ -885,6 +946,24 @@ virStoragePoolSourceFormat(virBufferPtr buf,
                           src->auth.chap.login,
                           src->auth.chap.passwd);
 
+    if (src->authType == VIR_STORAGE_POOL_AUTH_CEPHX) {
+        virBufferAsprintf(buf,"    <auth username='%s' type='ceph'>\n",
+                          src->auth.cephx.username);
+
+        virBufferAsprintf(buf,"      %s", "<secret");
+        if (src->auth.cephx.secret.uuid != NULL) {
+            virUUIDFormat(src->auth.cephx.secret.uuid, uuid);
+            virBufferAsprintf(buf," uuid='%s'", uuid);
+        }
+
+        if (src->auth.cephx.secret.usage != NULL) {
+            virBufferAsprintf(buf," usage='%s'", src->auth.cephx.secret.usage);
+        }
+        virBufferAsprintf(buf,"%s", "/>\n");
+
+        virBufferAsprintf(buf,"    %s", "</auth>\n");
+    }
+
     if (src->vendor != NULL) {
         virBufferEscapeString(buf,"    <vendor name='%s'/>\n", src->vendor);
     }
@@ -932,25 +1011,28 @@ virStoragePoolDefFormat(virStoragePoolDefPtr def) {
     if (virStoragePoolSourceFormat(&buf, options, &def->source) < 0)
         goto cleanup;
 
-    virBufferAddLit(&buf,"  <target>\n");
+    /* RBD volumes are neither local block devices nor files, so the pool has no target */
+    if (def->type != VIR_STORAGE_POOL_RBD) {
+        virBufferAddLit(&buf,"  <target>\n");
 
-    if (def->target.path)
-        virBufferAsprintf(&buf,"    <path>%s</path>\n", def->target.path);
+        if (def->target.path)
+            virBufferAsprintf(&buf,"    <path>%s</path>\n", def->target.path);
 
-    virBufferAddLit(&buf,"    <permissions>\n");
-    virBufferAsprintf(&buf,"      <mode>0%o</mode>\n",
-                      def->target.perms.mode);
-    virBufferAsprintf(&buf,"      <owner>%u</owner>\n",
-                      (unsigned int) def->target.perms.uid);
-    virBufferAsprintf(&buf,"      <group>%u</group>\n",
-                      (unsigned int) def->target.perms.gid);
+        virBufferAddLit(&buf,"    <permissions>\n");
+        virBufferAsprintf(&buf,"      <mode>0%o</mode>\n",
+                          def->target.perms.mode);
+        virBufferAsprintf(&buf,"      <owner>%u</owner>\n",
+                          (unsigned int) def->target.perms.uid);
+        virBufferAsprintf(&buf,"      <group>%u</group>\n",
+                          (unsigned int) def->target.perms.gid);
 
-    if (def->target.perms.label)
-        virBufferAsprintf(&buf,"      <label>%s</label>\n",
-                          def->target.perms.label);
+        if (def->target.perms.label)
+            virBufferAsprintf(&buf,"      <label>%s</label>\n",
+                            def->target.perms.label);
 
-    virBufferAddLit(&buf,"    </permissions>\n");
-    virBufferAddLit(&buf,"  </target>\n");
+        virBufferAddLit(&buf,"    </permissions>\n");
+        virBufferAddLit(&buf,"  </target>\n");
+    }
     virBufferAddLit(&buf,"</pool>\n");
 
     if (virBufferError(&buf))
diff --git a/src/conf/storage_conf.h b/src/conf/storage_conf.h
index 9222c4a..5733b57 100644
--- a/src/conf/storage_conf.h
+++ b/src/conf/storage_conf.h
@@ -120,6 +120,7 @@ enum virStoragePoolType {
     VIR_STORAGE_POOL_ISCSI,    /* iSCSI targets */
     VIR_STORAGE_POOL_SCSI,     /* SCSI HBA */
     VIR_STORAGE_POOL_MPATH,    /* Multipath devices */
+    VIR_STORAGE_POOL_RBD,      /* RADOS Block Device */
 
     VIR_STORAGE_POOL_LAST,
 };
@@ -137,6 +138,7 @@ enum virStoragePoolDeviceType {
 enum virStoragePoolAuthType {
     VIR_STORAGE_POOL_AUTH_NONE,
     VIR_STORAGE_POOL_AUTH_CHAP,
+    VIR_STORAGE_POOL_AUTH_CEPHX,
 };
 
 typedef struct _virStoragePoolAuthChap virStoragePoolAuthChap;
@@ -146,6 +148,15 @@ struct _virStoragePoolAuthChap {
     char *passwd;
 };
 
+typedef struct _virStoragePoolAuthCephx virStoragePoolAuthCephx;
+typedef virStoragePoolAuthCephx *virStoragePoolAuthCephxPtr;
+struct _virStoragePoolAuthCephx {
+    char *username;
+    struct {
+            unsigned char uuid[VIR_UUID_BUFLEN];
+            char *usage;
+    } secret;
+};
 
 /*
  * For remote pools, info on how to reach the host
@@ -235,6 +246,7 @@ struct _virStoragePoolSource {
     int authType;       /* virStoragePoolAuthType */
     union {
         virStoragePoolAuthChap chap;
+        virStoragePoolAuthCephx cephx;
     } auth;
 
     /* Vendor of the source */
diff --git a/src/storage/storage_backend.c b/src/storage/storage_backend.c
index caac2f8..e2e9b51 100644
--- a/src/storage/storage_backend.c
+++ b/src/storage/storage_backend.c
@@ -77,6 +77,9 @@
 #if WITH_STORAGE_DIR
 # include "storage_backend_fs.h"
 #endif
+#if WITH_STORAGE_RBD
+# include "storage_backend_rbd.h"
+#endif
 
 #define VIR_FROM_THIS VIR_FROM_STORAGE
 
@@ -103,6 +106,9 @@ static virStorageBackendPtr backends[] = {
 #if WITH_STORAGE_DISK
     &virStorageBackendDisk,
 #endif
+#if WITH_STORAGE_RBD
+    &virStorageBackendRBD,
+#endif
     NULL
 };
 
diff --git a/src/storage/storage_backend_rbd.c b/src/storage/storage_backend_rbd.c
new file mode 100644
index 0000000..5319749
--- /dev/null
+++ b/src/storage/storage_backend_rbd.c
@@ -0,0 +1,544 @@
+/*
+ * storage_backend_rbd.c: storage backend for RBD (RADOS Block Device) handling
+ *
+ * Copyright (C) 2012 Wido den Hollander
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307  USA
+ *
+ * Author: Wido den Hollander <wido at widodh.nl>
+ */
+
+#include <config.h>
+
+#include "virterror_internal.h"
+#include "storage_backend_rbd.h"
+#include "storage_conf.h"
+#include "util.h"
+#include "memory.h"
+#include "logging.h"
+#include "base64.h"
+#include "uuid.h"
+#include "rados/librados.h"
+#include "rbd/librbd.h"
+
+#define VIR_FROM_THIS VIR_FROM_STORAGE
+
+struct _virStorageBackendRBDState {
+    rados_t cluster;
+    rados_ioctx_t ioctx;
+    time_t starttime;
+};
+
+typedef struct _virStorageBackendRBDState virStorageBackendRBDState;
+typedef virStorageBackendRBDState virStorageBackendRBDStatePtr;
+
+static int virStorageBackendRBDOpenRADOSConn(virStorageBackendRBDStatePtr *ptr,
+                                             virConnectPtr conn,
+                                             virStoragePoolObjPtr pool)
+{
+    int ret = -1;
+    unsigned char *secret_value = NULL;
+    size_t secret_value_size;
+    char *rados_key = NULL;
+    virBuffer mon_host = VIR_BUFFER_INITIALIZER;
+    virSecretPtr secret = NULL;
+    char secretUuid[VIR_UUID_STRING_BUFLEN];
+
+    VIR_DEBUG("Found Cephx username: %s",
+              pool->def->source.auth.cephx.username);
+
+    if (pool->def->source.auth.cephx.username != NULL) {
+        VIR_DEBUG("Using cephx authorization");
+        if (rados_create(&ptr->cluster,
+            pool->def->source.auth.cephx.username) < 0) {
+            virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                                  _("failed to initialize RADOS"));
+            goto cleanup;
+        }
+
+        if (pool->def->source.auth.cephx.secret.uuid != NULL) {
+            virUUIDFormat(pool->def->source.auth.cephx.secret.uuid, secretUuid);
+            VIR_DEBUG("Looking up secret by UUID: %s", secretUuid);
+            secret = virSecretLookupByUUIDString(conn, secretUuid);
+        }
+
+        if (pool->def->source.auth.cephx.secret.usage != NULL) {
+            VIR_DEBUG("Looking up secret by usage: %s",
+                      pool->def->source.auth.cephx.secret.usage);
+            secret = virSecretLookupByUsage(conn, VIR_SECRET_USAGE_TYPE_CEPH,
+                                            pool->def->source.auth.cephx.secret.usage);
+        }
+
+        if (secret == NULL) {
+            virStorageReportError(VIR_ERR_NO_SECRET,
+                                  _("failed to find the secret"));
+            goto cleanup;
+        }
+
+        secret_value = virSecretGetValue(secret, &secret_value_size, 0);
+        base64_encode_alloc((char *)secret_value,
+                            secret_value_size, &rados_key);
+        memset(secret_value, 0, secret_value_size);
+
+        if (rados_key == NULL) {
+            virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                                  _("failed to decode the RADOS key"));
+            goto cleanup;
+        }
+
+        VIR_DEBUG("Found cephx key: %s", rados_key);
+        if (rados_conf_set(ptr->cluster, "key", rados_key) < 0) {
+            virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                                  _("failed to set RADOS option: %s"),
+                                  "rados_key");
+            goto cleanup;
+        }
+
+        memset(rados_key, 0, strlen(rados_key));
+
+        if (rados_conf_set(ptr->cluster, "auth_supported", "cephx") < 0) {
+            virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                                  _("failed to set RADOS option: %s"),
+                                  "auth_supported");
+            goto cleanup;
+        }
+    } else {
+        VIR_DEBUG("Not using cephx authorization");
+        if (rados_create(&ptr->cluster, NULL) < 0) {
+            virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                                  _("failed to create the RADOS cluster"));
+            goto cleanup;
+        }
+        if (rados_conf_set(ptr->cluster, "auth_supported", "none") < 0) {
+            virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                                  _("failed to set RADOS option: %s"),
+                                  "auth_supported");
+            goto cleanup;
+        }
+    }
+
+    VIR_DEBUG("Found %Zu RADOS cluster monitors in the pool configuration",
+              pool->def->source.nhost);
+
+    int i;
+    for (i = 0; i < pool->def->source.nhost; i++) {
+        if (pool->def->source.hosts[i].name != NULL &&
+            !pool->def->source.hosts[i].port) {
+            virBufferAsprintf(&mon_host, "%s:6789,",
+                              pool->def->source.hosts[i].name);
+        } else if (pool->def->source.hosts[i].name != NULL &&
+            pool->def->source.hosts[i].port) {
+            virBufferAsprintf(&mon_host, "%s:%d,",
+                              pool->def->source.hosts[i].name,
+                              pool->def->source.hosts[i].port);
+        } else {
+            virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                                  _("received malformed monitor, check the XML definition"));
+        }
+    }
+
+    if (virBufferError(&mon_host)) {
+       virReportOOMError();
+       goto cleanup;
+    }
+
+    char *mon_buff = virBufferContentAndReset(&mon_host);
+    VIR_DEBUG("RADOS mon_host has been set to: %s", mon_buff);
+    if (rados_conf_set(ptr->cluster, "mon_host", mon_buff) < 0) {
+       virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                             _("failed to set RADOS option: %s"),
+                             "mon_host");
+        goto cleanup;
+    }
+
+    ptr->starttime = time(0);
+    if (rados_connect(ptr->cluster) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                              _("failed to connect to the RADOS monitor on: %s"),
+                              mon_buff);
+        goto cleanup;
+    }
+
+    ret = 0;
+
+cleanup:
+    VIR_FREE(secret_value);
+    VIR_FREE(rados_key);
+    virSecretFree(secret);
+    virBufferFreeAndReset(&mon_host);
+    return ret;
+}
+
+static int virStorageBackendRBDCloseRADOSConn(virStorageBackendRBDStatePtr ptr)
+{
+    int ret = 0;
+
+    if (ptr.ioctx != NULL) {
+        VIR_DEBUG("Closing RADOS IoCTX");
+        rados_ioctx_destroy(ptr.ioctx);
+        ret = -1;
+    }
+    ptr.ioctx = NULL;
+
+    if (ptr.cluster != NULL) {
+        VIR_DEBUG("Closing RADOS connection");
+        rados_shutdown(ptr.cluster);
+        ret = -2;
+    }
+    ptr.cluster = NULL;
+
+    time_t runtime = time(0) - ptr.starttime;
+    VIR_DEBUG("RADOS connection existed for %ld seconds", runtime);
+
+    return ret;
+}
+
+static int volStorageBackendRBDRefreshVolInfo(virStorageVolDefPtr vol,
+                                              virStoragePoolObjPtr pool,
+                                              virStorageBackendRBDStatePtr ptr)
+{
+    int ret = -1;
+    rbd_image_t image;
+    if (rbd_open(ptr.ioctx, vol->name, &image, NULL) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                              _("failed to open the RBD image '%s'"),
+                              vol->name);
+        return ret;
+    }
+
+    rbd_image_info_t info;
+    if (rbd_stat(image, &info, sizeof(info)) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                              _("failed to stat the RBD image"));
+        goto cleanup;
+    }
+
+    VIR_DEBUG("Refreshed RBD image %s/%s (size: %llu obj_size: %llu num_objs: %llu)",
+              pool->def->source.name, vol->name, (unsigned long long)info.size,
+              (unsigned long long)info.obj_size,
+              (unsigned long long)info.num_objs);
+
+    vol->capacity = info.size;
+    vol->allocation = info.obj_size * info.num_objs;
+    vol->type = VIR_STORAGE_VOL_NETWORK;
+
+    VIR_FREE(vol->target.path);
+    if (virAsprintf(&vol->target.path, "rbd:%s/%s",
+                    pool->def->source.name,
+                    vol->name) == -1) {
+        virReportOOMError();
+        goto cleanup;
+    }
+
+    VIR_FREE(vol->key);
+    if (virAsprintf(&vol->key, "%s/%s",
+                    pool->def->source.name,
+                    vol->name) == -1) {
+        virReportOOMError();
+        goto cleanup;
+    }
+
+    ret = 0;
+
+cleanup:
+    rbd_close(image);
+    return ret;
+}
+
+static int virStorageBackendRBDRefreshPool(virConnectPtr conn ATTRIBUTE_UNUSED,
+                                           virStoragePoolObjPtr pool)
+{
+    size_t max_size = 1024;
+    int ret = -1;
+    int len = -1;
+    int i;
+    char *name, *names = NULL;
+    virStorageBackendRBDStatePtr ptr;
+    ptr.cluster = NULL;
+    ptr.ioctx = NULL;
+
+    if (virStorageBackendRBDOpenRADOSConn(&ptr, conn, pool) < 0) {
+        goto cleanup;
+    }
+
+    if (rados_ioctx_create(ptr.cluster,
+        pool->def->source.name, &ptr.ioctx) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                              _("failed to create the RBD IoCTX. Does the pool '%s' exist?"),
+                              pool->def->source.name);
+        goto cleanup;
+    }
+
+    struct rados_cluster_stat_t stat;
+    if (rados_cluster_stat(ptr.cluster, &stat) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                              _("failed to stat the RADOS cluster"));
+        goto cleanup;
+    }
+
+    struct rados_pool_stat_t poolstat;
+    if (rados_ioctx_pool_stat(ptr.ioctx, &poolstat) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                              _("failed to stat the RADOS pool '%s'"),
+                              pool->def->source.name);
+        goto cleanup;
+    }
+
+    pool->def->capacity = stat.kb * 1024;
+    pool->def->available = stat.kb_avail * 1024;
+    pool->def->allocation = poolstat.num_bytes;
+
+    VIR_DEBUG("Utilization of RBD pool %s: (kb: %llu kb_avail: %llu num_bytes: %llu)",
+              pool->def->source.name, (unsigned long long)stat.kb,
+              (unsigned long long)stat.kb_avail,
+              (unsigned long long)poolstat.num_bytes);
+
+    while (true) {
+        if (VIR_ALLOC_N(names, max_size) < 0)
+            goto out_of_memory;
+
+        len = rbd_list(ptr.ioctx, names, &max_size);
+        if (len >= 0)
+            break;
+        if (len != -ERANGE) {
+            VIR_WARN("%s", _("A problem occurred while listing RBD images"));
+            goto cleanup;
+        }
+    }
+
+    for (i = 0, name = names; name < names + max_size; i++) {
+        if (VIR_REALLOC_N(pool->volumes.objs, pool->volumes.count + 1) < 0) {
+            virStoragePoolObjClearVols(pool);
+            goto out_of_memory;
+        }
+
+        virStorageVolDefPtr vol;
+        if (VIR_ALLOC(vol) < 0)
+            goto out_of_memory;
+
+        vol->name = strdup(name);
+        if (vol->name == NULL)
+            goto out_of_memory;
+
+        if (STREQ(vol->name, ""))
+            break;
+
+        name += strlen(name) + 1;
+
+        if (volStorageBackendRBDRefreshVolInfo(vol, pool, ptr) < 0)
+            goto cleanup;
+
+        pool->volumes.objs[pool->volumes.count++] = vol;
+    }
+
+    VIR_DEBUG("Found %d images in RBD pool %s",
+              pool->volumes.count, pool->def->source.name);
+
+    ret = 0;
+
+cleanup:
+    VIR_FREE(names);
+    virStorageBackendRBDCloseRADOSConn(ptr);
+    return ret;
+
+out_of_memory:
+    virReportOOMError();
+    goto cleanup;
+}
+
+static int virStorageBackendRBDDeleteVol(virConnectPtr conn,
+                                         virStoragePoolObjPtr pool,
+                                         virStorageVolDefPtr vol,
+                                         unsigned int flags)
+{
+    int ret = -1;
+    virStorageBackendRBDStatePtr ptr;
+    ptr.cluster = NULL;
+    ptr.ioctx = NULL;
+
+    VIR_DEBUG("Removing RBD image %s/%s", pool->def->source.name, vol->name);
+
+    if (flags & VIR_STORAGE_VOL_DELETE_ZEROED) {
+        VIR_WARN("%s", _("This storage backend does not support zeroed removal of volumes"));
+    }
+
+    if (virStorageBackendRBDOpenRADOSConn(&ptr, conn, pool) < 0) {
+        goto cleanup;
+    }
+
+    if (rados_ioctx_create(ptr.cluster,
+        pool->def->source.name, &ptr.ioctx) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                              _("failed to create the RBD IoCTX. Does the pool '%s' exist?"),
+                              pool->def->source.name);
+        goto cleanup;
+    }
+
+    if (rbd_remove(ptr.ioctx, vol->name) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                              _("failed to remove volume '%s/%s'"),
+                              pool->def->source.name,
+                              vol->name);
+        goto cleanup;
+    }
+
+    ret = 0;
+
+cleanup:
+    virStorageBackendRBDCloseRADOSConn(ptr);
+    return ret;
+}
+
+static int virStorageBackendRBDCreateVol(virConnectPtr conn,
+                                         virStoragePoolObjPtr pool,
+                                         virStorageVolDefPtr vol)
+{
+    virStorageBackendRBDStatePtr ptr;
+    ptr.cluster = NULL;
+    ptr.ioctx = NULL;
+    int order = 0;
+    int ret = -1;
+
+    VIR_DEBUG("Creating RBD image %s/%s with size %llu",
+              pool->def->source.name,
+              vol->name, vol->capacity);
+
+    if (virStorageBackendRBDOpenRADOSConn(&ptr, conn, pool) < 0) {
+        goto cleanup;
+    }
+
+    if (rados_ioctx_create(ptr.cluster,
+        pool->def->source.name,&ptr.ioctx) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                               _("failed to create the RBD IoCTX. Does the pool '%s' exist?"),
+                               pool->def->source.name);
+        goto cleanup;
+    }
+
+    if (vol->target.encryption != NULL) {
+        virStorageReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+                              _("storage pool does not support encrypted volumes"));
+        goto cleanup;
+    }
+
+    if (rbd_create(ptr.ioctx, vol->name, vol->capacity, &order) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                              _("failed to create volume '%s/%s'"),
+                              pool->def->source.name,
+                              vol->name);
+        goto cleanup;
+    }
+
+    if (volStorageBackendRBDRefreshVolInfo(vol, pool, ptr) < 0) {
+        goto cleanup;
+    }
+
+    ret = 0;
+
+cleanup:
+    virStorageBackendRBDCloseRADOSConn(ptr);
+    return ret;
+}
+
+static int virStorageBackendRBDRefreshVol(virConnectPtr conn,
+                                          virStoragePoolObjPtr pool ATTRIBUTE_UNUSED,
+                                          virStorageVolDefPtr vol)
+{
+    virStorageBackendRBDStatePtr ptr;
+    ptr.cluster = NULL;
+    ptr.ioctx = NULL;
+    int ret = -1;
+
+    if (virStorageBackendRBDOpenRADOSConn(&ptr, conn, pool) < 0) {
+        goto cleanup;
+    }
+
+    if (rados_ioctx_create(ptr.cluster,
+        pool->def->source.name, &ptr.ioctx) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                               _("failed to create the RBD IoCTX. Does the pool '%s' exist?"),
+                               pool->def->source.name);
+        goto cleanup;
+    }
+
+    if (volStorageBackendRBDRefreshVolInfo(vol, pool, ptr) < 0) {
+        goto cleanup;
+    }
+
+    ret = 0;
+
+cleanup:
+    virStorageBackendRBDCloseRADOSConn(ptr);
+    return ret;
+}
+
+static int virStorageBackendRBDResizeVol(virConnectPtr conn ATTRIBUTE_UNUSED,
+                                     virStoragePoolObjPtr pool ATTRIBUTE_UNUSED,
+                                     virStorageVolDefPtr vol,
+                                     unsigned long long capacity,
+                                     unsigned int flags)
+{
+    virStorageBackendRBDStatePtr ptr;
+    ptr.cluster = NULL;
+    ptr.ioctx = NULL;
+    rbd_image_t image = NULL;
+    int ret = -1;
+
+    virCheckFlags(0, -1);
+
+    if (virStorageBackendRBDOpenRADOSConn(&ptr, conn, pool) < 0) {
+        goto cleanup;
+    }
+
+    if (rados_ioctx_create(ptr.cluster,
+        pool->def->source.name, &ptr.ioctx) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                               _("failed to create the RBD IoCTX. Does the pool '%s' exist?"),
+                               pool->def->source.name);
+        goto cleanup;
+    }
+
+    if (rbd_open(ptr.ioctx, vol->name, &image, NULL) < 0) {
+       virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                            _("failed to open the RBD image '%s'"),
+                            vol->name);
+       goto cleanup;
+    }
+
+    if (rbd_resize(image, capacity) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                            _("failed to resize the RBD image '%s'"),
+                            vol->name);
+        goto cleanup;
+    }
+
+    ret = 0;
+
+cleanup:
+    if (image != NULL)
+       rbd_close(image);
+    virStorageBackendRBDCloseRADOSConn(ptr);
+    return ret;
+}
+
+virStorageBackend virStorageBackendRBD = {
+    .type = VIR_STORAGE_POOL_RBD,
+
+    .refreshPool = virStorageBackendRBDRefreshPool,
+    .createVol = virStorageBackendRBDCreateVol,
+    .refreshVol = virStorageBackendRBDRefreshVol,
+    .deleteVol = virStorageBackendRBDDeleteVol,
+    .resizeVol = virStorageBackendRBDResizeVol,
+};
diff --git a/src/storage/storage_backend_rbd.h b/src/storage/storage_backend_rbd.h
new file mode 100644
index 0000000..2ae2513
--- /dev/null
+++ b/src/storage/storage_backend_rbd.h
@@ -0,0 +1,30 @@
+/*
+ * storage_backend_rbd.h: storage backend for RBD (RADOS Block Device) handling
+ *
+ * Copyright (C) 2012 Wido den Hollander
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307  USA
+ *
+ * Author: Wido den Hollander <wido at widodh.nl>
+ */
+
+#ifndef __VIR_STORAGE_BACKEND_RBD_H__
+# define __VIR_STORAGE_BACKEND_RBD_H__
+
+# include "storage_backend.h"
+
+extern virStorageBackend virStorageBackendRBD;
+
+#endif /* __VIR_STORAGE_BACKEND_RBD_H__ */
diff --git a/tests/storagepoolxml2xmlin/pool-rbd.xml b/tests/storagepoolxml2xmlin/pool-rbd.xml
new file mode 100644
index 0000000..4ee8d56
--- /dev/null
+++ b/tests/storagepoolxml2xmlin/pool-rbd.xml
@@ -0,0 +1,11 @@
+<pool type='rbd'>
+  <name>ceph</name>
+  <source>
+    <name>rbd</name>
+    <host name='localhost' port='6789'/>
+    <host name='localhost' port='6790'/>
+    <auth username='admin' type='ceph'>
+	<secret uuid='2ec115d7-3a88-3ceb-bc12-0ac909a6fd87'/>
+    </auth>
+  </source>
+</pool>
diff --git a/tests/storagepoolxml2xmlout/pool-rbd.xml b/tests/storagepoolxml2xmlout/pool-rbd.xml
new file mode 100644
index 0000000..309a6d9
--- /dev/null
+++ b/tests/storagepoolxml2xmlout/pool-rbd.xml
@@ -0,0 +1,15 @@
+<pool type='rbd'>
+  <name>ceph</name>
+  <uuid>47c1faee-0207-e741-f5ae-d9b019b98fe2</uuid>
+  <capacity unit='bytes'>0</capacity>
+  <allocation unit='bytes'>0</allocation>
+  <available unit='bytes'>0</available>
+  <source>
+    <name>rbd</name>
+    <host name='localhost' port='6789'/>
+    <host name='localhost' port='6790'/>
+    <auth username='admin' type='ceph'>
+      <secret uuid='2ec115d7-3a88-3ceb-bc12-0ac909a6fd87'/>
+    </auth>
+  </source>
+</pool>
diff --git a/tools/virsh.c b/tools/virsh.c
index dd9292a..b4e51ff 100644
--- a/tools/virsh.c
+++ b/tools/virsh.c
@@ -12201,6 +12201,10 @@ cmdVolInfo(vshControl *ctl, const vshCmd *cmd)
             vshPrint(ctl, "%-15s %s\n", _("Type:"), _("dir"));
             break;
 
+        case VIR_STORAGE_VOL_NETWORK:
+            vshPrint(ctl, "%-15s %s\n", _("Type:"), _("network"));
+            break;
+
         default:
             vshPrint(ctl, "%-15s %s\n", _("Type:"), _("unknown"));
         }
@@ -20136,6 +20140,9 @@ vshShowVersion(vshControl *ctl ATTRIBUTE_UNUSED)
 #ifdef WITH_STORAGE_LVM
     vshPrint(ctl, " LVM");
 #endif
+#ifdef WITH_STORAGE_RBD
+    vshPrint(ctl, " RBD");
+#endif
     vshPrint(ctl, "\n");
 
     vshPrint(ctl, "%s", _(" Miscellaneous:"));
-- 
1.7.9.5