[libvirt] [PATCH] storage: implement rudimentary glusterfs pool refresh

Eric Blake eblake at redhat.com
Wed Oct 30 23:30:27 UTC 2013


Actually put gfapi to use, by allowing the creation of a gluster
pool.  Right now, all volumes are treated as raw; further patches
will allow peering into files to allow for qcow2 files and backing
chains, and reporting proper volume allocation.

I've reported a couple of glusterfs bugs; if we were to require a
minimum of (not-yet-released) glusterfs 3.5, we could use the new
glfs_readdir [1] and not worry about the bogus return value of
glfs_fini [2], but for now I'm testing with Fedora 19's glusterfs
3.4.1.

[1] http://lists.gnu.org/archive/html/gluster-devel/2013-10/msg00085.html
[2] http://lists.gnu.org/archive/html/gluster-devel/2013-10/msg00086.html

* src/storage/storage_backend_gluster.c
(virStorageBackendGlusterRefreshPool): Initial implementation.
(virStorageBackendGlusterOpen, virStorageBackendGlusterClose): New
helper functions.

Signed-off-by: Eric Blake <eblake at redhat.com>
---

Depends on these pre-req patches:
https://www.redhat.com/archives/libvir-list/2013-October/msg01266.html
https://www.redhat.com/archives/libvir-list/2013-October/msg00913.html

My next task - figuring out the use of glfs_open() to read metadata
from a file and determine backing chains.

 src/storage/storage_backend_gluster.c | 138 ++++++++++++++++++++++++++++++++--
 1 file changed, 133 insertions(+), 5 deletions(-)

diff --git a/src/storage/storage_backend_gluster.c b/src/storage/storage_backend_gluster.c
index 2863c73..b0b6ce6 100644
--- a/src/storage/storage_backend_gluster.c
+++ b/src/storage/storage_backend_gluster.c
@@ -23,20 +23,148 @@

 #include <glusterfs/api/glfs.h>

-#include "virerror.h"
 #include "storage_backend_gluster.h"
 #include "storage_conf.h"
+#include "viralloc.h"
+#include "virerror.h"
+#include "virlog.h"
+#include "virstoragefile.h"
+#include "virstring.h"

 #define VIR_FROM_THIS VIR_FROM_STORAGE

+struct _virStorageBackendGlusterState {
+    glfs_t *vol;
+};
+
+typedef struct _virStorageBackendGlusterState virStorageBackendGlusterState;
+typedef virStorageBackendGlusterState *virStorageBackendGlusterStatePtr;
+
+/* Tear down the gluster connection (if any) and release STATE, which
+ * was allocated by virStorageBackendGlusterOpen.  Safe to call with
+ * NULL or with a partially-constructed state (vol not yet set). */
+static void
+virStorageBackendGlusterClose(virStorageBackendGlusterStatePtr state)
+{
+    if (!state)
+        return;
+    /* Yuck - glusterfs-api-3.4.1 appears to always return -1 for
+     * glfs_fini, with errno containing random data, so there's no way
+     * to tell if it succeeded. 3.4.2 is supposed to fix this. */
+    if (state->vol && glfs_fini(state->vol) < 0)
+        VIR_DEBUG("shutdown of gluster failed with errno %d", errno);
+    /* The original returned early when vol was NULL and never freed
+     * state, leaking the VIR_ALLOC from virStorageBackendGlusterOpen
+     * on both the success-teardown and glfs_new-failure paths. */
+    VIR_FREE(state);
+}
+
+/* Connect to the gluster volume backing POOL.  On success returns an
+ * allocated state that the caller must release with
+ * virStorageBackendGlusterClose; on failure reports the error and
+ * returns NULL. */
+static virStorageBackendGlusterStatePtr
+virStorageBackendGlusterOpen(virStoragePoolObjPtr pool)
+{
+    virStorageBackendGlusterStatePtr ret = NULL;
+
+    if (VIR_ALLOC(ret) < 0)
+        return NULL;
+
+    /* glfs_new takes the gluster volume name, i.e. the pool's source
+     * name, not the libvirt pool name. */
+    if (!(ret->vol = glfs_new(pool->def->source.name))) {
+        virReportOOMError();
+        goto error;
+    }
+
+    /* FIXME: allow alternate transport in the pool xml */
+    if (glfs_set_volfile_server(ret->vol, "tcp",
+                                pool->def->source.hosts[0].name,
+                                pool->def->source.hosts[0].port) < 0 ||
+        glfs_init(ret->vol) < 0) {
+        /* NOTE(review): this prints host/pool-name, but the volume we
+         * connected to above is source.name - confirm which of the two
+         * names was intended in the message. */
+        virReportSystemError(errno, _("failed to connect to gluster %s/%s"),
+                             pool->def->source.hosts[0].name,
+                             pool->def->name);
+        goto error;
+    }
+
+    return ret;
+
+error:
+    /* Close copes with a half-built state (vol may be NULL). */
+    virStorageBackendGlusterClose(ret);
+    return NULL;
+}

 static int
 virStorageBackendGlusterRefreshPool(virConnectPtr conn ATTRIBUTE_UNUSED,
-                                    virStoragePoolObjPtr pool ATTRIBUTE_UNUSED)
+                                    virStoragePoolObjPtr pool)
 {
-    virReportError(VIR_ERR_NO_SUPPORT, "%s",
-                   _("gluster pool type not fully supported yet"));
-    return -1;
+    int ret = -1;
+    virStorageBackendGlusterStatePtr state = NULL;
+    /* Stack-allocated dirent with tail padding; see the readdir_r
+     * rationale in the comment block below. */
+    struct {
+        struct dirent ent;
+        /* See comment below about readdir_r needing padding */
+        char padding[MAX(1, 256 - (int) (sizeof(struct dirent)
+                                         - offsetof(struct dirent, d_name)))];
+    } de;
+    struct dirent *ent;
+    glfs_fd_t *dir = NULL;
+    virStorageVolDefPtr vol = NULL;
+    struct statvfs sb;
+
+    if (!(state = virStorageBackendGlusterOpen(pool)))
+        goto cleanup;
+
+    /* Why oh why did glfs 3.4 decide to expose only readdir_r rather
+     * than readdir?  POSIX admits that readdir_r is inherently a
+     * flawed design, because systems are not required to define
+     * NAME_MAX: http://austingroupbugs.net/view.php?id=696
+     * http://womble.decadent.org.uk/readdir_r-advisory.html
+     *
+     * Fortunately, gluster uses _only_ XFS file systems, and XFS has
+     * a known NAME_MAX of 255; so we are guaranteed that if we
+     * provide 256 bytes of tail padding, then we have enough space to
+     * avoid buffer overflow no matter whether the OS used d_name[],
+     * d_name[1], or d_name[256] in its 'struct dirent'.
+     * http://lists.gnu.org/archive/html/gluster-devel/2013-10/msg00083.html
+     */
+
+    if (!(dir = glfs_opendir(state->vol, "."))) {
+        virReportSystemError(errno, _("cannot open path '%s'"),
+                             pool->def->name);
+        goto cleanup;
+    }
+    /* glfs_readdir_r returns an errno value (0 on success); stash it
+     * in errno so the post-loop check can distinguish end-of-dir
+     * (ent == NULL, errno == 0) from a real read failure. */
+    while (!(errno = glfs_readdir_r(dir, &de.ent, &ent)) && ent) {
+        if (STREQ(ent->d_name, ".") || STREQ(ent->d_name, ".."))
+            continue;
+        if (VIR_ALLOC(vol) < 0 ||
+            VIR_STRDUP(vol->name, ent->d_name) < 0)
+            goto cleanup;
+        /* FIXME - must open files to determine if they are non-raw */
+        vol->type = VIR_STORAGE_VOL_NETWORK;
+        vol->target.format = VIR_STORAGE_FILE_RAW;
+        if (virAsprintf(&vol->key, "%s/%s",
+                        pool->def->name, vol->name) < 0)
+            goto cleanup;
+        /* NOTE(review): cleanup frees 'vol' unconditionally, so this
+         * relies on VIR_APPEND_ELEMENT clearing 'vol' on success
+         * (ownership transferred to the pool) - confirm. */
+        if (VIR_APPEND_ELEMENT(pool->volumes.objs, pool->volumes.count,
+                               vol) < 0)
+            goto cleanup;
+    }
+    if (errno) {
+        virReportSystemError(errno, _("failed to read directory '%s'"),
+                             pool->def->name);
+        goto cleanup;
+    }
+
+    if (glfs_statvfs(state->vol, ".", &sb) < 0) {
+        virReportSystemError(errno, _("cannot statvfs path '%s'"),
+                             pool->def->name);
+        goto cleanup;
+    }
+
+    /* Widen before multiplying so the byte counts cannot overflow the
+     * (possibly 32-bit) statvfs member types. */
+    pool->def->capacity = ((unsigned long long)sb.f_frsize *
+                           (unsigned long long)sb.f_blocks);
+    pool->def->available = ((unsigned long long)sb.f_bfree *
+                            (unsigned long long)sb.f_frsize);
+    pool->def->allocation = pool->def->capacity - pool->def->available;
+
+    ret = 0;
+cleanup:
+    if (dir)
+        glfs_closedir(dir);
+    virStorageVolDefFree(vol);
+    virStorageBackendGlusterClose(state);
+    /* Don't leave a half-populated volume list behind on failure. */
+    if (ret < 0)
+        virStoragePoolObjClearVols(pool);
+    return ret;
 }

 virStorageBackend virStorageBackendGluster = {
-- 
1.8.3.1




More information about the libvir-list mailing list