[Ovirt-devel] [PATCH 1/5 ovirt-server] Add glusterfs to task-omatic API for {task_storage,utils}

Jason Guiditta jguiditt at redhat.com
Fri Jul 31 19:35:03 UTC 2009


On Tue, 2009-07-28 at 06:50 -0700, Harshavardhana wrote: 
> ---
>  src/task-omatic/task_storage.rb |   50 +++++++++++++++++++++++++++++++++++++++
>  src/task-omatic/utils.rb        |   40 +++++++++++++++++++++++++++++++
>  2 files changed, 90 insertions(+), 0 deletions(-)

Minor issue with this and the next patch(2/5), they both have trailing
whitespace errors, so they will not be able to be pushed to our repo w/o
that changed.  Please enable your pre-commit related hooks in
{repo}/.git/hooks/ 
Also, utils.rb has been removed in a recent commit, so please amend
patch to exclude that file. No other changes should be required for this
patch.

Aside from that, ACK to the entire series - new versions need to be in
place (set up today and still testing), but it does not appear to cause
any regressions in the meantime.  If you can get these patches updated as
mentioned above, and resend them today, we can include it in this
release.  Thanks for the effort!

-j
> 
> diff --git a/src/task-omatic/task_storage.rb b/src/task-omatic/task_storage.rb
> index 8165818..0272fbb 100644
> --- a/src/task-omatic/task_storage.rb
> +++ b/src/task-omatic/task_storage.rb
> @@ -202,6 +202,8 @@ class LibvirtPool
>        return IscsiLibvirtPool.new(pool.ip_addr, pool[:target], pool[:port], logger)
>      elsif pool[:type] == "NfsStoragePool"
>        return NFSLibvirtPool.new(pool.ip_addr, pool.export_path, logger)
> +    elsif pool[:type] == "GlusterfsStoragePool"
> +      return GLUSTERFSLibvirtPool.new(pool.ip_addr, pool.export_path, logger)
>      elsif pool[:type] == "LvmStoragePool"
>        # OK, if this is LVM storage, there are two cases we need to care about:
>        # 1) this is a LUN with LVM already on it.  In this case, all we need to
> @@ -293,6 +295,54 @@ class NFSLibvirtPool < LibvirtPool
>    end
>  end
>  
> +class GLUSTERFSLibvirtPool < LibvirtPool
> +  def initialize(ip_addr, export_path, logger)
> +    target = "#{ip_addr}-#{export_path.tr('/', '_')}"
> +    super('netfs', target, logger)
> +
> +    @type = 'netfs'
> +    @host = ip_addr
> +    @remote_vol = export_path
> +
> +    @xml.root.elements["source"].add_element("host", {"name" => @host})
> +    @xml.root.elements["source"].add_element("dir", {"path" => @remote_vol})
> +    @xml.root.elements["source"].add_element("format", {"type" => "glusterfs"})
> +
> +    @xml.root.elements["target"].elements["path"].text = "/mnt/" + @name
> +  end
> +
> +  def create_vol(name, size, owner, group, mode)
> +    # FIXME: this can actually take some time to complete (since we aren't
> +    # doing sparse allocations at the moment).  During that time, whichever
> +    # libvirtd we chose to use is completely hung up.  The solution is 3-fold:
> +    # 1.  Allow sparse allocations in the WUI front-end
> +    # 2.  Make libvirtd multi-threaded
> +    # 3.  Make taskomatic multi-threaded
> +    super("netfs", name, size, owner, group, mode)
> +
> +    # FIXME: we have to add the format as qcow2 here because of a bug in libvirt;
> +    # if you specify a volume with no format, it will crash libvirtd
> +    @vol_xml.root.elements["target"].add_element("format", {"type" => "qcow2"})
> +
> +    # FIXME: Add allocation 0 element so that we create a sparse file.
> +    # This was done because qmf was timing out waiting for the create
> +    # operation to complete.  This needs to be fixed in a better way
> +    # however.  We want to have non-sparse files for performance reasons.
> +    @vol_xml.root.add_element("allocation").add_text('0')
> +
> +    @logger.debug("Creating new volume on pool #{@remote_pool.name} - XML: #{@vol_xml.to_s}")
> +    result = @remote_pool.createVolumeXML(@vol_xml.to_s)
> +    raise "Error creating remote volume: #{result.text}" unless result.status == 0
> +    return result.volume
> +  end
> +  
> +  def xmlequal?(docroot)
> +    return (docroot.attributes['type'] == @type and
> +            docroot.elements['source'].elements['host'].attributes['name'] == @host and
> +            docroot.elements['source'].elements['dir'].attributes['path'] == @remote_vol)
> +  end
> +end
> +
>  class LVMLibvirtPool < LibvirtPool
>    def initialize(vg_name, device, build_on_start, logger)
>      super('logical', vg_name, logger)
> diff --git a/src/task-omatic/utils.rb b/src/task-omatic/utils.rb
> index e3005ed..cf68cae 100644
> --- a/src/task-omatic/utils.rb
> +++ b/src/task-omatic/utils.rb
> @@ -114,6 +114,8 @@ class LibvirtPool
>        return IscsiLibvirtPool.new(pool.ip_addr, pool[:target])
>      elsif pool[:type] == "NfsStoragePool"
>        return NFSLibvirtPool.new(pool.ip_addr, pool.export_path)
> +    elsif pool[:type] == "GlusterfsStoragePool"
> +      return GLUSTERFSLibvirtPool.new(pool.ip_addr, pool.export_path)
>      elsif pool[:type] == "LvmStoragePool"
>        # OK, if this is LVM storage, there are two cases we need to care about:
>        # 1) this is a LUN with LVM already on it.  In this case, all we need to
> @@ -195,6 +197,44 @@ class NFSLibvirtPool < LibvirtPool
>    end
>  end
>  
> +class GLUSTERFSLibvirtPool < LibvirtPool
> +  def initialize(ip_addr, export_path)
> +    super('netfs')
> +
> +    @type = 'netfs'
> +    @host = ip_addr
> +    @remote_vol = export_path
> +    @name = String.random_alphanumeric
> +
> +    @xml.root.elements["source"].add_element("host", {"name" => @host})
> +    @xml.root.elements["source"].add_element("dir", {"path" => @remote_vol})
> +    @xml.root.elements["source"].add_element("format", {"type" => "glusterfs"})
> +
> +    @xml.root.elements["target"].elements["path"].text = "/mnt/" + @name
> +  end
> +
> +  def create_vol(name, size, owner, group, mode)
> +    # FIXME: this can actually take some time to complete (since we aren't
> +    # doing sparse allocations at the moment).  During that time, whichever
> +    # libvirtd we chose to use is completely hung up.  The solution is 3-fold:
> +    # 1.  Allow sparse allocations in the WUI front-end
> +    # 2.  Make libvirtd multi-threaded
> +    # 3.  Make taskomatic multi-threaded
> +    super("netfs", name, size, owner, group, mode)
> +
> +    # FIXME: we have to add the format as raw here because of a bug in libvirt;
> +    # if you specify a volume with no format, it will crash libvirtd
> +    @vol_xml.root.elements["target"].add_element("format", {"type" => "raw"})
> +    @remote_pool.create_vol_xml(@vol_xml.to_s)
> +  end
> +
> +  def xmlequal?(docroot)
> +    return (docroot.attributes['type'] == @type and
> +            docroot.elements['source'].elements['host'].attributes['name'] == @host and
> +            docroot.elements['source'].elements['dir'].attributes['path'] == @remote_vol)
> +  end
> +end
> +
>  class LVMLibvirtPool < LibvirtPool
>    def initialize(vg_name, device, build_on_start)
>      super('logical', vg_name)




More information about the ovirt-devel mailing list