From danken at redhat.com Sun Nov 1 08:22:30 2009 From: danken at redhat.com (Dan Kenigsberg) Date: Sun, 1 Nov 2009 10:22:30 +0200 Subject: [Ovirt-devel] [PATCH] drop the -logrotate.conf from /etc/logrotate.d/ovirt-logrotate.conf Message-ID: <1257063750-30979-1-git-send-email-danken@redhat.com> No Fedora package uses .conf in that directory; furthermore, rpmlint protests about it. Let us conform. --- logrotate/ovirt-logrotate | 2 +- ovirt-node.spec.in | 4 ++-- scripts/ovirt-config-logging | 2 +- scripts/ovirt-functions | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/logrotate/ovirt-logrotate b/logrotate/ovirt-logrotate index a61af6f..23cd46a 100644 --- a/logrotate/ovirt-logrotate +++ b/logrotate/ovirt-logrotate @@ -1,3 +1,3 @@ # Run each log rotation every 10 minutes -*/10 * * * * root /usr/sbin/logrotate /etc/logrotate.d/ovirt-logrotate.conf +*/10 * * * * root /usr/sbin/logrotate /etc/logrotate.d/ovirt */10 * * * * root /usr/sbin/logrotate /etc/logrotate.d/syslog diff --git a/ovirt-node.spec.in b/ovirt-node.spec.in index 3a8266e..8a31a04 100644 --- a/ovirt-node.spec.in +++ b/ovirt-node.spec.in @@ -180,7 +180,7 @@ cd - %{__install} -p -m0755 kinit/ovirt-kinit %{buildroot}%{_sysconfdir}/cron.hourly %{__install} -p -m0644 logrotate/ovirt-logrotate %{buildroot}%{_sysconfdir}/cron.d -%{__install} -p -m0644 logrotate/ovirt-logrotate.conf %{buildroot}%{_sysconfdir}/logrotate.d +%{__install} -p -m0644 logrotate/ovirt-logrotate.conf %{buildroot}%{_sysconfdir}/logrotate.d/ovirt echo "oVirt Node release %{version}-%{release}" > %{buildroot}%{_sysconfdir}/ovirt-release mkdir -p %{buildroot}/%{_sysconfdir}/default @@ -310,7 +310,7 @@ fi %{_initrddir}/ovirt %{_initrddir}/ovirt-post %config %{_sysconfdir}/cron.hourly/ovirt-kinit -%config %{_sysconfdir}/logrotate.d/ovirt-logrotate.conf +%config %{_sysconfdir}/logrotate.d/ovirt %config %{_sysconfdir}/cron.d/ovirt-logrotate %{_sysconfdir}/ovirt-config-boot.d %{_sysconfdir}/ovirt-config-setup.d diff --git 
a/scripts/ovirt-config-logging b/scripts/ovirt-config-logging index 464dfcb..d54c0e2 100755 --- a/scripts/ovirt-config-logging +++ b/scripts/ovirt-config-logging @@ -158,7 +158,7 @@ function prompt_user { $syslog_server_protocol fi sed -c -i -e "s/^size=.*/size=${max_log_size}k/" \ - /etc/logrotate.d/ovirt-logrotate.conf + /etc/logrotate.d/ovirt return ;; 1) diff --git a/scripts/ovirt-functions b/scripts/ovirt-functions index 98e88e7..8a2c8c8 100644 --- a/scripts/ovirt-functions +++ b/scripts/ovirt-functions @@ -28,7 +28,7 @@ OVIRT_CONFIG_FILES="\ /etc/default/ovirt \ /etc/sysconfig/network \ /etc/collectd.conf \ - /etc/logrotate.d/ovirt-logrotate.conf + /etc/logrotate.d/ovirt " # Save stdout to fd 6 and stderr to fd 7. Redirect normal stdout/stderr -- 1.6.2.5 From danken at redhat.com Sun Nov 1 16:33:33 2009 From: danken at redhat.com (Dan Kenigsberg) Date: Sun, 1 Nov 2009 18:33:33 +0200 Subject: [Ovirt-devel] [PATCH] drop the -logrotate.conf from /etc/logrotate.d/ovirt-logrotate.conf In-Reply-To: <1257063750-30979-1-git-send-email-danken@redhat.com> References: <1257063750-30979-1-git-send-email-danken@redhat.com> Message-ID: <1257093213-12679-1-git-send-email-danken@redhat.com> No Fedora package uses .conf in that directory; furthermore, rpmlint protests about it: the file name should match the name of the containing rpm. Let us conform. 
--- logrotate/ovirt-logrotate | 2 +- ovirt-node.spec.in | 4 ++-- scripts/ovirt-config-logging | 2 +- scripts/ovirt-functions | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/logrotate/ovirt-logrotate b/logrotate/ovirt-logrotate index a61af6f..ad32951 100644 --- a/logrotate/ovirt-logrotate +++ b/logrotate/ovirt-logrotate @@ -1,3 +1,3 @@ # Run each log rotation every 10 minutes -*/10 * * * * root /usr/sbin/logrotate /etc/logrotate.d/ovirt-logrotate.conf +*/10 * * * * root /usr/sbin/logrotate /etc/logrotate.d/ovirt-node-stateless */10 * * * * root /usr/sbin/logrotate /etc/logrotate.d/syslog diff --git a/ovirt-node.spec.in b/ovirt-node.spec.in index 3a8266e..20fea25 100644 --- a/ovirt-node.spec.in +++ b/ovirt-node.spec.in @@ -180,7 +180,7 @@ cd - %{__install} -p -m0755 kinit/ovirt-kinit %{buildroot}%{_sysconfdir}/cron.hourly %{__install} -p -m0644 logrotate/ovirt-logrotate %{buildroot}%{_sysconfdir}/cron.d -%{__install} -p -m0644 logrotate/ovirt-logrotate.conf %{buildroot}%{_sysconfdir}/logrotate.d +%{__install} -p -m0644 logrotate/ovirt-logrotate.conf %{buildroot}%{_sysconfdir}/logrotate.d/ovirt-node-stateless echo "oVirt Node release %{version}-%{release}" > %{buildroot}%{_sysconfdir}/ovirt-release mkdir -p %{buildroot}/%{_sysconfdir}/default @@ -310,7 +310,7 @@ fi %{_initrddir}/ovirt %{_initrddir}/ovirt-post %config %{_sysconfdir}/cron.hourly/ovirt-kinit -%config %{_sysconfdir}/logrotate.d/ovirt-logrotate.conf +%config %{_sysconfdir}/logrotate.d/ovirt-node-stateless %config %{_sysconfdir}/cron.d/ovirt-logrotate %{_sysconfdir}/ovirt-config-boot.d %{_sysconfdir}/ovirt-config-setup.d diff --git a/scripts/ovirt-config-logging b/scripts/ovirt-config-logging index 464dfcb..de238b7 100755 --- a/scripts/ovirt-config-logging +++ b/scripts/ovirt-config-logging @@ -158,7 +158,7 @@ function prompt_user { $syslog_server_protocol fi sed -c -i -e "s/^size=.*/size=${max_log_size}k/" \ - /etc/logrotate.d/ovirt-logrotate.conf + 
/etc/logrotate.d/ovirt-node-stateless return ;; 1) diff --git a/scripts/ovirt-functions b/scripts/ovirt-functions index 98e88e7..7e84415 100644 --- a/scripts/ovirt-functions +++ b/scripts/ovirt-functions @@ -28,7 +28,7 @@ OVIRT_CONFIG_FILES="\ /etc/default/ovirt \ /etc/sysconfig/network \ /etc/collectd.conf \ - /etc/logrotate.d/ovirt-logrotate.conf + /etc/logrotate.d/ovirt-node-stateless " # Save stdout to fd 6 and stderr to fd 7. Redirect normal stdout/stderr -- 1.6.2.5 From dpierce at redhat.com Tue Nov 3 15:55:07 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Tue, 3 Nov 2009 10:55:07 -0500 Subject: [Ovirt-devel] Refactoring of storage admin and node In-Reply-To: <1256754353-18515-1-git-send-email-dpierce@redhat.com> References: <1256754353-18515-1-git-send-email-dpierce@redhat.com> Message-ID: <20091103155507.GA8460@mcpierce-laptop.rdu.redhat.com> On Wed, Oct 28, 2009 at 02:25:51PM -0400, Darryl L. Pierce wrote: > This patch set supercedes the previous set, and incorporates feedback from jboggs. > > There is a known issue in configuring volumes on an iSCSI pool that makes them > unsupported at the moment. This code functions identical to virt-manager in that > regard. Can I get some feedback or an ACK on these patches? -- Darryl L. Pierce, Sr. Software Engineer @ Red Hat, Inc. Delivering value year after year. Red Hat ranks #1 in value among software vendors. http://www.redhat.com/promo/vendor/ -------------- next part -------------- A non-text attachment was scrubbed... Name: not available Type: application/pgp-signature Size: 197 bytes Desc: not available URL: From dpierce at redhat.com Tue Nov 3 15:55:36 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Tue, 3 Nov 2009 10:55:36 -0500 Subject: [Ovirt-devel] Re: [PATCH] Enables users to migrate virtual machines between hosts. 
In-Reply-To: <1256849472-31185-1-git-send-email-dpierce@redhat.com> References: <1256849472-31185-1-git-send-email-dpierce@redhat.com> Message-ID: <20091103155536.GB8460@mcpierce-laptop.rdu.redhat.com> On Thu, Oct 29, 2009 at 04:51:12PM -0400, Darryl L. Pierce wrote: > Users select a virtual machine on their current libvirt host. They then > select a target machine, which must have been previously configured as a > connection. They confirm the migration and then it runs. > > Signed-off-by: Darryl L. Pierce > --- Can I get some feedback or an ACK on this? -- Darryl L. Pierce, Sr. Software Engineer @ Red Hat, Inc. Delivering value year after year. Red Hat ranks #1 in value among software vendors. http://www.redhat.com/promo/vendor/ -------------- next part -------------- A non-text attachment was scrubbed... Name: not available Type: application/pgp-signature Size: 197 bytes Desc: not available URL: From mmorsi at redhat.com Tue Nov 3 22:47:03 2009 From: mmorsi at redhat.com (Mohammed Morsi) Date: Tue, 3 Nov 2009 17:47:03 -0500 Subject: [Ovirt-devel] [PATCH server] suggest ip address for nics/bondings on edit host/vm network forms Message-ID: <1257288423-5979-1-git-send-email-mmorsi@redhat.com> Provides a mechanism to generate ip addresses for static networks and prefill the 'ip address' fields of the host and vm networking forms w/ the first suggested ip. 
--- src/app/controllers/vm_controller.rb | 2 +- src/app/models/ip_address.rb | 52 +++++++++++++++++++++++++++++++++ src/app/models/network.rb | 11 +++++++ src/app/models/physical_network.rb | 6 ++++ src/app/models/vlan.rb | 6 ++++ src/app/services/host_service.rb | 2 + src/app/views/host/edit_network.rhtml | 4 ++- src/test/unit/ip_address_test.rb | 17 +++++++++++ src/test/unit/network_test.rb | 19 ++++++++++++ 9 files changed, 117 insertions(+), 2 deletions(-) diff --git a/src/app/controllers/vm_controller.rb b/src/app/controllers/vm_controller.rb index 9860843..c4ad7ce 100644 --- a/src/app/controllers/vm_controller.rb +++ b/src/app/controllers/vm_controller.rb @@ -193,7 +193,7 @@ class VmController < ApplicationController networks.each{ |net| nnic = Nic.new(:mac => Nic::gen_mac, :network => net) if(net.boot_type.proto == 'static') - nnic.ip_addresses << IpAddress.new(:address => '127.0.0.1') # FIXME + nnic.ip_addresses << IpAddress.new(:address => net.gen_address) end @nics.push nnic } diff --git a/src/app/models/ip_address.rb b/src/app/models/ip_address.rb index 13f5d19..2909e4e 100644 --- a/src/app/models/ip_address.rb +++ b/src/app/models/ip_address.rb @@ -17,6 +17,8 @@ # MA 02110-1301, USA. A copy of the GNU General Public License is # also available at http://www.gnu.org/copyleft/gpl.html. +require 'ipaddr' + # +IpAddress+ is the base class for all address related classes. 
# class IpAddress < ActiveRecord::Base @@ -35,4 +37,54 @@ class IpAddress < ActiveRecord::Base raise ArgumentError("Invalid type #{params[:type]}") end end + + # get the range of valid addresses for this network address + def range + # exclude the gateway and broadcast addresses + # associated w/ this ip address + sgatewayi = IPAddr.new(gateway).to_i + sbcasti = IPAddr.new(broadcast).to_i + + addresses = [] + _numeric_range.each { |i| # this can be a big performance hit + # depending on your network / mask + unless i == sgatewayi || i == sbcasti + addresses.push IPAddr.new(i, Socket::AF_INET).to_s + end + } + + addresses + end + + # get first available address for this network address given list + # of addresses already used + def first_available_address(used_addresses) + # exclude the gateway and broadcast addresses + # associated w/ this ip address + sgatewayi = IPAddr.new(gateway).to_i + sbcasti = IPAddr.new(broadcast).to_i + + _numeric_range.each { |i| + ip = IPAddr.new(i, Socket::AF_INET).to_s + unless i == sgatewayi || i == sbcasti || used_addresses.include?(ip) + return ip + end + } + + nil + end + + private + # internal helper method, get the range of numeric addresses for this + # network address + def _numeric_range + return [] unless network_id != nil + netaddress_ip = IPAddr.new(address) + netmask_ip = IPAddr.new(netmask) + firstaddress = netaddress_ip.to_i + 1 + lastaddress = netaddress_ip.to_i + (~netmask_ip).to_i - 1 + (firstaddress...lastaddress+1) + end + + end diff --git a/src/app/models/network.rb b/src/app/models/network.rb index 22e5692..c9d5d84 100644 --- a/src/app/models/network.rb +++ b/src/app/models/network.rb @@ -42,5 +42,16 @@ class Network < ActiveRecord::Base end end + # return true / false if network has statically assigned ips + def is_static? + !boot_type.nil? 
&& boot_type.proto == "static" + end + + # generate an ip address for this network based on the + # associated IpAddress and specified array of addreses already used + def gen_address(used_addresses = []) + return nil if ip_addresses.nil? || ip_addresses.size == 0 + ip_addresses[0].first_available_address(used_addresses) + end end diff --git a/src/app/models/physical_network.rb b/src/app/models/physical_network.rb index f50eeff..17a325e 100644 --- a/src/app/models/physical_network.rb +++ b/src/app/models/physical_network.rb @@ -20,4 +20,10 @@ class PhysicalNetwork < Network def is_destroyable? nics.empty? end + + # generate an ip address for this network based on associated ip and + # addresses already associated w/ nics + def gen_address + super(nics.collect { |n| n.ip_address}) + end end diff --git a/src/app/models/vlan.rb b/src/app/models/vlan.rb index e34dddc..ca80dfc 100644 --- a/src/app/models/vlan.rb +++ b/src/app/models/vlan.rb @@ -26,6 +26,12 @@ class Vlan < Network bondings.empty? && nics.empty? 
end + # generate an ip address for this network based on associated ip and + # addresses already associated w/ bondings/nics + def gen_address + super(bondings.collect { |b| b.ip_address } + nics.collect { |n| n.ip_address}) + end + protected def validate # ensure that any assigned nics only belong to vms, not hosts diff --git a/src/app/services/host_service.rb b/src/app/services/host_service.rb index 3b59f0e..e6b2810 100644 --- a/src/app/services/host_service.rb +++ b/src/app/services/host_service.rb @@ -73,6 +73,8 @@ module HostService def svc_modify(id) lookup(id, Privilege::MODIFY) @networks = Network.find(:all) + @suggested_ips = {} + @networks.each { |n| @suggested_ips[n.id] = n.gen_address } @bonding_types = BondingType.find(:all) end diff --git a/src/app/views/host/edit_network.rhtml b/src/app/views/host/edit_network.rhtml index 1154595..2b22a7c 100644 --- a/src/app/views/host/edit_network.rhtml +++ b/src/app/views/host/edit_network.rhtml @@ -241,6 +241,7 @@ jnet.static_ip = <%= rnet.boot_type.proto == 'static' %>; jnet.boot_type = "<%= rnet.boot_type.proto %>"; jnet.selected = false; + jnet.suggested_ip = "<%= @suggested_ips[rnet.id] %>"; networks.push(jnet); <% } %> @@ -380,7 +381,8 @@ div.children("input").removeAttr("disabled"); // grab value of address field from device - address = device ? device.ip_address : ""; + address = (device && (device.ip_address != "")) ? + device.ip_address : network.suggested_ip; div.children("input").val(address); // for non-static networks disable/hide ip address textbox diff --git a/src/test/unit/ip_address_test.rb b/src/test/unit/ip_address_test.rb index 89972ad..f0aa96b 100644 --- a/src/test/unit/ip_address_test.rb +++ b/src/test/unit/ip_address_test.rb @@ -17,4 +17,21 @@ class IpAddressTest < ActiveSupport::TestCase # this networking is in flux. Revisit this test once that stabilizes. flunk "Ip Address must be associated with network, nic, or bonding" unless @ip_address.valid? 
end + + def test_ip_address_range + ipv4addr = IpV4Address.new :address => '1.2.3.0', + :netmask => '255.255.255.0', + :gateway => '1.2.3.1', + :broadcast => '1.2.3.255', + :network_id => 5 + + range = ipv4addr.range + + assert_equal 253, range.size + assert !range.include?("1.2.3.1") + assert !range.include?("1.2.3.255") + (2...255).each { |i| + assert range.include?("1.2.3." + i.to_s) + } + end end diff --git a/src/test/unit/network_test.rb b/src/test/unit/network_test.rb index 73ea7eb..e9aa0d6 100644 --- a/src/test/unit/network_test.rb +++ b/src/test/unit/network_test.rb @@ -66,4 +66,23 @@ class NetworkTest < ActiveSupport::TestCase vl.bondings.push Bonding.new flunk "Vlan with bondings should not be destroyable" if vl.is_destroyable? end + + def test_gen_physical_net_address + net = networks :static_physical_network_one + addr = net.gen_address + + assert net.ip_addresses[0].address != addr + assert net.ip_addresses[0].netmask != addr + assert net.ip_addresses[0].broadcast != addr + assert net.ip_addresses[0].gateway != addr + net.nics.each { |nic| + assert nic.ip_address != addr + } + + addri = IPAddr.new(addr).to_i + naddri = IPAddr.new(net.ip_addresses[0].address).to_i + baddri = naddri + (~IPAddr.new(net.ip_addresses[0].netmask)).to_i + assert addri > naddri + assert addri < baddri + end end -- 1.6.2.5 From jboggs at redhat.com Wed Nov 4 00:36:59 2009 From: jboggs at redhat.com (Joey Boggs) Date: Tue, 03 Nov 2009 19:36:59 -0500 Subject: [Ovirt-devel] Refactoring of storage admin and node In-Reply-To: <20091103155507.GA8460@mcpierce-laptop.rdu.redhat.com> References: <1256754353-18515-1-git-send-email-dpierce@redhat.com> <20091103155507.GA8460@mcpierce-laptop.rdu.redhat.com> Message-ID: <4AF0CCAB.4090201@redhat.com> Darryl L. Pierce wrote: > On Wed, Oct 28, 2009 at 02:25:51PM -0400, Darryl L. Pierce wrote: > >> This patch set supercedes the previous set, and incorporates feedback from jboggs. 
>> >> There is a known issue in configuring volumes on an iSCSI pool that makes them >> unsupported at the moment. This code functions identical to virt-manager in that >> regard. >> > > Can I get some feedback or an ACK on these patches? > > > ------------------------------------------------------------------------ > > _______________________________________________ > Ovirt-devel mailing list > Ovirt-devel at redhat.com > https://www.redhat.com/mailman/listinfo/ovirt-devel > I'll grab these 2 to test first thing in the morning From jboggs at redhat.com Wed Nov 4 00:37:21 2009 From: jboggs at redhat.com (Joey Boggs) Date: Tue, 3 Nov 2009 19:37:21 -0500 Subject: [Ovirt-devel] [PATCH node] add ability to select separate disks for Root and HostVG in o-c-storage Message-ID: <1257295041-32027-1-git-send-email-jboggs@redhat.com> This adds the ability to select 2 different disks for root and HostVG. ovirt_init kernel arg still works as intended but does not support multiple arguments, this will be added in next --- scripts/ovirt-config-storage | 150 ++++++++++++++++++++++++++---------------- 1 files changed, 94 insertions(+), 56 deletions(-) diff --git a/scripts/ovirt-config-storage b/scripts/ovirt-config-storage index 77e00d0..57aaebd 100755 --- a/scripts/ovirt-config-storage +++ b/scripts/ovirt-config-storage @@ -85,30 +85,43 @@ check_partition_sizes() fi printf "\n" - get_drive_size $DRIVE SPACE - disk_size=$SPACE - need_size=$(echo "scale=0;" \ - "$SWAP_SIZE + $ROOT_SIZE * 2" \ - "+ $CONFIG_SIZE + $LOGGING_SIZE + $min_data_size" | bc -l) - - if [ $need_size -gt $disk_size ]; then - local gap_size=$(echo "scale=0; $need_size-$disk_size;" | bc -l) - printf "\n" - printf "=============================================================\n" - printf "The target storage device is too small for the desired sizes:\n" - printf " Size of target storage device: $disk_size MB\n" - printf " Total storage size to be used: $need_size MB\n" - printf "\n" - printf "You need an addition 
$gap_size MB of storage.\n" - printf "\n" - return 1 + get_drive_size $ROOTDRIVE ROOTDRIVESPACE + get_drive_size $HOSTVGDRIVE HOSTVGDRIVESPACE + ROOT_NEED_SIZE=$(echo "scale=0; $ROOT_SIZE * 2"| bc -l) + HOSTVG_NEED_SIZE=$(echo "scale=0;" \ + "$SWAP_SIZE + $CONFIG_SIZE + $LOGGING_SIZE + $min_data_size" | bc -l) + + if [ $ROOTDRIVE == $HOSTVGDRIVE ]; then + drive_list="ROOT" + ROOT_NEED_SIZE=$(echo "scale=0; $ROOT_SIZE * 2 + $HOSTVG_NEED_SIZE"| bc -l) else - printf "Required Space : $need_size MB\n\n" + drive_list="ROOT HOSTVG" fi + for drive in $drive_list; do + drive_need_size=$(eval "echo \${$(echo ${drive}_NEED_SIZE)"}) + drive_disk_size=$(eval "echo \${$(echo ${drive}DRIVESPACE)"}) + + if [ $drive_need_size -gt $drive_disk_size ]; then + local gap_size=$(echo "scale=0; $drive_need_size-$drive_disk_size;" | bc -l) + printf "\n" + printf "=============================================================\n" + printf "The target storage device is too small for the desired sizes:\n" + printf " Disk Target: $drive \n" + printf " Size of target storage device: $drive_disk_size MB\n" + printf " Total storage size to be used: $drive_need_size MB\n" + printf "\n" + printf "You need an additional $gap_size MB of storage.\n" + printf "\n" + return 1 + else + printf "Required Space : $drive_need_size MB\n\n" + fi + done + # check if an existing HostVG exists on a device other than the target devices="$(pvs -o pv_name,vg_name --noheadings | \ - grep "HostVG"|grep -v $DRIVE|awk '{ print $1 }')" + grep "HostVG"|grep -v $HOSTVGDRIVE|awk '{ print $1 }')" rc=0 if [ -n "$devices" ]; then printf "\n" @@ -193,8 +206,14 @@ get_dev_name() do_configure() { local name_and_size - DRIVE=$(get_dev_name) || return 0 - get_drive_size $DRIVE SPACE + printf "\n\nPlease select the disk to use for the Root.\n\n" + ROOTDRIVE=$(get_dev_name) || return 0 + get_drive_size $ROOTDRIVE ROOTDRIVESPACE + + printf "\n\nPlease select the disk to use for the HostVG.\n\n" + HOSTVGDRIVE=$(get_dev_name) || return 0 
+ get_drive_size $HOSTVGDRIVE HOSTVGDRIVESPACE + echo $HOSTVGDRIVESPACE printf "\n\nPlease configure storage partitions.\n\n" printf "* Enter partition sizes in MB.\n" @@ -210,7 +229,7 @@ do_configure() return fi - local space_left=$SPACE + local space_left=$HOSTVGDRIVESPACE for part in swap root config logging data ; do part_regexp="^0$" if [ "$part" = "data" ]; then @@ -250,13 +269,13 @@ do_configure() if ! check_partition_sizes; then printf "Please try partitioning again.\n" - DRIVE= + ROOTDRIVE= return 1 fi # save input variables augtool </dev/null|sort -u); do + for vg in $(pvs -o vg_name --noheadings $HOSTVGDRIVE* 2>/dev/null|sort -u); do wipe_volume_group $vg done } perform_partitioning() { - log "Partitioning drive: $DRIVE" - if [ -z "$DRIVE" ]; then + log "Partitioning drive: $ROOTDRIVE" + if [ -z "$HOSTVGDRIVE" ]; then printf "\nNo storage device selected.\n" return fi start_log - log "Starting partitioning of $DRIVE" + log "Starting partitioning of $ROOTDRIVE" log "Saving parameters" unmount_config /etc/default/ovirt @@ -326,40 +351,51 @@ perform_partitioning() # FIXME: save a backup copy, just in case? 
log "Wiping old boot sector" - dd if=/dev/zero of=$DRIVE bs=1024K count=1 - blockdev --rereadpt $DRIVE - partprobe -s $DRIVE + dd if=/dev/zero of=$ROOTDRIVE bs=1024K count=1 + blockdev --rereadpt $ROOTDRIVE + partprobe -s $ROOTDRIVE MEM_SIZE_MB=$(echo "scale=0; $MEM_SIZE_MB / 1024;" | bc -l) - log "Labeling Drive" - parted $DRIVE -s "mklabel ${LABEL_TYPE}" + log "Labeling Drive: $ROOTDRIVE" + parted $ROOTDRIVE -s "mklabel ${LABEL_TYPE}" + if [ $ROOTDRIVE != $HOSTVGDRIVE ]; then + log "Labeling Drive: $HOSTVGDRIVE" + parted $HOSTVGDRIVE -s "mklabel ${LABEL_TYPE}" + fi log "Creating Root and RootBackup Partitions" let RootBackup_end=${ROOT_SIZE}*2 - parted $DRIVE -s "mkpartfs primary ext2 0M ${ROOT_SIZE}M" - parted $DRIVE -s "mkpartfs primary ext2 ${ROOT_SIZE}M ${RootBackup_end}M" + parted $ROOTDRIVE -s "mkpartfs primary ext2 0M ${ROOT_SIZE}M" + parted $ROOTDRIVE -s "mkpartfs primary ext2 ${ROOT_SIZE}M ${RootBackup_end}M" # sleep to ensure filesystems are created before continuing sleep 10 - e2label ${DRIVE}1 Root - e2label ${DRIVE}2 RootBackup - tune2fs -c 0 -i 0 ${DRIVE}1 - tune2fs -c 0 -i 0 ${DRIVE}2 + e2label ${ROOTDRIVE}1 Root + e2label ${ROOTDRIVE}2 RootBackup + tune2fs -c 0 -i 0 ${ROOTDRIVE}1 + tune2fs -c 0 -i 0 ${ROOTDRIVE}2 log "Creating LVM partition" - parted $DRIVE -s "mkpart primary ext2 ${RootBackup_end}M -1" + + if [ $ROOTDRIVE == $HOSTVGDRIVE ]; then + parted $HOSTVGDRIVE -s "mkpart primary ext2 ${RootBackup_end}M -1" + hostvgpart="3" + else + parted $HOSTVGDRIVE -s "mkpart primary ext2 0M -1" + hostvgpart="1" + fi log "Toggling LVM on" - parted $DRIVE -s "set 3 lvm on" - parted $DRIVE -s "print" + parted $HOSTVGDRIVE -s "set $hostvgpart lvm on" + parted $ROOTDRIVE -s "print" udevadm settle 2> /dev/null || udevsettle # sync GPT to the legacy MBR partitions if [ "gpt" == "$LABEL_TYPE" ]; then log "Running gptsync to create legacy mbr" - gptsync $DRIVE + gptsync $ROOTDRIVE fi - partpv=${DRIVE}3 + partpv=${HOSTVGDRIVE}${hostvgpart} if [ ! 
-e "$partpv" ]; then # e.g. /dev/cciss/c0d0p2 - partpv=${DRIVE}p3 + partpv=${HOSTVGDRIVE}p${hostvgpart} fi log "Creating physical volume" if [ ! -e "$partpv" ]; then @@ -432,7 +468,7 @@ perform_partitioning() do_confirm() { - if [ -z "$DRIVE" ]; then + if [ -z "$ROOTDRIVE" ]; then printf "\nNo storage device selected.\n" return fi @@ -499,8 +535,10 @@ DATA_SIZE=${OVIRT_VOL_DATA_SIZE:-$default_data_size} if [ -n "$OVIRT_INIT" ]; then # if present, use the drive selected with 'ovirt_init' boot parameter - DRIVE=$OVIRT_INIT - get_drive_size $DRIVE SPACE + # setting these the same until kernel cmdline argument implemented + ROOTDRIVE=$OVIRT_INIT + HOSTVGDRIVE=$OVIRT_INIT + get_drive_size $ROOTDRIVE ROOTDRIVESPACE fi # if the node is Fedora then use GPT, otherwise use MBR @@ -514,7 +552,7 @@ if [ "$1" == "AUTO" ]; then log "Beginning automatic disk partitioning.\n" if [ -n "$OVIRT_INIT" ]; then # do not format if HostVG exists on selected disk... - pvs -o vg_name --noheadings $DRIVE* 2>/dev/null|grep -q -m1 "HostVG" + pvs -o vg_name --noheadings $HOSTVGDRIVE* 2>/dev/null|grep -q -m1 "HostVG" existingHostVG=$? # ... 
unless overridden by ovirt_firstboot parameter if is_firstboot || [ $existingHostVG -ne 0 ]; then @@ -529,7 +567,7 @@ if [ "$1" == "AUTO" ]; then log "Missing device parameter: unable to partition any disk" fi else - OPTIONS="\"Configure\" \"Review\" \"Commit Changes And Quit\" \"Return To Menu\"" + OPTIONS="\"Configure Storage\" \"Review\" \"Commit Changes And Quit\" \"Return To Menu\"" eval set $OPTIONS PS3="Choose an option: " @@ -539,7 +577,7 @@ else select OPTION in "$@" do case "$OPTION" in - "Configure") do_configure ; break ;; + "Configure Storage") do_configure ; break ;; "Review") do_review ; break ;; "Commit Changes And Quit") do_confirm ; break ;; "Return To Menu") printf "\nExiting.\n"; exit ;; -- 1.6.5.rc2 From jboggs at redhat.com Wed Nov 4 14:17:47 2009 From: jboggs at redhat.com (Joey Boggs) Date: Wed, 04 Nov 2009 09:17:47 -0500 Subject: [Ovirt-devel] [PATCH 2/2] Refactor domain storage setup to use pool and volume selection screens. In-Reply-To: <1256754353-18515-3-git-send-email-dpierce@redhat.com> References: <1256754353-18515-1-git-send-email-dpierce@redhat.com> <1256754353-18515-2-git-send-email-dpierce@redhat.com> <1256754353-18515-3-git-send-email-dpierce@redhat.com> Message-ID: <4AF18D0B.6000603@redhat.com> Darryl L. Pierce wrote: > Now, when the user elects to use managed storage, they're show the list > of available storage pools. Then, after selecting one, the user is shown > the list of volumes on that pool. These are then used to create the > domain. > > Signed-off-by: Darryl L. 
Pierce > --- > Makefile.am | 1 + > nodeadmin/adddomain.py | 186 ++++++++++++++++++++++++++------------------ > nodeadmin/domainconfig.py | 17 +++- > nodeadmin/libvirtworker.py | 34 ++++---- > 4 files changed, 142 insertions(+), 96 deletions(-) > > diff --git a/Makefile.am b/Makefile.am > index 55ef277..e712d6a 100644 > --- a/Makefile.am > +++ b/Makefile.am > @@ -48,6 +48,7 @@ EXTRA_DIST = \ > nodeadmin/netmenu.py \ > nodeadmin/nodeadmin.py \ > nodeadmin/nodemenu.py \ > + nodeadmin/poolconfig.py \ > nodeadmin/removedomain.py \ > nodeadmin/removepool.py \ > nodeadmin/removevolume.py \ > diff --git a/nodeadmin/adddomain.py b/nodeadmin/adddomain.py > index bb06a62..34aa59c 100755 > --- a/nodeadmin/adddomain.py > +++ b/nodeadmin/adddomain.py > @@ -37,10 +37,11 @@ OS_VARIANT_PAGE = 12 > RAM_CPU_PAGE = 13 > ENABLE_STORAGE_PAGE = 14 > LOCAL_STORAGE_PAGE = 15 > -MANAGED_STORAGE_PAGE = 16 > -BRIDGE_PAGE = 17 > -VIRT_DETAILS_PAGE = 18 > -CONFIRM_PAGE = 19 > +SELECT_POOL_PAGE = 16 > +SELECT_VOLUME_PAGE = 17 > +BRIDGE_PAGE = 18 > +VIRT_DETAILS_PAGE = 19 > +CONFIRM_PAGE = 20 > > LOCATION="location" > KICKSTART="kickstart" > @@ -58,24 +59,25 @@ class DomainConfigScreen(ConfigScreen): > self.__config.set_virt_type(self.get_libvirt().get_default_virt_type()) > > def get_elements_for_page(self, screen, page): > - if page == VM_DETAILS_PAGE: return self.get_vm_details_page(screen) > - elif page == LOCAL_INSTALL_PAGE: return self.get_local_install_page(screen) > - elif page == SELECT_CDROM_PAGE: return self.get_select_cdrom_page(screen) > - elif page == SELECT_ISO_PAGE: return self.get_select_iso_page(screen) > - elif page == NETWORK_INSTALL_PAGE: return self.get_network_install_page(screen) > - elif page == OS_TYPE_PAGE: return self.get_os_type_page(screen) > - elif page == OS_VARIANT_PAGE: return self.get_os_variant_page(screen) > - elif page == RAM_CPU_PAGE: return self.get_ram_and_cpu_page(screen) > - elif page == ENABLE_STORAGE_PAGE: return self.get_enable_storage_page(screen) > 
- elif page == LOCAL_STORAGE_PAGE: return self.get_local_storage_page(screen) > - elif page == MANAGED_STORAGE_PAGE: return self.get_managed_storage_page(screen) > - elif page == BRIDGE_PAGE: return self.get_bridge_page(screen) > - elif page == VIRT_DETAILS_PAGE: return self.get_virt_details_page(screen) > - elif page == CONFIRM_PAGE: return self.get_confirm_page(screen) > + if page is VM_DETAILS_PAGE: return self.get_vm_details_page(screen) > + elif page is LOCAL_INSTALL_PAGE: return self.get_local_install_page(screen) > + elif page is SELECT_CDROM_PAGE: return self.get_select_cdrom_page(screen) > + elif page is SELECT_ISO_PAGE: return self.get_select_iso_page(screen) > + elif page is NETWORK_INSTALL_PAGE: return self.get_network_install_page(screen) > + elif page is OS_TYPE_PAGE: return self.get_os_type_page(screen) > + elif page is OS_VARIANT_PAGE: return self.get_os_variant_page(screen) > + elif page is RAM_CPU_PAGE: return self.get_ram_and_cpu_page(screen) > + elif page is ENABLE_STORAGE_PAGE: return self.get_enable_storage_page(screen) > + elif page is LOCAL_STORAGE_PAGE: return self.get_local_storage_page(screen) > + elif page is SELECT_POOL_PAGE: return self.get_select_pool_page(screen) > + elif page is SELECT_VOLUME_PAGE: return self.get_select_volume_page(screen) > + elif page is BRIDGE_PAGE: return self.get_bridge_page(screen) > + elif page is VIRT_DETAILS_PAGE: return self.get_virt_details_page(screen) > + elif page is CONFIRM_PAGE: return self.get_confirm_page(screen) > return [] > > def validate_input(self, page, errors): > - if page == VM_DETAILS_PAGE: > + if page is VM_DETAILS_PAGE: > if len(self.__guest_name.value()) > 0: > if self.get_libvirt().domain_exists(self.__guest_name.value()): > errors.append("Guest name '%s' is already in use." 
% self.__guest_name.value()) > @@ -83,12 +85,12 @@ class DomainConfigScreen(ConfigScreen): > return True > else: > errors.append("Guest name must be a string between 0 and 50 characters.") > - elif page == LOCAL_INSTALL_PAGE: > + elif page is LOCAL_INSTALL_PAGE: > if self.__install_source.getSelection() == DomainConfig.INSTALL_SOURCE_CDROM: > return True > elif self.__install_source.getSelection() == DomainConfig.INSTALL_SOURCE_ISO: > return True > - elif page == SELECT_CDROM_PAGE: > + elif page is SELECT_CDROM_PAGE: > if self.__install_media.getSelection() != None: > if len(self.get_hal().list_installable_volumes()) == 0: > errors.append("No installable media is available.") > @@ -96,7 +98,7 @@ class DomainConfigScreen(ConfigScreen): > return True > else: > errors.append("You must select an install media.") > - elif page == SELECT_ISO_PAGE: > + elif page is SELECT_ISO_PAGE: > if len(self.__iso_path.value()) > 0: > if os.path.exists(self.__iso_path.value()): > if os.path.isfile(self.__iso_path.value()): > @@ -108,14 +110,14 @@ class DomainConfigScreen(ConfigScreen): > errors.append(self.__iso_path.value()) > else: > errors.append("An install media selection is required.") > - elif page == NETWORK_INSTALL_PAGE: > + elif page is NETWORK_INSTALL_PAGE: > if len(self.__install_url.value()) > 0: > return True > else: > errors.append("An install tree is required.") > - elif page == OS_TYPE_PAGE: return True > - elif page == OS_VARIANT_PAGE: return True > - elif page == RAM_CPU_PAGE: > + elif page is OS_TYPE_PAGE: return True > + elif page is OS_VARIANT_PAGE: return True > + elif page is RAM_CPU_PAGE: > if (len(self.__memory.value()) > 0 and len(self.__cpus.value()) > 0) \ > and (int(self.__memory.value()) > 0 and int(self.__cpus.value()) > 0): > return True > @@ -128,8 +130,8 @@ class DomainConfigScreen(ConfigScreen): > errors.append("A value must be entered for CPUs.") > elif int(self.__cpus.value()) <= 0: > errors.append("A positive integer value must be entered for 
memory.") > - elif page == ENABLE_STORAGE_PAGE: return True > - elif page == LOCAL_STORAGE_PAGE: > + elif page is ENABLE_STORAGE_PAGE: return True > + elif page is LOCAL_STORAGE_PAGE: > if len(self.__storage_size.value()) > 0: > if float(self.__storage_size.value()) > 0: > return True > @@ -137,12 +139,17 @@ class DomainConfigScreen(ConfigScreen): > errors.append("A positive value must be entered for the storage size.") > else: > errors.append("A value must be entered for the storage size.") > - elif page == MANAGED_STORAGE_PAGE: > - if self.__existing_storage.getSelection() is not None: > + elif page is SELECT_POOL_PAGE: > + if self.__storage_pool.getSelection() is not None: > + return True > + else: > + errors.append("Please select a storage pool.") > + elif page is SELECT_VOLUME_PAGE: > + if self.__storage_volume.getSelection() is not None: > return True > else: > errors.append("Please select a storage volume.") > - elif page == BRIDGE_PAGE: > + elif page is BRIDGE_PAGE: > if self.__network_bridges.getSelection() != None: > if len(self.__mac_address.value()) > 0: > # TODO: regex check the format > @@ -151,62 +158,66 @@ class DomainConfigScreen(ConfigScreen): > errors.append("MAC address must be supplied.") > else: > errors.append("A network bridge must be selected.") > - elif page == VIRT_DETAILS_PAGE: > + elif page is VIRT_DETAILS_PAGE: > if self.__virt_types.getSelection() != None and self.__architectures.getSelection() != None: > return True > if self.__virt_types.getSelection() is None: > errors.append("Please select a virtualization type.") > if self.__architectures.getSelection() is None: > errors.append("Please selection an architecture.") > - elif page == CONFIRM_PAGE: return True > + elif page is CONFIRM_PAGE: return True > return False > > def process_input(self, page): > - if page == VM_DETAILS_PAGE: > + if page is VM_DETAILS_PAGE: > self.__config.set_guest_name(self.__guest_name.value()) > 
self.__config.set_install_type(self.__install_type.getSelection()) > - elif page == LOCAL_INSTALL_PAGE: > + elif page is LOCAL_INSTALL_PAGE: > self.__config.set_use_cdrom_source(self.__install_source.getSelection() == DomainConfig.INSTALL_SOURCE_CDROM) > - elif page == SELECT_CDROM_PAGE: > + elif page is SELECT_CDROM_PAGE: > self.__config.set_install_media(self.__install_media.getSelection()) > - elif page == SELECT_ISO_PAGE: > + elif page is SELECT_ISO_PAGE: > self.__config.set_iso_path(self.__iso_path.value()) > - elif page == NETWORK_INSTALL_PAGE: > + elif page is NETWORK_INSTALL_PAGE: > self.__config.set_install_url(self.__install_url.value()) > self.__config.set_kickstart_url(self.__kickstart_url.value()) > self.__config.set_kernel_options(self.__kernel_options.value()) > - elif page == OS_TYPE_PAGE: > + elif page is OS_TYPE_PAGE: > self.__config.set_os_type(self.__os_types.getSelection()) > - elif page == OS_VARIANT_PAGE: > + elif page is OS_VARIANT_PAGE: > self.__config.set_os_variant(self.__os_variants.getSelection()) > - elif page == RAM_CPU_PAGE: > + elif page is RAM_CPU_PAGE: > self.__config.set_memory(int(self.__memory.value())) > self.__config.set_cpus(int(self.__cpus.value())) > - elif page == ENABLE_STORAGE_PAGE: > + elif page is ENABLE_STORAGE_PAGE: > self.__config.set_enable_storage(self.__enable_storage.value()) > if self.__storage_type.getSelection() == DomainConfig.NEW_STORAGE: > self.__config.set_use_local_storage(True) > elif self.__storage_type.getSelection() == DomainConfig.EXISTING_STORAGE: > self.__config.set_use_local_storage(False) > - elif page == LOCAL_STORAGE_PAGE: > + elif page is LOCAL_STORAGE_PAGE: > self.__config.set_storage_size(float(self.__storage_size.value())) > self.__config.set_allocate_storage(self.__allocate_storage.value()) > - elif page == MANAGED_STORAGE_PAGE: > + elif page is SELECT_POOL_PAGE: > self.__config.set_use_local_storage(False) > - self.__config.set_existing_storage(self.__existing_storage.getSelection()) > 
- self.__config.set_storage_size(self.get_libvirt().get_storage_size(self.__existing_storage.getSelection())) > - elif page == BRIDGE_PAGE: > + self.__config.set_storage_pool(self.__storage_pool.getSelection()) > + elif page is SELECT_VOLUME_PAGE: > + self.__config.set_storage_volume(self.__storage_volume.getSelection()) > + volume = self.get_libvirt().get_storage_volume(self.__config.get_storage_pool(), > + self.__config.get_storage_volume()) > + self.__config.set_storage_size(volume.info()[1] / 1024.0 ** 3) > + elif page is BRIDGE_PAGE: > self.__config.set_network_bridge(self.__network_bridges.getSelection()) > - elif page == VIRT_DETAILS_PAGE: > + elif page is VIRT_DETAILS_PAGE: > self.__config.set_virt_type(self.__virt_types.getSelection()) > self.__config.set_architecture(self.__architectures.getSelection()) > - elif page == CONFIRM_PAGE: > + elif page is CONFIRM_PAGE: > self.get_libvirt().define_domain(self.__config, CreateMeter()) > self.set_finished() > > def get_back_page(self, page): > result = page > - if page == OS_TYPE_PAGE: > + if page is OS_TYPE_PAGE: > install_type = self.__config.get_install_type() > if install_type == DomainConfig.LOCAL_INSTALL: > if self.__config.get_use_cdrom_source(): > @@ -217,24 +228,26 @@ class DomainConfigScreen(ConfigScreen): > result = NETWORK_INSTALL_PAGE > elif install_type == DomainConfig.PXE_INSTALL: > result = VM_DETAILS_PAGE > - elif page == LOCAL_STORAGE_PAGE or page == MANAGED_STORAGE_PAGE: > + elif page is LOCAL_STORAGE_PAGE or page is SELECT_VOLUME_PAGE: > result = ENABLE_STORAGE_PAGE > - elif page == NETWORK_INSTALL_PAGE: > + elif page is SELECT_POOL_PAGE: > + result = ENABLE_STORAGE_PAGE > + elif page is NETWORK_INSTALL_PAGE: > result = VM_DETAILS_PAGE > - elif page == SELECT_CDROM_PAGE or page == SELECT_ISO_PAGE: > + elif page is SELECT_CDROM_PAGE or page is SELECT_ISO_PAGE: > result = LOCAL_INSTALL_PAGE > - elif page == BRIDGE_PAGE: > + elif page is BRIDGE_PAGE: > if self.__config.get_use_local_storage(): > 
result = LOCAL_STORAGE_PAGE > else: > - result = MANAGED_STORAGE_PAGE > + result = SELECT_VOLUME_PAGE > else: > if page > 1: result = page - 1 > return result > > def get_next_page(self, page): > result = page > - if page == VM_DETAILS_PAGE: > + if page is VM_DETAILS_PAGE: > install_type = self.__config.get_install_type() > if install_type == DomainConfig.LOCAL_INSTALL: > result = LOCAL_INSTALL_PAGE > @@ -242,34 +255,36 @@ class DomainConfigScreen(ConfigScreen): > result = NETWORK_INSTALL_PAGE > elif install_type == DomainConfig.PXE_INSTALL: > result = OS_TYPE_PAGE > - elif page == LOCAL_INSTALL_PAGE: > + elif page is LOCAL_INSTALL_PAGE: > if self.__config.get_use_cdrom_source(): > result = SELECT_CDROM_PAGE > else: > result = SELECT_ISO_PAGE > - elif page == SELECT_CDROM_PAGE or page == SELECT_ISO_PAGE: > + elif page is SELECT_CDROM_PAGE or page is SELECT_ISO_PAGE: > result = OS_TYPE_PAGE > - elif page == NETWORK_INSTALL_PAGE: > + elif page is NETWORK_INSTALL_PAGE: > result = OS_TYPE_PAGE > - elif page == ENABLE_STORAGE_PAGE: > + elif page is ENABLE_STORAGE_PAGE: > result = BRIDGE_PAGE > if self.__config.get_enable_storage(): > if self.__config.get_use_local_storage(): > result = LOCAL_STORAGE_PAGE > else: > - result = MANAGED_STORAGE_PAGE > - elif page == LOCAL_STORAGE_PAGE or page == MANAGED_STORAGE_PAGE: > + result = SELECT_POOL_PAGE > + elif page is LOCAL_STORAGE_PAGE: > result = BRIDGE_PAGE > else: > result = page + 1 > return result > > def page_has_finish(self, page): > - if page == CONFIRM_PAGE: return True > + if page is CONFIRM_PAGE: return True > return False > > def page_has_next(self, page): > - if page < CONFIRM_PAGE: > + if page is SELECT_POOL_PAGE: return self.__has_pools > + elif page is SELECT_VOLUME_PAGE: return self.__has_volumes > + elif page < CONFIRM_PAGE: > return True > > def get_vm_details_page(self, screen): > @@ -393,17 +408,36 @@ class DomainConfigScreen(ConfigScreen): > return [Label("Configure local storage"), > grid] > > - def 
get_managed_storage_page(self, screen): > + def get_select_pool_page(self, screen): > + pools = [] > + for pool in self.get_libvirt().list_storage_pools(): > + pools.append([pool, pool, pool == self.__config.get_storage_pool()]) > + if len(pools) > 0: > + self.__storage_pool = RadioBar(screen, (pools)) > + grid = Grid(2, 1) > + grid.setField(Label("Storage pool:"), 0, 0, anchorTop = 1) > + grid.setField(self.__storage_pool, 1, 0) > + self.__has_pools = True > + else: > + grid = Label("There are no storage pools available.") > + self.__has_pools = False > + return [Label("Configure Managed Storage: Select A Pool"), > + grid] > + > + def get_select_volume_page(self, screen): > volumes = [] > - for volume in self.get_libvirt().list_storage_volumes(): > - volumes.append(["%s (%d GB)" % (volume.name(), volume.info()[1] / (1024 ** 3)), > - volume.name(), > - self.__config.is_existing_storage(volume.name())]) > - self.__existing_storage = RadioBar(screen, (volumes)) > - grid = Grid(2, 1) > - grid.setField(Label("Existing storage:"), 0, 0) > - grid.setField(self.__existing_storage, 1, 0) > - return [Label("Configure managed storage"), > + for volume in self.get_libvirt().list_storage_volumes(self.__config.get_storage_pool()): > + volumes.append([volume, volume, volume == self.__config.get_storage_volume()]) > + if len(volumes) > 0: > + self.__storage_volume = RadioBar(screen, (volumes)) > + grid = Grid(2, 1) > + grid.setField(Label("Storage volumes:"), 0, 0, anchorTop = 1) > + grid.setField(self.__storage_volume, 1, 0) > + self.__has_volumes = True > + else: > + grid = Label("This storage pool has no defined volumes.") > + self.__has_volumes = False > + return [Label("Configure Managed Storage: Select A Volume"), > grid] > > def get_bridge_page(self, screen): > @@ -448,7 +482,9 @@ class DomainConfigScreen(ConfigScreen): > grid.setField(Label("CPUs:"), 0, 3, anchorRight = 1) > grid.setField(Label("%d" % self.__config.get_cpus()), 1, 3, anchorLeft = 1) > 
grid.setField(Label("Storage:"), 0, 4, anchorRight = 1) > - grid.setField(Label(self.__config.get_existing_storage()), 1, 4, anchorLeft = 1) > + grid.setField(Label("%s (on %s)" % (self.__config.get_storage_volume(), > + self.__config.get_storage_pool())), > + 1, 4, anchorLeft = 1) > grid.setField(Label("Network:"), 0, 5, anchorRight = 1) > grid.setField(Label(self.__config.get_network_bridge()), 1, 5, anchorLeft = 1) > return [Label("Ready to begin installation of %s" % self.__config.get_guest_name()), > diff --git a/nodeadmin/domainconfig.py b/nodeadmin/domainconfig.py > index ef39fe0..4466e67 100644 > --- a/nodeadmin/domainconfig.py > +++ b/nodeadmin/domainconfig.py > @@ -50,7 +50,8 @@ class DomainConfig: > self.__use_local_storage = True > self.__storage_size = 8.0 > self.__allocate_storage = True > - self.__existing_storage = "" > + self.__storage_pool = "" > + self.__storage_volume = "" > self.__network_bridge = None > self.__mac_address = None > self.__virt_type = None > @@ -177,11 +178,17 @@ class DomainConfig: > def get_allocate_storage(self): > return self.__allocate_storage > > - def set_existing_storage(self, storage): > - self.__existing_storage = storage > + def set_storage_pool(self, pool): > + self.__storage_pool = pool > > - def get_existing_storage(self): > - return self.__existing_storage > + def get_storage_pool(self): > + return self.__storage_pool > + > + def set_storage_volume(self, volume): > + self.__storage_volume = volume > + > + def get_storage_volume(self): > + return self.__storage_volume > > def is_existing_storage(self, storage): > return self.__existing_storage == storage > diff --git a/nodeadmin/libvirtworker.py b/nodeadmin/libvirtworker.py > index b2acabe..f31266c 100644 > --- a/nodeadmin/libvirtworker.py > +++ b/nodeadmin/libvirtworker.py > @@ -196,6 +196,11 @@ class LibvirtWorker: > '''Returns the storage pool with the specified name.''' > return self.__conn.storagePoolLookupByName(name) > > + def list_storage_volumes(self, 
poolname): > + '''Returns the list of all defined storage volumes for a given pool.''' > + pool = self.get_storage_pool(poolname) > + return pool.listVolumes() > + > def define_storage_volume(self, config, meter): > '''Defines a new storage volume.''' > self.create_storage_pool(config.get_pool().name()) > @@ -204,10 +209,15 @@ class LibvirtWorker: > > def remove_storage_volume(self, poolname, volumename): > '''Removes the specified storage volume.''' > - pool = self.get_storage_pool(poolname) > - volume = pool.storageVolLookupByName(volumename) > + volume = self.get_storage_volume(poolname, volumename) > volume.delete(0) > > + def get_storage_volume(self, poolname, volumename): > + '''Returns a reference to the specified storage volume.''' > + pool =self.get_storage_pool(poolname) > + volume = pool.storageVolLookupByName(volumename) > + return volume > + > def list_bridges(self): > '''Lists all defined and active bridges.''' > bridges = self.__conn.listNetworks() > @@ -221,21 +231,9 @@ class LibvirtWorker: > def generate_mac_address(self): > return self.__net.macaddr > > - def list_storage_volumes(self): > - '''Lists all defined storage volumes.''' > - pools = self.__conn.listStoragePools() > - pools.extend(self.__conn.listDefinedStoragePools()) > - result = [] > - for name in pools: > - pool = self.__conn.storagePoolLookupByName(name) > - for volname in pool.listVolumes(): > - volume = self.__conn.storageVolLookupByPath("/var/lib/libvirt/images/%s" % volname) > - result.append(volume) > - return result > - > - def get_storage_size(self, name): > + def get_storage_size(self, poolname, volumename): > '''Returns the size of the specified storage volume.''' > - volume = self.__conn.storageVolLookupByPath("/var/lib/libvirt/images/%s" % name) > + volume = self.get_storage_volume(poolname, volumename) > return volume.info()[1] / (1024.0 ** 3) > > def get_virt_types(self): > @@ -381,6 +379,10 @@ class LibvirtWorker: > pool_object = pool, > suffix = ".img") > path = 
os.path.join(DEFAULT_POOL_TARGET_PATH, path) > + else: > + volume = self.get_storage_volume(config.get_storage_pool(), > + config.get_storage_volume()) > + path = volume.path() > > if path is not None: > storage= virtinst.VirtualDisk(conn = self.__conn, > 1/2 applies fine; 2/2 won't. Guessing it needs to be rebased, since the latest commits would put them out of order: commit 5b067ccef1cee68f6750c7867a7877a5b5ba6ac0 Author: Darryl L. Pierce Date: Thu Oct 29 15:08:09 2009 -0400 Two more missed API changes. commit 6e8af1b27ebeee0e710e24a1d7d253f78587de5f Author: Darryl L. Pierce Date: Thu Oct 29 13:40:07 2009 -0400 Fixed an error that kept nodes from being removed. From imain at redhat.com Wed Nov 4 19:59:37 2009 From: imain at redhat.com (Ian Main) Date: Wed, 4 Nov 2009 11:59:37 -0800 Subject: [Ovirt-devel] [PATCH server] Update daemons to use new QMF. Message-ID: <1257364777-19801-1-git-send-email-imain@redhat.com> This patch updates dbomatic, taskomatic and host-register to use the new C++ wrapped ruby QMF bindings. It also fixes a couple of bugs along the way including the 0 cpu bug for host-register. This is a compilation of work done by myself and Arjun Roy. 
Signed-off-by: Ian Main --- src/db-omatic/db_omatic.rb | 111 ++++++------- src/host-browser/host-register.rb | 337 ++++++++++++++++++++----------------- src/libvirt-list.rb | 31 +++-- src/matahari-list.rb | 33 +++-- src/task-omatic/task_storage.rb | 10 +- src/task-omatic/taskomatic.rb | 81 +++++---- 6 files changed, 323 insertions(+), 280 deletions(-) diff --git a/src/db-omatic/db_omatic.rb b/src/db-omatic/db_omatic.rb index c400097..686ad71 100755 --- a/src/db-omatic/db_omatic.rb +++ b/src/db-omatic/db_omatic.rb @@ -3,18 +3,18 @@ $: << File.join(File.dirname(__FILE__), "../dutils") $: << File.join(File.dirname(__FILE__), ".") -require "rubygems" -require "qpid" +require 'rubygems' require 'monitor' require 'dutils' require 'daemons' require 'optparse' require 'logger' require 'vnc' +require 'qmf' +require 'socket' include Daemonize - # This sad and pathetic readjustment to ruby logger class is # required to fix the formatting because rails does the same # thing but overrides it to just the message. @@ -29,12 +29,9 @@ end $logfile = '/var/log/ovirt-server/db-omatic.log' - -class DbOmatic < Qpid::Qmf::Console - +class DbOmatic < Qmf::ConsoleHandler # Use monitor mixin for mutual exclusion around checks to heartbeats # and updates to objects/heartbeats. 
- include MonitorMixin def initialize() @@ -77,7 +74,6 @@ class DbOmatic < Qpid::Qmf::Console begin ensure_credentials - database_connect server, port = nil @@ -91,8 +87,17 @@ class DbOmatic < Qpid::Qmf::Console end @logger.info "Connecting to amqp://#{server}:#{port}" - @session = Qpid::Qmf::Session.new(:console => self, :manage_connections => true) - @broker = @session.add_broker("amqp://#{server}:#{port}", :mechanism => 'GSSAPI') + @settings = Qmf::ConnectionSettings.new + @settings.host = server + @settings.port = port +# @settings.mechanism = 'GSSAPI' +# @settings.service = 'qpidd' + @settings.sendUserId = false + + @connection = Qmf::Connection.new(@settings) + @qmfc = Qmf::Console.new(self) + @broker = @qmfc.add_connection(@connection) + @broker.wait_for_stable db_init_cleanup rescue Exception => ex @@ -101,10 +106,8 @@ class DbOmatic < Qpid::Qmf::Console end end - def ensure_credentials() get_credentials('qpidd') - Thread.new do while true do sleep(3600) @@ -195,7 +198,7 @@ class DbOmatic < Qpid::Qmf::Console if state == Vm::STATE_STOPPED @logger.info "VM has moved to stopped, clearing VM attributes." - qmf_vm = @session.object(:class => "domain", 'uuid' => vm.uuid) + qmf_vm = @qmfc.object(Qmf::Query.new(:class => "domain", 'uuid' => vm.uuid)) if qmf_vm @logger.info "Deleting VM #{vm.description}." 
result = qmf_vm.undefine @@ -207,9 +210,9 @@ class DbOmatic < Qpid::Qmf::Console # If we are running, update the node that the domain is running on elsif state == Vm::STATE_RUNNING @logger.info "VM is running, determine the node it is running on" - qmf_vm = @session.object(:class => "domain", 'uuid' => vm.uuid) + qmf_vm = @qmfc.object(Qmf::Query.new(:class => "domain", 'uuid' => vm.uuid)) if qmf_vm - qmf_host = @session.object(:class => "node", :object_id => qmf_vm.node) + qmf_host = @qmfc.object(Qmf::Query.new(:class => "node", :object_id => qmf_vm.node)) db_host = Host.find(:first, :conditions => ['hostname = ?', qmf_host.hostname]) @logger.info "VM #{vm.description} is running on node #{db_host.hostname}" vm.host_id = db_host.id @@ -273,7 +276,7 @@ class DbOmatic < Qpid::Qmf::Console # Double check to make sure this host is still up. begin - qmf_host = @session.object(:class => 'node', 'hostname' => host_info['hostname']) + qmf_host = @qmfc.objects(Qmf::Query.new(:class => "node", 'hostname' => host_info['hostname'])) if !qmf_host @logger.info "Host #{host_info['hostname']} is not up after waiting 20 seconds, skipping dead VM check." else @@ -301,16 +304,23 @@ class DbOmatic < Qpid::Qmf::Console end end - def object_props(broker, obj) - target = obj.schema.klass_key.package + def object_update(obj, hasProps, hasStats) + target = obj.object_class.package_name + type = obj.object_class.class_name return if target != "com.redhat.libvirt" - type = obj.schema.klass_key.klass_name + if hasProps + update_props(obj, type) + end + if hasStats + update_stats(obj, type) + end + end + def update_props(obj, type) # I just sync this whole thing because there shouldn't be a lot of contention here.. synchronize do values = @cached_objects[obj.object_id.to_s] - new_object = false if values == nil @@ -318,8 +328,7 @@ class DbOmatic < Qpid::Qmf::Console # Save the agent and broker bank so that we can tell what objects # are expired when the heartbeat for them stops. 
- values[:broker_bank] = obj.object_id.broker_bank - values[:agent_bank] = obj.object_id.agent_bank + values[:agent_key] = obj.object_id.agent_key values[:obj_key] = obj.object_id.to_s values[:class_type] = type values[:timed_out] = false @@ -370,53 +379,48 @@ class DbOmatic < Qpid::Qmf::Console end end - def object_stats(broker, obj) - target = obj.schema.klass_key.package - return if target != "com.redhat.libvirt" - type = obj.schema.klass_key.klass_name - + def update_stats(obj, type) synchronize do values = @cached_objects[obj.object_id.to_s] - if !values + if values == nil values = {} @cached_objects[obj.object_id.to_s] = values - - values[:broker_bank] = obj.object_id.broker_bank - values[:agent_bank] = obj.object_id.agent_bank + values[:agent_key] = obj.object_id.agent_key values[:class_type] = type values[:timed_out] = false values[:synced] = false end + obj.statistics.each do |key, newval| if values[key.to_s] != newval values[key.to_s] = newval - #puts "new value for statistic #{key} : #{newval}" end end end end - def heartbeat(agent, timestamp) - puts "heartbeat from agent #{agent}" + def agent_heartbeat(agent, timestamp) + puts "heartbeat from agent #{agent.key}" return if agent == nil synchronize do - bank_key = "#{agent.agent_bank}.#{agent.broker.broker_bank}" - @heartbeats[bank_key] = [agent, timestamp] + @heartbeats[agent.key] = [agent, timestamp] end end + def agent_added(agent) + @logger.info("Agent connected: #{agent.key}") + end - def del_agent(agent) + def agent_deleted(agent) agent_disconnected(agent) end # This method marks objects associated with the given agent as timed out/invalid. Called either # when the agent heartbeats out, or we get a del_agent callback. 
def agent_disconnected(agent) + puts "agent_disconnected: #{agent.key}" @cached_objects.keys.each do |objkey| - if @cached_objects[objkey][:broker_bank] == agent.broker.broker_bank and - @cached_objects[objkey][:agent_bank] == agent.agent_bank - + if @cached_objects[objkey][:agent_key] == agent.key values = @cached_objects[objkey] if values[:timed_out] == false @logger.info "Marking object of type #{values[:class_type]} with key #{objkey} as timed out." @@ -430,8 +434,7 @@ class DbOmatic < Qpid::Qmf::Console values[:timed_out] = true end end - bank_key = "#{agent.agent_bank}.#{agent.broker.broker_bank}" - @heartbeats.delete(bank_key) + @heartbeats.delete(agent.key) end # The opposite of above, this is called when an agent is alive and well and makes sure @@ -439,9 +442,7 @@ class DbOmatic < Qpid::Qmf::Console def agent_connected(agent) @cached_objects.keys.each do |objkey| - if @cached_objects[objkey][:broker_bank] == agent.broker.broker_bank and - @cached_objects[objkey][:agent_bank] == agent.agent_bank - + if @cached_objects[objkey][:agent_key] == agent.key values = @cached_objects[objkey] if values[:timed_out] == true or values[:synced] == false if values[:class_type] == 'node' @@ -482,7 +483,7 @@ class DbOmatic < Qpid::Qmf::Console # them to stopped. VMs that exist as QMF objects will get set appropriately when the objects # appear on the bus. begin - qmf_vm = @session.object(:class => 'domain', 'uuid' => db_vm.uuid) + qmf_vm = @qmfc.object(Qmf::Query.new(:class => "domain", 'uuid' => db_vm.uuid)) if qmf_vm == nil set_stopped = true end @@ -498,15 +499,6 @@ class DbOmatic < Qpid::Qmf::Console end end - def broker_connected(broker) - @logger.info "Connected to broker." - end - - def broker_disconnected(broker) - @logger.error "Broker disconnected." - end - - # This is the mainloop that is called into as a separate thread. This just loops through # and makes sure all the agents are still reporting. If they aren't they get marked as # down. 
@@ -527,7 +519,7 @@ class DbOmatic < Qpid::Qmf::Console s = timestamp / 1000000000 delta = t - s - puts "Checking time delta for agent #{agent} - #{delta}" + puts "Checking time delta for agent #{agent.key} - #{delta}" if delta > 30 # No heartbeat for 30 seconds.. deal with dead/disconnected agent. @@ -545,15 +537,10 @@ class DbOmatic < Qpid::Qmf::Console end end - def main() - + Thread.abort_on_exception = true dbsync = DbOmatic.new() - - # Call into mainloop.. dbsync.check_heartbeats() - end main() - diff --git a/src/host-browser/host-register.rb b/src/host-browser/host-register.rb index 06d8553..e57b077 100755 --- a/src/host-browser/host-register.rb +++ b/src/host-browser/host-register.rb @@ -4,12 +4,13 @@ $: << File.join(File.dirname(__FILE__), "../dutils") $: << File.join(File.dirname(__FILE__), ".") require 'rubygems' -require 'qpid' require 'monitor' require 'dutils' require 'daemons' require 'optparse' require 'logger' +require 'qmf' +require 'socket' include Daemonize @@ -27,13 +28,17 @@ end $logfile = '/var/log/ovirt-server/host-register.log' -class HostRegister < Qpid::Qmf::Console +class HostRegister < Qmf::ConsoleHandler # Use monitor mixin for mutual exclusion around checks to heartbeats # and updates to objects/heartbeats. include MonitorMixin + # def initialize: Takes no parameters + # On initialize, we get a connection to the database. + # We then query the name and address of the qpidd server + # using dnsmasq records, and connect to qpidd. 
def initialize() super() @cached_hosts = {} @@ -78,7 +83,6 @@ class HostRegister < Qpid::Qmf::Console begin ensure_credentials - database_connect server, port = nil @@ -92,8 +96,17 @@ class HostRegister < Qpid::Qmf::Console end @logger.info "Connecting to amqp://#{server}:#{port}" - @session = Qpid::Qmf::Session.new(:console => self, :manage_connections => true) - @broker = @session.add_broker("amqp://#{server}:#{port}", :mechanism => 'GSSAPI') + @settings = Qmf::ConnectionSettings.new + @settings.host = server + @settings.port = port + # @settings.mechanism = 'GSSAPI' + # @settings.service = 'qpidd' + @settings.sendUserId = false + + @connection = Qmf::Connection.new(@settings) + @qmfc = Qmf::Console.new(self) + @broker = @qmfc.add_connection(@connection) + @broker.wait_for_stable rescue Exception => ex @logger.error "Error in hostregister: #{ex}" @@ -101,6 +114,7 @@ class HostRegister < Qpid::Qmf::Console end end + ###### Utility Methods ###### def debugputs(msg) puts msg if @debug == true and @do_daemon == false end @@ -116,20 +130,66 @@ class HostRegister < Qpid::Qmf::Console end end - def broker_connected(broker) - @logger.info 'Connected to broker.' + ###### QMF Callbacks ###### + def agent_heartbeat(agent, timestamp) + return if agent == nil + synchronize do + bank_key = "#{agent.agent_bank}.#{agent.broker_bank}" + @heartbeats[bank_key] = [agent, timestamp] + end end - def broker_disconnected(broker) - @logger.error 'Broker disconnected.' + def agent_added(agent) + agent_bank = agent.agent_bank + broker_bank = agent.broker_bank + key = "#{agent_bank}.#{broker_bank}" + + puts "AGENT ADDED: #{key}" + debugputs "Agent #{agent_bank}.#{broker_bank} connected!" 
+ agent_connected(agent_bank, broker_bank) + + host_list = @qmfc.objects(:package => 'com.redhat.matahari', :class => 'host') + puts "host_list length is #{host_list.length}" + host_list.each do |host| + if host.object_id.agent_bank == agent_bank + # Grab the cpus and nics associated before we take any locks + cpu_info = @qmfc.objects(:package => 'com.redhat.matahari', :class => 'cpu', 'host' => host.object_id) + nic_info = @qmfc.objects(:package => 'com.redhat.matahari', :class => 'nic', 'host' => host.object_id) + + # And pass it on to the real handler + update_host(host, cpu_info, nic_info) + end + end end - def agent_disconnected(agent) + def agent_deleted(agent) + agent_bank = agent.agent_bank + broker_bank = agent.broker_bank + key = "#{agent_bank}.#{broker_bank}" + + debugputs "Agent #{key} disconnected!" + @heartbeats.delete(key) + agent_disconnected(agent_bank, broker_bank) + end + + def object_update(obj, hasProps, hasStats) + target = obj.object_class.package_name + type = obj.object_class.class_name + return if target != 'com.redhat.matahari' or type != 'host' or hasProps == false + + # Fix a race where the properties of an object are published by a reconnecting + # host (thus marking it active) right before the heartbeat timer considers it dead + # (and marks it inactive) + @heartbeats.delete("#{obj.object_id.agent_bank}.#{obj.object_id.broker_bank}") + end # def object_props + + ###### Handlers for QMF Callbacks ###### + def agent_disconnected(agent_bank, broker_bank) synchronize do - debugputs "Marking objects for agent #{agent.broker.broker_bank}.#{agent.agent_bank} inactive" + debugputs "Marking objects for agent #{broker_bank}.#{agent_bank} inactive" @cached_hosts.keys.each do |objkey| - if @cached_hosts[objkey][:broker_bank] == agent.broker.broker_bank and - @cached_hosts[objkey][:agent_bank] == agent.agent_bank + if @cached_hosts[objkey][:broker_bank] == broker_bank and + @cached_hosts[objkey][:agent_bank] == agent_bank cached_host = 
@cached_hosts[objkey] cached_host[:active] = false @@ -139,12 +199,12 @@ class HostRegister < Qpid::Qmf::Console end # synchronize do end - def agent_connected(agent) + def agent_connected(agent_bank, broker_bank) synchronize do - debugputs "Marking objects for agent #{agent.broker.broker_bank}.#{agent.agent_bank} active" + debugputs "Marking objects for agent #{broker_bank}.#{agent_bank} active" @cached_hosts.keys.each do |objkey| - if @cached_hosts[objkey][:broker_bank] == agent.broker.broker_bank and - @cached_hosts[objkey][:agent_bank] == agent.agent_bank + if @cached_hosts[objkey][:broker_bank] == broker_bank and + @cached_hosts[objkey][:agent_bank] == agent_bank cached_host = @cached_hosts[objkey] cached_host[:active] = true @@ -154,123 +214,10 @@ class HostRegister < Qpid::Qmf::Console end # synchronize do end - def update_cpus(host_qmf, host_db, cpu_info) - - @logger.info "Updating CPU info for host #{host_qmf.hostname}" - debugputs "Broker reports #{cpu_info.length} cpus for host #{host_qmf.hostname}" - - # delete an existing CPUs and create new ones based on the data - @logger.info "Deleting any existing CPUs for host #{host_qmf.hostname}" - Cpu.delete_all(['host_id = ?', host_db.id]) - - @logger.info "Saving new CPU records for host #{host_qmf.hostname}" - cpu_info.each do |cpu| - flags = (cpu.flags.length > 255) ? "#{cpu.flags[0..251]}..." 
: cpu.flags - detail = Cpu.new( - 'cpu_number' => cpu.cpunum, - 'core_number' => cpu.corenum, - 'number_of_cores' => cpu.numcores, - 'vendor' => cpu.vendor, - 'model' => cpu.model.to_s, - 'family' => cpu.family.to_s, - 'cpuid_level' => cpu.cpuid_lvl, - 'speed' => cpu.speed.to_s, - 'cache' => cpu.cache.to_s, - 'flags' => flags) - - host_db.cpus << detail - - debugputs "Added new CPU for #{host_qmf.hostname}: " - debugputs "CPU # : #{cpu.cpunum}" - debugputs "Core # : #{cpu.corenum}" - debugputs "Total Cores : #{cpu.numcores}" - debugputs "Vendor : #{cpu.vendor}" - debugputs "Model : #{cpu.model}" - debugputs "Family : #{cpu.family}" - debugputs "Cpuid_lvl : #{cpu.cpuid_lvl}" - debugputs "Speed : #{cpu.speed}" - debugputs "Cache : #{cpu.cache}" - debugputs "Flags : #{flags}" - end - - @logger.info "Saved #{cpu_info.length} cpus for #{host_qmf.hostname}" - end - - def update_nics(host_qmf, host_db, nic_info) - - # Update the NIC details for this host: - # -if the NIC exists, then update the IP address - # -if the NIC does not exist, create it - # -any nic not in this list is deleted - - @logger.info "Updating NIC records for host #{host_qmf.hostname}" - debugputs "Broker reports #{nic_info.length} NICs for host" - - nics = Array.new - nics_to_delete = Array.new - - host_db.nics.each do |nic| - found = false - - nic_info.each do |detail| - # if we have a match, then update the database and remove - # the received data to avoid creating a dupe later - @logger.info "Searching for existing record for: #{detail.macaddr.upcase} in host #{host_qmf.hostname}" - if detail.macaddr.upcase == nic.mac - @logger.info "Updating details for: #{detail.interface} [#{nic.mac}]}" - nic.bandwidth = detail.bandwidth - nic.interface_name = detail.interface - nic.save! 
- found = true - nic_info.delete(detail) - end - end - - # if the record wasn't found, then remove it from the database - unless found - @logger.info "Marking NIC for removal: #{nic.interface_name} [#{nic.mac}]" - nics_to_delete << nic - end - end - - debugputs "Deleting #{nics_to_delete.length} NICs that are no longer part of host #{host_qmf.hostname}" - nics_to_delete.each do |nic| - @logger.info "Removing NIC: #{nic.interface_name} [#{nic.mac}]" - host_db.nics.delete(nic) - end - - # iterate over any nics left and create new records for them. - debugputs "Adding new records for #{nic_info.length} NICs to host #{host_qmf.hostname}" - nic_info.each do |nic| - detail = Nic.new( - 'mac' => nic.macaddr.upcase, - 'bandwidth' => nic.bandwidth, - 'interface_name' => nic.interface, - 'usage_type' => 1) - - host_db.nics << detail - - @logger.info "Added NIC #{nic.interface} with MAC #{nic.macaddr} to host #{host_qmf.hostname}" - end - end - - def object_props(broker, obj) - target = obj.schema.klass_key.package - type = obj.schema.klass_key.klass_name - return if target != 'com.redhat.matahari' or type != 'host' - - # Fix a race where the properties of an object are published by a reconnecting - # host (thus marking it active) right before the heartbeat timer considers it dead - # (and marks it inactive) - @heartbeats.delete("#{obj.object_id.agent_bank}.#{obj.object_id.broker_bank}") - + def update_host(obj, cpu_info, nic_info) already_cache = false already_in_db = false - # Grab the cpus and nics associated before we take any locks - cpu_info = @session.objects(:class => 'cpu', 'host' => obj.object_id) - nic_info = @session.objects(:class => 'nic', 'host' => obj.object_id) - synchronize do cached_host = @cached_hosts[obj.object_id.to_s] host = Host.find(:first, :conditions => ['hostname = ?', obj.hostname]) @@ -318,7 +265,7 @@ class HostRegister < Qpid::Qmf::Console 'memory' => obj.memory, 'is_disabled' => 0, 'hardware_pool' => HardwarePool.get_default_pool, - # Let 
host-status mark it available when it + # Let db-omatic mark it available when it # successfully connects to it via libvirt. 'state' => Host::STATE_UNAVAILABLE) @@ -330,10 +277,11 @@ class HostRegister < Qpid::Qmf::Console debugputs "memory: #{obj.memory}" rescue Exception => error - @logger.error "Error while creating record: #{error.message}" - # We haven't added the host to the db, and it isn't cached, so we just - # return without having done anything. To retry, the host will have to - # restart its agent. + @logger.error "Error when creating record: #{error.message}" + @logger.error "Restart matahari on host #{obj.hostname}" + # We haven't added the host to the db, and it isn't cached, + # so we just return without having done anything. To retry, + # the host will have to restart its agent. return end else @@ -394,27 +342,106 @@ class HostRegister < Qpid::Qmf::Console cached_host['hypervisor'] = obj.hypervisor cached_host['arch'] = obj.arch end # synchronize do - end # def object_props + end # end update_host - def heartbeat(agent, timestamp) - return if agent == nil - synchronize do - bank_key = "#{agent.agent_bank}.#{agent.broker.broker_bank}" - @heartbeats[bank_key] = [agent, timestamp] + def update_cpus(host_qmf, host_db, cpu_info) + + @logger.info "Updating CPU info for host #{host_qmf.hostname}" + debugputs "Broker reports #{cpu_info.length} cpus for host #{host_qmf.hostname}" + + # delete an existing CPUs and create new ones based on the data + @logger.info "Deleting any existing CPUs for host #{host_qmf.hostname}" + Cpu.delete_all(['host_id = ?', host_db.id]) + + @logger.info "Saving new CPU records for host #{host_qmf.hostname}" + cpu_info.each do |cpu| + flags = (cpu.flags.length > 255) ? "#{cpu.flags[0..251]}..." 
: cpu.flags + detail = Cpu.new( + 'cpu_number' => cpu.cpunum, + 'core_number' => cpu.corenum, + 'number_of_cores' => cpu.numcores, + 'vendor' => cpu.vendor, + 'model' => cpu.model.to_s, + 'family' => cpu.family.to_s, + 'cpuid_level' => cpu.cpuid_lvl, + 'speed' => cpu.speed.to_s, + 'cache' => cpu.cache.to_s, + 'flags' => flags) + + host_db.cpus << detail + + debugputs "Added new CPU for #{host_qmf.hostname}: " + debugputs "CPU # : #{cpu.cpunum}" + debugputs "Core # : #{cpu.corenum}" + debugputs "Total Cores : #{cpu.numcores}" + debugputs "Vendor : #{cpu.vendor}" + debugputs "Model : #{cpu.model}" + debugputs "Family : #{cpu.family}" + debugputs "Cpuid_lvl : #{cpu.cpuid_lvl}" + debugputs "Speed : #{cpu.speed}" + debugputs "Cache : #{cpu.cache}" + debugputs "Flags : #{flags}" end - end - def new_agent(agent) - key = "#{agent.agent_bank}.#{agent.broker.broker_bank}" - debugputs "Agent #{key} connected!" - agent_connected(agent) + @logger.info "Saved #{cpu_info.length} cpus for #{host_qmf.hostname}" end - def del_agent(agent) - key = "#{agent.agent_bank}.#{agent.broker.broker_bank}" - debugputs "Agent #{key} disconnected!" 
- @heartbeats.delete(key) - agent_disconnected(agent) + def update_nics(host_qmf, host_db, nic_info) + + # Update the NIC details for this host: + # -if the NIC exists, then update the IP address + # -if the NIC does not exist, create it + # -any nic not in this list is deleted + + @logger.info "Updating NIC records for host #{host_qmf.hostname}" + debugputs "Broker reports #{nic_info.length} NICs for host" + + nics = Array.new + nics_to_delete = Array.new + + host_db.nics.each do |nic| + found = false + + nic_info.each do |detail| + # if we have a match, then update the database and remove + # the received data to avoid creating a dupe later + @logger.info "Searching for existing record for: #{detail.macaddr.upcase} in host #{host_qmf.hostname}" + if detail.macaddr.upcase == nic.mac + @logger.info "Updating details for: #{detail.interface} [#{nic.mac}]}" + nic.bandwidth = detail.bandwidth + nic.interface_name = detail.interface + nic.save! + found = true + nic_info.delete(detail) + end + end + + # if the record wasn't found, then remove it from the database + unless found + @logger.info "Marking NIC for removal: #{nic.interface_name} [#{nic.mac}]" + nics_to_delete << nic + end + end + + debugputs "Deleting #{nics_to_delete.length} NICs that are no longer part of host #{host_qmf.hostname}" + nics_to_delete.each do |nic| + @logger.info "Removing NIC: #{nic.interface_name} [#{nic.mac}]" + host_db.nics.delete(nic) + end + + # iterate over any nics left and create new records for them. 
+ debugputs "Adding new records for #{nic_info.length} NICs to host #{host_qmf.hostname}" + nic_info.each do |nic| + detail = Nic.new( + 'mac' => nic.macaddr.upcase, + 'bandwidth' => nic.bandwidth, + 'interface_name' => nic.interface, + 'usage_type' => 1) + + host_db.nics << detail + + @logger.info "Added NIC #{nic.interface} with MAC #{nic.macaddr} to host #{host_qmf.hostname}" + end end def check_heartbeats() @@ -436,7 +463,10 @@ class HostRegister < Qpid::Qmf::Console # No heartbeat for 30 seconds.. deal with dead/disconnected agent. debugputs "Agent #{key} timed out!" @heartbeats.delete(key) - agent_disconnected(agent) + + agent_bank = agent.agent_bank + broker_bank = agent.broker_bank + agent_disconnected(agent_bank, broker_bank) end end @@ -461,6 +491,7 @@ class HostRegister < Qpid::Qmf::Console end # Class HostRegister def main() + Thread.abort_on_exception = true hostreg = HostRegister.new() hostreg.check_heartbeats() end diff --git a/src/libvirt-list.rb b/src/libvirt-list.rb index 54e8b7e..c81926a 100755 --- a/src/libvirt-list.rb +++ b/src/libvirt-list.rb @@ -2,21 +2,30 @@ $: << File.join(File.dirname(__FILE__), "./dutils") -require "rubygems" -require "qpid" -require "dutils" +require 'rubygems' +require 'dutils' +require 'qmf' +require 'socket' get_credentials('qpidd') server, port = get_srv('qpidd', 'tcp') raise "Unable to determine qpid server from DNS SRV record" if not server -srv = "amqp://#{server}:#{port}" -puts "Connecting to #{srv}.." 
-s = Qpid::Qmf::Session.new() -b = s.add_broker(srv, :mechanism => 'GSSAPI') +puts "Connecting to #{server}, #{port}" -nodes = s.objects(:class => "node") +settings = Qmf::ConnectionSettings.new +settings.host = server +settings.port = port +# settings.mechanism = 'GSSAPI' +# settings.service = 'qpidd' + +connection = Qmf::Connection.new(settings) +qmfc = Qmf::Console.new +broker = qmfc.add_connection(connection) +broker.wait_for_stable + +nodes = qmfc.objects(Qmf::Query.new(:class => "node")) nodes.each do |node| puts "node: #{node.hostname}" for (key, val) in node.properties @@ -24,7 +33,7 @@ nodes.each do |node| end # Find any domains that on the current node. - domains = s.objects(:class => "domain", 'node' => node.object_id) + domains = qmfc.objects(Qmf::Query.new(:class => "domain", 'node' => node.object_id)) domains.each do |domain| r = domain.getXMLDesc() puts "getXMLDesc() status: #{r.status}" @@ -39,7 +48,7 @@ nodes.each do |node| end end - pools = s.objects(:class => "pool", 'node' => node.object_id) + pools = qmfc.objects(Qmf::Query.new(:class => "pool", 'node' => node.object_id)) pools.each do |pool| puts " pool: #{pool.name}" for (key, val) in pool.properties @@ -54,7 +63,7 @@ nodes.each do |node| end # Find volumes that are part of the pool. 
- volumes = s.objects(:class => "volume", 'pool' => pool.object_id) + volumes = qmfc.objects(Qmf::Query.new(:class => "volume", 'pool' => pool.object_id)) volumes.each do |volume| puts " volume: #{volume.name}" for (key, val) in volume.properties diff --git a/src/matahari-list.rb b/src/matahari-list.rb index ff714c5..8795019 100755 --- a/src/matahari-list.rb +++ b/src/matahari-list.rb @@ -2,21 +2,30 @@ $: << File.join(File.dirname(__FILE__), "./dutils") -require "rubygems" -require "qpid" -require "dutils" +require 'rubygems' +require 'dutils' +require 'qmf' +require 'socket' get_credentials('qpidd') server, port = get_srv('qpidd', 'tcp') raise "Unable to determine qpid server from DNS SRV record" if not server -srv = "amqp://#{server}:#{port}" -puts "Connecting to #{srv}.." -s = Qpid::Qmf::Session.new() -b = s.add_broker(srv, :mechanism => 'GSSAPI') +puts "Connecting to #{server}, #{port}" -hosts = s.objects(:class => "host") +settings = Qmf::ConnectionSettings.new +settings.host = server +settings.port = port +# settings.mechanism = 'GSSAPI' +# settings.service = 'qpidd' + +connection = Qmf::Connection.new(settings) +qmfc = Qmf::Console.new +broker = qmfc.add_connection(connection) +broker.wait_for_stable + +hosts = qmfc.objects(Qmf::Query.new(:class => 'host')) hosts.each do |host| puts "HOST: #{host.hostname}" for (key, val) in host.properties @@ -24,18 +33,18 @@ hosts.each do |host| end # List cpus for current host - cpus = s.objects(:class => "cpu", 'host' => host.object_id) + cpus = qmfc.objects(Qmf::Query.new(:class => 'cpu', 'host' => host.object_id)) cpus.each do |cpu| - puts " CPU:" + puts ' CPU:' for (key, val) in cpu.properties puts " property: #{key}, #{val}" end end # cpus.each # List nics for current host - nics = s.objects(:class => "nic", 'host' => host.object_id) + nics = qmfc.objects(Qmf::Query.new(:class => 'nic', 'host' => host.object_id)) nics.each do |nic| - puts " NIC: " + puts ' NIC: ' for (key, val) in nic.properties puts " property: 
#{key}, #{val}" end diff --git a/src/task-omatic/task_storage.rb b/src/task-omatic/task_storage.rb index 77b0166..d698777 100644 --- a/src/task-omatic/task_storage.rb +++ b/src/task-omatic/task_storage.rb @@ -73,7 +73,7 @@ def task_storage_cobbler_setup(db_vm) unless found # Create a new transient NFS storage volume # This volume is *not* persisted. - image_volume = StorageVolume.factory("NFS", :filename => filename) + image_volume = StorageVolume.factory("NFS", :filename => filename, :key => filename) image_volume.storage_pool image_pool = StoragePool.factory(StoragePool::NFS) @@ -116,13 +116,14 @@ class LibvirtPool @xml.root.elements["target"].add_element("path") end - def connect(session, node) - pools = session.objects(:class => 'pool', 'node' => node.object_id) + def connect(qmfc, node) + pools = qmfc.objects(:class => 'pool', 'node' => node.object_id) pools.each do |pool| result = pool.getXMLDesc raise "Error getting xml description of pool: #{result.text}" unless result.status == 0 xml_desc = result.description + if self.xmlequal?(Document.new(xml_desc).root) @remote_pool = pool @logger.debug("Found existing storage pool #{pool.name} on host: #{node.hostname}") @@ -134,7 +135,8 @@ class LibvirtPool @logger.debug("Defining new storage pool: #{@xml.to_s} on host: #{node.hostname}") result = node.storagePoolDefineXML(@xml.to_s, :timeout => 60 * 10) raise "Error creating pool: #{result.text}" unless result.status == 0 - @remote_pool = session.object(:object_id => result.pool) + @remote_pool = qmfc.object(:object_id => result.pool) + obj_list = qmfc.objects(:object_id => result.pool) raise "Error finding newly created remote pool." 
unless @remote_pool # we need this because we don't want to "build" LVM pools, which would diff --git a/src/task-omatic/taskomatic.rb b/src/task-omatic/taskomatic.rb index ece60dc..13cf5af 100755 --- a/src/task-omatic/taskomatic.rb +++ b/src/task-omatic/taskomatic.rb @@ -23,7 +23,7 @@ $: << File.join(File.dirname(__FILE__), "../dutils") $: << File.join(File.dirname(__FILE__), ".") require 'rubygems' -require 'qpid' +require 'qmf' require 'monitor' require 'dutils' require 'optparse' @@ -115,10 +115,15 @@ class TaskOmatic sleepy *= 2 if sleepy < 120 end - @session = Qpid::Qmf::Session.new(:manage_connections => true) - @logger.info "Connecting to amqp://#{server}:#{port}" - @broker = @session.add_broker("amqp://#{server}:#{port}", :mechanism => 'GSSAPI') + settings = Qmf::ConnectionSettings.new + settings.host = server + settings.port = port + settings.sendUserId = false + @connection = Qmf::Connection.new(settings) + @qmfc = Qmf::Console.new + @broker = @qmfc.add_connection(@connection) + @broker.wait_for_stable end def ensure_credentials() @@ -141,13 +146,13 @@ class TaskOmatic # vm won't be returned. I think that's supposed to be for migration # but it could break creation of VMs in certain conditions.. - vm = @session.object(:class => "domain", 'uuid' => db_vm.uuid) + vm = @qmfc.object(:class => "domain", 'uuid' => db_vm.uuid) db_vm.vm_resource_pool.get_hardware_pool.hosts.each do |curr| # Now each of 'curr' is in the right hardware pool.. # now we check them out. 
- node = @session.object(:class => "node", 'hostname' => curr.hostname) + node = @qmfc.object(:class => "node", 'hostname' => curr.hostname) next unless node # So now we expect if the node was found it's alive and well, then @@ -205,12 +210,12 @@ class TaskOmatic # activate the underlying physical device, and then do the logical one if db_volume[:type] == "LvmStorageVolume" phys_libvirt_pool = get_libvirt_lvm_pool_from_volume(db_volume, @logger) - phys_libvirt_pool.connect(@session, node) + phys_libvirt_pool.connect(@qmfc, node) end @logger.debug "Verifying mount of pool #{db_pool.ip_addr}:#{db_pool.type}:#{db_pool.target}:#{db_pool.export_path}" libvirt_pool = LibvirtPool.factory(db_pool, @logger) - libvirt_pool.connect(@session, node) + libvirt_pool.connect(@qmfc, node) # OK, the pool should be all set. The last thing we need to do is get # the path based on the volume key @@ -220,12 +225,12 @@ class TaskOmatic @logger.debug "Pool mounted: #{pool.name}; state: #{pool.state}" - volume = @session.object(:class => 'volume', + volume = @qmfc.object(:class => 'volume', 'key' => volume_key, 'storagePool' => pool.object_id) if volume == nil @logger.info "Unable to find volume by key #{volume_key} attached to pool #{pool.name}, trying by filename..." - volume = @session.object(:class => 'volume', + volume = @qmfc.object(:class => 'volume', 'name' => db_volume.filename, 'storagePool' => pool.object_id) raise "Unable to find volume by key (#{volume_key}) or filename (#{db_volume.filename}), giving up." unless volume @@ -254,11 +259,11 @@ class TaskOmatic # This is rather silly because we only destroy pools if there are no # more vms on the node. We should be reference counting the pools # somehow so we know when they are no longer in use. 
- vms = @session.objects(:class => 'domain', 'node' => node.object_id) + vms = @qmfc.objects(:class => 'domain', 'node' => node.object_id) if vms.length > 0 return end - pools = @session.objects(:class => 'pool', 'node' => node.object_id) + pools = @qmfc.objects(:class => 'pool', 'node' => node.object_id) # We do this in two passes, first undefine/destroys LVM pools, then # we do physical pools. @@ -281,13 +286,13 @@ class TaskOmatic def task_shutdown_or_destroy_vm(task, action) @logger.info "starting task_shutdown_or_destroy_vm" db_vm = task.vm - vm = @session.object(:class => 'domain', 'uuid' => db_vm.uuid) + vm = @qmfc.object(:class => 'domain', 'uuid' => db_vm.uuid) if !vm @logger.error "VM already shut down?" return end - node = @session.object(:object_id => vm.node) + node = @qmfc.object(:object_id => vm.node) raise "Unable to get node that vm is on??" unless node if vm.state == "shutdown" or vm.state == "shutoff" @@ -337,7 +342,7 @@ class TaskOmatic @logger.info "starting task_start_vm" db_vm = find_vm(task, false) - vm = @session.object(:class => "domain", 'uuid' => db_vm.uuid) + vm = @qmfc.object(:class => "domain", 'uuid' => db_vm.uuid) if vm case vm.state @@ -351,7 +356,7 @@ class TaskOmatic end db_host = find_capable_host(db_vm) - node = @session.object(:class => "node", 'hostname' => db_host.hostname) + node = @qmfc.object(:class => "node", 'hostname' => db_host.hostname) raise "Unable to find host #{db_host.hostname} to create VM on." unless node @logger.info("VM will be started on node #{node.hostname}") @@ -400,7 +405,7 @@ class TaskOmatic result = node.domainDefineXML(xml.to_s) raise "Error defining virtual machine: #{result.text}" unless result.status == 0 - domain = @session.object(:object_id => result.domain) + domain = @qmfc.object(:object_id => result.domain) raise "Cannot find domain on host #{db_host.hostname}, cannot start virtual machine." 
unless domain result = domain.create @@ -432,7 +437,7 @@ class TaskOmatic def task_suspend_vm(task) @logger.info "starting task_suspend_vm" db_vm = task.vm - dom = @session.object(:class => 'domain', 'uuid' => db_vm.uuid) + dom = @qmfc.object(:class => 'domain', 'uuid' => db_vm.uuid) raise "Unable to locate VM to suspend" unless dom if dom.state != "running" and dom.state != "blocked" @@ -450,7 +455,7 @@ class TaskOmatic def task_resume_vm(task) @logger.info "starting task_resume_vm" db_vm = task.vm - dom = @session.object(:class => 'domain', 'uuid' => db_vm.uuid) + dom = @qmfc.object(:class => 'domain', 'uuid' => db_vm.uuid) raise "Unable to locate VM to resume" unless dom if dom.state == "running" @@ -478,7 +483,7 @@ class TaskOmatic # need to put it on the storage server and mark it in the database # where the image is stored. db_vm = task.vm - dom = @session.object(:class => 'domain', 'uuid' => db_vm.uuid) + dom = @qmfc.object(:class => 'domain', 'uuid' => db_vm.uuid) raise "Unable to locate VM to save" unless dom filename = "/tmp/#{dom.uuid}.save" @@ -495,7 +500,7 @@ class TaskOmatic # FIXME: This is also broken, see task_save_vm FIXME. db_vm = task.vm - dom = @session.object(:class => 'domain', 'uuid' => db_vm.uuid) + dom = @qmfc.object(:class => 'domain', 'uuid' => db_vm.uuid) raise "Unable to locate VM to restore" unless dom filename = "/tmp/#{dom.uuid}.save" @@ -508,9 +513,9 @@ class TaskOmatic def migrate(db_vm, dest = nil) - vm = @session.object(:class => "domain", 'uuid' => db_vm.uuid) + vm = @qmfc.object(:class => "domain", 'uuid' => db_vm.uuid) raise "Unable to find VM to migrate" unless vm - src_node = @session.object(:object_id => vm.node) + src_node = @qmfc.object(:object_id => vm.node) raise "Unable to find node that VM is on??" 
unless src_node @logger.info "Migrating domain lookup complete, domain is #{vm}" @@ -528,7 +533,7 @@ class TaskOmatic db_dst_host = find_capable_host(db_vm) end - dest_node = @session.object(:class => 'node', 'hostname' => db_dst_host.hostname) + dest_node = @qmfc.object(:class => 'node', 'hostname' => db_dst_host.hostname) raise "Unable to find host #{db_dst_host.hostname} to migrate to." unless dest_node volumes = [] @@ -589,7 +594,7 @@ class TaskOmatic next end puts "searching for node with hostname #{host.hostname}" - node = @session.object(:class => 'node', 'hostname' => host.hostname) + node = @qmfc.object(:class => 'node', 'hostname' => host.hostname) puts "node returned is #{node}" return node if node end @@ -643,13 +648,13 @@ class TaskOmatic @logger.info("refresh being done on node #{node.hostname}") phys_libvirt_pool = LibvirtPool.factory(db_pool_phys, @logger) - phys_libvirt_pool.connect(@session, node) + phys_libvirt_pool.connect(@qmfc, node) db_pool_phys.state = StoragePool::STATE_AVAILABLE db_pool_phys.save! begin # First we do the physical volumes. - volumes = @session.objects(:class => 'volume', + volumes = @qmfc.objects(:class => 'volume', 'storagePool' => phys_libvirt_pool.remote_pool.object_id) volumes.each do |volume| storage_volume = StorageVolume.factory(db_pool_phys.get_type_label) @@ -696,9 +701,9 @@ class TaskOmatic physical_vol.save! 
lvm_libvirt_pool = LibvirtPool.factory(lvm_db_pool, @logger) - lvm_libvirt_pool.connect(@session, node) + lvm_libvirt_pool.connect(@qmfc, node) - lvm_volumes = @session.objects(:class => 'volume', + lvm_volumes = @qmfc.objects(:class => 'volume', 'storagePool' => lvm_libvirt_pool.remote_pool.object_id) lvm_volumes.each do |lvm_volume| @@ -733,16 +738,16 @@ class TaskOmatic begin if db_volume[:type] == "LvmStorageVolume" phys_libvirt_pool = get_libvirt_lvm_pool_from_volume(db_volume, @logger) - phys_libvirt_pool.connect(@session, node) + phys_libvirt_pool.connect(@qmfc, node) end begin libvirt_pool = LibvirtPool.factory(db_pool, @logger) begin - libvirt_pool.connect(@session, node) + libvirt_pool.connect(@qmfc, node) volume_id = libvirt_pool.create_vol(*db_volume.volume_create_params) - volume = @session.object(:object_id => volume_id) + volume = @qmfc.object(:object_id => volume_id) raise "Unable to find newly created volume" unless volume @logger.debug " volume:" @@ -776,7 +781,7 @@ class TaskOmatic # I currently refresh ALL storage pools at this time as it # shouldn't be a long operation and it doesn't hurt to refresh # them once in a while. - pools = @session.objects(:class => 'pool') + pools = @qmfc.objects(:class => 'pool') pools.each do |pool| result = pool.refresh @logger.info "Problem refreshing pool (you can probably ignore this): #{result.text}" unless result.status == 0 @@ -798,16 +803,16 @@ class TaskOmatic begin if db_volume[:type] == "LvmStorageVolume" phys_libvirt_pool = get_libvirt_lvm_pool_from_volume(db_volume, @logger) - phys_libvirt_pool.connect(@session, node) + phys_libvirt_pool.connect(@qmfc, node) @logger.info "connected to lvm pool.." 
end begin libvirt_pool = LibvirtPool.factory(db_pool, @logger) - libvirt_pool.connect(@session, node) + libvirt_pool.connect(@qmfc, node) begin - volume = @session.object(:class => 'volume', + volume = @qmfc.object(:class => 'volume', 'storagePool' => libvirt_pool.remote_pool.object_id, 'key' => db_volume.key) @logger.error "Unable to find volume to delete" unless volume @@ -861,7 +866,7 @@ class TaskOmatic was_disconnected = false loop do - if not @broker.connected? + if not @connection.connected? @logger.info("Cannot implement tasks, not connected to broker. Sleeping.") sleep(@sleeptime * 3) was_disconnected = true @@ -870,7 +875,7 @@ class TaskOmatic @logger.info("Reconnected, resuming task checking..") if was_disconnected was_disconnected = false - @session.object(:class => 'agent') + @qmfc.object(:class => 'agent') tasks = Array.new begin -- 1.6.2.5 From imain at redhat.com Wed Nov 4 22:23:05 2009 From: imain at redhat.com (Ian Main) Date: Wed, 4 Nov 2009 14:23:05 -0800 Subject: [Ovirt-devel] [PATCH server] Missed this for QMF update. In-Reply-To: <1257364777-19801-1-git-send-email-imain@redhat.com> References: <1257364777-19801-1-git-send-email-imain@redhat.com> Message-ID: <1257373385-2957-1-git-send-email-imain@redhat.com> Somehow I missed some changes for host-register. These are required for the new API. 
Signed-off-by: Ian Main --- src/host-browser/host-register.rb | 48 ++++++++++++------------------------ 1 files changed, 16 insertions(+), 32 deletions(-) diff --git a/src/host-browser/host-register.rb b/src/host-browser/host-register.rb index e57b077..e49f08e 100755 --- a/src/host-browser/host-register.rb +++ b/src/host-browser/host-register.rb @@ -134,24 +134,19 @@ class HostRegister < Qmf::ConsoleHandler def agent_heartbeat(agent, timestamp) return if agent == nil synchronize do - bank_key = "#{agent.agent_bank}.#{agent.broker_bank}" - @heartbeats[bank_key] = [agent, timestamp] + @heartbeats[agent.key] = [agent, timestamp] end end def agent_added(agent) - agent_bank = agent.agent_bank - broker_bank = agent.broker_bank - key = "#{agent_bank}.#{broker_bank}" - puts "AGENT ADDED: #{key}" - debugputs "Agent #{agent_bank}.#{broker_bank} connected!" - agent_connected(agent_bank, broker_bank) + debugputs "Agent #{agent.key}.connected!" + agent_connected(agent) host_list = @qmfc.objects(:package => 'com.redhat.matahari', :class => 'host') puts "host_list length is #{host_list.length}" host_list.each do |host| - if host.object_id.agent_bank == agent_bank + if host.object_id.agent_key == agent.key # Grab the cpus and nics associated before we take any locks cpu_info = @qmfc.objects(:package => 'com.redhat.matahari', :class => 'cpu', 'host' => host.object_id) nic_info = @qmfc.objects(:package => 'com.redhat.matahari', :class => 'nic', 'host' => host.object_id) @@ -163,13 +158,9 @@ class HostRegister < Qmf::ConsoleHandler end def agent_deleted(agent) - agent_bank = agent.agent_bank - broker_bank = agent.broker_bank - key = "#{agent_bank}.#{broker_bank}" - - debugputs "Agent #{key} disconnected!" - @heartbeats.delete(key) - agent_disconnected(agent_bank, broker_bank) + debugputs "Agent #{agent.key} disconnected!" 
+ @heartbeats.delete(agent.key) + agent_disconnected(agent) end def object_update(obj, hasProps, hasStats) @@ -180,17 +171,15 @@ class HostRegister < Qmf::ConsoleHandler # Fix a race where the properties of an object are published by a reconnecting # host (thus marking it active) right before the heartbeat timer considers it dead # (and marks it inactive) - @heartbeats.delete("#{obj.object_id.agent_bank}.#{obj.object_id.broker_bank}") + @heartbeats.delete("#{obj.object_id.agent_key}") end # def object_props ###### Handlers for QMF Callbacks ###### - def agent_disconnected(agent_bank, broker_bank) + def agent_disconnected(agent) synchronize do - debugputs "Marking objects for agent #{broker_bank}.#{agent_bank} inactive" + debugputs "Marking objects for agent #{agent.key} inactive" @cached_hosts.keys.each do |objkey| - if @cached_hosts[objkey][:broker_bank] == broker_bank and - @cached_hosts[objkey][:agent_bank] == agent_bank - + if @cached_hosts[objkey][:agent_key] == agent.key cached_host = @cached_hosts[objkey] cached_host[:active] = false @logger.info "Host #{cached_host['hostname']} marked inactive" @@ -199,13 +188,11 @@ class HostRegister < Qmf::ConsoleHandler end # synchronize do end - def agent_connected(agent_bank, broker_bank) + def agent_connected(agent) synchronize do - debugputs "Marking objects for agent #{broker_bank}.#{agent_bank} active" + debugputs "Marking objects for agent #{agent.key} active" @cached_hosts.keys.each do |objkey| - if @cached_hosts[objkey][:broker_bank] == broker_bank and - @cached_hosts[objkey][:agent_bank] == agent_bank - + if @cached_hosts[objkey][:agent_key] == agent.key cached_host = @cached_hosts[objkey] cached_host[:active] = true @logger.info "Host #{cached_host['hostname']} marked active" @@ -331,8 +318,7 @@ class HostRegister < Qmf::ConsoleHandler # By now, we either rekeyed a stale entry or started a new one. # Update the bookkeeping parts of the data. 
cached_host[:obj_key] = obj.object_id.to_s - cached_host[:broker_bank] = obj.object_id.broker_bank - cached_host[:agent_bank] = obj.object_id.agent_bank + cached_host[:agent_key] = obj.object_id.agent_key end # not already_cache # For now, only cache identity information (leave CPU/NIC/etc. to db only) @@ -464,9 +450,7 @@ class HostRegister < Qmf::ConsoleHandler debugputs "Agent #{key} timed out!" @heartbeats.delete(key) - agent_bank = agent.agent_bank - broker_bank = agent.broker_bank - agent_disconnected(agent_bank, broker_bank) + agent_disconnected(agent) end end -- 1.6.2.5 From imain at redhat.com Wed Nov 4 22:24:22 2009 From: imain at redhat.com (Ian Main) Date: Wed, 4 Nov 2009 14:24:22 -0800 Subject: [Ovirt-devel] [PATCH server] Update server spec to require new QMF. In-Reply-To: <1257364777-19801-1-git-send-email-imain@redhat.com> References: <1257364777-19801-1-git-send-email-imain@redhat.com> Message-ID: <1257373462-3018-1-git-send-email-imain@redhat.com> Update server spec to require new QMF. ruby-qpid package is no longer required. These newer qmf packages are now in the ovirt repo and will be going into fedora updates shortly. Signed-off-by: Ian Main --- ovirt-server.spec.in | 3 +-- 1 files changed, 1 insertions(+), 2 deletions(-) diff --git a/ovirt-server.spec.in b/ovirt-server.spec.in index ad5ace1..02eb94c 100644 --- a/ovirt-server.spec.in +++ b/ovirt-server.spec.in @@ -41,9 +41,8 @@ Requires: rrdtool-ruby Requires: iscsi-initiator-utils Requires: cyrus-sasl-gssapi Requires: qpidd -Requires: ruby-qpid >= 0.5.776856 Requires: qpidc -Requires: qmf +Requires: qmf >= 0.5.829175-2 Requires: ruby-qmf Requires(post): /sbin/chkconfig Requires(preun): /sbin/chkconfig -- 1.6.2.5 From imain at redhat.com Wed Nov 4 22:28:10 2009 From: imain at redhat.com (Ian Main) Date: Wed, 4 Nov 2009 14:28:10 -0800 Subject: [Ovirt-devel] Re: [PATCH server] Update daemons to use new QMF. 
In-Reply-To: <1257364777-19801-1-git-send-email-imain@redhat.com> References: <1257364777-19801-1-git-send-email-imain@redhat.com> Message-ID: <20091104142810.4017418f@tp.mains.priv> On Wed, 4 Nov 2009 11:59:37 -0800 Ian Main wrote: > This patch updates dbomatic, taskomatic and host-register to use the > new C++ wrapped ruby QMF bindings. It also fixes a couple of bugs > along the way including the 0 cpu bug for host-register. This is a > compilation of work done by myself and Arjun Roy. > > Signed-off-by: Ian Main If anyone wants to test these, the best way is just to install the new qmf rpms from the ovirt repo, apply patches and rebuild only the server. The node will rebuild on f11/x86_64 only because libvirt-qpid and matahari had to be rebuilt and I afaict there's no way to use koji to do that so it's available on all architectures. The new qmf packages are going into fedora updates shortly so once they are in we can build new packages and all will be good again for the node. Ian From dpierce at redhat.com Thu Nov 5 14:11:11 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Thu, 5 Nov 2009 09:11:11 -0500 Subject: [Ovirt-devel] Rebased patches... Message-ID: <1257430273-25855-1-git-send-email-dpierce@redhat.com> I've rebased these patches to ensure they're up to date. From dpierce at redhat.com Thu Nov 5 14:11:12 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Thu, 5 Nov 2009 09:11:12 -0500 Subject: [Ovirt-devel] [PATCH 1/2] Provides a new storage administration system to the managed node. In-Reply-To: <1257430273-25855-1-git-send-email-dpierce@redhat.com> References: <1257430273-25855-1-git-send-email-dpierce@redhat.com> Message-ID: <1257430273-25855-2-git-send-email-dpierce@redhat.com> Users can now: * Add a new storage pool. * Delete a storage pool. * Start and stop storage pools. * Add a new storage volume. * Delete a storage volume. * List existing storage pools, with details. Signed-off-by: Darryl L. 
Pierce --- nodeadmin/output.log | 1 + nodeadmin/setup.py | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 0 deletions(-) create mode 100644 nodeadmin/output.log create mode 100644 nodeadmin/setup.py diff --git a/nodeadmin/output.log b/nodeadmin/output.log new file mode 100644 index 0000000..e8302cb --- /dev/null +++ b/nodeadmin/output.log @@ -0,0 +1 @@ +libvir: Storage error : Storage pool not found: no pool with matching name 'iscsipool' diff --git a/nodeadmin/setup.py b/nodeadmin/setup.py new file mode 100644 index 0000000..9af2752 --- /dev/null +++ b/nodeadmin/setup.py @@ -0,0 +1,46 @@ +# setup.py - Copyright (C) 2009 Red Hat, Inc. +# Written by Darryl L. Pierce +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +# MA 02110-1301, USA. A copy of the GNU General Public License is +# also available at http://www.gnu.org/copyleft/gpl.html. 
+ +from setuptools import setup, find_packages + +setup(name = "nodeadmin", + version = "1.0.3", + package_dir = {'nodeadmin': 'nodeadmin'}, + packages = find_packages('.'), + entry_points = { + 'console_scripts': [ + 'nodeadmin = nodeadmin.nodeadmin:NodeAdmin', + 'addvm = nodeadmin.adddomain:AddDomain', + 'startvm = nodeadmin.startdomain:StartDomain', + 'stopvm = nodeadmin.stopdomain:StopDomain', + 'rmvm = nodeadmin.removedomain:RemoveDomain', + 'createuser = nodeadmin.createuser:CreateUser', + 'listvms = nodeadmin.listdomains:ListDomains', + 'definenet = nodeadmin.definenet:DefineNetwork', + 'createnet = nodeadmin.createnetwork:CreateNetwork', + 'destroynet = nodeadmin.destroynetwork:DestroyNetwork', + 'undefinenet = nodeadmin.undefinenetwork:UndefineNetwork', + 'listnets = nodeadmin.listnetworks:ListNetworks', + 'addpool = nodeadmin.addpool:AddStoragePool', + 'rmpool = nodeadmin.removepool:RemoveStoragePool', + 'startpool = nodeadmin.startpool:StartStoragePool', + 'stoppool = nodeadmin.stoppool:StopStoragePool', + 'addvolume = nodeadmin.addvolume:AddStorageVolume', + 'rmvolume = nodeadmin.removevolume:RemoveStorageVolume', + 'listpools = nodeadmin.listpools:ListPools'] + }) -- 1.6.2.5 From dpierce at redhat.com Thu Nov 5 14:11:13 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Thu, 5 Nov 2009 09:11:13 -0500 Subject: [Ovirt-devel] [PATCH 2/2] Refactor domain storage setup to use pool and volume selection screens. In-Reply-To: <1257430273-25855-2-git-send-email-dpierce@redhat.com> References: <1257430273-25855-1-git-send-email-dpierce@redhat.com> <1257430273-25855-2-git-send-email-dpierce@redhat.com> Message-ID: <1257430273-25855-3-git-send-email-dpierce@redhat.com> Now, when the user elects to use managed storage, they're show the list of available storage pools. Then, after selecting one, the user is shown the list of volumes on that pool. These are then used to create the domain. Signed-off-by: Darryl L. 
Pierce --- Makefile.am | 1 + nodeadmin/adddomain.py | 186 ++++++++++++++++++++++++++------------------ nodeadmin/domainconfig.py | 17 +++- nodeadmin/libvirtworker.py | 34 ++++---- nodeadmin/setup.py | 46 ----------- 5 files changed, 142 insertions(+), 142 deletions(-) delete mode 100644 nodeadmin/setup.py diff --git a/Makefile.am b/Makefile.am index 55ef277..e712d6a 100644 --- a/Makefile.am +++ b/Makefile.am @@ -48,6 +48,7 @@ EXTRA_DIST = \ nodeadmin/netmenu.py \ nodeadmin/nodeadmin.py \ nodeadmin/nodemenu.py \ + nodeadmin/poolconfig.py \ nodeadmin/removedomain.py \ nodeadmin/removepool.py \ nodeadmin/removevolume.py \ diff --git a/nodeadmin/adddomain.py b/nodeadmin/adddomain.py index bb06a62..34aa59c 100755 --- a/nodeadmin/adddomain.py +++ b/nodeadmin/adddomain.py @@ -37,10 +37,11 @@ OS_VARIANT_PAGE = 12 RAM_CPU_PAGE = 13 ENABLE_STORAGE_PAGE = 14 LOCAL_STORAGE_PAGE = 15 -MANAGED_STORAGE_PAGE = 16 -BRIDGE_PAGE = 17 -VIRT_DETAILS_PAGE = 18 -CONFIRM_PAGE = 19 +SELECT_POOL_PAGE = 16 +SELECT_VOLUME_PAGE = 17 +BRIDGE_PAGE = 18 +VIRT_DETAILS_PAGE = 19 +CONFIRM_PAGE = 20 LOCATION="location" KICKSTART="kickstart" @@ -58,24 +59,25 @@ class DomainConfigScreen(ConfigScreen): self.__config.set_virt_type(self.get_libvirt().get_default_virt_type()) def get_elements_for_page(self, screen, page): - if page == VM_DETAILS_PAGE: return self.get_vm_details_page(screen) - elif page == LOCAL_INSTALL_PAGE: return self.get_local_install_page(screen) - elif page == SELECT_CDROM_PAGE: return self.get_select_cdrom_page(screen) - elif page == SELECT_ISO_PAGE: return self.get_select_iso_page(screen) - elif page == NETWORK_INSTALL_PAGE: return self.get_network_install_page(screen) - elif page == OS_TYPE_PAGE: return self.get_os_type_page(screen) - elif page == OS_VARIANT_PAGE: return self.get_os_variant_page(screen) - elif page == RAM_CPU_PAGE: return self.get_ram_and_cpu_page(screen) - elif page == ENABLE_STORAGE_PAGE: return self.get_enable_storage_page(screen) - elif page == 
LOCAL_STORAGE_PAGE: return self.get_local_storage_page(screen) - elif page == MANAGED_STORAGE_PAGE: return self.get_managed_storage_page(screen) - elif page == BRIDGE_PAGE: return self.get_bridge_page(screen) - elif page == VIRT_DETAILS_PAGE: return self.get_virt_details_page(screen) - elif page == CONFIRM_PAGE: return self.get_confirm_page(screen) + if page is VM_DETAILS_PAGE: return self.get_vm_details_page(screen) + elif page is LOCAL_INSTALL_PAGE: return self.get_local_install_page(screen) + elif page is SELECT_CDROM_PAGE: return self.get_select_cdrom_page(screen) + elif page is SELECT_ISO_PAGE: return self.get_select_iso_page(screen) + elif page is NETWORK_INSTALL_PAGE: return self.get_network_install_page(screen) + elif page is OS_TYPE_PAGE: return self.get_os_type_page(screen) + elif page is OS_VARIANT_PAGE: return self.get_os_variant_page(screen) + elif page is RAM_CPU_PAGE: return self.get_ram_and_cpu_page(screen) + elif page is ENABLE_STORAGE_PAGE: return self.get_enable_storage_page(screen) + elif page is LOCAL_STORAGE_PAGE: return self.get_local_storage_page(screen) + elif page is SELECT_POOL_PAGE: return self.get_select_pool_page(screen) + elif page is SELECT_VOLUME_PAGE: return self.get_select_volume_page(screen) + elif page is BRIDGE_PAGE: return self.get_bridge_page(screen) + elif page is VIRT_DETAILS_PAGE: return self.get_virt_details_page(screen) + elif page is CONFIRM_PAGE: return self.get_confirm_page(screen) return [] def validate_input(self, page, errors): - if page == VM_DETAILS_PAGE: + if page is VM_DETAILS_PAGE: if len(self.__guest_name.value()) > 0: if self.get_libvirt().domain_exists(self.__guest_name.value()): errors.append("Guest name '%s' is already in use." 
% self.__guest_name.value()) @@ -83,12 +85,12 @@ class DomainConfigScreen(ConfigScreen): return True else: errors.append("Guest name must be a string between 0 and 50 characters.") - elif page == LOCAL_INSTALL_PAGE: + elif page is LOCAL_INSTALL_PAGE: if self.__install_source.getSelection() == DomainConfig.INSTALL_SOURCE_CDROM: return True elif self.__install_source.getSelection() == DomainConfig.INSTALL_SOURCE_ISO: return True - elif page == SELECT_CDROM_PAGE: + elif page is SELECT_CDROM_PAGE: if self.__install_media.getSelection() != None: if len(self.get_hal().list_installable_volumes()) == 0: errors.append("No installable media is available.") @@ -96,7 +98,7 @@ class DomainConfigScreen(ConfigScreen): return True else: errors.append("You must select an install media.") - elif page == SELECT_ISO_PAGE: + elif page is SELECT_ISO_PAGE: if len(self.__iso_path.value()) > 0: if os.path.exists(self.__iso_path.value()): if os.path.isfile(self.__iso_path.value()): @@ -108,14 +110,14 @@ class DomainConfigScreen(ConfigScreen): errors.append(self.__iso_path.value()) else: errors.append("An install media selection is required.") - elif page == NETWORK_INSTALL_PAGE: + elif page is NETWORK_INSTALL_PAGE: if len(self.__install_url.value()) > 0: return True else: errors.append("An install tree is required.") - elif page == OS_TYPE_PAGE: return True - elif page == OS_VARIANT_PAGE: return True - elif page == RAM_CPU_PAGE: + elif page is OS_TYPE_PAGE: return True + elif page is OS_VARIANT_PAGE: return True + elif page is RAM_CPU_PAGE: if (len(self.__memory.value()) > 0 and len(self.__cpus.value()) > 0) \ and (int(self.__memory.value()) > 0 and int(self.__cpus.value()) > 0): return True @@ -128,8 +130,8 @@ class DomainConfigScreen(ConfigScreen): errors.append("A value must be entered for CPUs.") elif int(self.__cpus.value()) <= 0: errors.append("A positive integer value must be entered for memory.") - elif page == ENABLE_STORAGE_PAGE: return True - elif page == LOCAL_STORAGE_PAGE: + 
elif page is ENABLE_STORAGE_PAGE: return True + elif page is LOCAL_STORAGE_PAGE: if len(self.__storage_size.value()) > 0: if float(self.__storage_size.value()) > 0: return True @@ -137,12 +139,17 @@ class DomainConfigScreen(ConfigScreen): errors.append("A positive value must be entered for the storage size.") else: errors.append("A value must be entered for the storage size.") - elif page == MANAGED_STORAGE_PAGE: - if self.__existing_storage.getSelection() is not None: + elif page is SELECT_POOL_PAGE: + if self.__storage_pool.getSelection() is not None: + return True + else: + errors.append("Please select a storage pool.") + elif page is SELECT_VOLUME_PAGE: + if self.__storage_volume.getSelection() is not None: return True else: errors.append("Please select a storage volume.") - elif page == BRIDGE_PAGE: + elif page is BRIDGE_PAGE: if self.__network_bridges.getSelection() != None: if len(self.__mac_address.value()) > 0: # TODO: regex check the format @@ -151,62 +158,66 @@ class DomainConfigScreen(ConfigScreen): errors.append("MAC address must be supplied.") else: errors.append("A network bridge must be selected.") - elif page == VIRT_DETAILS_PAGE: + elif page is VIRT_DETAILS_PAGE: if self.__virt_types.getSelection() != None and self.__architectures.getSelection() != None: return True if self.__virt_types.getSelection() is None: errors.append("Please select a virtualization type.") if self.__architectures.getSelection() is None: errors.append("Please selection an architecture.") - elif page == CONFIRM_PAGE: return True + elif page is CONFIRM_PAGE: return True return False def process_input(self, page): - if page == VM_DETAILS_PAGE: + if page is VM_DETAILS_PAGE: self.__config.set_guest_name(self.__guest_name.value()) self.__config.set_install_type(self.__install_type.getSelection()) - elif page == LOCAL_INSTALL_PAGE: + elif page is LOCAL_INSTALL_PAGE: self.__config.set_use_cdrom_source(self.__install_source.getSelection() == DomainConfig.INSTALL_SOURCE_CDROM) - elif 
page == SELECT_CDROM_PAGE: + elif page is SELECT_CDROM_PAGE: self.__config.set_install_media(self.__install_media.getSelection()) - elif page == SELECT_ISO_PAGE: + elif page is SELECT_ISO_PAGE: self.__config.set_iso_path(self.__iso_path.value()) - elif page == NETWORK_INSTALL_PAGE: + elif page is NETWORK_INSTALL_PAGE: self.__config.set_install_url(self.__install_url.value()) self.__config.set_kickstart_url(self.__kickstart_url.value()) self.__config.set_kernel_options(self.__kernel_options.value()) - elif page == OS_TYPE_PAGE: + elif page is OS_TYPE_PAGE: self.__config.set_os_type(self.__os_types.getSelection()) - elif page == OS_VARIANT_PAGE: + elif page is OS_VARIANT_PAGE: self.__config.set_os_variant(self.__os_variants.getSelection()) - elif page == RAM_CPU_PAGE: + elif page is RAM_CPU_PAGE: self.__config.set_memory(int(self.__memory.value())) self.__config.set_cpus(int(self.__cpus.value())) - elif page == ENABLE_STORAGE_PAGE: + elif page is ENABLE_STORAGE_PAGE: self.__config.set_enable_storage(self.__enable_storage.value()) if self.__storage_type.getSelection() == DomainConfig.NEW_STORAGE: self.__config.set_use_local_storage(True) elif self.__storage_type.getSelection() == DomainConfig.EXISTING_STORAGE: self.__config.set_use_local_storage(False) - elif page == LOCAL_STORAGE_PAGE: + elif page is LOCAL_STORAGE_PAGE: self.__config.set_storage_size(float(self.__storage_size.value())) self.__config.set_allocate_storage(self.__allocate_storage.value()) - elif page == MANAGED_STORAGE_PAGE: + elif page is SELECT_POOL_PAGE: self.__config.set_use_local_storage(False) - self.__config.set_existing_storage(self.__existing_storage.getSelection()) - self.__config.set_storage_size(self.get_libvirt().get_storage_size(self.__existing_storage.getSelection())) - elif page == BRIDGE_PAGE: + self.__config.set_storage_pool(self.__storage_pool.getSelection()) + elif page is SELECT_VOLUME_PAGE: + self.__config.set_storage_volume(self.__storage_volume.getSelection()) + volume = 
self.get_libvirt().get_storage_volume(self.__config.get_storage_pool(), + self.__config.get_storage_volume()) + self.__config.set_storage_size(volume.info()[1] / 1024.0 ** 3) + elif page is BRIDGE_PAGE: self.__config.set_network_bridge(self.__network_bridges.getSelection()) - elif page == VIRT_DETAILS_PAGE: + elif page is VIRT_DETAILS_PAGE: self.__config.set_virt_type(self.__virt_types.getSelection()) self.__config.set_architecture(self.__architectures.getSelection()) - elif page == CONFIRM_PAGE: + elif page is CONFIRM_PAGE: self.get_libvirt().define_domain(self.__config, CreateMeter()) self.set_finished() def get_back_page(self, page): result = page - if page == OS_TYPE_PAGE: + if page is OS_TYPE_PAGE: install_type = self.__config.get_install_type() if install_type == DomainConfig.LOCAL_INSTALL: if self.__config.get_use_cdrom_source(): @@ -217,24 +228,26 @@ class DomainConfigScreen(ConfigScreen): result = NETWORK_INSTALL_PAGE elif install_type == DomainConfig.PXE_INSTALL: result = VM_DETAILS_PAGE - elif page == LOCAL_STORAGE_PAGE or page == MANAGED_STORAGE_PAGE: + elif page is LOCAL_STORAGE_PAGE or page is SELECT_VOLUME_PAGE: result = ENABLE_STORAGE_PAGE - elif page == NETWORK_INSTALL_PAGE: + elif page is SELECT_POOL_PAGE: + result = ENABLE_STORAGE_PAGE + elif page is NETWORK_INSTALL_PAGE: result = VM_DETAILS_PAGE - elif page == SELECT_CDROM_PAGE or page == SELECT_ISO_PAGE: + elif page is SELECT_CDROM_PAGE or page is SELECT_ISO_PAGE: result = LOCAL_INSTALL_PAGE - elif page == BRIDGE_PAGE: + elif page is BRIDGE_PAGE: if self.__config.get_use_local_storage(): result = LOCAL_STORAGE_PAGE else: - result = MANAGED_STORAGE_PAGE + result = SELECT_VOLUME_PAGE else: if page > 1: result = page - 1 return result def get_next_page(self, page): result = page - if page == VM_DETAILS_PAGE: + if page is VM_DETAILS_PAGE: install_type = self.__config.get_install_type() if install_type == DomainConfig.LOCAL_INSTALL: result = LOCAL_INSTALL_PAGE @@ -242,34 +255,36 @@ class 
DomainConfigScreen(ConfigScreen): result = NETWORK_INSTALL_PAGE elif install_type == DomainConfig.PXE_INSTALL: result = OS_TYPE_PAGE - elif page == LOCAL_INSTALL_PAGE: + elif page is LOCAL_INSTALL_PAGE: if self.__config.get_use_cdrom_source(): result = SELECT_CDROM_PAGE else: result = SELECT_ISO_PAGE - elif page == SELECT_CDROM_PAGE or page == SELECT_ISO_PAGE: + elif page is SELECT_CDROM_PAGE or page is SELECT_ISO_PAGE: result = OS_TYPE_PAGE - elif page == NETWORK_INSTALL_PAGE: + elif page is NETWORK_INSTALL_PAGE: result = OS_TYPE_PAGE - elif page == ENABLE_STORAGE_PAGE: + elif page is ENABLE_STORAGE_PAGE: result = BRIDGE_PAGE if self.__config.get_enable_storage(): if self.__config.get_use_local_storage(): result = LOCAL_STORAGE_PAGE else: - result = MANAGED_STORAGE_PAGE - elif page == LOCAL_STORAGE_PAGE or page == MANAGED_STORAGE_PAGE: + result = SELECT_POOL_PAGE + elif page is LOCAL_STORAGE_PAGE: result = BRIDGE_PAGE else: result = page + 1 return result def page_has_finish(self, page): - if page == CONFIRM_PAGE: return True + if page is CONFIRM_PAGE: return True return False def page_has_next(self, page): - if page < CONFIRM_PAGE: + if page is SELECT_POOL_PAGE: return self.__has_pools + elif page is SELECT_VOLUME_PAGE: return self.__has_volumes + elif page < CONFIRM_PAGE: return True def get_vm_details_page(self, screen): @@ -393,17 +408,36 @@ class DomainConfigScreen(ConfigScreen): return [Label("Configure local storage"), grid] - def get_managed_storage_page(self, screen): + def get_select_pool_page(self, screen): + pools = [] + for pool in self.get_libvirt().list_storage_pools(): + pools.append([pool, pool, pool == self.__config.get_storage_pool()]) + if len(pools) > 0: + self.__storage_pool = RadioBar(screen, (pools)) + grid = Grid(2, 1) + grid.setField(Label("Storage pool:"), 0, 0, anchorTop = 1) + grid.setField(self.__storage_pool, 1, 0) + self.__has_pools = True + else: + grid = Label("There are no storage pools available.") + self.__has_pools = False + 
return [Label("Configure Managed Storage: Select A Pool"), + grid] + + def get_select_volume_page(self, screen): volumes = [] - for volume in self.get_libvirt().list_storage_volumes(): - volumes.append(["%s (%d GB)" % (volume.name(), volume.info()[1] / (1024 ** 3)), - volume.name(), - self.__config.is_existing_storage(volume.name())]) - self.__existing_storage = RadioBar(screen, (volumes)) - grid = Grid(2, 1) - grid.setField(Label("Existing storage:"), 0, 0) - grid.setField(self.__existing_storage, 1, 0) - return [Label("Configure managed storage"), + for volume in self.get_libvirt().list_storage_volumes(self.__config.get_storage_pool()): + volumes.append([volume, volume, volume == self.__config.get_storage_volume()]) + if len(volumes) > 0: + self.__storage_volume = RadioBar(screen, (volumes)) + grid = Grid(2, 1) + grid.setField(Label("Storage volumes:"), 0, 0, anchorTop = 1) + grid.setField(self.__storage_volume, 1, 0) + self.__has_volumes = True + else: + grid = Label("This storage pool has no defined volumes.") + self.__has_volumes = False + return [Label("Configure Managed Storage: Select A Volume"), grid] def get_bridge_page(self, screen): @@ -448,7 +482,9 @@ class DomainConfigScreen(ConfigScreen): grid.setField(Label("CPUs:"), 0, 3, anchorRight = 1) grid.setField(Label("%d" % self.__config.get_cpus()), 1, 3, anchorLeft = 1) grid.setField(Label("Storage:"), 0, 4, anchorRight = 1) - grid.setField(Label(self.__config.get_existing_storage()), 1, 4, anchorLeft = 1) + grid.setField(Label("%s (on %s)" % (self.__config.get_storage_volume(), + self.__config.get_storage_pool())), + 1, 4, anchorLeft = 1) grid.setField(Label("Network:"), 0, 5, anchorRight = 1) grid.setField(Label(self.__config.get_network_bridge()), 1, 5, anchorLeft = 1) return [Label("Ready to begin installation of %s" % self.__config.get_guest_name()), diff --git a/nodeadmin/domainconfig.py b/nodeadmin/domainconfig.py index ef39fe0..4466e67 100644 --- a/nodeadmin/domainconfig.py +++ 
b/nodeadmin/domainconfig.py @@ -50,7 +50,8 @@ class DomainConfig: self.__use_local_storage = True self.__storage_size = 8.0 self.__allocate_storage = True - self.__existing_storage = "" + self.__storage_pool = "" + self.__storage_volume = "" self.__network_bridge = None self.__mac_address = None self.__virt_type = None @@ -177,11 +178,17 @@ class DomainConfig: def get_allocate_storage(self): return self.__allocate_storage - def set_existing_storage(self, storage): - self.__existing_storage = storage + def set_storage_pool(self, pool): + self.__storage_pool = pool - def get_existing_storage(self): - return self.__existing_storage + def get_storage_pool(self): + return self.__storage_pool + + def set_storage_volume(self, volume): + self.__storage_volume = volume + + def get_storage_volume(self): + return self.__storage_volume def is_existing_storage(self, storage): return self.__existing_storage == storage diff --git a/nodeadmin/libvirtworker.py b/nodeadmin/libvirtworker.py index b2acabe..f31266c 100644 --- a/nodeadmin/libvirtworker.py +++ b/nodeadmin/libvirtworker.py @@ -196,6 +196,11 @@ class LibvirtWorker: '''Returns the storage pool with the specified name.''' return self.__conn.storagePoolLookupByName(name) + def list_storage_volumes(self, poolname): + '''Returns the list of all defined storage volumes for a given pool.''' + pool = self.get_storage_pool(poolname) + return pool.listVolumes() + def define_storage_volume(self, config, meter): '''Defines a new storage volume.''' self.create_storage_pool(config.get_pool().name()) @@ -204,10 +209,15 @@ class LibvirtWorker: def remove_storage_volume(self, poolname, volumename): '''Removes the specified storage volume.''' - pool = self.get_storage_pool(poolname) - volume = pool.storageVolLookupByName(volumename) + volume = self.get_storage_volume(poolname, volumename) volume.delete(0) + def get_storage_volume(self, poolname, volumename): + '''Returns a reference to the specified storage volume.''' + pool 
=self.get_storage_pool(poolname) + volume = pool.storageVolLookupByName(volumename) + return volume + def list_bridges(self): '''Lists all defined and active bridges.''' bridges = self.__conn.listNetworks() @@ -221,21 +231,9 @@ class LibvirtWorker: def generate_mac_address(self): return self.__net.macaddr - def list_storage_volumes(self): - '''Lists all defined storage volumes.''' - pools = self.__conn.listStoragePools() - pools.extend(self.__conn.listDefinedStoragePools()) - result = [] - for name in pools: - pool = self.__conn.storagePoolLookupByName(name) - for volname in pool.listVolumes(): - volume = self.__conn.storageVolLookupByPath("/var/lib/libvirt/images/%s" % volname) - result.append(volume) - return result - - def get_storage_size(self, name): + def get_storage_size(self, poolname, volumename): '''Returns the size of the specified storage volume.''' - volume = self.__conn.storageVolLookupByPath("/var/lib/libvirt/images/%s" % name) + volume = self.get_storage_volume(poolname, volumename) return volume.info()[1] / (1024.0 ** 3) def get_virt_types(self): @@ -381,6 +379,10 @@ class LibvirtWorker: pool_object = pool, suffix = ".img") path = os.path.join(DEFAULT_POOL_TARGET_PATH, path) + else: + volume = self.get_storage_volume(config.get_storage_pool(), + config.get_storage_volume()) + path = volume.path() if path is not None: storage= virtinst.VirtualDisk(conn = self.__conn, diff --git a/nodeadmin/setup.py b/nodeadmin/setup.py deleted file mode 100644 index 9af2752..0000000 --- a/nodeadmin/setup.py +++ /dev/null @@ -1,46 +0,0 @@ -# setup.py - Copyright (C) 2009 Red Hat, Inc. -# Written by Darryl L. Pierce -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, -# MA 02110-1301, USA. A copy of the GNU General Public License is -# also available at http://www.gnu.org/copyleft/gpl.html. - -from setuptools import setup, find_packages - -setup(name = "nodeadmin", - version = "1.0.3", - package_dir = {'nodeadmin': 'nodeadmin'}, - packages = find_packages('.'), - entry_points = { - 'console_scripts': [ - 'nodeadmin = nodeadmin.nodeadmin:NodeAdmin', - 'addvm = nodeadmin.adddomain:AddDomain', - 'startvm = nodeadmin.startdomain:StartDomain', - 'stopvm = nodeadmin.stopdomain:StopDomain', - 'rmvm = nodeadmin.removedomain:RemoveDomain', - 'createuser = nodeadmin.createuser:CreateUser', - 'listvms = nodeadmin.listdomains:ListDomains', - 'definenet = nodeadmin.definenet:DefineNetwork', - 'createnet = nodeadmin.createnetwork:CreateNetwork', - 'destroynet = nodeadmin.destroynetwork:DestroyNetwork', - 'undefinenet = nodeadmin.undefinenetwork:UndefineNetwork', - 'listnets = nodeadmin.listnetworks:ListNetworks', - 'addpool = nodeadmin.addpool:AddStoragePool', - 'rmpool = nodeadmin.removepool:RemoveStoragePool', - 'startpool = nodeadmin.startpool:StartStoragePool', - 'stoppool = nodeadmin.stoppool:StopStoragePool', - 'addvolume = nodeadmin.addvolume:AddStorageVolume', - 'rmvolume = nodeadmin.removevolume:RemoveStorageVolume', - 'listpools = nodeadmin.listpools:ListPools'] - }) -- 1.6.2.5 From dpierce at redhat.com Thu Nov 5 14:11:59 2009 From: dpierce at redhat.com (Darryl L. 
Pierce) Date: Thu, 5 Nov 2009 09:11:59 -0500 Subject: [Ovirt-devel] [PATCH 2/2] Refactor domain storage setup to use pool and volume selection screens. In-Reply-To: <4AF18D0B.6000603@redhat.com> References: <1256754353-18515-1-git-send-email-dpierce@redhat.com> <1256754353-18515-2-git-send-email-dpierce@redhat.com> <1256754353-18515-3-git-send-email-dpierce@redhat.com> <4AF18D0B.6000603@redhat.com> Message-ID: <20091105141159.GA4743@mcpierce-laptop.rdu.redhat.com> On Wed, Nov 04, 2009 at 09:17:47AM -0500, Joey Boggs wrote: > 1/2 applies fine 2/2 won't > > Guessing it needs to be rebased since the latest commits would put them > out of order I've rebased and resent the two patches. #2 is branched from #1 so not sure why it wouldn't apply. But, I'll ping you in IRC to go over it. -- Darryl L. Pierce, Sr. Software Engineer @ Red Hat, Inc. Delivering value year after year. Red Hat ranks #1 in value among software vendors. http://www.redhat.com/promo/vendor/ -------------- next part -------------- A non-text attachment was scrubbed... Name: not available Type: application/pgp-signature Size: 197 bytes Desc: not available URL: From imain at redhat.com Fri Nov 6 22:17:10 2009 From: imain at redhat.com (Ian Main) Date: Fri, 6 Nov 2009 14:17:10 -0800 Subject: [Ovirt-devel] Re: [PATCH server] Update daemons to use new QMF. In-Reply-To: <1257364777-19801-1-git-send-email-imain@redhat.com> References: <1257364777-19801-1-git-send-email-imain@redhat.com> Message-ID: <20091106141710.6efface3@tp.mains.priv> OK, I pushed these patches since no one seems keen on testing it soon. Just requested bodhi updates for all the related packages in f11 as well. Ian From dpierce at redhat.com Mon Nov 9 15:59:07 2009 From: dpierce at redhat.com (Darryl L.
Pierce) Date: Mon, 9 Nov 2009 10:59:07 -0500 Subject: [Ovirt-devel] [PATCH node] Add iSCSI initiator setup option In-Reply-To: <1256327613-20260-1-git-send-email-jboggs@redhat.com> References: <1256327613-20260-1-git-send-email-jboggs@redhat.com> Message-ID: <20091109155907.GB4428@mcpierce-laptop.rdu.redhat.com> On Fri, Oct 23, 2009 at 03:53:33PM -0400, Joey Boggs wrote: > This add an iscsi initiator name setup option to the main menu in ovirt-config-setup and can also be accessed via ovirt-config-iscsi. > > Also accepts AUTO setup by running ovirt-config-iscsi AUTO and generates a random name as before > > --- ACK. The iscsi-iname tool generates a differently formatted name; i.e., it produces something like "iqn.1994-05.com.fedora:e02b3f52c90". But I'm told that those differences aren't that important. -- Darryl L. Pierce, Sr. Software Engineer @ Red Hat, Inc. Delivering value year after year. Red Hat ranks #1 in value among software vendors. http://www.redhat.com/promo/vendor/ -------------- next part -------------- A non-text attachment was scrubbed... Name: not available Type: application/pgp-signature Size: 197 bytes Desc: not available URL: From dpierce at redhat.com Mon Nov 9 16:41:03 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Mon, 9 Nov 2009 11:41:03 -0500 Subject: [Ovirt-devel] [PATCH node] validify ipv4/ipv6 static/dhcp choice else loop In-Reply-To: <1255110680-19004-1-git-send-email-jboggs@redhat.com> References: <1255110680-19004-1-git-send-email-jboggs@redhat.com> Message-ID: <20091109164103.GC4428@mcpierce-laptop.rdu.redhat.com> On Fri, Oct 09, 2009 at 01:51:20PM -0400, Joey Boggs wrote: > If you select an option that's not listed during ipv4/ipv6 setup it will accept it without verification. This corrects that behavior and forces a valid option to be picked.
> --- > scripts/ovirt-config-networking | 106 +++++++++++++++++++++------------------ > 1 files changed, 58 insertions(+), 48 deletions(-) > > diff --git a/scripts/ovirt-config-networking b/scripts/ovirt-config-networking > index 7d4e363..45f7129 100755 > --- a/scripts/ovirt-config-networking > +++ b/scripts/ovirt-config-networking > @@ -160,56 +160,66 @@ function configure_interface > return;; > esac > > - read -ep "Enable IPv4 support ([S]tatic IP, [D]HCP, [N]o or [A]bort)? " > - case $REPLY in > - D|d) > - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/BOOTPROTO dhcp" > - ;; > - S|s) > - printf "\n" > - read -ep "IP Address: "; IPADDR=$REPLY > - read -ep " Netmask: "; NETMASK=$REPLY > - read -ep " Gateway: "; GATEWAY=$REPLY > - > - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/BOOTPROTO none" > - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPADDR $IPADDR" > - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/NETMASK $NETMASK" > - if [ -n "${GATEWAY}" ]; then > - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/GATEWAY $GATEWAY" > - fi > - ;; > - A|a) > - CONFIGURED_NIC="" > - VLAN_ID="" > - return > - ;; > - esac > + while true; do > + read -ep "Enable IPv4 support ([S]tatic IP, [D]HCP, [N]o or [A]bort)? " > + case $REPLY in > + D|d) > + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/BOOTPROTO dhcp" > + break > + ;; > + S|s) > + printf "\n" > + read -ep "IP Address: "; IPADDR=$REPLY > + read -ep " Netmask: "; NETMASK=$REPLY > + read -ep " Gateway: "; GATEWAY=$REPLY > + > + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/BOOTPROTO none" > + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPADDR $IPADDR" > + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/NETMASK $NETMASK" > + if [ -n "${GATEWAY}" ]; then > + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/GATEWAY $GATEWAY" > + fi > + break > + ;; > + A|a) > + CONFIGURED_NIC="" > + VLAN_ID="" > + return > + ;; > + esac > + done > > printf "\n" > - read -ep "Enable IPv6 support ([S]tatic, [D]HCPv6, A[u]to, [N]o or [A]bort)? 
" > - case $REPLY in > - S|s) > - read -ep "IPv6 Address: "; IPADDR=$REPLY > - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6INIT yes" > - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IP6ADDR $IPADDR" > - ;; > - D|d) > - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6INIT yes" > - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6AUTCONF no" > - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6FORWARDING no" > - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/DHCPV6C yes" > - ;; > - U|u) > - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6INIT yes" > - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6FORWARDING no" > - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6AUTOCONF yes" > - ;; > - A|a) > - CONFIGURED_NIC="" > - VLAN_ID="" > - return > - ;; > - esac > + > + while true; do > + read -ep "Enable IPv6 support ([S]tatic, [D]HCPv6, A[u]to, [N]o or [A]bort)? " > + case $REPLY in > + S|s) > + read -ep "IPv6 Address: "; IPADDR=$REPLY > + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6INIT yes" > + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IP6ADDR $IPADDR" > + break > + ;; > + D|d) > + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6INIT yes" > + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6AUTCONF no" > + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6FORWARDING no" > + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/DHCPV6C yes" > + break > + ;; > + U|u) > + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6INIT yes" > + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6FORWARDING no" > + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6AUTOCONF yes" > + break > + ;; > + A|a) > + CONFIGURED_NIC="" > + VLAN_ID="" > + return > + ;; > + esac > + done NAK. This portion is missing support for [N]o on IPv6 support. -- Darryl L. Pierce, Sr. Software Engineer @ Red Hat, Inc. Delivering value year after year. Red Hat ranks #1 in value among software vendors. http://www.redhat.com/promo/vendor/ -------------- next part -------------- A non-text attachment was scrubbed... 
Name: not available Type: application/pgp-signature Size: 197 bytes Desc: not available URL: From dpierce at redhat.com Mon Nov 9 16:50:59 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Mon, 9 Nov 2009 11:50:59 -0500 Subject: [Ovirt-devel] [PATCH node] add ability to select separate disks for Root and HostVG in o-c-storage In-Reply-To: <1257295041-32027-1-git-send-email-jboggs@redhat.com> References: <1257295041-32027-1-git-send-email-jboggs@redhat.com> Message-ID: <20091109165059.GA21222@mcpierce-laptop.rdu.redhat.com> On Tue, Nov 03, 2009 at 07:37:21PM -0500, Joey Boggs wrote: > This adds the ability to select 2 different disks for root and HostVG. ovirt_init kernel arg still works as intended but does not support multiple arguments, this will be added in next > --- Can you rebase this on upstream and resend? -- Darryl L. Pierce, Sr. Software Engineer @ Red Hat, Inc. Delivering value year after year. Red Hat ranks #1 in value among software vendors. http://www.redhat.com/promo/vendor/ -------------- next part -------------- A non-text attachment was scrubbed... Name: not available Type: application/pgp-signature Size: 197 bytes Desc: not available URL: From jboggs at redhat.com Mon Nov 9 17:43:02 2009 From: jboggs at redhat.com (Joey Boggs) Date: Mon, 09 Nov 2009 12:43:02 -0500 Subject: [Ovirt-devel] [PATCH node] validify ipv4/ipv6 static/dhcp choice else loop In-Reply-To: <20091109164103.GC4428@mcpierce-laptop.rdu.redhat.com> References: <1255110680-19004-1-git-send-email-jboggs@redhat.com> <20091109164103.GC4428@mcpierce-laptop.rdu.redhat.com> Message-ID: <4AF854A6.9070800@redhat.com> Darryl L. Pierce wrote: > On Fri, Oct 09, 2009 at 01:51:20PM -0400, Joey Boggs wrote: > >> If you select an option that's not listed during ipv4/ipv6 setup it will accept it without verification. This corrects that behavior and forces a valid option to be picked. 
>> --- >> scripts/ovirt-config-networking | 106 +++++++++++++++++++++------------------ >> 1 files changed, 58 insertions(+), 48 deletions(-) >> >> diff --git a/scripts/ovirt-config-networking b/scripts/ovirt-config-networking >> index 7d4e363..45f7129 100755 >> --- a/scripts/ovirt-config-networking >> +++ b/scripts/ovirt-config-networking >> @@ -160,56 +160,66 @@ function configure_interface >> return;; >> esac >> >> - read -ep "Enable IPv4 support ([S]tatic IP, [D]HCP, [N]o or [A]bort)? " >> - case $REPLY in >> - D|d) >> - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/BOOTPROTO dhcp" >> - ;; >> - S|s) >> - printf "\n" >> - read -ep "IP Address: "; IPADDR=$REPLY >> - read -ep " Netmask: "; NETMASK=$REPLY >> - read -ep " Gateway: "; GATEWAY=$REPLY >> - >> - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/BOOTPROTO none" >> - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPADDR $IPADDR" >> - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/NETMASK $NETMASK" >> - if [ -n "${GATEWAY}" ]; then >> - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/GATEWAY $GATEWAY" >> - fi >> - ;; >> - A|a) >> - CONFIGURED_NIC="" >> - VLAN_ID="" >> - return >> - ;; >> - esac >> + while true; do >> + read -ep "Enable IPv4 support ([S]tatic IP, [D]HCP, [N]o or [A]bort)? " >> + case $REPLY in >> + D|d) >> + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/BOOTPROTO dhcp" >> + break >> + ;; >> + S|s) >> + printf "\n" >> + read -ep "IP Address: "; IPADDR=$REPLY >> + read -ep " Netmask: "; NETMASK=$REPLY >> + read -ep " Gateway: "; GATEWAY=$REPLY >> + >> + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/BOOTPROTO none" >> + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPADDR $IPADDR" >> + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/NETMASK $NETMASK" >> + if [ -n "${GATEWAY}" ]; then >> + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/GATEWAY $GATEWAY" >> + fi >> + break >> + ;; >> + A|a) >> + CONFIGURED_NIC="" >> + VLAN_ID="" >> + return >> + ;; >> + esac >> + done >> >> printf "\n" >> - read -ep "Enable IPv6 support ([S]tatic, [D]HCPv6, A[u]to, [N]o or [A]bort)? 
" >> - case $REPLY in >> - S|s) >> - read -ep "IPv6 Address: "; IPADDR=$REPLY >> - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6INIT yes" >> - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IP6ADDR $IPADDR" >> - ;; >> - D|d) >> - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6INIT yes" >> - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6AUTCONF no" >> - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6FORWARDING no" >> - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/DHCPV6C yes" >> - ;; >> - U|u) >> - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6INIT yes" >> - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6FORWARDING no" >> - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6AUTOCONF yes" >> - ;; >> - A|a) >> - CONFIGURED_NIC="" >> - VLAN_ID="" >> - return >> - ;; >> - esac >> + >> + while true; do >> + read -ep "Enable IPv6 support ([S]tatic, [D]HCPv6, A[u]to, [N]o or [A]bort)? " >> + case $REPLY in >> + S|s) >> + read -ep "IPv6 Address: "; IPADDR=$REPLY >> + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6INIT yes" >> + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IP6ADDR $IPADDR" >> + break >> + ;; >> + D|d) >> + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6INIT yes" >> + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6AUTCONF no" >> + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6FORWARDING no" >> + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/DHCPV6C yes" >> + break >> + ;; >> + U|u) >> + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6INIT yes" >> + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6FORWARDING no" >> + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6AUTOCONF yes" >> + break >> + ;; >> + A|a) >> + CONFIGURED_NIC="" >> + VLAN_ID="" >> + return >> + ;; >> + esac >> + done >> > > NAK. > > This portion is missing support for [N]o on IPv6 support. 
> > interesting, it was never there to begin with :/ I'll add it From jboggs at redhat.com Mon Nov 9 17:50:10 2009 From: jboggs at redhat.com (Joey Boggs) Date: Mon, 09 Nov 2009 12:50:10 -0500 Subject: [Ovirt-devel] [PATCH node] Add iSCSI initiator setup option In-Reply-To: <20091109155907.GB4428@mcpierce-laptop.rdu.redhat.com> References: <1256327613-20260-1-git-send-email-jboggs@redhat.com> <20091109155907.GB4428@mcpierce-laptop.rdu.redhat.com> Message-ID: <4AF85652.1040604@redhat.com> Darryl L. Pierce wrote: > On Fri, Oct 23, 2009 at 03:53:33PM -0400, Joey Boggs wrote: > >> This add an iscsi initiator name setup option to the main menu in ovirt-config-setup and can also be accessed via ovirt-config-iscsi. >> >> Also accepts AUTO setup by running ovirt-config-iscsi AUTO and generates a random name as before >> >> --- >> > > ACK. The iscsi-iname tool generates a differently formated name; i.e., > it produces something like "iqn.1994-05.com.fedora:e02b3f52c90". But I'm > told that those differences aren't that import. > > pushed From dpierce at redhat.com Mon Nov 9 18:03:52 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Mon, 9 Nov 2009 13:03:52 -0500 Subject: [Ovirt-devel] [PATCH node] validify ipv4/ipv6 static/dhcp choice else loop In-Reply-To: <4AF854A6.9070800@redhat.com> References: <1255110680-19004-1-git-send-email-jboggs@redhat.com> <20091109164103.GC4428@mcpierce-laptop.rdu.redhat.com> <4AF854A6.9070800@redhat.com> Message-ID: <20091109180352.GB21222@mcpierce-laptop.rdu.redhat.com> On Mon, Nov 09, 2009 at 12:43:02PM -0500, Joey Boggs wrote: >> NAK. >> >> This portion is missing support for [N]o on IPv6 support. >> > interesting, it was never there to begin with :/ I'll add it Hrm. It used to have it. At least, whenever I go through networking support I always hit "N" on IPv6 to bypass it in most of my tests. -- Darryl L. Pierce, Sr. Software Engineer @ Red Hat, Inc. Delivering value year after year. 
Red Hat ranks #1 in value among software vendors. http://www.redhat.com/promo/vendor/ -------------- next part -------------- A non-text attachment was scrubbed... Name: not available Type: application/pgp-signature Size: 197 bytes Desc: not available URL: From jboggs at redhat.com Mon Nov 9 18:15:09 2009 From: jboggs at redhat.com (Joey Boggs) Date: Mon, 09 Nov 2009 13:15:09 -0500 Subject: [Ovirt-devel] [PATCH node] validify ipv4/ipv6 static/dhcp choice else loop In-Reply-To: <20091109180352.GB21222@mcpierce-laptop.rdu.redhat.com> References: <1255110680-19004-1-git-send-email-jboggs@redhat.com> <20091109164103.GC4428@mcpierce-laptop.rdu.redhat.com> <4AF854A6.9070800@redhat.com> <20091109180352.GB21222@mcpierce-laptop.rdu.redhat.com> Message-ID: <4AF85C2D.6090702@redhat.com> Darryl L. Pierce wrote: > On Mon, Nov 09, 2009 at 12:43:02PM -0500, Joey Boggs wrote: > >>> NAK. >>> >>> This portion is missing support for [N]o on IPv6 support. >>> >>> >> interesting, it was never there to begin with :/ I'll add it >> > > Hrm. It used to have it. At least, whenever I go through networking > support I always hit "N" on IPv6 to bypass it in most of my tests. 
> > figured it out, it doesn't have a catch all and "N" did nothing before so the while true loop kills it now, I'll fix it From jboggs at redhat.com Mon Nov 9 18:27:08 2009 From: jboggs at redhat.com (Joey Boggs) Date: Mon, 9 Nov 2009 13:27:08 -0500 Subject: [Ovirt-devel] [PATCH node] RESEND validify ipv4/ipv6 static/dhcp choice else loop Message-ID: <1257791228-28572-1-git-send-email-jboggs@redhat.com> Rebased on upstream - and corrected "N" choice behavior --- scripts/ovirt-config-networking | 112 ++++++++++++++++++++++----------------- 1 files changed, 64 insertions(+), 48 deletions(-) diff --git a/scripts/ovirt-config-networking b/scripts/ovirt-config-networking index 7d4e363..8e47830 100755 --- a/scripts/ovirt-config-networking +++ b/scripts/ovirt-config-networking @@ -160,56 +160,72 @@ function configure_interface return;; esac - read -ep "Enable IPv4 support ([S]tatic IP, [D]HCP, [N]o or [A]bort)? " - case $REPLY in - D|d) - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/BOOTPROTO dhcp" - ;; - S|s) - printf "\n" - read -ep "IP Address: "; IPADDR=$REPLY - read -ep " Netmask: "; NETMASK=$REPLY - read -ep " Gateway: "; GATEWAY=$REPLY - - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/BOOTPROTO none" - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPADDR $IPADDR" - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/NETMASK $NETMASK" - if [ -n "${GATEWAY}" ]; then - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/GATEWAY $GATEWAY" - fi - ;; - A|a) - CONFIGURED_NIC="" - VLAN_ID="" - return - ;; - esac + while true; do + read -ep "Enable IPv4 support ([S]tatic IP, [D]HCP, [N]o or [A]bort)? 
" + case $REPLY in + D|d) + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/BOOTPROTO dhcp" + break + ;; + S|s) + printf "\n" + read -ep "IP Address: "; IPADDR=$REPLY + read -ep " Netmask: "; NETMASK=$REPLY + read -ep " Gateway: "; GATEWAY=$REPLY + + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/BOOTPROTO none" + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPADDR $IPADDR" + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/NETMASK $NETMASK" + if [ -n "${GATEWAY}" ]; then + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/GATEWAY $GATEWAY" + fi + break + ;; + A|a) + CONFIGURED_NIC="" + VLAN_ID="" + return + ;; + N|n) + break + ;; + esac + done printf "\n" - read -ep "Enable IPv6 support ([S]tatic, [D]HCPv6, A[u]to, [N]o or [A]bort)? " - case $REPLY in - S|s) - read -ep "IPv6 Address: "; IPADDR=$REPLY - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6INIT yes" - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IP6ADDR $IPADDR" - ;; - D|d) - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6INIT yes" - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6AUTCONF no" - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6FORWARDING no" - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/DHCPV6C yes" - ;; - U|u) - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6INIT yes" - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6FORWARDING no" - BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6AUTOCONF yes" - ;; - A|a) - CONFIGURED_NIC="" - VLAN_ID="" - return - ;; - esac + + while true; do + read -ep "Enable IPv6 support ([S]tatic, [D]HCPv6, A[u]to, [N]o or [A]bort)? 
" + case $REPLY in + S|s) + read -ep "IPv6 Address: "; IPADDR=$REPLY + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6INIT yes" + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IP6ADDR $IPADDR" + break + ;; + D|d) + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6INIT yes" + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6AUTCONF no" + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6FORWARDING no" + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/DHCPV6C yes" + break + ;; + U|u) + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6INIT yes" + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6FORWARDING no" + BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPV6AUTOCONF yes" + break + ;; + A|a) + CONFIGURED_NIC="" + VLAN_ID="" + return + ;; + N|n) + break + ;; + esac + done printf "\n" ask_yes_or_no "Is this correct ([Y]es/[N]o/[A]bort)?" true true -- 1.6.2.5 From dpierce at redhat.com Mon Nov 9 18:33:30 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Mon, 9 Nov 2009 13:33:30 -0500 Subject: [Ovirt-devel] [PATCH node] validify ipv4/ipv6 static/dhcp choice else loop In-Reply-To: <4AF85C2D.6090702@redhat.com> References: <1255110680-19004-1-git-send-email-jboggs@redhat.com> <20091109164103.GC4428@mcpierce-laptop.rdu.redhat.com> <4AF854A6.9070800@redhat.com> <20091109180352.GB21222@mcpierce-laptop.rdu.redhat.com> <4AF85C2D.6090702@redhat.com> Message-ID: <20091109183329.GC21222@mcpierce-laptop.rdu.redhat.com> On Mon, Nov 09, 2009 at 01:15:09PM -0500, Joey Boggs wrote: >> Hrm. It used to have it. At least, whenever I go through networking >> support I always hit "N" on IPv6 to bypass it in most of my tests. >> >> > figured it out, it doesn't have a catch all and "N" did nothing before > so the while true loop kills it now, I'll fix it Ah, that's right. N was a fall-thru rather than a case that had to be handled. -- Darryl L. Pierce, Sr. Software Engineer @ Red Hat, Inc. Delivering value year after year. Red Hat ranks #1 in value among software vendors. 
http://www.redhat.com/promo/vendor/ -------------- next part -------------- A non-text attachment was scrubbed... Name: not available Type: application/pgp-signature Size: 197 bytes Desc: not available URL: From dpierce at redhat.com Mon Nov 9 19:20:36 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Mon, 9 Nov 2009 14:20:36 -0500 Subject: [Ovirt-devel] Rebased again... Message-ID: <1257794438-10826-1-git-send-email-dpierce@redhat.com> This patch again rebases on upstream and should apply on next as of right now. From dpierce at redhat.com Mon Nov 9 19:20:37 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Mon, 9 Nov 2009 14:20:37 -0500 Subject: [Ovirt-devel] [PATCH 1/2] Provides a new storage administration system to the managed node. In-Reply-To: <1257794438-10826-1-git-send-email-dpierce@redhat.com> References: <1257794438-10826-1-git-send-email-dpierce@redhat.com> Message-ID: <1257794438-10826-2-git-send-email-dpierce@redhat.com> Users can now: * Add a new storage pool. * Delete a storage pool. * Start and stop storage pools. * Add a new storage volume. * Delete a storage volume. * List existing storage pools, with details. Signed-off-by: Darryl L. 
Pierce --- Makefile.am | 28 +++++-- nodeadmin/adddomain.py | 15 +--- nodeadmin/addpool.py | 183 ++++++++++++++++++++++++++++++++++++++++++++ nodeadmin/addvolume.py | 177 ++++++++++++++++++++++++++++++++++++++++++ nodeadmin/configscreen.py | 52 +++++++++++++ nodeadmin/createmeter.py | 30 +++++++ nodeadmin/libvirtworker.py | 67 ++++++++++++++-- nodeadmin/listpools.py | 63 +++++++++++++++ nodeadmin/mainmenu.py | 24 ++++--- nodeadmin/poolconfig.py | 143 ++++++++++++++++++++++++++++++++++ nodeadmin/removepool.py | 72 +++++++++++++++++ nodeadmin/removevolume.py | 76 ++++++++++++++++++ nodeadmin/setup.py.in | 9 ++- nodeadmin/startpool.py | 62 +++++++++++++++ nodeadmin/stoppool.py | 62 +++++++++++++++ nodeadmin/storagemenu.py | 63 +++++++++++++++ nodeadmin/utils.py | 10 +++ nodeadmin/volumeconfig.py | 83 ++++++++++++++++++++ ovirt-node.spec.in | 7 ++ 19 files changed, 1186 insertions(+), 40 deletions(-) create mode 100644 nodeadmin/addpool.py create mode 100644 nodeadmin/addvolume.py create mode 100644 nodeadmin/createmeter.py create mode 100644 nodeadmin/listpools.py create mode 100644 nodeadmin/poolconfig.py create mode 100644 nodeadmin/removepool.py create mode 100644 nodeadmin/removevolume.py create mode 100644 nodeadmin/startpool.py create mode 100644 nodeadmin/stoppool.py create mode 100644 nodeadmin/storagemenu.py create mode 100644 nodeadmin/volumeconfig.py diff --git a/Makefile.am b/Makefile.am index 3ce24c1..55ef277 100644 --- a/Makefile.am +++ b/Makefile.am @@ -28,29 +28,39 @@ EXTRA_DIST = \ images/syslinux-vesa-splash.jpg \ nodeadmin/__init__.py \ nodeadmin/adddomain.py \ + nodeadmin/addpool.py \ + nodeadmin/addvolume.py \ nodeadmin/configscreen.py \ + nodeadmin/createmeter.py \ nodeadmin/createnetwork.py \ nodeadmin/createuser.py \ + nodeadmin/definenet.py \ nodeadmin/destroynetwork.py \ + nodeadmin/domainconfig.py \ nodeadmin/halworker.py \ nodeadmin/libvirtworker.py \ - nodeadmin/userworker.py \ + nodeadmin/listdomains.py \ + nodeadmin/listnetworks.py \ + 
nodeadmin/listpools.py \ nodeadmin/mainmenu.py \ nodeadmin/menuscreen.py \ + nodeadmin/networkconfig.py \ nodeadmin/netmenu.py \ + nodeadmin/nodeadmin.py \ nodeadmin/nodemenu.py \ nodeadmin/removedomain.py \ - nodeadmin/undefinenetwork.py \ + nodeadmin/removepool.py \ + nodeadmin/removevolume.py \ + nodeadmin/setup.py \ nodeadmin/startdomain.py \ + nodeadmin/startpool.py \ nodeadmin/stopdomain.py \ - nodeadmin/definenet.py \ - nodeadmin/domainconfig.py \ - nodeadmin/networkconfig.py \ - nodeadmin/listdomains.py \ - nodeadmin/listnetworks.py \ - nodeadmin/nodeadmin.py \ - nodeadmin/setup.py \ + nodeadmin/stoppool.py \ + nodeadmin/storagemenu.py \ + nodeadmin/undefinenetwork.py \ + nodeadmin/userworker.py \ nodeadmin/utils.py \ + nodeadmin/volumeconfig.py \ scripts/collectd.conf.in \ scripts/ovirt \ scripts/ovirt-awake \ diff --git a/nodeadmin/adddomain.py b/nodeadmin/adddomain.py index 70a2011..bb06a62 100755 --- a/nodeadmin/adddomain.py +++ b/nodeadmin/adddomain.py @@ -20,11 +20,10 @@ from snack import * import os +from createmeter import CreateMeter from domainconfig import DomainConfig from configscreen import ConfigScreen -import urlgrabber.progress as progress import utils -import logging from virtinst import * @@ -51,16 +50,6 @@ OS_VARIANT="os.variant" MEMORY="memory" CPUS="cpus" -class DummyMeter(progress.BaseMeter): - def _do_start(self, now = None): - logging.info("Starting...") - - def _do_end(self, amount_read, now = None): - logging.info("Ending: read=%d" % amount_read) - - def _do_update(self, amount_read, now = None): - logging.info("Update: read=%d" % amount_read) - class DomainConfigScreen(ConfigScreen): def __init__(self): ConfigScreen.__init__(self, "Create A New Virtual Machine") @@ -212,7 +201,7 @@ class DomainConfigScreen(ConfigScreen): self.__config.set_virt_type(self.__virt_types.getSelection()) self.__config.set_architecture(self.__architectures.getSelection()) elif page == CONFIRM_PAGE: - self.get_libvirt().define_domain(self.__config, 
DummyMeter()) + self.get_libvirt().define_domain(self.__config, CreateMeter()) self.set_finished() def get_back_page(self, page): diff --git a/nodeadmin/addpool.py b/nodeadmin/addpool.py new file mode 100644 index 0000000..9fa1e7d --- /dev/null +++ b/nodeadmin/addpool.py @@ -0,0 +1,183 @@ +# addstorage.py - Copyright (C) 2009 Red Hat, Inc. +# Written by Darryl L. Pierce +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +# MA 02110-1301, USA. A copy of the GNU General Public License is +# also available at http://www.gnu.org/copyleft/gpl.html. 
+ +from snack import * +import traceback +import utils + +from configscreen import * +from poolconfig import PoolConfig +from virtinst import Storage + +POOL_NAME_PAGE = 1 +POOL_DETAILS_PAGE = 2 +CONFIRM_PAGE = 3 + +class AddStoragePoolConfigScreen(ConfigScreen): + def __init__(self): + ConfigScreen.__init__(self, "Add A Storage Pool") + self.__config = PoolConfig(self.get_libvirt()) + + def get_elements_for_page(self, screen, page): + if page is POOL_NAME_PAGE: return self.get_pool_name_page(screen) + elif page is POOL_DETAILS_PAGE: return self.get_pool_details_page(screen) + elif page is CONFIRM_PAGE: return self.get_confirm_page(screen) + + def page_has_next(self, page): + return page < CONFIRM_PAGE + + def page_has_back(self, page): + return page > POOL_NAME_PAGE + + def page_has_finish(self, page): + return page is CONFIRM_PAGE + + def validate_input(self, page, errors): + if page is POOL_NAME_PAGE: + if utils.string_is_not_blank(self.__name.value()): + if self.get_libvirt().storage_pool_exists(self.__name.value()): + errors.append("Name '%s' already in use by another pool." % self.__name.value()) + else: + return True + else: + errors.append("Storage object name must be a string between 0 and 50 characters.") + elif page is POOL_DETAILS_PAGE: + result = True + if self.__config.needs_target_path(): + if utils.string_is_not_blank(self.__target_path.value()): + if self.__target_path.value()[0:1] is not '/': + errors.append("'%s' is not an absolute path." 
% self.__target_path.value()) + result = False + else: + errors.append("You must enter a target path.") + result = False + if self.__config.needs_format(): + if self.__formats.getSelection() is None: + errors.append("You must select a pool format.") + result = False + if self.__config.needs_hostname(): + if not utils.string_is_not_blank(self.__hostname.value()): + errors.append("You must enter a hostname.") + result = False + if self.__config.needs_source_path(): + if utils.string_is_not_blank(self.__source_path.value()): + if self.__config.source_must_be_absolute(): + if self.__source_path.value()[0:1] is not '/': + errors.append("'%s' is not an absolute path." % self.__source_path.value()) + result = False + else: + errors.append("you must enter a source path.") + result = False + return result + elif page is CONFIRM_PAGE: return True + return False + + def process_input(self, page): + if page is POOL_NAME_PAGE: + self.__config.set_name(self.__name.value()) + self.__config.set_type(self.__type.getSelection()) + #self._reset_flags(self.__type.current()) + elif page is POOL_DETAILS_PAGE: + if self.__config.needs_target_path(): + self.__config.set_target_path(self.__target_path.value()) + if self.__config.needs_format(): + self.__config.set_format(self.__formats.getSelection()) + if self.__config.needs_hostname(): + self.__config.set_hostname(self.__hostname.value()) + if self.__config.needs_source_path(): + self.__config.set_source_path(self.__source_path.value()) + if self.__config.needs_build_pool(): + self.__config.set_build_pool(self.__build_pool.value()) + elif page is CONFIRM_PAGE: + self.get_libvirt().define_storage_pool(self.__config.get_name(), config = self.__config) + self.get_libvirt().create_storage_pool(self.__config.get_name()) + self.set_finished() + + def get_pool_name_page(self, screen): + self.__name = Entry(50, self.__config.get_name()) + pooltypes = [] + for pooltype in Storage.StoragePool.get_pool_types(): + pooltypes.append(["%s: %s" % 
(pooltype, Storage.StoragePool.get_pool_type_desc(pooltype)), + pooltype, + self.__config.get_type() is pooltype]) + self.__type = RadioBar(screen, pooltypes) + grid = Grid(2, 2) + grid.setField(Label("Name:"), 0, 0, anchorRight = 1) + grid.setField(self.__name, 1, 0, anchorLeft = 1) + grid.setField(Label("Type:"), 0, 1, anchorRight = 1, anchorTop = 1) + grid.setField(self.__type, 1, 1, anchorLeft = 1) + return [Label("Add Storage Pool"), + grid] + + def get_pool_details_page(self, screen): + rows = 0 + if self.__config.needs_target_path(): + self.__target_path = Entry(50, self.__config.get_target_path()) + rows += 1 + if self.__config.needs_format(): + formats = [] + for format in self.__config.get_formats(): + formats.append([format, format, format is self.__config.get_format()]) + self.__formats = RadioBar(screen, formats) + rows += 1 + if self.__config.needs_hostname(): + self.__hostname = Entry(50, self.__config.get_hostname()) + rows += 1 + if self.__config.needs_source_path(): + self.__source_path = Entry(50, self.__config.get_source_path()) + rows += 1 + if self.__config.needs_build_pool(): + self.__build_pool = Checkbox("Build Pool", self.__config.get_build_pool()) + rows += 1 + grid = Grid(2, rows) + currentrow = 0 + if self.__config.needs_target_path(): + grid.setField(Label("Target Path:"), 0, currentrow, anchorRight = 1) + grid.setField(self.__target_path, 1, currentrow, anchorLeft = 1) + currentrow += 1 + if self.__config.needs_format(): + grid.setField(Label("Format:"), 0, currentrow, anchorRight = 1, anchorTop = 1) + grid.setField(self.__formats, 1, currentrow, anchorLeft = 1) + currentrow += 1 + if self.__config.needs_hostname(): + grid.setField(Label("Host Name:"), 0, currentrow, anchorRight = 1) + grid.setField(self.__hostname, 1, currentrow, anchorRight = 1) + currentrow += 1 + if self.__config.needs_source_path(): + grid.setField(Label("Source Path:"), 0, currentrow, anchorRight = 1) + grid.setField(self.__source_path, 1, currentrow, anchorLeft 
= 1) + currentrow += 1 + if self.__config.needs_build_pool(): + grid.setField(Label(" "), 0, currentrow, anchorRight = 1) + grid.setField(self.__build_pool, 1, currentrow, anchorLeft = 1) + currentrow += 1 + return [Label("Specify a storage location to be later split into virtual machine storage"), + grid] + + def get_confirm_page(self, screen): + grid = Grid(2, 2) + grid.setField(Label("Name:"), 0, 0, anchorRight = 1) + grid.setField(Label(self.__config.get_name()), 1, 0, anchorLeft = 1) + grid.setField(Label("Target Path:"), 0, 1, anchorRight = 1) + grid.setField(Label(self.__config.get_target_path()), 1, 1, anchorLeft = 1) + return [Label("Confirm Pool Details"), + grid] + +def AddStoragePool(): + screen = AddStoragePoolConfigScreen() + screen.start() diff --git a/nodeadmin/addvolume.py b/nodeadmin/addvolume.py new file mode 100644 index 0000000..63cc54e --- /dev/null +++ b/nodeadmin/addvolume.py @@ -0,0 +1,177 @@ +# addvolume.py - Copyright (C) 2009 Red Hat, Inc. +# Written by Darryl L. Pierce +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +# MA 02110-1301, USA. A copy of the GNU General Public License is +# also available at http://www.gnu.org/copyleft/gpl.html. 
+ +from snack import * +import traceback + +from createmeter import CreateMeter +from configscreen import * +from volumeconfig import StorageVolumeConfig +from utils import * + +SELECT_POOL_PAGE = 1 +VOLUME_NAME_PAGE = 2 +VOLUME_FORMAT_PAGE = 3 +MAX_CAPACITY_PAGE = 4 +CONFIRM_PAGE = 5 + +class AddVolumeConfigScreen(StorageListConfigScreen): + def __init__(self): + StorageListConfigScreen.__init__(self, "Add A New Storage Volume") + self.__config = StorageVolumeConfig() + + def get_elements_for_page(self, screen, page): + if page is SELECT_POOL_PAGE: return self.get_storage_pool_list_page(screen) + elif page is VOLUME_NAME_PAGE: return self.get_volume_name_page(screen) + elif page is VOLUME_FORMAT_PAGE: return self.get_volume_format_page(screen) + elif page is MAX_CAPACITY_PAGE: return self.get_max_capacity_page(screen) + elif page is CONFIRM_PAGE: return self.get_confirm_page(screen) + + def page_has_next(self, page): + if page is SELECT_POOL_PAGE: + return self.has_selectable_pools() + else: + if page < CONFIRM_PAGE: return True + return False + + def page_has_back(self, page): + if page > SELECT_POOL_PAGE: return True + return False + + def page_has_finish(self, page): + return page is CONFIRM_PAGE + + def get_next_page(self, page): + if page is VOLUME_NAME_PAGE: + if self.__config.needs_format(): + return VOLUME_FORMAT_PAGE + else: + return MAX_CAPACITY_PAGE + return StorageListConfigScreen.get_next_page(self, page) + + def get_back_page(self, page): + if page is MAX_CAPACITY_PAGE: + if self.__config.needs_format(): + return VOLUME_FORMAT_PATH + else: + return VOLUME_NAME_PAGE + return StorageListConfigScreen.get_back_page(self, page) + + def validate_input(self, page, errors): + if page is SELECT_POOL_PAGE: + if self.get_selected_pool() is not None: + return True + else: + errors.append("You must select a storage pool.") + elif page is VOLUME_NAME_PAGE: + if string_is_not_blank(self.__name.value()): + return True + else: + errors.append("Storage object name can 
only contain alphanumeric, '_', '.', or '-' characters.") + elif page is VOLUME_FORMAT_PAGE: + if self.__formats.current() is not None: + return True + else: + errors.append("You must select a volume format.") + elif page is MAX_CAPACITY_PAGE: + if string_is_not_blank(self.__capacity.value()): + if string_is_not_blank(self.__allocation.value()): + capacity = int(self.__capacity.value()) + allocation = int(self.__allocation.value()) + if capacity > 0: + if capacity <= self.__config.get_pool().info()[3] / 1024**2: + if allocation >= 0: + if allocation <= capacity: + return True + else: + errors.append("Allocation cannot exceed the maximum capacity.") + else: + errors.append("The allocation must be greater than or equal to 0.") + else: + errors.append("The maximum capacity cannot exceed the storage pool size.") + else: + errors.append("The capacity must be greater than zero.") + else: + errors.append("An allocation value must be entered.") + else: + errors.append("A maximum volume capacity must be entered.") + elif page is CONFIRM_PAGE: return True + return False + + def process_input(self, page): + if page is SELECT_POOL_PAGE: + self.__config.set_pool(self.get_libvirt().get_storage_pool(self.get_selected_pool())) + elif page is VOLUME_NAME_PAGE: + self.__config.set_name(self.__name.value()) + elif page is VOLUME_FORMAT_PAGE: + self.__config.set_format(self.__formats.current()) + elif page is MAX_CAPACITY_PAGE: + self.__config.set_max_capacity(int(self.__capacity.value())) + self.__config.set_allocation(int(self.__allocation.value())) + elif page is CONFIRM_PAGE: + self.get_libvirt().define_storage_volume(self.__config, CreateMeter()) + self.set_finished() + + def get_volume_name_page(self, screen): + self.__name = Entry(50, self.__config.get_name()) + grid = Grid(2, 1) + grid.setField(Label("Name:"), 0, 0, anchorRight = 1) + grid.setField(self.__name, 1, 0, anchorLeft = 1) + return [Label("New Storage Volume"), + grid, + Label("Name of the volume to create. 
File extension may be appended.")] + + def get_volume_format_page(self, screen): + self.__formats = Listbox(0) + for format in self.__config.get_formats_for_pool(): + self.__formats.append(format, format) + grid = Grid(1, 1) + grid.setField(self.__formats, 0, 0) + return [Label("Select The Volume Format"), + grid] + + def get_max_capacity_page(self, screen): + self.__capacity = Entry(6, str(self.__config.get_max_capacity())) + self.__allocation = Entry(6, str(self.__config.get_allocation())) + grid = Grid(2, 2) + grid.setField(Label("Max. Capacity (MB):"), 0, 0, anchorRight = 1) + grid.setField(self.__capacity, 1, 0, anchorLeft = 1) + grid.setField(Label("Allocation (MB):"), 0, 1, anchorRight = 1) + grid.setField(self.__allocation, 1, 1, anchorLeft = 1) + return [Label("Storage Volume Quota"), + Label("%s's available space: %0.2f GB" % (self.__config.get_pool().name(), + self.__config.get_pool().info()[3] / 1024.0**3)), + grid] + + def get_confirm_page(self, screen): + grid = Grid(2, 5) + grid.setField(Label("Volume Name:"), 0, 0, anchorRight = 1) + grid.setField(Label("%s (%s)" % (self.__config.get_name(), self.__config.get_pool().name())), 1, 0, anchorLeft = 1) + if self.__config.needs_format(): + grid.setField(Label("Format:"), 0, 1, anchorRight = 1) + grid.setField(Label(self.__config.get_format()), 1, 1, anchorLeft = 1) + grid.setField(Label("Max. 
Capacity:"), 0, 2, anchorRight = 1) + grid.setField(Label("%0.2f GB" % (self.__config.get_max_capacity() / 1024.0)), 1, 2, anchorLeft = 1) + grid.setField(Label("Allocation:"), 0, 3, anchorRight = 1) + grid.setField(Label("%0.2f GB" % (self.__config.get_allocation() / 1024.0)), 1, 3, anchorLeft = 1) + return [Label("Ready To Allocation New Storage Volume"), + grid] + +def AddStorageVolume(): + screen = AddVolumeConfigScreen() + screen.start() diff --git a/nodeadmin/configscreen.py b/nodeadmin/configscreen.py index f214aea..7654697 100644 --- a/nodeadmin/configscreen.py +++ b/nodeadmin/configscreen.py @@ -179,3 +179,55 @@ class NetworkListConfigScreen(ConfigScreen): def has_selectable_networks(self): return self.__has_networks + +class StorageListConfigScreen(ConfigScreen): + '''Provides a base class for any configuration screen that deals with storage pool lists.''' + + def __init__(self, title): + ConfigScreen.__init__(self, title) + + def get_storage_pool_list_page(self, screen, defined=True, created=True): + pools = self.get_libvirt().list_storage_pools(defined=defined, created=created) + if len(pools) > 0: + self.__has_pools = True + self.__pools_list = Listbox(0) + for pool in pools: + self.__pools_list.append(pool, pool) + result = self.__pools_list + else: + self.__has_pools = False + result = Label("There are no storage pools available.") + grid = Grid(1, 1) + grid.setField(result, 0, 0) + return [Label("Storage Pool List"), + grid] + + def get_selected_pool(self): + return self.__pools_list.current() + + def has_selectable_pools(self): + return self.__has_pools + + def get_storage_volume_list_page(self, screen): + '''Requires that self.__pools_list have a selected element.''' + pool = self.get_libvirt().get_storage_pool(self.get_selected_pool()) + if len(pool.listVolumes()) > 0: + self.__has_volumes = True + self.__volumes_list = Listbox(0) + for volname in pool.listVolumes(): + volume = pool.storageVolLookupByName(volname) + self.__volumes_list.append("%s 
(%0.2f GB)" % (volume.name(), volume.info()[2] / 1024**3), volume.name()) + result = self.__volumes_list + else: + self.__has_volumes = False + result = Label("There are no storage volumes available.") + grid = Grid(1, 1) + grid.setField(result, 0, 0) + return [Label("Storage Volume List"), + grid] + + def get_selected_volume(self): + return self.__volumes_list.current() + + def has_selectable_volumes(self): + return self.__has_volumes diff --git a/nodeadmin/createmeter.py b/nodeadmin/createmeter.py new file mode 100644 index 0000000..521e7d8 --- /dev/null +++ b/nodeadmin/createmeter.py @@ -0,0 +1,30 @@ +# createmeter.py - Copyright (C) 2009 Red Hat, Inc. +# Written by Darryl L. Pierce +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +# MA 02110-1301, USA. A copy of the GNU General Public License is +# also available at http://www.gnu.org/copyleft/gpl.html. 
+ +import urlgrabber.progress as progress +import logging + +class CreateMeter(progress.BaseMeter): + def _do_start(self, now = None): + logging.info("Starting...") + + def _do_end(self, amount_read, now = None): + logging.info("Ending: read=%d" % amount_read) + + def _do_update(self, amount_read, now = None): + logging.info("Update: read=%d" % amount_read) diff --git a/nodeadmin/libvirtworker.py b/nodeadmin/libvirtworker.py index ba07605..b2acabe 100644 --- a/nodeadmin/libvirtworker.py +++ b/nodeadmin/libvirtworker.py @@ -35,6 +35,10 @@ class LibvirtWorker: self.__net.setup(self.__conn) (self.__new_guest, self.__new_domain) = virtinst.CapabilitiesParser.guest_lookup(conn = self.__conn) + def get_connection(self): + '''Returns the underlying connection.''' + return self.__conn + def list_domains(self, defined = True, started = True): '''Lists all domains.''' result = [] @@ -134,9 +138,12 @@ class LibvirtWorker: network = self.get_network(name) network.undefine() - def list_storage_pools(self): + def list_storage_pools(self, defined=True, created=True): '''Returns the list of all defined storage pools.''' - return self.__conn.listStoragePools() + pools = [] + if defined: pools.extend(self.__conn.listDefinedStoragePools()) + if created: pools.extend(self.__conn.listStoragePools()) + return pools def storage_pool_exists(self, name): '''Returns whether a storage pool exists.''' @@ -144,16 +151,62 @@ class LibvirtWorker: if name in pools: return True return False - def define_storage_pool(self, name): + def create_storage_pool(self, name): + '''Starts the named storage pool if it is not currently started.''' + if name not in self.list_storage_pools(defined = False): + pool = self.get_storage_pool(name) + pool.create(0) + + def destroy_storage_pool(self, name): + '''Stops the specified storage pool.''' + if name in self.list_storage_pools(defined = False): + pool = self.get_storage_pool(name) + pool.destroy() + + def define_storage_pool(self, name, config = None, meter = 
None): '''Defines a storage pool with the given name.''' - try: + if config is None: pool = virtinst.Storage.DirectoryPool(conn=self.__conn, name=name, target_path=DEFAULT_POOL_TARGET_PATH) - newpool = pool.install(build=True, create=True) + newpool = pool.install(build=True, create=True, meter=meter) newpool.setAutostart(True) - except Exception, error: - raise RuntimeError("Could not create pool: %s - %s", str(error)) + else: + pool = config.get_pool() + pool.target_path = config.get_target_path() + if config.needs_hostname(): + pool.host = config.get_hostname() + if config.needs_source_path(): + pool.source_path = config.get_source_path() + if config.needs_format(): + pool.format = config.get_format() + pool.conn = self.__conn + pool.get_xml_config() + newpool = pool.install(meter=meter, + build=True, # config.get_build_pool(), + create=True) + newpool.setAutostart(True) + + def undefine_storage_pool(self, name): + '''Undefines the specified storage pool.''' + pool = self.get_storage_pool(name) + pool.undefine() + + def get_storage_pool(self, name): + '''Returns the storage pool with the specified name.''' + return self.__conn.storagePoolLookupByName(name) + + def define_storage_volume(self, config, meter): + '''Defines a new storage volume.''' + self.create_storage_pool(config.get_pool().name()) + volume = config.create_volume() + volume.install(meter = meter) + + def remove_storage_volume(self, poolname, volumename): + '''Removes the specified storage volume.''' + pool = self.get_storage_pool(poolname) + volume = pool.storageVolLookupByName(volumename) + volume.delete(0) def list_bridges(self): '''Lists all defined and active bridges.''' diff --git a/nodeadmin/listpools.py b/nodeadmin/listpools.py new file mode 100644 index 0000000..686c42d --- /dev/null +++ b/nodeadmin/listpools.py @@ -0,0 +1,63 @@ +# listpools.py - Copyright (C) 2009 Red Hat, Inc. +# Written by Darryl L. 
Pierce +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +# MA 02110-1301, USA. A copy of the GNU General Public License is +# also available at http://www.gnu.org/copyleft/gpl.html. + +from snack import * + +from configscreen import * + +LIST_PAGE = 1 +DETAILS_PAGE = 2 + +class ListStoragePoolsConfigScreen(StorageListConfigScreen): + def __init__(self): + StorageListConfigScreen.__init__(self, "List Storage Pools") + + def get_elements_for_page(self, screen, page): + if page is LIST_PAGE: return self.get_storage_pool_list_page(screen) + elif page is DETAILS_PAGE: return self.get_pool_details_page(screen) + + def page_has_next(self, page): + if page is LIST_PAGE and self.has_selectable_pools(): + return True + return False + + def page_has_back(self, page): + if page is DETAILS_PAGE: return True + return False + + def get_pool_details_page(self, screen): + pool = self.get_libvirt().get_storage_pool(self.get_selected_pool()) + volumes = Listbox(0); + for name in pool.listVolumes(): + volume = pool.storageVolLookupByName(name) + volumes.append("%s (%0.1f G)" % (name, volume.info()[1] / 1024**3), name) + grid = Grid(2, 3) + grid.setField(Label("Name:"), 0, 0, anchorRight = 1) + grid.setField(Label(pool.name()), 1, 0, anchorLeft = 1) + grid.setField(Label("Volumes:"), 0, 1, anchorRight = 1) + grid.setField(volumes, 1, 1, anchorLeft = 1) + grid.setField(Label("Autostart:"), 0, 2, 
anchorRight = 1) + label = "No" + if pool.autostart(): label = "Yes" + grid.setField(Label(label), 1, 2, anchorLeft = 1) + return [Label("Details For Storage Pool: %s" % self.get_selected_pool()), + grid] + +def ListStoragePools(): + screen = ListStoragePoolsConfigScreen() + screen.start() diff --git a/nodeadmin/mainmenu.py b/nodeadmin/mainmenu.py index 73501fa..52d9298 100755 --- a/nodeadmin/mainmenu.py +++ b/nodeadmin/mainmenu.py @@ -19,28 +19,32 @@ from snack import * import traceback -from menuscreen import MenuScreen -from nodemenu import NodeMenu -from netmenu import NetworkMenu +from menuscreen import MenuScreen +from nodemenu import NodeMenu +from netmenu import NetworkMenu +from storagemenu import StoragePoolMenu import utils import logging NODE_MENU = 1 NETWORK_MENU = 2 -EXIT_CONSOLE = 99 +STORAGE_MENU = 3 +EXIT_CONSOLE = 4 class MainMenuScreen(MenuScreen): def __init__(self): MenuScreen.__init__(self, "Main Menu") def get_menu_items(self): - return (("Node Administration", NODE_MENU), - ("Network Administration", NETWORK_MENU)) - - def handle_selection(self, page): - if page is NODE_MENU: NodeMenu() - elif page is NETWORK_MENU: NetworkMenu() + return (("Node Administration", NODE_MENU), + ("Network Administration", NETWORK_MENU), + ("Storage Pool Administration", STORAGE_MENU)) + + def handle_selection(self, item): + if item is NODE_MENU: NodeMenu() + elif item is NETWORK_MENU: NetworkMenu() + elif item is STORAGE_MENU: StoragePoolMenu() def MainMenu(): screen = MainMenuScreen() diff --git a/nodeadmin/poolconfig.py b/nodeadmin/poolconfig.py new file mode 100644 index 0000000..6ece6c7 --- /dev/null +++ b/nodeadmin/poolconfig.py @@ -0,0 +1,143 @@ +# poolconfig.py - Copyright (C) 2009 Red Hat, Inc. +# Written by Darryl L. Pierce +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +# MA 02110-1301, USA. A copy of the GNU General Public License is +# also available at http://www.gnu.org/copyleft/gpl.html. + +from virtinst import Storage + +ROOT_TARGET_PATH="/var/lib/libvirt/images/%s" + +class PoolConfig: + def __init__(self, libvirt): + self.__libvirt = libvirt + self.__name = "" + self.set_type(None) + self.__format = None + self.__hostname = "" + self.__target_path = "" + self.__source_path = "" + self.__build_pool = False + + def get_pool(self): + return self.__pool + + def set_name(self, name): + self.__name = name + + def get_name(self): + return self.__name + + def set_type(self, pooltype): + self.__type = pooltype + self.__needs_target_path = False + self.__needs_format = False + self.__needs_hostname = False + self.__needs_source_path = False + self.__needs_build_pool = False + if pooltype is not None: + if pooltype is Storage.StoragePool.TYPE_DIR: + self.__needs_target_path = True + self.__target_path = ROOT_TARGET_PATH % self.__name + self.__build_pool = True + elif pooltype is Storage.StoragePool.TYPE_DISK: + self.__needs_target_path = True + self.__needs_format = True + self.__needs_source_path = True + self.__needs_build_pool = True + elif pooltype is Storage.StoragePool.TYPE_FS: + self.__needs_target_path = True + self.__needs_format = True + self.__needs_source_path = True + self.__build_pool = True + elif pooltype is Storage.StoragePool.TYPE_ISCSI: + self.__needs_target_path = True + self.__needs_hostname = True + self.__needs_source_path = True + self.__build_pool = False + elif pooltype 
is Storage.StoragePool.TYPE_LOGICAL: + self.__needs_target_path = True + self.__needs_source_path = True + self.__needs_build_pool = True + elif pooltype is Storage.StoragePool.TYPE_NETFS: + self.__needs_target_path = True + self.__needs_format = True + self.__needs_hostname = True + self.__needs_source_path = True + self.__build_pool = True + # create pool + pool_class = Storage.StoragePool.get_pool_class(self.__type) + self.__pool = pool_class(name = self.__name, + conn = self.__libvirt.get_connection()) + if self.__needs_format: + self.__format = self.__pool.formats[0] + else: + self.__type = Storage.StoragePool.get_pool_types()[0] + + def get_type(self): + return self.__type + + def needs_target_path(self): + return self.__needs_target_path + + def needs_format(self): + return self.__needs_format + + def needs_hostname(self): + return self.__needs_hostname + + def source_must_be_absolute(self): + if self.__type is Storage.StoragePool.TYPE_ISCSI: + return False + return True + + def needs_source_path(self): + return self.__needs_source_path + + def needs_build_pool(self): + return self.__needs_build_pool + + def set_target_path(self, path): + self.__target_path = path + + def get_target_path(self): + return self.__target_path + + def get_formats(self): + return self.__pool.formats + + def set_format(self, format): + self.__format = format + + def get_format(self): + return self.__format + + def set_hostname(self, hostname): + self.__hostname = hostname + + def get_hostname(self): + return self.__hostname + + def set_source_path(self, source_path): + self.__source_path = source_path + + def get_source_path(self): + return self.__source_path + + def set_build_pool(self, build_pool): + self.__build_pool = build_pool + + def get_build_pool(self): + return self.__build_pool diff --git a/nodeadmin/removepool.py b/nodeadmin/removepool.py new file mode 100644 index 0000000..7a7f46d --- /dev/null +++ b/nodeadmin/removepool.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +# +# 
removepool.py - Copyright (C) 2009 Red Hat, Inc. +# Written by Darryl L. Pierce +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +# MA 02110-1301, USA. A copy of the GNU General Public License is +# also available at http://www.gnu.org/copyleft/gpl.html. + +from snack import * +from configscreen import * + +LIST_POOLS_PAGE = 1 +CONFIRM_PAGE = 2 + +class RemoveStoragePoolConfigScreen(StorageListConfigScreen): + def __init__(self): + StorageListConfigScreen.__init__(self, "Remove A Storage Pool") + + def get_elements_for_page(self, screen, page): + if page is LIST_POOLS_PAGE: return self.get_storage_pool_list_page(screen) + elif page is CONFIRM_PAGE: return self.get_confirm_page(screen) + + def page_has_next(self, page): + return page is LIST_POOLS_PAGE and self.has_selectable_pools() + + def page_has_back(self, page): + return False + + def page_has_finish(self, page): + return page is CONFIRM_PAGE + + def validate_input(self, page, errors): + if page is LIST_POOLS_PAGE: + if self.get_selected_pool() is not None: + return True + else: + errors.append("Please select a storage pool to be removed.") + elif page is CONFIRM_PAGE: + if self.__confirm.value(): + return True + else: + errors.append("You must confirm removing a storage pool.") + return False + + def process_input(self, page): + if page is CONFIRM_PAGE: + 
self.get_libvirt().destroy_storage_pool(self.get_selected_pool()) + self.get_libvirt().undefine_storage_pool(self.get_selected_pool()) + self.set_finished() + + def get_confirm_page(self, screen): + self.__confirm = Checkbox("Check here to confirm deleting pool: %s" % self.get_selected_pool()) + grid = Grid(1, 1) + grid.setField(self.__confirm, 0, 0) + return [Label("Remove Selected Storage Pool"), + grid] + +def RemoveStoragePool(): + screen = RemoveStoragePoolConfigScreen() + screen.start() diff --git a/nodeadmin/removevolume.py b/nodeadmin/removevolume.py new file mode 100644 index 0000000..5ad3058 --- /dev/null +++ b/nodeadmin/removevolume.py @@ -0,0 +1,76 @@ +# removevolume.py - Copyright (C) 2009 Red Hat, Inc. +# Written by Darryl L. Pierce +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +# MA 02110-1301, USA. A copy of the GNU General Public License is +# also available at http://www.gnu.org/copyleft/gpl.html. 
+ +from snack import * +import traceback + +from createmeter import CreateMeter +from configscreen import * +from volumeconfig import StorageVolumeConfig +from utils import * + +SELECT_POOL_PAGE = 1 +SELECT_VOLUME_PAGE = 2 +CONFIRM_PAGE = 3 + +class RemoveVolumeConfigScreen(StorageListConfigScreen): + def __init__(self): + StorageListConfigScreen.__init__(self, "Add A New Storage Volume") + self.__config = StorageVolumeConfig() + + def get_elements_for_page(self, screen, page): + if page is SELECT_POOL_PAGE: return self.get_storage_pool_list_page(screen) + elif page is SELECT_VOLUME_PAGE: return self.get_storage_volume_list_page(screen) + elif page is CONFIRM_PAGE: return self.get_confirm_page(screen) + + def page_has_next(self, page): + if page is SELECT_POOL_PAGE: return self.has_selectable_pools() + elif page is SELECT_VOLUME_PAGE: return self.has_selectable_volumes() + return False + + def validate_input(self, page, errors): + if page is SELECT_POOL_PAGE: return self.get_selected_pool() is not None + elif page is SELECT_VOLUME_PAGE: return self.get_selected_volume() is not None + elif page is CONFIRM_PAGE: + if self.__confirm.value(): + return True + else: + errors.append("You must confirm deleting a storage volume.") + return False + + def process_input(self, page): + if page is CONFIRM_PAGE: + self.get_libvirt().remove_storage_volume(self.get_selected_pool(), self.get_selected_volume()) + self.set_finished() + + def page_has_back(self, page): + return page > SELECT_POOL_PAGE + + def page_has_finish(self, page): + return page is CONFIRM_PAGE + + def get_confirm_page(self, screen): + self.__confirm = Checkbox("Check here to confirm deleting volume: %s" % self.get_selected_volume()) + grid = Grid(1, 1) + grid.setField(self.__confirm, 0, 0) + return [Label("Remove Selected Storage Volume"), + grid] + +def RemoveStorageVolume(): + screen = RemoveVolumeConfigScreen() + screen.start() diff --git a/nodeadmin/setup.py.in b/nodeadmin/setup.py.in index 1e6e028..17bfe93 
100644 --- a/nodeadmin/setup.py.in +++ b/nodeadmin/setup.py.in @@ -35,5 +35,12 @@ setup(name = "nodeadmin", 'createnet = nodeadmin.createnetwork:CreateNetwork', 'destroynet = nodeadmin.destroynetwork:DestroyNetwork', 'undefinenet = nodeadmin.undefinenetwork:UndefineNetwork', - 'listnets = nodeadmin.listnetworks:ListNetworks'] + 'listnets = nodeadmin.listnetworks:ListNetworks', + 'addpool = nodeadmin.addpool:AddStoragePool', + 'rmpool = nodeadmin.removepool:RemoveStoragePool', + 'startpool = nodeadmin.startpool:StartStoragePool', + 'stoppool = nodeadmin.stoppool:StopStoragePool', + 'addvolume = nodeadmin.addvolume:AddStorageVolume', + 'rmvolume = nodeadmin.removevolume:RemoveStorageVolume', + 'listpools = nodeadmin.listpools:ListPools'] }) diff --git a/nodeadmin/startpool.py b/nodeadmin/startpool.py new file mode 100644 index 0000000..8a84512 --- /dev/null +++ b/nodeadmin/startpool.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python +# +# startpool.py - Copyright (C) 2009 Red Hat, Inc. +# Written by Darryl L. Pierce +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +# MA 02110-1301, USA. A copy of the GNU General Public License is +# also available at http://www.gnu.org/copyleft/gpl.html. 
+ +from snack import * +from configscreen import * + +LIST_POOLS_PAGE = 1 +FINAL_PAGE = 2 + +class StartStoragePoolConfigScreen(StorageListConfigScreen): + def __init__(self): + StorageListConfigScreen.__init__(self, "Start A Storage Pool") + + def get_elements_for_page(self, screen, page): + if page is LIST_POOLS_PAGE: return self.get_storage_pool_list_page(screen, created = False) + elif page is FINAL_PAGE: return self.get_final_page(screen) + + def page_has_next(self, page): + return page is LIST_POOLS_PAGE and self.has_selectable_pools() + + def page_has_back(self, page): + return False + + def page_has_finish(self, page): + return page is FINAL_PAGE + + def validate_input(self, page, errors): + if page is LIST_POOLS_PAGE: + if self.get_selected_pool() is not None: + return True + else: + errors.append("Please select a storage pool to be started.") + return False + + def process_input(self, page): + if page is LIST_POOLS_PAGE: + self.get_libvirt().create_storage_pool(self.get_selected_pool()) + self.set_finished() + + def get_final_page(self, screen): + return [Label("Storage pool started: %s" % self.get_selected_pool())] + +def StartStoragePool(): + screen = StartStoragePoolConfigScreen() + screen.start() diff --git a/nodeadmin/stoppool.py b/nodeadmin/stoppool.py new file mode 100644 index 0000000..0522b95 --- /dev/null +++ b/nodeadmin/stoppool.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python +# +# stoppool.py - Copyright (C) 2009 Red Hat, Inc. +# Written by Darryl L. Pierce +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +# MA 02110-1301, USA. A copy of the GNU General Public License is +# also available at http://www.gnu.org/copyleft/gpl.html. + +from snack import * +from configscreen import * + +LIST_POOLS_PAGE = 1 +FINAL_PAGE = 2 + +class StopStoragePoolConfigScreen(StorageListConfigScreen): + def __init__(self): + StorageListConfigScreen.__init__(self, "Stop A Storage Pool") + + def get_elements_for_page(self, screen, page): + if page is LIST_POOLS_PAGE: return self.get_storage_pool_list_page(screen, defined = False) + elif page is FINAL_PAGE: return self.get_final_page(screen) + + def page_has_next(self, page): + return page is LIST_POOLS_PAGE and self.has_selectable_pools() + + def page_has_back(self, page): + return False + + def page_has_finish(self, page): + return page is FINAL_PAGE + + def validate_input(self, page, errors): + if page is LIST_POOLS_PAGE: + if self.get_selected_pool() is not None: + return True + else: + errors.append("Please select a storage pool to be stopped.") + return False + + def process_input(self, page): + if page is LIST_POOLS_PAGE: + self.get_libvirt().destroy_storage_pool(self.get_selected_pool()) + self.set_finished() + + def get_final_page(self, screen): + return [Label("Storage pool stopped: %s" % self.get_selected_pool())] + +def StopStoragePool(): + screen = StopStoragePoolConfigScreen() + screen.start() diff --git a/nodeadmin/storagemenu.py b/nodeadmin/storagemenu.py new file mode 100644 index 0000000..0b56dae --- /dev/null +++ b/nodeadmin/storagemenu.py @@ -0,0 +1,63 @@ +# storagemenu.py - Copyright (C) 2009 Red Hat, Inc. +# Written by Darryl L. 
Pierce +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +# MA 02110-1301, USA. A copy of the GNU General Public License is +# also available at http://www.gnu.org/copyleft/gpl.html. + +from snack import * +import traceback + +from menuscreen import MenuScreen +from addpool import AddStoragePool +from startpool import StartStoragePool +from stoppool import StopStoragePool +from removepool import RemoveStoragePool +from addvolume import AddStorageVolume +from removevolume import RemoveStorageVolume +from listpools import ListStoragePools + +ADD_POOL = 1 +START_POOL = 2 +STOP_POOL = 3 +REMOVE_POOL = 4 +ADD_VOLUME = 5 +REMOVE_VOLUME = 6 +LIST_POOLS = 7 + +class StoragePoolMenuScreen(MenuScreen): + def __init__(self): + MenuScreen.__init__(self, "Storage Pool Administration") + + def get_menu_items(self): + return (("Add A Storage Pool", ADD_POOL), + ("Start A Storage Pool", START_POOL), + ("Stop A Storage Pool", STOP_POOL), + ("Remove A Storage Pool", REMOVE_POOL), + ("Add A Storage Volume", ADD_VOLUME), + ("Remove A Storage Volume", REMOVE_VOLUME), + ("List Storage Pools", LIST_POOLS)) + + def handle_selection(self, item): + if item is ADD_POOL: AddStoragePool() + elif item is START_POOL: StartStoragePool() + elif item is STOP_POOL: StopStoragePool() + elif item is REMOVE_POOL: RemoveStoragePool() + elif item is ADD_VOLUME: AddStorageVolume() + elif item is REMOVE_VOLUME: 
RemoveStorageVolume() + elif item is LIST_POOLS: ListStoragePools() + +def StoragePoolMenu(): + screen = StoragePoolMenuScreen() + screen.start() diff --git a/nodeadmin/utils.py b/nodeadmin/utils.py index 55a838c..28ccb8b 100644 --- a/nodeadmin/utils.py +++ b/nodeadmin/utils.py @@ -17,9 +17,19 @@ # also available at http://www.gnu.org/copyleft/gpl.html. import logging +import re logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%a, %d %b %Y %H:%M:%S', filename='/var/log/ovirt-nodeadmin.log', filemode='w') + +def string_is_not_blank(value): + if len(value) > 0: return True + return False + +def string_has_no_spaces(value): + if re.match("^[a-zA-Z0-9_]*$", value): + return True + return False diff --git a/nodeadmin/volumeconfig.py b/nodeadmin/volumeconfig.py new file mode 100644 index 0000000..86ada74 --- /dev/null +++ b/nodeadmin/volumeconfig.py @@ -0,0 +1,83 @@ +# volumeconfig.py - Copyright (C) 2009 Red Hat, Inc. +# Written by Darryl L. Pierce +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +# MA 02110-1301, USA. A copy of the GNU General Public License is +# also available at http://www.gnu.org/copyleft/gpl.html. 
+ +import virtinst +from virtinst import Storage + +class StorageVolumeConfig: + def __init__(self): + self.__pool = None + self.__name = "" + self.__formats = None + self.__format = None + self.__max_capacity = 10000 + self.__allocation = 0 + + def set_pool(self, pool): + self.__pool = pool + self.__formats = None + self.__pool_type = virtinst.util.get_xml_path(self.__pool.XMLDesc(0), '/pool/@type') + self.__volume_class = Storage.StoragePool.get_volume_for_pool(self.__pool_type) + + def get_pool(self): + return self.__pool + + def create_volume(self): + volume = self.__volume_class(name = self.__name + ".img", + allocation = self.__allocation * 1024**2, + capacity = self.__max_capacity * 1024**2, + pool = self.__pool) + volume.pool = self.__pool + if self.needs_format(): + volume.format = self.__format + return volume + + def set_name(self, name): + self.__name = name + + def get_name(self): + return self.__name + + def needs_format(self): + if self.__pool.__dict__.keys().count("get_formats_for_pool") > 0: + return self.__pool.get_formats_for_pool() is not 0 + else: + return False + + def get_formats_for_pool(self): + if self.__formats is None: + self.__formats = self.__volume_class.formats + return self.__formats + + def set_format(self, format): + self.__format = format + + def get_format(self): + return self.__format + + def set_max_capacity(self, capacity): + self.__max_capacity = capacity + + def get_max_capacity(self): + return self.__max_capacity + + def set_allocation(self, allocation): + self.__allocation = allocation + + def get_allocation(self): + return self.__allocation diff --git a/ovirt-node.spec.in b/ovirt-node.spec.in index d23a4ef..6509fa0 100644 --- a/ovirt-node.spec.in +++ b/ovirt-node.spec.in @@ -369,11 +369,18 @@ fi %{_initrddir}/ovirt-functions %defattr(-,root,root,0644) %{_bindir}/nodeadmin +%{_bindir}/addpool %{_bindir}/addvm +%{_bindir}/addvolume %{_bindir}/startvm %{_bindir}/stopvm %{_bindir}/rmvm +%{_bindir}/listpools 
%{_bindir}/listvms +%{_bindir}/rmpool +%{_bindir}/rmvolume +%{_bindir}/startpool +%{_bindir}/stoppool %{_bindir}/definenet %{_bindir}/createnet %{_bindir}/destroynet -- 1.6.2.5 From dpierce at redhat.com Mon Nov 9 19:20:38 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Mon, 9 Nov 2009 14:20:38 -0500 Subject: [Ovirt-devel] [PATCH 2/2] Refactor domain storage setup to use pool and volume selection screens. In-Reply-To: <1257794438-10826-2-git-send-email-dpierce@redhat.com> References: <1257794438-10826-1-git-send-email-dpierce@redhat.com> <1257794438-10826-2-git-send-email-dpierce@redhat.com> Message-ID: <1257794438-10826-3-git-send-email-dpierce@redhat.com> Now, when the user elects to use managed storage, they're shown the list of available storage pools. Then, after selecting one, the user is shown the list of volumes on that pool. These are then used to create the domain. Signed-off-by: Darryl L. Pierce --- Makefile.am | 1 + nodeadmin/adddomain.py | 186 ++++++++++++++++++++++++++------------ nodeadmin/domainconfig.py | 17 +++- nodeadmin/libvirtworker.py | 34 ++++---- 4 files changed, 142 insertions(+), 96 deletions(-) diff --git a/Makefile.am b/Makefile.am index 55ef277..e712d6a 100644 --- a/Makefile.am +++ b/Makefile.am @@ -48,6 +48,7 @@ EXTRA_DIST = \ nodeadmin/netmenu.py \ nodeadmin/nodeadmin.py \ nodeadmin/nodemenu.py \ + nodeadmin/poolconfig.py \ nodeadmin/removedomain.py \ nodeadmin/removepool.py \ nodeadmin/removevolume.py \ diff --git a/nodeadmin/adddomain.py b/nodeadmin/adddomain.py index bb06a62..34aa59c 100755 --- a/nodeadmin/adddomain.py +++ b/nodeadmin/adddomain.py @@ -37,10 +37,11 @@ OS_VARIANT_PAGE = 12 RAM_CPU_PAGE = 13 ENABLE_STORAGE_PAGE = 14 LOCAL_STORAGE_PAGE = 15 -MANAGED_STORAGE_PAGE = 16 -BRIDGE_PAGE = 17 -VIRT_DETAILS_PAGE = 18 -CONFIRM_PAGE = 19 +SELECT_POOL_PAGE = 16 +SELECT_VOLUME_PAGE = 17 +BRIDGE_PAGE = 18 +VIRT_DETAILS_PAGE = 19 +CONFIRM_PAGE = 20 LOCATION="location" KICKSTART="kickstart" @@ -58,24 +59,25 @@ class
DomainConfigScreen(ConfigScreen): self.__config.set_virt_type(self.get_libvirt().get_default_virt_type()) def get_elements_for_page(self, screen, page): - if page == VM_DETAILS_PAGE: return self.get_vm_details_page(screen) - elif page == LOCAL_INSTALL_PAGE: return self.get_local_install_page(screen) - elif page == SELECT_CDROM_PAGE: return self.get_select_cdrom_page(screen) - elif page == SELECT_ISO_PAGE: return self.get_select_iso_page(screen) - elif page == NETWORK_INSTALL_PAGE: return self.get_network_install_page(screen) - elif page == OS_TYPE_PAGE: return self.get_os_type_page(screen) - elif page == OS_VARIANT_PAGE: return self.get_os_variant_page(screen) - elif page == RAM_CPU_PAGE: return self.get_ram_and_cpu_page(screen) - elif page == ENABLE_STORAGE_PAGE: return self.get_enable_storage_page(screen) - elif page == LOCAL_STORAGE_PAGE: return self.get_local_storage_page(screen) - elif page == MANAGED_STORAGE_PAGE: return self.get_managed_storage_page(screen) - elif page == BRIDGE_PAGE: return self.get_bridge_page(screen) - elif page == VIRT_DETAILS_PAGE: return self.get_virt_details_page(screen) - elif page == CONFIRM_PAGE: return self.get_confirm_page(screen) + if page is VM_DETAILS_PAGE: return self.get_vm_details_page(screen) + elif page is LOCAL_INSTALL_PAGE: return self.get_local_install_page(screen) + elif page is SELECT_CDROM_PAGE: return self.get_select_cdrom_page(screen) + elif page is SELECT_ISO_PAGE: return self.get_select_iso_page(screen) + elif page is NETWORK_INSTALL_PAGE: return self.get_network_install_page(screen) + elif page is OS_TYPE_PAGE: return self.get_os_type_page(screen) + elif page is OS_VARIANT_PAGE: return self.get_os_variant_page(screen) + elif page is RAM_CPU_PAGE: return self.get_ram_and_cpu_page(screen) + elif page is ENABLE_STORAGE_PAGE: return self.get_enable_storage_page(screen) + elif page is LOCAL_STORAGE_PAGE: return self.get_local_storage_page(screen) + elif page is SELECT_POOL_PAGE: return 
self.get_select_pool_page(screen) + elif page is SELECT_VOLUME_PAGE: return self.get_select_volume_page(screen) + elif page is BRIDGE_PAGE: return self.get_bridge_page(screen) + elif page is VIRT_DETAILS_PAGE: return self.get_virt_details_page(screen) + elif page is CONFIRM_PAGE: return self.get_confirm_page(screen) return [] def validate_input(self, page, errors): - if page == VM_DETAILS_PAGE: + if page is VM_DETAILS_PAGE: if len(self.__guest_name.value()) > 0: if self.get_libvirt().domain_exists(self.__guest_name.value()): errors.append("Guest name '%s' is already in use." % self.__guest_name.value()) @@ -83,12 +85,12 @@ class DomainConfigScreen(ConfigScreen): return True else: errors.append("Guest name must be a string between 0 and 50 characters.") - elif page == LOCAL_INSTALL_PAGE: + elif page is LOCAL_INSTALL_PAGE: if self.__install_source.getSelection() == DomainConfig.INSTALL_SOURCE_CDROM: return True elif self.__install_source.getSelection() == DomainConfig.INSTALL_SOURCE_ISO: return True - elif page == SELECT_CDROM_PAGE: + elif page is SELECT_CDROM_PAGE: if self.__install_media.getSelection() != None: if len(self.get_hal().list_installable_volumes()) == 0: errors.append("No installable media is available.") @@ -96,7 +98,7 @@ class DomainConfigScreen(ConfigScreen): return True else: errors.append("You must select an install media.") - elif page == SELECT_ISO_PAGE: + elif page is SELECT_ISO_PAGE: if len(self.__iso_path.value()) > 0: if os.path.exists(self.__iso_path.value()): if os.path.isfile(self.__iso_path.value()): @@ -108,14 +110,14 @@ class DomainConfigScreen(ConfigScreen): errors.append(self.__iso_path.value()) else: errors.append("An install media selection is required.") - elif page == NETWORK_INSTALL_PAGE: + elif page is NETWORK_INSTALL_PAGE: if len(self.__install_url.value()) > 0: return True else: errors.append("An install tree is required.") - elif page == OS_TYPE_PAGE: return True - elif page == OS_VARIANT_PAGE: return True - elif page == 
RAM_CPU_PAGE: + elif page is OS_TYPE_PAGE: return True + elif page is OS_VARIANT_PAGE: return True + elif page is RAM_CPU_PAGE: if (len(self.__memory.value()) > 0 and len(self.__cpus.value()) > 0) \ and (int(self.__memory.value()) > 0 and int(self.__cpus.value()) > 0): return True @@ -128,8 +130,8 @@ class DomainConfigScreen(ConfigScreen): errors.append("A value must be entered for CPUs.") elif int(self.__cpus.value()) <= 0: errors.append("A positive integer value must be entered for memory.") - elif page == ENABLE_STORAGE_PAGE: return True - elif page == LOCAL_STORAGE_PAGE: + elif page is ENABLE_STORAGE_PAGE: return True + elif page is LOCAL_STORAGE_PAGE: if len(self.__storage_size.value()) > 0: if float(self.__storage_size.value()) > 0: return True @@ -137,12 +139,17 @@ class DomainConfigScreen(ConfigScreen): errors.append("A positive value must be entered for the storage size.") else: errors.append("A value must be entered for the storage size.") - elif page == MANAGED_STORAGE_PAGE: - if self.__existing_storage.getSelection() is not None: + elif page is SELECT_POOL_PAGE: + if self.__storage_pool.getSelection() is not None: + return True + else: + errors.append("Please select a storage pool.") + elif page is SELECT_VOLUME_PAGE: + if self.__storage_volume.getSelection() is not None: return True else: errors.append("Please select a storage volume.") - elif page == BRIDGE_PAGE: + elif page is BRIDGE_PAGE: if self.__network_bridges.getSelection() != None: if len(self.__mac_address.value()) > 0: # TODO: regex check the format @@ -151,62 +158,66 @@ class DomainConfigScreen(ConfigScreen): errors.append("MAC address must be supplied.") else: errors.append("A network bridge must be selected.") - elif page == VIRT_DETAILS_PAGE: + elif page is VIRT_DETAILS_PAGE: if self.__virt_types.getSelection() != None and self.__architectures.getSelection() != None: return True if self.__virt_types.getSelection() is None: errors.append("Please select a virtualization type.") if 
self.__architectures.getSelection() is None: errors.append("Please selection an architecture.") - elif page == CONFIRM_PAGE: return True + elif page is CONFIRM_PAGE: return True return False def process_input(self, page): - if page == VM_DETAILS_PAGE: + if page is VM_DETAILS_PAGE: self.__config.set_guest_name(self.__guest_name.value()) self.__config.set_install_type(self.__install_type.getSelection()) - elif page == LOCAL_INSTALL_PAGE: + elif page is LOCAL_INSTALL_PAGE: self.__config.set_use_cdrom_source(self.__install_source.getSelection() == DomainConfig.INSTALL_SOURCE_CDROM) - elif page == SELECT_CDROM_PAGE: + elif page is SELECT_CDROM_PAGE: self.__config.set_install_media(self.__install_media.getSelection()) - elif page == SELECT_ISO_PAGE: + elif page is SELECT_ISO_PAGE: self.__config.set_iso_path(self.__iso_path.value()) - elif page == NETWORK_INSTALL_PAGE: + elif page is NETWORK_INSTALL_PAGE: self.__config.set_install_url(self.__install_url.value()) self.__config.set_kickstart_url(self.__kickstart_url.value()) self.__config.set_kernel_options(self.__kernel_options.value()) - elif page == OS_TYPE_PAGE: + elif page is OS_TYPE_PAGE: self.__config.set_os_type(self.__os_types.getSelection()) - elif page == OS_VARIANT_PAGE: + elif page is OS_VARIANT_PAGE: self.__config.set_os_variant(self.__os_variants.getSelection()) - elif page == RAM_CPU_PAGE: + elif page is RAM_CPU_PAGE: self.__config.set_memory(int(self.__memory.value())) self.__config.set_cpus(int(self.__cpus.value())) - elif page == ENABLE_STORAGE_PAGE: + elif page is ENABLE_STORAGE_PAGE: self.__config.set_enable_storage(self.__enable_storage.value()) if self.__storage_type.getSelection() == DomainConfig.NEW_STORAGE: self.__config.set_use_local_storage(True) elif self.__storage_type.getSelection() == DomainConfig.EXISTING_STORAGE: self.__config.set_use_local_storage(False) - elif page == LOCAL_STORAGE_PAGE: + elif page is LOCAL_STORAGE_PAGE: self.__config.set_storage_size(float(self.__storage_size.value())) 
self.__config.set_allocate_storage(self.__allocate_storage.value()) - elif page == MANAGED_STORAGE_PAGE: + elif page is SELECT_POOL_PAGE: self.__config.set_use_local_storage(False) - self.__config.set_existing_storage(self.__existing_storage.getSelection()) - self.__config.set_storage_size(self.get_libvirt().get_storage_size(self.__existing_storage.getSelection())) - elif page == BRIDGE_PAGE: + self.__config.set_storage_pool(self.__storage_pool.getSelection()) + elif page is SELECT_VOLUME_PAGE: + self.__config.set_storage_volume(self.__storage_volume.getSelection()) + volume = self.get_libvirt().get_storage_volume(self.__config.get_storage_pool(), + self.__config.get_storage_volume()) + self.__config.set_storage_size(volume.info()[1] / 1024.0 ** 3) + elif page is BRIDGE_PAGE: self.__config.set_network_bridge(self.__network_bridges.getSelection()) - elif page == VIRT_DETAILS_PAGE: + elif page is VIRT_DETAILS_PAGE: self.__config.set_virt_type(self.__virt_types.getSelection()) self.__config.set_architecture(self.__architectures.getSelection()) - elif page == CONFIRM_PAGE: + elif page is CONFIRM_PAGE: self.get_libvirt().define_domain(self.__config, CreateMeter()) self.set_finished() def get_back_page(self, page): result = page - if page == OS_TYPE_PAGE: + if page is OS_TYPE_PAGE: install_type = self.__config.get_install_type() if install_type == DomainConfig.LOCAL_INSTALL: if self.__config.get_use_cdrom_source(): @@ -217,24 +228,26 @@ class DomainConfigScreen(ConfigScreen): result = NETWORK_INSTALL_PAGE elif install_type == DomainConfig.PXE_INSTALL: result = VM_DETAILS_PAGE - elif page == LOCAL_STORAGE_PAGE or page == MANAGED_STORAGE_PAGE: + elif page is LOCAL_STORAGE_PAGE or page is SELECT_VOLUME_PAGE: result = ENABLE_STORAGE_PAGE - elif page == NETWORK_INSTALL_PAGE: + elif page is SELECT_POOL_PAGE: + result = ENABLE_STORAGE_PAGE + elif page is NETWORK_INSTALL_PAGE: result = VM_DETAILS_PAGE - elif page == SELECT_CDROM_PAGE or page == SELECT_ISO_PAGE: + elif page is 
SELECT_CDROM_PAGE or page is SELECT_ISO_PAGE: result = LOCAL_INSTALL_PAGE - elif page == BRIDGE_PAGE: + elif page is BRIDGE_PAGE: if self.__config.get_use_local_storage(): result = LOCAL_STORAGE_PAGE else: - result = MANAGED_STORAGE_PAGE + result = SELECT_VOLUME_PAGE else: if page > 1: result = page - 1 return result def get_next_page(self, page): result = page - if page == VM_DETAILS_PAGE: + if page is VM_DETAILS_PAGE: install_type = self.__config.get_install_type() if install_type == DomainConfig.LOCAL_INSTALL: result = LOCAL_INSTALL_PAGE @@ -242,34 +255,36 @@ class DomainConfigScreen(ConfigScreen): result = NETWORK_INSTALL_PAGE elif install_type == DomainConfig.PXE_INSTALL: result = OS_TYPE_PAGE - elif page == LOCAL_INSTALL_PAGE: + elif page is LOCAL_INSTALL_PAGE: if self.__config.get_use_cdrom_source(): result = SELECT_CDROM_PAGE else: result = SELECT_ISO_PAGE - elif page == SELECT_CDROM_PAGE or page == SELECT_ISO_PAGE: + elif page is SELECT_CDROM_PAGE or page is SELECT_ISO_PAGE: result = OS_TYPE_PAGE - elif page == NETWORK_INSTALL_PAGE: + elif page is NETWORK_INSTALL_PAGE: result = OS_TYPE_PAGE - elif page == ENABLE_STORAGE_PAGE: + elif page is ENABLE_STORAGE_PAGE: result = BRIDGE_PAGE if self.__config.get_enable_storage(): if self.__config.get_use_local_storage(): result = LOCAL_STORAGE_PAGE else: - result = MANAGED_STORAGE_PAGE - elif page == LOCAL_STORAGE_PAGE or page == MANAGED_STORAGE_PAGE: + result = SELECT_POOL_PAGE + elif page is LOCAL_STORAGE_PAGE: result = BRIDGE_PAGE else: result = page + 1 return result def page_has_finish(self, page): - if page == CONFIRM_PAGE: return True + if page is CONFIRM_PAGE: return True return False def page_has_next(self, page): - if page < CONFIRM_PAGE: + if page is SELECT_POOL_PAGE: return self.__has_pools + elif page is SELECT_VOLUME_PAGE: return self.__has_volumes + elif page < CONFIRM_PAGE: return True def get_vm_details_page(self, screen): @@ -393,17 +408,36 @@ class DomainConfigScreen(ConfigScreen): return 
[Label("Configure local storage"), grid] - def get_managed_storage_page(self, screen): + def get_select_pool_page(self, screen): + pools = [] + for pool in self.get_libvirt().list_storage_pools(): + pools.append([pool, pool, pool == self.__config.get_storage_pool()]) + if len(pools) > 0: + self.__storage_pool = RadioBar(screen, (pools)) + grid = Grid(2, 1) + grid.setField(Label("Storage pool:"), 0, 0, anchorTop = 1) + grid.setField(self.__storage_pool, 1, 0) + self.__has_pools = True + else: + grid = Label("There are no storage pools available.") + self.__has_pools = False + return [Label("Configure Managed Storage: Select A Pool"), + grid] + + def get_select_volume_page(self, screen): volumes = [] - for volume in self.get_libvirt().list_storage_volumes(): - volumes.append(["%s (%d GB)" % (volume.name(), volume.info()[1] / (1024 ** 3)), - volume.name(), - self.__config.is_existing_storage(volume.name())]) - self.__existing_storage = RadioBar(screen, (volumes)) - grid = Grid(2, 1) - grid.setField(Label("Existing storage:"), 0, 0) - grid.setField(self.__existing_storage, 1, 0) - return [Label("Configure managed storage"), + for volume in self.get_libvirt().list_storage_volumes(self.__config.get_storage_pool()): + volumes.append([volume, volume, volume == self.__config.get_storage_volume()]) + if len(volumes) > 0: + self.__storage_volume = RadioBar(screen, (volumes)) + grid = Grid(2, 1) + grid.setField(Label("Storage volumes:"), 0, 0, anchorTop = 1) + grid.setField(self.__storage_volume, 1, 0) + self.__has_volumes = True + else: + grid = Label("This storage pool has no defined volumes.") + self.__has_volumes = False + return [Label("Configure Managed Storage: Select A Volume"), grid] def get_bridge_page(self, screen): @@ -448,7 +482,9 @@ class DomainConfigScreen(ConfigScreen): grid.setField(Label("CPUs:"), 0, 3, anchorRight = 1) grid.setField(Label("%d" % self.__config.get_cpus()), 1, 3, anchorLeft = 1) grid.setField(Label("Storage:"), 0, 4, anchorRight = 1) - 
grid.setField(Label(self.__config.get_existing_storage()), 1, 4, anchorLeft = 1) + grid.setField(Label("%s (on %s)" % (self.__config.get_storage_volume(), + self.__config.get_storage_pool())), + 1, 4, anchorLeft = 1) grid.setField(Label("Network:"), 0, 5, anchorRight = 1) grid.setField(Label(self.__config.get_network_bridge()), 1, 5, anchorLeft = 1) return [Label("Ready to begin installation of %s" % self.__config.get_guest_name()), diff --git a/nodeadmin/domainconfig.py b/nodeadmin/domainconfig.py index ef39fe0..4466e67 100644 --- a/nodeadmin/domainconfig.py +++ b/nodeadmin/domainconfig.py @@ -50,7 +50,8 @@ class DomainConfig: self.__use_local_storage = True self.__storage_size = 8.0 self.__allocate_storage = True - self.__existing_storage = "" + self.__storage_pool = "" + self.__storage_volume = "" self.__network_bridge = None self.__mac_address = None self.__virt_type = None @@ -177,11 +178,17 @@ class DomainConfig: def get_allocate_storage(self): return self.__allocate_storage - def set_existing_storage(self, storage): - self.__existing_storage = storage + def set_storage_pool(self, pool): + self.__storage_pool = pool - def get_existing_storage(self): - return self.__existing_storage + def get_storage_pool(self): + return self.__storage_pool + + def set_storage_volume(self, volume): + self.__storage_volume = volume + + def get_storage_volume(self): + return self.__storage_volume def is_existing_storage(self, storage): return self.__existing_storage == storage diff --git a/nodeadmin/libvirtworker.py b/nodeadmin/libvirtworker.py index b2acabe..f31266c 100644 --- a/nodeadmin/libvirtworker.py +++ b/nodeadmin/libvirtworker.py @@ -196,6 +196,11 @@ class LibvirtWorker: '''Returns the storage pool with the specified name.''' return self.__conn.storagePoolLookupByName(name) + def list_storage_volumes(self, poolname): + '''Returns the list of all defined storage volumes for a given pool.''' + pool = self.get_storage_pool(poolname) + return pool.listVolumes() + def 
define_storage_volume(self, config, meter): '''Defines a new storage volume.''' self.create_storage_pool(config.get_pool().name()) @@ -204,10 +209,15 @@ class LibvirtWorker: def remove_storage_volume(self, poolname, volumename): '''Removes the specified storage volume.''' - pool = self.get_storage_pool(poolname) - volume = pool.storageVolLookupByName(volumename) + volume = self.get_storage_volume(poolname, volumename) volume.delete(0) + def get_storage_volume(self, poolname, volumename): + '''Returns a reference to the specified storage volume.''' + pool =self.get_storage_pool(poolname) + volume = pool.storageVolLookupByName(volumename) + return volume + def list_bridges(self): '''Lists all defined and active bridges.''' bridges = self.__conn.listNetworks() @@ -221,21 +231,9 @@ class LibvirtWorker: def generate_mac_address(self): return self.__net.macaddr - def list_storage_volumes(self): - '''Lists all defined storage volumes.''' - pools = self.__conn.listStoragePools() - pools.extend(self.__conn.listDefinedStoragePools()) - result = [] - for name in pools: - pool = self.__conn.storagePoolLookupByName(name) - for volname in pool.listVolumes(): - volume = self.__conn.storageVolLookupByPath("/var/lib/libvirt/images/%s" % volname) - result.append(volume) - return result - - def get_storage_size(self, name): + def get_storage_size(self, poolname, volumename): '''Returns the size of the specified storage volume.''' - volume = self.__conn.storageVolLookupByPath("/var/lib/libvirt/images/%s" % name) + volume = self.get_storage_volume(poolname, volumename) return volume.info()[1] / (1024.0 ** 3) def get_virt_types(self): @@ -381,6 +379,10 @@ class LibvirtWorker: pool_object = pool, suffix = ".img") path = os.path.join(DEFAULT_POOL_TARGET_PATH, path) + else: + volume = self.get_storage_volume(config.get_storage_pool(), + config.get_storage_volume()) + path = volume.path() if path is not None: storage= virtinst.VirtualDisk(conn = self.__conn, -- 1.6.2.5 From jboggs at 
redhat.com Mon Nov 9 19:54:50 2009 From: jboggs at redhat.com (Joey Boggs) Date: Mon, 09 Nov 2009 14:54:50 -0500 Subject: [Ovirt-devel] [PATCH 1/2] Provides a new storage administration system to the managed node. In-Reply-To: <1257794438-10826-2-git-send-email-dpierce@redhat.com> References: <1257794438-10826-1-git-send-email-dpierce@redhat.com> <1257794438-10826-2-git-send-email-dpierce@redhat.com> Message-ID: <4AF8738A.3010508@redhat.com> Darryl L. Pierce wrote: > Users can now: > * Add a new storage pool. > * Delete a storage pool. > * Start and stop storage pools. > * Add a new storage volume. > * Delete a storage volume. > * List existing storage pools, with details. > > Signed-off-by: Darryl L. Pierce > --- > Makefile.am | 28 +++++-- > nodeadmin/adddomain.py | 15 +--- > nodeadmin/addpool.py | 183 ++++++++++++++++++++++++++++++++++++++++++++ > nodeadmin/addvolume.py | 177 ++++++++++++++++++++++++++++++++++++++++++ > nodeadmin/configscreen.py | 52 +++++++++++++ > nodeadmin/createmeter.py | 30 +++++++ > nodeadmin/libvirtworker.py | 67 ++++++++++++++-- > nodeadmin/listpools.py | 63 +++++++++++++++ > nodeadmin/mainmenu.py | 24 ++++--- > nodeadmin/poolconfig.py | 143 ++++++++++++++++++++++++++++++++++ > nodeadmin/removepool.py | 72 +++++++++++++++++ > nodeadmin/removevolume.py | 76 ++++++++++++++++++ > nodeadmin/setup.py.in | 9 ++- > nodeadmin/startpool.py | 62 +++++++++++++++ > nodeadmin/stoppool.py | 62 +++++++++++++++ > nodeadmin/storagemenu.py | 63 +++++++++++++++ > nodeadmin/utils.py | 10 +++ > nodeadmin/volumeconfig.py | 83 ++++++++++++++++++++ > ovirt-node.spec.in | 7 ++ > 19 files changed, 1186 insertions(+), 40 deletions(-) > create mode 100644 nodeadmin/addpool.py > create mode 100644 nodeadmin/addvolume.py > create mode 100644 nodeadmin/createmeter.py > create mode 100644 nodeadmin/listpools.py > create mode 100644 nodeadmin/poolconfig.py > create mode 100644 nodeadmin/removepool.py > create mode 100644 nodeadmin/removevolume.py > create mode 100644 
nodeadmin/startpool.py > create mode 100644 nodeadmin/stoppool.py > create mode 100644 nodeadmin/storagemenu.py > create mode 100644 nodeadmin/volumeconfig.py > > diff --git a/Makefile.am b/Makefile.am > index 3ce24c1..55ef277 100644 > --- a/Makefile.am > +++ b/Makefile.am > @@ -28,29 +28,39 @@ EXTRA_DIST = \ > images/syslinux-vesa-splash.jpg \ > nodeadmin/__init__.py \ > nodeadmin/adddomain.py \ > + nodeadmin/addpool.py \ > + nodeadmin/addvolume.py \ > nodeadmin/configscreen.py \ > + nodeadmin/createmeter.py \ > nodeadmin/createnetwork.py \ > nodeadmin/createuser.py \ > + nodeadmin/definenet.py \ > nodeadmin/destroynetwork.py \ > + nodeadmin/domainconfig.py \ > nodeadmin/halworker.py \ > nodeadmin/libvirtworker.py \ > - nodeadmin/userworker.py \ > + nodeadmin/listdomains.py \ > + nodeadmin/listnetworks.py \ > + nodeadmin/listpools.py \ > nodeadmin/mainmenu.py \ > nodeadmin/menuscreen.py \ > + nodeadmin/networkconfig.py \ > nodeadmin/netmenu.py \ > + nodeadmin/nodeadmin.py \ > nodeadmin/nodemenu.py \ > nodeadmin/removedomain.py \ > - nodeadmin/undefinenetwork.py \ > + nodeadmin/removepool.py \ > + nodeadmin/removevolume.py \ > + nodeadmin/setup.py \ > nodeadmin/startdomain.py \ > + nodeadmin/startpool.py \ > nodeadmin/stopdomain.py \ > - nodeadmin/definenet.py \ > - nodeadmin/domainconfig.py \ > - nodeadmin/networkconfig.py \ > - nodeadmin/listdomains.py \ > - nodeadmin/listnetworks.py \ > - nodeadmin/nodeadmin.py \ > - nodeadmin/setup.py \ > + nodeadmin/stoppool.py \ > + nodeadmin/storagemenu.py \ > + nodeadmin/undefinenetwork.py \ > + nodeadmin/userworker.py \ > nodeadmin/utils.py \ > + nodeadmin/volumeconfig.py \ > scripts/collectd.conf.in \ > scripts/ovirt \ > scripts/ovirt-awake \ > diff --git a/nodeadmin/adddomain.py b/nodeadmin/adddomain.py > index 70a2011..bb06a62 100755 > --- a/nodeadmin/adddomain.py > +++ b/nodeadmin/adddomain.py > @@ -20,11 +20,10 @@ > > from snack import * > import os > +from createmeter import CreateMeter > from domainconfig import 
DomainConfig > from configscreen import ConfigScreen > -import urlgrabber.progress as progress > import utils > -import logging > > from virtinst import * > > @@ -51,16 +50,6 @@ OS_VARIANT="os.variant" > MEMORY="memory" > CPUS="cpus" > > -class DummyMeter(progress.BaseMeter): > - def _do_start(self, now = None): > - logging.info("Starting...") > - > - def _do_end(self, amount_read, now = None): > - logging.info("Ending: read=%d" % amount_read) > - > - def _do_update(self, amount_read, now = None): > - logging.info("Update: read=%d" % amount_read) > - > class DomainConfigScreen(ConfigScreen): > def __init__(self): > ConfigScreen.__init__(self, "Create A New Virtual Machine") > @@ -212,7 +201,7 @@ class DomainConfigScreen(ConfigScreen): > self.__config.set_virt_type(self.__virt_types.getSelection()) > self.__config.set_architecture(self.__architectures.getSelection()) > elif page == CONFIRM_PAGE: > - self.get_libvirt().define_domain(self.__config, DummyMeter()) > + self.get_libvirt().define_domain(self.__config, CreateMeter()) > self.set_finished() > > def get_back_page(self, page): > diff --git a/nodeadmin/addpool.py b/nodeadmin/addpool.py > new file mode 100644 > index 0000000..9fa1e7d > --- /dev/null > +++ b/nodeadmin/addpool.py > @@ -0,0 +1,183 @@ > +# addstorage.py - Copyright (C) 2009 Red Hat, Inc. > +# Written by Darryl L. Pierce > +# > +# This program is free software; you can redistribute it and/or modify > +# it under the terms of the GNU General Public License as published by > +# the Free Software Foundation; version 2 of the License. > +# > +# This program is distributed in the hope that it will be useful, > +# but WITHOUT ANY WARRANTY; without even the implied warranty of > +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the > +# GNU General Public License for more details. 
> +# > +# You should have received a copy of the GNU General Public License > +# along with this program; if not, write to the Free Software > +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, > +# MA 02110-1301, USA. A copy of the GNU General Public License is > +# also available at http://www.gnu.org/copyleft/gpl.html. > + > +from snack import * > +import traceback > +import utils > + > +from configscreen import * > +from poolconfig import PoolConfig > +from virtinst import Storage > + > +POOL_NAME_PAGE = 1 > +POOL_DETAILS_PAGE = 2 > +CONFIRM_PAGE = 3 > + > +class AddStoragePoolConfigScreen(ConfigScreen): > + def __init__(self): > + ConfigScreen.__init__(self, "Add A Storage Pool") > + self.__config = PoolConfig(self.get_libvirt()) > + > + def get_elements_for_page(self, screen, page): > + if page is POOL_NAME_PAGE: return self.get_pool_name_page(screen) > + elif page is POOL_DETAILS_PAGE: return self.get_pool_details_page(screen) > + elif page is CONFIRM_PAGE: return self.get_confirm_page(screen) > + > + def page_has_next(self, page): > + return page < CONFIRM_PAGE > + > + def page_has_back(self, page): > + return page > POOL_NAME_PAGE > + > + def page_has_finish(self, page): > + return page is CONFIRM_PAGE > + > + def validate_input(self, page, errors): > + if page is POOL_NAME_PAGE: > + if utils.string_is_not_blank(self.__name.value()): > + if self.get_libvirt().storage_pool_exists(self.__name.value()): > + errors.append("Name '%s' already in use by another pool." % self.__name.value()) > + else: > + return True > + else: > + errors.append("Storage object name must be a string between 0 and 50 characters.") > + elif page is POOL_DETAILS_PAGE: > + result = True > + if self.__config.needs_target_path(): > + if utils.string_is_not_blank(self.__target_path.value()): > + if self.__target_path.value()[0:1] is not '/': > + errors.append("'%s' is not an absolute path." 
% self.__target_path.value()) > + result = False > + else: > + errors.append("You must enter a target path.") > + result = False > + if self.__config.needs_format(): > + if self.__formats.getSelection() is None: > + errors.append("You must select a pool format.") > + result = False > + if self.__config.needs_hostname(): > + if not utils.string_is_not_blank(self.__hostname.value()): > + errors.append("You must enter a hostname.") > + result = False > + if self.__config.needs_source_path(): > + if utils.string_is_not_blank(self.__source_path.value()): > + if self.__config.source_must_be_absolute(): > + if self.__source_path.value()[0:1] is not '/': > + errors.append("'%s' is not an absolute path." % self.__source_path.value()) > + result = False > + else: > + errors.append("you must enter a source path.") > + result = False > + return result > + elif page is CONFIRM_PAGE: return True > + return False > + > + def process_input(self, page): > + if page is POOL_NAME_PAGE: > + self.__config.set_name(self.__name.value()) > + self.__config.set_type(self.__type.getSelection()) > + #self._reset_flags(self.__type.current()) > + elif page is POOL_DETAILS_PAGE: > + if self.__config.needs_target_path(): > + self.__config.set_target_path(self.__target_path.value()) > + if self.__config.needs_format(): > + self.__config.set_format(self.__formats.getSelection()) > + if self.__config.needs_hostname(): > + self.__config.set_hostname(self.__hostname.value()) > + if self.__config.needs_source_path(): > + self.__config.set_source_path(self.__source_path.value()) > + if self.__config.needs_build_pool(): > + self.__config.set_build_pool(self.__build_pool.value()) > + elif page is CONFIRM_PAGE: > + self.get_libvirt().define_storage_pool(self.__config.get_name(), config = self.__config) > + self.get_libvirt().create_storage_pool(self.__config.get_name()) > + self.set_finished() > + > + def get_pool_name_page(self, screen): > + self.__name = Entry(50, self.__config.get_name()) > + pooltypes 
= [] > + for pooltype in Storage.StoragePool.get_pool_types(): > + pooltypes.append(["%s: %s" % (pooltype, Storage.StoragePool.get_pool_type_desc(pooltype)), > + pooltype, > + self.__config.get_type() is pooltype]) > + self.__type = RadioBar(screen, pooltypes) > + grid = Grid(2, 2) > + grid.setField(Label("Name:"), 0, 0, anchorRight = 1) > + grid.setField(self.__name, 1, 0, anchorLeft = 1) > + grid.setField(Label("Type:"), 0, 1, anchorRight = 1, anchorTop = 1) > + grid.setField(self.__type, 1, 1, anchorLeft = 1) > + return [Label("Add Storage Pool"), > + grid] > + > + def get_pool_details_page(self, screen): > + rows = 0 > + if self.__config.needs_target_path(): > + self.__target_path = Entry(50, self.__config.get_target_path()) > + rows += 1 > + if self.__config.needs_format(): > + formats = [] > + for format in self.__config.get_formats(): > + formats.append([format, format, format is self.__config.get_format()]) > + self.__formats = RadioBar(screen, formats) > + rows += 1 > + if self.__config.needs_hostname(): > + self.__hostname = Entry(50, self.__config.get_hostname()) > + rows += 1 > + if self.__config.needs_source_path(): > + self.__source_path = Entry(50, self.__config.get_source_path()) > + rows += 1 > + if self.__config.needs_build_pool(): > + self.__build_pool = Checkbox("Build Pool", self.__config.get_build_pool()) > + rows += 1 > + grid = Grid(2, rows) > + currentrow = 0 > + if self.__config.needs_target_path(): > + grid.setField(Label("Target Path:"), 0, currentrow, anchorRight = 1) > + grid.setField(self.__target_path, 1, currentrow, anchorLeft = 1) > + currentrow += 1 > + if self.__config.needs_format(): > + grid.setField(Label("Format:"), 0, currentrow, anchorRight = 1, anchorTop = 1) > + grid.setField(self.__formats, 1, currentrow, anchorLeft = 1) > + currentrow += 1 > + if self.__config.needs_hostname(): > + grid.setField(Label("Host Name:"), 0, currentrow, anchorRight = 1) > + grid.setField(self.__hostname, 1, currentrow, anchorRight = 1) > + 
currentrow += 1 > + if self.__config.needs_source_path(): > + grid.setField(Label("Source Path:"), 0, currentrow, anchorRight = 1) > + grid.setField(self.__source_path, 1, currentrow, anchorLeft = 1) > + currentrow += 1 > + if self.__config.needs_build_pool(): > + grid.setField(Label(" "), 0, currentrow, anchorRight = 1) > + grid.setField(self.__build_pool, 1, currentrow, anchorLeft = 1) > + currentrow += 1 > + return [Label("Specify a storage location to be later split into virtual machine storage"), > + grid] > + > + def get_confirm_page(self, screen): > + grid = Grid(2, 2) > + grid.setField(Label("Name:"), 0, 0, anchorRight = 1) > + grid.setField(Label(self.__config.get_name()), 1, 0, anchorLeft = 1) > + grid.setField(Label("Target Path:"), 0, 1, anchorRight = 1) > + grid.setField(Label(self.__config.get_target_path()), 1, 1, anchorLeft = 1) > + return [Label("Confirm Pool Details"), > + grid] > + > +def AddStoragePool(): > + screen = AddStoragePoolConfigScreen() > + screen.start() > diff --git a/nodeadmin/addvolume.py b/nodeadmin/addvolume.py > new file mode 100644 > index 0000000..63cc54e > --- /dev/null > +++ b/nodeadmin/addvolume.py > @@ -0,0 +1,177 @@ > +# addvolume.py - Copyright (C) 2009 Red Hat, Inc. > +# Written by Darryl L. Pierce > +# > +# This program is free software; you can redistribute it and/or modify > +# it under the terms of the GNU General Public License as published by > +# the Free Software Foundation; version 2 of the License. > +# > +# This program is distributed in the hope that it will be useful, > +# but WITHOUT ANY WARRANTY; without even the implied warranty of > +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the > +# GNU General Public License for more details. > +# > +# You should have received a copy of the GNU General Public License > +# along with this program; if not, write to the Free Software > +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, > +# MA 02110-1301, USA. 
A copy of the GNU General Public License is > +# also available at http://www.gnu.org/copyleft/gpl.html. > + > +from snack import * > +import traceback > + > +from createmeter import CreateMeter > +from configscreen import * > +from volumeconfig import StorageVolumeConfig > +from utils import * > + > +SELECT_POOL_PAGE = 1 > +VOLUME_NAME_PAGE = 2 > +VOLUME_FORMAT_PAGE = 3 > +MAX_CAPACITY_PAGE = 4 > +CONFIRM_PAGE = 5 > + > +class AddVolumeConfigScreen(StorageListConfigScreen): > + def __init__(self): > + StorageListConfigScreen.__init__(self, "Add A New Storage Volume") > + self.__config = StorageVolumeConfig() > + > + def get_elements_for_page(self, screen, page): > + if page is SELECT_POOL_PAGE: return self.get_storage_pool_list_page(screen) > + elif page is VOLUME_NAME_PAGE: return self.get_volume_name_page(screen) > + elif page is VOLUME_FORMAT_PAGE: return self.get_volume_format_page(screen) > + elif page is MAX_CAPACITY_PAGE: return self.get_max_capacity_page(screen) > + elif page is CONFIRM_PAGE: return self.get_confirm_page(screen) > + > + def page_has_next(self, page): > + if page is SELECT_POOL_PAGE: > + return self.has_selectable_pools() > + else: > + if page < CONFIRM_PAGE: return True > + return False > + > + def page_has_back(self, page): > + if page > SELECT_POOL_PAGE: return True > + return False > + > + def page_has_finish(self, page): > + return page is CONFIRM_PAGE > + > + def get_next_page(self, page): > + if page is VOLUME_NAME_PAGE: > + if self.__config.needs_format(): > + return VOLUME_FORMAT_PAGE > + else: > + return MAX_CAPACITY_PAGE > + return StorageListConfigScreen.get_next_page(self, page) > + > + def get_back_page(self, page): > + if page is MAX_CAPACITY_PAGE: > + if self.__config.needs_format(): > + return VOLUME_FORMAT_PAGE > + else: > + return VOLUME_NAME_PAGE > + return StorageListConfigScreen.get_back_page(self, page) > + > + def validate_input(self, page, errors): > + if page is SELECT_POOL_PAGE: > + if self.get_selected_pool() is
not None: > + return True > + else: > + errors.append("You must select a storage pool.") > + elif page is VOLUME_NAME_PAGE: > + if string_is_not_blank(self.__name.value()): > + return True > + else: > + errors.append("Storage object name can only contain alphanumeric, '_', '.', or '-' characters.") > + elif page is VOLUME_FORMAT_PAGE: > + if self.__formats.current() is not None: > + return True > + else: > + errors.append("You must select a volume format.") > + elif page is MAX_CAPACITY_PAGE: > + if string_is_not_blank(self.__capacity.value()): > + if string_is_not_blank(self.__allocation.value()): > + capacity = int(self.__capacity.value()) > + allocation = int(self.__allocation.value()) > + if capacity > 0: > + if capacity <= self.__config.get_pool().info()[3] / 1024**2: > + if allocation >= 0: > + if allocation <= capacity: > + return True > + else: > + errors.append("Allocation cannot exceed the maximum capacity.") > + else: > + errors.append("The allocation must be greater than or equal to 0.") > + else: > + errors.append("The maximum capacity cannot exceed the storage pool size.") > + else: > + errors.append("The capacity must be greater than zero.") > + else: > + errors.append("An allocation value must be entered.") > + else: > + errors.append("A maximum volume capacity must be entered.") > + elif page is CONFIRM_PAGE: return True > + return False > + > + def process_input(self, page): > + if page is SELECT_POOL_PAGE: > + self.__config.set_pool(self.get_libvirt().get_storage_pool(self.get_selected_pool())) > + elif page is VOLUME_NAME_PAGE: > + self.__config.set_name(self.__name.value()) > + elif page is VOLUME_FORMAT_PAGE: > + self.__config.set_format(self.__formats.current()) > + elif page is MAX_CAPACITY_PAGE: > + self.__config.set_max_capacity(int(self.__capacity.value())) > + self.__config.set_allocation(int(self.__allocation.value())) > + elif page is CONFIRM_PAGE: > + self.get_libvirt().define_storage_volume(self.__config, CreateMeter()) > + 
self.set_finished() > + > + def get_volume_name_page(self, screen): > + self.__name = Entry(50, self.__config.get_name()) > + grid = Grid(2, 1) > + grid.setField(Label("Name:"), 0, 0, anchorRight = 1) > + grid.setField(self.__name, 1, 0, anchorLeft = 1) > + return [Label("New Storage Volume"), > + grid, > + Label("Name of the volume to create. File extension may be appended.")] > + > + def get_volume_format_page(self, screen): > + self.__formats = Listbox(0) > + for format in self.__config.get_formats_for_pool(): > + self.__formats.append(format, format) > + grid = Grid(1, 1) > + grid.setField(self.__formats, 0, 0) > + return [Label("Select The Volume Format"), > + grid] > + > + def get_max_capacity_page(self, screen): > + self.__capacity = Entry(6, str(self.__config.get_max_capacity())) > + self.__allocation = Entry(6, str(self.__config.get_allocation())) > + grid = Grid(2, 2) > + grid.setField(Label("Max. Capacity (MB):"), 0, 0, anchorRight = 1) > + grid.setField(self.__capacity, 1, 0, anchorLeft = 1) > + grid.setField(Label("Allocation (MB):"), 0, 1, anchorRight = 1) > + grid.setField(self.__allocation, 1, 1, anchorLeft = 1) > + return [Label("Storage Volume Quota"), > + Label("%s's available space: %0.2f GB" % (self.__config.get_pool().name(), > + self.__config.get_pool().info()[3] / 1024.0**3)), > + grid] > + > + def get_confirm_page(self, screen): > + grid = Grid(2, 5) > + grid.setField(Label("Volume Name:"), 0, 0, anchorRight = 1) > + grid.setField(Label("%s (%s)" % (self.__config.get_name(), self.__config.get_pool().name())), 1, 0, anchorLeft = 1) > + if self.__config.needs_format(): > + grid.setField(Label("Format:"), 0, 1, anchorRight = 1) > + grid.setField(Label(self.__config.get_format()), 1, 1, anchorLeft = 1) > + grid.setField(Label("Max. 
Capacity:"), 0, 2, anchorRight = 1) > + grid.setField(Label("%0.2f GB" % (self.__config.get_max_capacity() / 1024.0)), 1, 2, anchorLeft = 1) > + grid.setField(Label("Allocation:"), 0, 3, anchorRight = 1) > + grid.setField(Label("%0.2f GB" % (self.__config.get_allocation() / 1024.0)), 1, 3, anchorLeft = 1) > + return [Label("Ready To Allocation New Storage Volume"), > + grid] > + > +def AddStorageVolume(): > + screen = AddVolumeConfigScreen() > + screen.start() > diff --git a/nodeadmin/configscreen.py b/nodeadmin/configscreen.py > index f214aea..7654697 100644 > --- a/nodeadmin/configscreen.py > +++ b/nodeadmin/configscreen.py > @@ -179,3 +179,55 @@ class NetworkListConfigScreen(ConfigScreen): > > def has_selectable_networks(self): > return self.__has_networks > + > +class StorageListConfigScreen(ConfigScreen): > + '''Provides a base class for any configuration screen that deals with storage pool lists.''' > + > + def __init__(self, title): > + ConfigScreen.__init__(self, title) > + > + def get_storage_pool_list_page(self, screen, defined=True, created=True): > + pools = self.get_libvirt().list_storage_pools(defined=defined, created=created) > + if len(pools) > 0: > + self.__has_pools = True > + self.__pools_list = Listbox(0) > + for pool in pools: > + self.__pools_list.append(pool, pool) > + result = self.__pools_list > + else: > + self.__has_pools = False > + result = Label("There are no storage pools available.") > + grid = Grid(1, 1) > + grid.setField(result, 0, 0) > + return [Label("Storage Pool List"), > + grid] > + > + def get_selected_pool(self): > + return self.__pools_list.current() > + > + def has_selectable_pools(self): > + return self.__has_pools > + > + def get_storage_volume_list_page(self, screen): > + '''Requires that self.__pools_list have a selected element.''' > + pool = self.get_libvirt().get_storage_pool(self.get_selected_pool()) > + if len(pool.listVolumes()) > 0: > + self.__has_volumes = True > + self.__volumes_list = Listbox(0) > + for 
volname in pool.listVolumes(): > + volume = pool.storageVolLookupByName(volname) > + self.__volumes_list.append("%s (%0.2f GB)" % (volume.name(), volume.info()[2] / 1024**3), volume.name()) > + result = self.__volumes_list > + else: > + self.__has_volumes = False > + result = Label("There are no storage volumes available.") > + grid = Grid(1, 1) > + grid.setField(result, 0, 0) > + return [Label("Storage Volume List"), > + grid] > + > + def get_selected_volume(self): > + return self.__volumes_list.current() > + > + def has_selectable_volumes(self): > + return self.__has_volumes > diff --git a/nodeadmin/createmeter.py b/nodeadmin/createmeter.py > new file mode 100644 > index 0000000..521e7d8 > --- /dev/null > +++ b/nodeadmin/createmeter.py > @@ -0,0 +1,30 @@ > +# createmeter.py - Copyright (C) 2009 Red Hat, Inc. > +# Written by Darryl L. Pierce > +# > +# This program is free software; you can redistribute it and/or modify > +# it under the terms of the GNU General Public License as published by > +# the Free Software Foundation; version 2 of the License. > +# > +# This program is distributed in the hope that it will be useful, > +# but WITHOUT ANY WARRANTY; without even the implied warranty of > +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the > +# GNU General Public License for more details. > +# > +# You should have received a copy of the GNU General Public License > +# along with this program; if not, write to the Free Software > +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, > +# MA 02110-1301, USA. A copy of the GNU General Public License is > +# also available at http://www.gnu.org/copyleft/gpl.html. 
> + > +import urlgrabber.progress as progress > +import logging > + > +class CreateMeter(progress.BaseMeter): > + def _do_start(self, now = None): > + logging.info("Starting...") > + > + def _do_end(self, amount_read, now = None): > + logging.info("Ending: read=%d" % amount_read) > + > + def _do_update(self, amount_read, now = None): > + logging.info("Update: read=%d" % amount_read) > diff --git a/nodeadmin/libvirtworker.py b/nodeadmin/libvirtworker.py > index ba07605..b2acabe 100644 > --- a/nodeadmin/libvirtworker.py > +++ b/nodeadmin/libvirtworker.py > @@ -35,6 +35,10 @@ class LibvirtWorker: > self.__net.setup(self.__conn) > (self.__new_guest, self.__new_domain) = virtinst.CapabilitiesParser.guest_lookup(conn = self.__conn) > > + def get_connection(self): > + '''Returns the underlying connection.''' > + return self.__conn > + > def list_domains(self, defined = True, started = True): > '''Lists all domains.''' > result = [] > @@ -134,9 +138,12 @@ class LibvirtWorker: > network = self.get_network(name) > network.undefine() > > - def list_storage_pools(self): > + def list_storage_pools(self, defined=True, created=True): > '''Returns the list of all defined storage pools.''' > - return self.__conn.listStoragePools() > + pools = [] > + if defined: pools.extend(self.__conn.listDefinedStoragePools()) > + if created: pools.extend(self.__conn.listStoragePools()) > + return pools > > def storage_pool_exists(self, name): > '''Returns whether a storage pool exists.''' > @@ -144,16 +151,62 @@ class LibvirtWorker: > if name in pools: return True > return False > > - def define_storage_pool(self, name): > + def create_storage_pool(self, name): > + '''Starts the named storage pool if it is not currently started.''' > + if name not in self.list_storage_pools(defined = False): > + pool = self.get_storage_pool(name) > + pool.create(0) > + > + def destroy_storage_pool(self, name): > + '''Stops the specified storage pool.''' > + if name in self.list_storage_pools(defined = False): > 
+ pool = self.get_storage_pool(name) > + pool.destroy() > + > + def define_storage_pool(self, name, config = None, meter = None): > '''Defines a storage pool with the given name.''' > - try: > + if config is None: > pool = virtinst.Storage.DirectoryPool(conn=self.__conn, > name=name, > target_path=DEFAULT_POOL_TARGET_PATH) > - newpool = pool.install(build=True, create=True) > + newpool = pool.install(build=True, create=True, meter=meter) > newpool.setAutostart(True) > - except Exception, error: > - raise RuntimeError("Could not create pool: %s - %s", str(error)) > + else: > + pool = config.get_pool() > + pool.target_path = config.get_target_path() > + if config.needs_hostname(): > + pool.host = config.get_hostname() > + if config.needs_source_path(): > + pool.source_path = config.get_source_path() > + if config.needs_format(): > + pool.format = config.get_format() > + pool.conn = self.__conn > + pool.get_xml_config() > + newpool = pool.install(meter=meter, > + build=True, # config.get_build_pool(), > + create=True) > + newpool.setAutostart(True) > + > + def undefine_storage_pool(self, name): > + '''Undefines the specified storage pool.''' > + pool = self.get_storage_pool(name) > + pool.undefine() > + > + def get_storage_pool(self, name): > + '''Returns the storage pool with the specified name.''' > + return self.__conn.storagePoolLookupByName(name) > + > + def define_storage_volume(self, config, meter): > + '''Defines a new storage volume.''' > + self.create_storage_pool(config.get_pool().name()) > + volume = config.create_volume() > + volume.install(meter = meter) > + > + def remove_storage_volume(self, poolname, volumename): > + '''Removes the specified storage volume.''' > + pool = self.get_storage_pool(poolname) > + volume = pool.storageVolLookupByName(volumename) > + volume.delete(0) > > def list_bridges(self): > '''Lists all defined and active bridges.''' > diff --git a/nodeadmin/listpools.py b/nodeadmin/listpools.py > new file mode 100644 > index 
0000000..686c42d > --- /dev/null > +++ b/nodeadmin/listpools.py > @@ -0,0 +1,63 @@ > +# listpools.py - Copyright (C) 2009 Red Hat, Inc. > +# Written by Darryl L. Pierce > +# > +# This program is free software; you can redistribute it and/or modify > +# it under the terms of the GNU General Public License as published by > +# the Free Software Foundation; version 2 of the License. > +# > +# This program is distributed in the hope that it will be useful, > +# but WITHOUT ANY WARRANTY; without even the implied warranty of > +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the > +# GNU General Public License for more details. > +# > +# You should have received a copy of the GNU General Public License > +# along with this program; if not, write to the Free Software > +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, > +# MA 02110-1301, USA. A copy of the GNU General Public License is > +# also available at http://www.gnu.org/copyleft/gpl.html. > + > +from snack import * > + > +from configscreen import * > + > +LIST_PAGE = 1 > +DETAILS_PAGE = 2 > + > +class ListStoragePoolsConfigScreen(StorageListConfigScreen): > + def __init__(self): > + StorageListConfigScreen.__init__(self, "List Storage Pools") > + > + def get_elements_for_page(self, screen, page): > + if page is LIST_PAGE: return self.get_storage_pool_list_page(screen) > + elif page is DETAILS_PAGE: return self.get_pool_details_page(screen) > + > + def page_has_next(self, page): > + if page is LIST_PAGE and self.has_selectable_pools(): > + return True > + return False > + > + def page_has_back(self, page): > + if page is DETAILS_PAGE: return True > + return False > + > + def get_pool_details_page(self, screen): > + pool = self.get_libvirt().get_storage_pool(self.get_selected_pool()) > + volumes = Listbox(0); > + for name in pool.listVolumes(): > + volume = pool.storageVolLookupByName(name) > + volumes.append("%s (%0.1f G)" % (name, volume.info()[1] / 1024**3), name) > + grid = Grid(2, 3) > + 
grid.setField(Label("Name:"), 0, 0, anchorRight = 1) > + grid.setField(Label(pool.name()), 1, 0, anchorLeft = 1) > + grid.setField(Label("Volumes:"), 0, 1, anchorRight = 1) > + grid.setField(volumes, 1, 1, anchorLeft = 1) > + grid.setField(Label("Autostart:"), 0, 2, anchorRight = 1) > + label = "No" > + if pool.autostart(): label = "Yes" > + grid.setField(Label(label), 1, 2, anchorLeft = 1) > + return [Label("Details For Storage Pool: %s" % self.get_selected_pool()), > + grid] > + > +def ListStoragePools(): > + screen = ListStoragePoolsConfigScreen() > + screen.start() > diff --git a/nodeadmin/mainmenu.py b/nodeadmin/mainmenu.py > index 73501fa..52d9298 100755 > --- a/nodeadmin/mainmenu.py > +++ b/nodeadmin/mainmenu.py > @@ -19,28 +19,32 @@ > from snack import * > import traceback > > -from menuscreen import MenuScreen > -from nodemenu import NodeMenu > -from netmenu import NetworkMenu > +from menuscreen import MenuScreen > +from nodemenu import NodeMenu > +from netmenu import NetworkMenu > +from storagemenu import StoragePoolMenu > > import utils > import logging > > NODE_MENU = 1 > NETWORK_MENU = 2 > -EXIT_CONSOLE = 99 > +STORAGE_MENU = 3 > +EXIT_CONSOLE = 4 > > class MainMenuScreen(MenuScreen): > def __init__(self): > MenuScreen.__init__(self, "Main Menu") > > def get_menu_items(self): > - return (("Node Administration", NODE_MENU), > - ("Network Administration", NETWORK_MENU)) > - > - def handle_selection(self, page): > - if page is NODE_MENU: NodeMenu() > - elif page is NETWORK_MENU: NetworkMenu() > + return (("Node Administration", NODE_MENU), > + ("Network Administration", NETWORK_MENU), > + ("Storage Pool Administration", STORAGE_MENU)) > + > + def handle_selection(self, item): > + if item is NODE_MENU: NodeMenu() > + elif item is NETWORK_MENU: NetworkMenu() > + elif item is STORAGE_MENU: StoragePoolMenu() > > def MainMenu(): > screen = MainMenuScreen() > diff --git a/nodeadmin/poolconfig.py b/nodeadmin/poolconfig.py > new file mode 100644 > index 
0000000..6ece6c7 > --- /dev/null > +++ b/nodeadmin/poolconfig.py > @@ -0,0 +1,143 @@ > +# poolconfig.py - Copyright (C) 2009 Red Hat, Inc. > +# Written by Darryl L. Pierce > +# > +# This program is free software; you can redistribute it and/or modify > +# it under the terms of the GNU General Public License as published by > +# the Free Software Foundation; version 2 of the License. > +# > +# This program is distributed in the hope that it will be useful, > +# but WITHOUT ANY WARRANTY; without even the implied warranty of > +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the > +# GNU General Public License for more details. > +# > +# You should have received a copy of the GNU General Public License > +# along with this program; if not, write to the Free Software > +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, > +# MA 02110-1301, USA. A copy of the GNU General Public License is > +# also available at http://www.gnu.org/copyleft/gpl.html. > + > +from virtinst import Storage > + > +ROOT_TARGET_PATH="/var/lib/libvirt/images/%s" > + > +class PoolConfig: > + def __init__(self, libvirt): > + self.__libvirt = libvirt > + self.__name = "" > + self.set_type(None) > + self.__format = None > + self.__hostname = "" > + self.__target_path = "" > + self.__source_path = "" > + self.__build_pool = False > + > + def get_pool(self): > + return self.__pool > + > + def set_name(self, name): > + self.__name = name > + > + def get_name(self): > + return self.__name > + > + def set_type(self, pooltype): > + self.__type = pooltype > + self.__needs_target_path = False > + self.__needs_format = False > + self.__needs_hostname = False > + self.__needs_source_path = False > + self.__needs_build_pool = False > + if pooltype is not None: > + if pooltype is Storage.StoragePool.TYPE_DIR: > + self.__needs_target_path = True > + self.__target_path = ROOT_TARGET_PATH % self.__name > + self.__build_pool = True > + elif pooltype is Storage.StoragePool.TYPE_DISK: > + 
self.__needs_target_path = True > + self.__needs_format = True > + self.__needs_source_path = True > + self.__needs_build_pool = True > + elif pooltype is Storage.StoragePool.TYPE_FS: > + self.__needs_target_path = True > + self.__needs_format = True > + self.__needs_source_path = True > + self.__build_pool = True > + elif pooltype is Storage.StoragePool.TYPE_ISCSI: > + self.__needs_target_path = True > + self.__needs_hostname = True > + self.__needs_source_path = True > + self.__build_pool = False > + elif pooltype is Storage.StoragePool.TYPE_LOGICAL: > + self.__needs_target_path = True > + self.__needs_source_path = True > + self.__needs_build_pool = True > + elif pooltype is Storage.StoragePool.TYPE_NETFS: > + self.__needs_target_path = True > + self.__needs_format = True > + self.__needs_hostname = True > + self.__needs_source_path = True > + self.__build_pool = True > + # create pool > + pool_class = Storage.StoragePool.get_pool_class(self.__type) > + self.__pool = pool_class(name = self.__name, > + conn = self.__libvirt.get_connection()) > + if self.__needs_format: > + self.__format = self.__pool.formats[0] > + else: > + self.__type = Storage.StoragePool.get_pool_types()[0] > + > + def get_type(self): > + return self.__type > + > + def needs_target_path(self): > + return self.__needs_target_path > + > + def needs_format(self): > + return self.__needs_format > + > + def needs_hostname(self): > + return self.__needs_hostname > + > + def source_must_be_absolute(self): > + if self.__type is Storage.StoragePool.TYPE_ISCSI: > + return False > + return True > + > + def needs_source_path(self): > + return self.__needs_source_path > + > + def needs_build_pool(self): > + return self.__needs_build_pool > + > + def set_target_path(self, path): > + self.__target_path = path > + > + def get_target_path(self): > + return self.__target_path > + > + def get_formats(self): > + return self.__pool.formats > + > + def set_format(self, format): > + self.__format = format > + > + 
def get_format(self): > + return self.__format > + > + def set_hostname(self, hostname): > + self.__hostname = hostname > + > + def get_hostname(self): > + return self.__hostname > + > + def set_source_path(self, source_path): > + self.__source_path = source_path > + > + def get_source_path(self): > + return self.__source_path > + > + def set_build_pool(self, build_pool): > + self.__build_pool = build_pool > + > + def get_build_pool(self): > + return self.__build_pool > diff --git a/nodeadmin/removepool.py b/nodeadmin/removepool.py > new file mode 100644 > index 0000000..7a7f46d > --- /dev/null > +++ b/nodeadmin/removepool.py > @@ -0,0 +1,72 @@ > +#!/usr/bin/env python > +# > +# removepool.py - Copyright (C) 2009 Red Hat, Inc. > +# Written by Darryl L. Pierce > +# > +# This program is free software; you can redistribute it and/or modify > +# it under the terms of the GNU General Public License as published by > +# the Free Software Foundation; version 2 of the License. > +# > +# This program is distributed in the hope that it will be useful, > +# but WITHOUT ANY WARRANTY; without even the implied warranty of > +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the > +# GNU General Public License for more details. > +# > +# You should have received a copy of the GNU General Public License > +# along with this program; if not, write to the Free Software > +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, > +# MA 02110-1301, USA. A copy of the GNU General Public License is > +# also available at http://www.gnu.org/copyleft/gpl.html. 
> + > +from snack import * > +from configscreen import * > + > +LIST_POOLS_PAGE = 1 > +CONFIRM_PAGE = 2 > + > +class RemoveStoragePoolConfigScreen(StorageListConfigScreen): > + def __init__(self): > + StorageListConfigScreen.__init__(self, "Remove A Storage Pool") > + > + def get_elements_for_page(self, screen, page): > + if page is LIST_POOLS_PAGE: return self.get_storage_pool_list_page(screen) > + elif page is CONFIRM_PAGE: return self.get_confirm_page(screen) > + > + def page_has_next(self, page): > + return page is LIST_POOLS_PAGE and self.has_selectable_pools() > + > + def page_has_back(self, page): > + return False > + > + def page_has_finish(self, page): > + return page is CONFIRM_PAGE > + > + def validate_input(self, page, errors): > + if page is LIST_POOLS_PAGE: > + if self.get_selected_pool() is not None: > + return True > + else: > + errors.append("Please select a storage pool to be removed.") > + elif page is CONFIRM_PAGE: > + if self.__confirm.value(): > + return True > + else: > + errors.append("You must confirm removing a storage pool.") > + return False > + > + def process_input(self, page): > + if page is CONFIRM_PAGE: > + self.get_libvirt().destroy_storage_pool(self.get_selected_pool()) > + self.get_libvirt().undefine_storage_pool(self.get_selected_pool()) > + self.set_finished() > + > + def get_confirm_page(self, screen): > + self.__confirm = Checkbox("Check here to confirm deleting pool: %s" % self.get_selected_pool()) > + grid = Grid(1, 1) > + grid.setField(self.__confirm, 0, 0) > + return [Label("Remove Selected Storage Pool"), > + grid] > + > +def RemoveStoragePool(): > + screen = RemoveStoragePoolConfigScreen() > + screen.start() > diff --git a/nodeadmin/removevolume.py b/nodeadmin/removevolume.py > new file mode 100644 > index 0000000..5ad3058 > --- /dev/null > +++ b/nodeadmin/removevolume.py > @@ -0,0 +1,76 @@ > +# removevolume.py - Copyright (C) 2009 Red Hat, Inc. > +# Written by Darryl L. 
Pierce > +# > +# This program is free software; you can redistribute it and/or modify > +# it under the terms of the GNU General Public License as published by > +# the Free Software Foundation; version 2 of the License. > +# > +# This program is distributed in the hope that it will be useful, > +# but WITHOUT ANY WARRANTY; without even the implied warranty of > +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the > +# GNU General Public License for more details. > +# > +# You should have received a copy of the GNU General Public License > +# along with this program; if not, write to the Free Software > +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, > +# MA 02110-1301, USA. A copy of the GNU General Public License is > +# also available at http://www.gnu.org/copyleft/gpl.html. > + > +from snack import * > +import traceback > + > +from createmeter import CreateMeter > +from configscreen import * > +from volumeconfig import StorageVolumeConfig > +from utils import * > + > +SELECT_POOL_PAGE = 1 > +SELECT_VOLUME_PAGE = 2 > +CONFIRM_PAGE = 3 > + > +class RemoveVolumeConfigScreen(StorageListConfigScreen): > + def __init__(self): > + StorageListConfigScreen.__init__(self, "Add A New Storage Volume") > + self.__config = StorageVolumeConfig() > + > + def get_elements_for_page(self, screen, page): > + if page is SELECT_POOL_PAGE: return self.get_storage_pool_list_page(screen) > + elif page is SELECT_VOLUME_PAGE: return self.get_storage_volume_list_page(screen) > + elif page is CONFIRM_PAGE: return self.get_confirm_page(screen) > + > + def page_has_next(self, page): > + if page is SELECT_POOL_PAGE: return self.has_selectable_pools() > + elif page is SELECT_VOLUME_PAGE: return self.has_selectable_volumes() > + return False > + > + def validate_input(self, page, errors): > + if page is SELECT_POOL_PAGE: return self.get_selected_pool() is not None > + elif page is SELECT_VOLUME_PAGE: return self.get_selected_volume() is not None > + elif page is 
CONFIRM_PAGE: > + if self.__confirm.value(): > + return True > + else: > + errors.append("You must confirm deleting a storage volume.") > + return False > + > + def process_input(self, page): > + if page is CONFIRM_PAGE: > + self.get_libvirt().remove_storage_volume(self.get_selected_pool(), self.get_selected_volume()) > + self.set_finished() > + > + def page_has_back(self, page): > + return page > SELECT_POOL_PAGE > + > + def page_has_finish(self, page): > + return page is CONFIRM_PAGE > + > + def get_confirm_page(self, screen): > + self.__confirm = Checkbox("Check here to confirm deleting volume: %s" % self.get_selected_volume()) > + grid = Grid(1, 1) > + grid.setField(self.__confirm, 0, 0) > + return [Label("Remove Selected Storage Volume"), > + grid] > + > +def RemoveStorageVolume(): > + screen = RemoveVolumeConfigScreen() > + screen.start() > diff --git a/nodeadmin/setup.py.in b/nodeadmin/setup.py.in > index 1e6e028..17bfe93 100644 > --- a/nodeadmin/setup.py.in > +++ b/nodeadmin/setup.py.in > @@ -35,5 +35,12 @@ setup(name = "nodeadmin", > 'createnet = nodeadmin.createnetwork:CreateNetwork', > 'destroynet = nodeadmin.destroynetwork:DestroyNetwork', > 'undefinenet = nodeadmin.undefinenetwork:UndefineNetwork', > - 'listnets = nodeadmin.listnetworks:ListNetworks'] > + 'listnets = nodeadmin.listnetworks:ListNetworks', > + 'addpool = nodeadmin.addpool:AddStoragePool', > + 'rmpool = nodeadmin.removepool:RemoveStoragePool', > + 'startpool = nodeadmin.startpool:StartStoragePool', > + 'stoppool = nodeadmin.stoppool:StopStoragePool', > + 'addvolume = nodeadmin.addvolume:AddStorageVolume', > + 'rmvolume = nodeadmin.removevolume:RemoveStorageVolume', > + 'listpools = nodeadmin.listpools:ListStoragePools'] > }) > diff --git a/nodeadmin/startpool.py b/nodeadmin/startpool.py > new file mode 100644 > index 0000000..8a84512 > --- /dev/null > +++ b/nodeadmin/startpool.py > @@ -0,0 +1,62 @@ > +#!/usr/bin/env python > +# > +# startpool.py - Copyright (C) 2009 Red Hat, Inc.
> +# Written by Darryl L. Pierce > +# > +# This program is free software; you can redistribute it and/or modify > +# it under the terms of the GNU General Public License as published by > +# the Free Software Foundation; version 2 of the License. > +# > +# This program is distributed in the hope that it will be useful, > +# but WITHOUT ANY WARRANTY; without even the implied warranty of > +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the > +# GNU General Public License for more details. > +# > +# You should have received a copy of the GNU General Public License > +# along with this program; if not, write to the Free Software > +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, > +# MA 02110-1301, USA. A copy of the GNU General Public License is > +# also available at http://www.gnu.org/copyleft/gpl.html. > + > +from snack import * > +from configscreen import * > + > +LIST_POOLS_PAGE = 1 > +FINAL_PAGE = 2 > + > +class StartStoragePoolConfigScreen(StorageListConfigScreen): > + def __init__(self): > + StorageListConfigScreen.__init__(self, "Start A Storage Pool") > + > + def get_elements_for_page(self, screen, page): > + if page is LIST_POOLS_PAGE: return self.get_storage_pool_list_page(screen, created = False) > + elif page is FINAL_PAGE: return self.get_final_page(screen) > + > + def page_has_next(self, page): > + return page is LIST_POOLS_PAGE and self.has_selectable_pools() > + > + def page_has_back(self, page): > + return False > + > + def page_has_finish(self, page): > + return page is FINAL_PAGE > + > + def validate_input(self, page, errors): > + if page is LIST_POOLS_PAGE: > + if self.get_selected_pool() is not None: > + return True > + else: > + errors.append("Please select a storage pool to be started.") > + return False > + > + def process_input(self, page): > + if page is LIST_POOLS_PAGE: > + self.get_libvirt().create_storage_pool(self.get_selected_pool()) > + self.set_finished() > + > + def get_final_page(self, screen): > + return 
[Label("Storage pool started: %s" % self.get_selected_pool())] > + > +def StartStoragePool(): > + screen = StartStoragePoolConfigScreen() > + screen.start() > diff --git a/nodeadmin/stoppool.py b/nodeadmin/stoppool.py > new file mode 100644 > index 0000000..0522b95 > --- /dev/null > +++ b/nodeadmin/stoppool.py > @@ -0,0 +1,62 @@ > +#!/usr/bin/env python > +# > +# stoppool.py - Copyright (C) 2009 Red Hat, Inc. > +# Written by Darryl L. Pierce > +# > +# This program is free software; you can redistribute it and/or modify > +# it under the terms of the GNU General Public License as published by > +# the Free Software Foundation; version 2 of the License. > +# > +# This program is distributed in the hope that it will be useful, > +# but WITHOUT ANY WARRANTY; without even the implied warranty of > +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the > +# GNU General Public License for more details. > +# > +# You should have received a copy of the GNU General Public License > +# along with this program; if not, write to the Free Software > +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, > +# MA 02110-1301, USA. A copy of the GNU General Public License is > +# also available at http://www.gnu.org/copyleft/gpl.html. 
> + > +from snack import * > +from configscreen import * > + > +LIST_POOLS_PAGE = 1 > +FINAL_PAGE = 2 > + > +class StopStoragePoolConfigScreen(StorageListConfigScreen): > + def __init__(self): > + StorageListConfigScreen.__init__(self, "Stop A Storage Pool") > + > + def get_elements_for_page(self, screen, page): > + if page is LIST_POOLS_PAGE: return self.get_storage_pool_list_page(screen, defined = False) > + elif page is FINAL_PAGE: return self.get_final_page(screen) > + > + def page_has_next(self, page): > + return page is LIST_POOLS_PAGE and self.has_selectable_pools() > + > + def page_has_back(self, page): > + return False > + > + def page_has_finish(self, page): > + return page is FINAL_PAGE > + > + def validate_input(self, page, errors): > + if page is LIST_POOLS_PAGE: > + if self.get_selected_pool() is not None: > + return True > + else: > + errors.append("Please select a storage pool to be stopped.") > + return False > + > + def process_input(self, page): > + if page is LIST_POOLS_PAGE: > + self.get_libvirt().destroy_storage_pool(self.get_selected_pool()) > + self.set_finished() > + > + def get_final_page(self, screen): > + return [Label("Storage pool stopped: %s" % self.get_selected_pool())] > + > +def StopStoragePool(): > + screen = StopStoragePoolConfigScreen() > + screen.start() > diff --git a/nodeadmin/storagemenu.py b/nodeadmin/storagemenu.py > new file mode 100644 > index 0000000..0b56dae > --- /dev/null > +++ b/nodeadmin/storagemenu.py > @@ -0,0 +1,63 @@ > +# storagemenu.py - Copyright (C) 2009 Red Hat, Inc. > +# Written by Darryl L. Pierce > +# > +# This program is free software; you can redistribute it and/or modify > +# it under the terms of the GNU General Public License as published by > +# the Free Software Foundation; version 2 of the License. > +# > +# This program is distributed in the hope that it will be useful, > +# but WITHOUT ANY WARRANTY; without even the implied warranty of > +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the > +# GNU General Public License for more details. > +# > +# You should have received a copy of the GNU General Public License > +# along with this program; if not, write to the Free Software > +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, > +# MA 02110-1301, USA. A copy of the GNU General Public License is > +# also available at http://www.gnu.org/copyleft/gpl.html. > + > +from snack import * > +import traceback > + > +from menuscreen import MenuScreen > +from addpool import AddStoragePool > +from startpool import StartStoragePool > +from stoppool import StopStoragePool > +from removepool import RemoveStoragePool > +from addvolume import AddStorageVolume > +from removevolume import RemoveStorageVolume > +from listpools import ListStoragePools > + > +ADD_POOL = 1 > +START_POOL = 2 > +STOP_POOL = 3 > +REMOVE_POOL = 4 > +ADD_VOLUME = 5 > +REMOVE_VOLUME = 6 > +LIST_POOLS = 7 > + > +class StoragePoolMenuScreen(MenuScreen): > + def __init__(self): > + MenuScreen.__init__(self, "Storage Pool Administration") > + > + def get_menu_items(self): > + return (("Add A Storage Pool", ADD_POOL), > + ("Start A Storage Pool", START_POOL), > + ("Stop A Storage Pool", STOP_POOL), > + ("Remove A Storage Pool", REMOVE_POOL), > + ("Add A Storage Volume", ADD_VOLUME), > + ("Remove A Storage Volume", REMOVE_VOLUME), > + ("List Storage Pools", LIST_POOLS)) > + > + def handle_selection(self, item): > + if item is ADD_POOL: AddStoragePool() > + elif item is START_POOL: StartStoragePool() > + elif item is STOP_POOL: StopStoragePool() > + elif item is REMOVE_POOL: RemoveStoragePool() > + elif item is ADD_VOLUME: AddStorageVolume() > + elif item is REMOVE_VOLUME: RemoveStorageVolume() > + elif item is LIST_POOLS: ListStoragePools() > + > +def StoragePoolMenu(): > + screen = StoragePoolMenuScreen() > + screen.start() > diff --git a/nodeadmin/utils.py b/nodeadmin/utils.py > index 55a838c..28ccb8b 100644 > --- a/nodeadmin/utils.py > +++ b/nodeadmin/utils.py > @@ -17,9 +17,19 
@@ > # also available at http://www.gnu.org/copyleft/gpl.html. > > import logging > +import re > > logging.basicConfig(level=logging.DEBUG, > format='%(asctime)s %(levelname)-8s %(message)s', > datefmt='%a, %d %b %Y %H:%M:%S', > filename='/var/log/ovirt-nodeadmin.log', > filemode='w') > + > +def string_is_not_blank(value): > + if len(value) > 0: return True > + return False > + > +def string_has_no_spaces(value): > + if re.match("^[a-zA-Z0-9_]*$", value): > + return True > + return False > diff --git a/nodeadmin/volumeconfig.py b/nodeadmin/volumeconfig.py > new file mode 100644 > index 0000000..86ada74 > --- /dev/null > +++ b/nodeadmin/volumeconfig.py > @@ -0,0 +1,83 @@ > +# volumeconfig.py - Copyright (C) 2009 Red Hat, Inc. > +# Written by Darryl L. Pierce > +# > +# This program is free software; you can redistribute it and/or modify > +# it under the terms of the GNU General Public License as published by > +# the Free Software Foundation; version 2 of the License. > +# > +# This program is distributed in the hope that it will be useful, > +# but WITHOUT ANY WARRANTY; without even the implied warranty of > +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the > +# GNU General Public License for more details. > +# > +# You should have received a copy of the GNU General Public License > +# along with this program; if not, write to the Free Software > +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, > +# MA 02110-1301, USA. A copy of the GNU General Public License is > +# also available at http://www.gnu.org/copyleft/gpl.html. 
> + > +import virtinst > +from virtinst import Storage > + > +class StorageVolumeConfig: > + def __init__(self): > + self.__pool = None > + self.__name = "" > + self.__formats = None > + self.__format = None > + self.__max_capacity = 10000 > + self.__allocation = 0 > + > + def set_pool(self, pool): > + self.__pool = pool > + self.__formats = None > + self.__pool_type = virtinst.util.get_xml_path(self.__pool.XMLDesc(0), '/pool/@type') > + self.__volume_class = Storage.StoragePool.get_volume_for_pool(self.__pool_type) > + > + def get_pool(self): > + return self.__pool > + > + def create_volume(self): > + volume = self.__volume_class(name = self.__name + ".img", > + allocation = self.__allocation * 1024**2, > + capacity = self.__max_capacity * 1024**2, > + pool = self.__pool) > + volume.pool = self.__pool > + if self.needs_format(): > + volume.format = self.__format > + return volume > + > + def set_name(self, name): > + self.__name = name > + > + def get_name(self): > + return self.__name > + > + def needs_format(self): > + if self.__pool.__dict__.keys().count("get_formats_for_pool") > 0: > + return self.__pool.get_formats_for_pool() is not 0 > + else: > + return False > + > + def get_formats_for_pool(self): > + if self.__formats is None: > + self.__formats = self.__volume_class.formats > + return self.__formats > + > + def set_format(self, format): > + self.__format = format > + > + def get_format(self): > + return self.__format > + > + def set_max_capacity(self, capacity): > + self.__max_capacity = capacity > + > + def get_max_capacity(self): > + return self.__max_capacity > + > + def set_allocation(self, allocation): > + self.__allocation = allocation > + > + def get_allocation(self): > + return self.__allocation > diff --git a/ovirt-node.spec.in b/ovirt-node.spec.in > index d23a4ef..6509fa0 100644 > --- a/ovirt-node.spec.in > +++ b/ovirt-node.spec.in > @@ -369,11 +369,18 @@ fi > %{_initrddir}/ovirt-functions > %defattr(-,root,root,0644) > %{_bindir}/nodeadmin > 
+%{_bindir}/addpool > %{_bindir}/addvm > +%{_bindir}/addvolume > %{_bindir}/startvm > %{_bindir}/stopvm > %{_bindir}/rmvm > +%{_bindir}/listpools > %{_bindir}/listvms > +%{_bindir}/rmpool > +%{_bindir}/rmvolume > +%{_bindir}/startpool > +%{_bindir}/stoppool > %{_bindir}/definenet > %{_bindir}/createnet > %{_bindir}/destroynet > ACK pending below comments: For lvm -It creates a pool fine, when creating a volume its missing the volume group name when running lvcreate +-----+ An Exception Has Occurred +-----+ Couldn't create storage volume '1.img': 'internal error '/sbin/lvcreate --name 1.img -L 1024K /tmp/lvm' exited with non-zero status 3 and signal 0 Volume group name expected (no slash) Run `lvcreate --help' for more information. iscsi creates a pool fine but when you reach creating a volume it tells you it's no implemented, any way to block this option for even being available or alert the user ahead of time? All the other options create pools/volumes fine From dpierce at redhat.com Mon Nov 9 20:04:00 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Mon, 9 Nov 2009 15:04:00 -0500 Subject: [Ovirt-devel] [PATCH 1/2] Provides a new storage administration system to the managed node. In-Reply-To: <4AF8738A.3010508@redhat.com> References: <1257794438-10826-1-git-send-email-dpierce@redhat.com> <1257794438-10826-2-git-send-email-dpierce@redhat.com> <4AF8738A.3010508@redhat.com> Message-ID: <20091109200359.GD21222@mcpierce-laptop.rdu.redhat.com> On Mon, Nov 09, 2009 at 02:54:50PM -0500, Joey Boggs wrote: > ACK pending below comments: > > For lvm -It creates a pool fine, when creating a volume its missing the > volume group name when running lvcreate > > +-----+ An Exception Has Occurred +-----+ > Couldn't create storage volume '1.img': 'internal error '/sbin/lvcreate > --name 1.img -L 1024K /tmp/lvm' exited with non-zero status 3 and signal > 0 > Volume group name expected (no slash) Run `lvcreate --help' for more > information. 
Does the same thing happen with virt-manager when doing the same thing? > iscsi creates a pool fine but when you reach creating a volume it tells > you it's no implemented, any way to block this option for even being > available or alert the user ahead of time? > > > All the other options create pools/volumes fine I'm not sure if there's an easy way to block it without forcing us to have to do an upgrade later when it's implemented. When I first saw that, I double checked virt-manager and it's doing the same thing, giving an error that the iSCSI pool is not available. So at least in that way we're consistent. -- Darryl L. Pierce, Sr. Software Engineer @ Red Hat, Inc. Delivering value year after year. Red Hat ranks #1 in value among software vendors. http://www.redhat.com/promo/vendor/ -------------- next part -------------- A non-text attachment was scrubbed... Name: not available Type: application/pgp-signature Size: 197 bytes Desc: not available URL: From jboggs at redhat.com Mon Nov 9 20:22:56 2009 From: jboggs at redhat.com (Joey Boggs) Date: Mon, 09 Nov 2009 15:22:56 -0500 Subject: [Ovirt-devel] [PATCH 1/2] Provides a new storage administration system to the managed node. In-Reply-To: <20091109200359.GD21222@mcpierce-laptop.rdu.redhat.com> References: <1257794438-10826-1-git-send-email-dpierce@redhat.com> <1257794438-10826-2-git-send-email-dpierce@redhat.com> <4AF8738A.3010508@redhat.com> <20091109200359.GD21222@mcpierce-laptop.rdu.redhat.com> Message-ID: <4AF87A20.7040307@redhat.com> Darryl L. 
Pierce wrote: > On Mon, Nov 09, 2009 at 02:54:50PM -0500, Joey Boggs wrote: > >> ACK pending below comments: >> >> For lvm -It creates a pool fine, when creating a volume its missing the >> volume group name when running lvcreate >> >> +-----+ An Exception Has Occurred +-----+ >> Couldn't create storage volume '1.img': 'internal error '/sbin/lvcreate >> --name 1.img -L 1024K /tmp/lvm' exited with non-zero status 3 and signal >> 0 >> Volume group name expected (no slash) Run `lvcreate --help' for more >> information. >> > > Does the same thing happen with virt-manager when doing the same thing? > > virt-manager works fine but here's what wrong /sbin/lvcreate --name 1.img -L 1024K /tmp/lvm' exited with non-zero status 3 and signal 0 /tmp/lvm should be /dev/$poolname not the target path, if you run the command manually and sub in the right path. >> iscsi creates a pool fine but when you reach creating a volume it tells >> you it's no implemented, any way to block this option for even being >> available or alert the user ahead of time? >> >> >> All the other options create pools/volumes fine >> > > I'm not sure if there's an easy way to block it without forcing us to > have to do an upgrade later when it's implemented. When I first saw > that, I double checked virt-manager and it's doing the same thing, > giving an error that the iSCSI pool is not available. So at least in > that way we're consistent. > > ok wfm From dpierce at redhat.com Mon Nov 9 20:43:36 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Mon, 9 Nov 2009 15:43:36 -0500 Subject: [Ovirt-devel] [PATCH] Fixed a typo in ovirt-functions. Message-ID: <1257799416-28348-1-git-send-email-dpierce@redhat.com> Replaced an instance of "mount" that should have been "mkdir". Signed-off-by: Darryl L. 
Pierce --- scripts/ovirt-functions | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/scripts/ovirt-functions b/scripts/ovirt-functions index fc43343..b61e317 100644 --- a/scripts/ovirt-functions +++ b/scripts/ovirt-functions @@ -388,7 +388,7 @@ mount_data() { mkdir -p /data mount /data mkdir -p /data/images - mount -p /var/lib/libvirt/images + mkdir -p /var/lib/libvirt/images mount /var/lib/libvirt/images restorecon -rv /var/lib/libvirt/images -- 1.6.2.5 From jboggs at redhat.com Mon Nov 9 20:55:06 2009 From: jboggs at redhat.com (Joey Boggs) Date: Mon, 09 Nov 2009 15:55:06 -0500 Subject: [Ovirt-devel] [PATCH] Fixed a typo in ovirt-functions. In-Reply-To: <1257799416-28348-1-git-send-email-dpierce@redhat.com> References: <1257799416-28348-1-git-send-email-dpierce@redhat.com> Message-ID: <4AF881AA.2060202@redhat.com> Darryl L. Pierce wrote: > Replaced an instance of "mount" that should have been "mkdir". > > Signed-off-by: Darryl L. Pierce > --- > scripts/ovirt-functions | 2 +- > 1 files changed, 1 insertions(+), 1 deletions(-) > > diff --git a/scripts/ovirt-functions b/scripts/ovirt-functions > index fc43343..b61e317 100644 > --- a/scripts/ovirt-functions > +++ b/scripts/ovirt-functions > @@ -388,7 +388,7 @@ mount_data() { > mkdir -p /data > mount /data > mkdir -p /data/images > - mount -p /var/lib/libvirt/images > + mkdir -p /var/lib/libvirt/images > mount /var/lib/libvirt/images > restorecon -rv /var/lib/libvirt/images > > ACK From dpierce at redhat.com Mon Nov 9 21:25:12 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Mon, 9 Nov 2009 16:25:12 -0500 Subject: [Ovirt-devel] [PATCH] Fixed a typo in ovirt-functions. In-Reply-To: <4AF881AA.2060202@redhat.com> References: <1257799416-28348-1-git-send-email-dpierce@redhat.com> <4AF881AA.2060202@redhat.com> Message-ID: <20091109212511.GE21222@mcpierce-laptop.rdu.redhat.com> On Mon, Nov 09, 2009 at 03:55:06PM -0500, Joey Boggs wrote: > Darryl L. 
Pierce wrote: >> Replaced an instance of "mount" that should have been "mkdir". >> >> Signed-off-by: Darryl L. Pierce >> --- >> scripts/ovirt-functions | 2 +- >> 1 files changed, 1 insertions(+), 1 deletions(-) >> >> diff --git a/scripts/ovirt-functions b/scripts/ovirt-functions >> index fc43343..b61e317 100644 >> --- a/scripts/ovirt-functions >> +++ b/scripts/ovirt-functions >> @@ -388,7 +388,7 @@ mount_data() { >> mkdir -p /data >> mount /data >> mkdir -p /data/images >> - mount -p /var/lib/libvirt/images >> + mkdir -p /var/lib/libvirt/images >> mount /var/lib/libvirt/images >> restorecon -rv /var/lib/libvirt/images >> > ACK Pushed upstream. Thanks. -- Darryl L. Pierce, Sr. Software Engineer @ Red Hat, Inc. Delivering value year after year. Red Hat ranks #1 in value among software vendors. http://www.redhat.com/promo/vendor/ -------------- next part -------------- A non-text attachment was scrubbed... Name: not available Type: application/pgp-signature Size: 197 bytes Desc: not available URL: From fishy at linux.vnet.ibm.com Tue Nov 10 13:41:38 2009 From: fishy at linux.vnet.ibm.com (abhishek misra) Date: Tue, 10 Nov 2009 19:11:38 +0530 Subject: [Ovirt-devel] [RFC][PATCH] ovirt-node-image : edit-livecd : fail on error in arbitrary code Message-ID: <4AF96D92.4090503@linux.vnet.ibm.com> Purpose : fail iso build on encountering error in arbitrary code '$CODE' 1. export 'WDIR' , for 'CODE' to pick-up location to create 'fail' file to indicate failure, note that it may not always be possible to return some error code on failure in 'CODE' 2. 'set' commands enclosing 'CODE' will not be required 3. 'CODE' is expected to use 'set -e and trap signals like EXIT (which are generated when some command fails ) and write a 'fail' file 4. 
look for 'fail' file once 'CODE' has completed if positive then unmount proc ( note that if 'CODE', proc still remains mounted; this is not unmounted by existing trap in edit-livecd script ) rest of the cleanup is taken care off by existing trap statement Signed-off-by: Abhishek Misra --- --- a/edit-livecd 2009-11-10 17:23:21.000000000 +0530 +++ b/edit-livecd 2009-11-10 18:39:11.000000000 +0530 @@ -159,13 +159,14 @@ cp -pr $WDIR/sq $WDIR/sq-w # mount root filesystem mnt "-t ext2 $WDIR/sq-w/LiveOS/ext3fs.img -o rw,loop" ex +# exporting for tarp in 'CODE' +# $CODE can 'trap' EXIT signal and touch '$WDIR/fail' to notify this script of its failure + echo ">>> Updating CD content" if [ -n "$CODE" ]; then ( cd $WDIR/ex - set +e eval "$CODE" - set -e ) else echo "***" @@ -174,6 +175,14 @@ else read fi +# exit if $CODE failed. Look if '$WDIR/fail' exists +if [ -a $WDIR/fail ] ; then + rm $WDIR/fail + # need to explictly look for proc as failure in $CODE leaves it mounted causing 'umount $WDIR/ex-rw' to fail + mount | grep livecd | grep proc && addExit "umount -v $WDIR/ex-rw/proc" + exit +fi + # Try to unmount. But this is likely to fail, so let the user retry, # e.g., if he forgot to "cd" out of $WDIR/ex. while :; do From dpierce at redhat.com Tue Nov 10 14:39:01 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Tue, 10 Nov 2009 09:39:01 -0500 Subject: [Ovirt-devel] Re: [PATCH] Users can now work with remote libvirt hosts. In-Reply-To: <1256761793-8344-1-git-send-email-dpierce@redhat.com> References: <1256761793-8344-1-git-send-email-dpierce@redhat.com> Message-ID: <20091110143844.GA7227@mcpierce-desktop.usersys.redhat.com> On Wed, Oct 28, 2009 at 04:29:53PM -0400, Darryl L. Pierce wrote: > The user can: > * select a remote machine > * add a remote machine > * remove a remote machine > > Signed-off-by: Darryl L. Pierce > --- Can I get feedback or an ACK on this? -- Darryl L. Pierce, Sr. Software Engineer @ Red Hat, Inc. Delivering value year after year. 
Red Hat ranks #1 in value among software vendors. http://www.redhat.com/promo/vendor/ -------------- next part -------------- A non-text attachment was scrubbed... Name: not available Type: application/pgp-signature Size: 197 bytes Desc: not available URL: From dpierce at redhat.com Wed Nov 11 15:51:21 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Wed, 11 Nov 2009 10:51:21 -0500 Subject: [Ovirt-devel] Combined patch set for migration... Message-ID: <1257954683-5853-1-git-send-email-dpierce@redhat.com> This patch set includes both the host configuration patch and the node migration patch which depends on it. And the latter has a fix for missing files. From dpierce at redhat.com Wed Nov 11 15:51:22 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Wed, 11 Nov 2009 10:51:22 -0500 Subject: [Ovirt-devel] [PATCH 1/2] Users can now work with remote libvirt hosts. In-Reply-To: <1257954683-5853-1-git-send-email-dpierce@redhat.com> References: <1257954683-5853-1-git-send-email-dpierce@redhat.com> Message-ID: <1257954683-5853-2-git-send-email-dpierce@redhat.com> The user can: * select a remote machine * add a remote machine * remove a remote machine Signed-off-by: Darryl L. 
Pierce --- Makefile.am | 5 ++ nodeadmin/addhost.py | 129 ++++++++++++++++++++++++++++++++++++++++++++ nodeadmin/changehost.py | 58 ++++++++++++++++++++ nodeadmin/configscreen.py | 36 ++++++++++++- nodeadmin/definenet.py | 1 + nodeadmin/hostconnect.py | 29 ++++++++++ nodeadmin/hostmenu.py | 46 ++++++++++++++++ nodeadmin/libvirtworker.py | 53 +++++++++++++++++- nodeadmin/mainmenu.py | 14 +++-- nodeadmin/removehost.py | 66 ++++++++++++++++++++++ ovirt-node.spec.in | 5 ++ 11 files changed, 434 insertions(+), 8 deletions(-) create mode 100644 nodeadmin/addhost.py create mode 100644 nodeadmin/changehost.py create mode 100644 nodeadmin/hostconnect.py create mode 100644 nodeadmin/hostmenu.py create mode 100644 nodeadmin/removehost.py diff --git a/Makefile.am b/Makefile.am index b3929de..1671405 100644 --- a/Makefile.am +++ b/Makefile.am @@ -28,11 +28,15 @@ EXTRA_DIST = \ images/syslinux-vesa-splash.jpg \ nodeadmin/__init__.py \ nodeadmin/adddomain.py \ + nodeadmin/addhost.py \ + nodeadmin/changehost.py \ nodeadmin/configscreen.py \ nodeadmin/createnetwork.py \ nodeadmin/createuser.py \ nodeadmin/destroynetwork.py \ nodeadmin/halworker.py \ + nodeadmin/hostconnect.py \ + nodeadmin/hostmenu.py \ nodeadmin/libvirtworker.py \ nodeadmin/userworker.py \ nodeadmin/mainmenu.py \ @@ -40,6 +44,7 @@ EXTRA_DIST = \ nodeadmin/netmenu.py \ nodeadmin/nodemenu.py \ nodeadmin/removedomain.py \ + nodeadmin/removehost.py \ nodeadmin/undefinenetwork.py \ nodeadmin/startdomain.py \ nodeadmin/stopdomain.py \ diff --git a/nodeadmin/addhost.py b/nodeadmin/addhost.py new file mode 100644 index 0000000..ef35b7d --- /dev/null +++ b/nodeadmin/addhost.py @@ -0,0 +1,129 @@ +# addhost.py - Copyright (C) 2009 Red Hat, Inc. +# Written by Darryl L. Pierce +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +# MA 02110-1301, USA. A copy of the GNU General Public License is +# also available at http://www.gnu.org/copyleft/gpl.html. + +from snack import * + +from configscreen import * + +DETAILS_PAGE = 1 +CONFIRM_PAGE = 2 + +HYPERVISOR_XEN = "xen" +HYPERVISOR_KVM = "kvm" + +HYPERVISORS = {HYPERVISOR_XEN : "Xen", + HYPERVISOR_KVM : "QEMU/KVM"} + +CONNECTION_LOCAL = "local" +CONNECTION_KERBEROS = "kerberos" +CONNECTION_SSL = "ssl" +CONNECTION_SSH = "ssh" + +CONNECTIONS = {CONNECTION_LOCAL : "Local", + CONNECTION_KERBEROS : "Remote Password or Kerberos", + CONNECTION_SSL : "Remote SSL/TLS with x509 certificate", + CONNECTION_SSH : "Remote tunnel over SSH"} + +class AddHostConfigScreen(ConfigScreen): + def __init__(self): + ConfigScreen.__init__(self, "Add A Remote Host") + self.__configured = False + + def get_elements_for_page(self, screen, page): + if page is DETAILS_PAGE: return self.get_details_page(screen) + elif page is CONFIRM_PAGE: return self.get_confirm_page(screen) + + def page_has_next(self, page): + return page < CONFIRM_PAGE + + def page_has_back(self, page): + return page > DETAILS_PAGE + + def page_has_finish(self, page): + return page is CONFIRM_PAGE + + def validate_input(self, page, errors): + if page is DETAILS_PAGE: + if len(self.__hostname.value()) > 0: + return True + else: + errors.append("You must enter a remote hostname.") + elif page is CONFIRM_PAGE: return True + return False + + def process_input(self, page): + if page is CONFIRM_PAGE: + hv = self.__hypervisor.getSelection() + conn = 
self.__connection.getSelection() + hostname = self.__hostname.value() + + if hv is HYPERVISOR_XEN: + if conn is CONNECTION_LOCAL: url = "xen:///" + elif conn is CONNECTION_KERBEROS: url = "xen+tcp:///" + hostname + "/" + elif conn is CONNECTION_SSL: url = "xen+tls:///" + hostname + "/" + elif conn is CONNECTION_SSH: url = "xen+ssh:///" + hostname + "/" + elif hv is HYPERVISOR_KVM: + if conn is CONNECTION_LOCAL: url = "qemu:///system" + elif conn is CONNECTION_KERBEROS: url = "qemu+tcp://" + hostname + "/system" + elif conn is CONNECTION_SSL: url = "qemu+tls://" + hostname + "/system" + elif conn is CONNECTION_SSH: url = "qemu+ssh://" + hostname + "/system" + + self.get_virt_manager_config().add_connection(url) + self.set_finished() + + def get_details_page(self, screen): + if not self.__configured: + self.__hypervisor = RadioBar(screen, ((HYPERVISORS[HYPERVISOR_XEN], HYPERVISOR_XEN, True), + (HYPERVISORS[HYPERVISOR_KVM], HYPERVISOR_KVM, False))) + self.__connection = RadioBar(screen, ((CONNECTIONS[CONNECTION_LOCAL], CONNECTION_LOCAL, True), + (CONNECTIONS[CONNECTION_KERBEROS], CONNECTION_KERBEROS, False), + (CONNECTIONS[CONNECTION_SSL], CONNECTION_SSL, False), + (CONNECTIONS[CONNECTION_SSH], CONNECTION_SSH, False))) + self.__hostname = Entry(50, "") + self.__autoconnect = Checkbox("Autoconnect on Startup") + self.__configured = True + grid = Grid(2, 4) + grid.setField(Label("Hypervisor:"), 0, 0, anchorRight = 1, anchorTop = 1) + grid.setField(self.__hypervisor, 1, 0, anchorLeft = 1) + grid.setField(Label("Connection:"), 0, 1, anchorRight = 1, anchorTop = 1) + grid.setField(self.__connection, 1, 1, anchorLeft = 1) + grid.setField(Label("Hostname:"), 0, 2, anchorRight = 1) + grid.setField(self.__hostname, 1, 2, anchorLeft = 1) + grid.setField(Label(""), 0, 3, anchorRight = 1) + grid.setField(self.__autoconnect, 1, 3, anchorLeft = 1) + return [Label("Add Connection"), + grid] + + def get_confirm_page(self, screen): + grid = Grid(2, 4) + 
grid.setField(Label("Hypervisor:"), 0, 0, anchorRight = 1) + grid.setField(Label(HYPERVISORS[self.__hypervisor.getSelection()]), 1, 0, anchorLeft = 1) + grid.setField(Label("Connection:"), 0, 1, anchorRight = 1) + grid.setField(Label(CONNECTIONS[self.__connection.getSelection()]), 1, 1, anchorLeft = 1) + grid.setField(Label("Hostname:"), 0, 2, anchorRight = 1) + grid.setField(Label(self.__hostname.value()), 1, 2, anchorLeft = 1) + grid.setField(Label("Autoconnect on Startup:"), 0, 3, anchorRight = 1) + label = "Yes" + if not self.__autoconnect.value(): label = "No" + grid.setField(Label(label), 1, 3, anchorLeft = 1) + return [Label("Confirm Connection"), + grid] + +def AddHost(): + screen = AddHostConfigScreen() + screen.start() diff --git a/nodeadmin/changehost.py b/nodeadmin/changehost.py new file mode 100644 index 0000000..23e6854 --- /dev/null +++ b/nodeadmin/changehost.py @@ -0,0 +1,58 @@ +# changehost.py - Copyright (C) 2009 Red Hat, Inc. +# Written by Darryl L. Pierce +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +# MA 02110-1301, USA. A copy of the GNU General Public License is +# also available at http://www.gnu.org/copyleft/gpl.html. 
+ +from snack import * + +import logging +import libvirtworker +from configscreen import * + +CONNECTION_LIST_PAGE = 1 +CONNECTED_PAGE = 2 + +class ChangeHostConfigScreen(HostListConfigScreen): + def __init__(self): + HostListConfigScreen.__init__(self, "Change Host") + + def get_elements_for_page(self, screen, page): + if page is CONNECTION_LIST_PAGE: return self.get_connection_list_page(screen) + elif page is CONNECTED_PAGE: return self.get_connected_page(screen) + + def process_input(self, page): + if page is CONNECTION_LIST_PAGE: + logging.info("Changing libvirt connection to %s" % self.get_selected_connection()) + libvirtworker.set_default_url(self.get_selected_connection()) + self.get_libvirt().open_connection(self.get_selected_connection()) + elif page is CONNECTED_PAGE: self.set_finished() + + def page_has_next(self, page): + if page is CONNECTION_LIST_PAGE: return self.has_selectable_connections() + return False + + def page_has_back(self, page): + return page > CONNECTION_LIST_PAGE + + def page_has_finish(self, page): + return page is CONNECTED_PAGE + + def get_connected_page(self, screen): + return [Label("Connected to %s" % self.get_selected_connection())] + +def ChangeHost(): + screen = ChangeHostConfigScreen() + screen.start() diff --git a/nodeadmin/configscreen.py b/nodeadmin/configscreen.py index f214aea..98e0338 100644 --- a/nodeadmin/configscreen.py +++ b/nodeadmin/configscreen.py @@ -18,7 +18,7 @@ from snack import * from halworker import HALWorker -from libvirtworker import LibvirtWorker +from libvirtworker import * import traceback BACK_BUTTON = "back" @@ -35,6 +35,7 @@ class ConfigScreen: self.__finished = False self.__hal = HALWorker() self.__libvirt = LibvirtWorker() + self.__vm_config = VirtManagerConfig() def get_hal(self): return self.__hal @@ -42,6 +43,9 @@ class ConfigScreen: def get_libvirt(self): return self.__libvirt + def get_virt_manager_config(self): + return self.__vm_config + def set_finished(self): self.__finished = True @@ 
-179,3 +183,33 @@ class NetworkListConfigScreen(ConfigScreen): def has_selectable_networks(self): return self.__has_networks + +class HostListConfigScreen(ConfigScreen): + '''Provides a base class for working with lists of libvirt hosts.''' + + def __init__(self, title): + ConfigScreen.__init__(self, title) + + def get_connection_list_page(self, screen): + connections = self.get_virt_manager_config().get_connection_list() + result = None + + if len(connections) > 0: + self.__has_connections = True + self.__connection_list = Listbox(0) + for connection in connections: + self.__connection_list.append(connection, connection) + result = self.__connection_list + else: + self.__has_connections = False + result = Label("There are no defined connections.") + grid = Grid(1, 1) + grid.setField(result, 0, 0) + return [Label("Host List"), + grid] + + def get_selected_connection(self): + return self.__connection_list.current() + + def has_selectable_connections(self): + return self.__has_connections diff --git a/nodeadmin/definenet.py b/nodeadmin/definenet.py index 4aa37d5..6dff18f 100644 --- a/nodeadmin/definenet.py +++ b/nodeadmin/definenet.py @@ -20,6 +20,7 @@ from snack import * from IPy import IP import traceback import logging +import re from configscreen import ConfigScreen from networkconfig import NetworkConfig diff --git a/nodeadmin/hostconnect.py b/nodeadmin/hostconnect.py new file mode 100644 index 0000000..a1be569 --- /dev/null +++ b/nodeadmin/hostconnect.py @@ -0,0 +1,29 @@ +# hostconnect.py - Copyright (C) 2009 Red Hat, Inc. +# Written by Darryl L. Pierce +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +# MA 02110-1301, USA. A copy of the GNU General Public License is +# also available at http://www.gnu.org/copyleft/gpl.html. + +from snack import * + +from configscreen import * + +class HostConnectConfigScreen(ConfigScreen): + def __init__(self): + ConfigScree + +def HostConnect(): + screen = HostConnectConfigScreen() + screen.start() diff --git a/nodeadmin/hostmenu.py b/nodeadmin/hostmenu.py new file mode 100644 index 0000000..4054d6b --- /dev/null +++ b/nodeadmin/hostmenu.py @@ -0,0 +1,46 @@ +# hostmenu.py - Copyright (C) 2009 Red Hat, Inc. +# Written by Darryl L. Pierce +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +# MA 02110-1301, USA. A copy of the GNU General Public License is +# also available at http://www.gnu.org/copyleft/gpl.html. 
+ +from snack import * + +from menuscreen import MenuScreen +from changehost import ChangeHost +from addhost import AddHost +from removehost import RemoveHost + +SELECT_HOST = 1 +ADD_HOST = 2 +REMOVE_HOST = 3 + +class HostMenuScreen(MenuScreen): + def __init__(self): + MenuScreen.__init__(self, "Host Menu Screen") + + def get_menu_items(self): + return (("Select A Host", SELECT_HOST), + ("Add A Host", ADD_HOST), + ("Remove A Host", REMOVE_HOST)) + + def handle_selection(self, item): + if item is SELECT_HOST: ChangeHost() + elif item is ADD_HOST: AddHost() + elif item is REMOVE_HOST: RemoveHost() + +def HostMenu(): + screen = HostMenuScreen() + screen.start() diff --git a/nodeadmin/libvirtworker.py b/nodeadmin/libvirtworker.py index ba07605..2998486 100644 --- a/nodeadmin/libvirtworker.py +++ b/nodeadmin/libvirtworker.py @@ -21,20 +21,69 @@ import libvirt import os import virtinst import utils +import logging from domainconfig import DomainConfig DEFAULT_POOL_TARGET_PATH="/var/lib/libvirt/images" +DEFAULT_URL="qemu:///system" + +default_url = DEFAULT_URL + +def set_default_url(url): + logging.info("Changing DEFAULT_URL to %s" % url) + global default_url + + default_url = url + +def get_default_url(): + logging.info("Returning default URL of %s" % default_url) + return default_url + +class VirtManagerConfig: + def __init__(self, filename = "/etc/remote-libvirt.conf"): + self.__filename = filename + + def get_connection_list(self): + result = [] + if os.path.exists(self.__filename): + input = file(self.__filename, "r") + for entry in input: result.append(entry[0:-1]) + return result + + def add_connection(self, connection): + connections = self.get_connection_list() + if connections.count(connection) is 0: + connections.append(connection) + self._save_connections(connections) + + def remove_connection(self, connection): + connections = self.get_connection_list() + if connections.count(connection) > 0: + connections.remove(connection) + 
self._save_connections(connections) + + def _save_connections(self, connections): + output = file(self.__filename, "w") + for entry in connections: + print >> output, entry + output.close class LibvirtWorker: '''Provides utilities for interfacing with libvirt.''' - def __init__(self, url = "qemu:///system"): - self.__conn = libvirt.open(url) + def __init__(self, url = None): + if url is None: url = get_default_url() + logging.info("Connecting to libvirt: %s" % url) + self.open_connection(url) self.__capabilities = virtinst.CapabilitiesParser.parse(self.__conn.getCapabilities()) self.__net = virtinst.VirtualNetworkInterface(conn = self.__conn) self.__net.setup(self.__conn) (self.__new_guest, self.__new_domain) = virtinst.CapabilitiesParser.guest_lookup(conn = self.__conn) + def open_connection(self, url): + '''Lets the user change the url for the connection.''' + self.__conn = libvirt.open(url) + def list_domains(self, defined = True, started = True): '''Lists all domains.''' result = [] diff --git a/nodeadmin/mainmenu.py b/nodeadmin/mainmenu.py index 73501fa..944ffeb 100755 --- a/nodeadmin/mainmenu.py +++ b/nodeadmin/mainmenu.py @@ -19,15 +19,17 @@ from snack import * import traceback -from menuscreen import MenuScreen -from nodemenu import NodeMenu -from netmenu import NetworkMenu +from menuscreen import MenuScreen +from nodemenu import NodeMenu +from netmenu import NetworkMenu +from hostmenu import HostMenu import utils import logging NODE_MENU = 1 NETWORK_MENU = 2 +HOST_MENU = 3 EXIT_CONSOLE = 99 class MainMenuScreen(MenuScreen): @@ -35,12 +37,14 @@ class MainMenuScreen(MenuScreen): MenuScreen.__init__(self, "Main Menu") def get_menu_items(self): - return (("Node Administration", NODE_MENU), - ("Network Administration", NETWORK_MENU)) + return (("Node Administration", NODE_MENU), + ("Network Administration", NETWORK_MENU), + ("Host Administration", HOST_MENU)) def handle_selection(self, page): if page is NODE_MENU: NodeMenu() elif page is NETWORK_MENU: 
NetworkMenu() + elif page is HOST_MENU: HostMenu() def MainMenu(): screen = MainMenuScreen() diff --git a/nodeadmin/removehost.py b/nodeadmin/removehost.py new file mode 100644 index 0000000..cf3c46c --- /dev/null +++ b/nodeadmin/removehost.py @@ -0,0 +1,66 @@ +# removehost.py - Copyright (C) 2009 Red Hat, Inc. +# Written by Darryl L. Pierce +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +# MA 02110-1301, USA. A copy of the GNU General Public License is +# also available at http://www.gnu.org/copyleft/gpl.html. 
+ +from snack import * + +from configscreen import * + +SELECT_HOST_PAGE = 1 +CONFIRM_REMOVE_PAGE = 2 + +class RemoveHostConfigScreen(HostListConfigScreen): + def __init__(self): + HostListConfigScreen.__init__(self, "Remove Host Connection") + + def get_elements_for_page(self, screen, page): + if page is SELECT_HOST_PAGE: return self.get_connection_list_page(screen) + elif page is CONFIRM_REMOVE_PAGE: return self.get_confirm_remove_page(screen) + + def page_has_next(self, page): + return page is SELECT_HOST_PAGE and self.has_selectable_connections() + + def page_has_back(self, page): + return page is CONFIRM_REMOVE_PAGE + + def page_has_finish(self, page): + return page is CONFIRM_REMOVE_PAGE + + def validate_input(self, page, errors): + if page is SELECT_HOST_PAGE: return True + elif page is CONFIRM_REMOVE_PAGE: + if self.__confirm.value(): + return True + else: + errors.append("You must confirm removing the connection.") + return False + + def process_input(self, page): + if page is CONFIRM_REMOVE_PAGE: + self.get_virt_manager_config().remove_connection(self.get_selected_connection()) + self.set_finished() + + def get_confirm_remove_page(self, screen): + self.__confirm = Checkbox("Remove this connection: %s" % self.get_selected_connection(), 0) + grid = Grid(1, 1) + grid.setField(self.__confirm, 0, 0) + return [Label("Remove Host Connection"), + grid] + +def RemoveHost(): + screen = RemoveHostConfigScreen() + screen.start() diff --git a/ovirt-node.spec.in b/ovirt-node.spec.in index 00050a4..39d46b3 100644 --- a/ovirt-node.spec.in +++ b/ovirt-node.spec.in @@ -198,6 +198,11 @@ cd - %{__install} -p -m0755 nodeadmin/destroynetwork.py %{buildroot}%{python_sitelib}/nodeadmin %{__install} -p -m0755 nodeadmin/undefinenetwork.py %{buildroot}%{python_sitelib}/nodeadmin +%{__install} -p -m0755 nodeadmin/addhost.py %{buildroot}%{python_sitelib}/nodeadmin +%{__install} -p -m0644 nodeadmin/changehost.py %{buildroot}%{python_sitelib}/nodeadmin +%{__install} -p -m0755 
nodeadmin/hostmenu.py %{buildroot}%{python_sitelib}/nodeadmin +%{__install} -p -m0755 nodeadmin/removehost.py %{buildroot}%{python_sitelib}/nodeadmin + %{__install} -p -m0755 nodeadmin/createuser.py %{buildroot}%{python_sitelib}/nodeadmin %{__install} -p -m0644 nodeadmin/halworker.py %{buildroot}%{python_sitelib}/nodeadmin -- 1.6.2.5 From dpierce at redhat.com Wed Nov 11 15:51:23 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Wed, 11 Nov 2009 10:51:23 -0500 Subject: [Ovirt-devel] [PATCH 2/2] Enables users to migrate virtual machines between hosts. In-Reply-To: <1257954683-5853-2-git-send-email-dpierce@redhat.com> References: <1257954683-5853-1-git-send-email-dpierce@redhat.com> <1257954683-5853-2-git-send-email-dpierce@redhat.com> Message-ID: <1257954683-5853-3-git-send-email-dpierce@redhat.com> Users select a virtual machine on their current libvirt host. They then select a target machine, which must have been previously configured as a connection. They confirm the migration and then it runs. Signed-off-by: Darryl L. 
Pierce --- Makefile.am | 1 + nodeadmin/addhost.py | 10 ++++- nodeadmin/libvirtworker.py | 6 +++ nodeadmin/migratedomain.py | 81 ++++++++++++++++++++++++++++++++++++++++++++ nodeadmin/nodemenu.py | 28 +++++++++------ nodeadmin/setup.py.in | 1 + ovirt-node.spec.in | 2 + 7 files changed, 115 insertions(+), 14 deletions(-) create mode 100644 nodeadmin/migratedomain.py diff --git a/Makefile.am b/Makefile.am index 1671405..f557ea2 100644 --- a/Makefile.am +++ b/Makefile.am @@ -41,6 +41,7 @@ EXTRA_DIST = \ nodeadmin/userworker.py \ nodeadmin/mainmenu.py \ nodeadmin/menuscreen.py \ + nodeadmin/migratedomain.py \ nodeadmin/netmenu.py \ nodeadmin/nodemenu.py \ nodeadmin/removedomain.py \ diff --git a/nodeadmin/addhost.py b/nodeadmin/addhost.py index ef35b7d..ebcb4ea 100644 --- a/nodeadmin/addhost.py +++ b/nodeadmin/addhost.py @@ -59,7 +59,9 @@ class AddHostConfigScreen(ConfigScreen): def validate_input(self, page, errors): if page is DETAILS_PAGE: - if len(self.__hostname.value()) > 0: + if self.__connection.getSelection() is CONNECTION_LOCAL: + return True + elif len(self.__hostname.value()) > 0: return True else: errors.append("You must enter a remote hostname.") @@ -115,8 +117,12 @@ class AddHostConfigScreen(ConfigScreen): grid.setField(Label(HYPERVISORS[self.__hypervisor.getSelection()]), 1, 0, anchorLeft = 1) grid.setField(Label("Connection:"), 0, 1, anchorRight = 1) grid.setField(Label(CONNECTIONS[self.__connection.getSelection()]), 1, 1, anchorLeft = 1) + if self.__connection.getSelection() is not CONNECTION_LOCAL: + hostname = self.__hostname.value() + else: + hostname = "local" grid.setField(Label("Hostname:"), 0, 2, anchorRight = 1) - grid.setField(Label(self.__hostname.value()), 1, 2, anchorLeft = 1) + grid.setField(Label(hostname), 1, 2, anchorLeft = 1) grid.setField(Label("Autoconnect on Startup:"), 0, 3, anchorRight = 1) label = "Yes" if not self.__autoconnect.value(): label = "No" diff --git a/nodeadmin/libvirtworker.py b/nodeadmin/libvirtworker.py index 
2998486..878b01c 100644 --- a/nodeadmin/libvirtworker.py +++ b/nodeadmin/libvirtworker.py @@ -122,6 +122,12 @@ class LibvirtWorker: domain = self.get_domain(name) domain.undefine() + def migrate_domain(self, name, target): + '''Migrates the specified domain to the target machine.''' + target_conn = libvirt.open(target) + virtmachine = self.get_domain(name) + virtmachine.migrate(target_conn, libvirt.VIR_MIGRATE_LIVE, None, None, 0) + def list_networks(self, defined = True, started = True): '''Lists all networks.''' result = [] diff --git a/nodeadmin/migratedomain.py b/nodeadmin/migratedomain.py new file mode 100644 index 0000000..8c8c268 --- /dev/null +++ b/nodeadmin/migratedomain.py @@ -0,0 +1,81 @@ +#!/usr/bin/env python +# +# migratedomain.py - Copyright (C) 2009 Red Hat, Inc. +# Written by Darryl L. Pierce +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +# MA 02110-1301, USA. A copy of the GNU General Public License is +# also available at http://www.gnu.org/copyleft/gpl.html. 
+ +from snack import * +from libvirtworker import LibvirtWorker +from configscreen import * + +LIST_DOMAINS = 1 +SELECT_TARGET = 2 +CONFIRM_PAGE = 3 + +class MigrateDomainConfigScreen(DomainListConfigScreen): + def __init__(self): + DomainListConfigScreen.__init__(self, "Migrate Virtual Machine") + self.__configured = False + + def get_elements_for_page(self, screen, page): + if page is LIST_DOMAINS: return self.get_domain_list_page(screen) + elif page is SELECT_TARGET: return self.get_target_page(screen) + elif page is CONFIRM_PAGE: return self.get_confirm_page(screen) + + def page_has_next(self, page): + if page is LIST_DOMAINS: return self.has_selectable_domains() + else: return page < CONFIRM_PAGE + + def page_has_back(self, page): + return page < CONFIRM_PAGE + + def page_has_finish(self, page): + return page is CONFIRM_PAGE + + def validate_input(self, page, errors): + if page is LIST_DOMAINS: return self.get_selected_domain() is not None + elif page is SELECT_TARGET: + if self.__targets.current() is None: + errors.append("Please enter a target hostname or IP address.") + return False + elif page is CONFIRM_PAGE: + if not self.__confirm.value(): + errors.append("You must confirm migrating this virtual machine to proceed.") + return False + return True + + def process_input(self, page): + if page is CONFIRM_PAGE: + self.get_libvirt().migrate_domain(self.get_selected_domain(), self.__targets.current()) + self.set_finished() + + def get_target_page(self, screen): + self.__targets = Listbox(0) + for connection in self.get_virt_manager_config().get_connection_list(): + self.__targets.append(connection, connection) + return [Label("Select A Target Host"), + self.__targets] + + def get_confirm_page(self, screen): + self.__confirm = Checkbox("Confirm migrating this virtual machine.") + grid = Grid(1, 1) + grid.setField(self.__confirm, 0, 0) + return [grid] + +def MigrateDomain(): + screen = MigrateDomainConfigScreen() + screen.start() diff --git 
a/nodeadmin/nodemenu.py b/nodeadmin/nodemenu.py index 16be89c..f213e09 100755 --- a/nodeadmin/nodemenu.py +++ b/nodeadmin/nodemenu.py @@ -26,17 +26,19 @@ from startdomain import StartDomain from stopdomain import StopDomain from removedomain import RemoveDomain from listdomains import ListDomains +from migratedomain import MigrateDomain from createuser import CreateUser import utils import logging -ADD_DOMAIN = 1 -START_DOMAIN = 2 -STOP_DOMAIN = 3 -REMOVE_DOMAIN = 4 -LIST_DOMAINS = 5 -CREATE_USER = 6 +ADD_DOMAIN = 1 +START_DOMAIN = 2 +STOP_DOMAIN = 3 +REMOVE_DOMAIN = 4 +LIST_DOMAINS = 5 +MIGRATE_DOMAIN = 6 +CREATE_USER = 7 class NodeMenuScreen(MenuScreen): def __init__(self): @@ -48,15 +50,17 @@ class NodeMenuScreen(MenuScreen): ("Stop A Virtual Machine", STOP_DOMAIN), ("Remove A Virtual Machine", REMOVE_DOMAIN), ("List All Virtual Machines", LIST_DOMAINS), + ("Migrate Virtual Machine", MIGRATE_DOMAIN), ("Create A User", CREATE_USER)) def handle_selection(self, item): - if item is ADD_DOMAIN: AddDomain() - elif item is START_DOMAIN: StartDomain() - elif item is STOP_DOMAIN: StopDomain() - elif item is REMOVE_DOMAIN: RemoveDomain() - elif item is LIST_DOMAINS: ListDomains() - elif item is CREATE_USER: CreateUser() + if item is ADD_DOMAIN: AddDomain() + elif item is START_DOMAIN: StartDomain() + elif item is STOP_DOMAIN: StopDomain() + elif item is REMOVE_DOMAIN: RemoveDomain() + elif item is LIST_DOMAINS: ListDomains() + elif item is MIGRATE_DOMAIN: MigrateDomain() + elif item is CREATE_USER: CreateUser() def NodeMenu(): screen = NodeMenuScreen() diff --git a/nodeadmin/setup.py.in b/nodeadmin/setup.py.in index 1e6e028..8b17487 100644 --- a/nodeadmin/setup.py.in +++ b/nodeadmin/setup.py.in @@ -29,6 +29,7 @@ setup(name = "nodeadmin", 'startvm = nodeadmin.startdomain:StartDomain', 'stopvm = nodeadmin.stopdomain:StopDomain', 'rmvm = nodeadmin.removedomain:RemoveDomain', + 'migratevm = nodeadmin.migratedomain:MigradeDomain', 'createuser = 
nodeadmin.createuser:CreateUser', 'listvms = nodeadmin.listdomains:ListDomains', 'definenet = nodeadmin.definenet:DefineNetwork', diff --git a/ovirt-node.spec.in b/ovirt-node.spec.in index 39d46b3..ea711dc 100644 --- a/ovirt-node.spec.in +++ b/ovirt-node.spec.in @@ -187,6 +187,7 @@ cd - %{__install} -p -m0755 nodeadmin/adddomain.py %{buildroot}%{python_sitelib}/nodeadmin %{__install} -p -m0644 nodeadmin/domainconfig.py %{buildroot}%{python_sitelib}/nodeadmin %{__install} -p -m0755 nodeadmin/listdomains.py %{buildroot}%{python_sitelib}/nodeadmin +%{__install} -p -m0755 nodeadmin/migratedomain.py %{buildroot}%{python_sitelib}/nodeadmin %{__install} -p -m0755 nodeadmin/removedomain.py %{buildroot}%{python_sitelib}/nodeadmin %{__install} -p -m0755 nodeadmin/startdomain.py %{buildroot}%{python_sitelib}/nodeadmin %{__install} -p -m0755 nodeadmin/stopdomain.py %{buildroot}%{python_sitelib}/nodeadmin @@ -381,6 +382,7 @@ fi %{_bindir}/startvm %{_bindir}/stopvm %{_bindir}/rmvm +%{_bindir}/migratevm %{_bindir}/listvms %{_bindir}/definenet %{_bindir}/createnet -- 1.6.2.5 From dpierce at redhat.com Wed Nov 11 20:02:27 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Wed, 11 Nov 2009 15:02:27 -0500 Subject: [Ovirt-devel] [PATCH] Static IPv4 addresses entered are validated when they're entered. Message-ID: <1257969747-2216-1-git-send-email-dpierce@redhat.com> If the address is not blank then it is validated. Only those that are properly formed are accepted. Otherwise an error message is displayed and the user is prompted again. Resolves: rhbz#536912 - validation for static IP should be optimized Signed-off-by: Darryl L. Pierce --- scripts/ovirt-config-networking | 28 +++++++++++++++++++++++++--- 1 files changed, 25 insertions(+), 3 deletions(-) diff --git a/scripts/ovirt-config-networking b/scripts/ovirt-config-networking index 7d4e363..781553c 100755 --- a/scripts/ovirt-config-networking +++ b/scripts/ovirt-config-networking @@ -30,6 +30,28 @@ if ! 
is_local_storage_configured; then exit 99 fi +# $1 - the variable name to set +# $2 - the input prompt +function input_ipv4_address { + local varname=$1 + local prompt=$2 + + eval $varname=\"\" + + while true; do + read -ep "${prompt}: " + + if [ -z "$REPLY" ]; then return; fi + + if is_valid_ipv4 $REPLY; then + eval $varname=\"$REPLY\" + return + else + printf "\nThe address $REPLY is not a valid IPv4 address.\n" + fi + done +} + # Checks that a network interface was already configured. function has_configured_interface { @@ -167,9 +189,9 @@ function configure_interface ;; S|s) printf "\n" - read -ep "IP Address: "; IPADDR=$REPLY - read -ep " Netmask: "; NETMASK=$REPLY - read -ep " Gateway: "; GATEWAY=$REPLY + input_ipv4_address IPADDR "IP Address" + input_ipv4_address NETMASK " Netmask" + input_ipv4_address GATEWAY " Gateway" BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/BOOTPROTO none" BR_CONFIG="$BR_CONFIG\nset $BR_ROOT/IPADDR $IPADDR" -- 1.6.2.5 From dhuff at redhat.com Wed Nov 11 21:46:33 2009 From: dhuff at redhat.com (David Huff) Date: Wed, 11 Nov 2009 16:46:33 -0500 Subject: [Ovirt-devel] [RFC][PATCH] ovirt-node-image : edit-livecd : fail on error in arbitrary code In-Reply-To: <4AF96D92.4090503@linux.vnet.ibm.com> References: <4AF96D92.4090503@linux.vnet.ibm.com> Message-ID: <4AFB30B9.2020607@redhat.com> I took a look at this today, I am not sure that a requirement for 'CODE' to touch a "fail" file is the best way to address this. I assume your script is mounting proc inside the ext filesystem, ie to use yum or something. And using trap and a similar mnt function, like what is in edit-livecd, is not cleaning up all the mounts in this case? A fix may be to use /proc/mounts instead of df when adding the Exits in the mnt function. Also can you clarify the statement, "note that it may not always be possible to return some error code on failure in 'CODE'" I am not really sure what you mean here. Does this make sense? 
-D On 11/10/2009 08:41 AM, abhishek misra wrote: > Purpose : fail iso build on encountering error in arbitrary code '$CODE' > > 1. export 'WDIR' , for 'CODE' to pick-up location to create 'fail' > file to indicate failure, note that it may not always be possible to > return some error code on failure in 'CODE' > 2. 'set' commands enclosing 'CODE' will not be required > 3. 'CODE' is expected to use 'set -e and trap signals like EXIT > (which are generated when some command fails ) and write a 'fail' file > 4. look for 'fail' file once 'CODE' has completed > if positive then unmount proc ( note that if 'CODE', proc still > remains mounted; this is not unmounted by existing trap in edit-livecd > script ) > rest of the cleanup is taken care off by existing trap statement > > Signed-off-by: Abhishek Misra > > --- > > --- a/edit-livecd 2009-11-10 17:23:21.000000000 +0530 > +++ b/edit-livecd 2009-11-10 18:39:11.000000000 +0530 > @@ -159,13 +159,14 @@ cp -pr $WDIR/sq $WDIR/sq-w > # mount root filesystem > mnt "-t ext2 $WDIR/sq-w/LiveOS/ext3fs.img -o rw,loop" ex > > +# exporting for tarp in 'CODE' > +# $CODE can 'trap' EXIT signal and touch '$WDIR/fail' to notify this > script of its failure > + > echo ">>> Updating CD content" > if [ -n "$CODE" ]; then > ( > cd $WDIR/ex > - set +e > eval "$CODE" > - set -e > ) > else > echo "***" > @@ -174,6 +175,14 @@ else > read > fi > > +# exit if $CODE failed. Look if '$WDIR/fail' exists > +if [ -a $WDIR/fail ] ; then > + rm $WDIR/fail > + # need to explictly look for proc as failure in $CODE leaves it > mounted causing 'umount $WDIR/ex-rw' to fail > + mount | grep livecd | grep proc && addExit "umount -v > $WDIR/ex-rw/proc" > + exit > +fi > + > # Try to unmount. But this is likely to fail, so let the user retry, > # e.g., if he forgot to "cd" out of $WDIR/ex. 
> while :; do > > _______________________________________________ > Ovirt-devel mailing list > Ovirt-devel at redhat.com > https://www.redhat.com/mailman/listinfo/ovirt-devel From fishy at linux.vnet.ibm.com Fri Nov 13 08:36:03 2009 From: fishy at linux.vnet.ibm.com (abhishek misra) Date: Fri, 13 Nov 2009 14:06:03 +0530 Subject: [Ovirt-devel] [RFC][PATCH v2] edit-livecd : fail on error in arbitrary code In-Reply-To: <4AFB30B9.2020607@redhat.com> References: <4AF96D92.4090503@linux.vnet.ibm.com> <4AFB30B9.2020607@redhat.com> Message-ID: <4AFD1A73.5020904@linux.vnet.ibm.com> Hello All, Below are David's comments on my last patch David Huff wrote: > I took a look at this today, I am not sure that a requirement for 'CODE' > to touch a "fail" file is the best way to address this. > > I assume your script is mounting proc inside the ext filesystem, ie to > use yum or something. And using trap and a similar mnt function, like > what is in edit-livecd, is not cleaning up all the mounts in this case? > > A fix may be to use /proc/mounts instead of df when adding the Exits in > the mnt function. > > Also can you clarify the statement, "note that it may not always be > possible to return some error code on failure in 'CODE'" I am not really > sure what you mean here. > > Does this make sense? > > -D > David, you were right about proc , I've now taken care of that in my 'CODE' Here is another patch that tries to achieve the same without using 'touch fail' 1. 
removed compound statement ( list ) I found that its usage does not prevent trap to come into action if $CODE fails (which is desired ) but allows commands after (list) to continue executing ( which is not desired ) 2 added addExit "cd -" when $CODE fails and control returns to edit-livecd script, it is still in $WDIR/ex this causes failure when trap attempts umount 3 added EXIT=${EXIT/cd - ;/} we no longer need it if $CODE goes well 4 added cd - we need it as we removed (list) 5 removed set +/- e Signed-off-by: Abhishek Misra >> Updating CD content" if [ -n "$CODE" ]; then - ( cd $WDIR/ex - set +e + addExit "cd -" eval "$CODE" - set -e - ) + EXIT=${EXIT/cd - ;/} + cd - else echo "***" echo "*** Pausing to allow manual changes. Press any key to continue." From mloiseleur at linagora.com Fri Nov 13 11:46:52 2009 From: mloiseleur at linagora.com (Michel Loiseleur) Date: Fri, 13 Nov 2009 11:46:52 +0000 Subject: [Ovirt-devel] [PATCH server] Replace the occurence of the type @qmfc.object(Qmf::Query.new(:class => "xxx", 'key' => search_key)) for @qmfc.object(Qmf::Query.new(:class => "xxx"), 'key' => search_key) else the search on the key is not functionnal. Message-ID: <1258112812-16402-1-git-send-email-mloiseleur@linagora.com> This fix db-omatic that cannot recover the node on wich the vm run for example. We have not investigate far away but that can be due to a change on the ruby-qmf API. Signed-off-by: Michel Loiseleur --- src/db-omatic/db_omatic.rb | 10 ++++++---- src/libvirt-list.rb | 6 +++--- src/matahari-list.rb | 4 ++-- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/src/db-omatic/db_omatic.rb b/src/db-omatic/db_omatic.rb index 686ad71..da11bfa 100755 --- a/src/db-omatic/db_omatic.rb +++ b/src/db-omatic/db_omatic.rb @@ -198,7 +198,7 @@ class DbOmatic < Qmf::ConsoleHandler if state == Vm::STATE_STOPPED @logger.info "VM has moved to stopped, clearing VM attributes." 
- qmf_vm = @qmfc.object(Qmf::Query.new(:class => "domain", 'uuid' => vm.uuid)) + qmf_vm = @qmfc.object(Qmf::Query.new(:class => "domain"), 'uuid' => vm.uuid) if qmf_vm @logger.info "Deleting VM #{vm.description}." result = qmf_vm.undefine @@ -210,12 +210,14 @@ class DbOmatic < Qmf::ConsoleHandler # If we are running, update the node that the domain is running on elsif state == Vm::STATE_RUNNING @logger.info "VM is running, determine the node it is running on" - qmf_vm = @qmfc.object(Qmf::Query.new(:class => "domain", 'uuid' => vm.uuid)) + qmf_vm = @qmfc.object(Qmf::Query.new(:class => "domain"), 'uuid' => vm.uuid) if qmf_vm qmf_host = @qmfc.object(Qmf::Query.new(:class => "node", :object_id => qmf_vm.node)) db_host = Host.find(:first, :conditions => ['hostname = ?', qmf_host.hostname]) @logger.info "VM #{vm.description} is running on node #{db_host.hostname}" vm.host_id = db_host.id + elsif + @logger.info "Cannot find in QMF the node corresponding to #{domain['name']} " end end @@ -276,7 +278,7 @@ class DbOmatic < Qmf::ConsoleHandler # Double check to make sure this host is still up. begin - qmf_host = @qmfc.objects(Qmf::Query.new(:class => "node", 'hostname' => host_info['hostname'])) + qmf_host = @qmfc.objects(Qmf::Query.new(:class => "node"), 'hostname' => host_info['hostname']) if !qmf_host @logger.info "Host #{host_info['hostname']} is not up after waiting 20 seconds, skipping dead VM check." else @@ -483,7 +485,7 @@ class DbOmatic < Qmf::ConsoleHandler # them to stopped. VMs that exist as QMF objects will get set appropriately when the objects # appear on the bus. 
begin - qmf_vm = @qmfc.object(Qmf::Query.new(:class => "domain", 'uuid' => db_vm.uuid)) + qmf_vm = @qmfc.object(Qmf::Query.new(:class => "domain"), 'uuid' => db_vm.uuid) if qmf_vm == nil set_stopped = true end diff --git a/src/libvirt-list.rb b/src/libvirt-list.rb index c81926a..f4df672 100755 --- a/src/libvirt-list.rb +++ b/src/libvirt-list.rb @@ -33,7 +33,7 @@ nodes.each do |node| end # Find any domains that on the current node. - domains = qmfc.objects(Qmf::Query.new(:class => "domain", 'node' => node.object_id)) + domains = qmfc.objects(Qmf::Query.new(:class => "domain"), 'node' => node.object_id) domains.each do |domain| r = domain.getXMLDesc() puts "getXMLDesc() status: #{r.status}" @@ -48,7 +48,7 @@ nodes.each do |node| end end - pools = qmfc.objects(Qmf::Query.new(:class => "pool", 'node' => node.object_id)) + pools = qmfc.objects(Qmf::Query.new(:class => "pool"), 'node' => node.object_id) pools.each do |pool| puts " pool: #{pool.name}" for (key, val) in pool.properties @@ -63,7 +63,7 @@ nodes.each do |node| end # Find volumes that are part of the pool. 
- volumes = qmfc.objects(Qmf::Query.new(:class => "volume", 'pool' => pool.object_id)) + volumes = qmfc.objects(Qmf::Query.new(:class => "volume"), 'pool' => pool.object_id) volumes.each do |volume| puts " volume: #{volume.name}" for (key, val) in volume.properties diff --git a/src/matahari-list.rb b/src/matahari-list.rb index 8795019..6671f7a 100755 --- a/src/matahari-list.rb +++ b/src/matahari-list.rb @@ -33,7 +33,7 @@ hosts.each do |host| end # List cpus for current host - cpus = qmfc.objects(Qmf::Query.new(:class => 'cpu', 'host' => host.object_id)) + cpus = qmfc.objects(Qmf::Query.new(:class => 'cpu'), 'host' => host.object_id) cpus.each do |cpu| puts ' CPU:' for (key, val) in cpu.properties @@ -42,7 +42,7 @@ hosts.each do |host| end # cpus.each # List nics for current host - nics = qmfc.objects(Qmf::Query.new(:class => 'nic', 'host' => host.object_id)) + nics = qmfc.objects(Qmf::Query.new(:class => 'nic'), 'host' => host.object_id) nics.each do |nic| puts ' NIC: ' for (key, val) in nic.properties -- 1.6.2.5 From dpierce at redhat.com Fri Nov 13 16:39:15 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Fri, 13 Nov 2009 11:39:15 -0500 Subject: [Ovirt-devel] [PATCH] Provides an explicit upgrade path for an installed node. Message-ID: <1258130355-5447-1-git-send-email-dpierce@redhat.com> This patch allows the node to be upgraded without destroying any configuration. The new kernel argument, ovirt_upgrade, will boot cause the node to install the upgraded image, then reboot. Resolves: rhbz#527217 Signed-off-by: Darryl L. Pierce --- scripts/ovirt-early | 14 +++++++------- scripts/ovirt-firstboot | 22 +++++++++++++--------- scripts/ovirt-functions | 12 +++++++++++- 3 files changed, 31 insertions(+), 17 deletions(-) diff --git a/scripts/ovirt-early b/scripts/ovirt-early index cdd4afd..cda1b3f 100755 --- a/scripts/ovirt-early +++ b/scripts/ovirt-early @@ -135,7 +135,7 @@ start() { # ovirt_init=[usb|scsi[:serial#]|/dev/...] 
# ovirt_vol=BOOT_MB:SWAP_MB:ROOT_MB:CONFIG_MB:LOGGING_MB:DATA_MB # ovirt_overcommit= - # ovirt_local_boot + # ovirt_upgrade # ovirt_standalone # ovirt_firstboot # rescue @@ -182,9 +182,9 @@ start() { # or a specific positive number in MB vol_data_size= - # ovirt_local_boot + # ovirt_upgrade # install/update oVirt Node image on the local installation target disk - local_boot= + upgrade= # ovirt_overcommit= # set the swap size coefficient @@ -305,8 +305,8 @@ start() { i=${i#ovirt_vol=} eval $(printf $i|awk -F: '{print "vol_boot_size="$1; print "vol_swap_size="$2; print "vol_root_size="$3; print "vol_config_size="$4; print "vol_logging_size="$5; print "vol_data_size="$6;}') ;; - ovirt_local_boot*) - local_boot=1 + ovirt_upgrade*) + upgrade=1 ;; ovirt_standalone*) standalone=1 @@ -379,7 +379,7 @@ start() { ip_gateway=$gateway fi # save boot parameters as defaults for ovirt-config-* - params="bootif init vol_boot_size vol_swap_size vol_root_size vol_config_size vol_logging_size vol_data_size local_boot standalone overcommit ip_address ip_netmask ip_gateway ipv6 dns ntp vlan ssh_pwauth syslog_server syslog_port collectd_server collectd_port bootparams hostname firstboot" + params="bootif init vol_boot_size vol_swap_size vol_root_size vol_config_size vol_logging_size vol_data_size upgrade standalone overcommit ip_address ip_netmask ip_gateway ipv6 dns ntp vlan ssh_pwauth syslog_server syslog_port collectd_server collectd_port bootparams hostname firstboot" # mount /config unless firstboot is forced if [ "$firstboot" != "1" ]; then mount_config @@ -426,7 +426,7 @@ start() { ovirt_store_config \ /etc/sysconfig/network-scripts/ifcfg-* \ $BONDING_MODCONF_FILE - if [ $local_boot = 1 ]; then + if [ $upgrade = 1 ]; then # local disk installation for managed mode mount_live ovirt-config-boot /live "$bootparams" diff --git a/scripts/ovirt-firstboot b/scripts/ovirt-firstboot index bdafb33..650dcf7 100755 --- a/scripts/ovirt-firstboot +++ b/scripts/ovirt-firstboot @@ -32,7 +32,7 @@ 
trap 'exit $?' 1 2 13 15 start () { - if ! is_firstboot && ! is_auto_install; then + if ! is_firstboot && ! is_auto_install && ! is_upgrade; then return fi @@ -46,14 +46,18 @@ start () ovirt-config-networking AUTO ovirt-config-logging AUTO ovirt-config-collectd AUTO - ovirt-config-password AUTO - if [ "$OVIRT_LOCAL_BOOT" = 1 ]; then - mount_live - ovirt-config-boot /live "$OVIRT_BOOTPARAMS" no - disable_firstboot - reboot - fi - elif is_firstboot; then + ovirt-config-password AUTO + fi + + if is_upgrade; then + mount_live + ovirt-config-boot /live "$OVIRT_BOOTPARAMS" no + disable_firstboot + reboot + return + fi + + if is_firstboot; then plymouth --hide-splash ovirt-config-setup -x < /dev/console diff --git a/scripts/ovirt-functions b/scripts/ovirt-functions index b61e317..69e2a58 100644 --- a/scripts/ovirt-functions +++ b/scripts/ovirt-functions @@ -102,6 +102,16 @@ is_auto_install() { fi } +# return 0 if this is an upgrade +# return 1 otherwise +is_upgrade() { + if [ "$OVIRT_UPGRADE" = "1" ]; then + return 0 + else + return 1 + fi +} + # return 0 if booted from local disk # return 1 if booted from other media is_booted_from_local_disk() { @@ -123,7 +133,7 @@ disable_firstboot() { augtool < References: <1258130355-5447-1-git-send-email-dpierce@redhat.com> Message-ID: <2be7262f0911131312u20e7baefn2452c79dd3e84819@mail.gmail.com> On Fri, Nov 13, 2009 at 5:39 PM, Darryl L. Pierce wrote: > - ? ? ? ? ? ?ovirt_local_boot*) > - ? ? ? ? ? ?local_boot=1 > + ? ? ? ? ? ?ovirt_upgrade*) > + ? ? ? ? ? ?upgrade=1 I think it shouldn't hurt to keep old parameter name for compatibility reasons: ovirt_local_boot* | ovirt_upgrade*) otherwise it looks good, just as a follow patch please add a test case in o-n-i autotest.sh - even upgrading to the same version would be a good sanity check that upgrade didn't break existing configuration From dpierce at redhat.com Mon Nov 16 14:23:30 2009 From: dpierce at redhat.com (Darryl L. 
Pierce) Date: Mon, 16 Nov 2009 09:23:30 -0500 Subject: [Ovirt-devel] [PATCH] Provides an explicit upgrade path for an installed node. In-Reply-To: <2be7262f0911131312u20e7baefn2452c79dd3e84819@mail.gmail.com> References: <1258130355-5447-1-git-send-email-dpierce@redhat.com> <2be7262f0911131312u20e7baefn2452c79dd3e84819@mail.gmail.com> Message-ID: <20091116142330.GB15321@mcpierce-desktop.usersys.redhat.com> On Fri, Nov 13, 2009 at 10:12:17PM +0100, Alan Pevec wrote: > On Fri, Nov 13, 2009 at 5:39 PM, Darryl L. Pierce wrote: > > - ? ? ? ? ? ?ovirt_local_boot*) > > - ? ? ? ? ? ?local_boot=1 > > + ? ? ? ? ? ?ovirt_upgrade*) > > + ? ? ? ? ? ?upgrade=1 > > I think it shouldn't hurt to keep old parameter name for compatibility reasons: > ovirt_local_boot* | ovirt_upgrade*) > > otherwise it looks good, just as a follow patch please add a test case > in o-n-i autotest.sh - even upgrading to the same version would be a > good sanity check that upgrade didn't break existing configuration Very good. I'll have a refactored patch out shortly. -- Darryl L. Pierce, Sr. Software Engineer @ Red Hat, Inc. Delivering value year after year. Red Hat ranks #1 in value among software vendors. http://www.redhat.com/promo/vendor/ -------------- next part -------------- A non-text attachment was scrubbed... Name: not available Type: application/pgp-signature Size: 197 bytes Desc: not available URL: From mloiseleur at linagora.com Mon Nov 16 14:39:37 2009 From: mloiseleur at linagora.com (Michel Loiseleur) Date: Mon, 16 Nov 2009 15:39:37 +0100 Subject: [Ovirt-devel] Re: [PATCH] fix storages crazyness In-Reply-To: <1255030846-8053-1-git-send-email-mloiseleur@linagora.com> References: <1255030846-8053-1-git-send-email-mloiseleur@linagora.com> Message-ID: <4B016429.1080803@linagora.com> Hi, Just to point you this patch, which is quite important and not ACK at the moment. 
Thanks, Loiseleur Michel a écrit : > Signed-off-by: Loiseleur Michel > --- > app/models/vm.rb | 2 +- > 1 files changed, 1 insertions(+), 1 deletions(-) > > diff --git a/app/models/vm.rb b/app/models/vm.rb > index 88e0aef..0be3f89 100644 > --- a/app/models/vm.rb > +++ b/app/models/vm.rb > @@ -27,7 +27,7 @@ class Vm < ActiveRecord::Base > find(:all, :conditions=>{:state=>Task::STATE_QUEUED}) > end > end > - has_and_belongs_to_many :storage_volumes > + has_and_belongs_to_many :storage_volumes, :uniq => true > > has_many :nics, :dependent => :destroy > > -- Loiseleur Michel Responsable de l'OSSA Linagora / 27, rue de Berri / 75008 PARIS Tel/Fax : 01 58 18 68 28 / 01 58 18 68 29 http://job.linagora.com/ | http://www.tosca-project.net "Ce n'est pas le logiciel qui est libre, c'est vous" From dpierce at redhat.com Mon Nov 16 15:19:41 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Mon, 16 Nov 2009 10:19:41 -0500 Subject: [Ovirt-devel] Refactored upgrade patch... Message-ID: <1258384782-3901-1-git-send-email-dpierce@redhat.com> This patch includes feedback from apevec to remain backward compatible with the previous karg, ovirt_local_boot. From dpierce at redhat.com Mon Nov 16 15:19:42 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Mon, 16 Nov 2009 10:19:42 -0500 Subject: [Ovirt-devel] [PATCH] Provides an explicit upgrade path for an installed node. In-Reply-To: <1258384782-3901-1-git-send-email-dpierce@redhat.com> References: <1258384782-3901-1-git-send-email-dpierce@redhat.com> Message-ID: <1258384782-3901-2-git-send-email-dpierce@redhat.com> This patch allows the node to be upgraded without destroying any configuration. The new kernel argument, ovirt_upgrade, will cause the node to install the upgraded image, then reboot. Resolves: rhbz#527217 Signed-off-by: Darryl L.
Pierce --- scripts/ovirt-early | 14 +++++++------- scripts/ovirt-firstboot | 22 +++++++++++++--------- scripts/ovirt-functions | 12 +++++++++++- 3 files changed, 31 insertions(+), 17 deletions(-) diff --git a/scripts/ovirt-early b/scripts/ovirt-early index cdd4afd..0a91f99 100755 --- a/scripts/ovirt-early +++ b/scripts/ovirt-early @@ -135,7 +135,7 @@ start() { # ovirt_init=[usb|scsi[:serial#]|/dev/...] # ovirt_vol=BOOT_MB:SWAP_MB:ROOT_MB:CONFIG_MB:LOGGING_MB:DATA_MB # ovirt_overcommit= - # ovirt_local_boot + # ovirt_upgrade # ovirt_standalone # ovirt_firstboot # rescue @@ -182,9 +182,9 @@ start() { # or a specific positive number in MB vol_data_size= - # ovirt_local_boot + # ovirt_upgrade # install/update oVirt Node image on the local installation target disk - local_boot= + upgrade= # ovirt_overcommit= # set the swap size coefficient @@ -305,8 +305,8 @@ start() { i=${i#ovirt_vol=} eval $(printf $i|awk -F: '{print "vol_boot_size="$1; print "vol_swap_size="$2; print "vol_root_size="$3; print "vol_config_size="$4; print "vol_logging_size="$5; print "vol_data_size="$6;}') ;; - ovirt_local_boot*) - local_boot=1 + ovirt_upgrade* |ovirt_local_boot*) + upgrade=1 ;; ovirt_standalone*) standalone=1 @@ -379,7 +379,7 @@ start() { ip_gateway=$gateway fi # save boot parameters as defaults for ovirt-config-* - params="bootif init vol_boot_size vol_swap_size vol_root_size vol_config_size vol_logging_size vol_data_size local_boot standalone overcommit ip_address ip_netmask ip_gateway ipv6 dns ntp vlan ssh_pwauth syslog_server syslog_port collectd_server collectd_port bootparams hostname firstboot" + params="bootif init vol_boot_size vol_swap_size vol_root_size vol_config_size vol_logging_size vol_data_size upgrade standalone overcommit ip_address ip_netmask ip_gateway ipv6 dns ntp vlan ssh_pwauth syslog_server syslog_port collectd_server collectd_port bootparams hostname firstboot" # mount /config unless firstboot is forced if [ "$firstboot" != "1" ]; then mount_config @@ -426,7 
+426,7 @@ start() { ovirt_store_config \ /etc/sysconfig/network-scripts/ifcfg-* \ $BONDING_MODCONF_FILE - if [ $local_boot = 1 ]; then + if [ $upgrade = 1 ]; then # local disk installation for managed mode mount_live ovirt-config-boot /live "$bootparams" diff --git a/scripts/ovirt-firstboot b/scripts/ovirt-firstboot index bdafb33..650dcf7 100755 --- a/scripts/ovirt-firstboot +++ b/scripts/ovirt-firstboot @@ -32,7 +32,7 @@ trap 'exit $?' 1 2 13 15 start () { - if ! is_firstboot && ! is_auto_install; then + if ! is_firstboot && ! is_auto_install && ! is_upgrade; then return fi @@ -46,14 +46,18 @@ start () ovirt-config-networking AUTO ovirt-config-logging AUTO ovirt-config-collectd AUTO - ovirt-config-password AUTO - if [ "$OVIRT_LOCAL_BOOT" = 1 ]; then - mount_live - ovirt-config-boot /live "$OVIRT_BOOTPARAMS" no - disable_firstboot - reboot - fi - elif is_firstboot; then + ovirt-config-password AUTO + fi + + if is_upgrade; then + mount_live + ovirt-config-boot /live "$OVIRT_BOOTPARAMS" no + disable_firstboot + reboot + return + fi + + if is_firstboot; then plymouth --hide-splash ovirt-config-setup -x < /dev/console diff --git a/scripts/ovirt-functions b/scripts/ovirt-functions index b61e317..69e2a58 100644 --- a/scripts/ovirt-functions +++ b/scripts/ovirt-functions @@ -102,6 +102,16 @@ is_auto_install() { fi } +# return 0 if this is an upgrade +# return 1 otherwise +is_upgrade() { + if [ "$OVIRT_UPGRADE" = "1" ]; then + return 0 + else + return 1 + fi +} + # return 0 if booted from local disk # return 1 if booted from other media is_booted_from_local_disk() { @@ -123,7 +133,7 @@ disable_firstboot() { augtool < Resending the refactored patch: I realized after I emailed it that it referenced the wrong bugzilla. From dpierce at redhat.com Mon Nov 16 15:39:23 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Mon, 16 Nov 2009 10:39:23 -0500 Subject: [Ovirt-devel] [PATCH] Provides an explicit upgrade path for an installed node. 
In-Reply-To: <1258385963-5787-1-git-send-email-dpierce@redhat.com> References: <1258385963-5787-1-git-send-email-dpierce@redhat.com> Message-ID: <1258385963-5787-2-git-send-email-dpierce@redhat.com> This patch allows the node to be upgraded without destroying any configuration. The new kernel argument, ovirt_upgrade, will boot cause the node to install the upgraded image, then reboot. Resolves: rhbz#532547 Signed-off-by: Darryl L. Pierce --- scripts/ovirt-early | 14 +++++++------- scripts/ovirt-firstboot | 22 +++++++++++++--------- scripts/ovirt-functions | 12 +++++++++++- 3 files changed, 31 insertions(+), 17 deletions(-) diff --git a/scripts/ovirt-early b/scripts/ovirt-early index cdd4afd..0a91f99 100755 --- a/scripts/ovirt-early +++ b/scripts/ovirt-early @@ -135,7 +135,7 @@ start() { # ovirt_init=[usb|scsi[:serial#]|/dev/...] # ovirt_vol=BOOT_MB:SWAP_MB:ROOT_MB:CONFIG_MB:LOGGING_MB:DATA_MB # ovirt_overcommit= - # ovirt_local_boot + # ovirt_upgrade # ovirt_standalone # ovirt_firstboot # rescue @@ -182,9 +182,9 @@ start() { # or a specific positive number in MB vol_data_size= - # ovirt_local_boot + # ovirt_upgrade # install/update oVirt Node image on the local installation target disk - local_boot= + upgrade= # ovirt_overcommit= # set the swap size coefficient @@ -305,8 +305,8 @@ start() { i=${i#ovirt_vol=} eval $(printf $i|awk -F: '{print "vol_boot_size="$1; print "vol_swap_size="$2; print "vol_root_size="$3; print "vol_config_size="$4; print "vol_logging_size="$5; print "vol_data_size="$6;}') ;; - ovirt_local_boot*) - local_boot=1 + ovirt_upgrade* |ovirt_local_boot*) + upgrade=1 ;; ovirt_standalone*) standalone=1 @@ -379,7 +379,7 @@ start() { ip_gateway=$gateway fi # save boot parameters as defaults for ovirt-config-* - params="bootif init vol_boot_size vol_swap_size vol_root_size vol_config_size vol_logging_size vol_data_size local_boot standalone overcommit ip_address ip_netmask ip_gateway ipv6 dns ntp vlan ssh_pwauth syslog_server syslog_port collectd_server 
collectd_port bootparams hostname firstboot" + params="bootif init vol_boot_size vol_swap_size vol_root_size vol_config_size vol_logging_size vol_data_size upgrade standalone overcommit ip_address ip_netmask ip_gateway ipv6 dns ntp vlan ssh_pwauth syslog_server syslog_port collectd_server collectd_port bootparams hostname firstboot" # mount /config unless firstboot is forced if [ "$firstboot" != "1" ]; then mount_config @@ -426,7 +426,7 @@ start() { ovirt_store_config \ /etc/sysconfig/network-scripts/ifcfg-* \ $BONDING_MODCONF_FILE - if [ $local_boot = 1 ]; then + if [ $upgrade = 1 ]; then # local disk installation for managed mode mount_live ovirt-config-boot /live "$bootparams" diff --git a/scripts/ovirt-firstboot b/scripts/ovirt-firstboot index bdafb33..650dcf7 100755 --- a/scripts/ovirt-firstboot +++ b/scripts/ovirt-firstboot @@ -32,7 +32,7 @@ trap 'exit $?' 1 2 13 15 start () { - if ! is_firstboot && ! is_auto_install; then + if ! is_firstboot && ! is_auto_install && ! is_upgrade; then return fi @@ -46,14 +46,18 @@ start () ovirt-config-networking AUTO ovirt-config-logging AUTO ovirt-config-collectd AUTO - ovirt-config-password AUTO - if [ "$OVIRT_LOCAL_BOOT" = 1 ]; then - mount_live - ovirt-config-boot /live "$OVIRT_BOOTPARAMS" no - disable_firstboot - reboot - fi - elif is_firstboot; then + ovirt-config-password AUTO + fi + + if is_upgrade; then + mount_live + ovirt-config-boot /live "$OVIRT_BOOTPARAMS" no + disable_firstboot + reboot + return + fi + + if is_firstboot; then plymouth --hide-splash ovirt-config-setup -x < /dev/console diff --git a/scripts/ovirt-functions b/scripts/ovirt-functions index b61e317..69e2a58 100644 --- a/scripts/ovirt-functions +++ b/scripts/ovirt-functions @@ -102,6 +102,16 @@ is_auto_install() { fi } +# return 0 if this is an upgrade +# return 1 otherwise +is_upgrade() { + if [ "$OVIRT_UPGRADE" = "1" ]; then + return 0 + else + return 1 + fi +} + # return 0 if booted from local disk # return 1 if booted from other media 
is_booted_from_local_disk() { @@ -123,7 +133,7 @@ disable_firstboot() { augtool < Add a kernel parameter, vendor= which takes a path to a script embedded in the image. If this script is executable, we will then source it during ovirt-early start() after command line processing and before mounting of /config. We also call a post hook at the end of ovirt-early start(). Also include a sample vendor script. Signed-off-by: Ryan Harper --- scripts/ovirt-early | 25 ++++++++++++++++++++++++- scripts/ovirt-vendor.sample | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+), 1 deletions(-) create mode 100644 scripts/ovirt-vendor.sample diff --git a/scripts/ovirt-early b/scripts/ovirt-early index cdd4afd..7683948 100755 --- a/scripts/ovirt-early +++ b/scripts/ovirt-early @@ -369,9 +369,27 @@ start() { console=*) bootparams="$bootparams $i" ;; + vendor=*) + i=${i#vendor=} + # path to vendor script: + # has 2 stages: + # vendor_pre_hook() + # vendor_post_host() + # pre_hook runs after cmdline processing but before the rest of ovirt-early + # post_hook runs at the end of ovirt-early start() + [ -x "${i}" ] && { + vendor_script="$i" + log "Found vendor script: ${vendor_script}"; + bootparams="$bootparams $i" + } esac done + if [ -n "${vendor_script}" ]; then + . 
${vendor_script} + vendor_pre_hook + fi + if [ -z "$ip_netmask" ]; then ip_netmask=$netmask fi @@ -379,7 +397,8 @@ start() { ip_gateway=$gateway fi # save boot parameters as defaults for ovirt-config-* - params="bootif init vol_boot_size vol_swap_size vol_root_size vol_config_size vol_logging_size vol_data_size local_boot standalone overcommit ip_address ip_netmask ip_gateway ipv6 dns ntp vlan ssh_pwauth syslog_server syslog_port collectd_server collectd_port bootparams hostname firstboot" + # and allow vendor prehook set params to be saved + params="${params} bootif init vol_boot_size vol_swap_size vol_root_size vol_config_size vol_logging_size vol_data_size local_boot standalone overcommit ip_address ip_netmask ip_gateway ipv6 dns ntp vlan ssh_pwauth syslog_server syslog_port collectd_server collectd_port bootparams hostname firstboot" # mount /config unless firstboot is forced if [ "$firstboot" != "1" ]; then mount_config @@ -434,6 +453,10 @@ start() { fi fi + if [ -n "${vendor_script}" ]; then + vendor_post_hook + fi + return 0 } diff --git a/scripts/ovirt-vendor.sample b/scripts/ovirt-vendor.sample new file mode 100644 index 0000000..7a57ddd --- /dev/null +++ b/scripts/ovirt-vendor.sample @@ -0,0 +1,35 @@ +#!/bin/bash + +# This is a sample vendor script +# +# We need to provide two hook functions: +# - vendor_pre_hook() +# - vendor_post_hook() +# +# pre_hook is called after command line processing in ovirt-early, before +# /config is mounted +# +# post_hook is called at the very end of ovirt-early start() +# + +# as an example, lets look for a new kernel parameter and save it +function vendor_pre_hook() +{ + log "Entering vendor pre hook"; + for i in $(cat /proc/cmdline); do + case $i in + vendor=*) + bootparams="$bootparams $i" + ;; + esac + done + params="${params} vendor"; + log "Exiting vendor pre hook"; +} + +function vendor_post_hook() +{ + log "Entering vendor post hook"; + + log "Exiting vendor post hook"; +} -- 1.6.2.5 -- Ryan Harper Software 
Engineer; Linux Technology Center IBM Corp., Austin, Tx ryanh at us.ibm.com From dpierce at redhat.com Mon Nov 16 20:50:13 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Mon, 16 Nov 2009 15:50:13 -0500 Subject: [Ovirt-devel] [PATCH] Add support for vendor hooks during ovirt-early start() In-Reply-To: <20091116192145.GA10280@us.ibm.com> References: <20091116192145.GA10280@us.ibm.com> Message-ID: <20091116205012.GD18278@mcpierce-desktop.usersys.redhat.com> On Mon, Nov 16, 2009 at 01:21:45PM -0600, Ryan Harper wrote: > Add a kernel parameter, vendor= which takes a path to a script > embedded in the image. If this script is executable, we will then > source it during ovirt-early start() after command line processing > and before mounting of /config. We also call a post hook at the end of > ovirt-early start(). > > Also include a sample vendor script. > > Signed-off-by: Ryan Harper > --- > scripts/ovirt-early | 25 ++++++++++++++++++++++++- > scripts/ovirt-vendor.sample | 35 +++++++++++++++++++++++++++++++++++ > 2 files changed, 59 insertions(+), 1 deletions(-) > create mode 100644 scripts/ovirt-vendor.sample > > diff --git a/scripts/ovirt-early b/scripts/ovirt-early > index cdd4afd..7683948 100755 > --- a/scripts/ovirt-early > +++ b/scripts/ovirt-early > @@ -369,9 +369,27 @@ start() { > console=*) > bootparams="$bootparams $i" > ;; > + vendor=*) > + i=${i#vendor=} > + # path to vendor script: > + # has 2 stages: > + # vendor_pre_hook() > + # vendor_post_host() > + # pre_hook runs after cmdline processing but before the rest of ovirt-early > + # post_hook runs at the end of ovirt-early start() > + [ -x "${i}" ] && { > + vendor_script="$i" > + log "Found vendor script: ${vendor_script}"; > + bootparams="$bootparams $i" > + } If there would only be one such vendor script, then perhaps a different way to go would be to use a well-known script name and, if such exists, source and execute that, rather than specifying it in the kernel commandline? 
Then an optional kernel argument, "novendor", could be used to ignore that script if it exists. That way the default behavior would be to execute the script if it's found. > esac > done > > + if [ -n "${vendor_script}" ]; then > + . ${vendor_script} > + vendor_pre_hook > + fi Here, and also in the call to vendor_post_hook, the script should probably check that the function being called exist. + if [ -n "${vendor_script}" ]; then + . ${vendor_script} + if type vendor_pre_hook > /dev/null 2>&1; then + vendor_pre_hook + fi + fi > + > if [ -z "$ip_netmask" ]; then > ip_netmask=$netmask > fi > @@ -379,7 +397,8 @@ start() { > ip_gateway=$gateway > fi > # save boot parameters as defaults for ovirt-config-* > - params="bootif init vol_boot_size vol_swap_size vol_root_size vol_config_size vol_logging_size vol_data_size local_boot standalone overcommit ip_address ip_netmask ip_gateway ipv6 dns ntp vlan ssh_pwauth syslog_server syslog_port collectd_server collectd_port bootparams hostname firstboot" > + # and allow vendor prehook set params to be saved > + params="${params} bootif init vol_boot_size vol_swap_size vol_root_size vol_config_size vol_logging_size vol_data_size local_boot standalone overcommit ip_address ip_netmask ip_gateway ipv6 dns ntp vlan ssh_pwauth syslog_server syslog_port collectd_server collectd_port bootparams hostname firstboot" > # mount /config unless firstboot is forced > if [ "$firstboot" != "1" ]; then > mount_config > @@ -434,6 +453,10 @@ start() { > fi > fi > > + if [ -n "${vendor_script}" ]; then > + vendor_post_hook > + fi Same here with the method call, checking if it exists first: + if [ -n "${vendor_script}" ]; then + if type vendor_post_host > /dev/null 2>&1; then + vendor_post_hook + fi + fi > + > return 0 > } > > diff --git a/scripts/ovirt-vendor.sample b/scripts/ovirt-vendor.sample > new file mode 100644 > index 0000000..7a57ddd > --- /dev/null > +++ b/scripts/ovirt-vendor.sample > @@ -0,0 +1,35 @@ > +#!/bin/bash > + > +# This is a 
sample vendor script > +# > +# We need to provide two hook functions: > +# - vendor_pre_hook() > +# - vendor_post_hook() > +# > +# pre_hook is called after command line processing in ovirt-early, before > +# /config is mounted > +# > +# post_hook is called at the very end of ovirt-early start() > +# > + > +# as an example, lets look for a new kernel parameter and save it > +function vendor_pre_hook() > +{ > + log "Entering vendor pre hook"; > + for i in $(cat /proc/cmdline); do > + case $i in > + vendor=*) > + bootparams="$bootparams $i" > + ;; > + esac > + done > + params="${params} vendor"; > + log "Exiting vendor pre hook"; > +} > + > +function vendor_post_hook() > +{ > + log "Entering vendor post hook"; > + > + log "Exiting vendor post hook"; > +} > -- > 1.6.2.5 > > > -- > Ryan Harper > Software Engineer; Linux Technology Center > IBM Corp., Austin, Tx > ryanh at us.ibm.com > > _______________________________________________ > Ovirt-devel mailing list > Ovirt-devel at redhat.com > https://www.redhat.com/mailman/listinfo/ovirt-devel -- Darryl L. Pierce, Sr. Software Engineer @ Red Hat, Inc. Delivering value year after year. Red Hat ranks #1 in value among software vendors. http://www.redhat.com/promo/vendor/ -------------- next part -------------- A non-text attachment was scrubbed... Name: not available Type: application/pgp-signature Size: 197 bytes Desc: not available URL: From pmyers at redhat.com Mon Nov 16 20:55:13 2009 From: pmyers at redhat.com (Perry Myers) Date: Mon, 16 Nov 2009 15:55:13 -0500 Subject: [Ovirt-devel] [PATCH] Add support for vendor hooks during ovirt-early start() In-Reply-To: <20091116192145.GA10280@us.ibm.com> References: <20091116192145.GA10280@us.ibm.com> Message-ID: <4B01BC31.2050302@redhat.com> On 11/16/2009 02:21 PM, Ryan Harper wrote: > Add a kernel parameter, vendor= which takes a path to a script > embedded in the image. 
If this script is executable, we will then > source it during ovirt-early start() after command line processing > and before mounting of /config. We also call a post hook at the end of > ovirt-early start(). > > Also include a sample vendor script. If you're using edit-livecd to embed a vendor specific script in the oVirt ISO image, why wouldn't you just change the init process to call that script instead of using a kernel cmdline parameter like this? i.e. edit-livecd to add a new init script called vendor-foo to /etc/init.d, then chkconfig that script on. Now during startup the script will automatically execute. I suppose maybe some information about the use case that is driving this request might help to explain. Perry From ryanh at us.ibm.com Mon Nov 16 21:17:06 2009 From: ryanh at us.ibm.com (Ryan Harper) Date: Mon, 16 Nov 2009 15:17:06 -0600 Subject: [Ovirt-devel] [PATCH] Add support for vendor hooks during ovirt-early start() In-Reply-To: <20091116205012.GD18278@mcpierce-desktop.usersys.redhat.com> References: <20091116192145.GA10280@us.ibm.com> <20091116205012.GD18278@mcpierce-desktop.usersys.redhat.com> Message-ID: <20091116211705.GC10280@us.ibm.com> * Darryl L. Pierce [2009-11-16 14:51]: > On Mon, Nov 16, 2009 at 01:21:45PM -0600, Ryan Harper wrote: > > Add a kernel parameter, vendor= which takes a path to a script > > embedded in the image. If this script is executable, we will then > > source it during ovirt-early start() after command line processing > > and before mounting of /config. We also call a post hook at the end of > > ovirt-early start(). > > > > Also include a sample vendor script. 
> > > > Signed-off-by: Ryan Harper > > --- > > scripts/ovirt-early | 25 ++++++++++++++++++++++++- > > scripts/ovirt-vendor.sample | 35 +++++++++++++++++++++++++++++++++++ > > 2 files changed, 59 insertions(+), 1 deletions(-) > > create mode 100644 scripts/ovirt-vendor.sample > > > > diff --git a/scripts/ovirt-early b/scripts/ovirt-early > > index cdd4afd..7683948 100755 > > --- a/scripts/ovirt-early > > +++ b/scripts/ovirt-early > > @@ -369,9 +369,27 @@ start() { > > console=*) > > bootparams="$bootparams $i" > > ;; > > + vendor=*) > > + i=${i#vendor=} > > + # path to vendor script: > > + # has 2 stages: > > + # vendor_pre_hook() > > + # vendor_post_host() > > + # pre_hook runs after cmdline processing but before the rest of ovirt-early > > + # post_hook runs at the end of ovirt-early start() > > + [ -x "${i}" ] && { > > + vendor_script="$i" > > + log "Found vendor script: ${vendor_script}"; > > + bootparams="$bootparams $i" > > + } > > If there would only be one such vendor script, then perhaps a different > way to go would be to use a well-known script name and, if such exists, > source and execute that, rather than specifying it in the kernel > commandline? Then an optional kernel argument, "novendor", could be used > to ignore that script if it exists. That way the default behavior would > be to execute the script if it's found. By leaving it up to the kernel parameter to specify the vendor script, then one can embed multiple vendor scripts into a common image and then select which one to use at boot time. > > > esac > > done > > > > + if [ -n "${vendor_script}" ]; then > > + . ${vendor_script} > > + vendor_pre_hook > > + fi > > Here, and also in the call to vendor_post_hook, the script should > probably check that the function being called exist. > > + if [ -n "${vendor_script}" ]; then > + . 
${vendor_script} > + if type vendor_pre_hook > /dev/null 2>&1; then > + vendor_pre_hook > + fi > + fi Yep; That'll protect against vendor scripts that aren't implemented correctly. > > > + > > if [ -z "$ip_netmask" ]; then > > ip_netmask=$netmask > > fi > > @@ -379,7 +397,8 @@ start() { > > ip_gateway=$gateway > > fi > > # save boot parameters as defaults for ovirt-config-* > > - params="bootif init vol_boot_size vol_swap_size vol_root_size vol_config_size vol_logging_size vol_data_size local_boot standalone overcommit ip_address ip_netmask ip_gateway ipv6 dns ntp vlan ssh_pwauth syslog_server syslog_port collectd_server collectd_port bootparams hostname firstboot" > > + # and allow vendor prehook set params to be saved > > + params="${params} bootif init vol_boot_size vol_swap_size vol_root_size vol_config_size vol_logging_size vol_data_size local_boot standalone overcommit ip_address ip_netmask ip_gateway ipv6 dns ntp vlan ssh_pwauth syslog_server syslog_port collectd_server collectd_port bootparams hostname firstboot" > > # mount /config unless firstboot is forced > > if [ "$firstboot" != "1" ]; then > > mount_config > > @@ -434,6 +453,10 @@ start() { > > fi > > fi > > > > + if [ -n "${vendor_script}" ]; then > > + vendor_post_hook > > + fi > > Same here with the method call, checking if it exists first: > > + if [ -n "${vendor_script}" ]; then > + if type vendor_post_host > /dev/null 2>&1; then > + vendor_post_hook > + fi > + fi OK. 
-- Ryan Harper Software Engineer; Linux Technology Center IBM Corp., Austin, Tx ryanh at us.ibm.com From ryanh at us.ibm.com Mon Nov 16 21:40:23 2009 From: ryanh at us.ibm.com (Ryan Harper) Date: Mon, 16 Nov 2009 15:40:23 -0600 Subject: [Ovirt-devel] [PATCH] Add support for vendor hooks during ovirt-early start() In-Reply-To: <4B01BC31.2050302@redhat.com> References: <20091116192145.GA10280@us.ibm.com> <4B01BC31.2050302@redhat.com> Message-ID: <20091116214023.GD10280@us.ibm.com> * Perry Myers [2009-11-16 14:56]: > On 11/16/2009 02:21 PM, Ryan Harper wrote: > > Add a kernel parameter, vendor= which takes a path to a script > > embedded in the image. If this script is executable, we will then > > source it during ovirt-early start() after command line processing > > and before mounting of /config. We also call a post hook at the end of > > ovirt-early start(). > > > > Also include a sample vendor script. > > If you're using edit-livecd to embed a vendor specific script in the oVirt > ISO image, why wouldn't you just change the init process to call that > script instead of using a kernel cmdline parameter like this? > > i.e. edit-livecd to add a new init script called vendor-foo to > /etc/init.d, then chkconfig that script on. Now during startup the script > will automatically execute. > > I suppose maybe some information about the use case that is driving this > request might help to explain. Sure, one of the use cases we need is using an iscsi target as "local" disk. This requires bringing up networking and iscsi prior to ovirt-early trying to mount the /config partition. Previously, had just injected code around the area where the pre hook is now. Recently moved that to it's own function so we could conditionally call it.. then looking at how to merge any of that upstream and it made more sense to keep the very vendory specific changes out of ovirt-early altogether. 
I hadn't thought to do a completely separate file since some of the logic triggers on other boot parameters that ovirt-early processes (like firstboot, and storage_init). I'll have to look at seeing if we could move that into it's own init script; though there might be some duplication of code (command parsing, and such). With the vendor approach, it executes in the same context as ovirt-early so one can potentially override function implementations and exert control over the flow in the remainder of ovirt-early; that wouldn't be doable in a separate init script. One example that we rely upon that can't be done from separate script is appending boot parameters to the params list to be saved. -- Ryan Harper Software Engineer; Linux Technology Center IBM Corp., Austin, Tx ryanh at us.ibm.com From pmyers at redhat.com Mon Nov 16 21:47:29 2009 From: pmyers at redhat.com (Perry Myers) Date: Mon, 16 Nov 2009 16:47:29 -0500 Subject: [Ovirt-devel] [PATCH] Add support for vendor hooks during ovirt-early start() In-Reply-To: <20091116214023.GD10280@us.ibm.com> References: <20091116192145.GA10280@us.ibm.com> <4B01BC31.2050302@redhat.com> <20091116214023.GD10280@us.ibm.com> Message-ID: <4B01C871.4020304@redhat.com> On 11/16/2009 04:40 PM, Ryan Harper wrote: > * Perry Myers [2009-11-16 14:56]: >> On 11/16/2009 02:21 PM, Ryan Harper wrote: >>> Add a kernel parameter, vendor= which takes a path to a script >>> embedded in the image. If this script is executable, we will then >>> source it during ovirt-early start() after command line processing >>> and before mounting of /config. We also call a post hook at the end of >>> ovirt-early start(). >>> >>> Also include a sample vendor script. >> >> If you're using edit-livecd to embed a vendor specific script in the oVirt >> ISO image, why wouldn't you just change the init process to call that >> script instead of using a kernel cmdline parameter like this? >> >> i.e. 
edit-livecd to add a new init script called vendor-foo to >> /etc/init.d, then chkconfig that script on. Now during startup the script >> will automatically execute. >> >> I suppose maybe some information about the use case that is driving this >> request might help to explain. > > Sure, one of the use cases we need is using an iscsi target as "local" > disk. This requires bringing up networking and iscsi prior to > ovirt-early trying to mount the /config partition. Previously, had just > injected code around the area where the pre hook is now. Recently moved > that to it's own function so we could conditionally call it.. then > looking at how to merge any of that upstream and it made more sense to > keep the very vendory specific changes out of ovirt-early altogether. I > hadn't thought to do a completely separate file since some of the logic > triggers on other boot parameters that ovirt-early processes (like > firstboot, and storage_init). jboggs was working on providing iscsi as local disk and the plan was to integrate that as fully supported functionality for the base oVirt ISO image. So I'd prefer in this particular case that we work to get it integrated into the base oVirt ISO image rather than put it in as a vendor specific script. apevec, since jboggs is out of the office for a bit can you follow up here? > I'll have to look at seeing if we could move that into it's own init > script; though there might be some duplication of code (command parsing, > and such). With the vendor approach, it executes in the same context as > ovirt-early so one can potentially override function implementations and > exert control over the flow in the remainder of ovirt-early; that > wouldn't be doable in a separate init script. > > One example that we rely upon that can't be done from separate script is > appending boot parameters to the params list to be saved. ... 
But regardless, it is probably worth creating an easy to use interface for vendors to add their own init scripts in without needing to duplicate code, and trampling on ovirt-early. Perhaps an /etc/insert-your-vendor-script-here.d directory that ovirt-early and ovirt-post reads and executes functions from? That would provide an easy way to drop in custom functionality and would also not require a new kernel cmdline param. Thoughts? Perry From ryanh at us.ibm.com Mon Nov 16 22:14:39 2009 From: ryanh at us.ibm.com (Ryan Harper) Date: Mon, 16 Nov 2009 16:14:39 -0600 Subject: [Ovirt-devel] [PATCH] Add support for vendor hooks during ovirt-early start() In-Reply-To: <4B01C871.4020304@redhat.com> References: <20091116192145.GA10280@us.ibm.com> <4B01BC31.2050302@redhat.com> <20091116214023.GD10280@us.ibm.com> <4B01C871.4020304@redhat.com> Message-ID: <20091116221439.GF10280@us.ibm.com> * Perry Myers [2009-11-16 15:48]: > On 11/16/2009 04:40 PM, Ryan Harper wrote: > > * Perry Myers [2009-11-16 14:56]: > >> On 11/16/2009 02:21 PM, Ryan Harper wrote: > >>> Add a kernel parameter, vendor= which takes a path to a script > >>> embedded in the image. If this script is executable, we will then > >>> source it during ovirt-early start() after command line processing > >>> and before mounting of /config. We also call a post hook at the end of > >>> ovirt-early start(). > >>> > >>> Also include a sample vendor script. > >> > >> If you're using edit-livecd to embed a vendor specific script in the oVirt > >> ISO image, why wouldn't you just change the init process to call that > >> script instead of using a kernel cmdline parameter like this? > >> > >> i.e. edit-livecd to add a new init script called vendor-foo to > >> /etc/init.d, then chkconfig that script on. Now during startup the script > >> will automatically execute. > >> > >> I suppose maybe some information about the use case that is driving this > >> request might help to explain. 
> > > > Sure, one of the use cases we need is using an iscsi target as "local" > > disk. This requires bringing up networking and iscsi prior to > > ovirt-early trying to mount the /config partition. Previously, had just > > injected code around the area where the pre hook is now. Recently moved > > that to it's own function so we could conditionally call it.. then > > looking at how to merge any of that upstream and it made more sense to > > keep the very vendory specific changes out of ovirt-early altogether. I > > hadn't thought to do a completely separate file since some of the logic > > triggers on other boot parameters that ovirt-early processes (like > > firstboot, and storage_init). > > jboggs was working on providing iscsi as local disk and the plan was to > integrate that as fully supported functionality for the base oVirt ISO > image. So I'd prefer in this particular case that we work to get it > integrated into the base oVirt ISO image rather than put it in as a vendor > specific script. I can share what we've had to do to get it working so far; not the prettiest bit of code. Networking must be brough up prior to iscsi connection and one can't access /config. Ideally, we could pass a simple CONFIG_IP_PNP parameter to the kernel to get networking autoconfigured but neither RHEL nor Fedora enable IP_PNP. Passing the config via command is doable, but makes having a common boot image/config troublesome since each server has to have a unique config file. Additionally, we wanted to configure a bonded bridge prior to establishing the iscsi connection since once you mount /config over iscsi one cannot restart networking without dropping your filesystem into RO mode. The per-server config issue ended up being resolved by embedding additional config (iscsi server, initiatorname, and nfs server path) via DHCP vendor options. 
Here is what we're doing to bring up networking and connect to an iscsi target prior to mounting /config; this code runs where vendor_pre_hook() would. # don't let networking autostart later chkconfig --del network # generate custom ifcfg network files that # - build a bonded interface over 10g nics # - create a bridge with the bond0 as port configure_network_files # disable any current networking service network restart # look for custom DHCP paramters if they exist # and extract iscsi server IP and initator name echo "InitiatorName=${iscsi_initiator}" > /etc/iscsi/initiatorname.iscsi # disable iscsi autostart chkconfig --del iscsi chkconfig --del iscsd # bring up iscsi service iscsi start # discover iscsi targets on iscsi server TARGET=$(iscsiadm -m discovery -t sendtargets -p ${iscsi_server}) # restart iscsi server server iscsi restart # wait for devices to populate /dev/disk/by-id l=0 count=`find /dev/disk/by-id -mindeth 1 2>/dev/null | wc -l` while [ $l -lt 5 ]; do curr=`find /dev/disk/by-id -mindeth 1 2>/dev/null | wc -l` log "by-id now has $curr entries" [ $curr -gt $count ] && break l=$(($l + 1)) sleep 1; done # if not firstboot, expect LVs if [ "$firstboot" != "1" ]; then # it's not the firstboot log "Scanning for lvm config" l=0 while [ $l -lt 3 ]; do echo "lvm scan count=$l" >> /tmp/lvm.log pvscan >> /tmp/lvm.log sleep 1 vgscan >> /tmp/lvm.log sleep 1 lvscan >> /tmp/lvm.log sleep 1 vgchange -ay >> /tmp/lvm.log [ -e /dev/HostVG/Config ] && break; l=$(($l + 1)) done if [ ! -e /dev/HostVG/Config ] ; then log "Rescue mode requested, starting emergency shell" stop_log bash < /dev/console start_log fi fi At this point, we've made an iscsi connection and repopulated LVM and we can mount /config. > > apevec, since jboggs is out of the office for a bit can you follow up here? > > > I'll have to look at seeing if we could move that into it's own init > > script; though there might be some duplication of code (command parsing, > > and such). 
With the vendor approach, it executes in the same context as > > ovirt-early so one can potentially override function implementations and > > exert control over the flow in the remainder of ovirt-early; that > > wouldn't be doable in a separate init script. > > > > One example that we rely upon that can't be done from separate script is > > appending boot parameters to the params list to be saved. > > ... But regardless, it is probably worth creating an easy to use interface > for vendors to add their own init scripts in without needing to duplicate > code, and trampling on ovirt-early. Perhaps an > /etc/insert-your-vendor-script-here.d directory that ovirt-early and > ovirt-post reads and executes functions from? That would provide an easy > way to drop in custom functionality and would also not require a new > kernel cmdline param. Thoughts? That does seem cleaner; though I do still like having a way to choose which set of vendor scripts to run via kernel parameter. We'd like to have a single iso image that we could support different types of startups. -- Ryan Harper Software Engineer; Linux Technology Center IBM Corp., Austin, Tx ryanh at us.ibm.com From pmyers at redhat.com Mon Nov 16 23:09:39 2009 From: pmyers at redhat.com (Perry Myers) Date: Mon, 16 Nov 2009 18:09:39 -0500 Subject: [Ovirt-devel] [PATCH] Add support for vendor hooks during ovirt-early start() In-Reply-To: <20091116221439.GF10280@us.ibm.com> References: <20091116192145.GA10280@us.ibm.com> <4B01BC31.2050302@redhat.com> <20091116214023.GD10280@us.ibm.com> <4B01C871.4020304@redhat.com> <20091116221439.GF10280@us.ibm.com> Message-ID: <4B01DBB3.60300@redhat.com> On 11/16/2009 05:14 PM, Ryan Harper wrote: > * Perry Myers [2009-11-16 15:48]: >> On 11/16/2009 04:40 PM, Ryan Harper wrote: >>> * Perry Myers [2009-11-16 14:56]: >>>> On 11/16/2009 02:21 PM, Ryan Harper wrote: >>>>> Add a kernel parameter, vendor= which takes a path to a script >>>>> embedded in the image. 
If this script is executable, we will then >>>>> source it during ovirt-early start() after command line processing >>>>> and before mounting of /config. We also call a post hook at the end of >>>>> ovirt-early start(). >>>>> >>>>> Also include a sample vendor script. >>>> >>>> If you're using edit-livecd to embed a vendor specific script in the oVirt >>>> ISO image, why wouldn't you just change the init process to call that >>>> script instead of using a kernel cmdline parameter like this? >>>> >>>> i.e. edit-livecd to add a new init script called vendor-foo to >>>> /etc/init.d, then chkconfig that script on. Now during startup the script >>>> will automatically execute. >>>> >>>> I suppose maybe some information about the use case that is driving this >>>> request might help to explain. >>> >>> Sure, one of the use cases we need is using an iscsi target as "local" >>> disk. This requires bringing up networking and iscsi prior to >>> ovirt-early trying to mount the /config partition. Previously, had just >>> injected code around the area where the pre hook is now. Recently moved >>> that to it's own function so we could conditionally call it.. then >>> looking at how to merge any of that upstream and it made more sense to >>> keep the very vendory specific changes out of ovirt-early altogether. I >>> hadn't thought to do a completely separate file since some of the logic >>> triggers on other boot parameters that ovirt-early processes (like >>> firstboot, and storage_init). >> >> jboggs was working on providing iscsi as local disk and the plan was to >> integrate that as fully supported functionality for the base oVirt ISO >> image. So I'd prefer in this particular case that we work to get it >> integrated into the base oVirt ISO image rather than put it in as a vendor >> specific script. > > I can share what we've had to do to get it working so far; not the > prettiest bit of code. 
> > Networking must be brough up prior to iscsi connection and one can't > access /config. Ideally, we could pass a simple CONFIG_IP_PNP parameter > to the kernel to get networking autoconfigured but neither RHEL nor > Fedora enable IP_PNP. Passing the config via command is doable, but > makes having a common boot image/config troublesome since each server > has to have a unique config file. Additionally, we wanted to configure > a bonded bridge prior to establishing the iscsi connection since once > you mount /config over iscsi one cannot restart networking without > dropping your filesystem into RO mode. The per-server config issue > ended up being resolved by embedding additional config (iscsi server, > initiatorname, and nfs server path) via DHCP vendor options. > Here is what we're doing to bring up networking and connect to an iscsi > target prior to mounting /config; this code runs where vendor_pre_hook() > would. The ovirt-early script uses the standard ip= and ipv6= kernel command line options to bring up autoconfigured network interfaces. The interface to bring up can be specified using the standard BOOTIF= kernel cmdline arg. So what you're talking about should work fine, as both ip= and ipv6= both support dhcp as an argument. But yeah, doing automatic bonding is a different story. There's no way to do that automagically right now. You could add some additional kernel cmdline options similar to BOOTIF... i.e. STORAGE_IFS=MAC:MAC:MAC STORAGE_BONDING_TYPE= Gets a little complicated though so perhaps this is not the best way to do it... BOOTIF is nice since it is set to the MAC addr of the iface used to pxe boot the server. But there's no simple way to reliably define which ifaces you'd want to bond other than by MACaddr. Or... maybe you could do the following... rely in BOOTIF to tell you which device is the primary connection. Then bring up each other iface on the host in sequence and test to see what network they're on. 
The ones that are on the same network as the BOOTIF network iface would automatically all be bonded together. That would be pretty slick... Then all you need to do is pass as a kernel cmdline arg what bonding style you want to use. > > # don't let networking autostart later > chkconfig --del network > > # generate custom ifcfg network files that > # - build a bonded interface over 10g nics > # - create a bridge with the bond0 as port > configure_network_files > > # disable any current networking > service network restart > > # look for custom DHCP paramters if they exist > # and extract iscsi server IP and initator name > echo "InitiatorName=${iscsi_initiator}" > /etc/iscsi/initiatorname.iscsi > > # disable iscsi autostart > chkconfig --del iscsi > chkconfig --del iscsd > > # bring up iscsi > service iscsi start > > # discover iscsi targets on iscsi server > TARGET=$(iscsiadm -m discovery -t sendtargets -p ${iscsi_server}) > > # restart iscsi server > server iscsi restart > > # wait for devices to populate /dev/disk/by-id > l=0 > count=`find /dev/disk/by-id -mindeth 1 2>/dev/null | wc -l` > while [ $l -lt 5 ]; do > curr=`find /dev/disk/by-id -mindeth 1 2>/dev/null | wc -l` > log "by-id now has $curr entries" > [ $curr -gt $count ] && break > l=$(($l + 1)) > sleep 1; > done > > # if not firstboot, expect LVs > if [ "$firstboot" != "1" ]; then > # it's not the firstboot > log "Scanning for lvm config" > l=0 > while [ $l -lt 3 ]; do > echo "lvm scan count=$l" >> /tmp/lvm.log > pvscan >> /tmp/lvm.log > sleep 1 > vgscan >> /tmp/lvm.log > sleep 1 > lvscan >> /tmp/lvm.log > sleep 1 > vgchange -ay >> /tmp/lvm.log > [ -e /dev/HostVG/Config ] && break; > l=$(($l + 1)) > done > if [ ! -e /dev/HostVG/Config ] ; then > log "Rescue mode requested, starting emergency shell" > stop_log > bash < /dev/console > start_log > fi > fi > > At this point, we've made an iscsi connection and repopulated LVM and we can mount /config. Seems reasonable. 
I'm sure with a little tweaking this could be included somehow, but I'll defer to Alan and Joey on the specifics :) > >> >> apevec, since jboggs is out of the office for a bit can you follow up here? >> >>> I'll have to look at seeing if we could move that into it's own init >>> script; though there might be some duplication of code (command parsing, >>> and such). With the vendor approach, it executes in the same context as >>> ovirt-early so one can potentially override function implementations and >>> exert control over the flow in the remainder of ovirt-early; that >>> wouldn't be doable in a separate init script. >>> >>> One example that we rely upon that can't be done from separate script is >>> appending boot parameters to the params list to be saved. >> >> ... But regardless, it is probably worth creating an easy to use interface >> for vendors to add their own init scripts in without needing to duplicate >> code, and trampling on ovirt-early. Perhaps an >> /etc/insert-your-vendor-script-here.d directory that ovirt-early and >> ovirt-post reads and executes functions from? That would provide an easy >> way to drop in custom functionality and would also not require a new >> kernel cmdline param. Thoughts? > > That does seem cleaner; though I do still like having a way to choose > which set of vendor scripts to run via kernel parameter. We'd like to > have a single iso image that we could support different types of > startups. Yeah, I have no opposition to the kernel cmdline parameter used for that. 
From ryanh at us.ibm.com Tue Nov 17 03:57:29 2009 From: ryanh at us.ibm.com (Ryan Harper) Date: Mon, 16 Nov 2009 21:57:29 -0600 Subject: [Ovirt-devel] [PATCH] Add support for vendor hooks during ovirt-early start() In-Reply-To: <4B01DBB3.60300@redhat.com> References: <20091116192145.GA10280@us.ibm.com> <4B01BC31.2050302@redhat.com> <20091116214023.GD10280@us.ibm.com> <4B01C871.4020304@redhat.com> <20091116221439.GF10280@us.ibm.com> <4B01DBB3.60300@redhat.com> Message-ID: <20091117035729.GG10280@us.ibm.com> * Perry Myers [2009-11-16 17:11]: > On 11/16/2009 05:14 PM, Ryan Harper wrote: > > * Perry Myers [2009-11-16 15:48]: > >> On 11/16/2009 04:40 PM, Ryan Harper wrote: > >>> * Perry Myers [2009-11-16 14:56]: > >>>> On 11/16/2009 02:21 PM, Ryan Harper wrote: > >>>>> Add a kernel parameter, vendor= which takes a path to a script > >>>>> embedded in the image. If this script is executable, we will then > >>>>> source it during ovirt-early start() after command line processing > >>>>> and before mounting of /config. We also call a post hook at the end of > >>>>> ovirt-early start(). > >>>>> > >>>>> Also include a sample vendor script. > >>>> > >>>> If you're using edit-livecd to embed a vendor specific script in the oVirt > >>>> ISO image, why wouldn't you just change the init process to call that > >>>> script instead of using a kernel cmdline parameter like this? > >>>> > >>>> i.e. edit-livecd to add a new init script called vendor-foo to > >>>> /etc/init.d, then chkconfig that script on. Now during startup the script > >>>> will automatically execute. > >>>> > >>>> I suppose maybe some information about the use case that is driving this > >>>> request might help to explain. > >>> > >>> Sure, one of the use cases we need is using an iscsi target as "local" > >>> disk. This requires bringing up networking and iscsi prior to > >>> ovirt-early trying to mount the /config partition. Previously, had just > >>> injected code around the area where the pre hook is now. 
Recently moved > >>> that to it's own function so we could conditionally call it.. then > >>> looking at how to merge any of that upstream and it made more sense to > >>> keep the very vendory specific changes out of ovirt-early altogether. I > >>> hadn't thought to do a completely separate file since some of the logic > >>> triggers on other boot parameters that ovirt-early processes (like > >>> firstboot, and storage_init). > >> > >> jboggs was working on providing iscsi as local disk and the plan was to > >> integrate that as fully supported functionality for the base oVirt ISO > >> image. So I'd prefer in this particular case that we work to get it > >> integrated into the base oVirt ISO image rather than put it in as a vendor > >> specific script. > > > > I can share what we've had to do to get it working so far; not the > > prettiest bit of code. > > > > Networking must be brough up prior to iscsi connection and one can't > > access /config. Ideally, we could pass a simple CONFIG_IP_PNP parameter > > to the kernel to get networking autoconfigured but neither RHEL nor > > Fedora enable IP_PNP. Passing the config via command is doable, but > > makes having a common boot image/config troublesome since each server > > has to have a unique config file. Additionally, we wanted to configure > > a bonded bridge prior to establishing the iscsi connection since once > > you mount /config over iscsi one cannot restart networking without > > dropping your filesystem into RO mode. The per-server config issue > > ended up being resolved by embedding additional config (iscsi server, > > initiatorname, and nfs server path) via DHCP vendor options. > > Here is what we're doing to bring up networking and connect to an iscsi > > target prior to mounting /config; this code runs where vendor_pre_hook() > > would. > > The ovirt-early script uses the standard ip= and ipv6= kernel command line > options to bring up autoconfigured network interfaces. 
The interface to > bring up can be specified using the standard BOOTIF= kernel cmdline arg. > So what you're talking about should work fine, as both ip= and ipv6= both > support dhcp as an argument. Sure, except AFAICT, this won't bring up networking until after we've attempted to mount /config ... which is of course too late if we're using an iscsi "local" disk. > > But yeah, doing automatic bonding is a different story. There's no way to > do that automagically right now. You could add some additional kernel > cmdline options similar to BOOTIF... i.e. > STORAGE_IFS=MAC:MAC:MAC > STORAGE_BONDING_TYPE= > > Gets a little complicated though so perhaps this is not the best way to do > it... BOOTIF is nice since it is set to the MAC addr of the iface used to > pxe boot the server. But there's no simple way to reliably define which > ifaces you'd want to bond other than by MACaddr. Or... maybe you could > do the following... > > rely in BOOTIF to tell you which device is the primary connection. Then > bring up each other iface on the host in sequence and test to see what > network they're on. The ones that are on the same network as the BOOTIF > network iface would automatically all be bonded together. That would be > pretty slick... Then all you need to do is pass as a kernel cmdline arg > what bonding style you want to use. Oddly enough, we pxe from our 1g nics, but want to bond the 10g nics... I'd prefer specifying input for creating a bond, or bonds... BONDIF=bond0,bond1 bond0_MACS=$primary_mac,$secondary_mac bond0_OPTIONS="mode=6 miimon=100 use_carrier=1" bond0_PROTO="dhcp" bond0_ONBOOT="yes" bond1_MACS=$primary_mac,$secondary_mac bond1_OPTIONS="mode=6 miimon=100 use_carrier=1" We'd also need bridging parameters so we can configure a bridge with the bond port... BRIDGEIF=brbond0 and ... this gets ugly rather quickly; which is why we ended up with a vendor script instead... How far do you want to go with these kernel parameters for custom networking config? 
> > > > > # don't let networking autostart later > > chkconfig --del network > > > > # generate custom ifcfg network files that > > # - build a bonded interface over 10g nics > > # - create a bridge with the bond0 as port > > configure_network_files > > > > # disable any current networking > > service network restart > > > > # look for custom DHCP paramters if they exist > > # and extract iscsi server IP and initator name > > echo "InitiatorName=${iscsi_initiator}" > /etc/iscsi/initiatorname.iscsi > > > > # disable iscsi autostart > > chkconfig --del iscsi > > chkconfig --del iscsd > > > > # bring up iscsi > > service iscsi start > > > > # discover iscsi targets on iscsi server > > TARGET=$(iscsiadm -m discovery -t sendtargets -p ${iscsi_server}) > > > > # restart iscsi server > > server iscsi restart > > > > # wait for devices to populate /dev/disk/by-id > > l=0 > > count=`find /dev/disk/by-id -mindeth 1 2>/dev/null | wc -l` > > while [ $l -lt 5 ]; do > > curr=`find /dev/disk/by-id -mindeth 1 2>/dev/null | wc -l` > > log "by-id now has $curr entries" > > [ $curr -gt $count ] && break > > l=$(($l + 1)) > > sleep 1; > > done > > > > # if not firstboot, expect LVs > > if [ "$firstboot" != "1" ]; then > > # it's not the firstboot > > log "Scanning for lvm config" > > l=0 > > while [ $l -lt 3 ]; do > > echo "lvm scan count=$l" >> /tmp/lvm.log > > pvscan >> /tmp/lvm.log > > sleep 1 > > vgscan >> /tmp/lvm.log > > sleep 1 > > lvscan >> /tmp/lvm.log > > sleep 1 > > vgchange -ay >> /tmp/lvm.log > > [ -e /dev/HostVG/Config ] && break; > > l=$(($l + 1)) > > done > > if [ ! -e /dev/HostVG/Config ] ; then > > log "Rescue mode requested, starting emergency shell" > > stop_log > > bash < /dev/console > > start_log > > fi > > fi > > > > At this point, we've made an iscsi connection and repopulated LVM and we can mount /config. > > Seems reasonable. 
I'm sure with a little tweaking this could be included > somehow, but I'll defer to Alan and Joey on the specifics :) > > > > >> > >> apevec, since jboggs is out of the office for a bit can you follow up here? > >> > >>> I'll have to look at seeing if we could move that into it's own init > >>> script; though there might be some duplication of code (command parsing, > >>> and such). With the vendor approach, it executes in the same context as > >>> ovirt-early so one can potentially override function implementations and > >>> exert control over the flow in the remainder of ovirt-early; that > >>> wouldn't be doable in a separate init script. > >>> > >>> One example that we rely upon that can't be done from separate script is > >>> appending boot parameters to the params list to be saved. > >> > >> ... But regardless, it is probably worth creating an easy to use interface > >> for vendors to add their own init scripts in without needing to duplicate > >> code, and trampling on ovirt-early. Perhaps an > >> /etc/insert-your-vendor-script-here.d directory that ovirt-early and > >> ovirt-post reads and executes functions from? That would provide an easy > >> way to drop in custom functionality and would also not require a new > >> kernel cmdline param. Thoughts? > > > > That does seem cleaner; though I do still like having a way to choose > > which set of vendor scripts to run via kernel parameter. We'd like to > > have a single iso image that we could support different types of > > startups. > > Yeah, I have no opposition to the kernel cmdline parameter used for that. OK. 
-- Ryan Harper Software Engineer; Linux Technology Center IBM Corp., Austin, Tx ryanh at us.ibm.com From apevec at redhat.com Tue Nov 17 14:15:16 2009 From: apevec at redhat.com (Alan Pevec) Date: Tue, 17 Nov 2009 09:15:16 -0500 (EST) Subject: [Ovirt-devel] [PATCH] Add support for vendor hooks during ovirt-early start() In-Reply-To: <25838483.115771258467128900.JavaMail.root@zmail06.collab.prod.int.phx2.redhat.com> Message-ID: <238203822.116201258467316912.JavaMail.root@zmail06.collab.prod.int.phx2.redhat.com> > > The ovirt-early script uses the standard ip= and ipv6= kernel command line > > options to bring up autoconfigured network interfaces. The interface to > > bring up can be specified using the standard BOOTIF= kernel cmdline arg. > > So what you're talking about should work fine, as both ip= and ipv6= both > > support dhcp as an argument. > Sure, except AFAICT, this won't bring up networking until after we've > attempted to mount /config ... which is of course too late if we're > using an iscsi "local" disk. right, ovirt-early is only parsing ip parameters, they're used for the firstboot configuration in stand-alone mode. But let me try to understand the use-case here: is this PXE boot of the node image with /config on iSCSI ? B/c with iSCSI root networking must be brought up even earlier in initrd, right? In any case, for boot parameters we should reuse or make compatible with Dracut parameters http://sourceforge.net/apps/trac/dracut/wiki/commandline > Oddly enough, we pxe from our 1g nics, but want to bond the 10g > nics... > I'd prefer specifying input for creating a bond, or bonds... I guess bonding should be added to Dracut as well, let's propose it there. 
Dracut has bridge parameter which could be extended: http://sourceforge.net/apps/trac/dracut/wiki/commandline#Bridging > BONDIF=bond0,bond1 > bond0_MACS=$primary_mac,$secondary_mac > bond0_OPTIONS="mode=6 miimon=100 use_carrier=1" > bond0_PROTO="dhcp" > bond0_ONBOOT="yes" > bond1_MACS=$primary_mac,$secondary_mac > bond1_OPTIONS="mode=6 miimon=100 use_carrier=1" > > We'd also need bridging parameters so we can configure a bridge with > the > bond port... > BRIDGEIF=brbond0 > > >> ... But regardless, it is probably worth creating an easy to use interface > > >> for vendors to add their own init scripts in without needing to duplicate > > >> code, and trampling on ovirt-early. Perhaps an > > >> /etc/insert-your-vendor-script-here.d directory that ovirt-early and > > >> ovirt-post reads and executes functions from? That would provide an easy > > >> way to drop in custom functionality and would also not require a new > > >> kernel cmdline param. Thoughts? > > > > > > That does seem cleaner; though I do still like having a way to choose > > > which set of vendor scripts to run via kernel parameter. We'd like to > > > have a single iso image that we could support different types of startups. There's already similar hooks folder /etc/ovirt-config-boot.d/ (scripts there are executed after upgrade). What about /etc/ovirt-early.d/ where vendor could put pre-* and post-* scripts and parameter would then select one or more scripts to execute: ovirt_early=*) i=${i#ovirt_early=} ovirt_early=$(echo $i|tr ",:;" " ") ;; EARLY_DIR=/etc/ovirt-early.d for hook in $ovirt_early; do pre="$EARLY_DIR/pre-$hook" if [ -e "$pre" ]; then . "$pre" fi done ... for hook in $ovirt_early; do post="$EARLY_DIR/post-$hook" if [ -e "$post" ]; then . 
"$post" fi done From ryanh at us.ibm.com Tue Nov 17 14:59:53 2009 From: ryanh at us.ibm.com (Ryan Harper) Date: Tue, 17 Nov 2009 08:59:53 -0600 Subject: [Ovirt-devel] [PATCH] Add support for vendor hooks during ovirt-early start() In-Reply-To: <238203822.116201258467316912.JavaMail.root@zmail06.collab.prod.int.phx2.redhat.com> References: <25838483.115771258467128900.JavaMail.root@zmail06.collab.prod.int.phx2.redhat.com> <238203822.116201258467316912.JavaMail.root@zmail06.collab.prod.int.phx2.redhat.com> Message-ID: <20091117145953.GJ10280@us.ibm.com> * Alan Pevec [2009-11-17 08:16]: > > > The ovirt-early script uses the standard ip= and ipv6= kernel command line > > > options to bring up autoconfigured network interfaces. The interface to > > > bring up can be specified using the standard BOOTIF= kernel cmdline arg. > > > So what you're talking about should work fine, as both ip= and ipv6= both > > > support dhcp as an argument. > > Sure, except AFAICT, this won't bring up networking until after we've > > attempted to mount /config ... which is of course too late if we're > > using an iscsi "local" disk. > > right, ovirt-early is only parsing ip parameters, they're used for the firstboot configuration in stand-alone mode. > > But let me try to understand the use-case here: is this PXE boot of the node image with /config on iSCSI ? > B/c with iSCSI root networking must be brought up even earlier in initrd, right? Originally we were iscsi firmware booting (iscsi_ibft.ko module to pass iscsi params from firmware to Linux). However, the initrd isn't quite cut out for that; especially considering we'd need to do that complex network config (bridged bonding pair) prior to connecting to iscsi lun. This forced me to move the connection from the initrd into ovirt-early. 
> In any case, for boot parameters we should reuse or make compatible with Dracut parameters http://sourceforge.net/apps/trac/dracut/wiki/commandline > > > Oddly enough, we pxe from our 1g nics, but want to bond the 10g > > nics... > > I'd prefer specifying input for creating a bond, or bonds... > > I guess bonding should be added to Dracut as well, let's propose it there. > Dracut has bridge parameter which could be extended: http://sourceforge.net/apps/trac/dracut/wiki/commandline#Bridging > Yeah, this looks reasonable if we add bonding. Something like: bond=<bondname>,<mac1>,<mac2>,<options> I suppose we could use colons to remain consistent, but splitting up the fields will be a bit more work. One other item that we use is DHCP vendor options which require the use of DHCLIENTARGS. Not sure if it's reasonable to add a DHCP parameter that we might be able to use to pass in something like DHCLIENT args? Besides the args, not sure how many other DHCP options we might want in a dhcp= parameter. We'd also want ip= , bond=, and bridge= to be processed in such a way as to allow for building the bond, creating the bridge with the bond as the port and then applying the ip= config to the bridge. Something like: ip=dhcp bond=bond0,00:11:22:33:44:55,00:11:22:33:44:66,'mode=6' \ bridge=brbond0:bond0 dhcp=args="-cf /etc/isan-dhcp.conf -I ISAN" \ iscsi=192.168.68.21:::: would be sufficient to bring up the connection. > > BONDIF=bond0,bond1 > > bond0_MACS=$primary_mac,$secondary_mac > > bond0_OPTIONS="mode=6 miimon=100 use_carrier=1" > > bond0_PROTO="dhcp" > > bond0_ONBOOT="yes" > > bond1_MACS=$primary_mac,$secondary_mac > > bond1_OPTIONS="mode=6 miimon=100 use_carrier=1" > > > > We'd also need bridging parameters so we can configure a bridge with > > the > > bond port... > > BRIDGEIF=brbond0 > > > > >> ... But regardless, it is probably worth creating an easy to use interface > > > >> for vendors to add their own init scripts in without needing to duplicate > > > >> code, and trampling on ovirt-early. 
Perhaps an > > > >> /etc/insert-your-vendor-script-here.d directory that ovirt-early and > > > >> ovirt-post reads and executes functions from? That would provide an easy > > > >> way to drop in custom functionality and would also not require a new > > > >> kernel cmdline param. Thoughts? > > > > > > > > That does seem cleaner; though I do still like having a way to choose > > > > which set of vendor scripts to run via kernel parameter. We'd like to > > > > have a single iso image that we could support different types of startups. > > There's already similar hooks folder /etc/ovirt-config-boot.d/ (scripts there are executed after upgrade). > What about /etc/ovirt-early.d/ where vendor could put pre-* and post-* scripts and parameter would then select one or more scripts to execute: > ovirt_early=*) > i=${i#ovirt_early=} > ovirt_early=$(echo $i|tr ",:;" " ") > ;; > EARLY_DIR=/etc/ovirt-early.d > for hook in $ovirt_early; do > pre="$EARLY_DIR/pre-$hook" > if [ -e "$pre" ]; then > . "$pre" > fi > done > > ... > > for hook in $ovirt_early; do > post="$EARLY_DIR/post-$hook" > if [ -e "$post" ]; then > . "$post" > fi > done Looks good; let me give that a try and post a patch. -- Ryan Harper Software Engineer; Linux Technology Center IBM Corp., Austin, Tx ryanh at us.ibm.com From ryanh at us.ibm.com Thu Nov 19 18:44:02 2009 From: ryanh at us.ibm.com (Ryan Harper) Date: Thu, 19 Nov 2009 12:44:02 -0600 Subject: [Ovirt-devel] [PATCH]v2 Add ovirt-early vendor hooks Message-ID: <20091119184402.GL27346@us.ibm.com> Create /etc/ovirt-early.d and add a kernel parameter, ovirt_early which can specify one or more strings which reference scripts that will be embedded into the ovirt-node image. ovirt-early pre-* are called after processing command line parameters but before mount the config partition in ovirt-early start(), and post-* are called at the very end of the start() function. 
Update ovirt-node specfile to create /etc/ovirt-early.d Signed-off-by: Ryan Harper --- ovirt-node.spec.in | 3 +++ scripts/ovirt-early | 19 +++++++++++++++++++ 2 files changed, 22 insertions(+), 0 deletions(-) diff --git a/ovirt-node.spec.in b/ovirt-node.spec.in index 2a6b7b6..07ba742 100644 --- a/ovirt-node.spec.in +++ b/ovirt-node.spec.in @@ -272,6 +272,8 @@ ln -s ovirt-release %{buildroot}/etc/system-release %{__ln_s} ../..%{_sbindir}/ovirt-config-boot-wrapper %{buildroot}%{_sysconfdir}/ovirt-config-setup.d/"98_Local install and reboot" %{__ln_s} ../..%{_sbindir}/ovirt-config-uninstall %{buildroot}%{_sysconfdir}/ovirt-config-setup.d/"99_Uninstall node" +# ovirt-early vendor hook dir +%{__install} -d -m0755 %{buildroot}%{_sysconfdir}/ovirt-early.d %clean %{__rm} -rf %{buildroot} @@ -357,6 +359,7 @@ fi %config %{_sysconfdir}/cron.d/ovirt-logrotate %{_sysconfdir}/ovirt-config-boot.d %{_sysconfdir}/ovirt-config-setup.d +%{_sysconfdir}/ovirt-early.d %files stateful %defattr(-,root,root,0755) diff --git a/scripts/ovirt-early b/scripts/ovirt-early index cdd4afd..b688014 100755 --- a/scripts/ovirt-early +++ b/scripts/ovirt-early @@ -12,6 +12,7 @@ BONDING_MODCONF_FILE=/etc/modprobe.d/bonding AUGTOOL_CONFIG=/var/tmp/augtool-config +EARLY_DIR=/etc/ovirt-early.d get_mac_addresses() { local DEVICE=$1 @@ -369,9 +370,20 @@ start() { console=*) bootparams="$bootparams $i" ;; + ovirt_early=*) + i=${i#ovirt_early=} + ovirt_early=$(echo $i|tr ",:;" " ") + ;; esac done + for hook in $ovirt_early; do + pre="$EARLY_DIR/pre-$hook" + if [ -e "$pre" ]; then + . "$pre" + fi + done + if [ -z "$ip_netmask" ]; then ip_netmask=$netmask fi @@ -434,6 +446,13 @@ start() { fi fi + for hook in $ovirt_early; do + post="$EARLY_DIR/post-$hook" + if [ -e "$post" ]; then + . 
"$post" + fi + done + return 0 } -- 1.6.2.5 -- Ryan Harper Software Engineer; Linux Technology Center IBM Corp., Austin, Tx ryanh at us.ibm.com From apevec at gmail.com Thu Nov 19 22:27:23 2009 From: apevec at gmail.com (Alan Pevec) Date: Thu, 19 Nov 2009 23:27:23 +0100 Subject: [Ovirt-devel] [PATCH 1/2] Users can now work with remote libvirt hosts. In-Reply-To: <1257954683-5853-2-git-send-email-dpierce@redhat.com> References: <1257954683-5853-1-git-send-email-dpierce@redhat.com> <1257954683-5853-2-git-send-email-dpierce@redhat.com> Message-ID: <2be7262f0911191427v6704109bud425d3966c49dc27@mail.gmail.com> On Wed, Nov 11, 2009 at 4:51 PM, Darryl L. Pierce wrote: > The user can: > ?* select a remote machine I didn't see how deselect a remote machine, once selected all operations are over remote libvirt connection... > ?* add a remote machine > ?* remove a remote machine BTW in current next there's nodeadmin API mismatch: configscreen.py: def get_domain_list_page(self, screen, defined=True, created=True): startdomain.py: return self.get_domain_list_page(screen, started = False) which gets traceback: get_domain_list_page() got an unexpected keyword argument 'started' From apevec at gmail.com Thu Nov 19 22:30:40 2009 From: apevec at gmail.com (Alan Pevec) Date: Thu, 19 Nov 2009 23:30:40 +0100 Subject: [Ovirt-devel] [PATCH 2/2] Enables users to migrate virtual machines between hosts. In-Reply-To: <1257954683-5853-3-git-send-email-dpierce@redhat.com> References: <1257954683-5853-1-git-send-email-dpierce@redhat.com> <1257954683-5853-2-git-send-email-dpierce@redhat.com> <1257954683-5853-3-git-send-email-dpierce@redhat.com> Message-ID: <2be7262f0911191430j3f68ee69h5f27890a6c7dff4a@mail.gmail.com> On Wed, Nov 11, 2009 at 4:51 PM, Darryl L. Pierce wrote: > Users select a virtual machine on their current libvirt host. They then > select a target machine, which must have been previously configured as a > connection. They confirm the migration and then it runs. ... 
> +class MigrateDomainConfigScreen(DomainListConfigScreen): ... > + ? ?def get_elements_for_page(self, screen, page): > + ? ? ? ?if ? page is LIST_DOMAINS: ?return self.get_domain_list_page(screen) this shows all defined VMs, shouldn't it show only running VMs if this is a live migration? From apevec at gmail.com Thu Nov 19 23:25:55 2009 From: apevec at gmail.com (Alan Pevec) Date: Fri, 20 Nov 2009 00:25:55 +0100 Subject: [Ovirt-devel] [PATCH 1/2] Provides a new storage administration system to the managed node. In-Reply-To: <1257794438-10826-2-git-send-email-dpierce@redhat.com> References: <1257794438-10826-1-git-send-email-dpierce@redhat.com> <1257794438-10826-2-git-send-email-dpierce@redhat.com> Message-ID: <2be7262f0911191525n5c8317d9mc08be74464a46c8d@mail.gmail.com> On Mon, Nov 9, 2009 at 8:20 PM, Darryl L. Pierce wrote: > Users can now: > ?* Add a new storage pool. > ?* Delete a storage pool. > ?* Start and stop storage pools. > ?* Add a new storage volume. > ?* Delete a storage volume. > ?* List existing storage pools, with details. I'm getting volumes with 1G or less rounded to 0.0 G in display: ????????????? List Storage Pools ????????????? ? ? ? Details For Storage Pool: default ? ? Name:default ? ? gpxe-0.9.7-6.el5rhev.iso (0.0 G) ? ? nodel5.iso (0.0 G) ? ? Volumes:nodel5a.img (2.0 G) ? ? xp2.img (0.0 G) ? ? xp.img (2.0 G) ? ? winxp-sp2.iso (0.0 G) ? ? Autostart:Yes ? you need to / 1024.0**3 to get the fractional part From dpierce at redhat.com Fri Nov 20 14:16:41 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Fri, 20 Nov 2009 09:16:41 -0500 Subject: [Ovirt-devel] [PATCH 1/2] Provides a new storage administration system to the managed node. 
In-Reply-To: <2be7262f0911191525n5c8317d9mc08be74464a46c8d@mail.gmail.com> References: <1257794438-10826-1-git-send-email-dpierce@redhat.com> <1257794438-10826-2-git-send-email-dpierce@redhat.com> <2be7262f0911191525n5c8317d9mc08be74464a46c8d@mail.gmail.com> Message-ID: <20091120141641.GC30843@mcpierce-desktop.usersys.redhat.com> On Fri, Nov 20, 2009 at 12:25:55AM +0100, Alan Pevec wrote: > On Mon, Nov 9, 2009 at 8:20 PM, Darryl L. Pierce wrote: > > Users can now: > > ?* Add a new storage pool. > > ?* Delete a storage pool. > > ?* Start and stop storage pools. > > ?* Add a new storage volume. > > ?* Delete a storage volume. > > ?* List existing storage pools, with details. > > I'm getting volumes with 1G or less rounded to 0.0 G in display: > ????????????? List Storage Pools ????????????? > ? ? > ? Details For Storage Pool: default ? > ? Name:default ? > ? gpxe-0.9.7-6.el5rhev.iso (0.0 G) ? > ? nodel5.iso (0.0 G) ? > ? Volumes:nodel5a.img (2.0 G) ? > ? xp2.img (0.0 G) ? > ? xp.img (2.0 G) ? > ? winxp-sp2.iso (0.0 G) ? > ? Autostart:Yes ? > > you need to / 1024.0**3 to get the fractional part Okay. I'll change it to show megabytes when less than 1G is the size. -- Darryl L. Pierce, Sr. Software Engineer @ Red Hat, Inc. Delivering value year after year. Red Hat ranks #1 in value among software vendors. http://www.redhat.com/promo/vendor/ -------------- next part -------------- A non-text attachment was scrubbed... Name: not available Type: application/pgp-signature Size: 198 bytes Desc: not available URL: From dpierce at redhat.com Fri Nov 20 14:22:39 2009 From: dpierce at redhat.com (Darryl L. Pierce) Date: Fri, 20 Nov 2009 09:22:39 -0500 Subject: [Ovirt-devel] [PATCH 2/2] Enables users to migrate virtual machines between hosts. 
In-Reply-To: <2be7262f0911191430j3f68ee69h5f27890a6c7dff4a@mail.gmail.com> References: <1257954683-5853-1-git-send-email-dpierce@redhat.com> <1257954683-5853-2-git-send-email-dpierce@redhat.com> <1257954683-5853-3-git-send-email-dpierce@redhat.com> <2be7262f0911191430j3f68ee69h5f27890a6c7dff4a@mail.gmail.com> Message-ID: <20091120142239.GD30843@mcpierce-desktop.usersys.redhat.com> On Thu, Nov 19, 2009 at 11:30:40PM +0100, Alan Pevec wrote: > On Wed, Nov 11, 2009 at 4:51 PM, Darryl L. Pierce wrote: > > Users select a virtual machine on their current libvirt host. They then > > select a target machine, which must have been previously configured as a > > connection. They confirm the migration and then it runs. > > ... > > +class MigrateDomainConfigScreen(DomainListConfigScreen): > ... > > + ? ?def get_elements_for_page(self, screen, page): > > + ? ? ? ?if ? page is LIST_DOMAINS: ?return self.get_domain_list_page(screen) > > this shows all defined VMs, shouldn't it show only running VMs if this > is a live migration? You can migrate a VM that's not in a running state between machines. -- Darryl L. Pierce, Sr. Software Engineer @ Red Hat, Inc. Delivering value year after year. Red Hat ranks #1 in value among software vendors. http://www.redhat.com/promo/vendor/ -------------- next part -------------- A non-text attachment was scrubbed... Name: not available Type: application/pgp-signature Size: 198 bytes Desc: not available URL: From imain at redhat.com Mon Nov 23 20:16:02 2009 From: imain at redhat.com (Ian Main) Date: Mon, 23 Nov 2009 12:16:02 -0800 Subject: [Ovirt-devel] [PATCH server] Replace the occurence of the type @qmfc.object(Qmf::Query.new(:class => "xxx", 'key' => search_key)) for @qmfc.object(Qmf::Query.new(:class => "xxx"), 'key' => search_key) else the search on the key is not functionnal. 
In-Reply-To: <1258112812-16402-1-git-send-email-mloiseleur@linagora.com> References: <1258112812-16402-1-git-send-email-mloiseleur@linagora.com> Message-ID: <20091123121602.237e05e6@tp.mains.priv> On Fri, 13 Nov 2009 11:46:52 +0000 Michel Loiseleur wrote: > This fix db-omatic that cannot recover the node on wich the vm run for example. > > We have not investigate far away but that can be due to a change on the ruby-qmf API. > > Signed-off-by: Michel Loiseleur ACK Thanks Michel for catching that for me. :) I'll push this shortly. I also talked to Ted Ross and he's going to look into that bug soon. Ian From jorick at netbulae.com Thu Nov 26 11:55:56 2009 From: jorick at netbulae.com (Jorick Astrego) Date: Thu, 26 Nov 2009 12:55:56 +0100 Subject: [Ovirt-devel] [error] avahi_entry_group_add_service_strlst("AdminNodeFQDN") failed: Invalid host name Message-ID: <4B0E6CCC.9020205@netbulae.com> Hi, After doing a clean install of ovirt in a Fedora 11 VM, I am unable to get anything working in the web interface. All I get is an error 500. 
After reviewing the logs I see the following error in /var/log/http/error.log [Thu Nov 26 11:29:11 2009] [notice] Apache/2.2.13 (Unix) DAV/2 mod_auth_kerb/5.4 mod_nss/2.2.11 NSS/3.12.2.0 mod_python/3.3.1 Python/2.6 configured -- resuming normal operations [Thu Nov 26 11:29:11 2009] [error] avahi_entry_group_add_service_strlst("AdminNodeFQDN") failed: Invalid host name /usr/lib/ruby/gems/1.8/gems/activerecord-2.3.2/lib/active_record/connection_adapters/abstract_adapter.rb:212:in `log': PGError: ERROR: relation "pools" does not exist (ActiveRecord::StatementInvalid) : SELECT a.attname, format_type(a.atttypid, a.atttypmod), d.adsrc, a.attnotnull FROM pg_attribute a LEFT JOIN pg_attrdef d ON a.attrelid = d.adrelid AND a.attnum = d.adnum WHERE a.attrelid = 'pools'::regclass AND a.attnum > 0 AND NOT a.attisdropped ORDER BY a.attnum from /usr/lib/ruby/gems/1.8/gems/activerecord-2.3.2/lib/active_record/connection_adapters/postgresql_adapter.rb:494:in `query' from /usr/lib/ruby/gems/1.8/gems/activerecord-2.3.2/lib/active_record/connection_adapters/postgresql_adapter.rb:1039:in `column_definitions' from /usr/lib/ruby/gems/1.8/gems/activerecord-2.3.2/lib/active_record/connection_adapters/postgresql_adapter.rb:659:in `columns' from /usr/lib/ruby/gems/1.8/gems/activerecord-2.3.2/lib/active_record/base.rb:1276:in `columns_without_gettext_activerecord' from /usr/lib/ruby/gems/1.8/gems/gettext_activerecord-2.0.4/lib/gettext_activerecord/base.rb:62:in `columns' from /usr/share/ovirt-server/vendor/plugins/betternestedset/lib/better_nested_set.rb:35:in `acts_as_nested_set' from /usr/share/ovirt-server/app/models/pool.rb:21 from /usr/lib/ruby/site_ruby/1.8/rubygems/custom_require.rb:31:in `gem_original_require' ... 25 levels... 
from /usr/share/ovirt-server/dutils/dutils.rb:19:in `require' from /usr/share/ovirt-server/dutils/dutils.rb:19 from /usr/bin/ovirt-vm2node:6:in `require' from /usr/bin/ovirt-vm2node:6 /usr/lib64/python2.6/site-packages/mod_python/importer.py:32: DeprecationWarning: the md5 module is deprecated; use hashlib instead import md5 [Thu Nov 26 11:29:30 2009] [INFO] IPA: get_entry_by_cn 'ipausers' Anything I can do to fix this? -- Regards, Netbulae Jorick Astrego From jboggs at redhat.com Sat Nov 28 02:20:51 2009 From: jboggs at redhat.com (Joey Boggs) Date: Fri, 27 Nov 2009 21:20:51 -0500 Subject: [Ovirt-devel] [error] avahi_entry_group_add_service_strlst("AdminNodeFQDN") failed: Invalid host name In-Reply-To: <4B0E6CCC.9020205@netbulae.com> References: <4B0E6CCC.9020205@netbulae.com> Message-ID: <4B108903.2030009@redhat.com> Jorick Astrego wrote: > Hi, > > After doing a clean install of ovirt in a Fedora 11 VM, I am unable to > get anything working in the web interface. All I get is an error 500. 
> > After reviewing the logs I see the following error in > /var/log/http/error.log > > [Thu Nov 26 11:29:11 2009] [notice] Apache/2.2.13 (Unix) DAV/2 > mod_auth_kerb/5.4 mod_nss/2.2.11 NSS/3.12.2.0 mod_python/3.3.1 > Python/2.6 configured -- resuming normal operations > [Thu Nov 26 11:29:11 2009] [error] > avahi_entry_group_add_service_strlst("AdminNodeFQDN") failed: Invalid > host name > /usr/lib/ruby/gems/1.8/gems/activerecord-2.3.2/lib/active_record/connection_adapters/abstract_adapter.rb:212:in > `log': PGError: ERROR: relation "pools" does not exist > (ActiveRecord::StatementInvalid) > : SELECT a.attname, format_type(a.atttypid, a.atttypmod), > d.adsrc, a.attnotnull > FROM pg_attribute a LEFT JOIN pg_attrdef d > ON a.attrelid = d.adrelid AND a.attnum = d.adnum > WHERE a.attrelid = 'pools'::regclass > AND a.attnum > 0 AND NOT a.attisdropped > ORDER BY a.attnum > from > /usr/lib/ruby/gems/1.8/gems/activerecord-2.3.2/lib/active_record/connection_adapters/postgresql_adapter.rb:494:in > `query' > from > /usr/lib/ruby/gems/1.8/gems/activerecord-2.3.2/lib/active_record/connection_adapters/postgresql_adapter.rb:1039:in > `column_definitions' > from > /usr/lib/ruby/gems/1.8/gems/activerecord-2.3.2/lib/active_record/connection_adapters/postgresql_adapter.rb:659:in > `columns' > from > /usr/lib/ruby/gems/1.8/gems/activerecord-2.3.2/lib/active_record/base.rb:1276:in > `columns_without_gettext_activerecord' > from > /usr/lib/ruby/gems/1.8/gems/gettext_activerecord-2.0.4/lib/gettext_activerecord/base.rb:62:in > `columns' > from > /usr/share/ovirt-server/vendor/plugins/betternestedset/lib/better_nested_set.rb:35:in > `acts_as_nested_set' > from /usr/share/ovirt-server/app/models/pool.rb:21 > from > /usr/lib/ruby/site_ruby/1.8/rubygems/custom_require.rb:31:in > `gem_original_require' > ... 25 levels... 
> from /usr/share/ovirt-server/dutils/dutils.rb:19:in `require' > from /usr/share/ovirt-server/dutils/dutils.rb:19 > from /usr/bin/ovirt-vm2node:6:in `require' > from /usr/bin/ovirt-vm2node:6 > /usr/lib64/python2.6/site-packages/mod_python/importer.py:32: > DeprecationWarning: the md5 module is deprecated; use hashlib instead > import md5 > [Thu Nov 26 11:29:30 2009] [INFO] IPA: get_entry_by_cn 'ipausers' > > Anything I can do to fix this? > Can you attach your /etc/httpd/conf.d/ovirt-server.conf from the management server. Looks like one of the variables didn't get properly replaced. From jboggs at redhat.com Mon Nov 30 02:21:09 2009 From: jboggs at redhat.com (Joey Boggs) Date: Sun, 29 Nov 2009 21:21:09 -0500 Subject: [Ovirt-devel] [error] avahi_entry_group_add_service_strlst("AdminNodeFQDN") failed: Invalid host name In-Reply-To: <4B12C23B.8080107@netbulae.com> References: <4B0E6CCC.9020205@netbulae.com> <4B108903.2030009@redhat.com> <4B12C23B.8080107@netbulae.com> Message-ID: <4B132C15.9020808@redhat.com> Jorick Astrego wrote: > On 11/28/2009 03:20 AM, Joey Boggs wrote: >> Jorick Astrego wrote: >>> Hi, >>> >>> After doing a clean install of ovirt in a Fedora 11 VM, I am unable >>> to get anything working in the web interface. All I get is an error >>> 500. 
>>> >>> After reviewing the logs I see the following error in >>> /var/log/http/error.log >>> >>> [Thu Nov 26 11:29:11 2009] [notice] Apache/2.2.13 (Unix) DAV/2 >>> mod_auth_kerb/5.4 mod_nss/2.2.11 NSS/3.12.2.0 mod_python/3.3.1 >>> Python/2.6 configured -- resuming normal operations >>> [Thu Nov 26 11:29:11 2009] [error] >>> avahi_entry_group_add_service_strlst("AdminNodeFQDN") failed: >>> Invalid host name >>> /usr/lib/ruby/gems/1.8/gems/activerecord-2.3.2/lib/active_record/connection_adapters/abstract_adapter.rb:212:in >>> `log': PGError: ERROR: relation "pools" does not exist >>> (ActiveRecord::StatementInvalid) >>> : SELECT a.attname, format_type(a.atttypid, >>> a.atttypmod), d.adsrc, a.attnotnull >>> FROM pg_attribute a LEFT JOIN pg_attrdef d >>> ON a.attrelid = d.adrelid AND a.attnum = d.adnum >>> WHERE a.attrelid = 'pools'::regclass >>> AND a.attnum > 0 AND NOT a.attisdropped >>> ORDER BY a.attnum >>> from >>> /usr/lib/ruby/gems/1.8/gems/activerecord-2.3.2/lib/active_record/connection_adapters/postgresql_adapter.rb:494:in >>> `query' >>> from >>> /usr/lib/ruby/gems/1.8/gems/activerecord-2.3.2/lib/active_record/connection_adapters/postgresql_adapter.rb:1039:in >>> `column_definitions' >>> from >>> /usr/lib/ruby/gems/1.8/gems/activerecord-2.3.2/lib/active_record/connection_adapters/postgresql_adapter.rb:659:in >>> `columns' >>> from >>> /usr/lib/ruby/gems/1.8/gems/activerecord-2.3.2/lib/active_record/base.rb:1276:in >>> `columns_without_gettext_activerecord' >>> from >>> /usr/lib/ruby/gems/1.8/gems/gettext_activerecord-2.0.4/lib/gettext_activerecord/base.rb:62:in >>> `columns' >>> from >>> /usr/share/ovirt-server/vendor/plugins/betternestedset/lib/better_nested_set.rb:35:in >>> `acts_as_nested_set' >>> from /usr/share/ovirt-server/app/models/pool.rb:21 >>> from >>> /usr/lib/ruby/site_ruby/1.8/rubygems/custom_require.rb:31:in >>> `gem_original_require' >>> ... 25 levels... 
>>> from /usr/share/ovirt-server/dutils/dutils.rb:19:in `require' >>> from /usr/share/ovirt-server/dutils/dutils.rb:19 >>> from /usr/bin/ovirt-vm2node:6:in `require' >>> from /usr/bin/ovirt-vm2node:6 >>> /usr/lib64/python2.6/site-packages/mod_python/importer.py:32: >>> DeprecationWarning: the md5 module is deprecated; use hashlib instead >>> import md5 >>> [Thu Nov 26 11:29:30 2009] [INFO] IPA: get_entry_by_cn 'ipausers' >>> >>> Anything I can do to fix this? >>> >> >> Can you attach your /etc/httpd/conf.d/ovirt-server.conf from the >> management server. Looks like one of the variables didn't get >> properly replaced. > > Sure. > > NameVirtualHost 192.168.2.70:80 > > > RewriteEngine on > RewriteRule ^.*$ https://%{SERVER_NAME}%{REQUEST_URI} [R,L] > > > > NameVirtualHost 192.168.2.70:443 > NameVirtualHost 10.0.0.70:80 > > > > NSSEngine on > NSSCipherSuite > +rsa_rc4_128_md5,+rsa_rc4_128_sha,+rsa_3des_sha,-rsa_des_sha,-rsa_rc4_40_md5,-rsa_rc2_40_md5,-rsa_null_md5,-rsa_null_sha,+fips_3des_sha,-fips_des_sha,-fortezza,-fortezza_rc4_128_sha,-fortezza_null,-rsa_des_56_sha,-rsa_rc4_56_sha,+rsa_aes_128_sha,+rsa_aes_256_sha > > NSSProtocol SSLv3,TLSv1 > NSSNickname Server-Cert > NSSCertificateDatabase /etc/httpd/alias > > ErrorLog /etc/httpd/logs/error_log > TransferLog /etc/httpd/logs/access_log > LogLevel warn > > RewriteEngine On > RewriteMap vmnodes prg:/usr/bin/ovirt-vm2node > RewriteRule ^/terminal/(.+)/anyterm-module$ > http://${vmnodes:$1}:81/anyterm-module [P] > RewriteRule ^/terminal/(.+)/(.*\.(html|js|css|gif))*$ > http://127.0.0.1/terminal/$2 [P,NE] > > ProxyPass /ovirt http://control01.netbulae.com/ovirt retry=3 > ProxyPassReverse /ovirt http://control01.netbulae.com/ovirt > > > > > ServerAlias control01.netbulae.com > ServerName control01.netbulae.com:80 > > ErrorLog /etc/httpd/logs/error_log > TransferLog /etc/httpd/logs/access_log > LogLevel warn > > ProxyRequests Off > > > AuthType Kerberos > AuthName "Kerberos Login" > KrbMethodNegotiate on > 
KrbMethodK5Passwd on > KrbServiceName HTTP > Krb5KeyTab /etc/httpd/conf/ipa.keytab > KrbSaveCredentials on > Require valid-user > ErrorDocument 401 /ovirt/errors/401.html > ErrorDocument 404 /ovirt/errors/404.html > ErrorDocument 500 /ovirt/errors/500.html > RewriteEngine on > Order deny,allow > Allow from all > > # We create a subrequest to find REMOTE_USER. Don't do this for every > # subrequest too (slow and huge logs result) > RewriteCond %{IS_SUBREQ}% false > RewriteRule .* - [E=RU:%{LA-U:REMOTE_USER}] > RequestHeader set X-Forwarded-User %{RU}e > RequestHeader set X-Forwarded-Keytab %{KRB5CCNAME}e > > # RequestHeader unset Authorization > > > Alias /ovirt/stylesheets "/usr/share/ovirt-server/public/stylesheets" > Alias /ovirt/images "/usr/share/ovirt-server/public/images" > Alias /ovirt/errors "/usr/share/ovirt-server/public/" > > ProxyPass /ovirt/images ! > ProxyPass /ovirt/stylesheets ! > ProxyPass /ovirt/errors ! > ProxyPass /ovirt http://localhost:3000/ovirt > ProxyPassReverse /ovirt http://localhost:3000/ovirt > ProxyPassReverse /ovirt/images ! > ProxyPassReverse /ovirt/stylesheets ! > ProxyPassReverse /ovirt/errors ! > > > > Alias /terminal /usr/share/ovirt-anyterm > > DirectoryIndex anyterm.html > > > Looks like the AdminNodeFQDN mentioned in the logs might just be a remnant of a service starting too early. If you restart the httpd service it should longer show up but your config file looks normal to me and shouldn't affect anything at this point. I'm not sure on the postgres portions so I'll let some of the others comment in on it.