[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]

[Libguestfs] [PATCH 6/8] v2v: -o rhv-upload: collect disks UUIDs right after copy



Instead of waiting for the completion of the nbdkit transfers to get the
UUIDs of the disks, use the new #disk_copied hook to do that after each
disk is copied.

This has almost no impact on the behaviour of rhv-upload, except for
the --no-copy mode:
- previously it used to hit the 5 minute timeout while waiting for the
  finalization of the first disk
- now it fails an assertion when the number of collected UUIDs differs
  from the number of actual targets; at the moment nothing better can
  be done, as this assumption is needed e.g. when creating the OVF file
---
 v2v/output_rhv_upload.ml | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/v2v/output_rhv_upload.ml b/v2v/output_rhv_upload.ml
index 5599ef2c2..0952af188 100644
--- a/v2v/output_rhv_upload.ml
+++ b/v2v/output_rhv_upload.ml
@@ -227,6 +227,8 @@ object
   val mutable rhv_storagedomain_uuid = None
   (* The cluster UUID. *)
   val mutable rhv_cluster_uuid = None
+  (* List of disk UUIDs. *)
+  val mutable disks_uuids = []
 
   method precheck () =
     Python_script.error_unless_python_interpreter_found ();
@@ -374,23 +376,21 @@ If the messages above are not sufficient to diagnose the problem then add the 
         TargetURI ("json:" ^ JSON.string_of_doc json_params)
     ) overlays
 
-  method create_metadata source targets _ guestcaps inspect target_firmware =
-    (* Get the UUIDs of each disk image.  These files are written
-     * out by the nbdkit plugins on successful finalization of the
+  method disk_copied t i nr_disks =
+    (* Get the UUID of the disk image.  This file is written
+     * out by the nbdkit plugin on successful finalization of the
      * transfer.
      *)
-    let nr_disks = List.length targets in
-    let image_uuids =
-      List.mapi (
-        fun i t ->
-          let id = t.target_overlay.ov_source.s_disk_id in
-          let diskid_file = diskid_file_of_id id in
-          if not (wait_for_file diskid_file finalization_timeout) then
-            error (f_"transfer of disk %d/%d failed, see earlier error messages")
-                  (i+1) nr_disks;
-          let diskid = read_whole_file diskid_file in
-          diskid
-      ) targets in
+    let id = t.target_overlay.ov_source.s_disk_id in
+    let diskid_file = diskid_file_of_id id in
+    if not (wait_for_file diskid_file finalization_timeout) then
+      error (f_"transfer of disk %d/%d failed, see earlier error messages")
+            (i+1) nr_disks;
+    let diskid = read_whole_file diskid_file in
+    disks_uuids <- disks_uuids @ [diskid];
+
+  method create_metadata source targets _ guestcaps inspect target_firmware =
+    assert (List.length disks_uuids = List.length targets);
 
     (* The storage domain UUID. *)
     let sd_uuid =
@@ -406,7 +406,7 @@ If the messages above are not sufficient to diagnose the problem then add the 
     let ovf =
       Create_ovf.create_ovf source targets guestcaps inspect
                             target_firmware output_alloc
-                            sd_uuid image_uuids vol_uuids vm_uuid
+                            sd_uuid disks_uuids vol_uuids vm_uuid
                             OVirt in
     let ovf = DOM.doc_to_string ovf in
 
-- 
2.21.0


[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]