[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]

[libvirt] [PATCH 4/4] virsh: Expose virNodeAllocPages



The new virsh command is named 'allocpages'.

Signed-off-by: Michal Privoznik <mprivozn redhat com>
---
 tools/virsh-host.c | 134 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 tools/virsh.pod    |  12 +++++
 2 files changed, 146 insertions(+)

diff --git a/tools/virsh-host.c b/tools/virsh-host.c
index 7fc2120..15bfb7a 100644
--- a/tools/virsh-host.c
+++ b/tools/virsh-host.c
@@ -418,6 +418,134 @@ cmdFreepages(vshControl *ctl, const vshCmd *cmd)
 
 
 /*
+ * "allocpages" command
+ */
+static const vshCmdInfo info_allocpages[] = {
+    {.name = "help",
+     .data = N_("Manipulate pages pool size")
+    },
+    {.name = "desc",
+     .data = N_("Allocate or free some pages in the pool for NUMA cell.")
+    },
+    {.name = NULL}
+};
+static const vshCmdOptDef opts_allocpages[] = {
+    {.name = "pagesize",
+     .type = VSH_OT_INT,
+     .flags = VSH_OFLAG_REQ,
+     .help = N_("page size (in kibibytes)")
+    },
+    {.name = "pagecount",
+     .type = VSH_OT_INT,
+     .flags = VSH_OFLAG_REQ,
+     .help = N_("page count")
+    },
+    {.name = "cellno",
+     .type = VSH_OT_INT,
+     .help = N_("NUMA cell number")
+    },
+    {.name = "add",
+     .type = VSH_OT_BOOL,
+     .help = N_("instead of setting new pool size add pages to it")
+    },
+    {.name = "all",
+     .type = VSH_OT_BOOL,
+     .help = N_("set on all NUMA cells")
+    },
+    {.name = NULL}
+};
+
+static bool
+cmdAllocpages(vshControl *ctl, const vshCmd *cmd)
+{
+    bool ret = false;
+    bool add = vshCommandOptBool(cmd, "add");
+    bool all = vshCommandOptBool(cmd, "all");
+    bool cellno = vshCommandOptBool(cmd, "cellno");
+    int startCell = -1;
+    int cellCount = 1;
+    unsigned int pageSizes[1];
+    unsigned long long pageCounts[1];
+    unsigned int flags = 0;
+    char *cap_xml = NULL;
+    xmlDocPtr xml = NULL;
+    xmlXPathContextPtr ctxt = NULL;
+    xmlNodePtr *nodes = NULL;
+
+    VSH_EXCLUSIVE_OPTIONS_VAR(all, cellno);
+
+    if (cellno && vshCommandOptInt(cmd, "cellno", &startCell) < 0) {
+        vshError(ctl, "%s", _("cell number has to be a number"));
+        return false;
+    }
+
+    if (vshCommandOptUInt(cmd, "pagesize", &pageSizes[0]) < 0) {
+        vshError(ctl, "%s", _("pagesize has to be a number"));
+        return false;
+    }
+
+    if (vshCommandOptULongLong(cmd, "pagecount", &pageCounts[0]) < 0) {
+        vshError(ctl, "%s", _("pagecount has to be a number"));
+        return false;
+    }
+
+    flags |= add ? VIR_NODE_ALLOC_PAGES_ADD : VIR_NODE_ALLOC_PAGES_SET;
+
+    if (all) {
+        int nodes_cnt;
+        size_t i;
+
+        if (!(cap_xml = virConnectGetCapabilities(ctl->conn))) {
+            vshError(ctl, "%s", _("unable to get node capabilities"));
+            goto cleanup;
+        }
+
+        xml = virXMLParseStringCtxt(cap_xml, _("(capabilities)"), &ctxt);
+        if (!xml) {
+            vshError(ctl, "%s", _("unable to get node capabilities"));
+            goto cleanup;
+        }
+
+        nodes_cnt = virXPathNodeSet("/capabilities/host/topology/cells/cell",
+                                    ctxt, &nodes);
+
+        if (nodes_cnt == -1) {
+            vshError(ctl, "%s", _("could not get information about "
+                                  "NUMA topology"));
+            goto cleanup;
+        }
+
+        for (i = 0; i < nodes_cnt; i++) {
+            unsigned long id;
+            char *val = virXMLPropString(nodes[i], "id");
+            if (virStrToLong_ul(val, NULL, 10, &id)) {
+                vshError(ctl, "%s", _("conversion from string failed"));
+                VIR_FREE(val);
+                goto cleanup;
+            }
+            VIR_FREE(val);
+
+            if (virNodeAllocPages(ctl->conn, 1, pageSizes,
+                                  pageCounts, id, 1, flags) < 0)
+                goto cleanup;
+        }
+    } else {
+        if (virNodeAllocPages(ctl->conn, 1, pageSizes, pageCounts,
+                              startCell, cellCount, flags) < 0)
+            goto cleanup;
+    }
+
+    ret = true;
+ cleanup:
+    xmlXPathFreeContext(ctxt);
+    xmlFreeDoc(xml);
+    VIR_FREE(nodes);
+    VIR_FREE(cap_xml);
+    return ret;
+}
+
+
+/*
  * "maxvcpus" command
  */
 static const vshCmdInfo info_maxvcpus[] = {
@@ -1183,6 +1311,12 @@ cmdNodeMemoryTune(vshControl *ctl, const vshCmd *cmd)
 }
 
 const vshCmdDef hostAndHypervisorCmds[] = {
+    {.name = "allocpages",
+     .handler = cmdAllocpages,
+     .opts = opts_allocpages,
+     .info = info_allocpages,
+     .flags = 0
+    },
     {.name = "capabilities",
      .handler = cmdCapabilities,
      .opts = NULL,
diff --git a/tools/virsh.pod b/tools/virsh.pod
index 9919f92..eae9195 100644
--- a/tools/virsh.pod
+++ b/tools/virsh.pod
@@ -561,6 +561,18 @@ to the NUMA cell you're interested in. I<pagesize> is a scaled integer (see
 B<NOTES> above).  Alternatively, if I<--all> is used, info on each possible
 combination of NUMA cell and page size is printed out.
 
+=item B<allocpages> [I<--pagesize>] I<pagesize> [I<--pagecount>] I<pagecount>
+[[I<--cellno>] I<cellno>] [I<--add>] [I<--all>]
+
+Change the size of pages pool of I<pagesize> on the host. If
+I<--add> is specified, then I<pagecount> pages are added into the
+pool. However, if I<--add> wasn't specified, then the
+I<pagecount> is taken as the new absolute size of the pool (this
+may be used to free some pages and size the pool down). The
+I<cellno> modifier can be used to narrow the modification down to
+a single host NUMA cell. At the other end of the spectrum lies
+I<--all> which executes the modification on all NUMA cells.
+
 =item B<cpu-baseline> I<FILE> [I<--features>]
 
 Compute baseline CPU which will be supported by all host CPUs given in <file>.
-- 
1.8.5.5


[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]