[Libguestfs] [nbdkit PATCH] cache: Reduce use of bounce-buffer

Eric Blake eblake at redhat.com
Sat May 11 20:30:04 UTC 2019


Although the time spent in memcpy/memset probably pales in comparison
to time spent in socket I/O, it's still worth reducing the number of
times we have to use a bounce buffer when the request is already
aligned.
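
The test itself is a one-liner because OR-ing count and offset folds
two alignment checks into one: the OR has a low bit set iff either
operand is misaligned, so a single IS_ALIGNED call suffices (blksize
is a power of two).  Here is a minimal standalone sketch of the
idiom; the macro body below is the usual power-of-two test, assumed
to be equivalent to nbdkit's common/include/isaligned.h rather than
copied from it:

  #include <assert.h>
  #include <stdint.h>

  /* Assumed stand-in for IS_ALIGNED from common/include/isaligned.h:
   * nonzero iff 'v' is a multiple of the power-of-two 'a'. */
  #define IS_ALIGNED(v, a) (((v) & ((a) - 1)) == 0)

  int
  main (void)
  {
    uint64_t blksize = 4096;

    /* One test covers both operands: count | offset is aligned
     * iff count and offset are each aligned. */
    assert (IS_ALIGNED (8192 | 4096, blksize));  /* both aligned */
    assert (!IS_ALIGNED (8192 | 4100, blksize)); /* offset misaligned */
    assert (!IS_ALIGNED (512 | 4096, blksize));  /* count misaligned */
    return 0;
  }

When the test passes, the request skips the malloc entirely and
blk_read/blk_write operate directly on the caller's buffer.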

Signed-off-by: Eric Blake <eblake at redhat.com>
---
 filters/cache/cache.c | 60 ++++++++++++++++++++++++++++---------------
 1 file changed, 39 insertions(+), 21 deletions(-)

diff --git a/filters/cache/cache.c b/filters/cache/cache.c
index 19ce555..98786b5 100644
--- a/filters/cache/cache.c
+++ b/filters/cache/cache.c
@@ -1,5 +1,5 @@
 /* nbdkit
- * Copyright (C) 2018 Red Hat Inc.
+ * Copyright (C) 2018-2019 Red Hat Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -58,6 +58,7 @@
 #include "cache.h"
 #include "blk.h"
 #include "reclaim.h"
+#include "isaligned.h"

 #define THREAD_MODEL NBDKIT_THREAD_MODEL_PARALLEL

@@ -233,11 +234,13 @@ cache_pread (struct nbdkit_next_ops *next_ops, void *nxdata,
   CLEANUP_FREE uint8_t *block = NULL;

   assert (!flags);
-  block = malloc (blksize);
-  if (block == NULL) {
-    *err = errno;
-    nbdkit_error ("malloc: %m");
-    return -1;
+  if (!IS_ALIGNED (count | offset, blksize)) {
+    block = malloc (blksize);
+    if (block == NULL) {
+      *err = errno;
+      nbdkit_error ("malloc: %m");
+      return -1;
+    }
   }

   /* XXX This breaks up large read requests into smaller ones, which
@@ -258,12 +261,14 @@ cache_pread (struct nbdkit_next_ops *next_ops, void *nxdata,

     {
       ACQUIRE_LOCK_FOR_CURRENT_SCOPE (&lock);
-      r = blk_read (next_ops, nxdata, blknum, block, err);
+      r = blk_read (next_ops, nxdata, blknum,
+                    blkoffs || n < blksize ? block : buf, err);
     }
     if (r == -1)
       return -1;

-    memcpy (buf, &block[blkoffs], n);
+    if (blkoffs || n < blksize)
+      memcpy (buf, &block[blkoffs], n);

     buf += n;
     count -= n;
@@ -282,11 +287,13 @@ cache_pwrite (struct nbdkit_next_ops *next_ops, void *nxdata,
   CLEANUP_FREE uint8_t *block = NULL;
   bool need_flush = false;

-  block = malloc (blksize);
-  if (block == NULL) {
-    *err = errno;
-    nbdkit_error ("malloc: %m");
-    return -1;
+  if (!IS_ALIGNED (count | offset, blksize)) {
+    block = malloc (blksize);
+    if (block == NULL) {
+      *err = errno;
+      nbdkit_error ("malloc: %m");
+      return -1;
+    }
   }

   if ((flags & NBDKIT_FLAG_FUA) &&
@@ -308,11 +315,15 @@ cache_pwrite (struct nbdkit_next_ops *next_ops, void *nxdata,
      * Hold the lock over the whole operation.
      */
     ACQUIRE_LOCK_FOR_CURRENT_SCOPE (&lock);
-    r = blk_read (next_ops, nxdata, blknum, block, err);
-    if (r != -1) {
-      memcpy (&block[blkoffs], buf, n);
-      r = blk_write (next_ops, nxdata, blknum, block, flags, err);
+    if (blkoffs || n < blksize) {
+      r = blk_read (next_ops, nxdata, blknum, block, err);
+      if (r != -1) {
+        memcpy (&block[blkoffs], buf, n);
+        r = blk_write (next_ops, nxdata, blknum, block, flags, err);
+      }
     }
+    else
+      r = blk_write (next_ops, nxdata, blknum, buf, flags, err);
     if (r == -1)
       return -1;

@@ -334,6 +345,7 @@ cache_zero (struct nbdkit_next_ops *next_ops, void *nxdata,
 {
   CLEANUP_FREE uint8_t *block = NULL;
   bool need_flush = false;
+  bool clean = false;

   block = malloc (blksize);
   if (block == NULL) {
@@ -350,7 +362,7 @@ cache_zero (struct nbdkit_next_ops *next_ops, void *nxdata,
   }
   while (count > 0) {
     uint64_t blknum, blkoffs, n;
-    int r;
+    int r = 0;

     blknum = offset / blksize;  /* block number */
     blkoffs = offset % blksize; /* offset within the block */
@@ -362,11 +374,17 @@ cache_zero (struct nbdkit_next_ops *next_ops, void *nxdata,
      * Hold the lock over the whole operation.
      */
     ACQUIRE_LOCK_FOR_CURRENT_SCOPE (&lock);
-    r = blk_read (next_ops, nxdata, blknum, block, err);
-    if (r != -1) {
+    if (blkoffs || n < blksize) {
+      r = blk_read (next_ops, nxdata, blknum, block, err);
       memset (&block[blkoffs], 0, n);
+      clean = false;
+    }
+    else if (!clean) {
+      memset (block, 0, blksize);
+      clean = true;
+    }
+    if (r != -1)
       r = blk_write (next_ops, nxdata, blknum, block, flags, err);
-    }
     if (r == -1)
       return -1;

-- 
2.20.1