rpms/kernel/devel kernel.spec, 1.357, 1.358 linux-2.6-firewire-git-update.patch, 1.1, 1.2
Jarod Wilson (jwilson)
fedora-extras-commits at redhat.com
Fri Jan 11 20:57:44 UTC 2008
Author: jwilson
Update of /cvs/pkgs/rpms/kernel/devel
In directory cvs-int.fedora.redhat.com:/tmp/cvs-serv10802
Modified Files:
kernel.spec linux-2.6-firewire-git-update.patch
Log Message:
Update to new linux1394 git snap that incorporates dynamic ir buffer alloc
Index: kernel.spec
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/kernel.spec,v
retrieving revision 1.357
retrieving revision 1.358
diff -u -r1.357 -r1.358
--- kernel.spec 11 Jan 2008 19:24:21 -0000 1.357
+++ kernel.spec 11 Jan 2008 20:56:53 -0000 1.358
@@ -659,8 +659,6 @@
# linux1394 git patches
Patch2200: linux-2.6-firewire-git-update.patch
-# should be committed shortly, in a very similar form...
-Patch2201: linux-2.6-firewire-dynamic-ir-buffer-alloc.patch
%endif
@@ -1189,7 +1187,6 @@
# linux1394 git patches
ApplyPatch linux-2.6-firewire-git-update.patch
-ApplyPatch linux-2.6-firewire-dynamic-ir-buffer-alloc.patch
# ---------- below all scheduled for 2.6.24 -----------------
linux-2.6-firewire-git-update.patch:
Index: linux-2.6-firewire-git-update.patch
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/linux-2.6-firewire-git-update.patch,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -r1.1 -r1.2
--- linux-2.6-firewire-git-update.patch 11 Jan 2008 19:24:21 -0000 1.1
+++ linux-2.6-firewire-git-update.patch 11 Jan 2008 20:56:53 -0000 1.2
@@ -1,7 +1,960 @@
-diff -Naurp linux-2.6-git/drivers/firewire/fw-ohci.c linux1394-2.6/drivers/firewire/fw-ohci.c
---- linux-2.6-git/drivers/firewire/fw-ohci.c 2008-01-01 22:50:33.000000000 -0500
-+++ linux1394-2.6/drivers/firewire/fw-ohci.c 2008-01-10 14:19:14.000000000 -0500
-@@ -125,6 +125,7 @@ struct context {
+IEEE 1394 updates for Linux 2.6.24-rc6 (v601 2008-01-11)
+
+ drivers/firewire/fw-ohci.c | 386 ++++++++++++++---------
+ drivers/firewire/fw-sbp2.c | 85 ++---
+ drivers/firewire/fw-transaction.c | 2
+ drivers/ieee1394/dma.c | 37 --
+ drivers/ieee1394/ieee1394_transactions.c | 68 ----
+ drivers/ieee1394/ohci1394.c | 12
+ drivers/ieee1394/raw1394.c | 4
+ drivers/ieee1394/sbp2.c | 53 +--
+ drivers/ieee1394/sbp2.h | 1
+ 9 files changed, 337 insertions(+), 311 deletions(-)
+
+========================================================================
+Date: Sun, 06 Jan 2008 17:21:41 -0500
+From: David Moore <dcm at MIT.EDU>
+Subject: firewire: fw-ohci: Dynamically allocate buffers for DMA descriptors
+
+Previously, the fw-ohci driver used fixed-length buffers for storing
+descriptors for isochronous receive DMA programs. If an application
+(such as libdc1394) generated a DMA program that was too large, fw-ohci
+would reach the limit of its fixed-sized buffer and return an error to
+userspace.
+
+This patch replaces the fixed-length ring-buffer with a linked-list of
+page-sized buffers. Additional buffers can be dynamically allocated and
+appended to the list when necessary. For a particular context, buffers
+are kept around after use and reused as necessary, so there is no
+allocation taking place after the DMA program is generated for the first
+time.
+
+In addition, the buffers it uses are coherent for DMA so there is no
+syncing required before and after writes. This syncing wasn't properly
+done in the previous version of the code.
+
+-
+
+This is the fourth version of my patch that replaces a fixed-length
+buffer for DMA descriptors with a dynamically allocated linked-list of
+buffers.
+
+As we discovered with the last attempt, new context programs are
+sometimes queued from interrupt context, making it unacceptable to call
+tasklet_disable() from context_get_descriptors().
+
+This version of the patch uses ohci->lock for all locking needs instead
+of tasklet_disable/enable. There is a new requirement that
+context_get_descriptors() be called while holding ohci->lock. It was
+already held for the AT context, so adding the requirement for the iso
+context did not seem particularly onerous. In addition, this has the
+side benefit of allowing iso queue to be safely called from concurrent
+user-space threads, which previously was not safe.
+
+Signed-off-by: David Moore <dcm at acm.org>
+Signed-off-by: Kristian Høgsberg <krh at redhat.com>
+Signed-off-by: Jarod Wilson <jwilson at redhat.com>
+
+-
+
+Fixes the following issues:
+ - Isochronous reception stopped prematurely if an application used a
+ larger buffer. (Reproduced with coriander.)
+ - Isochronous reception stopped after one or a few frames on VT630x
+ in OHCI 1.0 mode. (Fixes reception in coriander, but dvgrab still
+ doesn't work with these chips.)
+
+Patch update: struct member alignment, whitespace nits
+
+Signed-off-by: Stefan Richter <stefanr at s5r6.in-berlin.de>
+---
+ drivers/firewire/fw-ohci.c | 232 ++++++++++++++++++++++++-------------
+ 1 file changed, 154 insertions(+), 78 deletions(-)
+
+========================================================================
+Date: Sat, 22 Dec 2007 22:14:52 +0100 (CET)
+From: Stefan Richter <stefanr at s5r6.in-berlin.de>
+Subject: firewire: fw-ohci: CycleTooLong interrupt management
+
+The firewire-ohci driver so far lacked the ability to resume cycle
+master duty after that condition happened, as added to ohci1394 in Linux
+2.6.18 by commit 57fdb58fa5a140bdd52cf4c4ffc30df73676f0a5. This ports
+this patch to fw-ohci.
+
+The "cycle too long" condition has been seen in practice
+ - with IIDC cameras if a mode with packets too large for a speed is
+ chosen,
+ - sporadically when capturing DV on a VIA VT6306 card with ohci1394/
+ ieee1394/ raw1394/ dvgrab 2.
+ https://bugzilla.redhat.com/show_bug.cgi?id=415841#c7
+(This does not fix Fedora bug 415841.)
+
+Signed-off-by: Stefan Richter <stefanr at s5r6.in-berlin.de>
+---
+ drivers/firewire/fw-ohci.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+========================================================================
+Date: Fri, 21 Dec 2007 23:02:15 +0530
+From: Rabin Vincent <rabin at rab.in>
+Subject: firewire: Fix extraction of source node id
+
+Fix extraction of the source node id from the packet header.
+
+Signed-off-by: Rabin Vincent <rabin at rab.in>
+Signed-off-by: Stefan Richter <stefanr at s5r6.in-berlin.de>
+---
+ drivers/firewire/fw-transaction.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+========================================================================
+Date: Wed, 19 Dec 2007 15:26:38 -0500
+From: David Moore <dcm at MIT.EDU>
+Subject: firewire: fw-ohci: Bug fixes for packet-per-buffer support
+
+This patch corrects a number of bugs in the current OHCI 1.0
+packet-per-buffer support:
+
+1. Correctly deal with payloads that cross a page boundary. The
+previous version would not split the descriptor at such a boundary,
+potentially corrupting unrelated memory.
+
+2. Allow user-space to specify multiple packets per struct
+fw_cdev_iso_packet in the same way that dual-buffer allows. This is
+signaled by header_length being a multiple of header_size. This
+multiple determines the number of packets. The payload size allocated
+per packet is determined by dividing the total payload size by the
+number of packets.
+
+3. Make sync support work properly for packet-per-buffer.
+
+I have tested this patch with libdc1394 by forcing my OHCI 1.1
+controller to use the packet-per-buffer support instead of dual-buffer.
+
+I would greatly appreciate testing by those who have DV devices and
+other types of iso streamers to make sure I didn't cause any
+regressions.
+
+Stefan, with this patch, I'm hoping that libdc1394 will work with all
+your OHCI 1.0 controllers now.
+
+The one bit of future work that remains for packet-per-buffer support is
+the automatic compaction of short payloads that I discussed with
+Kristian.
+
+Signed-off-by: David Moore <dcm at acm.org>
+Signed-off-by: Stefan Richter <stefanr at s5r6.in-berlin.de>
+---
+ drivers/firewire/fw-ohci.c | 99 ++++++++++++++++++-------------------
+ 1 file changed, 49 insertions(+), 50 deletions(-)
+
+========================================================================
+Date: Wed, 19 Dec 2007 03:09:18 -0500
+From: David Moore <dcm at MIT.EDU>
+Subject: firewire: fw-ohci: Fix for dualbuffer three-or-more buffers
+
+This patch fixes the problem where different OHCI 1.1 controllers behave
+differently when a received iso packet straddles three or more buffers
+when using the dual-buffer receive mode. Two changes are made in order
+to handle this situation:
+
+1. The packet sync DMA descriptor is given a non-zero header length and
+non-zero payload length. This is because zero-payload descriptors are
+not discussed in the OHCI 1.1 specs and their behavior is thus
+undefined. Instead we use a header size just large enough for a single
+header and a payload length of 4 bytes for this first descriptor.
+
+2. As we process received packets in the context's tasklet, read the
+packet length out of the headers. Keep track of the running total of
+the packet length as "excess_bytes", so we can ignore any descriptors
+where no packet starts or ends. These descriptors may not have had
+their first_res_count or second_res_count fields updated by the
+controller so we cannot rely on those values.
+
+The main drawback of this patch is that the excess_bytes value might get
+"out of sync" with the packet descriptors if something strange happens
+to the DMA program. I'm not sure if such a thing could ever happen,
+but I appreciate any suggestions in making it more robust.
+
+Also, the packet-per-buffer support may need a similar fix to deal with
+issue 1, but I haven't done any work on that yet.
+
+Stefan, I'm hoping that with this patch, all your OHCI 1.1 controllers
+will work properly with an unmodified version of libdc1394.
+
+Signed-off-by: David Moore <dcm at acm.org>
+Signed-off-by: Stefan Richter <stefanr at s5r6.in-berlin.de>
+---
+ drivers/firewire/fw-ohci.c | 44 ++++++++++++++++++++-----------------
+ 1 file changed, 24 insertions(+), 20 deletions(-)
+
+========================================================================
+Date: Sun, 16 Dec 2007 20:53:13 +0100 (CET)
+From: Stefan Richter <stefanr at s5r6.in-berlin.de>
+Subject: ieee1394: ohci1394: don't schedule IT tasklets on IR events
+
+Bug noted by Pieter Palmers: Isochronous transmit tasklets were
+scheduled on isochronous receive events, in addition to the proper
+isochronous receive tasklets.
+
+http://marc.info/?l=linux1394-devel&m=119783196222802
+
+Signed-off-by: Stefan Richter <stefanr at s5r6.in-berlin.de>
+---
+ drivers/ieee1394/ohci1394.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+========================================================================
+Date: Sun, 16 Dec 2007 17:32:11 +0100 (CET)
+From: Stefan Richter <stefanr at s5r6.in-berlin.de>
+Subject: firewire: fw-sbp2: remove unused misleading macro
+
+SBP2_MAX_SECTORS is nowhere used in fw-sbp2.
+It merely got copied over from sbp2 where it played a role in the past.
+
+Signed-off-by: Stefan Richter <stefanr at s5r6.in-berlin.de>
+---
+ drivers/firewire/fw-sbp2.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+========================================================================
+Date: Sun, 16 Dec 2007 17:31:26 +0100 (CET)
+From: Stefan Richter <stefanr at s5r6.in-berlin.de>
+Subject: ieee1394: sbp2: raise default transfer size limit
+
+This patch speeds up sbp2 a little bit --- but more importantly, it
+brings the behavior of sbp2 and fw-sbp2 closer to each other. Like
+fw-sbp2, sbp2 now does not limit the size of single transfers to 255
+sectors anymore, unless told so by a blacklist flag or by module load
+parameters.
+
+Only very old bridge chips have been known to need the 255 sectors
+limit, and we have got one such chip in our hardwired blacklist. There
+certainly is a danger that more bridges need that limit; but I prefer to
+have this issue present in both fw-sbp2 and sbp2 rather than just one of
+them.
+
+An OXUF922 with 400GB 7200RPM disk on an S400 controller is sped up by
+this patch from 22.9 to 23.5 MB/s according to hdparm. The same effect
+could be achieved before by setting a higher max_sectors module
+parameter. On buses which use 1394b beta mode, sbp2 and fw-sbp2 will
+now achieve virtually the same bandwidth. Fw-sbp2 only remains faster
+on 1394a buses due to fw-core's gap count optimization.
+
+Signed-off-by: Stefan Richter <stefanr at s5r6.in-berlin.de>
+---
+ drivers/ieee1394/sbp2.c | 26 +++++++++++++++-----------
+ drivers/ieee1394/sbp2.h | 1 -
+ 2 files changed, 15 insertions(+), 12 deletions(-)
+
+========================================================================
+Date: Sat, 15 Dec 2007 14:11:41 +0100 (CET)
+From: Stefan Richter <stefanr at s5r6.in-berlin.de>
+Subject: ieee1394: remove unused code
+
+The code has been in "#if 0 - #endif" since Linux 2.6.12.
+
+Signed-off-by: Stefan Richter <stefanr at s5r6.in-berlin.de>
+---
+ drivers/ieee1394/ieee1394_transactions.c | 68 -----------------------
+ 1 file changed, 68 deletions(-)
+
+========================================================================
+Date: Sat, 15 Dec 2007 14:04:42 +0100 (CET)
+From: Stefan Richter <stefanr at s5r6.in-berlin.de>
+Subject: ieee1394: small cleanup after "nopage"
+
+Signed-off-by: Stefan Richter <stefanr at s5r6.in-berlin.de>
+---
+ drivers/ieee1394/dma.c | 16 ++++------------
+ 1 file changed, 4 insertions(+), 12 deletions(-)
+
+========================================================================
+Date: Wed, 05 Dec 2007 18:15:53 +1100
+From: Nick Piggin <npiggin at suse.de>
+Subject: ieee1394: nopage
+
+Convert ieee1394 from nopage to fault.
+Remove redundant vma range checks (correct resource range check is retained).
+
+Signed-off-by: Nick Piggin <npiggin at suse.de>
+Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
+Signed-off-by: Stefan Richter <stefanr at s5r6.in-berlin.de>
+---
+ drivers/ieee1394/dma.c | 39 +++++++++++++++++----------------------
+ 1 file changed, 17 insertions(+), 22 deletions(-)
+
+========================================================================
+Date: Mon, 19 Nov 2007 17:48:10 -0800
+From: Joe Perches <joe at perches.com>
+Subject: ieee1394: Add missing "space"
+
+Signed-off-by: Joe Perches <joe at perches.com>
+Signed-off-by: Stefan Richter <stefanr at s5r6.in-berlin.de>
+---
+ drivers/ieee1394/raw1394.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+========================================================================
+Date: Wed, 7 Nov 2007 17:39:00 -0500
+From: Jay Fenlason <fenlason at redhat.com>
+Subject: firewire: fw-sbp2: quiet logout errors on device removal
+
+This suppresses both reply timed out and management write failed
+errors on LOGOUT requests.
+
+Signed-off-by: Jay Fenlason <fenlason at redhat.com>
+Signed-off-by: Stefan Richter <stefanr at s5r6.in-berlin.de>
+---
+ fw-sbp2.c | 20 ++++++++++++++++----
+ 1 file changed, 16 insertions(+), 4 deletions(-)
+
+========================================================================
+Date: Sun, 4 Nov 2007 14:59:24 +0100 (CET)
+From: Stefan Richter <stefanr at s5r6.in-berlin.de>
+Subject: ieee1394: sbp2: s/g list access cosmetics
+
+Replace sg->length by sg_dma_len(sg). Rename a variable for shorter
+line lengths and eliminate some superfluous local variables.
+
+Signed-off-by: Stefan Richter <stefanr at s5r6.in-berlin.de>
+---
+ drivers/ieee1394/sbp2.c | 26 ++++++++++++--------------
+ 1 file changed, 12 insertions(+), 14 deletions(-)
+
+========================================================================
+Date: Sun, 4 Nov 2007 14:58:43 +0100 (CET)
+From: Stefan Richter <stefanr at s5r6.in-berlin.de>
+Subject: ieee1394: sbp2: enable s/g chaining
+
+Signed-off-by: Stefan Richter <stefanr at s5r6.in-berlin.de>
+---
+ drivers/ieee1394/sbp2.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+========================================================================
+Date: Sun, 4 Nov 2007 14:58:11 +0100 (CET)
+From: Stefan Richter <stefanr at s5r6.in-berlin.de>
+Subject: firewire: fw-sbp2: enable s/g chaining
+
+Signed-off-by: Stefan Richter <stefanr at s5r6.in-berlin.de>
+---
+ drivers/firewire/fw-sbp2.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+========================================================================
+Date: Wed, 7 Nov 2007 01:12:51 +0100 (CET)
+From: Stefan Richter <stefanr at s5r6.in-berlin.de>
+Subject: firewire: fw-sbp2: refactor workq and kref handling
+
+This somewhat reduces the size of firewire-sbp2.ko.
+
+Signed-off-by: Stefan Richter <stefanr at s5r6.in-berlin.de>
+---
+ drivers/firewire/fw-sbp2.c | 56 +++++++++++++++++++------------------
+ 1 file changed, 30 insertions(+), 26 deletions(-)
+
+========================================================================
+Index: linux/drivers/firewire/fw-sbp2.c
+===================================================================
+--- linux.orig/drivers/firewire/fw-sbp2.c
++++ linux/drivers/firewire/fw-sbp2.c
+@@ -151,9 +151,7 @@ struct sbp2_target {
+ };
+
+ #define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
+-#define SBP2_MAX_SECTORS 255 /* Max sectors supported */
+ #define SBP2_ORB_TIMEOUT 2000 /* Timeout in ms */
+-
+ #define SBP2_ORB_NULL 0x80000000
+
+ #define SBP2_DIRECTION_TO_MEDIA 0x0
+@@ -540,14 +538,26 @@ sbp2_send_management_orb(struct sbp2_log
+
+ retval = -EIO;
+ if (sbp2_cancel_orbs(lu) == 0) {
+- fw_error("orb reply timed out, rcode=0x%02x\n",
+- orb->base.rcode);
++ /*
++ * Logout requests frequently get sent to devices that aren't
++ * there any more, resulting in extraneous error messages in
++ * the logs. Unfortunately, this means logout requests that
++ * actually fail don't get logged.
++ */
++ if (function != SBP2_LOGOUT_REQUEST)
++ fw_error("orb reply timed out, rcode=0x%02x\n",
++ orb->base.rcode);
+ goto out;
+ }
+
+ if (orb->base.rcode != RCODE_COMPLETE) {
+- fw_error("management write failed, rcode 0x%02x\n",
+- orb->base.rcode);
++ /*
++ * On device removal from the bus, sometimes the logout
++ * request times out, sometimes it just fails.
++ */
++ if (function != SBP2_LOGOUT_REQUEST)
++ fw_error("management write failed, rcode 0x%02x\n",
++ orb->base.rcode);
+ goto out;
+ }
+
+@@ -628,6 +638,21 @@ static void sbp2_release_target(struct k
+
+ static struct workqueue_struct *sbp2_wq;
+
++/*
++ * Always get the target's kref when scheduling work on one its units.
++ * Each workqueue job is responsible to call sbp2_target_put() upon return.
++ */
++static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
++{
++ if (queue_delayed_work(sbp2_wq, &lu->work, delay))
++ kref_get(&lu->tgt->kref);
++}
++
++static void sbp2_target_put(struct sbp2_target *tgt)
++{
++ kref_put(&tgt->kref, sbp2_release_target);
++}
++
+ static void sbp2_reconnect(struct work_struct *work);
+
+ static void sbp2_login(struct work_struct *work)
+@@ -649,16 +674,12 @@ static void sbp2_login(struct work_struc
+
+ if (sbp2_send_management_orb(lu, node_id, generation,
+ SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) {
+- if (lu->retries++ < 5) {
+- if (queue_delayed_work(sbp2_wq, &lu->work,
+- DIV_ROUND_UP(HZ, 5)))
+- kref_get(&lu->tgt->kref);
+- } else {
++ if (lu->retries++ < 5)
++ sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
++ else
+ fw_error("failed to login to %s LUN %04x\n",
+ unit->device.bus_id, lu->lun);
+- }
+- kref_put(&lu->tgt->kref, sbp2_release_target);
+- return;
++ goto out;
+ }
+
+ lu->generation = generation;
+@@ -700,7 +721,8 @@ static void sbp2_login(struct work_struc
+ lu->sdev = sdev;
+ scsi_device_put(sdev);
+ }
+- kref_put(&lu->tgt->kref, sbp2_release_target);
++ out:
++ sbp2_target_put(lu->tgt);
+ }
+
+ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
+@@ -865,18 +887,13 @@ static int sbp2_probe(struct device *dev
+
+ get_device(&unit->device);
+
+- /*
+- * We schedule work to do the login so we can easily
+- * reschedule retries. Always get the ref before scheduling
+- * work.
+- */
++ /* Do the login in a workqueue so we can easily reschedule retries. */
+ list_for_each_entry(lu, &tgt->lu_list, link)
+- if (queue_delayed_work(sbp2_wq, &lu->work, 0))
+- kref_get(&tgt->kref);
++ sbp2_queue_work(lu, 0);
+ return 0;
+
+ fail_tgt_put:
+- kref_put(&tgt->kref, sbp2_release_target);
++ sbp2_target_put(tgt);
+ return -ENOMEM;
+
+ fail_shost_put:
+@@ -889,7 +906,7 @@ static int sbp2_remove(struct device *de
+ struct fw_unit *unit = fw_unit(dev);
+ struct sbp2_target *tgt = unit->device.driver_data;
+
+- kref_put(&tgt->kref, sbp2_release_target);
++ sbp2_target_put(tgt);
+ return 0;
+ }
+
+@@ -915,10 +932,8 @@ static void sbp2_reconnect(struct work_s
+ lu->retries = 0;
+ PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+ }
+- if (queue_delayed_work(sbp2_wq, &lu->work, DIV_ROUND_UP(HZ, 5)))
+- kref_get(&lu->tgt->kref);
+- kref_put(&lu->tgt->kref, sbp2_release_target);
+- return;
++ sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
++ goto out;
+ }
+
+ lu->generation = generation;
+@@ -930,8 +945,8 @@ static void sbp2_reconnect(struct work_s
+
+ sbp2_agent_reset(lu);
+ sbp2_cancel_orbs(lu);
+-
+- kref_put(&lu->tgt->kref, sbp2_release_target);
++ out:
++ sbp2_target_put(lu->tgt);
+ }
+
+ static void sbp2_update(struct fw_unit *unit)
+@@ -947,8 +962,7 @@ static void sbp2_update(struct fw_unit *
+ */
+ list_for_each_entry(lu, &tgt->lu_list, link) {
+ lu->retries = 0;
+- if (queue_delayed_work(sbp2_wq, &lu->work, 0))
+- kref_get(&tgt->kref);
++ sbp2_queue_work(lu, 0);
+ }
+ }
+
+@@ -1103,9 +1117,9 @@ sbp2_map_scatterlist(struct sbp2_command
+ * elements larger than 65535 bytes, some IOMMUs may merge sg elements
+ * during DMA mapping, and Linux currently doesn't prevent this.
+ */
+- for (i = 0, j = 0; i < count; i++) {
+- sg_len = sg_dma_len(sg + i);
+- sg_addr = sg_dma_address(sg + i);
++ for (i = 0, j = 0; i < count; i++, sg = sg_next(sg)) {
++ sg_len = sg_dma_len(sg);
++ sg_addr = sg_dma_address(sg);
+ while (sg_len) {
+ /* FIXME: This won't get us out of the pinch. */
+ if (unlikely(j >= ARRAY_SIZE(orb->page_table))) {
+@@ -1325,6 +1339,7 @@ static struct scsi_host_template scsi_dr
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .use_clustering = ENABLE_CLUSTERING,
++ .use_sg_chaining = ENABLE_SG_CHAINING,
+ .cmd_per_lun = 1,
+ .can_queue = 1,
+ .sdev_attrs = sbp2_scsi_sysfs_attrs,
+Index: linux/drivers/ieee1394/sbp2.c
+===================================================================
+--- linux.orig/drivers/ieee1394/sbp2.c
++++ linux/drivers/ieee1394/sbp2.c
+@@ -51,6 +51,7 @@
+ * Grep for inline FIXME comments below.
+ */
+
++#include <linux/blkdev.h>
+ #include <linux/compiler.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+@@ -127,17 +128,21 @@ MODULE_PARM_DESC(serialize_io, "Serializ
+ "(default = Y, faster but buggy = N)");
+
+ /*
+- * Bump up max_sectors if you'd like to support very large sized
+- * transfers. Please note that some older sbp2 bridge chips are broken for
+- * transfers greater or equal to 128KB. Default is a value of 255
+- * sectors, or just under 128KB (at 512 byte sector size). I can note that
+- * the Oxsemi sbp2 chipsets have no problems supporting very large
+- * transfer sizes.
++ * Adjust max_sectors if you'd like to influence how many sectors each SCSI
++ * command can transfer at most. Please note that some older SBP-2 bridge
++ * chips are broken for transfers greater or equal to 128KB, therefore
++ * max_sectors used to be a safe 255 sectors for many years. We now have a
++ * default of 0 here which means that we let the SCSI stack choose a limit.
++ *
++ * The SBP2_WORKAROUND_128K_MAX_TRANS flag, if set either in the workarounds
++ * module parameter or in the sbp2_workarounds_table[], will override the
++ * value of max_sectors. We should use sbp2_workarounds_table[] to cover any
++ * bridge chip which becomes known to need the 255 sectors limit.
+ */
+-static int sbp2_max_sectors = SBP2_MAX_SECTORS;
++static int sbp2_max_sectors;
+ module_param_named(max_sectors, sbp2_max_sectors, int, 0444);
+ MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported "
+- "(default = " __stringify(SBP2_MAX_SECTORS) ")");
++ "(default = 0 = use SCSI stack's default)");
+
+ /*
+ * Exclusive login to sbp2 device? In most cases, the sbp2 driver should
+@@ -326,6 +331,7 @@ static struct scsi_host_template sbp2_sh
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .use_clustering = ENABLE_CLUSTERING,
++ .use_sg_chaining = ENABLE_SG_CHAINING,
+ .cmd_per_lun = SBP2_MAX_CMDS,
+ .can_queue = SBP2_MAX_CMDS,
+ .sdev_attrs = sbp2_sysfs_sdev_attrs,
+@@ -1451,7 +1457,7 @@ static void sbp2_prep_command_orb_sg(str
+ struct sbp2_fwhost_info *hi,
+ struct sbp2_command_info *cmd,
+ unsigned int scsi_use_sg,
+- struct scatterlist *sgpnt,
++ struct scatterlist *sg,
+ u32 orb_direction,
+ enum dma_data_direction dma_dir)
+ {
+@@ -1461,12 +1467,12 @@ static void sbp2_prep_command_orb_sg(str
+
+ /* special case if only one element (and less than 64KB in size) */
+ if ((scsi_use_sg == 1) &&
+- (sgpnt[0].length <= SBP2_MAX_SG_ELEMENT_LENGTH)) {
++ (sg_dma_len(sg) <= SBP2_MAX_SG_ELEMENT_LENGTH)) {
+
+- cmd->dma_size = sgpnt[0].length;
++ cmd->dma_size = sg_dma_len(sg);
+ cmd->dma_type = CMD_DMA_PAGE;
+ cmd->cmd_dma = dma_map_page(hi->host->device.parent,
+- sg_page(&sgpnt[0]), sgpnt[0].offset,
++ sg_page(sg), sg->offset,
+ cmd->dma_size, cmd->dma_dir);
+
+ orb->data_descriptor_lo = cmd->cmd_dma;
+@@ -1477,11 +1483,11 @@ static void sbp2_prep_command_orb_sg(str
+ &cmd->scatter_gather_element[0];
+ u32 sg_count, sg_len;
+ dma_addr_t sg_addr;
+- int i, count = dma_map_sg(hi->host->device.parent, sgpnt,
++ int i, count = dma_map_sg(hi->host->device.parent, sg,
+ scsi_use_sg, dma_dir);
+
+ cmd->dma_size = scsi_use_sg;
+- cmd->sge_buffer = sgpnt;
++ cmd->sge_buffer = sg;
+
+ /* use page tables (s/g) */
+ orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
+@@ -1489,9 +1495,9 @@ static void sbp2_prep_command_orb_sg(str
+
+ /* loop through and fill out our SBP-2 page tables
+ * (and split up anything too large) */
+- for (i = 0, sg_count = 0 ; i < count; i++, sgpnt++) {
+- sg_len = sg_dma_len(sgpnt);
+- sg_addr = sg_dma_address(sgpnt);
++ for (i = 0, sg_count = 0; i < count; i++, sg = sg_next(sg)) {
++ sg_len = sg_dma_len(sg);
++ sg_addr = sg_dma_address(sg);
+ while (sg_len) {
+ sg_element[sg_count].segment_base_lo = sg_addr;
+ if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
+@@ -1521,11 +1527,10 @@ static void sbp2_create_command_orb(stru
+ unchar *scsi_cmd,
+ unsigned int scsi_use_sg,
+ unsigned int scsi_request_bufflen,
+- void *scsi_request_buffer,
++ struct scatterlist *sg,
+ enum dma_data_direction dma_dir)
+ {
+ struct sbp2_fwhost_info *hi = lu->hi;
+- struct scatterlist *sgpnt = (struct scatterlist *)scsi_request_buffer;
+ struct sbp2_command_orb *orb = &cmd->command_orb;
+ u32 orb_direction;
+
+@@ -1560,7 +1565,7 @@ static void sbp2_create_command_orb(stru
+ orb->data_descriptor_lo = 0x0;
+ orb->misc |= ORB_SET_DIRECTION(1);
+ } else
+- sbp2_prep_command_orb_sg(orb, hi, cmd, scsi_use_sg, sgpnt,
++ sbp2_prep_command_orb_sg(orb, hi, cmd, scsi_use_sg, sg,
+ orb_direction, dma_dir);
+
+ sbp2util_cpu_to_be32_buffer(orb, sizeof(*orb));
+@@ -1650,7 +1655,6 @@ static int sbp2_send_command(struct sbp2
+ void (*done)(struct scsi_cmnd *))
+ {
+ unchar *scsi_cmd = (unchar *)SCpnt->cmnd;
+- unsigned int request_bufflen = scsi_bufflen(SCpnt);
+ struct sbp2_command_info *cmd;
+
+ cmd = sbp2util_allocate_command_orb(lu, SCpnt, done);
+@@ -1658,7 +1662,7 @@ static int sbp2_send_command(struct sbp2
+ return -EIO;
+
+ sbp2_create_command_orb(lu, cmd, scsi_cmd, scsi_sg_count(SCpnt),
+- request_bufflen, scsi_sglist(SCpnt),
++ scsi_bufflen(SCpnt), scsi_sglist(SCpnt),
+ SCpnt->sc_data_direction);
+ sbp2_link_orb_command(lu, cmd);
+
+@@ -1981,6 +1985,8 @@ static int sbp2scsi_slave_configure(stru
+ sdev->skip_ms_page_8 = 1;
+ if (lu->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
+ sdev->fix_capacity = 1;
++ if (lu->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
++ blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
+ return 0;
+ }
+
+@@ -2087,9 +2093,6 @@ static int sbp2_module_init(void)
+ sbp2_shost_template.cmd_per_lun = 1;
+ }
+
+- if (sbp2_default_workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
+- (sbp2_max_sectors * 512) > (128 * 1024))
+- sbp2_max_sectors = 128 * 1024 / 512;
+ sbp2_shost_template.max_sectors = sbp2_max_sectors;
+
+ hpsb_register_highlevel(&sbp2_highlevel);
+Index: linux/drivers/ieee1394/raw1394.c
+===================================================================
+--- linux.orig/drivers/ieee1394/raw1394.c
++++ linux/drivers/ieee1394/raw1394.c
+@@ -858,7 +858,7 @@ static int arm_read(struct hpsb_host *ho
+ int found = 0, size = 0, rcode = -1;
+ struct arm_request_response *arm_req_resp = NULL;
+
+- DBGMSG("arm_read called by node: %X"
++ DBGMSG("arm_read called by node: %X "
+ "addr: %4.4x %8.8x length: %Zu", nodeid,
+ (u16) ((addr >> 32) & 0xFFFF), (u32) (addr & 0xFFFFFFFF),
+ length);
+@@ -1012,7 +1012,7 @@ static int arm_write(struct hpsb_host *h
+ int found = 0, size = 0, rcode = -1, length_conflict = 0;
+ struct arm_request_response *arm_req_resp = NULL;
+
+- DBGMSG("arm_write called by node: %X"
++ DBGMSG("arm_write called by node: %X "
+ "addr: %4.4x %8.8x length: %Zu", nodeid,
+ (u16) ((addr >> 32) & 0xFFFF), (u32) (addr & 0xFFFFFFFF),
+ length);
+Index: linux/drivers/ieee1394/dma.c
+===================================================================
+--- linux.orig/drivers/ieee1394/dma.c
++++ linux/drivers/ieee1394/dma.c
+@@ -231,37 +231,24 @@ void dma_region_sync_for_device(struct d
+
+ #ifdef CONFIG_MMU
+
+-/* nopage() handler for mmap access */
+-
+-static struct page *dma_region_pagefault(struct vm_area_struct *area,
+- unsigned long address, int *type)
++static int dma_region_pagefault(struct vm_area_struct *vma,
++ struct vm_fault *vmf)
+ {
+- unsigned long offset;
+- unsigned long kernel_virt_addr;
+- struct page *ret = NOPAGE_SIGBUS;
+-
+- struct dma_region *dma = (struct dma_region *)area->vm_private_data;
++ struct dma_region *dma = (struct dma_region *)vma->vm_private_data;
+
+ if (!dma->kvirt)
+- goto out;
++ return VM_FAULT_SIGBUS;
+
+- if ((address < (unsigned long)area->vm_start) ||
+- (address >
+- (unsigned long)area->vm_start + (dma->n_pages << PAGE_SHIFT)))
+- goto out;
+-
+- if (type)
+- *type = VM_FAULT_MINOR;
+- offset = address - area->vm_start;
+- kernel_virt_addr = (unsigned long)dma->kvirt + offset;
+- ret = vmalloc_to_page((void *)kernel_virt_addr);
+- get_page(ret);
+- out:
+- return ret;
++ if (vmf->pgoff >= dma->n_pages)
++ return VM_FAULT_SIGBUS;
++
++ vmf->page = vmalloc_to_page(dma->kvirt + (vmf->pgoff << PAGE_SHIFT));
++ get_page(vmf->page);
++ return 0;
+ }
+
+ static struct vm_operations_struct dma_region_vm_ops = {
+- .nopage = dma_region_pagefault,
++ .fault = dma_region_pagefault,
+ };
+
+ /**
+@@ -275,7 +262,7 @@ int dma_region_mmap(struct dma_region *d
+ if (!dma->kvirt)
+ return -EINVAL;
+
+- /* must be page-aligned */
++ /* must be page-aligned (XXX: comment is wrong, we could allow pgoff) */
+ if (vma->vm_pgoff != 0)
+ return -EINVAL;
+
+Index: linux/drivers/ieee1394/ieee1394_transactions.c
+===================================================================
+--- linux.orig/drivers/ieee1394/ieee1394_transactions.c
++++ linux/drivers/ieee1394/ieee1394_transactions.c
+@@ -570,71 +570,3 @@ int hpsb_write(struct hpsb_host *host, n
+
+ return retval;
+ }
+-
+-#if 0
+-
+-int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation,
+- u64 addr, int extcode, quadlet_t * data, quadlet_t arg)
+-{
+- struct hpsb_packet *packet;
+- int retval = 0;
+-
+- BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet
+-
+- packet = hpsb_make_lockpacket(host, node, addr, extcode, data, arg);
+- if (!packet)
+- return -ENOMEM;
+-
+- packet->generation = generation;
+- retval = hpsb_send_packet_and_wait(packet);
+- if (retval < 0)
+- goto hpsb_lock_fail;
+-
+- retval = hpsb_packet_success(packet);
+-
+- if (retval == 0) {
+- *data = packet->data[0];
+- }
+-
+- hpsb_lock_fail:
+- hpsb_free_tlabel(packet);
+- hpsb_free_packet(packet);
+-
+- return retval;
+-}
+-
+-int hpsb_send_gasp(struct hpsb_host *host, int channel, unsigned int generation,
+- quadlet_t * buffer, size_t length, u32 specifier_id,
+- unsigned int version)
+-{
+- struct hpsb_packet *packet;
+- int retval = 0;
+- u16 specifier_id_hi = (specifier_id & 0x00ffff00) >> 8;
+- u8 specifier_id_lo = specifier_id & 0xff;
+-
+- HPSB_VERBOSE("Send GASP: channel = %d, length = %Zd", channel, length);
+-
+- length += 8;
+-
+- packet = hpsb_make_streampacket(host, NULL, length, channel, 3, 0);
+- if (!packet)
+- return -ENOMEM;
+-
+- packet->data[0] = cpu_to_be32((host->node_id << 16) | specifier_id_hi);
+- packet->data[1] =
+- cpu_to_be32((specifier_id_lo << 24) | (version & 0x00ffffff));
+-
+- memcpy(&(packet->data[2]), buffer, length - 8);
+-
+- packet->generation = generation;
+-
+- packet->no_waiter = 1;
+-
+- retval = hpsb_send_packet(packet);
+- if (retval < 0)
+- hpsb_free_packet(packet);
+-
+- return retval;
+-}
+-
+-#endif /* 0 */
+Index: linux/drivers/ieee1394/sbp2.h
+===================================================================
+--- linux.orig/drivers/ieee1394/sbp2.h
++++ linux/drivers/ieee1394/sbp2.h
+@@ -222,7 +222,6 @@ struct sbp2_status_block {
+ */
+
+ #define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
+-#define SBP2_MAX_SECTORS 255
+ /* There is no real limitation of the queue depth (i.e. length of the linked
+ * list of command ORBs) at the target. The chosen depth is merely an
+ * implementation detail of the sbp2 driver. */
+Index: linux/drivers/ieee1394/ohci1394.c
+===================================================================
+--- linux.orig/drivers/ieee1394/ohci1394.c
++++ linux/drivers/ieee1394/ohci1394.c
+@@ -2126,10 +2126,14 @@ static void ohci_schedule_iso_tasklets(s
+ list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
+ mask = 1 << t->context;
+
+- if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
+- tasklet_schedule(&t->tasklet);
+- else if (rx_event & mask)
+- tasklet_schedule(&t->tasklet);
++ if (t->type == OHCI_ISO_TRANSMIT) {
++ if (tx_event & mask)
++ tasklet_schedule(&t->tasklet);
++ } else {
++ /* OHCI_ISO_RECEIVE or OHCI_ISO_MULTICHANNEL_RECEIVE */
++ if (rx_event & mask)
++ tasklet_schedule(&t->tasklet);
++ }
+ }
+
+ spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
+Index: linux/drivers/firewire/fw-ohci.c
+===================================================================
+--- linux.orig/drivers/firewire/fw-ohci.c
++++ linux/drivers/firewire/fw-ohci.c
+@@ -98,17 +98,48 @@ struct context;
+ typedef int (*descriptor_callback_t)(struct context *ctx,
+ struct descriptor *d,
+ struct descriptor *last);
++
++/*
++ * A buffer that contains a block of DMA-able coherent memory used for
++ * storing a portion of a DMA descriptor program.
++ */
++struct descriptor_buffer {
++ struct list_head list;
++ dma_addr_t buffer_bus;
++ size_t buffer_size;
++ size_t used;
++ struct descriptor buffer[0];
++};
++
+ struct context {
+ struct fw_ohci *ohci;
+ u32 regs;
++ int total_allocation;
+
+- struct descriptor *buffer;
+- dma_addr_t buffer_bus;
+- size_t buffer_size;
+- struct descriptor *head_descriptor;
+- struct descriptor *tail_descriptor;
+- struct descriptor *tail_descriptor_last;
+- struct descriptor *prev_descriptor;
++ /*
++ * List of page-sized buffers for storing DMA descriptors.
++ * Head of list contains buffers in use and tail of list contains
++ * free buffers.
++ */
++ struct list_head buffer_list;
++
++ /*
++ * Pointer to a buffer inside buffer_list that contains the tail
++ * end of the current DMA program.
++ */
++ struct descriptor_buffer *buffer_tail;
++
++ /*
++ * The descriptor containing the branch address of the first
++ * descriptor that has not yet been filled by the device.
++ */
++ struct descriptor *last;
++
++ /*
++ * The last descriptor in the DMA program. It contains the branch
++ * address that must be updated upon appending a new descriptor.
++ */
++ struct descriptor *prev;
+
+ descriptor_callback_t callback;
+
+@@ -125,6 +156,7 @@ struct context {
struct iso_context {
struct fw_iso_context base;
struct context context;
@@ -9,7 +962,252 @@
void *header;
size_t header_length;
};
-@@ -1078,6 +1079,13 @@ static irqreturn_t irq_handler(int irq,
+@@ -197,8 +229,6 @@ static inline struct fw_ohci *fw_ohci(st
+ #define SELF_ID_BUF_SIZE 0x800
+ #define OHCI_TCODE_PHY_PACKET 0x0e
+ #define OHCI_VERSION_1_1 0x010010
+-#define ISO_BUFFER_SIZE (64 * 1024)
+-#define AT_BUFFER_SIZE 4096
+
+ static char ohci_driver_name[] = KBUILD_MODNAME;
+
+@@ -455,71 +485,108 @@ find_branch_descriptor(struct descriptor
+ static void context_tasklet(unsigned long data)
+ {
+ struct context *ctx = (struct context *) data;
+- struct fw_ohci *ohci = ctx->ohci;
+ struct descriptor *d, *last;
+ u32 address;
+ int z;
++ struct descriptor_buffer *desc;
+
+- dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus,
+- ctx->buffer_size, DMA_TO_DEVICE);
+-
+- d = ctx->tail_descriptor;
+- last = ctx->tail_descriptor_last;
+-
++ desc = list_entry(ctx->buffer_list.next,
++ struct descriptor_buffer, list);
++ last = ctx->last;
+ while (last->branch_address != 0) {
++ struct descriptor_buffer *old_desc = desc;
+ address = le32_to_cpu(last->branch_address);
+ z = address & 0xf;
+- d = ctx->buffer + (address - ctx->buffer_bus) / sizeof(*d);
++ address &= ~0xf;
++
++ /* If the branch address points to a buffer outside of the
++ * current buffer, advance to the next buffer. */
++ if (address < desc->buffer_bus ||
++ address >= desc->buffer_bus + desc->used)
++ desc = list_entry(desc->list.next,
++ struct descriptor_buffer, list);
++ d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
+ last = find_branch_descriptor(d, z);
+
+ if (!ctx->callback(ctx, d, last))
+ break;
+
+- ctx->tail_descriptor = d;
+- ctx->tail_descriptor_last = last;
++ if (old_desc != desc) {
++ /* If we've advanced to the next buffer, move the
++ * previous buffer to the free list. */
++ unsigned long flags;
++ old_desc->used = 0;
++ spin_lock_irqsave(&ctx->ohci->lock, flags);
++ list_move_tail(&old_desc->list, &ctx->buffer_list);
++ spin_unlock_irqrestore(&ctx->ohci->lock, flags);
++ }
++ ctx->last = last;
+ }
+ }
+
++/*
++ * Allocate a new buffer and add it to the list of free buffers for this
++ * context. Must be called with ohci->lock held.
++ */
++static int
++context_add_buffer(struct context *ctx)
++{
++ struct descriptor_buffer *desc;
++ dma_addr_t bus_addr;
++ int offset;
++
++ /*
++ * 16MB of descriptors should be far more than enough for any DMA
++ * program. This will catch run-away userspace or DoS attacks.
++ */
++ if (ctx->total_allocation >= 16*1024*1024)
++ return -ENOMEM;
++
++ desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
++ &bus_addr, GFP_ATOMIC);
++ if (!desc)
++ return -ENOMEM;
++
++ offset = (void *)&desc->buffer - (void *)desc;
++ desc->buffer_size = PAGE_SIZE - offset;
++ desc->buffer_bus = bus_addr + offset;
++ desc->used = 0;
++
++ list_add_tail(&desc->list, &ctx->buffer_list);
++ ctx->total_allocation += PAGE_SIZE;
++
++ return 0;
++}
++
+ static int
+ context_init(struct context *ctx, struct fw_ohci *ohci,
+- size_t buffer_size, u32 regs,
+- descriptor_callback_t callback)
++ u32 regs, descriptor_callback_t callback)
+ {
+ ctx->ohci = ohci;
+ ctx->regs = regs;
+- ctx->buffer_size = buffer_size;
+- ctx->buffer = kmalloc(buffer_size, GFP_KERNEL);
+- if (ctx->buffer == NULL)
++ ctx->total_allocation = 0;
++
++ INIT_LIST_HEAD(&ctx->buffer_list);
++ if (context_add_buffer(ctx) < 0)
+ return -ENOMEM;
+
++ ctx->buffer_tail = list_entry(ctx->buffer_list.next,
++ struct descriptor_buffer, list);
++
+ tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
+ ctx->callback = callback;
+
+- ctx->buffer_bus =
+- dma_map_single(ohci->card.device, ctx->buffer,
+- buffer_size, DMA_TO_DEVICE);
+- if (dma_mapping_error(ctx->buffer_bus)) {
+- kfree(ctx->buffer);
+- return -ENOMEM;
+- }
+-
+- ctx->head_descriptor = ctx->buffer;
+- ctx->prev_descriptor = ctx->buffer;
+- ctx->tail_descriptor = ctx->buffer;
+- ctx->tail_descriptor_last = ctx->buffer;
+-
+ /*
+ * We put a dummy descriptor in the buffer that has a NULL
+ * branch address and looks like it's been sent. That way we
+- * have a descriptor to append DMA programs to. Also, the
+- * ring buffer invariant is that it always has at least one
+- * element so that head == tail means buffer full.
++ * have a descriptor to append DMA programs to.
+ */
+-
+- memset(ctx->head_descriptor, 0, sizeof(*ctx->head_descriptor));
+- ctx->head_descriptor->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
+- ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
+- ctx->head_descriptor++;
++ memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
++ ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
++ ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
++ ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
++ ctx->last = ctx->buffer_tail->buffer;
++ ctx->prev = ctx->buffer_tail->buffer;
+
+ return 0;
+ }
+@@ -528,35 +595,42 @@ static void
+ context_release(struct context *ctx)
+ {
+ struct fw_card *card = &ctx->ohci->card;
++ struct descriptor_buffer *desc, *tmp;
+
+- dma_unmap_single(card->device, ctx->buffer_bus,
+- ctx->buffer_size, DMA_TO_DEVICE);
+- kfree(ctx->buffer);
++ list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
++ dma_free_coherent(card->device, PAGE_SIZE, desc,
++ desc->buffer_bus -
++ ((void *)&desc->buffer - (void *)desc));
+ }
+
++/* Must be called with ohci->lock held */
+ static struct descriptor *
+ context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
+ {
+- struct descriptor *d, *tail, *end;
++ struct descriptor *d = NULL;
++ struct descriptor_buffer *desc = ctx->buffer_tail;
+
+- d = ctx->head_descriptor;
+- tail = ctx->tail_descriptor;
+- end = ctx->buffer + ctx->buffer_size / sizeof(*d);
++ if (z * sizeof(*d) > desc->buffer_size)
++ return NULL;
+
+- if (d + z <= tail) {
+- goto has_space;
+- } else if (d > tail && d + z <= end) {
+- goto has_space;
+- } else if (d > tail && ctx->buffer + z <= tail) {
+- d = ctx->buffer;
+- goto has_space;
++ if (z * sizeof(*d) > desc->buffer_size - desc->used) {
++ /* No room for the descriptor in this buffer, so advance to the
++ * next one. */
++
++ if (desc->list.next == &ctx->buffer_list) {
++ /* If there is no free buffer next in the list,
++ * allocate one. */
++ if (context_add_buffer(ctx) < 0)
++ return NULL;
++ }
++ desc = list_entry(desc->list.next,
++ struct descriptor_buffer, list);
++ ctx->buffer_tail = desc;
+ }
+
+- return NULL;
+-
+- has_space:
++ d = desc->buffer + desc->used / sizeof(*d);
+ memset(d, 0, z * sizeof(*d));
+- *d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);
++ *d_bus = desc->buffer_bus + desc->used;
+
+ return d;
+ }
+@@ -566,7 +640,7 @@ static void context_run(struct context *
+ struct fw_ohci *ohci = ctx->ohci;
+
+ reg_write(ohci, COMMAND_PTR(ctx->regs),
+- le32_to_cpu(ctx->tail_descriptor_last->branch_address));
++ le32_to_cpu(ctx->last->branch_address));
+ reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
+ reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
+ flush_writes(ohci);
+@@ -576,15 +650,13 @@ static void context_append(struct contex
+ struct descriptor *d, int z, int extra)
+ {
+ dma_addr_t d_bus;
++ struct descriptor_buffer *desc = ctx->buffer_tail;
+
+- d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);
+-
+- ctx->head_descriptor = d + z + extra;
+- ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z);
+- ctx->prev_descriptor = find_branch_descriptor(d, z);
++ d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
+
+- dma_sync_single_for_device(ctx->ohci->card.device, ctx->buffer_bus,
+- ctx->buffer_size, DMA_TO_DEVICE);
++ desc->used += (z + extra) * sizeof(*d);
++ ctx->prev->branch_address = cpu_to_le32(d_bus | z);
++ ctx->prev = find_branch_descriptor(d, z);
+
+ reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
+ flush_writes(ctx->ohci);
+@@ -1078,6 +1150,13 @@ static irqreturn_t irq_handler(int irq,
if (unlikely(event & OHCI1394_postedWriteErr))
fw_error("PCI posted write error\n");
@@ -23,7 +1221,7 @@
if (event & OHCI1394_cycle64Seconds) {
cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
if ((cycle_time & 0x80000000) == 0)
-@@ -1151,8 +1159,8 @@ static int ohci_enable(struct fw_card *c
+@@ -1151,8 +1230,8 @@ static int ohci_enable(struct fw_card *c
OHCI1394_RQPkt | OHCI1394_RSPkt |
OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
OHCI1394_isochRx | OHCI1394_isochTx |
@@ -34,7 +1232,7 @@
/* Activate link_on bit and contender bit in our self ID packets.*/
if (ohci_update_phy_reg(card, 4, 0,
-@@ -1408,9 +1416,13 @@ static int handle_ir_dualbuffer_packet(s
+@@ -1408,9 +1487,13 @@ static int handle_ir_dualbuffer_packet(s
void *p, *end;
int i;
@@ -51,7 +1249,7 @@
header_length = le16_to_cpu(db->first_req_count) -
le16_to_cpu(db->first_res_count);
-@@ -1429,11 +1441,15 @@ static int handle_ir_dualbuffer_packet(s
+@@ -1429,11 +1512,15 @@ static int handle_ir_dualbuffer_packet(s
*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
i += ctx->base.header_size;
@@ -68,7 +1266,7 @@
if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
ir_header = (__le32 *) (db + 1);
ctx->base.callback(&ctx->base,
-@@ -1452,24 +1468,24 @@ static int handle_ir_packet_per_buffer(s
+@@ -1452,24 +1539,24 @@ static int handle_ir_packet_per_buffer(s
{
struct iso_context *ctx =
container_of(context, struct iso_context, context);
@@ -104,7 +1302,7 @@
/*
* The iso header is byteswapped to little endian by
* the controller, but the remaining header quadlets
-@@ -1478,14 +1494,11 @@ static int handle_ir_packet_per_buffer(s
+@@ -1478,14 +1565,11 @@ static int handle_ir_packet_per_buffer(s
*/
*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
@@ -122,7 +1320,7 @@
ctx->base.callback(&ctx->base,
le32_to_cpu(ir_header[0]) & 0xffff,
ctx->header_length, ctx->header,
-@@ -1493,7 +1506,6 @@ static int handle_ir_packet_per_buffer(s
+@@ -1493,7 +1577,6 @@ static int handle_ir_packet_per_buffer(s
ctx->header_length = 0;
}
@@ -130,7 +1328,17 @@
return 1;
}
-@@ -1775,19 +1787,6 @@ ohci_queue_iso_receive_dualbuffer(struct
+@@ -1559,8 +1642,7 @@ ohci_allocate_iso_context(struct fw_card
+ if (ctx->header == NULL)
+ goto out;
+
+- retval = context_init(&ctx->context, ohci, ISO_BUFFER_SIZE,
+- regs, callback);
++ retval = context_init(&ctx->context, ohci, regs, callback);
+ if (retval < 0)
+ goto out_with_header;
+
+@@ -1775,19 +1857,6 @@ ohci_queue_iso_receive_dualbuffer(struct
* packet, retransmit or terminate..
*/
@@ -150,7 +1358,7 @@
p = packet;
z = 2;
-@@ -1815,11 +1814,18 @@ ohci_queue_iso_receive_dualbuffer(struct
+@@ -1815,11 +1884,18 @@ ohci_queue_iso_receive_dualbuffer(struct
db->control = cpu_to_le16(DESCRIPTOR_STATUS |
DESCRIPTOR_BRANCH_ALWAYS);
db->first_size = cpu_to_le16(ctx->base.header_size + 4);
@@ -171,7 +1379,7 @@
length = rest;
else
length = PAGE_SIZE - offset;
-@@ -1835,7 +1841,8 @@ ohci_queue_iso_receive_dualbuffer(struct
+@@ -1835,7 +1911,8 @@ ohci_queue_iso_receive_dualbuffer(struct
context_append(&ctx->context, d, z, header_z);
offset = (offset + length) & ~PAGE_MASK;
rest -= length;
@@ -181,7 +1389,7 @@
}
return 0;
-@@ -1849,67 +1856,70 @@ ohci_queue_iso_receive_packet_per_buffer
+@@ -1849,67 +1926,70 @@ ohci_queue_iso_receive_packet_per_buffer
{
struct iso_context *ctx = container_of(base, struct iso_context, base);
struct descriptor *d = NULL, *pd = NULL;
@@ -284,192 +1492,49 @@
context_append(&ctx->context, d, z, header_z);
}
-diff -Naurp linux-2.6-git/drivers/firewire/fw-sbp2.c linux1394-2.6/drivers/firewire/fw-sbp2.c
---- linux-2.6-git/drivers/firewire/fw-sbp2.c 2008-01-01 22:50:33.000000000 -0500
-+++ linux1394-2.6/drivers/firewire/fw-sbp2.c 2008-01-10 14:19:14.000000000 -0500
-@@ -151,9 +151,7 @@ struct sbp2_target {
- };
-
- #define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
--#define SBP2_MAX_SECTORS 255 /* Max sectors supported */
- #define SBP2_ORB_TIMEOUT 2000 /* Timeout in ms */
--
- #define SBP2_ORB_NULL 0x80000000
-
- #define SBP2_DIRECTION_TO_MEDIA 0x0
-@@ -540,14 +538,26 @@ sbp2_send_management_orb(struct sbp2_log
-
- retval = -EIO;
- if (sbp2_cancel_orbs(lu) == 0) {
-- fw_error("orb reply timed out, rcode=0x%02x\n",
-- orb->base.rcode);
-+ /*
-+ * Logout requests frequently get sent to devices that aren't
-+ * there any more, resulting in extraneous error messages in
-+ * the logs. Unfortunately, this means logout requests that
-+ * actually fail don't get logged.
-+ */
-+ if (function != SBP2_LOGOUT_REQUEST)
-+ fw_error("orb reply timed out, rcode=0x%02x\n",
-+ orb->base.rcode);
- goto out;
- }
-
- if (orb->base.rcode != RCODE_COMPLETE) {
-- fw_error("management write failed, rcode 0x%02x\n",
-- orb->base.rcode);
-+ /*
-+ * On device removal from the bus, sometimes the logout
-+ * request times out, sometimes it just fails.
-+ */
-+ if (function != SBP2_LOGOUT_REQUEST)
-+ fw_error("management write failed, rcode 0x%02x\n",
-+ orb->base.rcode);
- goto out;
- }
-
-@@ -628,6 +638,21 @@ static void sbp2_release_target(struct k
-
- static struct workqueue_struct *sbp2_wq;
+@@ -1923,16 +2003,22 @@ ohci_queue_iso(struct fw_iso_context *ba
+ unsigned long payload)
+ {
+ struct iso_context *ctx = container_of(base, struct iso_context, base);
++ unsigned long flags;
++ int retval;
-+/*
-+ * Always get the target's kref when scheduling work on one its units.
-+ * Each workqueue job is responsible to call sbp2_target_put() upon return.
-+ */
-+static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
-+{
-+ if (queue_delayed_work(sbp2_wq, &lu->work, delay))
-+ kref_get(&lu->tgt->kref);
-+}
++ spin_lock_irqsave(&ctx->context.ohci->lock, flags);
+ if (base->type == FW_ISO_CONTEXT_TRANSMIT)
+- return ohci_queue_iso_transmit(base, packet, buffer, payload);
++ retval = ohci_queue_iso_transmit(base, packet, buffer, payload);
+ else if (ctx->context.ohci->version >= OHCI_VERSION_1_1)
+- return ohci_queue_iso_receive_dualbuffer(base, packet,
++ retval = ohci_queue_iso_receive_dualbuffer(base, packet,
+ buffer, payload);
+ else
+- return ohci_queue_iso_receive_packet_per_buffer(base, packet,
++ retval = ohci_queue_iso_receive_packet_per_buffer(base, packet,
+ buffer,
+ payload);
++ spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
+
-+static void sbp2_target_put(struct sbp2_target *tgt)
-+{
-+ kref_put(&tgt->kref, sbp2_release_target);
-+}
-+
- static void sbp2_reconnect(struct work_struct *work);
-
- static void sbp2_login(struct work_struct *work)
-@@ -649,16 +674,12 @@ static void sbp2_login(struct work_struc
-
- if (sbp2_send_management_orb(lu, node_id, generation,
- SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) {
-- if (lu->retries++ < 5) {
-- if (queue_delayed_work(sbp2_wq, &lu->work,
-- DIV_ROUND_UP(HZ, 5)))
-- kref_get(&lu->tgt->kref);
-- } else {
-+ if (lu->retries++ < 5)
-+ sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
-+ else
- fw_error("failed to login to %s LUN %04x\n",
- unit->device.bus_id, lu->lun);
-- }
-- kref_put(&lu->tgt->kref, sbp2_release_target);
-- return;
-+ goto out;
- }
-
- lu->generation = generation;
-@@ -700,7 +721,8 @@ static void sbp2_login(struct work_struc
- lu->sdev = sdev;
- scsi_device_put(sdev);
- }
-- kref_put(&lu->tgt->kref, sbp2_release_target);
-+ out:
-+ sbp2_target_put(lu->tgt);
++ return retval;
}
- static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
-@@ -865,18 +887,13 @@ static int sbp2_probe(struct device *dev
-
- get_device(&unit->device);
-
-- /*
-- * We schedule work to do the login so we can easily
-- * reschedule retries. Always get the ref before scheduling
-- * work.
-- */
-+ /* Do the login in a workqueue so we can easily reschedule retries. */
- list_for_each_entry(lu, &tgt->lu_list, link)
-- if (queue_delayed_work(sbp2_wq, &lu->work, 0))
-- kref_get(&tgt->kref);
-+ sbp2_queue_work(lu, 0);
- return 0;
-
- fail_tgt_put:
-- kref_put(&tgt->kref, sbp2_release_target);
-+ sbp2_target_put(tgt);
- return -ENOMEM;
-
- fail_shost_put:
-@@ -889,7 +906,7 @@ static int sbp2_remove(struct device *de
- struct fw_unit *unit = fw_unit(dev);
- struct sbp2_target *tgt = unit->device.driver_data;
-
-- kref_put(&tgt->kref, sbp2_release_target);
-+ sbp2_target_put(tgt);
- return 0;
- }
-
-@@ -915,10 +932,8 @@ static void sbp2_reconnect(struct work_s
- lu->retries = 0;
- PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
- }
-- if (queue_delayed_work(sbp2_wq, &lu->work, DIV_ROUND_UP(HZ, 5)))
-- kref_get(&lu->tgt->kref);
-- kref_put(&lu->tgt->kref, sbp2_release_target);
-- return;
-+ sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
-+ goto out;
- }
-
- lu->generation = generation;
-@@ -930,8 +945,8 @@ static void sbp2_reconnect(struct work_s
-
- sbp2_agent_reset(lu);
- sbp2_cancel_orbs(lu);
--
-- kref_put(&lu->tgt->kref, sbp2_release_target);
-+ out:
-+ sbp2_target_put(lu->tgt);
- }
-
- static void sbp2_update(struct fw_unit *unit)
-@@ -947,8 +962,7 @@ static void sbp2_update(struct fw_unit *
- */
- list_for_each_entry(lu, &tgt->lu_list, link) {
- lu->retries = 0;
-- if (queue_delayed_work(sbp2_wq, &lu->work, 0))
-- kref_get(&tgt->kref);
-+ sbp2_queue_work(lu, 0);
- }
- }
-
-@@ -1103,9 +1117,9 @@ sbp2_map_scatterlist(struct sbp2_command
- * elements larger than 65535 bytes, some IOMMUs may merge sg elements
- * during DMA mapping, and Linux currently doesn't prevent this.
- */
-- for (i = 0, j = 0; i < count; i++) {
-- sg_len = sg_dma_len(sg + i);
-- sg_addr = sg_dma_address(sg + i);
-+ for (i = 0, j = 0; i < count; i++, sg = sg_next(sg)) {
-+ sg_len = sg_dma_len(sg);
-+ sg_addr = sg_dma_address(sg);
- while (sg_len) {
- /* FIXME: This won't get us out of the pinch. */
- if (unlikely(j >= ARRAY_SIZE(orb->page_table))) {
-@@ -1325,6 +1339,7 @@ static struct scsi_host_template scsi_dr
- .this_id = -1,
- .sg_tablesize = SG_ALL,
- .use_clustering = ENABLE_CLUSTERING,
-+ .use_sg_chaining = ENABLE_SG_CHAINING,
- .cmd_per_lun = 1,
- .can_queue = 1,
- .sdev_attrs = sbp2_scsi_sysfs_attrs,
-diff -Naurp linux-2.6-git/drivers/firewire/fw-transaction.c linux1394-2.6/drivers/firewire/fw-transaction.c
---- linux-2.6-git/drivers/firewire/fw-transaction.c 2008-01-01 22:50:33.000000000 -0500
-+++ linux1394-2.6/drivers/firewire/fw-transaction.c 2008-01-10 14:19:14.000000000 -0500
+ static const struct fw_card_driver ohci_driver = {
+@@ -2004,10 +2090,10 @@ pci_probe(struct pci_dev *dev, const str
+ ar_context_init(&ohci->ar_response_ctx, ohci,
+ OHCI1394_AsRspRcvContextControlSet);
+
+- context_init(&ohci->at_request_ctx, ohci, AT_BUFFER_SIZE,
++ context_init(&ohci->at_request_ctx, ohci,
+ OHCI1394_AsReqTrContextControlSet, handle_at_packet);
+
+- context_init(&ohci->at_response_ctx, ohci, AT_BUFFER_SIZE,
++ context_init(&ohci->at_response_ctx, ohci,
+ OHCI1394_AsRspTrContextControlSet, handle_at_packet);
+
+ reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
+Index: linux/drivers/firewire/fw-transaction.c
+===================================================================
+--- linux.orig/drivers/firewire/fw-transaction.c
++++ linux/drivers/firewire/fw-transaction.c
@@ -650,7 +650,7 @@ fw_core_handle_request(struct fw_card *c
HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | p->header[2];
tcode = HEADER_GET_TCODE(p->header[0]);
More information about the fedora-extras-commits
mailing list