rpms/kernel/devel linux-2.6-firewire-dynamic-ir-buffer-alloc.patch, NONE, 1.1 linux-2.6-firewire-git-update.patch, NONE, 1.1 kernel.spec, 1.356, 1.357

Jarod Wilson (jwilson) fedora-extras-commits at redhat.com
Fri Jan 11 19:25:10 UTC 2008


Author: jwilson

Update of /cvs/pkgs/rpms/kernel/devel
In directory cvs-int.fedora.redhat.com:/tmp/cvs-serv787

Modified Files:
	kernel.spec 
Added Files:
	linux-2.6-firewire-dynamic-ir-buffer-alloc.patch 
	linux-2.6-firewire-git-update.patch 
Log Message:
* Fri Jan 11 2008 Jarod Wilson <jwilson at redhat.com>
- Add linux1394/firewire git patches
- FireWire IR dynamic buffer alloc (David Moore)


linux-2.6-firewire-dynamic-ir-buffer-alloc.patch:

--- NEW FILE linux-2.6-firewire-dynamic-ir-buffer-alloc.patch ---
This is the fourth version of my patch that replaces a fixed-length
buffer for DMA descriptors with a dynamically allocated linked list of
buffers, sketched below.
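
For orientation, the new arrangement looks roughly like this (an
editorial sketch, simplified from the code below; not part of the
patch itself):

	/*
	 * ctx->buffer_list:  [in use] -> ... -> [buffer_tail] -> [free] -> ...
	 *
	 * context_get_descriptors() carves descriptors out of buffer_tail,
	 * advancing to the next free buffer when the tail fills (and
	 * allocating a fresh page-sized buffer when none is free), while
	 * context_tasklet() moves fully consumed buffers from the head of
	 * the list back to the free tail.
	 */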

As we discovered with the last attempt, new context programs are
sometimes queued from interrupt context, making it unacceptable to call
tasklet_disable() from context_get_descriptors().

This version of the patch uses ohci->lock for all locking needs instead
of tasklet_disable/enable.  There is a new requirement that
context_get_descriptors() be called while holding ohci->lock.  The lock
was already held for the AT context, so adding the requirement for the
iso context did not seem particularly onerous.  As a side benefit, the
iso queue path can now safely be called from concurrent user-space
threads, which was not previously the case; a rough usage sketch
follows.
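
A minimal sketch of the new calling convention (illustrative only; the
locals flags, z, d_bus and header_z are placeholders), mirroring what
ohci_queue_iso() does in the hunk below:

	spin_lock_irqsave(&ohci->lock, flags);
	d = context_get_descriptors(&ctx->context, z, &d_bus);
	if (d != NULL) {
		/* ... fill in the z descriptors ... */
		context_append(&ctx->context, d, z, header_z);
	}
	spin_unlock_irqrestore(&ohci->lock, flags);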

Signed-off-by: David Moore <dcm at acm.org>
Signed-off-by: Kristian Hoegsberg <krh at redhat.com>
Signed-off-by: Jarod Wilson <jwilson at redhat.com>

---
 drivers/firewire/fw-ohci.c |  221 ++++++++++++++++++++++++++++----------------
 1 files changed, 140 insertions(+), 81 deletions(-)

diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 74d5d94..c27e8e2 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -98,17 +98,38 @@ struct context;
 typedef int (*descriptor_callback_t)(struct context *ctx,
 				     struct descriptor *d,
 				     struct descriptor *last);
+
+/* A buffer that contains a block of DMA-able coherent memory used for
+ * storing a portion of a DMA descriptor program. */
+struct descriptor_buffer {
+	struct list_head list;
+	dma_addr_t buffer_bus;
+	size_t buffer_size;
+	size_t used;
+	struct descriptor buffer[0] __attribute__((aligned(16)));
+};
+
 struct context {
 	struct fw_ohci *ohci;
 	u32 regs;
+	int total_allocation;
 
-	struct descriptor *buffer;
-	dma_addr_t buffer_bus;
-	size_t buffer_size;
-	struct descriptor *head_descriptor;
-	struct descriptor *tail_descriptor;
-	struct descriptor *tail_descriptor_last;
-	struct descriptor *prev_descriptor;
+	/* List of page-sized buffers for storing DMA descriptors.  Head of
+	 * list contains buffers in use and tail of list contains free
+	 * buffers. */
+	struct list_head buffer_list;
+
+	/* Pointer to a buffer inside buffer_list that contains the tail
+	 * end of the current DMA program. */
+	struct descriptor_buffer *buffer_tail;
+
+	/* The descriptor containing the branch address of the first
+	 * descriptor that has not yet been filled by the device. */
+	struct descriptor *last;
+
+	/* The last descriptor in the DMA program.  It contains the branch
+	 * address that must be updated upon appending a new descriptor. */
+	struct descriptor *prev;
 
 	descriptor_callback_t callback;
 
@@ -198,8 +219,6 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
 #define SELF_ID_BUF_SIZE		0x800
 #define OHCI_TCODE_PHY_PACKET		0x0e
 #define OHCI_VERSION_1_1		0x010010
-#define ISO_BUFFER_SIZE			(64 * 1024)
-#define AT_BUFFER_SIZE			4096
 
 static char ohci_driver_name[] = KBUILD_MODNAME;
 
@@ -456,71 +475,103 @@ find_branch_descriptor(struct descriptor *d, int z)
 static void context_tasklet(unsigned long data)
 {
 	struct context *ctx = (struct context *) data;
-	struct fw_ohci *ohci = ctx->ohci;
 	struct descriptor *d, *last;
 	u32 address;
 	int z;
+	struct descriptor_buffer *desc;
 
-	dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus,
-				ctx->buffer_size, DMA_TO_DEVICE);
-
-	d    = ctx->tail_descriptor;
-	last = ctx->tail_descriptor_last;
-
+	desc = list_entry(ctx->buffer_list.next,
+			struct descriptor_buffer, list);
+	last = ctx->last;
 	while (last->branch_address != 0) {
+		struct descriptor_buffer *old_desc = desc;
 		address = le32_to_cpu(last->branch_address);
 		z = address & 0xf;
-		d = ctx->buffer + (address - ctx->buffer_bus) / sizeof(*d);
+		address &= ~0xf;
+
+		/* If the branch address points to a buffer outside of the
+		 * current buffer, advance to the next buffer. */
+		if (address < desc->buffer_bus ||
+				address >= desc->buffer_bus + desc->used)
+			desc = list_entry(desc->list.next,
+					struct descriptor_buffer, list);
+		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
 		last = find_branch_descriptor(d, z);
 
 		if (!ctx->callback(ctx, d, last))
 			break;
 
-		ctx->tail_descriptor      = d;
-		ctx->tail_descriptor_last = last;
+		if (old_desc != desc) {
+			/* If we've advanced to the next buffer, move the
+			 * previous buffer to the free list. */
+			unsigned long flags;
+			old_desc->used = 0;
+			spin_lock_irqsave(&ctx->ohci->lock, flags);
+			list_move_tail(&old_desc->list, &ctx->buffer_list);
+			spin_unlock_irqrestore(&ctx->ohci->lock, flags);
+		}
+		ctx->last = last;
 	}
 }
 
+/* Allocate a new buffer and add it to the list of free buffers for this
+ * context.  Must be called with ohci->lock held. */
+static int
+context_add_buffer(struct context *ctx)
+{
+	struct descriptor_buffer *desc;
+	dma_addr_t bus_addr;
+	int offset;
+
+	/* 16MB of descriptors should be far more than enough for any DMA
+	 * program.  This will catch run-away userspace or DoS attacks. */
+	if (ctx->total_allocation >= 16*1024*1024)
+		return -ENOMEM;
+
+	desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
+			&bus_addr, GFP_ATOMIC);
+	if (!desc)
+		return -ENOMEM;
+
+	offset = (void *)&desc->buffer - (void *)desc;
+	desc->buffer_size = PAGE_SIZE - offset;
+	desc->buffer_bus = bus_addr + offset;
+	desc->used = 0;
+
+	list_add_tail(&desc->list, &ctx->buffer_list);
+	ctx->total_allocation += PAGE_SIZE;
+	return 0;
+}
+
 static int
 context_init(struct context *ctx, struct fw_ohci *ohci,
-	     size_t buffer_size, u32 regs,
-	     descriptor_callback_t callback)
+	     u32 regs, descriptor_callback_t callback)
 {
 	ctx->ohci = ohci;
 	ctx->regs = regs;
-	ctx->buffer_size = buffer_size;
-	ctx->buffer = kmalloc(buffer_size, GFP_KERNEL);
-	if (ctx->buffer == NULL)
+	ctx->total_allocation = 0;
+
+	INIT_LIST_HEAD(&ctx->buffer_list);
+	if (context_add_buffer(ctx) < 0)
 		return -ENOMEM;
 
+	ctx->buffer_tail = list_entry(ctx->buffer_list.next,
+			struct descriptor_buffer, list);
+
 	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
 	ctx->callback = callback;
 
-	ctx->buffer_bus =
-		dma_map_single(ohci->card.device, ctx->buffer,
-			       buffer_size, DMA_TO_DEVICE);
-	if (dma_mapping_error(ctx->buffer_bus)) {
-		kfree(ctx->buffer);
-		return -ENOMEM;
-	}
-
-	ctx->head_descriptor      = ctx->buffer;
-	ctx->prev_descriptor      = ctx->buffer;
-	ctx->tail_descriptor      = ctx->buffer;
-	ctx->tail_descriptor_last = ctx->buffer;
-
 	/*
 	 * We put a dummy descriptor in the buffer that has a NULL
 	 * branch address and looks like it's been sent.  That way we
-	 * have a descriptor to append DMA programs to.  Also, the
-	 * ring buffer invariant is that it always has at least one
-	 * element so that head == tail means buffer full.
+	 * have a descriptor to append DMA programs to.
 	 */
-
-	memset(ctx->head_descriptor, 0, sizeof(*ctx->head_descriptor));
-	ctx->head_descriptor->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
-	ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
-	ctx->head_descriptor++;
+	memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
+	ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
+	ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
+	ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
+	ctx->last = ctx->buffer_tail->buffer;
+	ctx->prev = ctx->buffer_tail->buffer;
 
 	return 0;
 }
@@ -529,36 +580,42 @@ static void
 context_release(struct context *ctx)
 {
 	struct fw_card *card = &ctx->ohci->card;
+	struct descriptor_buffer *desc, *tmp;
 
-	dma_unmap_single(card->device, ctx->buffer_bus,
-			 ctx->buffer_size, DMA_TO_DEVICE);
-	kfree(ctx->buffer);
+	list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
+		dma_free_coherent(card->device, PAGE_SIZE, desc,
+			desc->buffer_bus -
+			((void *)&desc->buffer - (void *)desc));
 }
 
+/* Must be called with ohci->lock held */
 static struct descriptor *
 context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
 {
-	struct descriptor *d, *tail, *end;
-
-	d = ctx->head_descriptor;
-	tail = ctx->tail_descriptor;
-	end = ctx->buffer + ctx->buffer_size / sizeof(*d);
-
-	if (d + z <= tail) {
-		goto has_space;
-	} else if (d > tail && d + z <= end) {
-		goto has_space;
-	} else if (d > tail && ctx->buffer + z <= tail) {
-		d = ctx->buffer;
-		goto has_space;
-	}
+	struct descriptor *d = NULL;
+	struct descriptor_buffer *desc = ctx->buffer_tail;
 
-	return NULL;
+	if (z * sizeof(*d) > desc->buffer_size)
+		return NULL;
 
- has_space:
-	memset(d, 0, z * sizeof(*d));
-	*d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);
+	if (z * sizeof(*d) > desc->buffer_size - desc->used) {
+		/* No room for the descriptor in this buffer, so advance to the
+		 * next one. */
+
+		if (desc->list.next == &ctx->buffer_list) {
+			/* If there is no free buffer next in the list,
+			 * allocate one. */
+			if (context_add_buffer(ctx) < 0)
+				return NULL;
+		}
+		desc = list_entry(desc->list.next,
+				struct descriptor_buffer, list);
+		ctx->buffer_tail = desc;
+	}
 
+	d = desc->buffer + desc->used / sizeof(*d);
+	memset(d, 0, z * sizeof(*d));
+	*d_bus = desc->buffer_bus + desc->used;
 	return d;
 }
 
@@ -567,7 +624,7 @@ static void context_run(struct context *ctx, u32 extra)
 	struct fw_ohci *ohci = ctx->ohci;
 
 	reg_write(ohci, COMMAND_PTR(ctx->regs),
-		  le32_to_cpu(ctx->tail_descriptor_last->branch_address));
+		  le32_to_cpu(ctx->last->branch_address));
 	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
 	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
 	flush_writes(ohci);
@@ -577,15 +634,13 @@ static void context_append(struct context *ctx,
 			   struct descriptor *d, int z, int extra)
 {
 	dma_addr_t d_bus;
+	struct descriptor_buffer *desc = ctx->buffer_tail;
 
-	d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);
-
-	ctx->head_descriptor = d + z + extra;
-	ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z);
-	ctx->prev_descriptor = find_branch_descriptor(d, z);
+	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
 
-	dma_sync_single_for_device(ctx->ohci->card.device, ctx->buffer_bus,
-				   ctx->buffer_size, DMA_TO_DEVICE);
+	desc->used += (z + extra) * sizeof(*d);
+	ctx->prev->branch_address = cpu_to_le32(d_bus | z);
+	ctx->prev = find_branch_descriptor(d, z);
 
 	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
 	flush_writes(ctx->ohci);
@@ -1571,8 +1626,7 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
 	if (ctx->header == NULL)
 		goto out;
 
-	retval = context_init(&ctx->context, ohci, ISO_BUFFER_SIZE,
-			      regs, callback);
+	retval = context_init(&ctx->context, ohci, regs, callback);
 	if (retval < 0)
 		goto out_with_header;
 
@@ -1933,16 +1987,21 @@ ohci_queue_iso(struct fw_iso_context *base,
 	       unsigned long payload)
 {
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
+	unsigned long flags;
+	int retval;
 
+	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
 	if (base->type == FW_ISO_CONTEXT_TRANSMIT)
-		return ohci_queue_iso_transmit(base, packet, buffer, payload);
+		retval = ohci_queue_iso_transmit(base, packet, buffer, payload);
 	else if (ctx->context.ohci->version >= OHCI_VERSION_1_1)
-		return ohci_queue_iso_receive_dualbuffer(base, packet,
+		retval = ohci_queue_iso_receive_dualbuffer(base, packet,
 							 buffer, payload);
 	else
-		return ohci_queue_iso_receive_packet_per_buffer(base, packet,
+		retval = ohci_queue_iso_receive_packet_per_buffer(base, packet,
 								buffer,
 								payload);
+	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
+	return retval;
 }
 
 static const struct fw_card_driver ohci_driver = {
@@ -2014,10 +2073,10 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
 	ar_context_init(&ohci->ar_response_ctx, ohci,
 			OHCI1394_AsRspRcvContextControlSet);
 
-	context_init(&ohci->at_request_ctx, ohci, AT_BUFFER_SIZE,
+	context_init(&ohci->at_request_ctx, ohci,
 		     OHCI1394_AsReqTrContextControlSet, handle_at_packet);
 
-	context_init(&ohci->at_response_ctx, ohci, AT_BUFFER_SIZE,
+	context_init(&ohci->at_response_ctx, ohci,
 		     OHCI1394_AsRspTrContextControlSet, handle_at_packet);
 
 	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
-- 
1.5.3.3


linux-2.6-firewire-git-update.patch:

--- NEW FILE linux-2.6-firewire-git-update.patch ---
diff -Naurp linux-2.6-git/drivers/firewire/fw-ohci.c linux1394-2.6/drivers/firewire/fw-ohci.c
--- linux-2.6-git/drivers/firewire/fw-ohci.c	2008-01-01 22:50:33.000000000 -0500
+++ linux1394-2.6/drivers/firewire/fw-ohci.c	2008-01-10 14:19:14.000000000 -0500
@@ -125,6 +125,7 @@ struct context {
 struct iso_context {
 	struct fw_iso_context base;
 	struct context context;
+	int excess_bytes;
 	void *header;
 	size_t header_length;
 };
@@ -1078,6 +1079,13 @@ static irqreturn_t irq_handler(int irq, 
 	if (unlikely(event & OHCI1394_postedWriteErr))
 		fw_error("PCI posted write error\n");
 
+	if (unlikely(event & OHCI1394_cycleTooLong)) {
+		if (printk_ratelimit())
+			fw_notify("isochronous cycle too long\n");
+		reg_write(ohci, OHCI1394_LinkControlSet,
+			  OHCI1394_LinkControl_cycleMaster);
+	}
+
 	if (event & OHCI1394_cycle64Seconds) {
 		cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
 		if ((cycle_time & 0x80000000) == 0)
@@ -1151,8 +1159,8 @@ static int ohci_enable(struct fw_card *c
 		  OHCI1394_RQPkt | OHCI1394_RSPkt |
 		  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
 		  OHCI1394_isochRx | OHCI1394_isochTx |
-		  OHCI1394_postedWriteErr | OHCI1394_cycle64Seconds |
-		  OHCI1394_masterIntEnable);
+		  OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
+		  OHCI1394_cycle64Seconds | OHCI1394_masterIntEnable);
 
 	/* Activate link_on bit and contender bit in our self ID packets.*/
 	if (ohci_update_phy_reg(card, 4, 0,
@@ -1408,9 +1416,13 @@ static int handle_ir_dualbuffer_packet(s
 	void *p, *end;
 	int i;
 
-	if (db->first_res_count > 0 && db->second_res_count > 0)
-		/* This descriptor isn't done yet, stop iteration. */
-		return 0;
+	if (db->first_res_count > 0 && db->second_res_count > 0) {
+		if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
+			/* This descriptor isn't done yet, stop iteration. */
+			return 0;
+		}
+		ctx->excess_bytes -= le16_to_cpu(db->second_req_count);
+	}
 
 	header_length = le16_to_cpu(db->first_req_count) -
 		le16_to_cpu(db->first_res_count);
@@ -1429,11 +1441,15 @@ static int handle_ir_dualbuffer_packet(s
 		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
 		memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
 		i += ctx->base.header_size;
+		ctx->excess_bytes +=
+			(le32_to_cpu(*(u32 *)(p + 4)) >> 16) & 0xffff;
 		p += ctx->base.header_size + 4;
 	}
-
 	ctx->header_length = i;
 
+	ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
+		le16_to_cpu(db->second_res_count);
+
 	if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
 		ir_header = (__le32 *) (db + 1);
 		ctx->base.callback(&ctx->base,
@@ -1452,24 +1468,24 @@ static int handle_ir_packet_per_buffer(s
 {
 	struct iso_context *ctx =
 		container_of(context, struct iso_context, context);
-	struct descriptor *pd = d + 1;
+	struct descriptor *pd;
 	__le32 *ir_header;
-	size_t header_length;
-	void *p, *end;
-	int i, z;
+	void *p;
+	int i;
 
-	if (pd->res_count == pd->req_count)
+	for (pd = d; pd <= last; pd++) {
+		if (pd->transfer_status)
+			break;
+	}
+	if (pd > last)
 		/* Descriptor(s) not done yet, stop iteration */
 		return 0;
 
-	header_length = le16_to_cpu(d->req_count);
-
 	i   = ctx->header_length;
-	z   = le32_to_cpu(pd->branch_address) & 0xf;
-	p   = d + z;
-	end = p + header_length;
+	p   = last + 1;
 
-	while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
+	if (ctx->base.header_size > 0 &&
+			i + ctx->base.header_size <= PAGE_SIZE) {
 		/*
 		 * The iso header is byteswapped to little endian by
 		 * the controller, but the remaining header quadlets
@@ -1478,14 +1494,11 @@ static int handle_ir_packet_per_buffer(s
 		 */
 		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
 		memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
-		i += ctx->base.header_size;
-		p += ctx->base.header_size + 4;
+		ctx->header_length += ctx->base.header_size;
 	}
 
-	ctx->header_length = i;
-
-	if (le16_to_cpu(pd->control) & DESCRIPTOR_IRQ_ALWAYS) {
-		ir_header = (__le32 *) (d + z);
+	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
+		ir_header = (__le32 *) p;
 		ctx->base.callback(&ctx->base,
 				   le32_to_cpu(ir_header[0]) & 0xffff,
 				   ctx->header_length, ctx->header,
@@ -1493,7 +1506,6 @@ static int handle_ir_packet_per_buffer(s
 		ctx->header_length = 0;
 	}
 
-
 	return 1;
 }
 
@@ -1775,19 +1787,6 @@ ohci_queue_iso_receive_dualbuffer(struct
 	 * packet, retransmit or terminate..
 	 */
 
-	if (packet->skip) {
-		d = context_get_descriptors(&ctx->context, 2, &d_bus);
-		if (d == NULL)
-			return -ENOMEM;
-
-		db = (struct db_descriptor *) d;
-		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
-					  DESCRIPTOR_BRANCH_ALWAYS |
-					  DESCRIPTOR_WAIT);
-		db->first_size = cpu_to_le16(ctx->base.header_size + 4);
-		context_append(&ctx->context, d, 2, 0);
-	}
-
 	p = packet;
 	z = 2;
 
@@ -1815,11 +1814,18 @@ ohci_queue_iso_receive_dualbuffer(struct
 		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
 					  DESCRIPTOR_BRANCH_ALWAYS);
 		db->first_size = cpu_to_le16(ctx->base.header_size + 4);
-		db->first_req_count = cpu_to_le16(header_size);
+		if (p->skip && rest == p->payload_length) {
+			db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
+			db->first_req_count = db->first_size;
+		} else {
+			db->first_req_count = cpu_to_le16(header_size);
+		}
 		db->first_res_count = db->first_req_count;
 		db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));
 
-		if (offset + rest < PAGE_SIZE)
+		if (p->skip && rest == p->payload_length)
+			length = 4;
+		else if (offset + rest < PAGE_SIZE)
 			length = rest;
 		else
 			length = PAGE_SIZE - offset;
@@ -1835,7 +1841,8 @@ ohci_queue_iso_receive_dualbuffer(struct
 		context_append(&ctx->context, d, z, header_z);
 		offset = (offset + length) & ~PAGE_MASK;
 		rest -= length;
-		page++;
+		if (offset == 0)
+			page++;
 	}
 
 	return 0;
@@ -1849,67 +1856,70 @@ ohci_queue_iso_receive_packet_per_buffer
 {
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
 	struct descriptor *d = NULL, *pd = NULL;
-	struct fw_iso_packet *p;
+	struct fw_iso_packet *p = packet;
 	dma_addr_t d_bus, page_bus;
 	u32 z, header_z, rest;
-	int i, page, offset, packet_count, header_size;
-
-	if (packet->skip) {
-		d = context_get_descriptors(&ctx->context, 1, &d_bus);
-		if (d == NULL)
-			return -ENOMEM;
-
-		d->control = cpu_to_le16(DESCRIPTOR_STATUS |
-					 DESCRIPTOR_INPUT_LAST |
-					 DESCRIPTOR_BRANCH_ALWAYS |
-					 DESCRIPTOR_WAIT);
-		context_append(&ctx->context, d, 1, 0);
-	}
-
-	/* one descriptor for header, one for payload */
-	/* FIXME: handle cases where we need multiple desc. for payload */
-	z = 2;
-	p = packet;
+	int i, j, length;
+	int page, offset, packet_count, header_size, payload_per_buffer;
 
 	/*
 	 * The OHCI controller puts the status word in the
 	 * buffer too, so we need 4 extra bytes per packet.
 	 */
 	packet_count = p->header_length / ctx->base.header_size;
-	header_size  = packet_count * (ctx->base.header_size + 4);
+	header_size  = ctx->base.header_size + 4;
 
 	/* Get header size in number of descriptors. */
 	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
 	page     = payload >> PAGE_SHIFT;
 	offset   = payload & ~PAGE_MASK;
-	rest     = p->payload_length;
+	payload_per_buffer = p->payload_length / packet_count;
 
 	for (i = 0; i < packet_count; i++) {
 		/* d points to the header descriptor */
+		z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
 		d = context_get_descriptors(&ctx->context,
-					    z + header_z, &d_bus);
+				z + header_z, &d_bus);
 		if (d == NULL)
 			return -ENOMEM;
 
-		d->control      = cpu_to_le16(DESCRIPTOR_INPUT_MORE);
+		d->control      = cpu_to_le16(DESCRIPTOR_STATUS |
+					      DESCRIPTOR_INPUT_MORE);
+		if (p->skip && i == 0)
+			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
 		d->req_count    = cpu_to_le16(header_size);
 		d->res_count    = d->req_count;
+		d->transfer_status = 0;
 		d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));
 
-		/* pd points to the payload descriptor */
-		pd = d + 1;
+		rest = payload_per_buffer;
+		for (j = 1; j < z; j++) {
+			pd = d + j;
+			pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
+						  DESCRIPTOR_INPUT_MORE);
+
+			if (offset + rest < PAGE_SIZE)
+				length = rest;
+			else
+				length = PAGE_SIZE - offset;
+			pd->req_count = cpu_to_le16(length);
+			pd->res_count = pd->req_count;
+			pd->transfer_status = 0;
+
+			page_bus = page_private(buffer->pages[page]);
+			pd->data_address = cpu_to_le32(page_bus + offset);
+
+			offset = (offset + length) & ~PAGE_MASK;
+			rest -= length;
+			if (offset == 0)
+				page++;
+		}
 		pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
 					  DESCRIPTOR_INPUT_LAST |
 					  DESCRIPTOR_BRANCH_ALWAYS);
-		if (p->interrupt)
+		if (p->interrupt && i == packet_count - 1)
 			pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
 
-		pd->req_count = cpu_to_le16(rest);
-		pd->res_count = pd->req_count;
-
-		page_bus = page_private(buffer->pages[page]);
-		pd->data_address = cpu_to_le32(page_bus + offset);
-
 		context_append(&ctx->context, d, z, header_z);
 	}
 
diff -Naurp linux-2.6-git/drivers/firewire/fw-sbp2.c linux1394-2.6/drivers/firewire/fw-sbp2.c
--- linux-2.6-git/drivers/firewire/fw-sbp2.c	2008-01-01 22:50:33.000000000 -0500
+++ linux1394-2.6/drivers/firewire/fw-sbp2.c	2008-01-10 14:19:14.000000000 -0500
@@ -151,9 +151,7 @@ struct sbp2_target {
 };
 
 #define SBP2_MAX_SG_ELEMENT_LENGTH	0xf000
-#define SBP2_MAX_SECTORS		255	/* Max sectors supported */
 #define SBP2_ORB_TIMEOUT		2000	/* Timeout in ms */
-
 #define SBP2_ORB_NULL			0x80000000
 
 #define SBP2_DIRECTION_TO_MEDIA		0x0
@@ -540,14 +538,26 @@ sbp2_send_management_orb(struct sbp2_log
 
 	retval = -EIO;
 	if (sbp2_cancel_orbs(lu) == 0) {
-		fw_error("orb reply timed out, rcode=0x%02x\n",
-			 orb->base.rcode);
+		/*
+		 * Logout requests frequently get sent to devices that aren't
+		 * there any more, resulting in extraneous error messages in
+		 * the logs.  Unfortunately, this means logout requests that
+		 * actually fail don't get logged.
+		 */
+		if (function != SBP2_LOGOUT_REQUEST)
+			fw_error("orb reply timed out, rcode=0x%02x\n",
+				 orb->base.rcode);
 		goto out;
 	}
 
 	if (orb->base.rcode != RCODE_COMPLETE) {
-		fw_error("management write failed, rcode 0x%02x\n",
-			 orb->base.rcode);
+		/*
+		 * On device removal from the bus, sometimes the logout
+		 * request times out, sometimes it just fails.
+		 */
+		if (function != SBP2_LOGOUT_REQUEST)
+			fw_error("management write failed, rcode 0x%02x\n",
+				 orb->base.rcode);
 		goto out;
 	}
 
@@ -628,6 +638,21 @@ static void sbp2_release_target(struct k
 
 static struct workqueue_struct *sbp2_wq;
 
+/*
+ * Always get the target's kref when scheduling work on one its units.
+ * Each workqueue job is responsible to call sbp2_target_put() upon return.
+ */
+static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
+{
+	if (queue_delayed_work(sbp2_wq, &lu->work, delay))
+		kref_get(&lu->tgt->kref);
+}
+
+static void sbp2_target_put(struct sbp2_target *tgt)
+{
+	kref_put(&tgt->kref, sbp2_release_target);
+}
+
 static void sbp2_reconnect(struct work_struct *work);
 
 static void sbp2_login(struct work_struct *work)
@@ -649,16 +674,12 @@ static void sbp2_login(struct work_struc
 
 	if (sbp2_send_management_orb(lu, node_id, generation,
 				SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) {
-		if (lu->retries++ < 5) {
-			if (queue_delayed_work(sbp2_wq, &lu->work,
-					       DIV_ROUND_UP(HZ, 5)))
-				kref_get(&lu->tgt->kref);
-		} else {
+		if (lu->retries++ < 5)
+			sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
+		else
 			fw_error("failed to login to %s LUN %04x\n",
 				 unit->device.bus_id, lu->lun);
-		}
-		kref_put(&lu->tgt->kref, sbp2_release_target);
-		return;
+		goto out;
 	}
 
 	lu->generation        = generation;
@@ -700,7 +721,8 @@ static void sbp2_login(struct work_struc
 		lu->sdev = sdev;
 		scsi_device_put(sdev);
 	}
-	kref_put(&lu->tgt->kref, sbp2_release_target);
+ out:
+	sbp2_target_put(lu->tgt);
 }
 
 static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
@@ -865,18 +887,13 @@ static int sbp2_probe(struct device *dev
 
 	get_device(&unit->device);
 
-	/*
-	 * We schedule work to do the login so we can easily
-	 * reschedule retries. Always get the ref before scheduling
-	 * work.
-	 */
+	/* Do the login in a workqueue so we can easily reschedule retries. */
 	list_for_each_entry(lu, &tgt->lu_list, link)
-		if (queue_delayed_work(sbp2_wq, &lu->work, 0))
-			kref_get(&tgt->kref);
+		sbp2_queue_work(lu, 0);
 	return 0;
 
  fail_tgt_put:
-	kref_put(&tgt->kref, sbp2_release_target);
+	sbp2_target_put(tgt);
 	return -ENOMEM;
 
  fail_shost_put:
@@ -889,7 +906,7 @@ static int sbp2_remove(struct device *de
 	struct fw_unit *unit = fw_unit(dev);
 	struct sbp2_target *tgt = unit->device.driver_data;
 
-	kref_put(&tgt->kref, sbp2_release_target);
+	sbp2_target_put(tgt);
 	return 0;
 }
 
@@ -915,10 +932,8 @@ static void sbp2_reconnect(struct work_s
 			lu->retries = 0;
 			PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
 		}
-		if (queue_delayed_work(sbp2_wq, &lu->work, DIV_ROUND_UP(HZ, 5)))
-			kref_get(&lu->tgt->kref);
-		kref_put(&lu->tgt->kref, sbp2_release_target);
-		return;
+		sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
+		goto out;
 	}
 
 	lu->generation        = generation;
@@ -930,8 +945,8 @@ static void sbp2_reconnect(struct work_s
 
 	sbp2_agent_reset(lu);
 	sbp2_cancel_orbs(lu);
-
-	kref_put(&lu->tgt->kref, sbp2_release_target);
+ out:
+	sbp2_target_put(lu->tgt);
 }
 
 static void sbp2_update(struct fw_unit *unit)
@@ -947,8 +962,7 @@ static void sbp2_update(struct fw_unit *
 	 */
 	list_for_each_entry(lu, &tgt->lu_list, link) {
 		lu->retries = 0;
-		if (queue_delayed_work(sbp2_wq, &lu->work, 0))
-			kref_get(&tgt->kref);
+		sbp2_queue_work(lu, 0);
 	}
 }
 
@@ -1103,9 +1117,9 @@ sbp2_map_scatterlist(struct sbp2_command
 	 * elements larger than 65535 bytes, some IOMMUs may merge sg elements
 	 * during DMA mapping, and Linux currently doesn't prevent this.
 	 */
-	for (i = 0, j = 0; i < count; i++) {
-		sg_len = sg_dma_len(sg + i);
-		sg_addr = sg_dma_address(sg + i);
+	for (i = 0, j = 0; i < count; i++, sg = sg_next(sg)) {
+		sg_len = sg_dma_len(sg);
+		sg_addr = sg_dma_address(sg);
 		while (sg_len) {
 			/* FIXME: This won't get us out of the pinch. */
 			if (unlikely(j >= ARRAY_SIZE(orb->page_table))) {
@@ -1325,6 +1339,7 @@ static struct scsi_host_template scsi_dr
 	.this_id		= -1,
 	.sg_tablesize		= SG_ALL,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.cmd_per_lun		= 1,
 	.can_queue		= 1,
 	.sdev_attrs		= sbp2_scsi_sysfs_attrs,
diff -Naurp linux-2.6-git/drivers/firewire/fw-transaction.c linux1394-2.6/drivers/firewire/fw-transaction.c
--- linux-2.6-git/drivers/firewire/fw-transaction.c	2008-01-01 22:50:33.000000000 -0500
+++ linux1394-2.6/drivers/firewire/fw-transaction.c	2008-01-10 14:19:14.000000000 -0500
@@ -650,7 +650,7 @@ fw_core_handle_request(struct fw_card *c
 		 HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | p->header[2];
 	tcode       = HEADER_GET_TCODE(p->header[0]);
 	destination = HEADER_GET_DESTINATION(p->header[0]);
-	source      = HEADER_GET_SOURCE(p->header[0]);
+	source      = HEADER_GET_SOURCE(p->header[1]);
 
 	spin_lock_irqsave(&address_handler_lock, flags);
 	handler = lookup_enclosing_address_handler(&address_handler_list,


Index: kernel.spec
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/kernel.spec,v
retrieving revision 1.356
retrieving revision 1.357
diff -u -r1.356 -r1.357
--- kernel.spec	10 Jan 2008 23:10:46 -0000	1.356
+++ kernel.spec	11 Jan 2008 19:24:21 -0000	1.357
@@ -657,6 +657,11 @@
 Patch2100: linux-2.6-ext4-jbd2-support-patch-queue.patch
 Patch2101: linux-2.6-ext4-jbd2-patch-queue.patch
 
+# linux1394 git patches
+Patch2200: linux-2.6-firewire-git-update.patch
+# should be committed shortly, in a very similar form...
+Patch2201: linux-2.6-firewire-dynamic-ir-buffer-alloc.patch
+
 %endif
 
 BuildRoot: %{_tmppath}/kernel-%{KVERREL}-root-%{_target_cpu}
@@ -1182,6 +1187,10 @@
 ApplyPatch linux-2.6-ext4-jbd2-support-patch-queue.patch
 ApplyPatch linux-2.6-ext4-jbd2-patch-queue.patch
 
+# linux1394 git patches
+ApplyPatch linux-2.6-firewire-git-update.patch
+ApplyPatch linux-2.6-firewire-dynamic-ir-buffer-alloc.patch
+
 # ---------- below all scheduled for 2.6.24 -----------------
 
 # END OF PATCH APPLICATIONS
@@ -1750,6 +1759,10 @@
 %kernel_variant_files -a /%{image_install_path}/xen*-%{KVERREL} -e /etc/ld.so.conf.d/kernelcap-%{KVERREL}.conf %{with_xen} xen
 
 %changelog
+* Fri Jan 11 2008 Jarod Wilson <jwilson at redhat.com>
+- Add linux1394/firewire git patches
+- FireWire IR dynamic buffer alloc (David Moore)
+
 * Thu Jan 10 2008 Chuck Ebbert <cebbert at redhat.com>
 - temporarily fix up utrace breakage
 
