rpms/kernel/devel drm-ddc-caching-bug.patch, NONE, 1.1.2.2 kernel.spec, 1.1294.2.62, 1.1294.2.63 lirc-2.6.31.patch, 1.2.2.2, 1.2.2.3 xen.pvops.patch, 1.1.2.39, 1.1.2.40 xen.pvops.post.patch, 1.1.2.25, 1.1.2.26 xen.pvops.pre.patch, 1.1.2.16, 1.1.2.17
myoung
myoung at fedoraproject.org
Sat Sep 5 19:00:39 UTC 2009
Author: myoung
Update of /cvs/pkgs/rpms/kernel/devel
In directory cvs1.fedora.phx.redhat.com:/tmp/cvs-serv8305
Modified Files:
Tag: private-myoung-dom0-branch
kernel.spec lirc-2.6.31.patch xen.pvops.patch
xen.pvops.post.patch xen.pvops.pre.patch
Added Files:
Tag: private-myoung-dom0-branch
drm-ddc-caching-bug.patch
Log Message:
Update the pvops patch, which includes swiotlb updates and a network fix, and try a DRM build fix
drm-ddc-caching-bug.patch:
drm_crtc_helper.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
--- NEW FILE drm-ddc-caching-bug.patch ---
When an output was disconnected, its mode list would remain. If you later
plugged into a sink with no EDID (projector, etc), you'd inherit the mode
list from the old sink, which is not what you want.
diff -up linux-2.6.30.noarch/drivers/gpu/drm/drm_crtc_helper.c.jx linux-2.6.30.noarch/drivers/gpu/drm/drm_crtc_helper.c
--- linux-2.6.30.noarch/drivers/gpu/drm/drm_crtc_helper.c.jx 2009-09-03 16:56:11.000000000 -0400
+++ linux-2.6.30.noarch/drivers/gpu/drm/drm_crtc_helper.c 2009-09-03 16:57:24.911505245 -0400
@@ -104,8 +104,7 @@ int drm_helper_probe_single_connector_mo
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("%s is disconnected\n",
drm_get_connector_name(connector));
- /* TODO set EDID to NULL */
- return 0;
+ goto prune;
}
count = (*connector_funcs->get_modes)(connector);
@@ -130,7 +129,7 @@ int drm_helper_probe_single_connector_mo
mode);
}
-
+prune:
drm_mode_prune_invalid(dev, &connector->modes, true);
if (list_empty(&connector->modes))
Index: kernel.spec
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/kernel.spec,v
retrieving revision 1.1294.2.62
retrieving revision 1.1294.2.63
diff -u -p -r1.1294.2.62 -r1.1294.2.63
--- kernel.spec 3 Sep 2009 20:37:22 -0000 1.1294.2.62
+++ kernel.spec 5 Sep 2009 19:00:23 -0000 1.1294.2.63
@@ -702,6 +702,7 @@ Patch1825: drm-intel-pm.patch
Patch1826: drm-r600-kms.patch
Patch1827: drm-rv710-ucode-fix.patch
Patch1828: drm-god-shut-up-about-edid-already.patch
+Patch1829: drm-ddc-caching-bug.patch
# vga arb
Patch1900: linux-2.6-vga-arb.patch
@@ -777,7 +778,6 @@ glibc package.
%package firmware
Summary: Firmware files used by the Linux kernel
Group: Development/System
-Buildarch: noarch
# This is... complicated.
# Look at the WHENCE file.
License: GPL+ and GPLv2+ and MIT and Redistributable, no modification permitted
@@ -984,6 +984,12 @@ ApplyOptionalPatch()
fi
}
+# we don't want a .config file when building firmware: it just confuses the build system
+%define build_firmware \
+ mv .config .config.firmware_save \
+ make INSTALL_FW_PATH=$RPM_BUILD_ROOT/lib/firmware firmware_install \
+ mv .config.firmware_save .config
+
# First we unpack the kernel tarball.
# If this isn't the first make prep, we use links to the existing clean tarball
# which speeds things up quite a bit.
@@ -1330,6 +1336,7 @@ ApplyPatch drm-next.patch
ApplyPatch drm-r600-kms.patch
ApplyPatch drm-rv710-ucode-fix.patch
ApplyPatch drm-god-shut-up-about-edid-already.patch
+ApplyPatch drm-ddc-caching-bug.patch
ApplyPatch drm-nouveau.patch
# pm broken on my thinkpad t60p - airlied
@@ -1780,7 +1787,7 @@ rm -f $RPM_BUILD_ROOT/usr/include/asm*/i
%endif
%if %{with_firmware}
-make INSTALL_FW_PATH=$RPM_BUILD_ROOT/lib/firmware firmware_install
+%{build_firmware}
%endif
%if %{with_bootwrapper}
@@ -1790,7 +1797,7 @@ make DESTDIR=$RPM_BUILD_ROOT bootwrapper
%if %{with_dracut}
%if !%{with_firmware}
# dracut needs the firmware files
- make INSTALL_FW_PATH=$RPM_BUILD_ROOT/lib/firmware firmware_install
+ %{build_firmware}
%endif
for i in $RPM_BUILD_ROOT/lib/modules/*; do
[ -d $i ] || continue
@@ -2029,6 +2036,22 @@ fi
# and build.
%changelog
+* Sat Sep 04 2009 Michael Young <m.a.young at durham.ac.uk>
+- update pvops which includes swiotlb updates and a network fix
+- try a drm build fix
+- re-enable CONFIG_DRM_NOUVEAU and CONFIG_DRM_RADEON_KMS options
+
+* Fri Sep 04 2009 Chuck Ebbert <cebbert at redhat.com> 2.6.31-0.203.rc8.git2
+- Fix kernel build errors when building firmware by removing the
+ .config file before that step and restoring it afterward.
+
+* Thu Sep 03 2009 Adam Jackson <ajax at redhat.com>
+- drm-ddc-caching-bug.patch: Empty the connector's mode list when it's
+ disconnected.
+
+* Thu Sep 03 2009 Jarod Wilson <jarod at redhat.com>
+- Update hdpvr and lirc_zilog drivers for 2.6.31 i2c
+
* Thu Sep 03 2009 Michael Young <m.a.young at durham.ac.uk>
- Update pvops patch to try stack protector on i686 again
- disable linux-2.6-xen-stack-protector-fix.patch as we already have it
lirc-2.6.31.patch:
MAINTAINERS | 9
drivers/input/Kconfig | 2
drivers/input/Makefile | 2
drivers/input/lirc/Kconfig | 119 +
drivers/input/lirc/Makefile | 21
drivers/input/lirc/lirc.h | 100 +
drivers/input/lirc/lirc_bt829.c | 383 +++++
drivers/input/lirc/lirc_dev.c | 839 ++++++++++++
drivers/input/lirc/lirc_dev.h | 184 ++
drivers/input/lirc/lirc_ene0100.c | 644 +++++++++
drivers/input/lirc/lirc_ene0100.h | 169 ++
drivers/input/lirc/lirc_i2c.c | 537 +++++++
drivers/input/lirc/lirc_igorplugusb.c | 556 ++++++++
drivers/input/lirc/lirc_imon.c | 2301 ++++++++++++++++++++++++++++++++++
drivers/input/lirc/lirc_it87.c | 986 ++++++++++++++
drivers/input/lirc/lirc_it87.h | 116 +
drivers/input/lirc/lirc_ite8709.c | 539 +++++++
drivers/input/lirc/lirc_mceusb.c | 1242 ++++++++++++++++++
drivers/input/lirc/lirc_parallel.c | 709 ++++++++++
drivers/input/lirc/lirc_parallel.h | 26
drivers/input/lirc/lirc_sasem.c | 931 +++++++++++++
drivers/input/lirc/lirc_serial.c | 1316 +++++++++++++++++++
drivers/input/lirc/lirc_sir.c | 1283 ++++++++++++++++++
drivers/input/lirc/lirc_streamzap.c | 794 +++++++++++
drivers/input/lirc/lirc_ttusbir.c | 397 +++++
drivers/input/lirc/lirc_zilog.c | 1395 ++++++++++++++++++++
26 files changed, 15600 insertions(+)
Index: lirc-2.6.31.patch
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/lirc-2.6.31.patch,v
retrieving revision 1.2.2.2
retrieving revision 1.2.2.3
diff -u -p -r1.2.2.2 -r1.2.2.3
--- lirc-2.6.31.patch 3 Sep 2009 20:02:38 -0000 1.2.2.2
+++ lirc-2.6.31.patch 5 Sep 2009 19:00:23 -0000 1.2.2.3
@@ -1,6 +1,6 @@
Linux Infrared Remote Control drivers -- http://www.lirc.org
-Last updated: Tuesday, September 01, 2009
+Last updated: Thursday, September 03, 2009
From http://git.wilsonet.com/linux-2.6-lirc.git/
@@ -24,7 +24,7 @@ Signed-off-by: Jarod Wilson <jarod at redha
drivers/input/lirc/lirc_it87.c | 986 ++++++++++++++
drivers/input/lirc/lirc_it87.h | 116 ++
drivers/input/lirc/lirc_ite8709.c | 539 ++++++++
- drivers/input/lirc/lirc_mceusb.c | 1244 ++++++++++++++++++
+ drivers/input/lirc/lirc_mceusb.c | 1242 ++++++++++++++++++
drivers/input/lirc/lirc_parallel.c | 709 ++++++++++
drivers/input/lirc/lirc_parallel.h | 26 +
drivers/input/lirc/lirc_sasem.c | 931 +++++++++++++
@@ -33,7 +33,7 @@ Signed-off-by: Jarod Wilson <jarod at redha
drivers/input/lirc/lirc_streamzap.c | 794 ++++++++++++
drivers/input/lirc/lirc_ttusbir.c | 397 ++++++
drivers/input/lirc/lirc_zilog.c | 1395 ++++++++++++++++++++
- 26 files changed, 15602 insertions(+), 0 deletions(-)
+ 26 files changed, 15600 insertions(+), 0 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 8dca9d8..f25dc26 100644
@@ -7658,10 +7658,10 @@ index 0000000..3d53181
+MODULE_PARM_DESC(debug, "Enable debugging messages");
diff --git a/drivers/input/lirc/lirc_mceusb.c b/drivers/input/lirc/lirc_mceusb.c
new file mode 100644
-index 0000000..3dd4cfb
+index 0000000..8b48a56
--- /dev/null
+++ b/drivers/input/lirc/lirc_mceusb.c
-@@ -0,0 +1,1244 @@
+@@ -0,0 +1,1242 @@
+/*
+ * LIRC driver for Windows Media Center Edition USB Infrared Transceivers
+ *
@@ -7675,12 +7675,12 @@ index 0000000..3dd4cfb
+ *
+ * Original lirc_mceusb driver deprecated in favor of this driver, which
+ * supports the 1st-gen device now too. Transmitting on the 1st-gen device
-+ * is as yet untested, but receiving definitely works.
++ * only functions on port #2 at the moment.
+ *
+ * Support for 1st-gen device added June 2009,
+ * by Jarod Wilson <jarod at wilsonet.com>
+ *
-+ * Transmission support for 1st-gen device added August 2009,
++ * Initial transmission support for 1st-gen device added August 2009,
+ * by Patrick Calhoun <phineas at ou.edu>
+ *
+ * Derived from ATI USB driver by Paul Miller and the original
@@ -8791,32 +8791,30 @@ index 0000000..3dd4cfb
+ request_packet_async(ir, ep_in, NULL, maxp, MCEUSB_INBOUND);
+ request_packet_async(ir, ep_out, pin_init3, sizeof(pin_init3),
+ MCEUSB_OUTBOUND);
-+ /* if we don't issue the correct number of receives
-+ * (MCEUSB_INBOUND) for each outbound, then the first few ir
-+ * pulses will be interpreted by the usb_async_callback routine
-+ * - we should ensure we have the right amount OR less - as the
-+ * mceusb_dev_recv routine will handle the control packets OK -
-+ * they start with 0x9f - but the async callback doesn't handle
-+ * ir pulse packets
-+ */
-+ request_packet_async(ir, ep_in, NULL, maxp, 0);
-+ } else {
++ } else if (ir->flags.microsoft_gen1) {
+ /* original ms mce device requires some additional setup */
-+ if (ir->flags.microsoft_gen1)
-+ mceusb_gen1_init(ir);
++ mceusb_gen1_init(ir);
++ } else {
+
+ request_packet_async(ir, ep_in, NULL, maxp, MCEUSB_INBOUND);
+ request_packet_async(ir, ep_in, NULL, maxp, MCEUSB_INBOUND);
+ request_packet_async(ir, ep_out, init1,
+ sizeof(init1), MCEUSB_OUTBOUND);
+ request_packet_async(ir, ep_in, NULL, maxp, MCEUSB_INBOUND);
-+ /* This breaks transmit on orig ms mce device */
-+ if (!ir->flags.microsoft_gen1)
-+ request_packet_async(ir, ep_out, init2,
-+ sizeof(init2), MCEUSB_OUTBOUND);
-+ request_packet_async(ir, ep_in, NULL, maxp, 0);
++ request_packet_async(ir, ep_out, init2,
++ sizeof(init2), MCEUSB_OUTBOUND);
+ }
+
++ /*
++ * if we don't issue the correct number of receives (MCEUSB_INBOUND)
++ * for each outbound, then the first few ir pulses will be interpreted
++ * by the usb_async_callback routine - we should ensure we have the
++ * right amount OR less - as the mceusb_dev_recv routine will handle
++ * the control packets OK - they start with 0x9f - but the async
++ * callback doesn't handle ir pulse packets
++ */
++ request_packet_async(ir, ep_in, NULL, maxp, 0);
++
+ usb_set_intfdata(intf, ir);
+
+ return 0;
@@ -14406,7 +14404,7 @@ index 0000000..2955bad
+module_exit(ttusbir_exit_module);
diff --git a/drivers/input/lirc/lirc_zilog.c b/drivers/input/lirc/lirc_zilog.c
new file mode 100644
-index 0000000..f86db02
+index 0000000..c4080f5
--- /dev/null
+++ b/drivers/input/lirc/lirc_zilog.c
@@ -0,0 +1,1395 @@
@@ -14476,31 +14474,30 @@ index 0000000..f86db02
+ struct lirc_driver l;
+
+ /* Device info */
-+ struct mutex lock;
-+ int open;
++ struct mutex lock;
++ int open;
+
+ /* RX device */
-+ struct i2c_client c_rx;
++ struct i2c_client c_rx;
++ int have_rx;
+
+ /* RX device buffer & lock */
+ struct lirc_buffer buf;
-+ struct mutex buf_lock;
++ struct mutex buf_lock;
+
+ /* RX polling thread data */
-+ struct completion *t_notify;
-+ struct completion *t_notify2;
-+ int shutdown;
++ struct completion *t_notify;
++ struct completion *t_notify2;
++ int shutdown;
+ struct task_struct *task;
+
+ /* RX read data */
-+ unsigned char b[3];
++ unsigned char b[3];
+
+ /* TX device */
-+ struct i2c_client c_tx;
-+ int need_boot;
-+
-+ /* # devices, for shutdown */
-+ int devs;
++ struct i2c_client c_tx;
++ int need_boot;
++ int have_tx;
+};
+
+/* Minor -> data mapping */
@@ -14531,13 +14528,15 @@ index 0000000..f86db02
+};
+
+static struct tx_data_struct *tx_data;
-+struct mutex tx_data_lock;
++static struct mutex tx_data_lock;
+
-+#define DEVICE_NAME "lirc_zilog"
+#define zilog_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, \
+ ## args)
+#define zilog_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
+
++#define ZILOG_HAUPPAUGE_IR_RX_NAME "Zilog/Hauppauge IR RX"
++#define ZILOG_HAUPPAUGE_IR_TX_NAME "Zilog/Hauppauge IR TX"
++
+/* module parameters */
+static int debug; /* debug output */
+static int disable_rx; /* disable RX device */
@@ -14547,7 +14546,7 @@ index 0000000..f86db02
+#define dprintk(fmt, args...) \
+ do { \
+ if (debug) \
-+ printk(KERN_DEBUG DEVICE_NAME ": " fmt, \
++ printk(KERN_DEBUG KBUILD_MODNAME ": " fmt, \
+ ## args); \
+ } while (0)
+
@@ -14926,8 +14925,8 @@ index 0000000..f86db02
+ zilog_error("unexpected IR TX response: %02x\n", buf[0]);
+ return 0;
+ }
-+ zilog_notify("Zilog/Hauppauge IR blaster: firmware version "
-+ "%d.%d.%d\n", buf[1], buf[2], buf[3]);
++ zilog_notify("Zilog/Hauppauge IR blaster firmware version "
++ "%d.%d.%d loaded\n", buf[1], buf[2], buf[3]);
+
+ return 0;
+}
@@ -14962,7 +14961,7 @@ index 0000000..f86db02
+ int ret;
+ unsigned int i;
+ unsigned char *data, version, num_global_fixed;
-+ const struct firmware *fw_entry = NULL;
++ const struct firmware *fw_entry;
+
+ /* Already loaded? */
+ mutex_lock(&tx_data_lock);
@@ -14979,7 +14978,7 @@ index 0000000..f86db02
+ ret = ret < 0 ? ret : -EFAULT;
+ goto out;
+ }
-+ zilog_notify("firmware of size %zu loaded\n", fw_entry->size);
++ dprintk("firmware of size %zu loaded\n", fw_entry->size);
+
+ /* Parse the file */
+ tx_data = vmalloc(sizeof(*tx_data));
@@ -15026,7 +15025,7 @@ index 0000000..f86db02
+ &tx_data->num_code_sets))
+ goto corrupt;
+
-+ zilog_notify("%u codesets loaded\n", tx_data->num_code_sets);
++ dprintk("%u IR blaster codesets loaded\n", tx_data->num_code_sets);
+
+ tx_data->code_sets = vmalloc(
+ tx_data->num_code_sets * sizeof(char *));
@@ -15520,7 +15519,6 @@ index 0000000..f86db02
+ .owner = THIS_MODULE
+};
+
-+static int ir_attach(struct i2c_adapter *adap, int have_rx, int have_tx);
+static int ir_remove(struct i2c_client *client);
+static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id);
+static int ir_command(struct i2c_client *client, unsigned int cmd, void *arg);
@@ -15528,14 +15526,16 @@ index 0000000..f86db02
+static const struct i2c_device_id ir_transceiver_id[] = {
+ /* Generic entry for any IR transceiver */
+ { "ir_video", 0 },
-+ /* IR device specific entries could be added here */
++ /* IR device specific entries should be added here */
++ { "ir_tx_z8f0811_haup", 0 },
++ { "ir_rx_z8f0811_haup", 0 },
+ { }
+};
+
+static struct i2c_driver driver = {
+ .driver = {
+ .owner = THIS_MODULE,
-+ .name = "i2c ir driver",
++ .name = "Zilog/Hauppauge i2c IR",
+ },
+ .probe = ir_probe,
+ .remove = ir_remove,
@@ -15543,11 +15543,6 @@ index 0000000..f86db02
+ .id_table = ir_transceiver_id,
+};
+
-+static struct i2c_client client_template = {
-+ .name = "unset",
-+ .driver = &driver
-+};
-+
+static struct file_operations lirc_fops = {
+ .owner = THIS_MODULE,
+ .llseek = lseek,
@@ -15559,30 +15554,107 @@ index 0000000..f86db02
+ .release = close
+};
+
-+static int i2c_attach(struct i2c_client *client, struct IR *ir)
++static int ir_remove(struct i2c_client *client)
+{
-+ i2c_set_clientdata(client, ir);
++ struct IR *ir = i2c_get_clientdata(client);
++
++ mutex_lock(&ir->lock);
++
++ if (ir->have_rx || ir->have_tx) {
++ DECLARE_COMPLETION(tn);
++ DECLARE_COMPLETION(tn2);
++
++ /* end up polling thread */
++ if (ir->task && !IS_ERR(ir->task)) {
++ ir->t_notify = &tn;
++ ir->t_notify2 = &tn2;
++ ir->shutdown = 1;
++ wake_up_process(ir->task);
++ complete(&tn2);
++ wait_for_completion(&tn);
++ ir->t_notify = NULL;
++ ir->t_notify2 = NULL;
++ }
++
++ } else {
++ mutex_unlock(&ir->lock);
++ zilog_error("%s: detached from something we didn't "
++ "attach to\n", __func__);
++ return -ENODEV;
++ }
++
++ /* unregister lirc driver */
++ if (ir->l.minor >= 0 && ir->l.minor < MAX_IRCTL_DEVICES) {
++ lirc_unregister_driver(ir->l.minor);
++ ir_devices[ir->l.minor] = NULL;
++ }
++
++ /* free memory */
++ lirc_buffer_free(&ir->buf);
++ mutex_unlock(&ir->lock);
++ kfree(ir);
+
-+ ++ir->devs;
+ return 0;
+}
+
-+static int ir_attach(struct i2c_adapter *adap, int have_rx, int have_tx)
++static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
-+ struct IR *ir;
++ struct IR *ir = NULL;
++ struct i2c_adapter *adap = client->adapter;
++ char buf;
+ int ret;
++ int have_rx = 0, have_tx = 0;
++
++ dprintk("%s: adapter id=0x%x, client addr=0x%02x\n",
++ __func__, adap->id, client->addr);
+
-+ printk("lirc_zilog: chip found with %s\n",
++ /* if this isn't an appropriate device, bail w/-ENODEV now */
++ if (!(adap->id == I2C_HW_B_BT848 ||
++#ifdef I2C_HW_B_HDPVR
++ adap->id == I2C_HW_B_HDPVR ||
++#endif
++ adap->id == I2C_HW_B_CX2341X))
++ goto out_nodev;
++
++ /*
++ * The external IR receiver is at i2c address 0x71.
++ * The IR transmitter is at 0x70.
++ */
++ client->addr = 0x70;
++
++ if (!disable_tx) {
++ if (i2c_master_recv(client, &buf, 1) == 1)
++ have_tx = 1;
++ dprintk("probe 0x70 @ %s: %s\n",
++ adap->name, have_tx ? "success" : "failed");
++ }
++
++ if (!disable_rx) {
++ client->addr = 0x71;
++ if (i2c_master_recv(client, &buf, 1) == 1)
++ have_rx = 1;
++ dprintk("probe 0x71 @ %s: %s\n",
++ adap->name, have_rx ? "success" : "failed");
++ }
++
++ if (!(have_rx || have_tx)) {
++ zilog_error("%s: no devices found\n", adap->name);
++ goto out_nodev;
++ }
++
++ printk(KERN_INFO "lirc_zilog: chip found with %s\n",
+ have_rx && have_tx ? "RX and TX" :
+ have_rx ? "RX only" : "TX only");
+
+ ir = kzalloc(sizeof(struct IR), GFP_KERNEL);
-+ if (ir == NULL)
-+ return -ENOMEM;
-+ if (lirc_buffer_init(&ir->buf, 2, BUFLEN/2) != 0) {
-+ kfree(ir);
-+ return -ENOMEM;
-+ }
++
++ if (!ir)
++ goto out_nomem;
++
++ ret = lirc_buffer_init(&ir->buf, 2, BUFLEN / 2);
++ if (ret)
++ goto out_nomem;
++
+ mutex_init(&ir->lock);
+ mutex_init(&ir->buf_lock);
+ ir->need_boot = 1;
@@ -15590,18 +15662,17 @@ index 0000000..f86db02
+ memcpy(&ir->l, &lirc_template, sizeof(struct lirc_driver));
+ ir->l.minor = -1;
+
++ /* I2C attach to device */
++ i2c_set_clientdata(client, ir);
++
+ /* initialise RX device */
-+ client_template.adapter = adap;
-+ memcpy(&ir->c_rx, &client_template, sizeof(struct i2c_client));
+ if (have_rx) {
+ DECLARE_COMPLETION(tn);
++ memcpy(&ir->c_rx, client, sizeof(struct i2c_client));
+
-+ /* I2C attach to device */
+ ir->c_rx.addr = 0x71;
-+ strncpy(ir->c_rx.name, "Zilog/Hauppauge RX", I2C_NAME_SIZE);
-+ ret = i2c_attach(&ir->c_rx, ir);
-+ if (ret != 0)
-+ goto err;
++ strncpy(ir->c_rx.name, ZILOG_HAUPPAUGE_IR_RX_NAME,
++ I2C_NAME_SIZE);
+
+ /* try to fire up polling thread */
+ ir->t_notify = &tn;
@@ -15614,24 +15685,23 @@ index 0000000..f86db02
+ }
+ wait_for_completion(&tn);
+ ir->t_notify = NULL;
++ ir->have_rx = 1;
+ }
+
+ /* initialise TX device */
-+ memcpy(&ir->c_tx, &client_template, sizeof(struct i2c_client));
+ if (have_tx) {
-+ /* I2C attach to device */
++ memcpy(&ir->c_tx, client, sizeof(struct i2c_client));
+ ir->c_tx.addr = 0x70;
-+ strncpy(ir->c_tx.name, "Zilog/Hauppauge TX", I2C_NAME_SIZE);
-+ ret = i2c_attach(&ir->c_tx, ir);
-+ if (ret != 0)
-+ goto err;
++ strncpy(ir->c_tx.name, ZILOG_HAUPPAUGE_IR_TX_NAME,
++ I2C_NAME_SIZE);
++ ir->have_tx = 1;
+ }
+
+ /* set lirc_dev stuff */
+ ir->l.code_length = 13;
+ ir->l.rbuf = &ir->buf;
-+ ir->l.fops = &lirc_fops;
-+ ir->l.data = ir;
++ ir->l.fops = &lirc_fops;
++ ir->l.data = ir;
+ ir->l.minor = minor;
+ ir->l.sample_rate = 0;
+
@@ -15658,6 +15728,7 @@ index 0000000..f86db02
+ if (ret != 0)
+ goto err;
+ }
++
+ return 0;
+
+err:
@@ -15667,100 +15738,15 @@ index 0000000..f86db02
+ if (ir->c_tx.addr)
+ ir_remove(&ir->c_tx);
+ return ret;
-+}
-+
-+static int ir_remove(struct i2c_client *client)
-+{
-+ struct IR *ir = i2c_get_clientdata(client);
-+ mutex_lock(&ir->lock);
-+
-+ if (client == &ir->c_rx) {
-+ DECLARE_COMPLETION(tn);
-+ DECLARE_COMPLETION(tn2);
-+
-+ /* end up polling thread */
-+ if (ir->task && !IS_ERR(ir->task)) {
-+ ir->t_notify = &tn;
-+ ir->t_notify2 = &tn2;
-+ ir->shutdown = 1;
-+ wake_up_process(ir->task);
-+ complete(&tn2);
-+ wait_for_completion(&tn);
-+ ir->t_notify = NULL;
-+ ir->t_notify2 = NULL;
-+ }
-+
-+ } else {
-+ mutex_unlock(&ir->lock);
-+ zilog_error("%s: detached from something we didn't "
-+ "attach to\n", __func__);
-+ return -ENODEV;
-+ }
-+
-+ --ir->devs;
-+ if (ir->devs < 0) {
-+ mutex_unlock(&ir->lock);
-+ zilog_error("%s: invalid device count\n", __func__);
-+ return -ENODEV;
-+ } else if (ir->devs == 0) {
-+ /* unregister lirc driver */
-+ if (ir->l.minor >= 0 && ir->l.minor < MAX_IRCTL_DEVICES) {
-+ lirc_unregister_driver(ir->l.minor);
-+ ir_devices[ir->l.minor] = NULL;
-+ }
+
-+ /* free memory */
-+ lirc_buffer_free(&ir->buf);
-+ mutex_unlock(&ir->lock);
-+ kfree(ir);
-+ return 0;
-+ }
-+ mutex_unlock(&ir->lock);
-+ return 0;
-+}
++out_nodev:
++ zilog_error("no device found\n");
++ return -ENODEV;
+
-+static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
-+{
-+ struct i2c_adapter *adap = client->adapter;
-+ char buf;
-+
-+ if (adap->id == I2C_HW_B_BT848 ||
-+#ifdef I2C_HW_B_HDPVR
-+ adap->id == I2C_HW_B_HDPVR ||
-+#endif
-+ adap->id == I2C_HW_B_CX2341X) {
-+ int have_rx = 0, have_tx = 0;
-+
-+ /*
-+ * The external IR receiver is at i2c address 0x71.
-+ * The IR transmitter is at 0x70.
-+ */
-+ client->addr = 0x70;
-+
-+ if (!disable_rx) {
-+ if (i2c_master_recv(client, &buf, 1) == 1)
-+ have_rx = 1;
-+ dprintk("probe 0x70 @ %s: %s\n",
-+ adap->name,
-+ have_rx ? "yes" : "no");
-+ }
-+
-+ if (!disable_tx) {
-+ client->addr = 0x71;
-+ if (i2c_master_recv(client, &buf, 1) == 1)
-+ have_tx = 1;
-+ dprintk("probe 0x71 @ %s: %s\n",
-+ adap->name,
-+ have_tx ? "yes" : "no");
-+ }
-+
-+ if (have_rx || have_tx)
-+ return ir_attach(adap, have_rx, have_tx);
-+ else
-+ zilog_error("%s: no devices found\n", adap->name);
-+ }
-+
-+ return 0;
++out_nomem:
++ zilog_error("memory allocation failure\n");
++ kfree(ir);
++ return -ENOMEM;
+}
+
+static int ir_command(struct i2c_client *client, unsigned int cmd, void *arg)
@@ -15771,10 +15757,21 @@ index 0000000..f86db02
+
+static int __init zilog_init(void)
+{
++ int ret;
++
++ zilog_notify("Zilog/Hauppauge IR driver initializing\n");
++
+ mutex_init(&tx_data_lock);
++
+ request_module("firmware_class");
-+ i2c_add_driver(&driver);
-+ return 0;
++
++ ret = i2c_add_driver(&driver);
++ if (ret)
++ zilog_error("initialization failed\n");
++ else
++ zilog_notify("initialization complete\n");
++
++ return ret;
+}
+
+static void __exit zilog_exit(void)
@@ -15782,6 +15779,7 @@ index 0000000..f86db02
+ i2c_del_driver(&driver);
+ /* if loaded */
+ fw_unload();
++ zilog_notify("Zilog/Hauppauge IR driver unloaded\n");
+}
+
+module_init(zilog_init);
xen.pvops.patch:
arch/x86/Kconfig | 4
arch/x86/Makefile | 2
arch/x86/include/asm/agp.h | 15
arch/x86/include/asm/e820.h | 2
arch/x86/include/asm/i387.h | 1
arch/x86/include/asm/io.h | 15
arch/x86/include/asm/io_apic.h | 7
arch/x86/include/asm/microcode.h | 9
arch/x86/include/asm/paravirt.h | 718 ------------
arch/x86/include/asm/paravirt_types.h | 722 +++++++++++++
arch/x86/include/asm/pci.h | 8
arch/x86/include/asm/pci_x86.h | 2
arch/x86/include/asm/pgtable.h | 3
arch/x86/include/asm/processor.h | 4
arch/x86/include/asm/tlbflush.h | 6
arch/x86/include/asm/xen/hypercall.h | 44
arch/x86/include/asm/xen/interface.h | 8
arch/x86/include/asm/xen/interface_32.h | 5
arch/x86/include/asm/xen/interface_64.h | 13
arch/x86/include/asm/xen/iommu.h | 12
arch/x86/include/asm/xen/page.h | 16
arch/x86/include/asm/xen/pci.h | 37
arch/x86/include/asm/xen/swiotlb.h | 12
arch/x86/kernel/Makefile | 1
arch/x86/kernel/acpi/boot.c | 18
arch/x86/kernel/acpi/processor.c | 4
arch/x86/kernel/acpi/sleep.c | 2
arch/x86/kernel/apic/io_apic.c | 49
arch/x86/kernel/cpu/mtrr/Makefile | 1
arch/x86/kernel/cpu/mtrr/amd.c | 6
arch/x86/kernel/cpu/mtrr/centaur.c | 6
arch/x86/kernel/cpu/mtrr/cyrix.c | 6
arch/x86/kernel/cpu/mtrr/generic.c | 10
arch/x86/kernel/cpu/mtrr/main.c | 19
arch/x86/kernel/cpu/mtrr/mtrr.h | 11
arch/x86/kernel/cpu/mtrr/xen.c | 104 +
arch/x86/kernel/e820.c | 30
arch/x86/kernel/ioport.c | 29
arch/x86/kernel/microcode_core.c | 5
arch/x86/kernel/microcode_xen.c | 200 +++
arch/x86/kernel/paravirt.c | 1
arch/x86/kernel/pci-dma.c | 8
arch/x86/kernel/pci-swiotlb.c | 25
arch/x86/kernel/process.c | 27
arch/x86/kernel/process_32.c | 27
arch/x86/kernel/process_64.c | 33
arch/x86/kernel/setup.c | 4
arch/x86/kernel/traps.c | 33
arch/x86/mm/Makefile | 4
arch/x86/mm/init_32.c | 42
arch/x86/mm/pat.c | 2
arch/x86/mm/pgtable.c | 10
arch/x86/mm/tlb.c | 35
arch/x86/pci/Makefile | 1
arch/x86/pci/common.c | 18
arch/x86/pci/i386.c | 3
arch/x86/pci/init.c | 6
arch/x86/pci/xen.c | 51
arch/x86/xen/Kconfig | 33
arch/x86/xen/Makefile | 5
arch/x86/xen/apic.c | 57 +
arch/x86/xen/enlighten.c | 202 +++
arch/x86/xen/mmu.c | 455 ++++++++
arch/x86/xen/pci-swiotlb.c | 988 +++++++++++++++++
arch/x86/xen/pci.c | 111 ++
arch/x86/xen/setup.c | 50
arch/x86/xen/smp.c | 3
arch/x86/xen/time.c | 2
arch/x86/xen/vga.c | 67 +
arch/x86/xen/xen-ops.h | 19
block/blk-core.c | 2
drivers/acpi/acpica/hwsleep.c | 17
drivers/acpi/processor_core.c | 29
drivers/acpi/processor_idle.c | 23
drivers/acpi/processor_perflib.c | 10
drivers/acpi/sleep.c | 19
drivers/block/Kconfig | 1
drivers/char/agp/intel-agp.c | 17
drivers/char/hvc_xen.c | 99 +
drivers/net/Kconfig | 1
drivers/pci/Makefile | 2
drivers/pci/xen-iommu.c | 271 ++++
drivers/xen/Kconfig | 41
drivers/xen/Makefile | 23
drivers/xen/acpi.c | 23
drivers/xen/acpi_processor.c | 451 ++++++++
drivers/xen/balloon.c | 161 ++
drivers/xen/biomerge.c | 14
drivers/xen/blkback/Makefile | 3
drivers/xen/blkback/blkback.c | 658 +++++++++++
drivers/xen/blkback/common.h | 137 ++
drivers/xen/blkback/interface.c | 182 +++
drivers/xen/blkback/vbd.c | 118 ++
drivers/xen/blkback/xenbus.c | 542 +++++++++
drivers/xen/events.c | 422 +++++++
drivers/xen/evtchn.c | 1
drivers/xen/features.c | 2
drivers/xen/grant-table.c | 103 +
drivers/xen/mce.c | 213 +++
drivers/xen/netback/Makefile | 3
drivers/xen/netback/common.h | 221 +++
drivers/xen/netback/interface.c | 401 +++++++
drivers/xen/netback/netback.c | 1604 +++++++++++++++++++++++++++++
drivers/xen/netback/xenbus.c | 454 ++++++++
drivers/xen/pci.c | 124 ++
drivers/xen/xenbus/Makefile | 5
drivers/xen/xenbus/xenbus_comms.c | 1
drivers/xen/xenbus/xenbus_probe.c | 380 +-----
drivers/xen/xenbus/xenbus_probe.h | 29
drivers/xen/xenbus/xenbus_probe_backend.c | 298 +++++
drivers/xen/xenbus/xenbus_probe_frontend.c | 292 +++++
drivers/xen/xenfs/Makefile | 3
drivers/xen/xenfs/privcmd.c | 403 +++++++
drivers/xen/xenfs/super.c | 98 +
drivers/xen/xenfs/xenfs.h | 3
drivers/xen/xenfs/xenstored.c | 67 +
include/acpi/processor.h | 2
include/asm-generic/pci.h | 2
include/linux/interrupt.h | 1
include/linux/page-flags.h | 18
include/xen/Kbuild | 1
include/xen/acpi.h | 84 +
include/xen/balloon.h | 8
include/xen/blkif.h | 122 ++
include/xen/events.h | 27
include/xen/grant_table.h | 43
include/xen/interface/grant_table.h | 22
include/xen/interface/memory.h | 92 +
include/xen/interface/physdev.h | 51
include/xen/interface/platform.h | 336 ++++++
include/xen/interface/xen-mca.h | 429 +++++++
include/xen/interface/xen.h | 44
include/xen/privcmd.h | 80 +
include/xen/swiotlb.h | 102 +
include/xen/xen-ops.h | 11
include/xen/xenbus.h | 2
kernel/irq/manage.c | 3
lib/swiotlb.c | 5
mm/page_alloc.c | 14
139 files changed, 12619 insertions(+), 1339 deletions(-)
Index: xen.pvops.patch
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/Attic/xen.pvops.patch,v
retrieving revision 1.1.2.39
retrieving revision 1.1.2.40
diff -u -p -r1.1.2.39 -r1.1.2.40
--- xen.pvops.patch 3 Sep 2009 20:37:22 -0000 1.1.2.39
+++ xen.pvops.patch 5 Sep 2009 19:00:23 -0000 1.1.2.40
@@ -2040,6 +2040,24 @@ index 0000000..cb84abe
+#endif
+
+#endif /* _ASM_X86_XEN_PCI_H */
+diff --git a/arch/x86/include/asm/xen/swiotlb.h b/arch/x86/include/asm/xen/swiotlb.h
+new file mode 100644
+index 0000000..81d8502
+--- /dev/null
++++ b/arch/x86/include/asm/xen/swiotlb.h
+@@ -0,0 +1,12 @@
++#ifndef _ASM_X86_XEN_SWIOTLB_H
++#define _ASM_X86_XEN_SWIOTLB_H
++
++#ifdef CONFIG_PCI_XEN
++extern int xen_swiotlb_init(void);
++extern void xen_swiotlb_init_alloc(void);
++#else
++static inline int xen_swiotlb_init(void) { return -ENODEV; }
++static void xen_swiotlb_init_alloc(void) { }
++#endif
++
++#endif
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 430d5b2..96f9ecb 100644
--- a/arch/x86/kernel/Makefile
@@ -2140,7 +2158,7 @@ index ca93638..9eff23c 100644
#include "sleep.h"
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
-index d2ed6c5..18d957e 100644
+index d2ed6c5..77151ce 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -63,8 +63,11 @@
@@ -2200,7 +2218,17 @@ index d2ed6c5..18d957e 100644
if (sis_apic_bug)
writel(reg, &io_apic->index);
-@@ -3489,6 +3511,9 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+@@ -3140,6 +3162,9 @@ static int __init ioapic_init_sysfs(void)
+ struct sys_device * dev;
+ int i, size, error;
+
++ if (xen_initial_domain())
++ return 0;
++
+ error = sysdev_class_register(&ioapic_sysdev_class);
+ if (error)
+ return error;
+@@ -3489,6 +3514,9 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
if (type == PCI_CAP_ID_MSI && nvec > 1)
return 1;
@@ -2210,7 +2238,7 @@ index d2ed6c5..18d957e 100644
node = dev_to_node(&dev->dev);
irq_want = nr_irqs_gsi;
sub_handle = 0;
-@@ -3538,7 +3563,10 @@ error:
+@@ -3538,7 +3566,10 @@ error:
void arch_teardown_msi_irq(unsigned int irq)
{
@@ -2222,7 +2250,7 @@ index d2ed6c5..18d957e 100644
}
#if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP)
-@@ -3854,6 +3882,11 @@ void __init probe_nr_irqs_gsi(void)
+@@ -3854,6 +3885,11 @@ void __init probe_nr_irqs_gsi(void)
printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
}
@@ -2234,7 +2262,7 @@ index d2ed6c5..18d957e 100644
#ifdef CONFIG_SPARSE_IRQ
int __init arch_probe_nr_irqs(void)
{
-@@ -4147,6 +4180,11 @@ void __init ioapic_init_mappings(void)
+@@ -4147,6 +4183,11 @@ void __init ioapic_init_mappings(void)
struct resource *ioapic_res;
int i;
@@ -2885,36 +2913,46 @@ index 70ec9b9..cef3d70 100644
.start_context_switch = paravirt_nop,
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
-index 1a041bc..ee5162e 100644
+index 1a041bc..90da583 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
-@@ -10,6 +10,7 @@
+@@ -10,6 +10,8 @@
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>
+#include <asm/xen/iommu.h>
++#include <asm/xen/swiotlb.h>
static int forbid_dac __read_mostly;
-@@ -281,6 +282,8 @@ static int __init pci_iommu_init(void)
+@@ -128,6 +130,8 @@ void __init pci_iommu_alloc(void)
+
+ amd_iommu_detect();
+
++ xen_swiotlb_init_alloc();
++
+ pci_swiotlb_init();
+ }
+
+@@ -281,6 +285,10 @@ static int __init pci_iommu_init(void)
dma_debug_add_bus(&pci_bus_type);
#endif
+ xen_iommu_init();
+
++ xen_swiotlb_init();
++
calgary_iommu_init();
intel_iommu_init();
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
-index 6af96ee..efe3691 100644
+index 6af96ee..e8a3501 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
-@@ -11,32 +11,9 @@
- #include <asm/swiotlb.h>
- #include <asm/dma.h>
+@@ -13,31 +13,6 @@
+
+ int swiotlb __read_mostly;
--int swiotlb __read_mostly;
--
-void * __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
-{
- return alloc_bootmem_low_pages(size);
@@ -2929,8 +2967,7 @@ index 6af96ee..efe3691 100644
-{
- return paddr;
-}
-+#include <xen/swiotlb.h>
-
+-
-phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
-{
- return baddr;
@@ -2940,20 +2977,10 @@ index 6af96ee..efe3691 100644
-{
- return 0;
-}
-+int swiotlb __read_mostly;
-
+-
static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags)
-@@ -75,6 +52,9 @@ void __init pci_swiotlb_init(void)
- iommu_pass_through)
- swiotlb = 1;
- #endif
-+ if (xen_wants_swiotlb())
-+ swiotlb = 1;
-+
- if (swiotlb_force)
- swiotlb = 1;
- if (swiotlb) {
+ {
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 071166a..cbf8440 100644
--- a/arch/x86/kernel/process.c
@@ -3679,7 +3706,7 @@ index 0000000..ee0db39
+#endif
+}
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
-index eb33aaa..97a2764 100644
+index eb33aaa..7119d45 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -51,6 +51,7 @@
@@ -3970,7 +3997,7 @@ index eb33aaa..97a2764 100644
if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
-@@ -1019,16 +1157,9 @@ asmlinkage void __init xen_start_kernel(void)
+@@ -1019,17 +1153,8 @@ asmlinkage void __init xen_start_kernel(void)
xen_smp_init();
@@ -3984,11 +4011,11 @@ index eb33aaa..97a2764 100644
- __supported_pte_mask &= ~_PAGE_GLOBAL;
- if (!xen_initial_domain())
- __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
-+ __supported_pte_mask |= _PAGE_IOMAP;
-
+-
#ifdef CONFIG_X86_64
/* Work out if we support NX */
-@@ -1044,6 +1175,7 @@ asmlinkage void __init xen_start_kernel(void)
+ check_efer();
+@@ -1044,6 +1169,7 @@ asmlinkage void __init xen_start_kernel(void)
xen_raw_console_write("mapping kernel into physical memory\n");
pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
@@ -3996,7 +4023,7 @@ index eb33aaa..97a2764 100644
init_mm.pgd = pgd;
-@@ -1053,9 +1185,21 @@ asmlinkage void __init xen_start_kernel(void)
+@@ -1053,9 +1179,21 @@ asmlinkage void __init xen_start_kernel(void)
if (xen_feature(XENFEAT_supervisor_mode_kernel))
pv_info.kernel_rpl = 0;
@@ -4018,7 +4045,7 @@ index eb33aaa..97a2764 100644
#ifdef CONFIG_X86_32
/* set up basic CPUID stuff */
cpu_detect(&new_cpu_data);
-@@ -1075,6 +1219,16 @@ asmlinkage void __init xen_start_kernel(void)
+@@ -1075,6 +1213,16 @@ asmlinkage void __init xen_start_kernel(void)
add_preferred_console("xenboot", 0, NULL);
add_preferred_console("tty", 0, NULL);
add_preferred_console("hvc", 0, NULL);
@@ -4586,126 +4613,1061 @@ index 4ceb285..a654a49 100644
+ struct remap_data *rmd = data;
+ pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
+
-+ rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr;
-+ rmd->mmu_update->val = pte_val_ma(pte);
-+ rmd->mmu_update++;
++ rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr;
++ rmd->mmu_update->val = pte_val_ma(pte);
++ rmd->mmu_update++;
++
++ return 0;
++}
++
++int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
++ unsigned long addr,
++ unsigned long mfn, int nr,
++ pgprot_t prot, unsigned domid)
++{
++ struct remap_data rmd;
++ struct mmu_update mmu_update[REMAP_BATCH_SIZE];
++ int batch;
++ unsigned long range;
++ int err = 0;
++
++ prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
++
++ vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
++
++ rmd.mfn = mfn;
++ rmd.prot = prot;
++
++ while (nr) {
++ batch = min(REMAP_BATCH_SIZE, nr);
++ range = (unsigned long)batch << PAGE_SHIFT;
++
++ rmd.mmu_update = mmu_update;
++ err = apply_to_page_range(vma->vm_mm, addr, range,
++ remap_area_mfn_pte_fn, &rmd);
++ if (err)
++ goto out;
++
++ err = -EFAULT;
++ if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
++ goto out;
++
++ nr -= batch;
++ addr += range;
++ }
++
++ err = 0;
++out:
++
++ flush_tlb_all();
++
++ return err;
++}
++EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
++
+ #ifdef CONFIG_XEN_DEBUG_FS
+
+ static struct dentry *d_mmu_debug;
+diff --git a/arch/x86/xen/pci-swiotlb.c b/arch/x86/xen/pci-swiotlb.c
+new file mode 100644
+index 0000000..19b78e2
+--- /dev/null
++++ b/arch/x86/xen/pci-swiotlb.c
+@@ -0,0 +1,988 @@
++/*
++ * Dynamic DMA mapping support.
++ *
++ * This implementation is a fallback for platforms that do not support
++ * I/O TLBs (aka DMA address translation hardware).
++ * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick at intel.com>
++ * Copyright (C) 2000 Goutham Rao <goutham.rao at intel.com>
++ * Copyright (C) 2000, 2003 Hewlett-Packard Co
++ * David Mosberger-Tang <davidm at hpl.hp.com>
++ *
++ * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
++ * 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid
++ * unnecessary i-cache flushing.
++ * 04/07/.. ak Better overflow handling. Assorted fixes.
++ * 05/09/10 linville Add support for syncing ranges, support syncing for
++ * DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
++ * 08/12/11 beckyb Add highmem support
++ */
++
++#include <linux/cache.h>
++#include <linux/dma-mapping.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/spinlock.h>
++#include <linux/string.h>
++#include <xen/swiotlb.h>
++#include <linux/pfn.h>
++#include <linux/types.h>
++#include <linux/ctype.h>
++#include <linux/highmem.h>
++
++#include <asm/io.h>
++#include <asm/dma.h>
++#include <asm/scatterlist.h>
++
++#include <linux/init.h>
++#include <linux/bootmem.h>
++#include <linux/iommu-helper.h>
++#include <asm/iommu.h>
++
++#include <xen/interface/xen.h>
++#include <xen/grant_table.h>
++#include <xen/page.h>
++#include <xen/xen-ops.h>
++
++#define OFFSET(val,align) ((unsigned long) \
++ ( (val) & ( (align) - 1)))
++
++#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
++
++/*
++ * Minimum IO TLB size to bother booting with. Systems with mainly
++ * 64bit capable cards will only lightly use the swiotlb. If we can't
++ * allocate a contiguous 1MB, we're probably in trouble anyway.
++ */
++#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
++
++/*
++ * Enumeration for sync targets
++ */
++enum dma_sync_target {
++ SYNC_FOR_CPU = 0,
++ SYNC_FOR_DEVICE = 1,
++};
++
++/*
++ * Used to do a quick range check in unmap_single and
++ * sync_single_*, to see if the memory was in fact allocated by this
++ * API.
++ */
++static char *xen_io_tlb_start, *xen_io_tlb_end;
++
++/*
++ * The number of IO TLB blocks (in groups of 64) between xen_io_tlb_start and
++ * xen_io_tlb_end. This is command line adjustable via setup_xen_io_tlb_npages.
++ */
++static unsigned long xen_io_tlb_nslabs;
++
++/*
++ * When the IOMMU overflows we return a fallback buffer. This sets the size.
++ */
++static unsigned long xen_io_tlb_overflow = 32*1024;
++
++void *xen_io_tlb_overflow_buffer;
++
++/*
++ * This is a free list describing the number of free entries available from
++ * each index
++ */
++static unsigned int *xen_io_tlb_list;
++static unsigned int xen_io_tlb_index;
++
++/*
++ * We need to save away the original address corresponding to a mapped entry
++ * for the sync operations.
++ */
++static phys_addr_t *xen_io_tlb_orig_addr;
++
++/*
++ * Protect the above data structures in the map and unmap calls
++ */
++static DEFINE_SPINLOCK(xen_io_tlb_lock);
++
++
++void * __weak __init xen_swiotlb_alloc_boot(size_t size, unsigned long nslabs)
++{
++ return alloc_bootmem_low_pages(size);
++}
++
++void * __weak xen_swiotlb_alloc(unsigned order, unsigned long nslabs)
++{
++ return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
++}
++
++dma_addr_t __weak xen_swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
++{
++ return phys_to_machine(XPADDR(paddr)).maddr;
++}
++
++phys_addr_t __weak xen_swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
++{
++ return machine_to_phys(XMADDR(baddr)).paddr;
++}
++
++static dma_addr_t xen_swiotlb_virt_to_bus(struct device *hwdev,
++ volatile void *address)
++{
++ return xen_swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
++}
++
++void * __weak xen_swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t address)
++{
++ return phys_to_virt(xen_swiotlb_bus_to_phys(hwdev, address));
++}
++
++int __weak xen_swiotlb_arch_address_needs_mapping(struct device *hwdev,
++ dma_addr_t addr, size_t size)
++{
++ return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
++}
++
++static int check_pages_physically_contiguous(unsigned long pfn,
++ unsigned int offset,
++ size_t length)
++{
++ unsigned long next_mfn;
++ int i;
++ int nr_pages;
++
++ next_mfn = pfn_to_mfn(pfn);
++ nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;
++
++ for (i = 1; i < nr_pages; i++) {
++ if (pfn_to_mfn(++pfn) != ++next_mfn)
++ return 0;
++ }
++ return 1;
++}
++
++static int range_straddles_page_boundary(phys_addr_t p, size_t size)
++{
++ unsigned long pfn = PFN_DOWN(p);
++ unsigned int offset = p & ~PAGE_MASK;
++
++ if (offset + size <= PAGE_SIZE)
++ return 0;
++ if (check_pages_physically_contiguous(pfn, offset, size))
++ return 0;
++ return 1;
++}
++int __weak xen_swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
++{
++ return range_straddles_page_boundary(paddr, size);
++}
++
++static void xen_swiotlb_print_info(unsigned long bytes)
++{
++ phys_addr_t pstart, pend;
++
++ pstart = virt_to_phys(xen_io_tlb_start);
++ pend = virt_to_phys(xen_io_tlb_end);
++
++ printk(KERN_INFO "Placing %luMB Xen software IO TLB between %p - %p\n",
++ bytes >> 20, xen_io_tlb_start, xen_io_tlb_end);
++ printk(KERN_INFO "Xen software IO TLB at phys %#llx - %#llx\n",
++ (unsigned long long)pstart,
++ (unsigned long long)pend);
++}
++
++/*
++ * Statically reserve bounce buffer space and initialize bounce buffer data
++ * structures for the software IO TLB used to implement the DMA API.
++ */
++void __init
++xen_swiotlb_init_with_default_size(size_t default_size)
++{
++ unsigned long i, bytes;
++
++ if (!xen_io_tlb_nslabs) {
++ xen_io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
++ xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
++ }
++
++ bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
++
++ /*
++ * Get IO TLB memory from the low pages
++ */
++ xen_io_tlb_start = xen_swiotlb_alloc_boot(bytes, xen_io_tlb_nslabs);
++ if (!xen_io_tlb_start)
++ panic("Cannot allocate Xen-SWIOTLB buffer");
++ xen_io_tlb_end = xen_io_tlb_start + bytes;
++ xen_swiotlb_fixup(xen_io_tlb_start, bytes, xen_io_tlb_nslabs);
++ /*
++ * Allocate and initialize the free list array. This array is used
++ * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
++ * between xen_io_tlb_start and xen_io_tlb_end.
++ */
++ xen_io_tlb_list = alloc_bootmem(xen_io_tlb_nslabs * sizeof(int));
++ for (i = 0; i < xen_io_tlb_nslabs; i++)
++ xen_io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
++ xen_io_tlb_index = 0;
++ xen_io_tlb_orig_addr = alloc_bootmem(xen_io_tlb_nslabs * sizeof(phys_addr_t));
++
++ /*
++ * Get the overflow emergency buffer
++ */
++ xen_io_tlb_overflow_buffer = xen_swiotlb_alloc_boot(xen_io_tlb_overflow,
++ xen_io_tlb_overflow >> IO_TLB_SHIFT);
++ if (!xen_io_tlb_overflow_buffer)
++ panic("Cannot allocate Xen-SWIOTLB overflow buffer!\n");
++
++ xen_swiotlb_fixup(xen_io_tlb_overflow_buffer, xen_io_tlb_overflow,
++ xen_io_tlb_overflow >> IO_TLB_SHIFT);
++
++ xen_swiotlb_print_info(bytes);
++}
++
++/*
++ * Systems with larger DMA zones (those that don't support ISA) can
++ * initialize the swiotlb later using the slab allocator if needed.
++ * This should be just like above, but with some error catching.
++ */
++int
++xen_swiotlb_late_init_with_default_size(size_t default_size)
++{
++ unsigned long i, bytes, req_nslabs = xen_io_tlb_nslabs;
++ unsigned int order;
++
++ if (!xen_io_tlb_nslabs) {
++ xen_io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
++ xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
++ }
++
++ /*
++ * Get IO TLB memory from the low pages
++ */
++ order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
++ xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
++ bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
++
++ while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
++ xen_io_tlb_start = xen_swiotlb_alloc(order, xen_io_tlb_nslabs);
++ if (xen_io_tlb_start)
++ break;
++ order--;
++ }
++
++ if (!xen_io_tlb_start)
++ goto cleanup1;
++
++ if (order != get_order(bytes)) {
++ printk(KERN_WARNING "Warning: only able to allocate %ld MB "
++ "for Xen software IO TLB\n", (PAGE_SIZE << order) >> 20);
++ xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
++ bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
++ }
++ xen_io_tlb_end = xen_io_tlb_start + bytes;
++ xen_swiotlb_fixup(xen_io_tlb_start, bytes, xen_io_tlb_nslabs);
++ memset(xen_io_tlb_start, 0, bytes);
++
++ /*
++ * Allocate and initialize the free list array. This array is used
++ * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
++ * between xen_io_tlb_start and xen_io_tlb_end.
++ */
++ xen_io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
++ get_order(xen_io_tlb_nslabs * sizeof(int)));
++ if (!xen_io_tlb_list)
++ goto cleanup2;
++
++ for (i = 0; i < xen_io_tlb_nslabs; i++)
++ xen_io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
++ xen_io_tlb_index = 0;
++
++ xen_io_tlb_orig_addr = (phys_addr_t *)
++ __get_free_pages(GFP_KERNEL,
++ get_order(xen_io_tlb_nslabs *
++ sizeof(phys_addr_t)));
++ if (!xen_io_tlb_orig_addr)
++ goto cleanup3;
++
++ memset(xen_io_tlb_orig_addr, 0, xen_io_tlb_nslabs * sizeof(phys_addr_t));
++
++ /*
++ * Get the overflow emergency buffer
++ */
++ xen_io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
++ get_order(xen_io_tlb_overflow));
++ if (!xen_io_tlb_overflow_buffer)
++ goto cleanup4;
++ xen_swiotlb_fixup(xen_io_tlb_overflow_buffer, xen_io_tlb_overflow,
++ xen_io_tlb_overflow >> IO_TLB_SHIFT);
++
++ xen_swiotlb_print_info(bytes);
++
++ return 0;
++
++cleanup4:
++ free_pages((unsigned long)xen_io_tlb_orig_addr,
++ get_order(xen_io_tlb_nslabs * sizeof(phys_addr_t)));
++ xen_io_tlb_orig_addr = NULL;
++cleanup3:
++ free_pages((unsigned long)xen_io_tlb_list, get_order(xen_io_tlb_nslabs *
++ sizeof(int)));
++ xen_io_tlb_list = NULL;
++cleanup2:
++ xen_io_tlb_end = NULL;
++ free_pages((unsigned long)xen_io_tlb_start, order);
++ xen_io_tlb_start = NULL;
++cleanup1:
++ xen_io_tlb_nslabs = req_nslabs;
++ return -ENOMEM;
++}
++
++static inline int
++address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
++{
++ return xen_swiotlb_arch_address_needs_mapping(hwdev, addr, size);
++}
++
++static inline int range_needs_mapping(phys_addr_t paddr, size_t size)
++{
++ return swiotlb_force || xen_swiotlb_arch_range_needs_mapping(paddr, size);
++}
++
++static int is_xen_swiotlb_buffer(char *addr)
++{
++ return addr >= xen_io_tlb_start && addr < xen_io_tlb_end;
++}
++
++/*
++ * Bounce: copy the swiotlb buffer back to the original dma location
++ */
++static void xen_swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
++ enum dma_data_direction dir)
++{
++ unsigned long pfn = PFN_DOWN(phys);
++
++ if (PageHighMem(pfn_to_page(pfn))) {
++ /* The buffer does not have a mapping. Map it in and copy */
++ unsigned int offset = phys & ~PAGE_MASK;
++ char *buffer;
++ unsigned int sz = 0;
++ unsigned long flags;
++
++ while (size) {
++ sz = min_t(size_t, PAGE_SIZE - offset, size);
++
++ local_irq_save(flags);
++ buffer = kmap_atomic(pfn_to_page(pfn),
++ KM_BOUNCE_READ);
++ if (dir == DMA_TO_DEVICE)
++ memcpy(dma_addr, buffer + offset, sz);
++ else
++ memcpy(buffer + offset, dma_addr, sz);
++ kunmap_atomic(buffer, KM_BOUNCE_READ);
++ local_irq_restore(flags);
++
++ size -= sz;
++ pfn++;
++ dma_addr += sz;
++ offset = 0;
++ }
++ } else {
++ if (dir == DMA_TO_DEVICE)
++ memcpy(dma_addr, phys_to_virt(phys), size);
++ else
++ memcpy(phys_to_virt(phys), dma_addr, size);
++ }
++}
++
++/*
++ * Allocates bounce buffer and returns its kernel virtual address.
++ */
++static void *
++map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
++{
++ unsigned long flags;
++ char *dma_addr;
++ unsigned int nslots, stride, index, wrap;
++ int i;
++ unsigned long start_dma_addr;
++ unsigned long mask;
++ unsigned long offset_slots;
++ unsigned long max_slots;
++
++ mask = dma_get_seg_boundary(hwdev);
++ start_dma_addr = xen_swiotlb_virt_to_bus(hwdev, xen_io_tlb_start) & mask;
++
++ offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
++
++ /*
++ * Carefully handle integer overflow which can occur when mask == ~0UL.
++ */
++ max_slots = mask + 1
++ ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
++ : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
++
++ /*
++ * For mappings greater than a page, we limit the stride (and
++ * hence alignment) to a page size.
++ */
++ nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
++ if (size > PAGE_SIZE)
++ stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
++ else
++ stride = 1;
++
++ BUG_ON(!nslots);
++
++ /*
++ * Find suitable number of IO TLB entries size that will fit this
++ * request and allocate a buffer from that IO TLB pool.
++ */
++ spin_lock_irqsave(&xen_io_tlb_lock, flags);
++ index = ALIGN(xen_io_tlb_index, stride);
++ if (index >= xen_io_tlb_nslabs)
++ index = 0;
++ wrap = index;
++
++ do {
++ while (iommu_is_span_boundary(index, nslots, offset_slots,
++ max_slots)) {
++ index += stride;
++ if (index >= xen_io_tlb_nslabs)
++ index = 0;
++ if (index == wrap)
++ goto not_found;
++ }
++
++ /*
++ * If we find a slot that indicates we have 'nslots' number of
++ * contiguous buffers, we allocate the buffers from that slot
++ * and mark the entries as '0' indicating unavailable.
++ */
++ if (xen_io_tlb_list[index] >= nslots) {
++ int count = 0;
++
++ for (i = index; i < (int) (index + nslots); i++)
++ xen_io_tlb_list[i] = 0;
++ for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && xen_io_tlb_list[i]; i--)
++ xen_io_tlb_list[i] = ++count;
++ dma_addr = xen_io_tlb_start + (index << IO_TLB_SHIFT);
++
++ /*
++ * Update the indices to avoid searching in the next
++ * round.
++ */
++ xen_io_tlb_index = ((index + nslots) < xen_io_tlb_nslabs
++ ? (index + nslots) : 0);
++
++ goto found;
++ }
++ index += stride;
++ if (index >= xen_io_tlb_nslabs)
++ index = 0;
++ } while (index != wrap);
++
++not_found:
++ spin_unlock_irqrestore(&xen_io_tlb_lock, flags);
++ return NULL;
++found:
++ spin_unlock_irqrestore(&xen_io_tlb_lock, flags);
++
++ /*
++ * Save away the mapping from the original address to the DMA address.
++ * This is needed when we sync the memory. Then we sync the buffer if
++ * needed.
++ */
++ for (i = 0; i < nslots; i++)
++ xen_io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
++ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
++ xen_swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
++
++ return dma_addr;
++}
++
++/*
++ * dma_addr is the kernel virtual address of the bounce buffer to unmap.
++ */
++static void
++do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
++{
++ unsigned long flags;
++ int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
++ int index = (dma_addr - xen_io_tlb_start) >> IO_TLB_SHIFT;
++ phys_addr_t phys = xen_io_tlb_orig_addr[index];
++
++ /*
++ * First, sync the memory before unmapping the entry
++ */
++ if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
++ xen_swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
++
++ /*
++ * Return the buffer to the free list by setting the corresponding
++ * entries to indicate the number of contiguous entries available.
++ * While returning the entries to the free list, we merge the entries
++ * with slots below and above the pool being returned.
++ */
++ spin_lock_irqsave(&xen_io_tlb_lock, flags);
++ {
++ count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
++ xen_io_tlb_list[index + nslots] : 0);
++ /*
++ * Step 1: return the slots to the free list, merging the
++ * slots with succeeding slots
++ */
++ for (i = index + nslots - 1; i >= index; i--)
++ xen_io_tlb_list[i] = ++count;
++ /*
++ * Step 2: merge the returned slots with the preceding slots,
++ * if available (non zero)
++ */
++ for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && xen_io_tlb_list[i]; i--)
++ xen_io_tlb_list[i] = ++count;
++ }
++ spin_unlock_irqrestore(&xen_io_tlb_lock, flags);
++}
++
++static void
++sync_single(struct device *hwdev, char *dma_addr, size_t size,
++ int dir, int target)
++{
++ int index = (dma_addr - xen_io_tlb_start) >> IO_TLB_SHIFT;
++ phys_addr_t phys = xen_io_tlb_orig_addr[index];
++
++ phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));
++
++ switch (target) {
++ case SYNC_FOR_CPU:
++ if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
++ xen_swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
++ else
++ BUG_ON(dir != DMA_TO_DEVICE);
++ break;
++ case SYNC_FOR_DEVICE:
++ if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
++ xen_swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
++ else
++ BUG_ON(dir != DMA_FROM_DEVICE);
++ break;
++ default:
++ BUG();
++ }
++}
++
++void *
++xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
++ dma_addr_t *dma_handle, gfp_t flags)
++{
++ void *ret;
++ unsigned int order = get_order(size);
++ unsigned long vstart;
++ u64 mask;
++
++ /*
++ * Ignore region specifiers - the kernel's ideas of
++ * pseudo-phys memory layout has nothing to do with the
++ * machine physical layout. We can't allocate highmem
++ * because we can't return a pointer to it.
++ */
++ flags &= ~(__GFP_DMA | __GFP_HIGHMEM);
++
++ if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
++ return ret;
++
++ vstart = __get_free_pages(flags, order);
++ ret = (void *)vstart;
++
++ if (hwdev != NULL && hwdev->coherent_dma_mask)
++ mask = hwdev->coherent_dma_mask;
++ else
++ mask = DMA_BIT_MASK(32);
++
++ if (ret != NULL) {
++ if (xen_create_contiguous_region(vstart, order,
++ fls64(mask)) != 0) {
++ free_pages(vstart, order);
++ return NULL;
++ }
++ memset(ret, 0, size);
++ *dma_handle = virt_to_machine(ret).maddr;
++ }
++ return ret;
++}
++
++void
++xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
++ dma_addr_t dma_handle)
++{
++ int order = get_order(size);
++
++ if (dma_release_from_coherent(hwdev, order, vaddr))
++ return;
++
++ xen_destroy_contiguous_region((unsigned long)vaddr, order);
++ free_pages((unsigned long)vaddr, order);
++}
++
++static void
++xen_swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
++{
++ /*
++ * Ran out of IOMMU space for this operation. This is very bad.
++ * Unfortunately the drivers cannot handle this operation properly.
++ * unless they check for dma_mapping_error (most don't)
++ * When the mapping is small enough return a static buffer to limit
++ * the damage, or panic when the transfer is too big.
++ */
++ printk(KERN_ERR "Xen-DMA: Out of Xen-SWIOTLB space for %zu bytes at "
++ "device %s\n", size, dev ? dev_name(dev) : "?");
++
++ if (size > xen_io_tlb_overflow && do_panic) {
++ if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
++ panic("Xen-DMA: Memory would be corrupted\n");
++ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
++ panic("Xen-DMA: Random memory would be DMAed\n");
++ }
++}
++
++/*
++ * Map a single buffer of the indicated size for DMA in streaming mode. The
++ * physical address to use is returned.
++ *
++ * Once the device is given the dma address, the device owns this memory until
++ * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
++ */
++dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
++ unsigned long offset, size_t size,
++ enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ phys_addr_t phys = page_to_phys(page) + offset;
++ dma_addr_t dev_addr = xen_swiotlb_phys_to_bus(dev, phys);
++ void *map;
++
++ BUG_ON(dir == DMA_NONE);
++ /*
++ * If the address happens to be in the device's DMA window,
++ * we can safely return the device addr and not worry about bounce
++ * buffering it.
++ */
++ if (!address_needs_mapping(dev, dev_addr, size) &&
++ !range_needs_mapping(phys, size))
++ return dev_addr;
++
++ /*
++ * Oh well, have to allocate and map a bounce buffer.
++ */
++ map = map_single(dev, phys, size, dir);
++ if (!map) {
++ xen_swiotlb_full(dev, size, dir, 1);
++ map = xen_io_tlb_overflow_buffer;
++ }
++
++ dev_addr = xen_swiotlb_virt_to_bus(dev, map);
++
++ /*
++ * Ensure that the address returned is DMA'ble
++ */
++ if (address_needs_mapping(dev, dev_addr, size))
++ panic("xen_swiotlb_map_single: bounce buffer is not DMA'ble");
++
++ return dev_addr;
++}
++
++/*
++ * Unmap a single streaming mode DMA translation. The dma_addr and size must
++ * match what was provided for in a previous xen_swiotlb_map_page call. All
++ * other usages are undefined.
++ *
++ * After this call, reads by the cpu to the buffer are guaranteed to see
++ * whatever the device wrote there.
++ */
++static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
++ size_t size, int dir)
++{
++ char *dma_addr = xen_swiotlb_bus_to_virt(hwdev, dev_addr);
++
++ BUG_ON(dir == DMA_NONE);
++
++ if (is_xen_swiotlb_buffer(dma_addr)) {
++ do_unmap_single(hwdev, dma_addr, size, dir);
++ return;
++ }
++
++ if (dir != DMA_FROM_DEVICE)
++ return;
++
++ dma_mark_clean(dma_addr, size);
++}
++
++void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
++ size_t size, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ unmap_single(hwdev, dev_addr, size, dir);
++}
++
++/*
++ * Make physical memory consistent for a single streaming mode DMA translation
++ * after a transfer.
++ *
++ * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
++ * using the cpu, yet do not wish to teardown the dma mapping, you must
++ * call this function before doing so. At the next point you give the dma
++ * address back to the card, you must first perform a
++ * xen_swiotlb_dma_sync_for_device, and then the device again owns the buffer
++ */
++static void
++xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
++ size_t size, int dir, int target)
++{
++ char *dma_addr = xen_swiotlb_bus_to_virt(hwdev, dev_addr);
++
++ BUG_ON(dir == DMA_NONE);
++
++ if (is_xen_swiotlb_buffer(dma_addr)) {
++ sync_single(hwdev, dma_addr, size, dir, target);
++ return;
++ }
++
++ if (dir != DMA_FROM_DEVICE)
++ return;
+
-+ return 0;
++ dma_mark_clean(dma_addr, size);
+}
+
-+int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
-+ unsigned long addr,
-+ unsigned long mfn, int nr,
-+ pgprot_t prot, unsigned domid)
++void
++xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
++ size_t size, enum dma_data_direction dir)
+{
-+ struct remap_data rmd;
-+ struct mmu_update mmu_update[REMAP_BATCH_SIZE];
-+ int batch;
-+ unsigned long range;
-+ int err = 0;
++ xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
++}
+
-+ prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
++void
++xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
++ size_t size, enum dma_data_direction dir)
++{
++ xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
++}
+
-+ vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
++/*
++ * Same as above, but for a sub-range of the mapping.
++ */
++static void
++xen_swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
++ unsigned long offset, size_t size,
++ int dir, int target)
++{
++ xen_swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
++}
+
-+ rmd.mfn = mfn;
-+ rmd.prot = prot;
++void
++xen_swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
++ unsigned long offset, size_t size,
++ enum dma_data_direction dir)
++{
++ xen_swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
++ SYNC_FOR_CPU);
++}
+
-+ while (nr) {
-+ batch = min(REMAP_BATCH_SIZE, nr);
-+ range = (unsigned long)batch << PAGE_SHIFT;
++void
++xen_swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
++ unsigned long offset, size_t size,
++ enum dma_data_direction dir)
++{
++ xen_swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
++ SYNC_FOR_DEVICE);
++}
+
-+ rmd.mmu_update = mmu_update;
-+ err = apply_to_page_range(vma->vm_mm, addr, range,
-+ remap_area_mfn_pte_fn, &rmd);
-+ if (err)
-+ goto out;
++/*
++ * Map a set of buffers described by scatterlist in streaming mode for DMA.
++ * This is the scatter-gather version of the above xen_swiotlb_map_page
++ * interface. Here the scatter gather list elements are each tagged with the
++ * appropriate dma address and length. They are obtained via
++ * sg_dma_{address,length}(SG).
++ *
++ * NOTE: An implementation may be able to use a smaller number of
++ * DMA address/length pairs than there are SG table elements.
++ * (for example via virtual mapping capabilities)
++ * The routine returns the number of addr/length pairs actually
++ * used, at most nents.
++ *
++ * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
++ * same here.
++ */
++int
++xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
++ enum dma_data_direction dir, struct dma_attrs *attrs)
++{
++ struct scatterlist *sg;
++ int i;
+
-+ err = -EFAULT;
-+ if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
-+ goto out;
++ BUG_ON(dir == DMA_NONE);
+
-+ nr -= batch;
-+ addr += range;
++ for_each_sg(sgl, sg, nelems, i) {
++ phys_addr_t paddr = sg_phys(sg);
++ dma_addr_t dev_addr = xen_swiotlb_phys_to_bus(hwdev, paddr);
++
++ if (range_needs_mapping(paddr, sg->length) ||
++ address_needs_mapping(hwdev, dev_addr, sg->length)) {
++ void *map = map_single(hwdev, sg_phys(sg),
++ sg->length, dir);
++ if (!map) {
++ /* Don't panic here, we expect map_sg users
++ to do proper error handling. */
++ xen_swiotlb_full(hwdev, sg->length, dir, 0);
++ xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
++ attrs);
++ sgl[0].dma_length = 0;
++ return 0;
++ }
++ sg->dma_address = xen_swiotlb_virt_to_bus(hwdev, map);
++ } else
++ sg->dma_address = dev_addr;
++ sg->dma_length = sg->length;
+ }
++ return nelems;
++}
+
-+ err = 0;
-+out:
++int
++xen_swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
++ int dir)
++{
++ return xen_swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
++}
+
-+ flush_tlb_all();
++/*
++ * Unmap a set of streaming mode DMA translations. Again, cpu read rules
++ * concerning calls here are the same as for xen_swiotlb_unmap_page() above.
++ */
++void
++xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
++ int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
++{
++ struct scatterlist *sg;
++ int i;
+
-+ return err;
-+}
-+EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
++ BUG_ON(dir == DMA_NONE);
+
- #ifdef CONFIG_XEN_DEBUG_FS
-
- static struct dentry *d_mmu_debug;
-diff --git a/arch/x86/xen/pci-swiotlb.c b/arch/x86/xen/pci-swiotlb.c
-new file mode 100644
-index 0000000..3b96c87
---- /dev/null
-+++ b/arch/x86/xen/pci-swiotlb.c
-@@ -0,0 +1,53 @@
-+#include <linux/bootmem.h>
-+#include <linux/gfp.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/swiotlb.h>
++ for_each_sg(sgl, sg, nelems, i)
++ unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
+
-+#include <xen/swiotlb.h>
-+#include <asm/xen/hypervisor.h>
++}
+
-+/*
-+ * This file defines overrides for weak functions with default
-+ * implementations in lib/swiotlb.c.
-+ */
++void
++xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
++ int dir)
++{
++ return xen_swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
++}
+
-+void * __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
++/*
++ * Make physical memory consistent for a set of streaming mode DMA translations
++ * after a transfer.
++ *
++ * The same as xen_swiotlb_sync_single_* but for a scatter-gather list, same rules
++ * and usage.
++ */
++static void
++xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
++ int nelems, int dir, int target)
+{
-+ void *ret = alloc_bootmem_low_pages(size);
++ struct scatterlist *sg;
++ int i;
+
-+ if (ret && xen_pv_domain())
-+ xen_swiotlb_fixup(ret, size, nslabs);
++ for_each_sg(sgl, sg, nelems, i)
++ xen_swiotlb_sync_single(hwdev, sg->dma_address,
++ sg->dma_length, dir, target);
++}
+
-+ return ret;
++void
++xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
++ int nelems, enum dma_data_direction dir)
++{
++ xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
+}
+
-+void *swiotlb_alloc(unsigned order, unsigned long nslabs)
++void
++xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
++ int nelems, enum dma_data_direction dir)
+{
-+ /* Never called on x86. Warn, just in case it ever is. */
-+ WARN_ON(1);
-+ return NULL;
++ xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
+}
+
-+dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
++int
++xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
+{
-+ if (xen_pv_domain())
-+ return xen_phys_to_bus(paddr);
++ return (dma_addr == xen_swiotlb_virt_to_bus(hwdev, xen_io_tlb_overflow_buffer));
++}
+
-+ return paddr;
++/*
++ * Return whether the given device DMA address mask can be supported
++ * properly. For example, if your device can only drive the low 24-bits
++ * during bus mastering, then you would pass 0x00ffffff as the mask to
++ * this function.
++ */
++int
++xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
++{
++ return xen_swiotlb_virt_to_bus(hwdev, xen_io_tlb_end - 1) <= mask;
+}
+
-+phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
++static int max_dma_bits = 32;
++
++void
++xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
+{
-+ if (xen_pv_domain())
-+ return xen_bus_to_phys(baddr);
++ int i, rc;
++ int dma_bits;
++
++ printk(KERN_DEBUG "xen_swiotlb_fixup: buf=%p size=%zu\n",
++ buf, size);
++
++ dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
++
++ i = 0;
++ do {
++ int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);
++
++ do {
++ rc = xen_create_contiguous_region(
++ (unsigned long)buf + (i << IO_TLB_SHIFT),
++ get_order(slabs << IO_TLB_SHIFT),
++ dma_bits);
++ } while (rc && dma_bits++ < max_dma_bits);
++ if (rc)
++ panic("xen_create_contiguous_region failed\n");
+
-+ return baddr;
++ i += slabs;
++ } while(i < nslabs);
+}
++static struct dma_map_ops xen_swiotlb_dma_ops = {
++ .mapping_error = xen_swiotlb_dma_mapping_error,
++ .alloc_coherent = xen_swiotlb_alloc_coherent,
++ .free_coherent = xen_swiotlb_free_coherent,
++ .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
++ .sync_single_for_device = xen_swiotlb_sync_single_for_device,
++ .sync_single_range_for_cpu = xen_swiotlb_sync_single_range_for_cpu,
++ .sync_single_range_for_device = xen_swiotlb_sync_single_range_for_device,
++ .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
++ .sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
++ .map_sg = xen_swiotlb_map_sg_attrs,
++ .unmap_sg = xen_swiotlb_unmap_sg_attrs,
++ .map_page = xen_swiotlb_map_page,
++ .unmap_page = xen_swiotlb_unmap_page,
++ .dma_supported = NULL,
++};
+
-+int swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
++int __init xen_swiotlb_init(void)
+{
-+ if (xen_pv_domain())
-+ return xen_range_needs_mapping(paddr, size);
-+
-+ return 0;
++ if (xen_pv_domain() && xen_initial_domain()) {
++ iommu_detected = 1;
++ return 0;
++ }
++ return -ENODEV;
++}
++void __init xen_swiotlb_init_alloc(void)
++{
++ if (xen_pv_domain() && xen_initial_domain()) {
++ printk(KERN_INFO "PCI-DMA: Using Xen software bounce buffering for IO (Xen-SWIOTLB)\n");
++ xen_swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */
++ dma_ops = &xen_swiotlb_dma_ops;
++ }
+}
diff --git a/arch/x86/xen/pci.c b/arch/x86/xen/pci.c
new file mode 100644
-index 0000000..60bab67
+index 0000000..44d91ad
--- /dev/null
+++ b/arch/x86/xen/pci.c
-@@ -0,0 +1,112 @@
+@@ -0,0 +1,111 @@
+#include <linux/kernel.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
@@ -4798,13 +5760,12 @@ index 0000000..60bab67
+#ifdef CONFIG_PCI_MSI
+int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
-+ unsigned int irq;
-+ int ret;
++ int irq, ret;
+ struct msi_desc *msidesc;
+
+ list_for_each_entry(msidesc, &dev->msi_list, list) {
+ irq = xen_create_msi_irq(dev, msidesc, type);
-+ if (irq == 0)
++ if (irq < 0)
+ return -1;
+
+ ret = set_irq_msi(irq, msidesc);
@@ -5618,10 +6579,10 @@ index 1ebd6b4..3590082 100644
obj-$(CONFIG_PCI_IOV) += iov.o
diff --git a/drivers/pci/xen-iommu.c b/drivers/pci/xen-iommu.c
new file mode 100644
-index 0000000..b1a7d93
+index 0000000..9ba63b1
--- /dev/null
+++ b/drivers/pci/xen-iommu.c
-@@ -0,0 +1,332 @@
+@@ -0,0 +1,271 @@
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/string.h>
@@ -5637,7 +6598,6 @@ index 0000000..b1a7d93
+#include <xen/grant_table.h>
+#include <xen/page.h>
+#include <xen/xen-ops.h>
-+#include <xen/swiotlb.h>
+
+#include <asm/iommu.h>
+#include <asm/swiotlb.h>
@@ -5661,40 +6621,6 @@ index 0000000..b1a7d93
+} while (0)
+
+
-+static int max_dma_bits = 32;
-+
-+void xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
-+{
-+ int i, rc;
-+ int dma_bits;
-+
-+ printk(KERN_DEBUG "xen_swiotlb_fixup: buf=%p size=%zu\n",
-+ buf, size);
-+
-+ dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
-+
-+ i = 0;
-+ do {
-+ int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);
-+
-+ do {
-+ rc = xen_create_contiguous_region(
-+ (unsigned long)buf + (i << IO_TLB_SHIFT),
-+ get_order(slabs << IO_TLB_SHIFT),
-+ dma_bits);
-+ } while (rc && dma_bits++ < max_dma_bits);
-+ if (rc)
-+ panic(KERN_ERR "xen_create_contiguous_region failed\n");
-+
-+ i += slabs;
-+ } while(i < nslabs);
-+}
-+
-+int xen_wants_swiotlb(void)
-+{
-+ return xen_initial_domain();
-+}
-+
+dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
+{
+ return phys_to_machine(XPADDR(paddr)).maddr;
@@ -5914,44 +6840,18 @@ index 0000000..b1a7d93
+ .is_phys = 0,
+};
+
-+static struct dma_map_ops xen_swiotlb_dma_ops = {
-+ .dma_supported = swiotlb_dma_supported,
-+
-+ .alloc_coherent = xen_alloc_coherent,
-+ .free_coherent = xen_free_coherent,
-+
-+ .map_page = swiotlb_map_page,
-+ .unmap_page = swiotlb_unmap_page,
-+
-+ .map_sg = swiotlb_map_sg_attrs,
-+ .unmap_sg = swiotlb_unmap_sg_attrs,
-+
-+ .mapping_error = swiotlb_dma_mapping_error,
-+
-+ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
-+ .sync_single_for_device = swiotlb_sync_single_for_device,
-+ .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
-+ .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
-+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
-+ .sync_sg_for_device = swiotlb_sync_sg_for_device,
-+
-+ .is_phys = 0,
-+};
-+
+void __init xen_iommu_init(void)
+{
+ if (!xen_pv_domain())
+ return;
+
++ if (xen_initial_domain()) /* For dom0, the IOMMU is handled by arch/x86/xen/pci-swiotlb.c. */
++ return;
++
+ printk(KERN_INFO "Xen: Initializing Xen DMA ops\n");
+
+ force_iommu = 0;
+ dma_ops = &xen_dma_ops;
-+
-+ if (swiotlb) {
-+ printk(KERN_INFO "Xen: Enabling DMA fallback to swiotlb\n");
-+ dma_ops = &xen_swiotlb_dma_ops;
-+ }
+}
+
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
@@ -8531,7 +9431,7 @@ index 0000000..650f4b3
+ (void)xenbus_register_backend(&blkback);
+}
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
-index abad71b..96aebd4 100644
+index abad71b..68c287c 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -16,7 +16,7 @@
@@ -8568,11 +9468,13 @@ index abad71b..96aebd4 100644
/*
* This lock protects updates to the following mapping and reference-count
* arrays. The lock does not need to be acquired to read the mapping tables.
-@@ -83,20 +90,25 @@ struct irq_info
+@@ -82,21 +89,26 @@ struct irq_info
+ unsigned short virq;
enum ipi_vector ipi;
struct {
- unsigned short gsi;
+- unsigned short gsi;
- unsigned short vector;
++ unsigned short nr;
+ unsigned char vector;
+ unsigned char flags;
} pirq;
@@ -8608,6 +9510,28 @@ index abad71b..96aebd4 100644
/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
+@@ -132,10 +145,10 @@ static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
+ }
+
+ static struct irq_info mk_pirq_info(unsigned short evtchn,
+- unsigned short gsi, unsigned short vector)
++ unsigned short pirq, unsigned short vector)
+ {
+ return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
+- .cpu = 0, .u.pirq = { .gsi = gsi, .vector = vector } };
++ .cpu = 0, .u.pirq = { .nr = pirq, .vector = vector } };
+ }
+
+ /*
+@@ -184,7 +197,7 @@ static unsigned gsi_from_irq(unsigned irq)
+ BUG_ON(info == NULL);
+ BUG_ON(info->type != IRQT_PIRQ);
+
+- return info->u.pirq.gsi;
++ return info->u.pirq.nr;
+ }
+
+ static unsigned vector_from_irq(unsigned irq)
@@ -218,6 +231,15 @@ static unsigned int cpu_from_evtchn(unsigned int evtchn)
return ret;
}
@@ -8650,7 +9574,7 @@ index abad71b..96aebd4 100644
if (irq_info[irq].type == IRQT_UNBOUND)
break;
-@@ -350,6 +384,299 @@ static int find_unbound_irq(void)
+@@ -350,6 +384,300 @@ static int find_unbound_irq(void)
return irq;
}
@@ -8662,7 +9586,8 @@ index abad71b..96aebd4 100644
+
+static void pirq_unmask_notify(int irq)
+{
-+ struct physdev_eoi eoi = { .irq = irq };
++ struct irq_info *info = info_for_irq(irq);
++ struct physdev_eoi eoi = { .irq = info->u.pirq.nr };
+
+ if (unlikely(pirq_needs_eoi(irq))) {
+ int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
@@ -8677,7 +9602,7 @@ index abad71b..96aebd4 100644
+
+ BUG_ON(info->type != IRQT_PIRQ);
+
-+ irq_status.irq = irq;
++ irq_status.irq = info->u.pirq.nr;
+ if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
+ irq_status.flags = 0;
+
@@ -8705,7 +9630,7 @@ index abad71b..96aebd4 100644
+ if (VALID_EVTCHN(evtchn))
+ goto out;
+
-+ bind_pirq.pirq = irq;
++ bind_pirq.pirq = info->u.pirq.nr;
+ /* NB. We are happy to share unless we are probing. */
+ bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
+ rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
@@ -8836,7 +9761,7 @@ index abad71b..96aebd4 100644
+ set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
+ handle_level_irq, name);
+
-+ irq_op.irq = irq;
++ irq_op.irq = gsi;
+ if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
+ dynamic_irq_cleanup(irq);
+ irq = -ENOSPC;
@@ -8854,6 +9779,7 @@ index abad71b..96aebd4 100644
+{
+ struct irq_desc *desc;
+ struct physdev_unmap_pirq unmap_irq;
++ struct irq_info *info = info_for_irq(irq);
+ int rc = -ENOENT;
+
+ spin_lock(&irq_mapping_update_lock);
@@ -8862,11 +9788,11 @@ index abad71b..96aebd4 100644
+ if (!desc)
+ goto out;
+
-+ unmap_irq.pirq = irq;
++ unmap_irq.pirq = info->u.pirq.nr;
+ unmap_irq.domid = DOMID_SELF;
+ rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
+ if (rc) {
-+ printk(KERN_WARNING "unmap irq failed %x\n", rc);
++ printk(KERN_WARNING "unmap irq failed %d\n", rc);
+ goto out;
+ }
+
@@ -8892,6 +9818,7 @@ index abad71b..96aebd4 100644
+ map_irq.domid = domid;
+ map_irq.type = MAP_PIRQ_TYPE_MSI;
+ map_irq.index = -1;
++ map_irq.pirq = -1;
+ map_irq.bus = dev->bus->number;
+ map_irq.devfn = dev->devfn;
+
@@ -8913,12 +9840,10 @@ index abad71b..96aebd4 100644
+ if (irq == -1)
+ goto out;
+
-+ map_irq.pirq = irq;
-+
+ rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
+ if (rc) {
+
-+ printk(KERN_WARNING "xen map irq failed %x\n", rc);
++ printk(KERN_WARNING "xen map irq failed %d\n", rc);
+
+ dynamic_irq_cleanup(irq);
+
@@ -8926,7 +9851,7 @@ index abad71b..96aebd4 100644
+ goto out;
+ }
+
-+ irq_info[irq] = mk_pirq_info(0, -1, map_irq.index);
++ irq_info[irq] = mk_pirq_info(0, map_irq.pirq, map_irq.index);
+ set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
+ handle_level_irq,
+ (type == PCI_CAP_ID_MSIX) ? "msi-x":"msi");
@@ -8950,7 +9875,7 @@ index abad71b..96aebd4 100644
int bind_evtchn_to_irq(unsigned int evtchn)
{
int irq;
-@@ -409,8 +736,23 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
+@@ -409,8 +737,23 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
return irq;
}
@@ -8975,7 +9900,7 @@ index abad71b..96aebd4 100644
{
struct evtchn_bind_virq bind_virq;
int evtchn, irq;
-@@ -501,6 +843,29 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
+@@ -501,6 +844,29 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
@@ -9005,7 +9930,7 @@ index abad71b..96aebd4 100644
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
irq_handler_t handler,
unsigned long irqflags, const char *devname, void *dev_id)
-@@ -532,6 +897,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
+@@ -532,6 +898,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
if (irq < 0)
return irq;
@@ -9013,7 +9938,7 @@ index abad71b..96aebd4 100644
retval = request_irq(irq, handler, irqflags, devname, dev_id);
if (retval != 0) {
unbind_from_irq(irq);
-@@ -924,13 +1290,38 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
+@@ -924,13 +1291,38 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
.retrigger = retrigger_dynirq,
};
@@ -9053,7 +9978,7 @@ index abad71b..96aebd4 100644
init_evtchn_cpu_bindings();
-@@ -939,4 +1330,6 @@ void __init xen_init_IRQ(void)
+@@ -939,4 +1331,6 @@ void __init xen_init_IRQ(void)
mask_evtchn(i);
irq_ctx_init(smp_processor_id());
@@ -10063,10 +10988,10 @@ index 0000000..21c1f95
+}
diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
new file mode 100644
-index 0000000..80b424f
+index 0000000..7c0f05b
--- /dev/null
+++ b/drivers/xen/netback/netback.c
-@@ -0,0 +1,1513 @@
+@@ -0,0 +1,1604 @@
+/******************************************************************************
+ * drivers/xen/netback/netback.c
+ *
@@ -10254,6 +11179,82 @@ index 0000000..80b424f
+ tasklet_schedule(&net_tx_tasklet);
+}
+
++static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
++{
++ struct skb_shared_info *ninfo;
++ struct sk_buff *nskb;
++ unsigned long offset;
++ int ret;
++ int len;
++ int headlen;
++
++ BUG_ON(skb_shinfo(skb)->frag_list != NULL);
++
++ nskb = alloc_skb(SKB_MAX_HEAD(0), GFP_ATOMIC | __GFP_NOWARN);
++ if (unlikely(!nskb))
++ goto err;
++
++ skb_reserve(nskb, NET_SKB_PAD + NET_IP_ALIGN);
++ headlen = skb_end_pointer(nskb) - nskb->data;
++ if (headlen > skb_headlen(skb))
++ headlen = skb_headlen(skb);
++ ret = skb_copy_bits(skb, 0, __skb_put(nskb, headlen), headlen);
++ BUG_ON(ret);
++
++ ninfo = skb_shinfo(nskb);
++ ninfo->gso_size = skb_shinfo(skb)->gso_size;
++ ninfo->gso_type = skb_shinfo(skb)->gso_type;
++
++ offset = headlen;
++ len = skb->len - headlen;
++
++ nskb->len = skb->len;
++ nskb->data_len = len;
++ nskb->truesize += len;
++
++ while (len) {
++ struct page *page;
++ int copy;
++ int zero;
++
++ if (unlikely(ninfo->nr_frags >= MAX_SKB_FRAGS)) {
++ dump_stack();
++ goto err_free;
++ }
++
++ copy = len >= PAGE_SIZE ? PAGE_SIZE : len;
++ zero = len >= PAGE_SIZE ? 0 : __GFP_ZERO;
++
++ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN | zero);
++ if (unlikely(!page))
++ goto err_free;
++
++ ret = skb_copy_bits(skb, offset, page_address(page), copy);
++ BUG_ON(ret);
++
++ ninfo->frags[ninfo->nr_frags].page = page;
++ ninfo->frags[ninfo->nr_frags].page_offset = 0;
++ ninfo->frags[ninfo->nr_frags].size = copy;
++ ninfo->nr_frags++;
++
++ offset += copy;
++ len -= copy;
++ }
++
++ offset = nskb->data - skb->data;
++
++ nskb->transport_header = skb->transport_header + offset;
++ nskb->network_header = skb->network_header + offset;
++ nskb->mac_header = skb->mac_header + offset;
++
++ return nskb;
++
++ err_free:
++ kfree_skb(nskb);
++ err:
++ return NULL;
++}
++
+static inline int netbk_max_required_rx_slots(struct xen_netif *netif)
+{
+ if (netif->features & (NETIF_F_SG|NETIF_F_TSO))
@@ -10287,6 +11288,21 @@ index 0000000..80b424f
+ if (unlikely(!netif_schedulable(netif) || netbk_queue_full(netif)))
+ goto drop;
+
++ /*
++ * XXX For now we also copy skbuffs whose head crosses a page
++ * boundary, because netbk_gop_skb can't handle them.
++ */
++ if ((skb_headlen(skb) + offset_in_page(skb->data)) >= PAGE_SIZE) {
++ struct sk_buff *nskb = netbk_copy_skb(skb);
++ if ( unlikely(nskb == NULL) )
++ goto drop;
++ /* Copy only the header fields we use in this driver. */
++ nskb->dev = skb->dev;
++ nskb->ip_summed = skb->ip_summed;
++ dev_kfree_skb(skb);
++ skb = nskb;
++ }
++
+ netif->rx_req_cons_peek += skb_shinfo(skb)->nr_frags + 1 +
+ !!skb_shinfo(skb)->gso_size;
+ netif_get(netif);
@@ -15662,28 +16678,112 @@ index 0000000..b42cdfd
+#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
diff --git a/include/xen/swiotlb.h b/include/xen/swiotlb.h
new file mode 100644
-index 0000000..75d1da1
+index 0000000..670233c
--- /dev/null
+++ b/include/xen/swiotlb.h
-@@ -0,0 +1,18 @@
-+#ifndef _XEN_SWIOTLB_H
-+#define _XEN_SWIOTLB_H
-+
-+extern void xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs);
-+extern phys_addr_t xen_bus_to_phys(dma_addr_t daddr);
-+extern dma_addr_t xen_phys_to_bus(phys_addr_t paddr);
-+extern int xen_range_needs_mapping(phys_addr_t phys, size_t size);
+@@ -0,0 +1,102 @@
++#ifndef __LINUX_XEN_SWIOTLB_H
++#define __LINUX_XEN_SWIOTLB_H
+
-+#ifdef CONFIG_PCI_XEN
-+extern int xen_wants_swiotlb(void);
-+#else
-+static inline int xen_wants_swiotlb(void)
-+{
-+ return 0;
-+}
-+#endif
++#include <linux/types.h>
++
++struct device;
++struct dma_attrs;
++struct scatterlist;
++
++/*
++ * Maximum allowable number of contiguous slabs to map,
++ * must be a power of 2. What is the appropriate value ?
++ * The complexity of {map,unmap}_single is linearly dependent on this value.
++ */
++#define IO_TLB_SEGSIZE 128
++
++
++/*
++ * log of the size of each IO TLB slab. The number of slabs is command line
++ * controllable.
++ */
++#define IO_TLB_SHIFT 11
++
++extern void *xen_swiotlb_alloc_boot(size_t bytes, unsigned long nslabs);
++extern void *xen_swiotlb_alloc(unsigned order, unsigned long nslabs);
++
++extern dma_addr_t xen_swiotlb_phys_to_bus(struct device *hwdev,
++ phys_addr_t address);
++extern phys_addr_t xen_swiotlb_bus_to_phys(struct device *hwdev,
++ dma_addr_t address);
++
++extern int xen_swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size);
++
++extern void
++*xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
++ dma_addr_t *dma_handle, gfp_t flags);
++
++extern void
++xen_swiotlb_free_coherent(struct device *hwdev, size_t size,
++ void *vaddr, dma_addr_t dma_handle);
++
++extern dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
++ unsigned long offset, size_t size,
++ enum dma_data_direction dir,
++ struct dma_attrs *attrs);
++extern void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
++ size_t size, enum dma_data_direction dir,
++ struct dma_attrs *attrs);
++
++extern int
++xen_swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
++ int direction);
++
++extern void
++xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
++ int direction);
++
++extern int
++xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
++ enum dma_data_direction dir, struct dma_attrs *attrs);
++
++extern void
++xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
++ int nelems, enum dma_data_direction dir,
++ struct dma_attrs *attrs);
++
++extern void
++xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
++ size_t size, enum dma_data_direction dir);
++
++extern void
++xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
++ int nelems, enum dma_data_direction dir);
++
++extern void
++xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
++ size_t size, enum dma_data_direction dir);
++
++extern void
++xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
++ int nelems, enum dma_data_direction dir);
++
++extern void
++xen_swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
++ unsigned long offset, size_t size,
++ enum dma_data_direction dir);
++
++extern void
++xen_swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
++ unsigned long offset, size_t size,
++ enum dma_data_direction dir);
++
++extern int
++xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
++
++extern int
++xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);
++
++extern void
++xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs);
+
-+#endif /* _XEN_SWIOTLB_H */
++#endif /* __LINUX_XEN_SWIOTLB_H */
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 883a21b..9769738 100644
--- a/include/xen/xen-ops.h
xen.pvops.post.patch:
b/arch/x86/include/asm/paravirt_types.h | 3 +++
b/arch/x86/kernel/process_32.c | 2 ++
b/arch/x86/pci/common.c | 16 ++++++++++++++++
b/include/linux/swiotlb.h | 1 +
drivers/pci/pci.h | 2 ++
5 files changed, 24 insertions(+)
Index: xen.pvops.post.patch
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/Attic/xen.pvops.post.patch,v
retrieving revision 1.1.2.25
retrieving revision 1.1.2.26
diff -u -p -r1.1.2.25 -r1.1.2.26
--- xen.pvops.post.patch 24 Aug 2009 21:34:54 -0000 1.1.2.25
+++ xen.pvops.post.patch 5 Sep 2009 19:00:27 -0000 1.1.2.26
@@ -68,19 +68,15 @@ index 2202b62..f371fe8 100644
}
int __init pcibios_init(void)
-diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
-index 6af96ee..1e66b18 100644
---- a/arch/x86/kernel/pci-swiotlb.c
-+++ b/arch/x86/kernel/pci-swiotlb.c
-@@ -71,9 +71,8 @@ void __init pci_swiotlb_init(void)
- {
- /* don't initialize swiotlb if iommu=off (no_iommu=1) */
- #ifdef CONFIG_X86_64
-- if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN) ||
-- iommu_pass_through)
-- swiotlb = 1;
-+ if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN))
-+ swiotlb = 1;
- #endif
- if (xen_wants_swiotlb())
- swiotlb = 1;
+diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
+index cb1a663..f4ebffb 100644
+--- a/include/linux/swiotlb.h
++++ b/include/linux/swiotlb.h
+@@ -2,6 +2,7 @@
+ #define __LINUX_SWIOTLB_H
+
+ #include <linux/types.h>
++#include <linux/dma-mapping.h>
+
+ struct device;
+ struct dma_attrs;
xen.pvops.pre.patch:
b/arch/x86/include/asm/paravirt.h | 3 ---
b/arch/x86/kernel/process_32.c | 3 +--
b/arch/x86/pci/common.c | 16 ----------------
drivers/pci/pci.h | 2 --
4 files changed, 1 insertion(+), 23 deletions(-)
Index: xen.pvops.pre.patch
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/Attic/xen.pvops.pre.patch,v
retrieving revision 1.1.2.16
retrieving revision 1.1.2.17
diff -u -p -r1.1.2.16 -r1.1.2.17
--- xen.pvops.pre.patch 10 Aug 2009 21:22:20 -0000 1.1.2.16
+++ xen.pvops.pre.patch 5 Sep 2009 19:00:27 -0000 1.1.2.17
@@ -2,7 +2,6 @@ temporarily revert various Fedora change
Affected patches;
linux-2.6-defaults-pci_no_msi.patch - drivers/pci/pci.h
linux-2.6-execshield.patch - arch/x86/include/asm/paravirt.h arch/x86/kernel/process_32.c
-linux-2.6-intel-iommu-updates.patch - arch/x86/kernel/pci-swiotlb.c
linux-2.6-pci-cacheline-sizing.patch - arch/x86/pci/common.c
--- a/drivers/pci/pci.h 2009-04-24 20:46:50.000000000 +0100
@@ -74,19 +73,3 @@ index 2202b62..f371fe8 100644
pcibios_resource_survey();
if (pci_bf_sort >= pci_force_bf)
-diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
-index 6af96ee..1e66b18 100644
---- a/arch/x86/kernel/pci-swiotlb.c
-+++ b/arch/x86/kernel/pci-swiotlb.c
-@@ -71,8 +71,9 @@ void __init pci_swiotlb_init(void)
- {
- /* don't initialize swiotlb if iommu=off (no_iommu=1) */
- #ifdef CONFIG_X86_64
-- if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN))
-- swiotlb = 1;
-+ if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN) ||
-+ iommu_pass_through)
-+ swiotlb = 1;
- #endif
- if (swiotlb_force)
- swiotlb = 1;
More information about the fedora-extras-commits
mailing list