rpms/kernel/devel .cvsignore, 1.1014.2.13, 1.1014.2.14 TODO, 1.54.6.6, 1.54.6.7 config-generic, 1.238.6.21, 1.238.6.22 config-powerpc-generic, 1.33.6.5, 1.33.6.6 kernel.spec, 1.1294.2.28, 1.1294.2.29 linux-2.6-utrace.patch, 1.107.6.3, 1.107.6.4 sources, 1.976.2.14, 1.976.2.15 upstream, 1.888.2.13, 1.888.2.14 xen.pvops.patch, 1.1.2.22, 1.1.2.23 xen.pvops.post.patch, 1.1.2.15, 1.1.2.16 xen.pvops.pre.patch, 1.1.2.12, 1.1.2.13 cpufreq-add-atom-to-p4-clockmod.patch, 1.1.2.2, NONE patch-2.6.30-rc6-git6.bz2.sign, 1.1.2.2, NONE patch-2.6.30-rc6.bz2.sign, 1.1.2.2, NONE rds-only-on-64-bit-or-x86.patch, 1.1.10.2, NONE

Michael Young myoung at fedoraproject.org
Thu Jun 4 19:35:54 UTC 2009


Author: myoung

Update of /cvs/pkgs/rpms/kernel/devel
In directory cvs1.fedora.phx.redhat.com:/tmp/cvs-serv22181

Modified Files:
      Tag: private-myoung-dom0-branch
	.cvsignore TODO config-generic config-powerpc-generic 
	kernel.spec linux-2.6-utrace.patch sources upstream 
	xen.pvops.patch xen.pvops.post.patch xen.pvops.pre.patch 
Removed Files:
      Tag: private-myoung-dom0-branch
	cpufreq-add-atom-to-p4-clockmod.patch 
	patch-2.6.30-rc6-git6.bz2.sign patch-2.6.30-rc6.bz2.sign 
	rds-only-on-64-bit-or-x86.patch 
Log Message:
Update pvops kernel to rc8



Index: .cvsignore
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/.cvsignore,v
retrieving revision 1.1014.2.13
retrieving revision 1.1014.2.14
diff -u -p -r1.1014.2.13 -r1.1014.2.14
--- .cvsignore	21 May 2009 22:11:56 -0000	1.1014.2.13
+++ .cvsignore	4 Jun 2009 19:34:56 -0000	1.1014.2.14
@@ -5,5 +5,4 @@ kernel-2.6.*.config
 temp-*
 kernel-2.6.29
 linux-2.6.29.tar.bz2
-patch-2.6.30-rc6.bz2
-patch-2.6.30-rc6-git6.bz2
+patch-2.6.30-rc8.bz2


Index: TODO
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/TODO,v
retrieving revision 1.54.6.6
retrieving revision 1.54.6.7
diff -u -p -r1.54.6.6 -r1.54.6.7
--- TODO	24 Apr 2009 22:27:03 -0000	1.54.6.6
+++ TODO	4 Jun 2009 19:34:57 -0000	1.54.6.7
@@ -20,11 +20,6 @@
 * linux-2.6-debug-vm-would-have-oomkilled.patch
 	Push for 2.6.29
 
-* linux-2.6-cdrom-door-status.patch
-	Getting some testing here before I go back to upstream with more
-	data.  See the thread at:
-	http://marc.info/?l=linux-scsi&m=121572509510899&w=2
-
 * linux-2.6-compile-fixes.patch
 * linux-2.6-hotfixes.patch
 	Empty


Index: config-generic
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/config-generic,v
retrieving revision 1.238.6.21
retrieving revision 1.238.6.22
diff -u -p -r1.238.6.21 -r1.238.6.22
--- config-generic	22 May 2009 20:02:04 -0000	1.238.6.21
+++ config-generic	4 Jun 2009 19:34:57 -0000	1.238.6.22
@@ -1450,7 +1450,7 @@ CONFIG_PCMCIA_HERMES=m
 CONFIG_PCMCIA_SPECTRUM=m
 CONFIG_PCMCIA_ATMEL=m
 CONFIG_PCMCIA_WL3501=m
-CONFIG_RT2X00=y
+CONFIG_RT2X00=m
 CONFIG_RT2X00_LIB_DEBUGFS=y
 # CONFIG_RT2X00_DEBUG is not set
 CONFIG_RT2400PCI=m
@@ -3940,6 +3940,7 @@ CONFIG_SCSI_CXGB3_ISCSI=m
 CONFIG_LIBFC=m
 CONFIG_LIBFCOE=m
 CONFIG_FCOE=m
+CONFIG_FCOE_FNIC=m
 # CONFIG_SCSI_LPFC_DEBUG_FS is not set
 
 CONFIG_NOP_USB_XCEIV=m


Index: config-powerpc-generic
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/config-powerpc-generic,v
retrieving revision 1.33.6.5
retrieving revision 1.33.6.6
diff -u -p -r1.33.6.5 -r1.33.6.6
--- config-powerpc-generic	24 Apr 2009 22:27:04 -0000	1.33.6.5
+++ config-powerpc-generic	4 Jun 2009 19:34:57 -0000	1.33.6.6
@@ -318,3 +318,5 @@ CONFIG_PPC_OF_BOOT_TRAMPOLINE=y
 CONFIG_DTL=y
 
 CONFIG_MMC_SDHCI_OF=m
+
+# CONFIG_CONSISTENT_SIZE_BOOL is not set


Index: kernel.spec
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/kernel.spec,v
retrieving revision 1.1294.2.28
retrieving revision 1.1294.2.29
diff -u -p -r1.1294.2.28 -r1.1294.2.29
--- kernel.spec	22 May 2009 20:02:04 -0000	1.1294.2.28
+++ kernel.spec	4 Jun 2009 19:34:57 -0000	1.1294.2.29
@@ -57,9 +57,9 @@ Summary: The Linux kernel
 # The next upstream release sublevel (base_sublevel+1)
 %define upstream_sublevel %(echo $((%{base_sublevel} + 1)))
 # The rc snapshot level
-%define rcrev 6
+%define rcrev 8
 # The git snapshot level
-%define gitrev 6
+%define gitrev 0
 # Set rpm version accordingly
 %define rpmversion 2.6.%{upstream_sublevel}
 %endif
@@ -593,8 +593,6 @@ Patch22: linux-2.6-utrace.patch
 
 Patch41: linux-2.6-sysrq-c.patch
 
-Patch50: rds-only-on-64-bit-or-x86.patch
-
 Patch141: linux-2.6-ps3-storage-alias.patch
 Patch143: linux-2.6-g5-therm-shutdown.patch
 Patch144: linux-2.6-vio-modalias.patch
@@ -668,8 +666,6 @@ Patch2903: linux-2.6-revert-dvb-net-kabi
 # fs fixes
 Patch3000: linux-2.6-btrfs-experimental-branch.patch
 
-Patch9002: cpufreq-add-atom-to-p4-clockmod.patch
-
 #snmp fixes
 Patch10000: linux-2.6-missing-rfc2465-stats.patch
 
@@ -732,11 +728,10 @@ Requires: gzip
 Kernel-bootwrapper contains the wrapper code which makes bootable "zImage"
 files combining both kernel and initial ramdisk.
 
-%package debuginfo-common
+%package debuginfo-common-%{_target_cpu}
 Summary: Kernel source files used by %{name}-debuginfo packages
 Group: Development/Debug
-Provides: %{name}-debuginfo-common-%{_target_cpu} = %{version}-%{release}
-%description debuginfo-common
+%description debuginfo-common-%{_target_cpu}
 This package is required by %{name}-debuginfo subpackages.
 It provides the kernel source files common to all builds.
 
@@ -1084,8 +1079,6 @@ ApplyPatch linux-2.6-utrace.patch
 # enable sysrq-c on all kernels, not only kexec
 ApplyPatch linux-2.6-sysrq-c.patch
 
-ApplyPatch rds-only-on-64-bit-or-x86.patch
-
 ApplyPatch linux-2.6-missing-rfc2465-stats.patch
 
 # Architecture patches
@@ -1228,8 +1221,6 @@ ApplyPatch linux-2.6-silence-acpi-blackl
 #ApplyPatch linux-2.6-v4l-dvb-experimental.patch
 #ApplyPatch linux-2.6-revert-dvb-net-kabi-change.patch
 
-#ApplyPatch cpufreq-add-atom-to-p4-clockmod.patch
-
 ApplyPatch xen.pvops.pre.patch
 ApplyPatch xen.pvops.patch
 ApplyPatch xen.pvops.post.patch
@@ -1558,7 +1549,7 @@ find Documentation -type d | xargs chmod
 %if %{with_debuginfo}
 %ifnarch noarch
 %global __debug_package 1
-%files -f debugfiles.list debuginfo-common
+%files -f debugfiles.list debuginfo-common-%{_target_cpu}
 %defattr(-,root,root)
 %endif
 %endif
@@ -1826,6 +1817,46 @@ fi
 #	                ||----w |
 #	                ||     ||
 %changelog
+* Thu Jun 04 2009 Michael Young <m.a.young at durham.ac.uk>
+- pvops update
+
+* Wed Jun 03 2009 Kyle McMartin <kyle at redhat.com>
+- Linux 2.6.30-rc8
+
+* Tue Jun  2 2009 Roland McGrath <roland at redhat.com>
+- utrace update (fixes stap PR10185)
+
+* Tue Jun 02 2009 Dave Jones <davej at redhat.com>
+- For reasons unknown, RT2X00 driver was being built-in.
+  Make it modular.
+
+* Tue Jun 02 2009 Dave Jones <davej at redhat.com>
+- 2.6.30-rc7-git5
+
+* Sat May 30 2009 Dave Jones <davej at redhat.com>
+- 2.6.30-rc7-git4
+
+* Thu May 28 2009 Dave Jones <davej at redhat.com>
+- 2.6.30-rc7-git3
+
+* Wed May 27 2009 Dave Jones <davej at redhat.com>
+- 2.6.30-rc7-git2
+
+* Tue May 26 2009 Dave Jones <davej at redhat.com>
+- Various cpufreq patches from git.
+
+* Tue May 26 2009 Dave Jones <davej at redhat.com>
+- 2.6.30-rc7-git1
+
+* Tue May 26 2009 Dave Jones <davej at redhat.com>
+- 2.6.30-rc7-git1
+
+* Mon May 25 2009 Kyle McMartin <kyle at redhat.com>
+- rds-only-on-64-bit-or-x86.patch: drop patch, issue is fixed upstream.
+
+* Sat May 23 2009 Dave Jones <davej at redhat.com>
+- 2.6.30-rc7
+
 * Fri May 22 2009 Michael Young <m.a.young at durham.ac.uk>
 - update pvops patch to latest xen-tip/next version
 - pull in patch for !PERF_COUNTERS build failure

linux-2.6-utrace.patch:

Index: linux-2.6-utrace.patch
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/linux-2.6-utrace.patch,v
retrieving revision 1.107.6.3
retrieving revision 1.107.6.4
diff -u -p -r1.107.6.3 -r1.107.6.4
--- linux-2.6-utrace.patch	6 May 2009 21:48:45 -0000	1.107.6.3
+++ linux-2.6-utrace.patch	4 Jun 2009 19:34:57 -0000	1.107.6.4
@@ -1,5 +1,46 @@
+utrace core
+
+This adds the utrace facility, a new modular interface in the kernel for
+implementing user thread tracing and debugging.  This fits on top of the
+tracehook_* layer, so the new code is well-isolated.
+
+The new interface is in <linux/utrace.h> and the DocBook utrace book
+describes it.  It allows for multiple separate tracing engines to work in
+parallel without interfering with each other.  Higher-level tracing
+facilities can be implemented as loadable kernel modules using this layer.
+
+The new facility is made optional under CONFIG_UTRACE.
+When this is not enabled, no new code is added.
+It can only be enabled on machines that have all the
+prerequisites and select CONFIG_HAVE_ARCH_TRACEHOOK.
+
+In this initial version, utrace and ptrace do not play together at all.
+If ptrace is attached to a thread, the attach calls in the utrace kernel
+API return -EBUSY.  If utrace is attached to a thread, the PTRACE_ATTACH
+or PTRACE_TRACEME request will return EBUSY to userland.  The old ptrace
+code is otherwise unchanged and nothing using ptrace should be affected
+by this patch as long as utrace is not used at the same time.  In the
+future we can clean up the ptrace implementation and rework it to use
+the utrace API.
+
+Signed-off-by: Roland McGrath <roland at redhat.com>
+---
+ Documentation/DocBook/Makefile    |    2 +-
+ Documentation/DocBook/utrace.tmpl |  590 ++++++++++
+ fs/proc/array.c                   |    3 +
+ include/linux/init_task.h         |    1 +
+ include/linux/sched.h             |    6 +
+ include/linux/tracehook.h         |   61 +-
+ include/linux/utrace.h            |  692 +++++++++++
+ include/linux/utrace_struct.h     |   58 +
+ init/Kconfig                      |    9 +
+ kernel/Makefile                   |    1 +
+ kernel/ptrace.c                   |   18 +-
+ kernel/utrace.c                   | 2357 +++++++++++++++++++++++++++++++++++++
+ 12 files changed, 3795 insertions(+), 3 deletions(-)
+
 diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
-index b1eb661..02851f6 100644
+index b1eb661..02851f6 100644  
 --- a/Documentation/DocBook/Makefile
 +++ b/Documentation/DocBook/Makefile
 @@ -9,7 +9,7 @@
@@ -13,10 +54,10 @@ index b1eb661..02851f6 100644
  	    mac80211.xml debugobjects.xml sh.xml regulator.xml \
 diff --git a/Documentation/DocBook/utrace.tmpl b/Documentation/DocBook/utrace.tmpl
 new file mode 100644
-index 0000000..b802c55
+index ...6cc58a1 100644  
 --- /dev/null
 +++ b/Documentation/DocBook/utrace.tmpl
-@@ -0,0 +1,571 @@
+@@ -0,0 +1,590 @@
 +<?xml version="1.0" encoding="UTF-8"?>
 +<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
 +"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
@@ -82,12 +123,12 @@ index 0000000..b802c55
 +  <para>
 +    Many engines can be attached to each thread.  When a thread has an
 +    event, each engine gets a callback if it has set the event flag for
-+    that event type.  Engines are called in the order they attached.
-+    Engines that attach after the event has occurred do not get callbacks
-+    for that event.  This includes any new engines just attached by an
-+    existing engine's callback function.  Once the sequence of callbacks
-+    for that one event has completed, such new engines are then eligible in
-+    the next sequence that starts when there is another event.
++    that event type.  For most events, engines are called in the order they
++    attached.  Engines that attach after the event has occurred do not get
++    callbacks for that event.  This includes any new engines just attached
++    by an existing engine's callback function.  Once the sequence of
++    callbacks for that one event has completed, such new engines are then
++    eligible in the next sequence that starts when there is another event.
 +  </para>
 +
 +  <para>
@@ -105,6 +146,25 @@ index 0000000..b802c55
 +    <function>utrace_control</function> to resume the thread.
 +  </para>
 +
++  <para>
++    The <constant>UTRACE_EVENT(SYSCALL_ENTRY)</constant> event is a special
++    case.  While other events happen in the kernel when it will return to
++    user mode soon, this event happens when entering the kernel before it
++    will proceed with the work requested from user mode.  Because of this
++    difference, the <function>report_syscall_entry</function> callback is
++    special in two ways.  For this event, engines are called in reverse of
++    the normal order (this includes the <function>report_quiesce</function>
++    call that precedes a <function>report_syscall_entry</function> call).
++    This preserves the semantics that the last engine to attach is called
++    "closest to user mode"--the engine that is first to see a thread's user
++    state when it enters the kernel is also the last to see that state when
++    the thread returns to user mode.  For the same reason, if these
++    callbacks use <constant>UTRACE_STOP</constant> (see the next section),
++    the thread stops immediately after callbacks rather than only when it's
++    ready to return to user mode; when allowed to resume, it will actually
++    attempt the system call indicated by the register values at that time.
++  </para>
++
 +  </sect1>
 +
 +  <sect1 id="safely"><title>Stopping Safely</title>
@@ -589,7 +649,7 @@ index 0000000..b802c55
 +
 +</book>
 diff --git a/fs/proc/array.c b/fs/proc/array.c
-index 725a650..e299a63 100644
+index 725a650..e299a63 100644  
 --- a/fs/proc/array.c
 +++ b/fs/proc/array.c
 @@ -82,6 +82,7 @@
@@ -600,7 +660,7 @@ index 725a650..e299a63 100644
  
  #include <asm/pgtable.h>
  #include <asm/processor.h>
-@@ -188,6 +189,8 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
+@@ -188,6 +189,8 @@ static inline void task_state(struct seq
  		cred->uid, cred->euid, cred->suid, cred->fsuid,
  		cred->gid, cred->egid, cred->sgid, cred->fsgid);
  
@@ -610,7 +670,7 @@ index 725a650..e299a63 100644
  	if (p->files)
  		fdt = files_fdtable(p->files);
 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
-index d87247d..0d0b55d 100644
+index d87247d..0d0b55d 100644  
 --- a/include/linux/init_task.h
 +++ b/include/linux/init_task.h
 @@ -170,6 +170,7 @@ extern struct cred init_cred;
@@ -622,7 +682,7 @@ index d87247d..0d0b55d 100644
  	INIT_TRACE_IRQFLAGS						\
  	INIT_LOCKDEP							\
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index b4c38bc..30db106 100644
+index b4c38bc..30db106 100644  
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
 @@ -59,6 +59,7 @@ struct sched_param {
@@ -646,7 +706,7 @@ index b4c38bc..30db106 100644
     	u32 parent_exec_id;
     	u32 self_exec_id;
 diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
-index 4ec4821..a7de30f 100644
+index 4ec4821..a7de30f 100644  
 --- a/include/linux/tracehook.h
 +++ b/include/linux/tracehook.h
 @@ -49,6 +49,7 @@
@@ -666,7 +726,7 @@ index 4ec4821..a7de30f 100644
  	return (task_ptrace(task) & PT_PTRACED) != 0;
  }
  
-@@ -111,6 +114,9 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
+@@ -111,6 +114,9 @@ static inline void ptrace_report_syscall
  static inline __must_check int tracehook_report_syscall_entry(
  	struct pt_regs *regs)
  {
@@ -676,7 +736,7 @@ index 4ec4821..a7de30f 100644
  	ptrace_report_syscall(regs);
  	return 0;
  }
-@@ -134,6 +140,8 @@ static inline __must_check int tracehook_report_syscall_entry(
+@@ -134,6 +140,8 @@ static inline __must_check int tracehook
   */
  static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
  {
@@ -685,7 +745,7 @@ index 4ec4821..a7de30f 100644
  	ptrace_report_syscall(regs);
  }
  
-@@ -194,6 +202,8 @@ static inline void tracehook_report_exec(struct linux_binfmt *fmt,
+@@ -194,6 +202,8 @@ static inline void tracehook_report_exec
  					 struct linux_binprm *bprm,
  					 struct pt_regs *regs)
  {
@@ -694,7 +754,7 @@ index 4ec4821..a7de30f 100644
  	if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) &&
  	    unlikely(task_ptrace(current) & PT_PTRACED))
  		send_sig(SIGTRAP, current, 0);
-@@ -211,6 +221,8 @@ static inline void tracehook_report_exec(struct linux_binfmt *fmt,
+@@ -211,6 +221,8 @@ static inline void tracehook_report_exec
   */
  static inline void tracehook_report_exit(long *exit_code)
  {
@@ -703,7 +763,7 @@ index 4ec4821..a7de30f 100644
  	ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, *exit_code);
  }
  
-@@ -254,6 +266,7 @@ static inline int tracehook_prepare_clone(unsigned clone_flags)
+@@ -254,6 +266,7 @@ static inline int tracehook_prepare_clon
  static inline void tracehook_finish_clone(struct task_struct *child,
  					  unsigned long clone_flags, int trace)
  {
@@ -711,7 +771,7 @@ index 4ec4821..a7de30f 100644
  	ptrace_init_task(child, (clone_flags & CLONE_PTRACE) || trace);
  }
  
-@@ -280,6 +293,8 @@ static inline void tracehook_report_clone(int trace, struct pt_regs *regs,
+@@ -280,6 +293,8 @@ static inline void tracehook_report_clon
  					  unsigned long clone_flags,
  					  pid_t pid, struct task_struct *child)
  {
@@ -720,7 +780,7 @@ index 4ec4821..a7de30f 100644
  	if (unlikely(trace) || unlikely(clone_flags & CLONE_PTRACE)) {
  		/*
  		 * The child starts up with an immediate SIGSTOP.
-@@ -311,6 +326,9 @@ static inline void tracehook_report_clone_complete(int trace,
+@@ -311,6 +326,9 @@ static inline void tracehook_report_clon
  						   pid_t pid,
  						   struct task_struct *child)
  {
@@ -730,7 +790,7 @@ index 4ec4821..a7de30f 100644
  	if (unlikely(trace))
  		ptrace_event(0, trace, pid);
  }
-@@ -345,6 +363,7 @@ static inline void tracehook_report_vfork_done(struct task_struct *child,
+@@ -345,6 +363,7 @@ static inline void tracehook_report_vfor
   */
  static inline void tracehook_prepare_release_task(struct task_struct *task)
  {
@@ -738,7 +798,7 @@ index 4ec4821..a7de30f 100644
  }
  
  /**
-@@ -359,6 +378,7 @@ static inline void tracehook_prepare_release_task(struct task_struct *task)
+@@ -359,6 +378,7 @@ static inline void tracehook_prepare_rel
  static inline void tracehook_finish_release_task(struct task_struct *task)
  {
  	ptrace_release_task(task);
@@ -746,7 +806,7 @@ index 4ec4821..a7de30f 100644
  }
  
  /**
-@@ -380,6 +400,8 @@ static inline void tracehook_signal_handler(int sig, siginfo_t *info,
+@@ -380,6 +400,8 @@ static inline void tracehook_signal_hand
  					    const struct k_sigaction *ka,
  					    struct pt_regs *regs, int stepping)
  {
@@ -755,7 +815,7 @@ index 4ec4821..a7de30f 100644
  	if (stepping)
  		ptrace_notify(SIGTRAP);
  }
-@@ -397,6 +419,8 @@ static inline void tracehook_signal_handler(int sig, siginfo_t *info,
+@@ -397,6 +419,8 @@ static inline void tracehook_signal_hand
  static inline int tracehook_consider_ignored_signal(struct task_struct *task,
  						    int sig)
  {
@@ -764,7 +824,7 @@ index 4ec4821..a7de30f 100644
  	return (task_ptrace(task) & PT_PTRACED) != 0;
  }
  
-@@ -416,6 +440,9 @@ static inline int tracehook_consider_ignored_signal(struct task_struct *task,
+@@ -416,6 +440,9 @@ static inline int tracehook_consider_ign
  static inline int tracehook_consider_fatal_signal(struct task_struct *task,
  						  int sig)
  {
@@ -774,7 +834,7 @@ index 4ec4821..a7de30f 100644
  	return (task_ptrace(task) & PT_PTRACED) != 0;
  }
  
-@@ -430,6 +457,8 @@ static inline int tracehook_consider_fatal_signal(struct task_struct *task,
+@@ -430,6 +457,8 @@ static inline int tracehook_consider_fat
   */
  static inline int tracehook_force_sigpending(void)
  {
@@ -783,7 +843,7 @@ index 4ec4821..a7de30f 100644
  	return 0;
  }
  
-@@ -459,6 +488,8 @@ static inline int tracehook_get_signal(struct task_struct *task,
+@@ -459,6 +488,8 @@ static inline int tracehook_get_signal(s
  				       siginfo_t *info,
  				       struct k_sigaction *return_ka)
  {
@@ -792,7 +852,7 @@ index 4ec4821..a7de30f 100644
  	return 0;
  }
  
-@@ -486,6 +517,8 @@ static inline int tracehook_get_signal(struct task_struct *task,
+@@ -486,6 +517,8 @@ static inline int tracehook_get_signal(s
   */
  static inline int tracehook_notify_jctl(int notify, int why)
  {
@@ -801,7 +861,7 @@ index 4ec4821..a7de30f 100644
  	return notify ?: (current->ptrace & PT_PTRACED) ? why : 0;
  }
  
-@@ -509,6 +542,8 @@ static inline int tracehook_notify_jctl(int notify, int why)
+@@ -509,6 +542,8 @@ static inline int tracehook_notify_jctl(
  static inline int tracehook_notify_death(struct task_struct *task,
  					 void **death_cookie, int group_dead)
  {
@@ -810,7 +870,7 @@ index 4ec4821..a7de30f 100644
  	if (task_detached(task))
  		return task->ptrace ? SIGCHLD : DEATH_REAP;
  
-@@ -545,6 +580,20 @@ static inline void tracehook_report_death(struct task_struct *task,
+@@ -545,6 +580,20 @@ static inline void tracehook_report_deat
  					  int signal, void *death_cookie,
  					  int group_dead)
  {
@@ -831,7 +891,7 @@ index 4ec4821..a7de30f 100644
  }
  
  #ifdef TIF_NOTIFY_RESUME
-@@ -574,10 +623,20 @@ static inline void set_notify_resume(struct task_struct *task)
+@@ -574,10 +623,20 @@ static inline void set_notify_resume(str
   * asynchronously, this will be called again before we return to
   * user mode.
   *
@@ -855,7 +915,7 @@ index 4ec4821..a7de30f 100644
  
 diff --git a/include/linux/utrace.h b/include/linux/utrace.h
 new file mode 100644
-index 0000000..f46cc0f
+index ...f877ec6 100644  
 --- /dev/null
 +++ b/include/linux/utrace.h
 @@ -0,0 +1,692 @@
@@ -1037,7 +1097,7 @@ index 0000000..f46cc0f
 + * that is developed concurrently with utrace API improvements before they
 + * are merged into the kernel, making LINUX_VERSION_CODE checks unwieldy.
 + */
-+#define UTRACE_API_VERSION	20090302
++#define UTRACE_API_VERSION	20090416
 +
 +/**
 + * enum utrace_resume_action - engine's choice of action for a traced task
@@ -1553,7 +1613,7 @@ index 0000000..f46cc0f
 +#endif	/* linux/utrace.h */
 diff --git a/include/linux/utrace_struct.h b/include/linux/utrace_struct.h
 new file mode 100644
-index 0000000..aba7e09
+index ...aba7e09 100644  
 --- /dev/null
 +++ b/include/linux/utrace_struct.h
 @@ -0,0 +1,58 @@
@@ -1616,7 +1676,7 @@ index 0000000..aba7e09
 +
 +#endif	/* linux/utrace_struct.h */
 diff --git a/init/Kconfig b/init/Kconfig
-index 7be4d38..a6987df 100644
+index 7be4d38..a6987df 100644  
 --- a/init/Kconfig
 +++ b/init/Kconfig
 @@ -1149,6 +1149,15 @@ config STOP_MACHINE
@@ -1636,7 +1696,7 @@ index 7be4d38..a6987df 100644
  
  config PREEMPT_NOTIFIERS
 diff --git a/kernel/Makefile b/kernel/Makefile
-index 4242366..a79634e 100644
+index 4242366..a79634e 100644  
 --- a/kernel/Makefile
 +++ b/kernel/Makefile
 @@ -68,6 +68,7 @@ obj-$(CONFIG_IKCONFIG) += configs.o
@@ -1648,7 +1708,7 @@ index 4242366..a79634e 100644
  obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
  obj-$(CONFIG_AUDIT_TREE) += audit_tree.o
 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
-index 0692ab5..1d33e9c 100644
+index 0692ab5..1d33e9c 100644  
 --- a/kernel/ptrace.c
 +++ b/kernel/ptrace.c
 @@ -16,6 +16,7 @@
@@ -1659,7 +1719,7 @@ index 0692ab5..1d33e9c 100644
  #include <linux/security.h>
  #include <linux/signal.h>
  #include <linux/audit.h>
-@@ -174,6 +175,14 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
+@@ -174,6 +175,14 @@ bool ptrace_may_access(struct task_struc
  	return !err;
  }
  
@@ -1699,10 +1759,10 @@ index 0692ab5..1d33e9c 100644
  		 */
 diff --git a/kernel/utrace.c b/kernel/utrace.c
 new file mode 100644
-index 0000000..c2bb162
+index ...74b5fc5 100644  
 --- /dev/null
 +++ b/kernel/utrace.c
-@@ -0,0 +1,2351 @@
+@@ -0,0 +1,2357 @@
 +/*
 + * utrace infrastructure interface for debugging user processes
 + *
@@ -1919,6 +1979,11 @@ index 0000000..c2bb162
 + *
 + * UTRACE_ATTACH_MATCH_OPS: Only consider engines matching @ops.
 + * UTRACE_ATTACH_MATCH_DATA: Only consider engines matching @data.
++ *
++ * Calls with neither %UTRACE_ATTACH_MATCH_OPS nor %UTRACE_ATTACH_MATCH_DATA
++ * match the first among any engines attached to @target.  That means that
++ * %UTRACE_ATTACH_EXCLUSIVE in such a call fails with -%EEXIST if there
++ * are any engines on @target at all.
 + */
 +struct utrace_engine *utrace_attach_task(
 +	struct task_struct *target, int flags,
@@ -3151,6 +3216,7 @@ index 0000000..c2bb162
 +		return ops;
 +	}
 +
++	utrace->reporting = NULL;
 +	return NULL;
 +}
 +
@@ -3162,16 +3228,16 @@ index 0000000..c2bb162
 +#define REPORT(task, utrace, report, event, callback, ...)		      \
 +	do {								      \
 +		start_report(utrace);					      \
-+		REPORT_CALLBACKS(task, utrace, report, event, callback,	      \
++		REPORT_CALLBACKS(, task, utrace, report, event, callback,     \
 +				 (report)->action, engine, current,	      \
 +				 ## __VA_ARGS__);  	   		      \
 +		finish_report(report, task, utrace);			      \
 +	} while (0)
-+#define REPORT_CALLBACKS(task, utrace, report, event, callback, ...)	      \
++#define REPORT_CALLBACKS(rev, task, utrace, report, event, callback, ...)     \
 +	do {								      \
 +		struct utrace_engine *engine;				      \
 +		const struct utrace_engine_ops *ops;			      \
-+		list_for_each_entry(engine, &utrace->attached, entry) {	      \
++		list_for_each_entry##rev(engine, &utrace->attached, entry) {  \
 +			ops = start_callback(utrace, report, engine, task,    \
 +					     event);			      \
 +			if (!ops)					      \
@@ -3206,9 +3272,9 @@ index 0000000..c2bb162
 +	INIT_REPORT(report);
 +
 +	start_report(utrace);
-+	REPORT_CALLBACKS(task, utrace, &report, UTRACE_EVENT(SYSCALL_ENTRY),
-+			 report_syscall_entry, report.result | report.action,
-+			 engine, current, regs);
++	REPORT_CALLBACKS(_reverse, task, utrace, &report,
++			 UTRACE_EVENT(SYSCALL_ENTRY), report_syscall_entry,
++			 report.result | report.action, engine, current, regs);
 +	finish_report(&report, task, utrace);
 +
 +	if (report.action == UTRACE_STOP &&
@@ -3256,7 +3322,7 @@ index 0000000..c2bb162
 +	start_report(utrace);
 +	utrace->cloning = child;
 +
-+	REPORT_CALLBACKS(task, utrace, &report,
++	REPORT_CALLBACKS(, task, utrace, &report,
 +			 UTRACE_EVENT(CLONE), report_clone,
 +			 report.action, engine, task, clone_flags, child);
 +
@@ -3406,7 +3472,7 @@ index 0000000..c2bb162
 +	utrace->interrupt = 0;
 +	spin_unlock(&utrace->lock);
 +
-+	REPORT_CALLBACKS(task, utrace, &report, UTRACE_EVENT(DEATH),
++	REPORT_CALLBACKS(, task, utrace, &report, UTRACE_EVENT(DEATH),
 +			 report_death, engine, task, group_dead, signal);
 +
 +	spin_lock(&utrace->lock);


Index: sources
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/sources,v
retrieving revision 1.976.2.14
retrieving revision 1.976.2.15
diff -u -p -r1.976.2.14 -r1.976.2.15
--- sources	21 May 2009 22:11:57 -0000	1.976.2.14
+++ sources	4 Jun 2009 19:34:58 -0000	1.976.2.15
@@ -1,3 +1,2 @@
 64921b5ff5cdadbccfcd3820f03be7d8  linux-2.6.29.tar.bz2
-a83e42a8cd5d7b9d7b6703429cba4c73  patch-2.6.30-rc6.bz2
-78311c587593f514bfb7b61e0235f50b  patch-2.6.30-rc6-git6.bz2
+cc1673bc22fec3f504ffa35377997a0a  patch-2.6.30-rc8.bz2


Index: upstream
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/upstream,v
retrieving revision 1.888.2.13
retrieving revision 1.888.2.14
diff -u -p -r1.888.2.13 -r1.888.2.14
--- upstream	21 May 2009 22:11:57 -0000	1.888.2.13
+++ upstream	4 Jun 2009 19:34:58 -0000	1.888.2.14
@@ -1,3 +1,2 @@
 linux-2.6.29.tar.bz2
-patch-2.6.30-rc6.bz2
-patch-2.6.30-rc6-git6.bz2
+patch-2.6.30-rc8.bz2

xen.pvops.patch:

Index: xen.pvops.patch
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/Attic/xen.pvops.patch,v
retrieving revision 1.1.2.22
retrieving revision 1.1.2.23
diff -u -p -r1.1.2.22 -r1.1.2.23
--- xen.pvops.patch	22 May 2009 20:02:04 -0000	1.1.2.22
+++ xen.pvops.patch	4 Jun 2009 19:34:58 -0000	1.1.2.23
@@ -401,7 +401,7 @@ index 0000000..9dc1ff4
 +nr_wake=1.  nr_requeue should be INT_MAX for broadcast and 0 for
 +signal.
 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
-index e87bdbf..a768702 100644
+index fd5cac0..3e26cc6 100644
 --- a/Documentation/kernel-parameters.txt
 +++ b/Documentation/kernel-parameters.txt
 @@ -1246,6 +1246,11 @@ and is between 256 and 4096 characters. It is defined in the file
@@ -416,7 +416,7 @@ index e87bdbf..a768702 100644
  	md=		[HW] RAID subsystems devices and level
  			See Documentation/md.txt.
  
-@@ -1571,6 +1576,9 @@ and is between 256 and 4096 characters. It is defined in the file
+@@ -1575,6 +1580,9 @@ and is between 256 and 4096 characters. It is defined in the file
  	noinitrd	[RAM] Tells the kernel not to load any configured
  			initial RAM disk.
  
@@ -426,7 +426,7 @@ index e87bdbf..a768702 100644
  	nointroute	[IA-64]
  
  	nojitter	[IA64] Disables jitter checking for ITC timers.
-@@ -1656,6 +1664,15 @@ and is between 256 and 4096 characters. It is defined in the file
+@@ -1660,6 +1668,15 @@ and is between 256 and 4096 characters. It is defined in the file
  	oprofile.timer=	[HW]
  			Use timer interrupt instead of performance counters
  
@@ -6283,7 +6283,7 @@ index 0000000..594d270
 +}
 diff --git a/Documentation/perf_counter/perf.h b/Documentation/perf_counter/perf.h
 new file mode 100644
-index 0000000..81a7374
+index 0000000..6fa3656
 --- /dev/null
 +++ b/Documentation/perf_counter/perf.h
 @@ -0,0 +1,62 @@
@@ -6343,8 +6343,8 @@ index 0000000..81a7374
 +		       group_fd, flags);
 +}
 +
-+#define MAX_COUNTERS			1024
-+#define MAX_NR_CPUS			4096
++#define MAX_COUNTERS			64
++#define MAX_NR_CPUS			256
 +
 +#define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id))
 +
@@ -12206,10 +12206,10 @@ index 29b52b1..d6498e3 100644
  ffffffff80000000 - ffffffffa0000000 (=512 MB)  kernel text mapping, from phys 0
  ffffffffa0000000 - fffffffffff00000 (=1536 MB) module mapping space
 diff --git a/MAINTAINERS b/MAINTAINERS
-index 2b349ba..85826f5 100644
+index cf4abdd..d7b0244 100644
 --- a/MAINTAINERS
 +++ b/MAINTAINERS
-@@ -3333,6 +3333,14 @@ F:	drivers/serial/kgdboc.c
+@@ -3350,6 +3350,14 @@ F:	drivers/serial/kgdboc.c
  F:	include/linux/kgdb.h
  F:	kernel/kgdb.c
  
@@ -12224,7 +12224,7 @@ index 2b349ba..85826f5 100644
  KMEMTRACE
  P:	Eduard - Gabriel Munteanu
  M:	eduard.munteanu at linux360.ro
-@@ -4375,6 +4383,16 @@ S:	Maintained
+@@ -4392,6 +4400,16 @@ S:	Maintained
  F:	include/linux/delayacct.h
  F:	kernel/delayacct.c
  
@@ -12242,7 +12242,7 @@ index 2b349ba..85826f5 100644
  P:	Christoph Hellwig
  M:	hch at infradead.org
 diff --git a/Makefile b/Makefile
-index b57e1f5..593464b 100644
+index 610d1c3..03903eb 100644
 --- a/Makefile
 +++ b/Makefile
 @@ -557,6 +557,10 @@ ifdef CONFIG_FUNCTION_TRACER
@@ -17391,7 +17391,7 @@ index 8c86b72..1b68659 100644
  # drivers-y are linked after core-y
  drivers-$(CONFIG_MATH_EMULATION) += arch/x86/math-emu/
 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
-index 6633b6e..8d16ada 100644
+index 6633b6e..8979283 100644
 --- a/arch/x86/boot/Makefile
 +++ b/arch/x86/boot/Makefile
 @@ -26,9 +26,10 @@ targets		:= vmlinux.bin setup.bin setup.elf bzImage
@@ -17426,7 +17426,7 @@ index 6633b6e..8d16ada 100644
 +$(obj)/voffset.h: vmlinux FORCE
 +	$(call if_changed,voffset)
 +
-+sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
++sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
 +
 +quiet_cmd_zoffset = ZOFFSET $@
 +      cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@
@@ -18849,7 +18849,7 @@ index 1aae8f3..c501a5b 100644
  
  	return 0;
 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
-index 5d84d1c..b31cc54 100644
+index 5d84d1c..25fc36b 100644
 --- a/arch/x86/boot/header.S
 +++ b/arch/x86/boot/header.S
 @@ -22,7 +22,8 @@
@@ -18908,7 +18908,7 @@ index 5d84d1c..b31cc54 100644
  
 +pref_address:		.quad LOAD_PHYSICAL_ADDR	# preferred load addr
 +
-+#define ZO_INIT_SIZE	(ZO__end - ZO_startup_32 + ZO_z_extract_offset)
++#define ZO_INIT_SIZE	(ZO__end - ZO_startup_32 + ZO_extract_offset)
 +#define VO_INIT_SIZE	(VO__end - VO__text)
 +#if ZO_INIT_SIZE > VO_INIT_SIZE
 +#define INIT_SIZE ZO_INIT_SIZE
@@ -19052,10 +19052,10 @@ index 911eaae..a95a531 100644
  	return 0;
  }
 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
-index 5054c2d..d989de8 100644
+index 74b3d2b..82f6b2d 100644
 --- a/arch/x86/boot/memory.c
 +++ b/arch/x86/boot/memory.c
-@@ -25,12 +25,16 @@ struct e820_ext_entry {
+@@ -20,12 +20,16 @@
  static int detect_memory_e820(void)
  {
  	int count = 0;
@@ -19064,7 +19064,7 @@ index 5054c2d..d989de8 100644
 -	u8 err;
 +	struct biosregs ireg, oreg;
  	struct e820entry *desc = boot_params.e820_map;
- 	static struct e820_ext_entry buf; /* static so it is zeroed */
+ 	static struct e820entry buf; /* static so it is zeroed */
  
 +	initregs(&ireg);
 +	ireg.ax  = 0xe820;
@@ -19073,10 +19073,10 @@ index 5054c2d..d989de8 100644
 +	ireg.di  = (size_t)&buf;
 +
  	/*
- 	 * Set this here so that if the BIOS doesn't change this field
- 	 * but still doesn't change %ecx, we're still okay...
-@@ -38,22 +42,13 @@ static int detect_memory_e820(void)
- 	buf.ext_flags = 1;
+ 	 * Note: at least one BIOS is known which assumes that the
+ 	 * buffer pointed to by one e820 call is the same one as
+@@ -41,22 +45,13 @@ static int detect_memory_e820(void)
+ 	 */
  
  	do {
 -		size = sizeof buf;
@@ -19101,7 +19101,7 @@ index 5054c2d..d989de8 100644
  			break;
  
  		/* Some BIOSes stop returning SMAP in the middle of
-@@ -61,7 +56,7 @@ static int detect_memory_e820(void)
+@@ -64,60 +59,70 @@ static int detect_memory_e820(void)
  		   screwed up the map at that point, we might have a
  		   partial map, the full map, or complete garbage, so
  		   just return failure. */
@@ -19110,15 +19110,15 @@ index 5054c2d..d989de8 100644
  			count = 0;
  			break;
  		}
-@@ -69,58 +64,62 @@ static int detect_memory_e820(void)
- 		/* ACPI 3.0 added the extended flags support.  If bit 0
- 		   in the extended flags is zero, we're supposed to simply
- 		   ignore the entry -- a backwards incompatible change! */
--		if (size > 20 && !(buf.ext_flags & 1))
-+		if (oreg.cx > 20 && !(buf.ext_flags & 1))
- 			continue;
  
- 		*desc++ = buf.std;
+-		*desc++ = buf;
++		/* ACPI 3.0 added the extended flags support.  If bit 0
++		   in the extended flags is zero, we're supposed to simply
++		   ignore the entry -- a backwards incompatible change! */
++		if (oreg.cx > 20 && !(buf.ext_flags & 1))
++			continue;
++
++		*desc++ = buf.std;
  		count++;
 -	} while (next && count < ARRAY_SIZE(boot_params.e820_map));
 +	} while (ireg.ebx && count < ARRAY_SIZE(boot_params.e820_map));
@@ -25709,18 +25709,10 @@ index 0000000..6206033
 @@ -0,0 +1 @@
 +#include "../../../boot/regs.c"
 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
-index f287092..9e2c551 100644
+index f287092..9376ff4 100644
 --- a/arch/x86/kernel/apic/apic.c
 +++ b/arch/x86/kernel/apic/apic.c
-@@ -14,6 +14,7 @@
-  *	Mikael Pettersson	:	PM converted to driver model.
-  */
- 
-+#include <linux/perf_counter.h>
- #include <linux/kernel_stat.h>
- #include <linux/mc146818rtc.h>
- #include <linux/acpi_pmtmr.h>
-@@ -34,6 +35,7 @@
+@@ -34,6 +34,7 @@
  #include <linux/smp.h>
  #include <linux/mm.h>
  
@@ -25728,7 +25720,7 @@ index f287092..9e2c551 100644
  #include <asm/pgalloc.h>
  #include <asm/atomic.h>
  #include <asm/mpspec.h>
-@@ -98,6 +100,29 @@ early_param("lapic", parse_lapic);
+@@ -98,6 +99,29 @@ early_param("lapic", parse_lapic);
  /* Local APIC was disabled by the BIOS and enabled by the kernel */
  static int enabled_via_apicbase;
  
@@ -25758,7 +25750,7 @@ index f287092..9e2c551 100644
  #endif
  
  #ifdef CONFIG_X86_64
-@@ -111,13 +136,19 @@ static __init int setup_apicpmtimer(char *s)
+@@ -111,13 +135,19 @@ static __init int setup_apicpmtimer(char *s)
  __setup("apicpmtimer", setup_apicpmtimer);
  #endif
  
@@ -25779,7 +25771,7 @@ index f287092..9e2c551 100644
  	disable_x2apic = 1;
  	setup_clear_cpu_cap(X86_FEATURE_X2APIC);
  	return 0;
-@@ -209,6 +240,31 @@ static int modern_apic(void)
+@@ -209,6 +239,31 @@ static int modern_apic(void)
  	return lapic_get_version() >= 0x14;
  }
  
@@ -25811,7 +25803,7 @@ index f287092..9e2c551 100644
  void native_apic_wait_icr_idle(void)
  {
  	while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
-@@ -348,7 +404,7 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
+@@ -348,7 +403,7 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
  
  static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
  {
@@ -25820,7 +25812,7 @@ index f287092..9e2c551 100644
  	unsigned int  v   = (mask << 16) | (msg_type << 8) | vector;
  
  	apic_write(reg, v);
-@@ -761,6 +817,8 @@ static void local_apic_timer_interrupt(void)
+@@ -761,6 +816,8 @@ static void local_apic_timer_interrupt(void)
  	inc_irq_stat(apic_timer_irqs);
  
  	evt->event_handler(evt);
@@ -25829,7 +25821,7 @@ index f287092..9e2c551 100644
  }
  
  /*
-@@ -815,7 +873,7 @@ void clear_local_APIC(void)
+@@ -815,7 +872,7 @@ void clear_local_APIC(void)
  	u32 v;
  
  	/* APIC hasn't been mapped yet */
@@ -25838,7 +25830,7 @@ index f287092..9e2c551 100644
  		return;
  
  	maxlvt = lapic_get_maxlvt();
-@@ -1133,6 +1191,7 @@ void __cpuinit setup_local_APIC(void)
+@@ -1133,6 +1190,7 @@ void __cpuinit setup_local_APIC(void)
  		apic_write(APIC_ESR, 0);
  	}
  #endif
@@ -25846,7 +25838,7 @@ index f287092..9e2c551 100644
  
  	preempt_disable();
  
-@@ -1287,7 +1346,7 @@ void check_x2apic(void)
+@@ -1287,7 +1345,7 @@ void check_x2apic(void)
  {
  	if (x2apic_enabled()) {
  		pr_info("x2apic enabled by BIOS, switching to x2apic ops\n");
@@ -25855,7 +25847,7 @@ index f287092..9e2c551 100644
  	}
  }
  
-@@ -1295,7 +1354,7 @@ void enable_x2apic(void)
+@@ -1295,7 +1353,7 @@ void enable_x2apic(void)
  {
  	int msr, msr2;
  
@@ -25864,7 +25856,7 @@ index f287092..9e2c551 100644
  		return;
  
  	rdmsr(MSR_IA32_APICBASE, msr, msr2);
-@@ -1304,6 +1363,7 @@ void enable_x2apic(void)
+@@ -1304,6 +1362,7 @@ void enable_x2apic(void)
  		wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
  	}
  }
@@ -25872,7 +25864,7 @@ index f287092..9e2c551 100644
  
  void __init enable_IR_x2apic(void)
  {
-@@ -1312,32 +1372,21 @@ void __init enable_IR_x2apic(void)
+@@ -1312,32 +1371,21 @@ void __init enable_IR_x2apic(void)
  	unsigned long flags;
  	struct IO_APIC_route_entry **ioapic_entries = NULL;
  
@@ -25915,7 +25907,7 @@ index f287092..9e2c551 100644
  		return;
  	}
  
-@@ -1357,19 +1406,16 @@ void __init enable_IR_x2apic(void)
+@@ -1357,19 +1405,16 @@ void __init enable_IR_x2apic(void)
  	mask_IO_APIC_setup(ioapic_entries);
  	mask_8259A();
  
@@ -25941,7 +25933,7 @@ index f287092..9e2c551 100644
  	}
  
  end_restore:
-@@ -1378,37 +1424,34 @@ end_restore:
+@@ -1378,37 +1423,34 @@ end_restore:
  		 * IR enabling failed
  		 */
  		restore_IO_APIC_setup(ioapic_entries);
@@ -25990,7 +25982,7 @@ index f287092..9e2c551 100644
  
  #ifdef CONFIG_X86_64
  /*
-@@ -1425,7 +1468,6 @@ static int __init detect_init_APIC(void)
+@@ -1425,7 +1467,6 @@ static int __init detect_init_APIC(void)
  	}
  
  	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
@@ -25998,7 +25990,7 @@ index f287092..9e2c551 100644
  	return 0;
  }
  #else
-@@ -1539,32 +1581,42 @@ void __init early_init_lapic_mapping(void)
+@@ -1539,32 +1580,42 @@ void __init early_init_lapic_mapping(void)
   */
  void __init init_apic_mappings(void)
  {
@@ -26055,7 +26047,7 @@ index f287092..9e2c551 100644
  }
  
  /*
-@@ -1733,8 +1785,7 @@ void __init connect_bsp_APIC(void)
+@@ -1733,8 +1784,7 @@ void __init connect_bsp_APIC(void)
  		 */
  		apic_printk(APIC_VERBOSE, "leaving PIC mode, "
  				"enabling APIC mode.\n");
@@ -26065,7 +26057,7 @@ index f287092..9e2c551 100644
  	}
  #endif
  	if (apic->enable_apic_mode)
-@@ -1762,8 +1813,7 @@ void disconnect_bsp_APIC(int virt_wire_setup)
+@@ -1762,8 +1812,7 @@ void disconnect_bsp_APIC(int virt_wire_setup)
  		 */
  		apic_printk(APIC_VERBOSE, "disabling APIC mode, "
  				"entering PIC mode.\n");
@@ -26075,7 +26067,7 @@ index f287092..9e2c551 100644
  		return;
  	}
  #endif
-@@ -1969,10 +2019,10 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state)
+@@ -1969,10 +2018,10 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state)
  
  	local_irq_save(flags);
  	disable_local_APIC();
@@ -26088,7 +26080,7 @@ index f287092..9e2c551 100644
  	local_irq_restore(flags);
  	return 0;
  }
-@@ -1982,8 +2032,6 @@ static int lapic_resume(struct sys_device *dev)
+@@ -1982,8 +2031,6 @@ static int lapic_resume(struct sys_device *dev)
  	unsigned int l, h;
  	unsigned long flags;
  	int maxlvt;
@@ -26097,7 +26089,7 @@ index f287092..9e2c551 100644
  	int ret;
  	struct IO_APIC_route_entry **ioapic_entries = NULL;
  
-@@ -1991,7 +2039,7 @@ static int lapic_resume(struct sys_device *dev)
+@@ -1991,7 +2038,7 @@ static int lapic_resume(struct sys_device *dev)
  		return 0;
  
  	local_irq_save(flags);
@@ -26106,7 +26098,7 @@ index f287092..9e2c551 100644
  		ioapic_entries = alloc_ioapic_entries();
  		if (!ioapic_entries) {
  			WARN(1, "Alloc ioapic_entries in lapic resume failed.");
-@@ -2007,17 +2055,10 @@ static int lapic_resume(struct sys_device *dev)
+@@ -2007,17 +2054,10 @@ static int lapic_resume(struct sys_device *dev)
  
  		mask_IO_APIC_setup(ioapic_entries);
  		mask_8259A();
@@ -26125,7 +26117,7 @@ index f287092..9e2c551 100644
  	else {
  		/*
  		 * Make sure the APICBASE points to the right address
-@@ -2055,20 +2096,15 @@ static int lapic_resume(struct sys_device *dev)
+@@ -2055,20 +2095,15 @@ static int lapic_resume(struct sys_device *dev)
  	apic_write(APIC_ESR, 0);
  	apic_read(APIC_ESR);
  
@@ -26148,7 +26140,7 @@ index f287092..9e2c551 100644
  	return 0;
  }
  
-@@ -2117,31 +2153,14 @@ static void apic_pm_activate(void) { }
+@@ -2117,31 +2152,14 @@ static void apic_pm_activate(void) { }
  #endif	/* CONFIG_PM */
  
  #ifdef CONFIG_X86_64
@@ -26182,7 +26174,7 @@ index f287092..9e2c551 100644
  	bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
  	bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
  
-@@ -2177,18 +2196,67 @@ __cpuinit int apic_is_clustered_box(void)
+@@ -2177,18 +2195,67 @@ __cpuinit int apic_is_clustered_box(void)
  			++zeros;
  	}
  
@@ -27992,7 +27984,7 @@ index 7e4a459..728b375 100644
  	node = c->phys_proc_id;
  	if (apicid_to_node[apicid] != NUMA_NO_NODE)
 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index c1caefc..5a60535 100644
+index 77848d9..3ffdcfa 100644
 --- a/arch/x86/kernel/cpu/common.c
 +++ b/arch/x86/kernel/cpu/common.c
 @@ -13,6 +13,7 @@
@@ -28003,7 +27995,7 @@ index c1caefc..5a60535 100644
  #include <asm/mmu_context.h>
  #include <asm/hypervisor.h>
  #include <asm/processor.h>
-@@ -292,7 +293,8 @@ static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
+@@ -299,7 +300,8 @@ static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
  	return NULL;		/* Not found */
  }
  
@@ -28013,7 +28005,7 @@ index c1caefc..5a60535 100644
  
  void load_percpu_segment(int cpu)
  {
-@@ -761,6 +763,12 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+@@ -768,6 +770,12 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
  	if (this_cpu->c_identify)
  		this_cpu->c_identify(c);
  
@@ -28026,7 +28018,7 @@ index c1caefc..5a60535 100644
  #ifdef CONFIG_X86_64
  	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
  #endif
-@@ -806,6 +814,16 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+@@ -813,6 +821,16 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
  #endif
  
  	init_hypervisor(c);
@@ -28043,7 +28035,7 @@ index c1caefc..5a60535 100644
  	/*
  	 * On SMP, boot_cpu_data holds the common feature set between
  	 * all CPUs; so make sure that we indicate which features are
-@@ -818,10 +836,6 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+@@ -825,10 +843,6 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
  			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
  	}
  
@@ -28054,7 +28046,7 @@ index c1caefc..5a60535 100644
  #ifdef CONFIG_X86_MCE
  	/* Init Machine Check Exception if available. */
  	mcheck_init(c);
-@@ -854,6 +868,7 @@ void __init identify_boot_cpu(void)
+@@ -861,6 +875,7 @@ void __init identify_boot_cpu(void)
  #else
  	vgetcpu_set_mode();
  #endif
@@ -35376,25 +35368,6 @@ index 7563b31..af71d06 100644
 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK,
 +			quirk_amd_nb_node);
  #endif
-diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
-index 1340dad..667188e 100644
---- a/arch/x86/kernel/reboot.c
-+++ b/arch/x86/kernel/reboot.c
-@@ -232,6 +232,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
- 			DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"),
- 		},
- 	},
-+	{	/* Handle problems with rebooting on Sony VGN-Z540N */
-+		.callback = set_bios_reboot,
-+		.ident = "Sony VGN-Z540N",
-+		.matches = {
-+			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
-+			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"),
-+		},
-+	},
- 	{ }
- };
- 
 diff --git a/arch/x86/kernel/scx200_32.c b/arch/x86/kernel/scx200_32.c
 index 7e004ac..1b6e3d1 100644
 --- a/arch/x86/kernel/scx200_32.c
@@ -35517,10 +35490,10 @@ index b415843..ee49616 100644
   *
   * Description:
 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
-index 3a97a4c..3b5f327 100644
+index 8f0e13b..9c3f082 100644
 --- a/arch/x86/kernel/setup_percpu.c
 +++ b/arch/x86/kernel/setup_percpu.c
-@@ -423,6 +423,14 @@ void __init setup_per_cpu_areas(void)
+@@ -425,6 +425,14 @@ void __init setup_per_cpu_areas(void)
  	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
  #endif
  
@@ -39363,10 +39336,10 @@ index 2d05a12..459913b 100644
  	return 0;
  }
 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
-index 797f9f1..4fa8996 100644
+index e17efed..3cfe9ce 100644
 --- a/arch/x86/mm/pageattr.c
 +++ b/arch/x86/mm/pageattr.c
-@@ -475,7 +475,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
+@@ -470,7 +470,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
  
  	if (!debug_pagealloc)
  		spin_unlock(&cpa_lock);
@@ -39375,7 +39348,7 @@ index 797f9f1..4fa8996 100644
  	if (!debug_pagealloc)
  		spin_lock(&cpa_lock);
  	if (!base)
-@@ -844,13 +844,6 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
+@@ -839,13 +839,6 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
  
  	vm_unmap_aliases();
  
@@ -39389,7 +39362,7 @@ index 797f9f1..4fa8996 100644
  	cpa.vaddr = addr;
  	cpa.pages = pages;
  	cpa.numpages = numpages;
-@@ -895,13 +888,6 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
+@@ -890,13 +883,6 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
  	} else
  		cpa_flush_all(cache);
  
@@ -43015,10 +42988,10 @@ index 51b9f82..2faa9e2 100644
  		dev_warn(&dev->dev, "PCI INT %c: failed to register GSI\n",
  			 pin_name(pin));
 diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
-index cafb410..85af717 100644
+index 60e543d..717c770 100644
 --- a/drivers/acpi/processor_perflib.c
 +++ b/drivers/acpi/processor_perflib.c
-@@ -348,7 +348,11 @@ static int acpi_processor_get_performance_info(struct acpi_processor *pr)
+@@ -354,7 +354,11 @@ static int acpi_processor_get_performance_info(struct acpi_processor *pr)
  	if (result)
  		goto update_bios;
  
@@ -43532,7 +43505,7 @@ index afd9247..e550151 100644
 -MODULE_DEVICE_TABLE(pci, ip2main_pci_tbl);
 +MODULE_STATIC_DEVICE_TABLE(pci, ip2main_pci_tbl);
 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
-index aa83a08..6768777 100644
+index 0905079..11a1e2d 100644
 --- a/drivers/char/ipmi/ipmi_msghandler.c
 +++ b/drivers/char/ipmi/ipmi_msghandler.c
 @@ -1812,7 +1812,8 @@ int ipmi_request_settime(ipmi_user_t      user,
@@ -44641,7 +44614,7 @@ index 214a92d..e08fc22 100644
  	help
  	  The network device frontend driver allows the kernel to
 diff --git a/drivers/net/Makefile b/drivers/net/Makefile
-index 1fc4602..3665633 100644
+index a1c25cb..bd168d1 100644
 --- a/drivers/net/Makefile
 +++ b/drivers/net/Makefile
 @@ -113,7 +113,7 @@ ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
@@ -44885,33 +44858,6 @@ index 8574622..0c6cb40 100644
  /* The function can be used to add a buffer worth of data directly to
   * the kernel buffer. The buffer is assumed to be a circular buffer.
   * Take the entries from index start and end at index end, wrapping
-diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
-index f0e99d4..242257b 100644
---- a/drivers/oprofile/cpu_buffer.c
-+++ b/drivers/oprofile/cpu_buffer.c
-@@ -78,16 +78,20 @@ void free_cpu_buffers(void)
- 	op_ring_buffer_write = NULL;
- }
- 
-+#define RB_EVENT_HDR_SIZE 4
-+
- int alloc_cpu_buffers(void)
- {
- 	int i;
- 
- 	unsigned long buffer_size = oprofile_cpu_buffer_size;
-+	unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
-+						 RB_EVENT_HDR_SIZE);
- 
--	op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
-+	op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
- 	if (!op_ring_buffer_read)
- 		goto fail;
--	op_ring_buffer_write = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
-+	op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
- 	if (!op_ring_buffer_write)
- 		goto fail;
- 
 diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
 index 73348c4..4a9cc92 100644
 --- a/drivers/parisc/iosapic.c
@@ -45658,10 +45604,10 @@ index cfe8685..34742f2 100644
 +
  EXPORT_SYMBOL(pnpbios_protocol);
 diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
-index 8ed2990..1ff1d88 100644
+index fb27407..7ab4c6d 100644
 --- a/drivers/scsi/Kconfig
 +++ b/drivers/scsi/Kconfig
-@@ -1545,6 +1545,13 @@ config SCSI_NSP32
+@@ -1556,6 +1556,13 @@ config SCSI_NSP32
  config SCSI_DEBUG
  	tristate "SCSI debugging host simulator"
  	depends on SCSI
@@ -56311,10 +56257,33 @@ index 0e06c17..8a11f04 100644
  
  #ifdef MODULE
 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
-index d87247d..889bf99 100644
+index d87247d..0d91196 100644
 --- a/include/linux/init_task.h
 +++ b/include/linux/init_task.h
-@@ -174,6 +174,7 @@ extern struct cred init_cred;
+@@ -108,6 +108,18 @@ extern struct group_info init_groups;
+ 
+ extern struct cred init_cred;
+ 
++#ifdef CONFIG_PERF_COUNTERS
++# define INIT_PERF_COUNTERS(tsk)					\
++	.perf_counter_ctx.counter_list =				\
++		LIST_HEAD_INIT(tsk.perf_counter_ctx.counter_list),	\
++	.perf_counter_ctx.event_list =					\
++		LIST_HEAD_INIT(tsk.perf_counter_ctx.event_list),	\
++	.perf_counter_ctx.lock =					\
++		__SPIN_LOCK_UNLOCKED(tsk.perf_counter_ctx.lock),
++#else
++# define INIT_PERF_COUNTERS(tsk)
++#endif
++
+ /*
+  *  INIT_TASK is used to set up the first task table, touch at
+  * your own risk!. Base=0, limit=0x1fffff (=2MB)
+@@ -171,9 +183,11 @@ extern struct cred init_cred;
+ 	},								\
+ 	.dirties = INIT_PROP_LOCAL_SINGLE(dirties),			\
+ 	INIT_IDS							\
++	INIT_PERF_COUNTERS(tsk)						\
  	INIT_TRACE_IRQFLAGS						\
  	INIT_LOCKDEP							\
  	INIT_FTRACE_GRAPH						\
@@ -56953,10 +56922,10 @@ index 72698d8..75b0645 100644
  #endif /* LINUX_PCI_H */
 diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
 new file mode 100644
-index 0000000..2eedae8
+index 0000000..f612941
 --- /dev/null
 +++ b/include/linux/perf_counter.h
-@@ -0,0 +1,659 @@
+@@ -0,0 +1,650 @@
 +/*
 + *  Performance counters:
 + *
@@ -57408,6 +57377,7 @@ index 0000000..2eedae8
 +	struct hw_perf_counter		hw;
 +
 +	struct perf_counter_context	*ctx;
++	struct task_struct		*task;
 +	struct file			*filp;
 +
 +	struct perf_counter		*parent;
@@ -57456,6 +57426,7 @@ index 0000000..2eedae8
 + * Used as a container for task counters and CPU counters as well:
 + */
 +struct perf_counter_context {
++#ifdef CONFIG_PERF_COUNTERS
 +	/*
 +	 * Protect the states of the counters in the list,
 +	 * nr_active, and the list:
@@ -57472,9 +57443,7 @@ index 0000000..2eedae8
 +	struct list_head	event_list;
 +	int			nr_counters;
 +	int			nr_active;
-+	int			nr_enabled;
 +	int			is_active;
-+	atomic_t		refcount;
 +	struct task_struct	*task;
 +
 +	/*
@@ -57482,14 +57451,7 @@ index 0000000..2eedae8
 +	 */
 +	u64			time;
 +	u64			timestamp;
-+
-+	/*
-+	 * These fields let us detect when two contexts have both
-+	 * been cloned (inherited) from a common ancestor.
-+	 */
-+	struct perf_counter_context *parent_ctx;
-+	u32			parent_gen;
-+	u32			generation;
++#endif
 +};
 +
 +/**
@@ -57520,8 +57482,7 @@ index 0000000..2eedae8
 +extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter);
 +
 +extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
-+extern void perf_counter_task_sched_out(struct task_struct *task,
-+					struct task_struct *next, int cpu);
++extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
 +extern void perf_counter_task_tick(struct task_struct *task, int cpu);
 +extern void perf_counter_init_task(struct task_struct *child);
 +extern void perf_counter_exit_task(struct task_struct *child);
@@ -57584,8 +57545,7 @@ index 0000000..2eedae8
 +static inline void
 +perf_counter_task_sched_in(struct task_struct *task, int cpu)		{ }
 +static inline void
-+perf_counter_task_sched_out(struct task_struct *task,
-+			    struct task_struct *next, int cpu)		{ }
++perf_counter_task_sched_out(struct task_struct *task, int cpu)		{ }
 +static inline void
 +perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
 +static inline void perf_counter_init_task(struct task_struct *child)	{ }
@@ -57922,10 +57882,17 @@ index e1b7b21..4016b52 100644
  	RB_FL_OVERWRITE		= 1 << 0,
  };
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index b4c38bc..5396cf5 100644
+index b4c38bc..fe68809 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -77,6 +77,7 @@ struct sched_param {
+@@ -71,12 +71,14 @@ struct sched_param {
+ #include <linux/path.h>
+ #include <linux/compiler.h>
+ #include <linux/completion.h>
++#include <linux/perf_counter.h>
+ #include <linux/pid.h>
+ #include <linux/percpu.h>
+ #include <linux/topology.h>
  #include <linux/proportions.h>
  #include <linux/seccomp.h>
  #include <linux/rcupdate.h>
@@ -57933,13 +57900,12 @@ index b4c38bc..5396cf5 100644
  #include <linux/rtmutex.h>
  
  #include <linux/time.h>
-@@ -96,8 +97,9 @@ struct exec_domain;
+@@ -96,8 +98,8 @@ struct exec_domain;
  struct futex_pi_state;
  struct robust_list_head;
  struct bio;
 -struct bts_tracer;
  struct fs_struct;
-+struct perf_counter_context;
 +struct bts_context;
  
  /*
@@ -58045,17 +58011,15 @@ index b4c38bc..5396cf5 100644
  
  	/* PID/PID hash table linkage. */
  	struct pid_link pids[PIDTYPE_MAX];
-@@ -1380,6 +1401,9 @@ struct task_struct {
+@@ -1380,6 +1401,7 @@ struct task_struct {
  	struct list_head pi_state_list;
  	struct futex_pi_state *pi_state_cache;
  #endif
-+#ifdef CONFIG_PERF_COUNTERS
-+	struct perf_counter_context *perf_counter_ctxp;
-+#endif
++	struct perf_counter_context perf_counter_ctx;
  #ifdef CONFIG_NUMA
  	struct mempolicy *mempolicy;
  	short il_next;
-@@ -1428,7 +1452,9 @@ struct task_struct {
+@@ -1428,7 +1450,9 @@ struct task_struct {
  #ifdef CONFIG_TRACING
  	/* state flags for use by tracers */
  	unsigned long trace;
@@ -58066,7 +58030,7 @@ index b4c38bc..5396cf5 100644
  };
  
  /* Future-safe accessor for struct task_struct's cpus_allowed. */
-@@ -2001,8 +2027,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
+@@ -2001,8 +2025,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
  extern char *get_task_comm(char *to, struct task_struct *tsk);
  
  #ifdef CONFIG_SMP
@@ -58077,7 +58041,7 @@ index b4c38bc..5396cf5 100644
  static inline unsigned long wait_task_inactive(struct task_struct *p,
  					       long match_state)
  {
-@@ -2010,7 +2038,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
+@@ -2010,7 +2036,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
  }
  #endif
  
@@ -58087,7 +58051,7 @@ index b4c38bc..5396cf5 100644
  
  #define for_each_process(p) \
  	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
-@@ -2049,8 +2078,8 @@ int same_thread_group(struct task_struct *p1, struct task_struct *p2)
+@@ -2049,8 +2076,8 @@ int same_thread_group(struct task_struct *p1, struct task_struct *p2)
  
  static inline struct task_struct *next_thread(const struct task_struct *p)
  {
@@ -58098,7 +58062,7 @@ index b4c38bc..5396cf5 100644
  }
  
  static inline int thread_group_empty(struct task_struct *p)
-@@ -2388,6 +2417,13 @@ static inline void inc_syscw(struct task_struct *tsk)
+@@ -2388,6 +2415,13 @@ static inline void inc_syscw(struct task_struct *tsk)
  #define TASK_SIZE_OF(tsk)	TASK_SIZE
  #endif
  
@@ -62527,7 +62491,7 @@ index dd7ee5f..d22664f 100644
   
  #ifdef CONFIG_ROOT_NFS
 diff --git a/init/main.c b/init/main.c
-index 3bbf93b..33ce929 100644
+index d721dad..5b7c983 100644
 --- a/init/main.c
 +++ b/init/main.c
 @@ -62,8 +62,10 @@
@@ -62549,7 +62513,7 @@ index 3bbf93b..33ce929 100644
  
  #ifdef CONFIG_X86_LOCAL_APIC
  #include <asm/smp.h>
-@@ -787,6 +788,9 @@ static void __init do_pre_smp_initcalls(void)
+@@ -786,6 +787,9 @@ static void __init do_pre_smp_initcalls(void)
  {
  	initcall_t *call;
  
@@ -62590,7 +62554,7 @@ index 4242366..90b53f6 100644
  ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
  # According to Alan Modra <alan at linuxcare.com.au>, the -fno-omit-frame-pointer is
 diff --git a/kernel/async.c b/kernel/async.c
-index 968ef94..f565891 100644
+index 5054030..94dd36f 100644
 --- a/kernel/async.c
 +++ b/kernel/async.c
 @@ -49,7 +49,6 @@ asynchronous and synchronous parts of the kernel.
@@ -62601,7 +62565,7 @@ index 968ef94..f565891 100644
  #include <linux/module.h>
  #include <linux/wait.h>
  #include <linux/sched.h>
-@@ -388,11 +387,20 @@ static int async_manager_thread(void *unused)
+@@ -392,11 +391,20 @@ static int async_manager_thread(void *unused)
  
  static int __init async_init(void)
  {
@@ -62649,20 +62613,19 @@ index 42d5654..f6c204f 100644
  
  /* compat_time_t is a 32 bit "long" and needs to get converted. */
 diff --git a/kernel/exit.c b/kernel/exit.c
-index abf9cf3..49cdf69 100644
+index abf9cf3..c1cd352 100644
 --- a/kernel/exit.c
 +++ b/kernel/exit.c
-@@ -48,7 +48,8 @@
+@@ -48,7 +48,7 @@
  #include <linux/tracehook.h>
  #include <linux/fs_struct.h>
  #include <linux/init_task.h>
 -#include <trace/sched.h>
-+#include <linux/perf_counter.h>
 +#include <trace/events/sched.h>
  
  #include <asm/uaccess.h>
  #include <asm/unistd.h>
-@@ -56,10 +57,6 @@
+@@ -56,10 +56,6 @@
  #include <asm/mmu_context.h>
  #include "cred-internals.h"
  
@@ -62673,17 +62636,17 @@ index abf9cf3..49cdf69 100644
  static void exit_mm(struct task_struct * tsk);
  
  static void __unhash_process(struct task_struct *p)
-@@ -158,6 +155,9 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
+@@ -158,6 +154,9 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
  {
  	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
  
 +#ifdef CONFIG_PERF_COUNTERS
-+	WARN_ON_ONCE(tsk->perf_counter_ctxp);
++	WARN_ON_ONCE(!list_empty(&tsk->perf_counter_ctx.counter_list));
 +#endif
  	trace_sched_process_free(tsk);
  	put_task_struct(tsk);
  }
-@@ -174,6 +174,7 @@ repeat:
+@@ -174,6 +173,7 @@ repeat:
  	atomic_dec(&__task_cred(p)->user->processes);
  
  	proc_flush_task(p);
@@ -62691,7 +62654,7 @@ index abf9cf3..49cdf69 100644
  	write_lock_irq(&tasklist_lock);
  	tracehook_finish_release_task(p);
  	__exit_signal(p);
-@@ -975,16 +976,19 @@ NORET_TYPE void do_exit(long code)
+@@ -975,16 +975,19 @@ NORET_TYPE void do_exit(long code)
  		module_put(tsk->binfmt->module);
  
  	proc_exit_connector(tsk);
@@ -62716,20 +62679,18 @@ index abf9cf3..49cdf69 100644
  		exit_pi_state_list(tsk);
  	if (unlikely(current->pi_state_cache))
 diff --git a/kernel/fork.c b/kernel/fork.c
-index b9e2edd..95a374e 100644
+index b9e2edd..60a473e 100644
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -61,8 +61,8 @@
+@@ -61,7 +61,6 @@
  #include <linux/proc_fs.h>
  #include <linux/blkdev.h>
  #include <linux/fs_struct.h>
 -#include <trace/sched.h>
  #include <linux/magic.h>
-+#include <linux/perf_counter.h>
  
  #include <asm/pgtable.h>
- #include <asm/pgalloc.h>
-@@ -71,6 +71,8 @@
+@@ -71,6 +70,8 @@
  #include <asm/cacheflush.h>
  #include <asm/tlbflush.h>
  
@@ -62738,7 +62699,7 @@ index b9e2edd..95a374e 100644
  /*
   * Protected counters by write_lock_irq(&tasklist_lock)
   */
-@@ -83,8 +85,6 @@ DEFINE_PER_CPU(unsigned long, process_counts) = 0;
+@@ -83,8 +84,6 @@ DEFINE_PER_CPU(unsigned long, process_counts) = 0;
  
  __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
  
@@ -62747,7 +62708,7 @@ index b9e2edd..95a374e 100644
  int nr_processes(void)
  {
  	int cpu;
-@@ -178,7 +178,7 @@ void __init fork_init(unsigned long mempages)
+@@ -178,7 +177,7 @@ void __init fork_init(unsigned long mempages)
  	/* create a slab on which task_structs can be allocated */
  	task_struct_cachep =
  		kmem_cache_create("task_struct", sizeof(struct task_struct),
@@ -62756,7 +62717,7 @@ index b9e2edd..95a374e 100644
  #endif
  
  	/* do the arch specific task caches init */
-@@ -983,6 +983,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -983,6 +982,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
  		goto fork_out;
  
  	rt_mutex_init_task(p);
@@ -62764,7 +62725,7 @@ index b9e2edd..95a374e 100644
  
  #ifdef CONFIG_PROVE_LOCKING
  	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
-@@ -1089,8 +1090,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1089,8 +1089,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
  #ifdef CONFIG_DEBUG_MUTEXES
  	p->blocked_on = NULL; /* not blocked yet */
  #endif
@@ -62775,7 +62736,7 @@ index b9e2edd..95a374e 100644
  
  	/* Perform scheduler related setup. Assign this task to a CPU. */
  	sched_fork(p, clone_flags);
-@@ -1461,20 +1462,20 @@ void __init proc_caches_init(void)
+@@ -1461,20 +1461,20 @@ void __init proc_caches_init(void)
  {
  	sighand_cachep = kmem_cache_create("sighand_cache",
  			sizeof(struct sighand_struct), 0,
@@ -64973,10 +64934,10 @@ index 507cf2b..947b3ad 100644
 +EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
 diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
 new file mode 100644
-index 0000000..c100554
+index 0000000..08584c1
 --- /dev/null
 +++ b/kernel/perf_counter.c
-@@ -0,0 +1,3719 @@
+@@ -0,0 +1,3584 @@
 +/*
 + * Performance counter core code
 + *
@@ -65076,20 +65037,6 @@ index 0000000..c100554
 +		hw_perf_enable();
 +}
 +
-+static void get_ctx(struct perf_counter_context *ctx)
-+{
-+	atomic_inc(&ctx->refcount);
-+}
-+
-+static void put_ctx(struct perf_counter_context *ctx)
-+{
-+	if (atomic_dec_and_test(&ctx->refcount)) {
-+		if (ctx->parent_ctx)
-+			put_ctx(ctx->parent_ctx);
-+		kfree(ctx);
-+	}
-+}
-+
 +static void
 +list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 +{
@@ -65109,24 +65056,14 @@ index 0000000..c100554
 +
 +	list_add_rcu(&counter->event_entry, &ctx->event_list);
 +	ctx->nr_counters++;
-+	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
-+		ctx->nr_enabled++;
 +}
 +
-+/*
-+ * Remove a counter from the lists for its context.
-+ * Must be called with counter->mutex and ctx->mutex held.
-+ */
 +static void
 +list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 +{
 +	struct perf_counter *sibling, *tmp;
 +
-+	if (list_empty(&counter->list_entry))
-+		return;
 +	ctx->nr_counters--;
-+	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
-+		ctx->nr_enabled--;
 +
 +	list_del_init(&counter->list_entry);
 +	list_del_rcu(&counter->event_entry);
@@ -65190,22 +65127,6 @@ index 0000000..c100554
 +}
 +
 +/*
-+ * Mark this context as not being a clone of another.
-+ * Called when counters are added to or removed from this context.
-+ * We also increment our generation number so that anything that
-+ * was cloned from this context before this will not match anything
-+ * cloned from this context after this.
-+ */
-+static void unclone_ctx(struct perf_counter_context *ctx)
-+{
-+	++ctx->generation;
-+	if (!ctx->parent_ctx)
-+		return;
-+	put_ctx(ctx->parent_ctx);
-+	ctx->parent_ctx = NULL;
-+}
-+
-+/*
 + * Cross CPU call to remove a performance counter
 + *
 + * We disable the counter on the hardware level first. After that we
@@ -65235,6 +65156,8 @@ index 0000000..c100554
 +
 +	counter_sched_out(counter, cpuctx, ctx);
 +
++	counter->task = NULL;
++
 +	list_del_counter(counter, ctx);
 +
 +	if (!ctx->task) {
@@ -65265,7 +65188,6 @@ index 0000000..c100554
 +	struct perf_counter_context *ctx = counter->ctx;
 +	struct task_struct *task = ctx->task;
 +
-+	unclone_ctx(ctx);
 +	if (!task) {
 +		/*
 +		 * Per cpu counters are removed via an smp call and
@@ -65297,6 +65219,7 @@ index 0000000..c100554
 +	 */
 +	if (!list_empty(&counter->list_entry)) {
 +		list_del_counter(counter, ctx);
++		counter->task = NULL;
 +	}
 +	spin_unlock_irq(&ctx->lock);
 +}
@@ -65381,7 +65304,6 @@ index 0000000..c100554
 +		else
 +			counter_sched_out(counter, cpuctx, ctx);
 +		counter->state = PERF_COUNTER_STATE_OFF;
-+		ctx->nr_enabled--;
 +	}
 +
 +	spin_unlock_irqrestore(&ctx->lock, flags);
@@ -65423,7 +65345,6 @@ index 0000000..c100554
 +	if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
 +		update_counter_times(counter);
 +		counter->state = PERF_COUNTER_STATE_OFF;
-+		ctx->nr_enabled--;
 +	}
 +
 +	spin_unlock_irq(&ctx->lock);
@@ -65587,17 +65508,11 @@ index 0000000..c100554
 +	 * If this is a task context, we need to check whether it is
 +	 * the current task context of this cpu. If not it has been
 +	 * scheduled out before the smp call arrived.
-+	 * Or possibly this is the right context but it isn't
-+	 * on this cpu because it had no counters.
 +	 */
-+	if (ctx->task && cpuctx->task_ctx != ctx) {
-+		if (cpuctx->task_ctx || ctx->task != current)
-+			return;
-+		cpuctx->task_ctx = ctx;
-+	}
++	if (ctx->task && cpuctx->task_ctx != ctx)
++		return;
 +
 +	spin_lock_irqsave(&ctx->lock, flags);
-+	ctx->is_active = 1;
 +	update_context_time(ctx);
 +
 +	/*
@@ -65678,6 +65593,7 @@ index 0000000..c100554
 +		return;
 +	}
 +
++	counter->task = task;
 +retry:
 +	task_oncpu_function_call(task, __perf_install_in_context,
 +				 counter);
@@ -65717,14 +65633,10 @@ index 0000000..c100554
 +	 * If this is a per-task counter, need to check whether this
 +	 * counter's task is the current task on this cpu.
 +	 */
-+	if (ctx->task && cpuctx->task_ctx != ctx) {
-+		if (cpuctx->task_ctx || ctx->task != current)
-+			return;
-+		cpuctx->task_ctx = ctx;
-+	}
++	if (ctx->task && cpuctx->task_ctx != ctx)
++		return;
 +
 +	spin_lock_irqsave(&ctx->lock, flags);
-+	ctx->is_active = 1;
 +	update_context_time(ctx);
 +
 +	counter->prev_state = counter->state;
@@ -65732,7 +65644,6 @@ index 0000000..c100554
 +		goto unlock;
 +	counter->state = PERF_COUNTER_STATE_INACTIVE;
 +	counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
-+	ctx->nr_enabled++;
 +
 +	/*
 +	 * If the counter is in a group and isn't the group leader,
@@ -65823,7 +65734,6 @@ index 0000000..c100554
 +		counter->state = PERF_COUNTER_STATE_INACTIVE;
 +		counter->tstamp_enabled =
 +			ctx->time - counter->total_time_enabled;
-+		ctx->nr_enabled++;
 +	}
 + out:
 +	spin_unlock_irq(&ctx->lock);
@@ -65869,25 +65779,6 @@ index 0000000..c100554
 +}
 +
 +/*
-+ * Test whether two contexts are equivalent, i.e. whether they
-+ * have both been cloned from the same version of the same context
-+ * and they both have the same number of enabled counters.
-+ * If the number of enabled counters is the same, then the set
-+ * of enabled counters should be the same, because these are both
-+ * inherited contexts, therefore we can't access individual counters
-+ * in them directly with an fd; we can only enable/disable all
-+ * counters via prctl, or enable/disable all counters in a family
-+ * via ioctl, which will have the same effect on both contexts.
-+ */
-+static int context_equiv(struct perf_counter_context *ctx1,
-+			 struct perf_counter_context *ctx2)
-+{
-+	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
-+		&& ctx1->parent_gen == ctx2->parent_gen
-+		&& ctx1->nr_enabled == ctx2->nr_enabled;
-+}
-+
-+/*
 + * Called from scheduler to remove the counters of the current task,
 + * with interrupts disabled.
 + *
@@ -65898,31 +65789,19 @@ index 0000000..c100554
 + * accessing the counter control register. If a NMI hits, then it will
 + * not restart the counter.
 + */
-+void perf_counter_task_sched_out(struct task_struct *task,
-+				 struct task_struct *next, int cpu)
++void perf_counter_task_sched_out(struct task_struct *task, int cpu)
 +{
 +	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
-+	struct perf_counter_context *ctx = task->perf_counter_ctxp;
-+	struct perf_counter_context *next_ctx;
++	struct perf_counter_context *ctx = &task->perf_counter_ctx;
 +	struct pt_regs *regs;
 +
-+	if (likely(!ctx || !cpuctx->task_ctx))
++	if (likely(!cpuctx->task_ctx))
 +		return;
 +
 +	update_context_time(ctx);
 +
 +	regs = task_pt_regs(task);
 +	perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0);
-+
-+	next_ctx = next->perf_counter_ctxp;
-+	if (next_ctx && context_equiv(ctx, next_ctx)) {
-+		task->perf_counter_ctxp = next_ctx;
-+		next->perf_counter_ctxp = ctx;
-+		ctx->task = next;
-+		next_ctx->task = task;
-+		return;
-+	}
-+
 +	__perf_counter_sched_out(ctx, cpuctx);
 +
 +	cpuctx->task_ctx = NULL;
@@ -65932,8 +65811,6 @@ index 0000000..c100554
 +{
 +	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 +
-+	if (!cpuctx->task_ctx)
-+		return;
 +	__perf_counter_sched_out(ctx, cpuctx);
 +	cpuctx->task_ctx = NULL;
 +}
@@ -66032,12 +65909,8 @@ index 0000000..c100554
 +void perf_counter_task_sched_in(struct task_struct *task, int cpu)
 +{
 +	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
-+	struct perf_counter_context *ctx = task->perf_counter_ctxp;
++	struct perf_counter_context *ctx = &task->perf_counter_ctx;
 +
-+	if (likely(!ctx))
-+		return;
-+	if (cpuctx->task_ctx == ctx)
-+		return;
 +	__perf_counter_sched_in(ctx, cpuctx, cpu);
 +	cpuctx->task_ctx = ctx;
 +}
@@ -66052,11 +65925,11 @@ index 0000000..c100554
 +int perf_counter_task_disable(void)
 +{
 +	struct task_struct *curr = current;
-+	struct perf_counter_context *ctx = curr->perf_counter_ctxp;
++	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
 +	struct perf_counter *counter;
 +	unsigned long flags;
 +
-+	if (!ctx || !ctx->nr_counters)
++	if (likely(!ctx->nr_counters))
 +		return 0;
 +
 +	local_irq_save(flags);
@@ -66087,12 +65960,12 @@ index 0000000..c100554
 +int perf_counter_task_enable(void)
 +{
 +	struct task_struct *curr = current;
-+	struct perf_counter_context *ctx = curr->perf_counter_ctxp;
++	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
 +	struct perf_counter *counter;
 +	unsigned long flags;
 +	int cpu;
 +
-+	if (!ctx || !ctx->nr_counters)
++	if (likely(!ctx->nr_counters))
 +		return 0;
 +
 +	local_irq_save(flags);
@@ -66195,23 +66068,19 @@ index 0000000..c100554
 +		return;
 +
 +	cpuctx = &per_cpu(perf_cpu_context, cpu);
-+	ctx = curr->perf_counter_ctxp;
++	ctx = &curr->perf_counter_ctx;
 +
 +	perf_adjust_freq(&cpuctx->ctx);
-+	if (ctx)
-+		perf_adjust_freq(ctx);
++	perf_adjust_freq(ctx);
 +
 +	perf_counter_cpu_sched_out(cpuctx);
-+	if (ctx)
-+		__perf_counter_task_sched_out(ctx);
++	__perf_counter_task_sched_out(ctx);
 +
 +	rotate_ctx(&cpuctx->ctx);
-+	if (ctx)
-+		rotate_ctx(ctx);
++	rotate_ctx(ctx);
 +
 +	perf_counter_cpu_sched_in(cpuctx, cpu);
-+	if (ctx)
-+		perf_counter_task_sched_in(curr, cpu);
++	perf_counter_task_sched_in(curr, cpu);
 +}
 +
 +/*
@@ -66247,22 +66116,6 @@ index 0000000..c100554
 +	return atomic64_read(&counter->count);
 +}
 +
-+/*
-+ * Initialize the perf_counter context in a task_struct:
-+ */
-+static void
-+__perf_counter_init_context(struct perf_counter_context *ctx,
-+			    struct task_struct *task)
-+{
-+	memset(ctx, 0, sizeof(*ctx));
-+	spin_lock_init(&ctx->lock);
-+	mutex_init(&ctx->mutex);
-+	INIT_LIST_HEAD(&ctx->counter_list);
-+	INIT_LIST_HEAD(&ctx->event_list);
-+	atomic_set(&ctx->refcount, 1);
-+	ctx->task = task;
-+}
-+
 +static void put_context(struct perf_counter_context *ctx)
 +{
 +	if (ctx->task)
@@ -66273,7 +66126,6 @@ index 0000000..c100554
 +{
 +	struct perf_cpu_context *cpuctx;
 +	struct perf_counter_context *ctx;
-+	struct perf_counter_context *tctx;
 +	struct task_struct *task;
 +
 +	/*
@@ -66313,36 +66165,15 @@ index 0000000..c100554
 +	if (!task)
 +		return ERR_PTR(-ESRCH);
 +
++	ctx = &task->perf_counter_ctx;
++	ctx->task = task;
++
 +	/* Reuse ptrace permission checks for now. */
 +	if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
-+		put_task_struct(task);
++		put_context(ctx);
 +		return ERR_PTR(-EACCES);
 +	}
 +
-+	ctx = task->perf_counter_ctxp;
-+	if (!ctx) {
-+		ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
-+		if (!ctx) {
-+			put_task_struct(task);
-+			return ERR_PTR(-ENOMEM);
-+		}
-+		__perf_counter_init_context(ctx, task);
-+		/*
-+		 * Make sure other cpus see correct values for *ctx
-+		 * once task->perf_counter_ctxp is visible to them.
-+		 */
-+		smp_wmb();
-+		tctx = cmpxchg(&task->perf_counter_ctxp, NULL, ctx);
-+		if (tctx) {
-+			/*
-+			 * We raced with some other task; use
-+			 * the context they set.
-+			 */
-+			kfree(ctx);
-+			ctx = tctx;
-+		}
-+	}
-+
 +	return ctx;
 +}
 +
@@ -66351,7 +66182,6 @@ index 0000000..c100554
 +	struct perf_counter *counter;
 +
 +	counter = container_of(head, struct perf_counter, rcu_head);
-+	put_ctx(counter->ctx);
 +	kfree(counter);
 +}
 +
@@ -67357,7 +67187,7 @@ index 0000000..c100554
 +	perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
 +	put_cpu_var(perf_cpu_context);
 +
-+	perf_counter_comm_ctx(current->perf_counter_ctxp, comm_event);
++	perf_counter_comm_ctx(&current->perf_counter_ctx, comm_event);
 +}
 +
 +void perf_counter_comm(struct task_struct *task)
@@ -67366,9 +67196,7 @@ index 0000000..c100554
 +
 +	if (!atomic_read(&nr_comm_tracking))
 +		return;
-+	if (!current->perf_counter_ctxp)
-+		return;
-+
++       
 +	comm_event = (struct perf_comm_event){
 +		.task	= task,
 +		.event  = {
@@ -67484,7 +67312,7 @@ index 0000000..c100554
 +	perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
 +	put_cpu_var(perf_cpu_context);
 +
-+	perf_counter_mmap_ctx(current->perf_counter_ctxp, mmap_event);
++	perf_counter_mmap_ctx(&current->perf_counter_ctx, mmap_event);
 +
 +	kfree(buf);
 +}
@@ -67496,8 +67324,6 @@ index 0000000..c100554
 +
 +	if (!atomic_read(&nr_mmap_tracking))
 +		return;
-+	if (!current->perf_counter_ctxp)
-+		return;
 +
 +	mmap_event = (struct perf_mmap_event){
 +		.file   = file,
@@ -68099,7 +67925,6 @@ index 0000000..c100554
 +	counter->group_leader		= group_leader;
 +	counter->pmu			= NULL;
 +	counter->ctx			= ctx;
-+	get_ctx(ctx);
 +
 +	counter->state = PERF_COUNTER_STATE_INACTIVE;
 +	if (hw_event->disabled)
@@ -68265,6 +68090,21 @@ index 0000000..c100554
 +}
 +
 +/*
++ * Initialize the perf_counter context in a task_struct:
++ */
++static void
++__perf_counter_init_context(struct perf_counter_context *ctx,
++			    struct task_struct *task)
++{
++	memset(ctx, 0, sizeof(*ctx));
++	spin_lock_init(&ctx->lock);
++	mutex_init(&ctx->mutex);
++	INIT_LIST_HEAD(&ctx->counter_list);
++	INIT_LIST_HEAD(&ctx->event_list);
++	ctx->task = task;
++}
++
++/*
 + * inherit a counter from parent task to child task:
 + */
 +static struct perf_counter *
@@ -68293,18 +68133,9 @@ index 0000000..c100554
 +		return child_counter;
 +
 +	/*
-+	 * Make the child state follow the state of the parent counter,
-+	 * not its hw_event.disabled bit.  We hold the parent's mutex,
-+	 * so we won't race with perf_counter_{en,dis}able_family.
-+	 */
-+	if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
-+		child_counter->state = PERF_COUNTER_STATE_INACTIVE;
-+	else
-+		child_counter->state = PERF_COUNTER_STATE_OFF;
-+
-+	/*
 +	 * Link it up in the child's context:
 +	 */
++	child_counter->task = child;
 +	add_counter_to_ctx(child_counter, child_ctx);
 +
 +	child_counter->parent = parent_counter;
@@ -68327,6 +68158,16 @@ index 0000000..c100554
 +	mutex_lock(&parent_counter->mutex);
 +	list_add_tail(&child_counter->child_list, &parent_counter->child_list);
 +
++	/*
++	 * Make the child state follow the state of the parent counter,
++	 * not its hw_event.disabled bit.  We hold the parent's mutex,
++	 * so we won't race with perf_counter_{en,dis}able_family.
++	 */
++	if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
++		child_counter->state = PERF_COUNTER_STATE_INACTIVE;
++	else
++		child_counter->state = PERF_COUNTER_STATE_OFF;
++
 +	mutex_unlock(&parent_counter->mutex);
 +
 +	return child_counter;
@@ -68393,15 +68234,40 @@ index 0000000..c100554
 +	struct perf_counter *parent_counter;
 +
 +	/*
-+	 * Protect against concurrent operations on child_counter
-+	 * due its fd getting closed, etc.
-+	 */
-+	mutex_lock(&child_counter->mutex);
++	 * If we do not self-reap then we have to wait for the
++	 * child task to unschedule (it will happen for sure),
++	 * so that its counter is at its final count. (This
++	 * condition triggers rarely - child tasks usually get
++	 * off their CPU before the parent has a chance to
++	 * get this far into the reaping action)
++	 */
++	if (child != current) {
++		wait_task_inactive(child, 0);
++		update_counter_times(child_counter);
++		list_del_counter(child_counter, child_ctx);
++	} else {
++		struct perf_cpu_context *cpuctx;
++		unsigned long flags;
++
++		/*
++		 * Disable and unlink this counter.
++		 *
++		 * Be careful about zapping the list - IRQ/NMI context
++		 * could still be processing it:
++		 */
++		local_irq_save(flags);
++		perf_disable();
 +
-+	update_counter_times(child_counter);
-+	list_del_counter(child_counter, child_ctx);
++		cpuctx = &__get_cpu_var(perf_cpu_context);
 +
-+	mutex_unlock(&child_counter->mutex);
++		group_sched_out(child_counter, cpuctx, child_ctx);
++		update_counter_times(child_counter);
++
++		list_del_counter(child_counter, child_ctx);
++
++		perf_enable();
++		local_irq_restore(flags);
++	}
 +
 +	parent_counter = child_counter->parent;
 +	/*
@@ -68420,29 +68286,19 @@ index 0000000..c100554
 + *
 + * Note: we may be running in child context, but the PID is not hashed
 + * anymore so new counters will not be added.
-+ * (XXX not sure that is true when we get called from flush_old_exec.
-+ *  -- paulus)
 + */
 +void perf_counter_exit_task(struct task_struct *child)
 +{
 +	struct perf_counter *child_counter, *tmp;
 +	struct perf_counter_context *child_ctx;
-+	unsigned long flags;
 +
 +	WARN_ON_ONCE(child != current);
 +
-+	child_ctx = child->perf_counter_ctxp;
++	child_ctx = &child->perf_counter_ctx;
 +
-+	if (likely(!child_ctx))
++	if (likely(!child_ctx->nr_counters))
 +		return;
 +
-+	local_irq_save(flags);
-+	__perf_counter_task_sched_out(child_ctx);
-+	child->perf_counter_ctxp = NULL;
-+	local_irq_restore(flags);
-+
-+	mutex_lock(&child_ctx->mutex);
-+
 +again:
 +	list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
 +				 list_entry)
@@ -68455,10 +68311,6 @@ index 0000000..c100554
 +	 */
 +	if (!list_empty(&child_ctx->counter_list))
 +		goto again;
-+
-+	mutex_unlock(&child_ctx->mutex);
-+
-+	put_ctx(child_ctx);
 +}
 +
 +/*
@@ -68469,27 +68321,20 @@ index 0000000..c100554
 +	struct perf_counter_context *child_ctx, *parent_ctx;
 +	struct perf_counter *counter;
 +	struct task_struct *parent = current;
-+	int inherited_all = 1;
 +
-+	child->perf_counter_ctxp = NULL;
++	child_ctx  =  &child->perf_counter_ctx;
++	parent_ctx = &parent->perf_counter_ctx;
++
++	__perf_counter_init_context(child_ctx, child);
 +
 +	/*
 +	 * This is executed from the parent task context, so inherit
-+	 * counters that have been marked for cloning.
-+	 * First allocate and initialize a context for the child.
++	 * counters that have been marked for cloning:
 +	 */
 +
-+	child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
-+	if (!child_ctx)
++	if (likely(!parent_ctx->nr_counters))
 +		return;
 +
-+	parent_ctx = parent->perf_counter_ctxp;
-+	if (likely(!parent_ctx || !parent_ctx->nr_counters))
-+		return;
-+
-+	__perf_counter_init_context(child_ctx, child);
-+	child->perf_counter_ctxp = child_ctx;
-+
 +	/*
 +	 * Lock the parent list. No need to lock the child - not PID
 +	 * hashed yet and not running, so nobody can access it.
@@ -68504,31 +68349,12 @@ index 0000000..c100554
 +		if (counter != counter->group_leader)
 +			continue;
 +
-+		if (!counter->hw_event.inherit) {
-+			inherited_all = 0;
++		if (!counter->hw_event.inherit)
 +			continue;
-+		}
 +
 +		if (inherit_group(counter, parent,
-+				  parent_ctx, child, child_ctx)) {
-+			inherited_all = 0;
++				  parent_ctx, child, child_ctx))
 +			break;
-+		}
-+	}
-+
-+	if (inherited_all) {
-+		/*
-+		 * Mark the child context as a clone of the parent
-+		 * context, or of whatever the parent is a clone of.
-+		 */
-+		if (parent_ctx->parent_ctx) {
-+			child_ctx->parent_ctx = parent_ctx->parent_ctx;
-+			child_ctx->parent_gen = parent_ctx->parent_gen;
-+		} else {
-+			child_ctx->parent_ctx = parent_ctx;
-+			child_ctx->parent_gen = parent_ctx->generation;
-+		}
-+		get_ctx(child_ctx->parent_ctx);
 +	}
 +
 +	mutex_unlock(&parent_ctx->mutex);
@@ -69455,7 +69281,7 @@ index e124bf5..97a2f81 100644
  #ifdef CONFIG_DEBUG_RT_MUTEXES
  # include "rtmutex-debug.h"
 diff --git a/kernel/sched.c b/kernel/sched.c
-index 26efa47..f93305b 100644
+index 26efa47..c036590 100644
 --- a/kernel/sched.c
 +++ b/kernel/sched.c
 @@ -39,6 +39,7 @@
@@ -69983,7 +69809,7 @@ index 26efa47..f93305b 100644
  
  	if (likely(prev != next)) {
  		sched_info_switch(prev, next);
-+		perf_counter_task_sched_out(prev, next, cpu);
++		perf_counter_task_sched_out(prev, cpu);
  
  		rq->nr_switches++;
  		rq->curr = next;

xen.pvops.post.patch:

Index: xen.pvops.post.patch
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/Attic/xen.pvops.post.patch,v
retrieving revision 1.1.2.15
retrieving revision 1.1.2.16
diff -u -p -r1.1.2.15 -r1.1.2.16
--- xen.pvops.post.patch	22 May 2009 20:02:09 -0000	1.1.2.15
+++ xen.pvops.post.patch	4 Jun 2009 19:35:03 -0000	1.1.2.16
@@ -50,6 +50,16 @@ Test some patches to get STACKPROTECTOR 
  static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
  #endif
  
+--- a/include/linux/init_task.h	2009-04-23 20:13:50.000000000 +0100
++++ b/include/linux/init_task.h	2009-04-24 20:47:17.000000000 +0100
+@@ -170,6 +170,7 @@
+ 		[PIDTYPE_SID]  = INIT_PID_LINK(PIDTYPE_SID),		\
+ 	},								\
+ 	.dirties = INIT_PROP_LOCAL_SINGLE(dirties),			\
++	INIT_UTRACE(tsk)						\
+ 	INIT_IDS							\
+ 	INIT_PERF_COUNTERS(tsk)						\
+ 	INIT_TRACE_IRQFLAGS						\
 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
 index 7727aa8..5d6083c 100644
 --- a/arch/x86/include/asm/paravirt_types.h
@@ -167,3 +177,14 @@ index 76f8f84..4118f17 100644
  
  obj-y			:= intel_cacheinfo.o addon_cpuid_features.o
  obj-y			+= proc.o capflags.o powerflags.o common.o
+--- linux-2.6.29.x86_64/drivers/xen/netback/netback.c.orig	2009-05-28 21:55:36.000000000 +0100
++++ linux-2.6.29.x86_64/drivers/xen/netback/netback.c	2009-05-28 21:59:06.000000000 +0100
+@@ -46,7 +46,7 @@
+ #include <asm/xen/hypercall.h>
+ #include <asm/xen/page.h>
+ 
+-/*define NETBE_DEBUG_INTERRUPT*/
++#define NETBE_DEBUG_INTERRUPT
+ 
+ struct netbk_rx_meta {
+ 	skb_frag_t frag;

xen.pvops.pre.patch:

Index: xen.pvops.pre.patch
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/Attic/xen.pvops.pre.patch,v
retrieving revision 1.1.2.12
retrieving revision 1.1.2.13
diff -u -p -r1.1.2.12 -r1.1.2.13
--- xen.pvops.pre.patch	22 May 2009 20:02:09 -0000	1.1.2.12
+++ xen.pvops.pre.patch	4 Jun 2009 19:35:03 -0000	1.1.2.13
@@ -2,6 +2,7 @@ temporarily revert various Fedora change
 Affected patches;
 more linux-2.6-execshield.patch - arch/x86/mm/init.c arch/x86/mm/init_32.c include/linux/sched.h mm/mmap.c arch/x86/include/asm/paravirt.h arch/x86/kernel/process_32.c
 linux-2.6-defaults-pci_no_msi.patch - drivers/pci/pci.h
+linux-2.6-utrace.patch - include/linux/init_task.h
 
 --- a/arch/x86/mm/init.c	2009-04-24 20:27:42.000000000 +0100
 +++ b/arch/x86/mm/init.c	2009-04-23 20:13:34.000000000 +0100
@@ -84,7 +85,17 @@ linux-2.6-defaults-pci_no_msi.patch - dr
 -static inline void pci_yes_msi(void) { }
  static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
  #endif
- 
+
+--- a/include/linux/init_task.h	2009-04-24 20:47:17.000000000 +0100
++++ b/include/linux/init_task.h	2009-04-23 20:13:50.000000000 +0100
+@@ -170,7 +170,6 @@
+ 		[PIDTYPE_SID]  = INIT_PID_LINK(PIDTYPE_SID),		\
+ 	},								\
+ 	.dirties = INIT_PROP_LOCAL_SINGLE(dirties),			\
+-	INIT_UTRACE(tsk)						\
+ 	INIT_IDS							\
+ 	INIT_TRACE_IRQFLAGS						\
+ 	INIT_LOCKDEP							\
 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
 index 7727aa8..5d6083c 100644
 --- a/arch/x86/include/asm/paravirt.h


--- cpufreq-add-atom-to-p4-clockmod.patch DELETED ---


--- patch-2.6.30-rc6-git6.bz2.sign DELETED ---


--- patch-2.6.30-rc6.bz2.sign DELETED ---


--- rds-only-on-64-bit-or-x86.patch DELETED ---



