rpms/kernel/devel patch-2.6.22-rc5-git4.bz2.sign, NONE, 1.1 .cvsignore, 1.632, 1.633 kernel-2.6.spec, 1.3229, 1.3230 linux-2.6-sched-cfs.patch, 1.3, 1.4 sources, 1.597, 1.598 upstream, 1.519, 1.520 patch-2.6.22-rc5-git1.bz2.sign, 1.1, NONE

Dave Jones (davej) fedora-extras-commits at redhat.com
Wed Jun 20 19:45:18 UTC 2007


Author: davej

Update of /cvs/pkgs/rpms/kernel/devel
In directory cvs-int.fedora.redhat.com:/tmp/cvs-serv9698

Modified Files:
	.cvsignore kernel-2.6.spec linux-2.6-sched-cfs.patch sources 
	upstream 
Added Files:
	patch-2.6.22-rc5-git4.bz2.sign 
Removed Files:
	patch-2.6.22-rc5-git1.bz2.sign 
Log Message:
* Wed Jun 20 2007 Dave Jones <davej at redhat.com>
- 2.6.22-rc5-git4.



--- NEW FILE patch-2.6.22-rc5-git4.bz2.sign ---
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.7 (GNU/Linux)
Comment: See http://www.kernel.org/signature.html for info

iD8DBQBGeNDsyGugalF9Dw4RAp8FAJ48EmHnAnLJSM0mVhN9jWeM4sRWPgCeNLwF
5jbM8YcGnYQO8cvS3b917FQ=
=DSTw
-----END PGP SIGNATURE-----


Index: .cvsignore
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/.cvsignore,v
retrieving revision 1.632
retrieving revision 1.633
diff -u -r1.632 -r1.633
--- .cvsignore	18 Jun 2007 21:09:33 -0000	1.632
+++ .cvsignore	20 Jun 2007 19:44:43 -0000	1.633
@@ -4,4 +4,4 @@
 kernel-2.6.21
 linux-2.6.21.tar.bz2
 patch-2.6.22-rc5.bz2
-patch-2.6.22-rc5-git1.bz2
+patch-2.6.22-rc5-git4.bz2


Index: kernel-2.6.spec
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/kernel-2.6.spec,v
retrieving revision 1.3229
retrieving revision 1.3230
diff -u -r1.3229 -r1.3230
--- kernel-2.6.spec	19 Jun 2007 18:16:54 -0000	1.3229
+++ kernel-2.6.spec	20 Jun 2007 19:44:43 -0000	1.3230
@@ -414,7 +414,7 @@
 %else
 # Here should be only the patches up to the upstream canonical Linus tree.
 Patch00: patch-2.6.22-rc5.bz2
-Patch01: patch-2.6.22-rc5-git1.bz2
+Patch01: patch-2.6.22-rc5-git4.bz2
 %endif
 
 %if !%{nopatches}
@@ -889,6 +889,9 @@
 patch_command='patch -p1 -F1 -s'
 ApplyPatch()
 {
+  if [ ! -f $RPM_SOURCE_DIR/$1 ]; then
+    exit 1;
+  fi
   case "$1" in
   *.bz2) bunzip2 < "$RPM_SOURCE_DIR/$1" | $patch_command ;;
   *.gz) gunzip < "$RPM_SOURCE_DIR/$1" | $patch_command ;;
@@ -902,7 +905,7 @@
 
 # Update to latest upstream.
 ApplyPatch patch-2.6.22-rc5.bz2
-ApplyPatch patch-2.6.22-rc5-git1.bz2
+ApplyPatch patch-2.6.22-rc5-git4.bz2
 
 %endif
 %if !%{nopatches}
@@ -2081,6 +2084,9 @@
 %endif
 
 %changelog
+* Wed Jun 20 2007 Dave Jones <davej at redhat.com>
+- 2.6.22-rc5-git4.
+
 * Mon Jun 19 2007 Chuck Ebbert <cebbert at redhat.com>
 - enable sound system debugging in -debug kernels
 

linux-2.6-sched-cfs.patch:

Index: linux-2.6-sched-cfs.patch
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/linux-2.6-sched-cfs.patch,v
retrieving revision 1.3
retrieving revision 1.4
diff -u -r1.3 -r1.4
--- linux-2.6-sched-cfs.patch	18 Jun 2007 21:09:33 -0000	1.3
+++ linux-2.6-sched-cfs.patch	20 Jun 2007 19:44:43 -0000	1.4
@@ -1,7 +1,7 @@
-Index: linux-cfs-2.6.22-rc5.q/Documentation/kernel-parameters.txt
+Index: linux/Documentation/kernel-parameters.txt
 ===================================================================
---- linux-cfs-2.6.22-rc5.q.orig/Documentation/kernel-parameters.txt
-+++ linux-cfs-2.6.22-rc5.q/Documentation/kernel-parameters.txt
+--- linux.orig/Documentation/kernel-parameters.txt
++++ linux/Documentation/kernel-parameters.txt
 @@ -1019,49 +1019,6 @@ and is between 256 and 4096 characters. 
  
  	mga=		[HW,DRM]
@@ -52,10 +52,10 @@
  	mousedev.tap_time=
  			[MOUSE] Maximum time between finger touching and
  			leaving touchpad surface for touch to be considered
-Index: linux-cfs-2.6.22-rc5.q/Documentation/sched-design-CFS.txt
+Index: linux/Documentation/sched-design-CFS.txt
 ===================================================================
 --- /dev/null
-+++ linux-cfs-2.6.22-rc5.q/Documentation/sched-design-CFS.txt
++++ linux/Documentation/sched-design-CFS.txt
 @@ -0,0 +1,119 @@
 +
 +This is the CFS scheduler.
@@ -176,10 +176,10 @@
 +   iterators of the scheduling modules are used. The balancing code got
 +   quite a bit simpler as a result.
 +
-Index: linux-cfs-2.6.22-rc5.q/arch/i386/kernel/smpboot.c
+Index: linux/arch/i386/kernel/smpboot.c
 ===================================================================
---- linux-cfs-2.6.22-rc5.q.orig/arch/i386/kernel/smpboot.c
-+++ linux-cfs-2.6.22-rc5.q/arch/i386/kernel/smpboot.c
+--- linux.orig/arch/i386/kernel/smpboot.c
++++ linux/arch/i386/kernel/smpboot.c
 @@ -941,17 +941,6 @@ exit:
  }
  #endif
@@ -206,19 +206,19 @@
  
  	set_cpu_sibling_map(0);
  
-Index: linux-cfs-2.6.22-rc5.q/arch/i386/kernel/syscall_table.S
+Index: linux/arch/i386/kernel/syscall_table.S
 ===================================================================
---- linux-cfs-2.6.22-rc5.q.orig/arch/i386/kernel/syscall_table.S
-+++ linux-cfs-2.6.22-rc5.q/arch/i386/kernel/syscall_table.S
+--- linux.orig/arch/i386/kernel/syscall_table.S
++++ linux/arch/i386/kernel/syscall_table.S
 @@ -323,3 +323,4 @@ ENTRY(sys_call_table)
  	.long sys_signalfd
  	.long sys_timerfd
  	.long sys_eventfd
 +	.long sys_sched_yield_to
-Index: linux-cfs-2.6.22-rc5.q/arch/i386/kernel/tsc.c
+Index: linux/arch/i386/kernel/tsc.c
 ===================================================================
---- linux-cfs-2.6.22-rc5.q.orig/arch/i386/kernel/tsc.c
-+++ linux-cfs-2.6.22-rc5.q/arch/i386/kernel/tsc.c
+--- linux.orig/arch/i386/kernel/tsc.c
++++ linux/arch/i386/kernel/tsc.c
 @@ -4,6 +4,7 @@
   * See comments there for proper credits.
   */
@@ -250,10 +250,10 @@
  	if (!tsc_unstable) {
  		tsc_unstable = 1;
  		tsc_enabled = 0;
-Index: linux-cfs-2.6.22-rc5.q/arch/ia64/kernel/setup.c
+Index: linux/arch/ia64/kernel/setup.c
 ===================================================================
---- linux-cfs-2.6.22-rc5.q.orig/arch/ia64/kernel/setup.c
-+++ linux-cfs-2.6.22-rc5.q/arch/ia64/kernel/setup.c
+--- linux.orig/arch/ia64/kernel/setup.c
++++ linux/arch/ia64/kernel/setup.c
 @@ -805,7 +805,6 @@ static void __cpuinit
  get_max_cacheline_size (void)
  {
@@ -281,10 +281,10 @@
  	if (max > ia64_max_cacheline_size)
  		ia64_max_cacheline_size = max;
  }
-Index: linux-cfs-2.6.22-rc5.q/arch/mips/kernel/smp.c
+Index: linux/arch/mips/kernel/smp.c
 ===================================================================
---- linux-cfs-2.6.22-rc5.q.orig/arch/mips/kernel/smp.c
-+++ linux-cfs-2.6.22-rc5.q/arch/mips/kernel/smp.c
+--- linux.orig/arch/mips/kernel/smp.c
++++ linux/arch/mips/kernel/smp.c
 @@ -51,16 +51,6 @@ int __cpu_logical_map[NR_CPUS];		/* Map 
  EXPORT_SYMBOL(phys_cpu_present_map);
  EXPORT_SYMBOL(cpu_online_map);
@@ -310,10 +310,10 @@
  	plat_prepare_cpus(max_cpus);
  #ifndef CONFIG_HOTPLUG_CPU
  	cpu_present_map = cpu_possible_map;
-Index: linux-cfs-2.6.22-rc5.q/arch/sparc/kernel/smp.c
+Index: linux/arch/sparc/kernel/smp.c
 ===================================================================
---- linux-cfs-2.6.22-rc5.q.orig/arch/sparc/kernel/smp.c
-+++ linux-cfs-2.6.22-rc5.q/arch/sparc/kernel/smp.c
+--- linux.orig/arch/sparc/kernel/smp.c
++++ linux/arch/sparc/kernel/smp.c
 @@ -68,16 +68,6 @@ void __cpuinit smp_store_cpu_info(int id
  	cpu_data(id).prom_node = cpu_node;
  	cpu_data(id).mid = cpu_get_hwmid(cpu_node);
@@ -331,10 +331,10 @@
  	if (cpu_data(id).mid < 0)
  		panic("No MID found for CPU%d at node 0x%08d", id, cpu_node);
  }
-Index: linux-cfs-2.6.22-rc5.q/arch/sparc64/kernel/smp.c
+Index: linux/arch/sparc64/kernel/smp.c
 ===================================================================
---- linux-cfs-2.6.22-rc5.q.orig/arch/sparc64/kernel/smp.c
-+++ linux-cfs-2.6.22-rc5.q/arch/sparc64/kernel/smp.c
+--- linux.orig/arch/sparc64/kernel/smp.c
++++ linux/arch/sparc64/kernel/smp.c
 @@ -1163,32 +1163,6 @@ int setup_profiling_timer(unsigned int m
  	return -EINVAL;
  }
@@ -376,10 +376,10 @@
  }
  
  void __devinit smp_prepare_boot_cpu(void)
-Index: linux-cfs-2.6.22-rc5.q/fs/proc/array.c
+Index: linux/fs/proc/array.c
 ===================================================================
---- linux-cfs-2.6.22-rc5.q.orig/fs/proc/array.c
-+++ linux-cfs-2.6.22-rc5.q/fs/proc/array.c
+--- linux.orig/fs/proc/array.c
++++ linux/fs/proc/array.c
 @@ -165,7 +165,6 @@ static inline char * task_state(struct t
  	rcu_read_lock();
  	buffer += sprintf(buffer,
@@ -502,10 +502,10 @@
  		cputime_to_clock_t(cutime),
  		cputime_to_clock_t(cstime),
  		priority,
-Index: linux-cfs-2.6.22-rc5.q/fs/proc/base.c
+Index: linux/fs/proc/base.c
 ===================================================================
---- linux-cfs-2.6.22-rc5.q.orig/fs/proc/base.c
-+++ linux-cfs-2.6.22-rc5.q/fs/proc/base.c
+--- linux.orig/fs/proc/base.c
++++ linux/fs/proc/base.c
 @@ -296,7 +296,7 @@ static int proc_pid_wchan(struct task_st
   */
  static int proc_pid_schedstat(struct task_struct *task, char *buffer)
@@ -598,10 +598,10 @@
  	INF("cmdline",   S_IRUGO, pid_cmdline),
  	INF("stat",      S_IRUGO, tid_stat),
  	INF("statm",     S_IRUGO, pid_statm),
-Index: linux-cfs-2.6.22-rc5.q/include/asm-generic/bitops/sched.h
+Index: linux/include/asm-generic/bitops/sched.h
 ===================================================================
---- linux-cfs-2.6.22-rc5.q.orig/include/asm-generic/bitops/sched.h
-+++ linux-cfs-2.6.22-rc5.q/include/asm-generic/bitops/sched.h
+--- linux.orig/include/asm-generic/bitops/sched.h
++++ linux/include/asm-generic/bitops/sched.h
 @@ -6,28 +6,23 @@
  
  /*
@@ -639,10 +639,10 @@
  #else
  #error BITS_PER_LONG not defined
  #endif
-Index: linux-cfs-2.6.22-rc5.q/include/asm-i386/unistd.h
+Index: linux/include/asm-i386/unistd.h
 ===================================================================
---- linux-cfs-2.6.22-rc5.q.orig/include/asm-i386/unistd.h
-+++ linux-cfs-2.6.22-rc5.q/include/asm-i386/unistd.h
+--- linux.orig/include/asm-i386/unistd.h
++++ linux/include/asm-i386/unistd.h
 @@ -329,10 +329,11 @@
  #define __NR_signalfd		321
  #define __NR_timerfd		322
@@ -656,10 +656,10 @@
  
  #define __ARCH_WANT_IPC_PARSE_VERSION
  #define __ARCH_WANT_OLD_READDIR
-Index: linux-cfs-2.6.22-rc5.q/include/asm-x86_64/unistd.h
+Index: linux/include/asm-x86_64/unistd.h
 ===================================================================
---- linux-cfs-2.6.22-rc5.q.orig/include/asm-x86_64/unistd.h
-+++ linux-cfs-2.6.22-rc5.q/include/asm-x86_64/unistd.h
+--- linux.orig/include/asm-x86_64/unistd.h
++++ linux/include/asm-x86_64/unistd.h
 @@ -630,6 +630,9 @@ __SYSCALL(__NR_signalfd, sys_signalfd)
  __SYSCALL(__NR_timerfd, sys_timerfd)
  #define __NR_eventfd		283
@@ -670,10 +670,10 @@
  
  #ifndef __NO_STUBS
  #define __ARCH_WANT_OLD_READDIR
-Index: linux-cfs-2.6.22-rc5.q/include/linux/hardirq.h
+Index: linux/include/linux/hardirq.h
 ===================================================================
---- linux-cfs-2.6.22-rc5.q.orig/include/linux/hardirq.h
-+++ linux-cfs-2.6.22-rc5.q/include/linux/hardirq.h
+--- linux.orig/include/linux/hardirq.h
++++ linux/include/linux/hardirq.h
 @@ -79,6 +79,19 @@
  #endif
  
@@ -694,10 +694,10 @@
  # define preemptible()	(preempt_count() == 0 && !irqs_disabled())
  # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
  #else
-Index: linux-cfs-2.6.22-rc5.q/include/linux/sched.h
+Index: linux/include/linux/sched.h
 ===================================================================
---- linux-cfs-2.6.22-rc5.q.orig/include/linux/sched.h
-+++ linux-cfs-2.6.22-rc5.q/include/linux/sched.h
+--- linux.orig/include/linux/sched.h
++++ linux/include/linux/sched.h
 @@ -2,7 +2,6 @@
  #define _LINUX_SCHED_H
  
@@ -949,10 +949,10 @@
  static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
  {
  }
-Index: linux-cfs-2.6.22-rc5.q/include/linux/topology.h
+Index: linux/include/linux/topology.h
 ===================================================================
---- linux-cfs-2.6.22-rc5.q.orig/include/linux/topology.h
-+++ linux-cfs-2.6.22-rc5.q/include/linux/topology.h
+--- linux.orig/include/linux/topology.h
++++ linux/include/linux/topology.h
 @@ -98,7 +98,7 @@
  	.cache_nice_tries	= 0,			\
  	.busy_idx		= 0,			\
@@ -998,10 +998,10 @@
  				| BALANCE_FOR_PKG_POWER,\
  	.last_balance		= jiffies,		\
  	.balance_interval	= 1,			\
-Index: linux-cfs-2.6.22-rc5.q/init/main.c
+Index: linux/init/main.c
 ===================================================================
---- linux-cfs-2.6.22-rc5.q.orig/init/main.c
-+++ linux-cfs-2.6.22-rc5.q/init/main.c
+--- linux.orig/init/main.c
++++ linux/init/main.c
 @@ -436,7 +436,7 @@ static void noinline __init_refok rest_i
  
  	/*
@@ -1011,10 +1011,10 @@
  	 */
  	preempt_enable_no_resched();
  	schedule();
-Index: linux-cfs-2.6.22-rc5.q/kernel/delayacct.c
+Index: linux/kernel/delayacct.c
 ===================================================================
---- linux-cfs-2.6.22-rc5.q.orig/kernel/delayacct.c
-+++ linux-cfs-2.6.22-rc5.q/kernel/delayacct.c
+--- linux.orig/kernel/delayacct.c
++++ linux/kernel/delayacct.c
 @@ -99,9 +99,10 @@ void __delayacct_blkio_end(void)
  int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
  {
@@ -1042,10 +1042,10 @@
  	d->cpu_run_virtual_total =
  		(tmp < (s64)d->cpu_run_virtual_total) ?	0 : tmp;
  
-Index: linux-cfs-2.6.22-rc5.q/kernel/exit.c
+Index: linux/kernel/exit.c
 ===================================================================
---- linux-cfs-2.6.22-rc5.q.orig/kernel/exit.c
-+++ linux-cfs-2.6.22-rc5.q/kernel/exit.c
+--- linux.orig/kernel/exit.c
++++ linux/kernel/exit.c
 @@ -122,9 +122,9 @@ static void __exit_signal(struct task_st
  		sig->maj_flt += tsk->maj_flt;
  		sig->nvcsw += tsk->nvcsw;
@@ -1065,10 +1065,10 @@
  	write_unlock_irq(&tasklist_lock);
  	proc_flush_task(p);
  	release_thread(p);
-Index: linux-cfs-2.6.22-rc5.q/kernel/fork.c
+Index: linux/kernel/fork.c
 ===================================================================
---- linux-cfs-2.6.22-rc5.q.orig/kernel/fork.c
-+++ linux-cfs-2.6.22-rc5.q/kernel/fork.c
+--- linux.orig/kernel/fork.c
++++ linux/kernel/fork.c
 @@ -117,6 +117,7 @@ void __put_task_struct(struct task_struc
  	WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE)));
  	WARN_ON(atomic_read(&tsk->usage));
@@ -1095,10 +1095,10 @@
  #ifdef CONFIG_TASK_XACCT
  	p->rchar = 0;		/* I/O counter: bytes read */
  	p->wchar = 0;		/* I/O counter: bytes written */
-Index: linux-cfs-2.6.22-rc5.q/kernel/posix-cpu-timers.c
+Index: linux/kernel/posix-cpu-timers.c
 ===================================================================
---- linux-cfs-2.6.22-rc5.q.orig/kernel/posix-cpu-timers.c
-+++ linux-cfs-2.6.22-rc5.q/kernel/posix-cpu-timers.c
+--- linux.orig/kernel/posix-cpu-timers.c
++++ linux/kernel/posix-cpu-timers.c
 @@ -161,7 +161,7 @@ static inline cputime_t virt_ticks(struc
  }
  static inline unsigned long long sched_ns(struct task_struct *p)
@@ -1239,10 +1239,10 @@
  		return;
  
  #undef	UNEXPIRED
-Index: linux-cfs-2.6.22-rc5.q/kernel/sched.c
+Index: linux/kernel/sched.c
 ===================================================================
---- linux-cfs-2.6.22-rc5.q.orig/kernel/sched.c
-+++ linux-cfs-2.6.22-rc5.q/kernel/sched.c
+--- linux.orig/kernel/sched.c
++++ linux/kernel/sched.c
 @@ -16,6 +16,11 @@
   *		by Davide Libenzi, preemptible kernel bits by Robert Love.
   *  2003-09-03	Interactivity tuning by Con Kolivas.
@@ -1926,6 +1926,21 @@
  	(PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
  
 -static void set_load_weight(struct task_struct *p)
+-{
+-	if (has_rt_policy(p)) {
+-#ifdef CONFIG_SMP
+-		if (p == task_rq(p)->migration_thread)
+-			/*
+-			 * The migration thread does the actual balancing.
+-			 * Giving its load any weight will skew balancing
+-			 * adversely.
+-			 */
+-			p->load_weight = 0;
+-		else
+-#endif
+-			p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
+-	} else
+-		p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
 +#define NICE_0_LOAD	SCHED_LOAD_SCALE
 +#define NICE_0_SHIFT	SCHED_LOAD_SHIFT
 +
@@ -1949,21 +1964,7 @@
 +
 +static inline unsigned long
 +total_raw_weighted_load(struct rq *rq)
- {
--	if (has_rt_policy(p)) {
--#ifdef CONFIG_SMP
--		if (p == task_rq(p)->migration_thread)
--			/*
--			 * The migration thread does the actual balancing.
--			 * Giving its load any weight will skew balancing
--			 * adversely.
--			 */
--			p->load_weight = 0;
--		else
--#endif
--			p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
--	} else
--		p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
++{
 +	return rq->rt.raw_weighted_load + rq->cfs.raw_weighted_load;
  }
  
@@ -2360,16 +2361,36 @@
  		set_task_cpu(p, dest_cpu);
  		return 0;
  	}
-@@ -1164,7 +865,7 @@ void wait_task_inactive(struct task_stru
+@@ -1158,9 +859,8 @@ migrate_task(struct task_struct *p, int 
+ void wait_task_inactive(struct task_struct *p)
+ {
+ 	unsigned long flags;
++	int running, on_rq;
+ 	struct rq *rq;
+-	struct prio_array *array;
+-	int running;
+ 
  repeat:
+ 	/*
+@@ -1192,7 +892,7 @@ repeat:
+ 	 */
  	rq = task_rq_lock(p, &flags);
- 	/* Must be off runqueue entirely, not preempted. */
--	if (unlikely(p->array || task_running(rq, p))) {
-+	if (unlikely(p->se.on_rq || task_running(rq, p))) {
- 		/* If it's preempted, we yield.  It could be a while. */
- 		preempted = !task_running(rq, p);
- 		task_rq_unlock(rq, &flags);
-@@ -1210,11 +911,12 @@ void kick_process(struct task_struct *p)
+ 	running = task_running(rq, p);
+-	array = p->array;
++	on_rq = p->se.on_rq;
+ 	task_rq_unlock(rq, &flags);
+ 
+ 	/*
+@@ -1215,7 +915,7 @@ repeat:
+ 	 * running right now), it's preempted, and we should
+ 	 * yield - it could be a while.
+ 	 */
+-	if (unlikely(array)) {
++	if (unlikely(on_rq)) {
+ 		yield();
+ 		goto repeat;
+ 	}
+@@ -1261,11 +961,12 @@ void kick_process(struct task_struct *p)
  static inline unsigned long source_load(int cpu, int type)
  {
  	struct rq *rq = cpu_rq(cpu);
@@ -2384,7 +2405,7 @@
  }
  
  /*
-@@ -1224,11 +926,12 @@ static inline unsigned long source_load(
+@@ -1275,11 +976,12 @@ static inline unsigned long source_load(
  static inline unsigned long target_load(int cpu, int type)
  {
  	struct rq *rq = cpu_rq(cpu);
@@ -2399,7 +2420,7 @@
  }
  
  /*
-@@ -1237,9 +940,10 @@ static inline unsigned long target_load(
+@@ -1288,9 +990,10 @@ static inline unsigned long target_load(
  static inline unsigned long cpu_avg_load_per_task(int cpu)
  {
  	struct rq *rq = cpu_rq(cpu);
@@ -2411,7 +2432,7 @@
  }
  
  /*
-@@ -1341,9 +1045,9 @@ static int sched_balance_self(int cpu, i
+@@ -1392,9 +1095,9 @@ static int sched_balance_self(int cpu, i
  	struct sched_domain *tmp, *sd = NULL;
  
  	for_each_domain(cpu, tmp) {
@@ -2424,7 +2445,7 @@
  		if (tmp->flags & SD_POWERSAVINGS_BALANCE)
  			break;
  		if (tmp->flags & flag)
-@@ -1470,7 +1174,7 @@ static int try_to_wake_up(struct task_st
+@@ -1521,7 +1224,7 @@ static int try_to_wake_up(struct task_st
  	if (!(old_state & state))
  		goto out;
  
@@ -2433,7 +2454,7 @@
  		goto out_running;
  
  	cpu = task_cpu(p);
-@@ -1525,11 +1229,11 @@ static int try_to_wake_up(struct task_st
+@@ -1576,11 +1279,11 @@ static int try_to_wake_up(struct task_st
  			 * of the current CPU:
  			 */
  			if (sync)
@@ -2447,7 +2468,7 @@
  				/*
  				 * This domain has SD_WAKE_AFFINE and
  				 * p is cache cold in this domain, and
-@@ -1563,7 +1267,7 @@ out_set_cpu:
+@@ -1614,7 +1317,7 @@ out_set_cpu:
  		old_state = p->state;
  		if (!(old_state & state))
  			goto out;
@@ -2456,7 +2477,7 @@
  			goto out_running;
  
  		this_cpu = smp_processor_id();
-@@ -1572,25 +1276,10 @@ out_set_cpu:
+@@ -1623,25 +1326,10 @@ out_set_cpu:
  
  out_activate:
  #endif /* CONFIG_SMP */
@@ -2469,7 +2490,7 @@
 -		 */
 -		p->sleep_type = SLEEP_NONINTERACTIVE;
 -	} else
- 
+-
 -	/*
 -	 * Tasks that have marked their sleep as noninteractive get
 -	 * woken up with their sleep average not weighted in an
@@ -2477,14 +2498,14 @@
 -	 */
 -		if (old_state & TASK_NONINTERACTIVE)
 -			p->sleep_type = SLEEP_NONINTERACTIVE;
--
+ 
 -
 -	activate_task(p, rq, cpu == this_cpu);
 +	activate_task(rq, p, 1);
  	/*
  	 * Sync wakeups (i.e. those types of wakeups where the waker
  	 * has indicated that it will leave the CPU in short order)
-@@ -1599,10 +1288,8 @@ out_activate:
+@@ -1650,10 +1338,8 @@ out_activate:
  	 * the waker guarantees that the freshly woken up task is going
  	 * to be considered on this CPU.)
  	 */
@@ -2497,7 +2518,7 @@
  	success = 1;
  
  out_running:
-@@ -1625,19 +1312,27 @@ int fastcall wake_up_state(struct task_s
+@@ -1676,19 +1362,27 @@ int fastcall wake_up_state(struct task_s
  	return try_to_wake_up(p, state, 0);
  }
  
@@ -2532,7 +2553,7 @@
  
  	/*
  	 * We mark the process as running here, but have not actually
-@@ -1646,16 +1341,29 @@ void fastcall sched_fork(struct task_str
+@@ -1697,16 +1391,29 @@ void fastcall sched_fork(struct task_str
  	 * event cannot wake it up and insert it on the runqueue either.
  	 */
  	p->state = TASK_RUNNING;
@@ -2565,7 +2586,7 @@
  		memset(&p->sched_info, 0, sizeof(p->sched_info));
  #endif
  #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
-@@ -1665,34 +1373,16 @@ void fastcall sched_fork(struct task_str
+@@ -1716,34 +1423,16 @@ void fastcall sched_fork(struct task_str
  	/* Want to start with kernel preemption disabled. */
  	task_thread_info(p)->preempt_count = 1;
  #endif
@@ -2606,7 +2627,7 @@
   * wake_up_new_task - wake up a newly created task for the first time.
   *
   * This function will do some initial scheduler statistics housekeeping
-@@ -1701,110 +1391,35 @@ void fastcall sched_fork(struct task_str
+@@ -1752,108 +1441,33 @@ void fastcall sched_fork(struct task_str
   */
  void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
  {
@@ -2632,7 +2653,7 @@
 +	this_cpu = smp_processor_id(); /* parent's CPU */
  
  	p->prio = effective_prio(p);
--
+ 
 -	if (likely(cpu == this_cpu)) {
 -		if (!(clone_flags & CLONE_VM)) {
 -			/*
@@ -2661,7 +2682,10 @@
 -		 *   this_rq = task_rq_lock(current, &flags);
 -		 */
 -		this_rq = rq;
--	} else {
++	if (!sysctl_sched_child_runs_first || (clone_flags & CLONE_VM) ||
++			task_cpu(p) != this_cpu || !current->se.on_rq) {
++		activate_task(rq, p, 0);
+ 	} else {
 -		this_rq = cpu_rq(this_cpu);
 -
 -		/*
@@ -2674,18 +2698,23 @@
 -		if (TASK_PREEMPTS_CURR(p, rq))
 -			resched_task(rq->curr);
 -
--		/*
+ 		/*
 -		 * Parent and child are on different CPUs, now get the
 -		 * parent runqueue to update the parent's ->sleep_avg:
--		 */
++		 * Let the scheduling class do new task startup
++		 * management (if any):
+ 		 */
 -		task_rq_unlock(rq, &flags);
 -		this_rq = task_rq_lock(current, &flags);
--	}
++		p->sched_class->task_new(rq, p);
+ 	}
 -	current->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) *
 -		PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
 -	task_rq_unlock(this_rq, &flags);
--}
--
++	check_preempt_curr(rq, p);
++	task_rq_unlock(rq, &flags);
+ }
+ 
 -/*
 - * Potentially available exiting-child timeslices are
 - * retrieved here - this way the parent does not get
@@ -2696,7 +2725,8 @@
 - * was given away by the parent in the first place.)
 - */
 -void fastcall sched_exit(struct task_struct *p)
--{
++void sched_dead(struct task_struct *p)
+ {
 -	unsigned long flags;
 -	struct rq *rq;
 -
@@ -2714,43 +2744,32 @@
 -		p->parent->sleep_avg = p->parent->sleep_avg /
 -		(EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sleep_avg /
 -		(EXIT_WEIGHT + 1);
-+
-+	if (!sysctl_sched_child_runs_first || (clone_flags & CLONE_VM) ||
-+			task_cpu(p) != this_cpu || !current->se.on_rq) {
-+		activate_task(rq, p, 0);
-+	} else {
-+		/*
-+		 * Let the scheduling class do new task startup
-+		 * management (if any):
-+		 */
-+		p->sched_class->task_new(rq, p);
-+	}
-+	check_preempt_curr(rq, p);
- 	task_rq_unlock(rq, &flags);
+-	task_rq_unlock(rq, &flags);
++	WARN_ON_ONCE(p->se.on_rq);
  }
  
-+void sched_dead(struct task_struct *p)
-+{
-+	WARN_ON_ONCE(p->se.on_rq);
-+}
-+
  /**
-  * prepare_task_switch - prepare to switch tasks
-  * @rq: the runqueue preparing to switch
-@@ -2006,17 +1621,34 @@ unsigned long nr_active(void)
- 	return running + uninterruptible;
- }
+@@ -2046,29 +1660,46 @@ unsigned long nr_active(void)
+ {
+ 	unsigned long i, running = 0, uninterruptible = 0;
  
--#ifdef CONFIG_SMP
--
--/*
-- * Is this task likely cache-hot:
-- */
--static inline int
--task_hot(struct task_struct *p, unsigned long long now, struct sched_domain *sd)
+-	for_each_online_cpu(i) {
+-		running += cpu_rq(i)->nr_running;
+-		uninterruptible += cpu_rq(i)->nr_uninterruptible;
+-	}
++	for_each_online_cpu(i) {
++		running += cpu_rq(i)->nr_running;
++		uninterruptible += cpu_rq(i)->nr_uninterruptible;
++	}
++
++	if (unlikely((long)uninterruptible < 0))
++		uninterruptible = 0;
++
++	return running + uninterruptible;
++}
++
 +static void update_load(struct rq *this_rq)
- {
--	return (long long)(now - p->last_ran) < (long long)sd->cache_hot_time;
++{
 +	struct sched_class *class = sched_class_highest;
 +	unsigned long this_load = 0;
 +	int i, scale;
@@ -2767,20 +2786,33 @@
 +		unsigned long old_load, new_load;
 +
 +		/* scale is effectively 1 << i now, and >> i divides by scale */
-+
+ 
+-	if (unlikely((long)uninterruptible < 0))
+-		uninterruptible = 0;
 +		old_load = this_rq->cpu_load[i];
 +		new_load = this_load;
-+
+ 
+-	return running + uninterruptible;
 +		this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
 +	}
  }
  
-+#ifdef CONFIG_SMP
-+
+ #ifdef CONFIG_SMP
+ 
  /*
+- * Is this task likely cache-hot:
+- */
+-static inline int
+-task_hot(struct task_struct *p, unsigned long long now, struct sched_domain *sd)
+-{
+-	return (long long)(now - p->last_ran) < (long long)sd->cache_hot_time;
+-}
+-
+-/*
   * double_rq_lock - safely lock two runqueues
   *
-@@ -2133,23 +1765,17 @@ void sched_exec(void)
+  * Note this does not disable interrupts like task_rq_lock,
+@@ -2184,23 +1815,17 @@ void sched_exec(void)
   * pull_task - move a task from a remote runqueue to the local runqueue.
   * Both runqueues must be locked.
   */
@@ -2809,7 +2841,7 @@
  }
  
  /*
-@@ -2174,25 +1800,59 @@ int can_migrate_task(struct task_struct 
+@@ -2225,25 +1850,59 @@ int can_migrate_task(struct task_struct 
  		return 0;
  
  	/*
@@ -2882,7 +2914,7 @@
  
  /*
   * move_tasks tries to move up to max_nr_move tasks and max_load_move weighted
-@@ -2206,11 +1866,9 @@ static int move_tasks(struct rq *this_rq
+@@ -2257,11 +1916,9 @@ static int move_tasks(struct rq *this_rq
  		      struct sched_domain *sd, enum idle_type idle,
  		      int *all_pinned)
  {
@@ -2896,7 +2928,7 @@
  	long rem_load_move;
  
  	if (max_nr_move == 0 || max_load_move == 0)
-@@ -2230,76 +1888,42 @@ static int move_tasks(struct rq *this_rq
+@@ -2281,76 +1938,42 @@ static int move_tasks(struct rq *this_rq
  	best_prio_seen = best_prio == busiest->curr->prio;
  
  	/*
@@ -2992,7 +3024,7 @@
  	}
  out:
  	/*
-@@ -2386,7 +2010,7 @@ find_busiest_group(struct sched_domain *
+@@ -2437,7 +2060,7 @@ find_busiest_group(struct sched_domain *
  
  			avg_load += load;
  			sum_nr_running += rq->nr_running;
@@ -3001,7 +3033,7 @@
  		}
  
  		/*
-@@ -2426,8 +2050,8 @@ find_busiest_group(struct sched_domain *
+@@ -2477,8 +2100,8 @@ find_busiest_group(struct sched_domain *
  		 * Busy processors will not participate in power savings
  		 * balance.
  		 */
@@ -3012,7 +3044,7 @@
  
  		/*
  		 * If the local group is idle or completely loaded
-@@ -2437,42 +2061,42 @@ find_busiest_group(struct sched_domain *
+@@ -2488,42 +2111,42 @@ find_busiest_group(struct sched_domain *
  				    !this_nr_running))
  			power_savings_balance = 0;
  
@@ -3080,7 +3112,7 @@
  		}
  group_next:
  #endif
-@@ -2527,7 +2151,7 @@ group_next:
+@@ -2578,7 +2201,7 @@ group_next:
  	 * a think about bumping its value to force at least one task to be
  	 * moved
  	 */
@@ -3089,7 +3121,7 @@
  		unsigned long tmp, pwr_now, pwr_move;
  		unsigned int imbn;
  
-@@ -2541,7 +2165,8 @@ small_imbalance:
+@@ -2592,7 +2215,8 @@ small_imbalance:
  		} else
  			this_load_per_task = SCHED_LOAD_SCALE;
  
@@ -3099,7 +3131,7 @@
  			*imbalance = busiest_load_per_task;
  			return busiest;
  		}
-@@ -2619,11 +2244,12 @@ find_busiest_queue(struct sched_group *g
+@@ -2670,11 +2294,12 @@ find_busiest_queue(struct sched_group *g
  
  		rq = cpu_rq(i);
  
@@ -3115,7 +3147,38 @@
  			busiest = rq;
  		}
  	}
-@@ -2957,32 +2583,6 @@ static void active_load_balance(struct r
+@@ -2938,17 +2563,21 @@ static void idle_balance(int this_cpu, s
+ 	unsigned long next_balance = jiffies + 60 *  HZ;
+ 
+ 	for_each_domain(this_cpu, sd) {
+-		if (sd->flags & SD_BALANCE_NEWIDLE) {
++		unsigned long interval;
++
++		if (!(sd->flags & SD_LOAD_BALANCE))
++			continue;
++
++		if (sd->flags & SD_BALANCE_NEWIDLE)
+ 			/* If we've pulled tasks over stop searching: */
+ 			pulled_task = load_balance_newidle(this_cpu,
+-							this_rq, sd);
+-			if (time_after(next_balance,
+-				  sd->last_balance + sd->balance_interval))
+-				next_balance = sd->last_balance
+-						+ sd->balance_interval;
+-			if (pulled_task)
+-				break;
+-		}
++								this_rq, sd);
++
++		interval = msecs_to_jiffies(sd->balance_interval);
++		if (time_after(next_balance, sd->last_balance + interval))
++			next_balance = sd->last_balance + interval;
++		if (pulled_task)
++			break;
+ 	}
+ 	if (!pulled_task)
+ 		/*
+@@ -3008,32 +2637,6 @@ static void active_load_balance(struct r
  	spin_unlock(&target_rq->lock);
  }
  
@@ -3148,7 +3211,7 @@
  #ifdef CONFIG_NO_HZ
  static struct {
  	atomic_t load_balancer;
-@@ -3086,6 +2686,9 @@ static inline void rebalance_domains(int
+@@ -3137,6 +2740,9 @@ static inline void rebalance_domains(int
  		interval = msecs_to_jiffies(interval);
  		if (unlikely(!interval))
  			interval = 1;
@@ -3158,7 +3221,7 @@
  
  		if (sd->flags & SD_SERIALIZE) {
  			if (!spin_trylock(&balancing))
-@@ -3240,54 +2843,28 @@ DEFINE_PER_CPU(struct kernel_stat, kstat
+@@ -3291,54 +2897,28 @@ DEFINE_PER_CPU(struct kernel_stat, kstat
  EXPORT_PER_CPU_SYMBOL(kstat);
  
  /*
@@ -3226,7 +3289,7 @@
   * Account user cpu time to a process.
   * @p: the process that the cpu time gets accounted to
   * @hardirq_offset: the offset to subtract from hardirq_count()
-@@ -3360,81 +2937,6 @@ void account_steal_time(struct task_stru
+@@ -3411,81 +2991,6 @@ void account_steal_time(struct task_stru
  		cpustat->steal = cputime64_add(cpustat->steal, tmp);
  }
  
@@ -3308,7 +3371,7 @@
  /*
   * This function gets called by the timer code, with HZ frequency.
   * We call it with interrupts disabled.
-@@ -3444,18 +2946,19 @@ out_unlock:
+@@ -3495,18 +3000,19 @@ out_unlock:
   */
  void scheduler_tick(void)
  {
@@ -3334,7 +3397,7 @@
  	rq->idle_at_tick = idle_at_tick;
  	trigger_load_balance(cpu);
  #endif
-@@ -3499,49 +3002,27 @@ EXPORT_SYMBOL(sub_preempt_count);
+@@ -3550,49 +3056,27 @@ EXPORT_SYMBOL(sub_preempt_count);
  
  #endif
  
@@ -3390,7 +3453,7 @@
  	/*
  	 * The idle thread is not allowed to schedule!
  	 * Remove this check after it has been exercised a bit.
-@@ -3552,19 +3033,45 @@ need_resched_nonpreemptible:
+@@ -3603,19 +3087,45 @@ need_resched_nonpreemptible:
  	}
  
  	schedstat_inc(rq, sched_cnt);
@@ -3448,7 +3511,7 @@
  
  	spin_lock_irq(&rq->lock);
  
-@@ -3577,7 +3084,7 @@ need_resched_nonpreemptible:
+@@ -3628,7 +3138,7 @@ need_resched_nonpreemptible:
  		else {
  			if (prev->state == TASK_UNINTERRUPTIBLE)
  				rq->nr_uninterruptible++;
@@ -3457,7 +3520,7 @@
  		}
  	}
  
-@@ -3585,65 +3092,25 @@ need_resched_nonpreemptible:
+@@ -3636,65 +3146,25 @@ need_resched_nonpreemptible:
  	if (unlikely(!rq->nr_running)) {
  		idle_balance(cpu, rq);
  		if (!rq->nr_running) {
@@ -3494,12 +3557,12 @@
 -
 -		if (next->sleep_type == SLEEP_INTERACTIVE)
 -			delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
+-
+-		array = next->array;
+-		new_prio = recalc_task_prio(next, next->timestamp + delta);
 +	next = pick_next_task(rq, prev);
 +	next->se.nr_switches++;
  
--		array = next->array;
--		new_prio = recalc_task_prio(next, next->timestamp + delta);
--
 -		if (unlikely(next->prio != new_prio)) {
 -			dequeue_task(next, array);
 -			next->prio = new_prio;
@@ -3528,7 +3591,7 @@
  		rq->nr_switches++;
  		rq->curr = next;
  		++*switch_count;
-@@ -4074,29 +3541,28 @@ EXPORT_SYMBOL(sleep_on_timeout);
+@@ -4125,29 +3595,28 @@ EXPORT_SYMBOL(sleep_on_timeout);
   */
  void rt_mutex_setprio(struct task_struct *p, int prio)
  {
@@ -3570,7 +3633,7 @@
  		/*
  		 * Reschedule if we are currently running on this runqueue and
  		 * our priority decreased, or if we are not currently running on
-@@ -4105,8 +3571,9 @@ void rt_mutex_setprio(struct task_struct
+@@ -4156,8 +3625,9 @@ void rt_mutex_setprio(struct task_struct
  		if (task_running(rq, p)) {
  			if (p->prio > oldprio)
  				resched_task(rq->curr);
@@ -3582,7 +3645,7 @@
  	}
  	task_rq_unlock(rq, &flags);
  }
-@@ -4115,8 +3582,7 @@ void rt_mutex_setprio(struct task_struct
+@@ -4166,8 +3636,7 @@ void rt_mutex_setprio(struct task_struct
  
  void set_user_nice(struct task_struct *p, long nice)
  {
@@ -3592,7 +3655,7 @@
  	unsigned long flags;
  	struct rq *rq;
  
-@@ -4131,15 +3597,15 @@ void set_user_nice(struct task_struct *p
+@@ -4182,15 +3651,15 @@ void set_user_nice(struct task_struct *p
  	 * The RT priorities are set via sched_setscheduler(), but we still
  	 * allow the 'normal' nice value to be set - but as expected
  	 * it wont have any effect on scheduling until the task is
@@ -3612,7 +3675,7 @@
  		dec_raw_weighted_load(rq, p);
  	}
  
-@@ -4149,8 +3615,8 @@ void set_user_nice(struct task_struct *p
+@@ -4200,8 +3669,8 @@ void set_user_nice(struct task_struct *p
  	p->prio = effective_prio(p);
  	delta = p->prio - old_prio;
  
@@ -3623,7 +3686,7 @@
  		inc_raw_weighted_load(rq, p);
  		/*
  		 * If the task increased its priority or is running and
-@@ -4271,20 +3737,29 @@ static inline struct task_struct *find_p
+@@ -4322,20 +3791,29 @@ static inline struct task_struct *find_p
  }
  
  /* Actually do priority change: must hold rq lock. */
@@ -3660,7 +3723,7 @@
  	set_load_weight(p);
  }
  
-@@ -4299,8 +3774,7 @@ static void __setscheduler(struct task_s
+@@ -4350,8 +3828,7 @@ static void __setscheduler(struct task_s
  int sched_setscheduler(struct task_struct *p, int policy,
  		       struct sched_param *param)
  {
@@ -3670,7 +3733,7 @@
  	unsigned long flags;
  	struct rq *rq;
  
-@@ -4311,12 +3785,13 @@ recheck:
+@@ -4362,12 +3839,13 @@ recheck:
  	if (policy < 0)
  		policy = oldpolicy = p->policy;
  	else if (policy != SCHED_FIFO && policy != SCHED_RR &&
@@ -3687,7 +3750,7 @@
  	 */
  	if (param->sched_priority < 0 ||
  	    (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
-@@ -4347,6 +3822,12 @@ recheck:
+@@ -4398,6 +3876,12 @@ recheck:
  			    param->sched_priority > rlim_rtprio)
  				return -EPERM;
  		}
@@ -3700,7 +3763,7 @@
  
  		/* can't change other user's priorities */
  		if ((current->euid != p->euid) &&
-@@ -4374,13 +3855,13 @@ recheck:
+@@ -4425,13 +3909,13 @@ recheck:
  		spin_unlock_irqrestore(&p->pi_lock, flags);
  		goto recheck;
  	}
@@ -3720,7 +3783,7 @@
  		/*
  		 * Reschedule if we are currently running on this runqueue and
  		 * our priority decreased, or if we are not currently running on
-@@ -4389,8 +3870,9 @@ recheck:
+@@ -4440,8 +3924,9 @@ recheck:
  		if (task_running(rq, p)) {
  			if (p->prio > oldprio)
  				resched_task(rq->curr);
@@ -3732,7 +3795,7 @@
  	}
  	__task_rq_unlock(rq);
  	spin_unlock_irqrestore(&p->pi_lock, flags);
-@@ -4653,50 +4135,66 @@ asmlinkage long sys_sched_getaffinity(pi
+@@ -4704,50 +4189,66 @@ asmlinkage long sys_sched_getaffinity(pi
  	if (ret < 0)
  		return ret;
  
@@ -3805,10 +3868,10 @@
 +	p_to = find_task_by_pid(pid);
 +	if (!p_to)
 +		goto out_unlock;
++
++	rq = this_rq_lock();
  
 -	if (array->nr_active == 1) {
-+	rq = this_rq_lock();
-+
 +	schedstat_inc(rq, yld_cnt);
 +	if (rq->nr_running == 1)
  		schedstat_inc(rq, yld_act_empty);
@@ -3830,7 +3893,7 @@
  
  	/*
  	 * Since we are going to call schedule() anyway, there's
-@@ -4705,13 +4203,19 @@ asmlinkage long sys_sched_yield(void)
+@@ -4756,13 +4257,19 @@ asmlinkage long sys_sched_yield(void)
  	__release(rq->lock);
  	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
  	_raw_spin_unlock(&rq->lock);
@@ -3850,7 +3913,7 @@
  static void __cond_resched(void)
  {
  #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
-@@ -4847,6 +4351,8 @@ asmlinkage long sys_sched_get_priority_m
+@@ -4898,6 +4405,8 @@ asmlinkage long sys_sched_get_priority_m
  		break;
  	case SCHED_NORMAL:
  	case SCHED_BATCH:
@@ -3859,7 +3922,7 @@
  		ret = 0;
  		break;
  	}
-@@ -4871,6 +4377,8 @@ asmlinkage long sys_sched_get_priority_m
+@@ -4922,6 +4431,8 @@ asmlinkage long sys_sched_get_priority_m
  		break;
  	case SCHED_NORMAL:
  	case SCHED_BATCH:
@@ -3868,7 +3931,7 @@
  		ret = 0;
  	}
  	return ret;
-@@ -4905,7 +4413,7 @@ long sys_sched_rr_get_interval(pid_t pid
+@@ -4956,7 +4467,7 @@ long sys_sched_rr_get_interval(pid_t pid
  		goto out_unlock;
  
  	jiffies_to_timespec(p->policy == SCHED_FIFO ?
@@ -3877,7 +3940,7 @@
  	read_unlock(&tasklist_lock);
  	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
  out_nounlock:
-@@ -4986,6 +4494,7 @@ void show_state_filter(unsigned long sta
+@@ -5037,6 +4548,7 @@ void show_state_filter(unsigned long sta
  	 */
  	if (state_filter == -1)
  		debug_show_all_locks();
@@ -3885,7 +3948,7 @@
  }
  
  /**
-@@ -5001,13 +4510,12 @@ void __cpuinit init_idle(struct task_str
+@@ -5052,13 +4564,12 @@ void __cpuinit init_idle(struct task_str
  	struct rq *rq = cpu_rq(cpu);
  	unsigned long flags;
  
@@ -3903,7 +3966,7 @@
  
  	spin_lock_irqsave(&rq->lock, flags);
  	rq->curr = rq->idle = idle;
-@@ -5033,6 +4541,27 @@ void __cpuinit init_idle(struct task_str
+@@ -5084,6 +4595,27 @@ void __cpuinit init_idle(struct task_str
   */
  cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
  
@@ -3931,7 +3994,7 @@
  #ifdef CONFIG_SMP
  /*
   * This is how migration works:
-@@ -5106,7 +4635,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed);
+@@ -5157,7 +4689,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed);
  static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
  {
  	struct rq *rq_dest, *rq_src;
@@ -3940,7 +4003,7 @@
  
  	if (unlikely(cpu_is_offline(dest_cpu)))
  		return ret;
-@@ -5122,20 +4651,13 @@ static int __migrate_task(struct task_st
+@@ -5173,20 +4705,13 @@ static int __migrate_task(struct task_st
  	if (!cpu_isset(dest_cpu, p->cpus_allowed))
  		goto out;
  
@@ -3967,7 +4030,7 @@
  	}
  	ret = 1;
  out:
-@@ -5307,10 +4829,10 @@ void sched_idle_next(void)
+@@ -5358,10 +4883,10 @@ void sched_idle_next(void)
  	 */
  	spin_lock_irqsave(&rq->lock, flags);
  
@@ -3980,7 +4043,7 @@
  
  	spin_unlock_irqrestore(&rq->lock, flags);
  }
-@@ -5360,16 +4882,15 @@ static void migrate_dead(unsigned int de
+@@ -5411,16 +4936,15 @@ static void migrate_dead(unsigned int de
  static void migrate_dead_tasks(unsigned int dead_cpu)
  {
  	struct rq *rq = cpu_rq(dead_cpu);
@@ -4005,7 +4068,7 @@
  	}
  }
  #endif /* CONFIG_HOTPLUG_CPU */
-@@ -5400,7 +4921,7 @@ migration_call(struct notifier_block *nf
+@@ -5451,7 +4975,7 @@ migration_call(struct notifier_block *nf
  		kthread_bind(p, cpu);
  		/* Must be high prio: stop_machine expects to yield to it. */
  		rq = task_rq_lock(p, &flags);
@@ -4014,7 +4077,7 @@
  		task_rq_unlock(rq, &flags);
  		cpu_rq(cpu)->migration_thread = p;
  		break;
-@@ -5431,9 +4952,9 @@ migration_call(struct notifier_block *nf
+@@ -5482,9 +5006,9 @@ migration_call(struct notifier_block *nf
  		rq->migration_thread = NULL;
  		/* Idle task back to normal (off runqueue, low prio) */
  		rq = task_rq_lock(rq->idle, &flags);
@@ -4026,7 +4089,7 @@
  		migrate_dead_tasks(cpu);
  		task_rq_unlock(rq, &flags);
  		migrate_nr_uninterruptible(rq);
-@@ -5742,483 +5263,6 @@ init_sched_build_groups(cpumask_t span, 
+@@ -5793,483 +5317,6 @@ init_sched_build_groups(cpumask_t span, 
  
  #define SD_NODES_PER_DOMAIN 16
  
@@ -4510,7 +4573,7 @@
  #ifdef CONFIG_NUMA
  
  /**
-@@ -6748,10 +5792,6 @@ static int build_sched_domains(const cpu
+@@ -6799,10 +5846,6 @@ static int build_sched_domains(const cpu
  #endif
  		cpu_attach_domain(sd, i);
  	}
@@ -4521,7 +4584,7 @@
  
  	return 0;
  
-@@ -6958,10 +5998,12 @@ void __init sched_init_smp(void)
+@@ -7009,10 +6052,12 @@ void __init sched_init_smp(void)
  	/* Move init over to a non-isolated CPU */
  	if (set_cpus_allowed(current, non_isolated_cpus) < 0)
  		BUG();
@@ -4534,7 +4597,7 @@
  }
  #endif /* CONFIG_SMP */
  
-@@ -6977,8 +6019,15 @@ int in_sched_functions(unsigned long add
+@@ -7028,8 +6073,15 @@ int in_sched_functions(unsigned long add
  
  void __init sched_init(void)
  {
@@ -4551,7 +4614,7 @@
  
  	for_each_possible_cpu(i) {
  		struct prio_array *array;
-@@ -6988,14 +6037,14 @@ void __init sched_init(void)
+@@ -7039,14 +6091,14 @@ void __init sched_init(void)
  		spin_lock_init(&rq->lock);
  		lockdep_set_class(&rq->lock, &rq->rq_lock_key);
  		rq->nr_running = 0;
@@ -4571,7 +4634,7 @@
  		rq->active_balance = 0;
  		rq->push_cpu = 0;
  		rq->cpu = i;
-@@ -7004,16 +6053,14 @@ void __init sched_init(void)
+@@ -7055,16 +6107,14 @@ void __init sched_init(void)
  #endif
  		atomic_set(&rq->nr_iowait, 0);
  
@@ -4594,18 +4657,19 @@
  	}
  
  	set_load_weight(&init_task);
-@@ -7070,28 +6117,55 @@ EXPORT_SYMBOL(__might_sleep);
+@@ -7121,29 +6171,55 @@ EXPORT_SYMBOL(__might_sleep);
  #ifdef CONFIG_MAGIC_SYSRQ
  void normalize_rt_tasks(void)
  {
 -	struct prio_array *array;
- 	struct task_struct *p;
+ 	struct task_struct *g, *p;
  	unsigned long flags;
  	struct rq *rq;
 +	int on_rq;
  
  	read_lock_irq(&tasklist_lock);
- 	for_each_process(p) {
+-
+ 	do_each_thread(g, p) {
 -		if (!rt_task(p))
 +		p->se.fair_key = 0;
 +		p->se.wait_runtime = 0;
@@ -4658,12 +4722,12 @@
 +#endif
  		__task_rq_unlock(rq);
  		spin_unlock_irqrestore(&p->pi_lock, flags);
- 	}
-Index: linux-cfs-2.6.22-rc5.q/kernel/sched_debug.c
+ 	} while_each_thread(g, p);
+Index: linux/kernel/sched_debug.c
 ===================================================================
 --- /dev/null
-+++ linux-cfs-2.6.22-rc5.q/kernel/sched_debug.c
-@@ -0,0 +1,250 @@
++++ linux/kernel/sched_debug.c
+@@ -0,0 +1,258 @@
 +/*
 + * kernel/time/sched_debug.c
 + *
@@ -4770,10 +4834,18 @@
 +static void print_cpu(struct seq_file *m, int cpu, u64 now)
 +{
 +	struct rq *rq = &per_cpu(runqueues, cpu);
-+	unsigned int freq = cpu_khz ? : 1;
 +
-+	SEQ_printf(m, "\ncpu#%d, %u.%03u MHz\n",
-+		   cpu, freq / 1000, (freq % 1000));
++#ifdef CONFIG_X86
++	{
++		unsigned int freq = cpu_khz ? : 1;
++
++		SEQ_printf(m, "\ncpu#%d, %u.%03u MHz\n",
++			   cpu, freq / 1000, (freq % 1000));
++	}
++#else
++	SEQ_printf(m, "\ncpu#%d\n", cpu);
++#endif
++
 +#define P(x) \
 +	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x))
 +
@@ -4914,10 +4986,10 @@
 +	p->se.wait_runtime_overruns = p->se.wait_runtime_underruns = 0;
 +	p->se.sum_exec_runtime = 0;
 +}
-Index: linux-cfs-2.6.22-rc5.q/kernel/sched_fair.c
+Index: linux/kernel/sched_fair.c
 ===================================================================
 --- /dev/null
-+++ linux-cfs-2.6.22-rc5.q/kernel/sched_fair.c
++++ linux/kernel/sched_fair.c
 @@ -0,0 +1,888 @@
 +/*
 + * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
@@ -5807,10 +5879,10 @@
 +	.task_tick		= task_tick_fair,
 +	.task_new		= task_new_fair,
 +};
-Index: linux-cfs-2.6.22-rc5.q/kernel/sched_rt.c
+Index: linux/kernel/sched_rt.c
 ===================================================================
 --- /dev/null
-+++ linux-cfs-2.6.22-rc5.q/kernel/sched_rt.c
++++ linux/kernel/sched_rt.c
 @@ -0,0 +1,219 @@
 +/*
 + * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
@@ -6031,10 +6103,10 @@
 +	.task_tick		= task_tick_rt,
 +	.task_new		= task_new_rt,
 +};
-Index: linux-cfs-2.6.22-rc5.q/kernel/sched_stats.h
+Index: linux/kernel/sched_stats.h
 ===================================================================
 --- /dev/null
-+++ linux-cfs-2.6.22-rc5.q/kernel/sched_stats.h
++++ linux/kernel/sched_stats.h
 @@ -0,0 +1,235 @@
 +
 +#ifdef CONFIG_SCHEDSTATS
@@ -6271,10 +6343,10 @@
 +#define sched_info_switch(t, next)	do { } while (0)
 +#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
 +
-Index: linux-cfs-2.6.22-rc5.q/kernel/softirq.c
+Index: linux/kernel/softirq.c
 ===================================================================
---- linux-cfs-2.6.22-rc5.q.orig/kernel/softirq.c
-+++ linux-cfs-2.6.22-rc5.q/kernel/softirq.c
+--- linux.orig/kernel/softirq.c
++++ linux/kernel/softirq.c
 @@ -488,7 +488,6 @@ void __init softirq_init(void)
  
  static int ksoftirqd(void * __bind_cpu)
@@ -6283,10 +6355,10 @@
  	current->flags |= PF_NOFREEZE;
  
  	set_current_state(TASK_INTERRUPTIBLE);
-Index: linux-cfs-2.6.22-rc5.q/kernel/sysctl.c
+Index: linux/kernel/sysctl.c
 ===================================================================
---- linux-cfs-2.6.22-rc5.q.orig/kernel/sysctl.c
-+++ linux-cfs-2.6.22-rc5.q/kernel/sysctl.c
+--- linux.orig/kernel/sysctl.c
++++ linux/kernel/sysctl.c
 @@ -206,8 +206,60 @@ static ctl_table root_table[] = {
  	{ .ctl_name = 0 }
  };
@@ -6348,30 +6420,3 @@
  		.ctl_name	= KERN_PANIC,
  		.procname	= "panic",
  		.data		= &panic_timeout,
-Index: linux/kernel/sched_debug.c
-===================================================================
---- linux.orig/kernel/sched_debug.c
-+++ linux/kernel/sched_debug.c
-@@ -104,10 +104,18 @@ static void print_rq_runtime_sum(struct 
- static void print_cpu(struct seq_file *m, int cpu, u64 now)
- {
- 	struct rq *rq = &per_cpu(runqueues, cpu);
--	unsigned int freq = cpu_khz ? : 1;
- 
--	SEQ_printf(m, "\ncpu#%d, %u.%03u MHz\n",
--		   cpu, freq / 1000, (freq % 1000));
-+#ifdef CONFIG_X86
-+	{
-+		unsigned int freq = cpu_khz ? : 1;
-+
-+		SEQ_printf(m, "\ncpu#%d, %u.%03u MHz\n",
-+			   cpu, freq / 1000, (freq % 1000));
-+	}
-+#else
-+	SEQ_printf(m, "\ncpu#%d\n", cpu);
-+#endif
-+
- #define P(x) \
- 	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x))
- 
-


Index: sources
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/sources,v
retrieving revision 1.597
retrieving revision 1.598
diff -u -r1.597 -r1.598
--- sources	18 Jun 2007 21:09:33 -0000	1.597
+++ sources	20 Jun 2007 19:44:43 -0000	1.598
@@ -1,3 +1,3 @@
 1b515f588078dfa7f4bab2634bd17e80  linux-2.6.21.tar.bz2
 807de5a9464e23dfc6336ddc1c07c24f  patch-2.6.22-rc5.bz2
-1f0669d2c8a94005030b212b08c87689  patch-2.6.22-rc5-git1.bz2
+48865b9c116be0d425a567395e9981b3  patch-2.6.22-rc5-git4.bz2


Index: upstream
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/upstream,v
retrieving revision 1.519
retrieving revision 1.520
diff -u -r1.519 -r1.520
--- upstream	18 Jun 2007 21:09:33 -0000	1.519
+++ upstream	20 Jun 2007 19:44:43 -0000	1.520
@@ -1,3 +1,3 @@
 linux-2.6.21.tar.bz2
 patch-2.6.22-rc5.bz2
-patch-2.6.22-rc5-git1.bz2
+patch-2.6.22-rc5-git4.bz2


--- patch-2.6.22-rc5-git1.bz2.sign DELETED ---



