rpms/kernel/F-7 kernel-2.6.spec, 1.3310, 1.3311 linux-2.6-sched-cfs.patch, 1.9, 1.10 linux-2.6-utrace-ptrace-compat-ia64.patch, 1.3, 1.4 linux-2.6-utrace-regset-ia64.patch, 1.3, 1.4
Chuck Ebbert (cebbert)
fedora-extras-commits at redhat.com
Fri Aug 10 17:27:41 UTC 2007
Author: cebbert
Update of /cvs/pkgs/rpms/kernel/F-7
In directory cvs-int.fedora.redhat.com:/tmp/cvs-serv27805
Modified Files:
kernel-2.6.spec linux-2.6-sched-cfs.patch
linux-2.6-utrace-ptrace-compat-ia64.patch
linux-2.6-utrace-regset-ia64.patch
Log Message:
* Fri Aug 10 2007 Chuck Ebbert <cebbert at redhat.com>
- don't use incremental patches for -stable updates
- update CFS scheduler patch
- update utrace patches
Index: kernel-2.6.spec
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/F-7/kernel-2.6.spec,v
retrieving revision 1.3310
retrieving revision 1.3311
diff -u -r1.3310 -r1.3311
--- kernel-2.6.spec 9 Aug 2007 15:05:37 -0000 1.3310
+++ kernel-2.6.spec 10 Aug 2007 17:27:08 -0000 1.3311
@@ -475,15 +475,7 @@
# For a stable release kernel
%if 0%{?stable_update}
-Patch00: patch-2.6.%{base_sublevel}.1.bz2
-# at present, you'll have to manually uncomment needed incrementals
-# here to get up to 2.6.%{base_sublevel}.%{stable_update}, but they will
-# all be automatically applied
-#Patch01: patch-2.6.%{base_sublevel}.1-2.bz2
-#Patch02: patch-2.6.%{base_sublevel}.2-3.bz2
-#Patch03: patch-2.6.%{base_sublevel}.3-4.bz2
-#Patch04: patch-2.6.%{base_sublevel}.4-5.bz2
-#Patch05: patch-2.6.%{base_sublevel}.5-6.bz2
+Patch00: patch-2.6.%{base_sublevel}.%{stable_update}.bz2
# non-released_kernel case
# These are automagically defined by the rcrev and gitrev values set up
@@ -1064,13 +1056,7 @@
# Update to latest upstream.
# released_kernel with stable_update available case
%if 0%{?stable_update}
-ApplyPatch patch-2.6.%{base_sublevel}.1.bz2
-if [ %{stable_update} -ge 2 ]; then
- for p in `seq 2 %{stable_update}`; do
- let o=p-1
- ApplyPatch patch-2.6.%{base_sublevel}.$o-$p.bz2
- done
-fi
+ApplyPatch patch-2.6.%{base_sublevel}.%{stable_update}.bz2
# non-released_kernel case
%else
@@ -2326,6 +2312,11 @@
%endif
%changelog
+* Fri Aug 10 2007 Chuck Ebbert <cebbert at redhat.com>
+- don't use incremental patches for -stable updates
+- update CFS scheduler patch
+- update utrace patches
+
* Wed Aug 08 2007 John W. Linville <linville at redhat.com>
- Update wireless bits (upstream fixes, iwlwifi and bcm43xx updates)
linux-2.6-sched-cfs.patch:
Index: linux-2.6-sched-cfs.patch
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/F-7/linux-2.6-sched-cfs.patch,v
retrieving revision 1.9
retrieving revision 1.10
diff -u -r1.9 -r1.10
--- linux-2.6-sched-cfs.patch 27 Jul 2007 14:46:01 -0000 1.9
+++ linux-2.6-sched-cfs.patch 10 Aug 2007 17:27:08 -0000 1.10
@@ -39,6 +39,19 @@
[17c38b7490b3f0300c7812aefdae2ddda7ab4112]
Cache xtime every call to update_wall_time
+[mingo/254753dc321ea2b753ca9bc58ac329557a20efac][merged inline]
+sched: make the multiplication table more accurate
+
+[mingo/a69edb55605117cc0f20aa36c49c20b96590774d][merged inline]
+sched: fix update_stats_enqueue() reniced codepath
+
+[mingo/7cff8cf61cac15fa29a1ca802826d2bcbca66152][appended]
+sched: refine negative nice level granularity
+
+[mingo/f1a438d813d416fa9f4be4e6dbd10b54c5938d89][merged inline]
+sched: reorder update_cpu_load(rq) with the ->task_tick() call
+
+
Index: linux/Documentation/kernel-parameters.txt
===================================================================
--- linux.orig/Documentation/kernel-parameters.txt
@@ -2269,7 +2282,7 @@
}
/*
-@@ -791,53 +717,141 @@ static inline int __normal_prio(struct t
+@@ -791,53 +717,144 @@ static inline int __normal_prio(struct t
* this code will need modification
*/
#define TIME_SLICE_NICE_ZERO DEF_TIMESLICE
@@ -2299,11 +2312,14 @@
+ * it's +10% CPU usage.
+ */
+static const int prio_to_weight[40] = {
-+/* -20 */ 88818, 71054, 56843, 45475, 36380, 29104, 23283, 18626, 14901, 11921,
-+/* -10 */ 9537, 7629, 6103, 4883, 3906, 3125, 2500, 2000, 1600, 1280,
-+/* 0 */ NICE_0_LOAD /* 1024 */,
-+/* 1 */ 819, 655, 524, 419, 336, 268, 215, 172, 137,
-+/* 10 */ 110, 87, 70, 56, 45, 36, 29, 23, 18, 15,
++ /* -20 */ 88761, 71755, 56483, 46273, 36291,
++ /* -15 */ 29154, 23254, 18705, 14949, 11916,
++ /* -10 */ 9548, 7620, 6100, 4904, 3906,
++ /* -5 */ 3121, 2501, 1991, 1586, 1277,
++ /* 0 */ 1024, 820, 655, 526, 423,
++ /* 5 */ 335, 272, 215, 172, 137,
++ /* 10 */ 110, 87, 70, 56, 45,
++ /* 15 */ 36, 29, 23, 18, 15,
+};
-static void set_load_weight(struct task_struct *p)
@@ -2324,14 +2340,14 @@
- p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
-}
+static const u32 prio_to_wmult[40] = {
-+ 48356, 60446, 75558, 94446, 118058, 147573,
-+ 184467, 230589, 288233, 360285, 450347,
-+ 562979, 703746, 879575, 1099582, 1374389,
-+ 1717986, 2147483, 2684354, 3355443, 4194304,
-+ 5244160, 6557201, 8196502, 10250518, 12782640,
-+ 16025997, 19976592, 24970740, 31350126, 39045157,
-+ 49367440, 61356675, 76695844, 95443717, 119304647,
-+ 148102320, 186737708, 238609294, 286331153,
++ /* -20 */ 48388, 59856, 76040, 92818, 118348,
++ /* -15 */ 147320, 184698, 229616, 287308, 360437,
++ /* -10 */ 449829, 563644, 704093, 875809, 1099582,
++ /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
++ /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
++ /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
++ /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
++ /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
+};
static inline void
@@ -4147,9 +4163,9 @@
- update_cpu_clock(p, rq, now);
+ spin_lock(&rq->lock);
++ update_cpu_load(rq);
+ if (curr != rq->idle) /* FIXME: needed? */
+ curr->sched_class->task_tick(rq, curr);
-+ update_cpu_load(rq);
+ spin_unlock(&rq->lock);
- if (!idle_at_tick)
@@ -5896,7 +5912,7 @@
===================================================================
--- /dev/null
+++ linux/kernel/sched_fair.c
-@@ -0,0 +1,1107 @@
+@@ -0,0 +1,1108 @@
+/*
+ * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
+ *
@@ -6287,7 +6303,8 @@
+ (WMULT_SHIFT - NICE_0_SHIFT);
+ } else {
+ tmp = se->wait_runtime;
-+ key -= (tmp * se->load.weight) >> NICE_0_SHIFT;
++ key -= (tmp * se->load.inv_weight) >>
++ (WMULT_SHIFT - NICE_0_SHIFT);
+ }
+ }
+
@@ -8264,3 +8281,55 @@
}
-
EXPORT_SYMBOL(current_kernel_time);
+From: Ingo Molnar <mingo at elte.hu>
+Date: Thu, 9 Aug 2007 09:16:52 +0000 (+0200)
+Subject: sched: refine negative nice level granularity
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fmingo%2Flinux-2.6-sched.git;a=commitdiff_plain;h=7cff8cf61cac15fa29a1ca802826d2bcbca66152
+
+sched: refine negative nice level granularity
+
+refine the granularity of negative nice level tasks: let them
+reschedule more often to offset the effect of them consuming
+their wait_runtime proportionately slower. (This makes nice-0
+task scheduling smoother in the presence of negatively
+reniced tasks.)
+
+Signed-off-by: Ingo Molnar <mingo at elte.hu>
+---
+
+diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
+index 7a632c5..e91db32 100644
+--- a/kernel/sched_fair.c
++++ b/kernel/sched_fair.c
+@@ -222,21 +222,25 @@ niced_granularity(struct sched_entity *curr, unsigned long granularity)
+ {
+ u64 tmp;
+
++ if (likely(curr->load.weight == NICE_0_LOAD))
++ return granularity;
+ /*
+- * Negative nice levels get the same granularity as nice-0:
++ * Positive nice levels get the same granularity as nice-0:
+ */
+- if (likely(curr->load.weight >= NICE_0_LOAD))
+- return granularity;
++ if (likely(curr->load.weight < NICE_0_LOAD)) {
++ tmp = curr->load.weight * (u64)granularity;
++ return (long) (tmp >> NICE_0_SHIFT);
++ }
+ /*
+- * Positive nice level tasks get linearly finer
++ * Negative nice level tasks get linearly finer
+ * granularity:
+ */
+- tmp = curr->load.weight * (u64)granularity;
++ tmp = curr->load.inv_weight * (u64)granularity;
+
+ /*
+ * It will always fit into 'long':
+ */
+- return (long) (tmp >> NICE_0_SHIFT);
++ return (long) (tmp >> WMULT_SHIFT);
+ }
+
+ static inline void
linux-2.6-utrace-ptrace-compat-ia64.patch:
Index: linux-2.6-utrace-ptrace-compat-ia64.patch
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/F-7/linux-2.6-utrace-ptrace-compat-ia64.patch,v
retrieving revision 1.3
retrieving revision 1.4
diff -u -r1.3 -r1.4
--- linux-2.6-utrace-ptrace-compat-ia64.patch 20 Jul 2007 18:48:03 -0000 1.3
+++ linux-2.6-utrace-ptrace-compat-ia64.patch 10 Aug 2007 17:27:08 -0000 1.4
@@ -8,12 +8,14 @@
---
- arch/ia64/ia32/sys_ia32.c | 40 ++
- arch/ia64/kernel/ptrace.c | 1016 +++++----------------------------------------
+ arch/ia64/ia32/sys_ia32.c | 40 +
+ arch/ia64/kernel/ptrace.c | 1016 +++++-----------------------------------------
2 files changed, 159 insertions(+), 897 deletions(-)
---- linux-2.6/arch/ia64/ia32/sys_ia32.c
-+++ linux-2.6/arch/ia64/ia32/sys_ia32.c
+Index: b/arch/ia64/ia32/sys_ia32.c
+===================================================================
+--- a/arch/ia64/ia32/sys_ia32.c
++++ b/arch/ia64/ia32/sys_ia32.c
@@ -2340,6 +2340,46 @@ const struct utrace_regset_view utrace_i
};
#endif
@@ -61,9 +63,11 @@
typedef struct {
unsigned int ss_sp;
unsigned int ss_flags;
---- linux-2.6/arch/ia64/kernel/ptrace.c
-+++ linux-2.6/arch/ia64/kernel/ptrace.c
-@@ -554,81 +554,6 @@ ia64_sync_user_rbs (struct task_struct *
+Index: b/arch/ia64/kernel/ptrace.c
+===================================================================
+--- a/arch/ia64/kernel/ptrace.c
++++ b/arch/ia64/kernel/ptrace.c
+@@ -573,81 +573,6 @@ ia64_sync_user_rbs (struct task_struct *
return 0;
}
@@ -145,7 +149,7 @@
/*
* Write f32-f127 back to task->thread.fph if it has been modified.
*/
-@@ -792,828 +717,6 @@ access_nat_bits (struct task_struct *chi
+@@ -811,828 +736,6 @@ access_nat_bits (struct task_struct *chi
return 0;
}
@@ -974,7 +978,7 @@
/* "asmlinkage" so the input arguments are preserved... */
-@@ -1667,6 +770,9 @@ syscall_trace_leave (long arg0, long arg
+@@ -1694,6 +797,9 @@ syscall_trace_leave (long arg0, long arg
}
}
@@ -984,7 +988,7 @@
/* Utrace implementation starts here */
typedef struct utrace_get {
-@@ -2454,3 +1560,119 @@ const struct utrace_regset_view *utrace_
+@@ -2506,3 +1612,119 @@ const struct utrace_regset_view *utrace_
#endif
return &utrace_ia64_native;
}
linux-2.6-utrace-regset-ia64.patch:
Index: linux-2.6-utrace-regset-ia64.patch
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/F-7/linux-2.6-utrace-regset-ia64.patch,v
retrieving revision 1.3
retrieving revision 1.4
diff -u -r1.3 -r1.4
--- linux-2.6-utrace-regset-ia64.patch 20 Jul 2007 18:48:03 -0000 1.3
+++ linux-2.6-utrace-regset-ia64.patch 10 Aug 2007 17:27:08 -0000 1.4
@@ -9,14 +9,16 @@
---
- arch/ia64/ia32/sys_ia32.c | 472 +++++++++++++++++++++++++
- arch/ia64/kernel/ptrace.c | 804 ++++++++++++++++++++++++++++++++++++++++++
- include/asm-ia64/tracehook.h | 7
+ arch/ia64/ia32/sys_ia32.c | 472 +++++++++++++++++++++++
+ arch/ia64/kernel/ptrace.c | 856 +++++++++++++++++++++++++++++++++++++++++++
include/asm-ia64/elf.h | 24 +
- 4 files changed, 1305 insertions(+), 2 deletions(-)
+ include/asm-ia64/tracehook.h | 7
+ 4 files changed, 1357 insertions(+), 2 deletions(-)
---- linux-2.6/arch/ia64/ia32/sys_ia32.c
-+++ linux-2.6/arch/ia64/ia32/sys_ia32.c
+Index: b/arch/ia64/ia32/sys_ia32.c
+===================================================================
+--- a/arch/ia64/ia32/sys_ia32.c
++++ b/arch/ia64/ia32/sys_ia32.c
@@ -44,6 +44,7 @@
#include <linux/eventpoll.h>
#include <linux/personality.h>
@@ -503,8 +505,10 @@
typedef struct {
unsigned int ss_sp;
unsigned int ss_flags;
---- linux-2.6/arch/ia64/kernel/ptrace.c
-+++ linux-2.6/arch/ia64/kernel/ptrace.c
+Index: b/arch/ia64/kernel/ptrace.c
+===================================================================
+--- a/arch/ia64/kernel/ptrace.c
++++ b/arch/ia64/kernel/ptrace.c
@@ -3,6 +3,9 @@
*
* Copyright (C) 1999-2005 Hewlett-Packard Co
@@ -532,7 +536,33 @@
#include <asm/unwind.h>
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
-@@ -548,6 +554,7 @@ ia64_sync_user_rbs (struct task_struct *
+@@ -522,6 +528,25 @@ ia64_get_user_rbs_end (struct task_struc
+ return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
+ }
+
++long
++ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
++ unsigned long user_rbs_start, unsigned long user_rbs_end)
++{
++ unsigned long addr, val;
++ long ret;
++
++ /* now copy word for word from user rbs to kernel rbs: */
++ for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
++ if (access_process_vm(child, addr, &val, sizeof(val), 0)
++ != sizeof(val))
++ return -EIO;
++ ret = ia64_poke(child, sw, user_rbs_end, addr, val);
++ if (ret < 0)
++ return ret;
++ }
++ return 0;
++}
++
+ /*
+ * Synchronize (i.e, write) the RSE backing store living in kernel
+ * space to the VM of the CHILD task. SW and PT are the pointers to
+@@ -548,6 +573,7 @@ ia64_sync_user_rbs (struct task_struct *
return 0;
}
@@ -540,7 +570,7 @@
static inline int
thread_matches (struct task_struct *thread, unsigned long addr)
{
-@@ -620,6 +627,7 @@ find_thread_for_addr (struct task_struct
+@@ -620,6 +646,7 @@ find_thread_for_addr (struct task_struct
mmput(mm);
return child;
}
@@ -548,7 +578,7 @@
/*
* Write f32-f127 back to task->thread.fph if it has been modified.
-@@ -664,6 +672,7 @@ ia64_sync_fph (struct task_struct *task)
+@@ -664,6 +691,7 @@ ia64_sync_fph (struct task_struct *task)
psr->dfh = 1;
}
@@ -556,7 +586,7 @@
static int
access_fr (struct unw_frame_info *info, int regnum, int hi,
unsigned long *data, int write_access)
-@@ -682,6 +691,7 @@ access_fr (struct unw_frame_info *info,
+@@ -682,6 +710,7 @@ access_fr (struct unw_frame_info *info,
*data = fpval.u.bits[hi];
return ret;
}
@@ -564,7 +594,7 @@
/*
* Change the machine-state of CHILD such that it will return via the normal
-@@ -782,6 +792,7 @@ access_nat_bits (struct task_struct *chi
+@@ -782,6 +811,7 @@ access_nat_bits (struct task_struct *chi
return 0;
}
@@ -572,7 +602,7 @@
static int
access_uarea (struct task_struct *child, unsigned long addr,
unsigned long *data, int write_access)
-@@ -1248,7 +1259,9 @@ ptrace_getregs (struct task_struct *chil
+@@ -1248,7 +1278,9 @@ ptrace_getregs (struct task_struct *chil
ret = retval ? -EIO : 0;
return ret;
}
@@ -582,7 +612,7 @@
static long
ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
-@@ -1394,6 +1407,7 @@ ptrace_setregs (struct task_struct *chil
+@@ -1394,6 +1426,7 @@ ptrace_setregs (struct task_struct *chil
ret = retval ? -EIO : 0;
return ret;
}
@@ -590,7 +620,7 @@
/*
* Called by kernel/ptrace.c when detaching..
-@@ -1411,6 +1425,7 @@ ptrace_disable (struct task_struct *chil
+@@ -1411,6 +1444,7 @@ ptrace_disable (struct task_struct *chil
child_psr->tb = 0;
}
@@ -598,7 +628,7 @@
asmlinkage long
sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
{
-@@ -1598,6 +1613,7 @@ sys_ptrace (long request, pid_t pid, uns
+@@ -1598,6 +1632,7 @@ sys_ptrace (long request, pid_t pid, uns
unlock_kernel();
return ret;
}
@@ -606,7 +636,27 @@
/* "asmlinkage" so the input arguments are preserved... */
-@@ -1650,3 +1666,791 @@ syscall_trace_leave (long arg0, long arg
+@@ -1609,6 +1644,10 @@ syscall_trace_enter (long arg0, long arg
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall(&regs, 0);
+
++ /* copy user rbs to kernel rbs */
++ if (test_thread_flag(TIF_RESTORE_RSE))
++ ia64_sync_krbs(current);
++
+ if (unlikely(current->audit_context)) {
+ long syscall;
+ int arch;
+@@ -1645,8 +1684,825 @@ syscall_trace_leave (long arg0, long arg
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall(&regs, 1);
+
++ /* copy user rbs to kernel rbs */
++ if (test_thread_flag(TIF_RESTORE_RSE))
++ ia64_sync_krbs(current);
++
+ if (test_thread_flag(TIF_SINGLESTEP)) {
+ force_sig(SIGTRAP, current); /* XXX */
tracehook_report_syscall_step(&regs);
}
}
@@ -1254,9 +1304,34 @@
+ const struct utrace_regset *regset,
+ int now)
+{
++ if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
++ return 0;
++ tsk_set_notify_resume(target);
+ return do_regset_call(do_gpregs_writeback, target, regset, 0, 0, NULL, NULL);
+}
+
++static void do_gpregs_readback(struct unw_frame_info *info, void *arg)
++{
++ struct pt_regs *pt;
++ utrace_getset_t *dst = arg;
++ unsigned long urbs_end;
++
++ if (unw_unwind_to_user(info) < 0)
++ return;
++ pt = task_pt_regs(dst->target);
++ urbs_end = ia64_get_user_rbs_end(dst->target, pt, NULL);
++ dst->ret = ia64_sync_kernel_rbs(dst->target, info->sw, pt->ar_bspstore, urbs_end);
++}
++/*
++ * This is called to read back the register backing store.
++ */
++long ia64_sync_krbs(struct task_struct *target)
++{
++ clear_tsk_thread_flag(target, TIF_RESTORE_RSE);
++ tsk_clear_notify_resume(target);
++ return do_regset_call(do_gpregs_readback, target, NULL, 0, 0, NULL, NULL);
++}
++
+static int
+fpregs_active(struct task_struct *target, const struct utrace_regset *regset)
+{
@@ -1398,8 +1473,10 @@
+#endif
+ return &utrace_ia64_native;
+}
---- linux-2.6/include/asm-ia64/tracehook.h
-+++ linux-2.6/include/asm-ia64/tracehook.h
+Index: b/include/asm-ia64/tracehook.h
+===================================================================
+--- a/include/asm-ia64/tracehook.h
++++ b/include/asm-ia64/tracehook.h
@@ -67,7 +67,10 @@ static inline int tracehook_single_step_
static inline void tracehook_abort_syscall(struct pt_regs *regs)
@@ -1413,8 +1490,10 @@
-#endif
+#endif /* asm/tracehook.h */
---- linux-2.6/include/asm-ia64/elf.h
-+++ linux-2.6/include/asm-ia64/elf.h
+Index: b/include/asm-ia64/elf.h
+===================================================================
+--- a/include/asm-ia64/elf.h
++++ b/include/asm-ia64/elf.h
@@ -154,6 +154,30 @@ extern void ia64_init_addr_space (void);
#define ELF_NGREG 128 /* we really need just 72 but let's leave some headroom... */
#define ELF_NFPREG 128 /* f0 and f1 could be omitted, but so what... */
More information about the fedora-extras-commits
mailing list