rpms/kernel/F-7 linux-2.6-sched-cfs-updates.patch, NONE, 1.1 kernel-2.6.spec, 1.3339, 1.3340 linux-2.6-sched-cfs-v2.6.22.5-v20.5.patch, 1.2, 1.3
Chuck Ebbert (cebbert)
fedora-extras-commits at redhat.com
Wed Sep 12 22:36:20 UTC 2007
Author: cebbert
Update of /cvs/pkgs/rpms/kernel/F-7
In directory cvs-int.fedora.redhat.com:/tmp/cvs-serv2760
Modified Files:
kernel-2.6.spec linux-2.6-sched-cfs-v2.6.22.5-v20.5.patch
Added Files:
linux-2.6-sched-cfs-updates.patch
Log Message:
* Wed Sep 12 2007 Chuck Ebbert <cebbert at redhat.com>
- update CFS scheduler
linux-2.6-sched-cfs-updates.patch:
--- NEW FILE linux-2.6-sched-cfs-updates.patch ---
Gitweb: http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=a0dc72601d48b171b4870dfdd0824901a2b2b1a9
Commit: a0dc72601d48b171b4870dfdd0824901a2b2b1a9
Parent: 7fd0d2dde929ead79901e389e70dbfb3c6c06986
Author: Ingo Molnar <mingo at elte.hu>
AuthorDate: Wed Sep 5 14:32:49 2007 +0200
Committer: Ingo Molnar <mingo at elte.hu>
CommitDate: Wed Sep 5 14:32:49 2007 +0200
sched: fix niced_granularity() shift
Fix the shift in niced_granularity(). The incorrect shift resulted in
under-scheduling for CPU-bound negative-nice-level tasks (and this in
turn caused higher-than-necessary latencies in nice-0 tasks).
Signed-off-by: Ingo Molnar <mingo at elte.hu>
---
kernel/sched_fair.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ce39282..810b52d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -291,7 +291,7 @@ niced_granularity(struct sched_entity *curr, unsigned long granularity)
/*
* It will always fit into 'long':
*/
- return (long) (tmp >> WMULT_SHIFT);
+ return (long) (tmp >> (WMULT_SHIFT-NICE_0_SHIFT));
}
static inline void
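For readers who want the arithmetic in isolation: the sketch below models the
fixed-point scaling standalone, using constants assumed from 2.6.22-era CFS
(WMULT_SHIFT of 32, NICE_0_SHIFT of 10). It is illustrative only, not the
kernel code itself.

    #include <stdio.h>
    #include <stdint.h>

    /* Constants assumed from 2.6.22-era CFS (illustrative only). */
    #define WMULT_SHIFT  32   /* inv_weight ~= 2^32 / weight            */
    #define NICE_0_SHIFT 10   /* NICE_0_LOAD == 1024, the nice-0 weight */

    int main(void)
    {
        /* A negative-nice task with twice the nice-0 weight. */
        unsigned long weight      = 2048;
        uint32_t      inv_weight  = (uint32_t)((1ULL << WMULT_SHIFT) / weight);
        unsigned long granularity = 4000000UL;   /* 4 ms, in ns */

        uint64_t tmp = (uint64_t)inv_weight * granularity;

        /*
         * The intended result is granularity * NICE_0_LOAD / weight = 2 ms.
         * Shifting by the full WMULT_SHIFT also divides away the
         * NICE_0_SHIFT scaling, leaving a value 1024x too small -- which
         * is what caused the under-scheduling of negative-nice tasks.
         */
        printf("fixed: %ld ns\n", (long)(tmp >> (WMULT_SHIFT - NICE_0_SHIFT)));
        printf("buggy: %ld ns\n", (long)(tmp >> WMULT_SHIFT));
        return 0;
    }

Running it prints 2000000 ns for the fixed shift and 1953 ns for the buggy
one, i.e. the old code handed reniced tasks a preemption granularity roughly
a thousand times smaller than intended.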
Gitweb: http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=7fd0d2dde929ead79901e389e70dbfb3c6c06986
Commit: 7fd0d2dde929ead79901e389e70dbfb3c6c06986
Parent: b21010ed6498391c0f359f2a89c907533fe07fec
Author: Suresh Siddha <suresh.b.siddha at intel.com>
AuthorDate: Wed Sep 5 14:32:48 2007 +0200
Committer: Ingo Molnar <mingo at elte.hu>
CommitDate: Wed Sep 5 14:32:48 2007 +0200
sched: fix MC/HT scheduler optimization, without breaking the FUZZ logic.
First fix the check
if (*imbalance + SCHED_LOAD_SCALE_FUZZ < busiest_load_per_task)
with this
if (*imbalance < busiest_load_per_task)
The current check is always false for nice-0 tasks, as
SCHED_LOAD_SCALE_FUZZ is the same as busiest_load_per_task for nice-0
tasks.
With the above change, imbalance was getting reset to 0 in the
corner-case condition, making the FUZZ logic fail. Fix this by leaving
the imbalance untouched, changing it only when the HT/MC optimization
is found to be needed.
Signed-off-by: Suresh Siddha <suresh.b.siddha at intel.com>
Signed-off-by: Ingo Molnar <mingo at elte.hu>
---
kernel/sched.c | 8 +++-----
1 files changed, 3 insertions(+), 5 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index b533d6d..c8759ec 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2512,7 +2512,7 @@ group_next:
* a think about bumping its value to force at least one task to be
* moved
*/
- if (*imbalance + SCHED_LOAD_SCALE_FUZZ < busiest_load_per_task) {
+ if (*imbalance < busiest_load_per_task) {
unsigned long tmp, pwr_now, pwr_move;
unsigned int imbn;
@@ -2564,10 +2564,8 @@ small_imbalance:
pwr_move /= SCHED_LOAD_SCALE;
/* Move if we gain throughput */
- if (pwr_move <= pwr_now)
- goto out_balanced;
-
- *imbalance = busiest_load_per_task;
+ if (pwr_move > pwr_now)
+ *imbalance = busiest_load_per_task;
}
return busiest;
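The corrected control flow is easier to see outside of find_busiest_group().
Below is a minimal standalone sketch with stand-in values; pwr_now and
pwr_move stand for the throughput estimates the kernel derives from group
statistics, and the function name is invented for illustration:

    #include <stdio.h>

    /*
     * Corrected small-imbalance logic: only raise *imbalance when the
     * power computation says moving one task gains throughput; never
     * clobber it with zero, so the caller's FUZZ handling keeps working.
     */
    static void fixup_small_imbalance(unsigned long *imbalance,
                                      unsigned long busiest_load_per_task,
                                      unsigned long pwr_now,
                                      unsigned long pwr_move)
    {
        if (*imbalance >= busiest_load_per_task)
            return;                 /* already big enough to move a task */

        if (pwr_move > pwr_now)
            *imbalance = busiest_load_per_task;
        /* else: leave *imbalance alone (the old code zeroed it) */
    }

    int main(void)
    {
        unsigned long imbalance = 100;

        fixup_small_imbalance(&imbalance, 1024, 2048, 2000);
        printf("no gain: %lu\n", imbalance);   /* still 100 */

        fixup_small_imbalance(&imbalance, 1024, 2000, 2048);
        printf("gain:    %lu\n", imbalance);   /* now 1024  */
        return 0;
    }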
Gitweb: http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=a206c07213cf6372289f189c3774c4c3255a7ae1
Commit: a206c07213cf6372289f189c3774c4c3255a7ae1
Parent: a0dc72601d48b171b4870dfdd0824901a2b2b1a9
Author: Ingo Molnar <mingo at elte.hu>
AuthorDate: Wed Sep 5 14:32:49 2007 +0200
Committer: Ingo Molnar <mingo at elte.hu>
CommitDate: Wed Sep 5 14:32:49 2007 +0200
sched: debug: fix cfs_rq->wait_runtime accounting
The cfs_rq->wait_runtime debug/statistics counter was not maintained
properly; fix this.
This also removes some code:
text data bss dec hex filename
13420 228 1204 14852 3a04 sched.o.before
13404 228 1204 14836 39f4 sched.o.after
Signed-off-by: Ingo Molnar <mingo at elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
---
kernel/sched.c | 1 -
kernel/sched_fair.c | 10 +++++-----
2 files changed, 5 insertions(+), 6 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index c8759ec..97986f1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -858,7 +858,6 @@ static void dec_nr_running(struct task_struct *p, struct rq *rq)
static void set_load_weight(struct task_struct *p)
{
- task_rq(p)->cfs.wait_runtime -= p->se.wait_runtime;
p->se.wait_runtime = 0;
if (task_has_rt_policy(p)) {
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 810b52d..bac2aff 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -194,6 +194,8 @@ __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
update_load_add(&cfs_rq->load, se->load.weight);
cfs_rq->nr_running++;
se->on_rq = 1;
+
+ schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
}
static inline void
@@ -205,6 +207,8 @@ __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
update_load_sub(&cfs_rq->load, se->load.weight);
cfs_rq->nr_running--;
se->on_rq = 0;
+
+ schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
}
static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
@@ -574,7 +578,6 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
prev_runtime = se->wait_runtime;
__add_wait_runtime(cfs_rq, se, delta_fair);
- schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
delta_fair = se->wait_runtime - prev_runtime;
/*
@@ -662,7 +665,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
if (tsk->state & TASK_UNINTERRUPTIBLE)
se->block_start = rq_of(cfs_rq)->clock;
}
- cfs_rq->wait_runtime -= se->wait_runtime;
#endif
}
__dequeue_entity(cfs_rq, se);
@@ -1121,10 +1123,8 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
* The statistical average of wait_runtime is about
* -granularity/2, so initialize the task with that:
*/
- if (sysctl_sched_features & SCHED_FEAT_START_DEBIT) {
+ if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
se->wait_runtime = -(sched_granularity(cfs_rq) / 2);
- schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
- }
__enqueue_entity(cfs_rq, se);
}
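The invariant the patch establishes: the aggregate counter is adjusted in
exactly two places, enqueue and dequeue, so it equals the sum of
wait_runtime over the queued entities by construction. A minimal sketch
with simplified stand-in types (not the real kernel structures):

    #include <stdio.h>

    struct entity { long wait_runtime; int on_rq; };
    struct rq     { long wait_runtime; unsigned int nr_running; };

    /* The aggregate is touched only here... */
    static void enqueue(struct rq *rq, struct entity *se)
    {
        rq->nr_running++;
        se->on_rq = 1;
        rq->wait_runtime += se->wait_runtime;
    }

    /* ...and here, so no other path can let it drift. */
    static void dequeue(struct rq *rq, struct entity *se)
    {
        rq->nr_running--;
        se->on_rq = 0;
        rq->wait_runtime -= se->wait_runtime;
    }

    int main(void)
    {
        struct rq rq = { 0, 0 };
        struct entity a = { 5, 0 }, b = { -3, 0 };

        enqueue(&rq, &a);
        enqueue(&rq, &b);
        dequeue(&rq, &a);
        printf("%ld\n", rq.wait_runtime);   /* -3, b's contribution */
        return 0;
    }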
Gitweb: http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=2491b2b89d4646e02ab51c90ab7012d124924ddc
Commit: 2491b2b89d4646e02ab51c90ab7012d124924ddc
Parent: a206c07213cf6372289f189c3774c4c3255a7ae1
Author: Ingo Molnar <mingo at elte.hu>
AuthorDate: Wed Sep 5 14:32:49 2007 +0200
Committer: Ingo Molnar <mingo at elte.hu>
CommitDate: Wed Sep 5 14:32:49 2007 +0200
sched: debug: fix sum_exec_runtime clearing
When cleaning the sched-stats, also clear prev_sum_exec_runtime.
Signed-off-by: Ingo Molnar <mingo at elte.hu>
---
kernel/sched_debug.c | 1 +
1 files changed, 1 insertions(+), 0 deletions(-)
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index ab18f45..c3ee38b 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -283,4 +283,5 @@ void proc_sched_set_task(struct task_struct *p)
p->se.wait_runtime_overruns = p->se.wait_runtime_underruns = 0;
#endif
p->se.sum_exec_runtime = 0;
+ p->se.prev_sum_exec_runtime = 0;
}
Gitweb: http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=7c92e54f6f9601cfa9d8894ee248abcf62ed9a1c
Commit: 7c92e54f6f9601cfa9d8894ee248abcf62ed9a1c
Parent: cf2ab4696ee42f895eed88c2b6e432fe03dda0db
Author: Peter Zijlstra <a.p.zijlstra at chello.nl>
AuthorDate: Wed Sep 5 14:32:49 2007 +0200
Committer: Ingo Molnar <mingo at elte.hu>
CommitDate: Wed Sep 5 14:32:49 2007 +0200
sched: simplify __check_preempt_curr_fair()
Preparatory patch for fix-ideal-runtime:
Simplify __check_preempt_curr_fair(): get rid of the integer return.
text data bss dec hex filename
13404 228 1204 14836 39f4 sched.o.before
13393 228 1204 14825 39e9 sched.o.after
Functionality is unchanged.
Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
Signed-off-by: Ingo Molnar <mingo at elte.hu>
---
kernel/sched_fair.c | 8 +++-----
1 files changed, 3 insertions(+), 5 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index bac2aff..f0dd4be 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -673,7 +673,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
/*
* Preempt the current task with a newly woken task if needed:
*/
-static int
+static void
__check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
struct sched_entity *curr, unsigned long granularity)
{
@@ -686,9 +686,8 @@ __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
*/
if (__delta > niced_granularity(curr, granularity)) {
resched_task(rq_of(cfs_rq)->curr);
- return 1;
+ curr->prev_sum_exec_runtime = curr->sum_exec_runtime;
}
- return 0;
}
static inline void
@@ -764,8 +763,7 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
if (delta_exec > ideal_runtime)
gran = 0;
- if (__check_preempt_curr_fair(cfs_rq, next, curr, gran))
- curr->prev_sum_exec_runtime = curr->sum_exec_runtime;
+ __check_preempt_curr_fair(cfs_rq, next, curr, gran);
}
/**************************************************
Gitweb: http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=4a55b45036a677fac43fe81ddf7fdcd007aaaee7
Commit: 4a55b45036a677fac43fe81ddf7fdcd007aaaee7
Parent: 7c92e54f6f9601cfa9d8894ee248abcf62ed9a1c
Author: Peter Zijlstra <a.p.zijlstra at chello.nl>
AuthorDate: Wed Sep 5 14:32:49 2007 +0200
Committer: Ingo Molnar <mingo at elte.hu>
CommitDate: Wed Sep 5 14:32:49 2007 +0200
sched: improve prev_sum_exec_runtime setting
Second preparatory patch for fix-ideal-runtime:
Mark prev_sum_exec_runtime at the beginning of our run, the same spot
that adds our wait period to wait_runtime. This seems a more natural
location for it, and it also reduces the code a bit:
text data bss dec hex filename
13397 228 1204 14829 39ed sched.o.before
13391 228 1204 14823 39e7 sched.o.after
Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
Signed-off-by: Ingo Molnar <mingo at elte.hu>
---
kernel/sched_fair.c | 5 ++---
1 files changed, 2 insertions(+), 3 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index f0dd4be..2d01bbc 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -684,10 +684,8 @@ __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
* preempt the current task unless the best task has
* a larger than sched_granularity fairness advantage:
*/
- if (__delta > niced_granularity(curr, granularity)) {
+ if (__delta > niced_granularity(curr, granularity))
resched_task(rq_of(cfs_rq)->curr);
- curr->prev_sum_exec_runtime = curr->sum_exec_runtime;
- }
}
static inline void
@@ -703,6 +701,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
update_stats_wait_end(cfs_rq, se);
update_stats_curr_start(cfs_rq, se);
set_cfs_rq_curr(cfs_rq, se);
+ se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
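Conceptually, prev_sum_exec_runtime now records where the current slice
began, so the elapsed slice is always the difference of the two counters.
A simplified stand-in sketch (types and names invented for illustration):

    #include <stdio.h>

    struct entity {
        unsigned long long sum_exec_runtime;        /* total ns executed    */
        unsigned long long prev_sum_exec_runtime;   /* value at slice start */
    };

    /* On becoming the current task: remember where this slice starts. */
    static void set_next(struct entity *se)
    {
        se->prev_sum_exec_runtime = se->sum_exec_runtime;
    }

    /* Wall-time executed in the current slice, as used by the
     * preemption check. */
    static unsigned long long slice_exec(const struct entity *se)
    {
        return se->sum_exec_runtime - se->prev_sum_exec_runtime;
    }

    int main(void)
    {
        struct entity se = { 900, 0 };

        set_next(&se);                      /* slice starts at 900 ns */
        se.sum_exec_runtime += 250;         /* run for 250 ns         */
        printf("%llu\n", slice_exec(&se));  /* 250                    */
        return 0;
    }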
Gitweb: http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=1169783085adb9ac969d21103a6885e8435f7ed3
Commit: 1169783085adb9ac969d21103a6885e8435f7ed3
Parent: 4a55b45036a677fac43fe81ddf7fdcd007aaaee7
Author: Peter Zijlstra <a.p.zijlstra at chello.nl>
AuthorDate: Wed Sep 5 14:32:49 2007 +0200
Committer: Ingo Molnar <mingo at elte.hu>
CommitDate: Wed Sep 5 14:32:49 2007 +0200
sched: fix ideal_runtime calculations for reniced tasks
Fix ideal_runtime:
- do not scale it using niced_granularity(); it is compared against
  sum_exec_delta, so it is wall-time, not fair-time.
- move the whole check into __check_preempt_curr_fair() so that wakeup
  preemption can also benefit from the new logic.
This also results in a code-size reduction:
text data bss dec hex filename
13391 228 1204 14823 39e7 sched.o.before
13369 228 1204 14801 39d1 sched.o.after
Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
Signed-off-by: Ingo Molnar <mingo at elte.hu>
---
kernel/sched_fair.c | 38 ++++++++++++++++++++++----------------
1 files changed, 22 insertions(+), 16 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 2d01bbc..892616b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -678,11 +678,31 @@ __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
struct sched_entity *curr, unsigned long granularity)
{
s64 __delta = curr->fair_key - se->fair_key;
+ unsigned long ideal_runtime, delta_exec;
+
+ /*
+ * ideal_runtime is compared against sum_exec_runtime, which is
+ * walltime, hence do not scale.
+ */
+ ideal_runtime = max(sysctl_sched_latency / cfs_rq->nr_running,
+ (unsigned long)sysctl_sched_min_granularity);
+
+ /*
+ * If we executed more than what the latency constraint suggests,
+ * reduce the rescheduling granularity. This way the total latency
+ * of how much a task is not scheduled converges to
+ * sysctl_sched_latency:
+ */
+ delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+ if (delta_exec > ideal_runtime)
+ granularity = 0;
/*
* Take scheduling granularity into account - do not
* preempt the current task unless the best task has
* a larger than sched_granularity fairness advantage:
+ *
+ * scale granularity as key space is in fair_clock.
*/
if (__delta > niced_granularity(curr, granularity))
resched_task(rq_of(cfs_rq)->curr);
@@ -731,7 +751,6 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
- unsigned long gran, ideal_runtime, delta_exec;
struct sched_entity *next;
/*
@@ -748,21 +767,8 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
if (next == curr)
return;
- gran = sched_granularity(cfs_rq);
- ideal_runtime = niced_granularity(curr,
- max(sysctl_sched_latency / cfs_rq->nr_running,
- (unsigned long)sysctl_sched_min_granularity));
- /*
- * If we executed more than what the latency constraint suggests,
- * reduce the rescheduling granularity. This way the total latency
- * of how much a task is not scheduled converges to
- * sysctl_sched_latency:
- */
- delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
- if (delta_exec > ideal_runtime)
- gran = 0;
-
- __check_preempt_curr_fair(cfs_rq, next, curr, gran);
+ __check_preempt_curr_fair(cfs_rq, next, curr,
+ sched_granularity(cfs_rq));
}
/**************************************************
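The resulting rule can be paraphrased: derive a wall-time ideal slice from
the latency target, drop the granularity to zero once that slice is used
up, and scale only the key-space comparison. A standalone sketch with
assumed sysctl defaults (20 ms latency, 2 ms minimum granularity; values
illustrative, and the nice-scaling of the key-space granularity is omitted
for brevity):

    #include <stdio.h>

    /* Assumed defaults, in nanoseconds (illustrative only). */
    static unsigned long sysctl_sched_latency         = 20000000UL;
    static unsigned long sysctl_sched_min_granularity =  2000000UL;

    static unsigned long max_ul(unsigned long a, unsigned long b)
    {
        return a > b ? a : b;
    }

    /*
     * Should the waiting entity preempt the current one?  ideal_runtime
     * is wall-time, so it is compared against the unscaled execution
     * delta; only key_delta lives in fair-clock space.
     */
    static int should_preempt(long long key_delta,         /* curr - next */
                              unsigned long long delta_exec,
                              unsigned long nr_running,
                              unsigned long granularity)
    {
        unsigned long ideal_runtime =
            max_ul(sysctl_sched_latency / nr_running,
                   sysctl_sched_min_granularity);

        /* Past the latency budget: preempt on any positive key delta. */
        if (delta_exec > ideal_runtime)
            granularity = 0;

        return key_delta > (long long)granularity;
    }

    int main(void)
    {
        /* 12 ms executed, 2 runnable tasks: the 10 ms budget is spent. */
        printf("%d\n", should_preempt(1, 12000000ULL, 2, 4000000UL));
        return 0;
    }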
Index: kernel-2.6.spec
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/F-7/kernel-2.6.spec,v
retrieving revision 1.3339
retrieving revision 1.3340
diff -u -r1.3339 -r1.3340
--- kernel-2.6.spec 12 Sep 2007 00:17:42 -0000 1.3339
+++ kernel-2.6.spec 12 Sep 2007 22:35:48 -0000 1.3340
@@ -618,7 +618,8 @@
Patch800: linux-2.6-wakeups-hdaps.patch
Patch801: linux-2.6-wakeups.patch
Patch900: linux-2.6-sched-cfs-v2.6.22.5-v20.5.patch
-Patch901: linux-2.6-timekeeping-fixes.patch
+Patch901: linux-2.6-sched-cfs-updates.patch
+Patch902: linux-2.6-timekeeping-fixes.patch
Patch1000: linux-2.6-dmi-based-module-autoloading.patch
Patch1020: linux-2.6-usb-autosuspend-default-disable.patch
Patch1030: linux-2.6-nfs-nosharecache.patch
@@ -1088,6 +1089,8 @@
# Ingo's new scheduler.
ApplyPatch linux-2.6-sched-cfs-v2.6.22.5-v20.5.patch
+# updates from 2.6.23
+ApplyPatch linux-2.6-sched-cfs-updates.patch
# apply timekeeping updates that were in the Fedora CFS patch
ApplyPatch linux-2.6-timekeeping-fixes.patch
@@ -2306,6 +2309,9 @@
%endif
%changelog
+* Wed Sep 12 2007 Chuck Ebbert <cebbert at redhat.com>
+- update CFS scheduler
+
* Tue Sep 11 2007 Roland McGrath <roland at redhat.com>
- utrace update (#248532, #267161, #284311)
linux-2.6-sched-cfs-v2.6.22.5-v20.5.patch:
Index: linux-2.6-sched-cfs-v2.6.22.5-v20.5.patch
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/F-7/linux-2.6-sched-cfs-v2.6.22.5-v20.5.patch,v
retrieving revision 1.2
retrieving revision 1.3
diff -u -r1.2 -r1.3
--- linux-2.6-sched-cfs-v2.6.22.5-v20.5.patch 29 Aug 2007 23:42:57 -0000 1.2
+++ linux-2.6-sched-cfs-v2.6.22.5-v20.5.patch 12 Sep 2007 22:35:48 -0000 1.3
@@ -8910,48 +8910,3 @@
config SCHEDSTATS
bool "Collect scheduler statistics"
depends on DEBUG_KERNEL && PROC_FS
-Try to fix MC/HT scheduler optimization breakage again, with out breaking
-the FUZZ logic.
-
-First fix the check
- if (*imbalance + SCHED_LOAD_SCALE_FUZZ < busiest_load_per_task)
-with this
- if (*imbalance < busiest_load_per_task)
-
-As the current check is always false for nice 0 tasks (as SCHED_LOAD_SCALE_FUZZ
-is same as busiest_load_per_task for nice 0 tasks).
-
-With the above change, imbalance was getting reset to 0 in the corner case
-condition, making the FUZZ logic fail. Fix it by not corrupting the
-imbalance and change the imbalance, only when it finds that the
-HT/MC optimization is needed.
-
-Signed-off-by: Suresh Siddha <suresh.b.siddha at intel.com>
----
-
-diff --git a/kernel/sched.c b/kernel/sched.c
-index 9fe473a..03e5e8d 100644
---- a/kernel/sched.c
-+++ b/kernel/sched.c
-@@ -2511,7 +2511,7 @@ group_next:
- * a think about bumping its value to force at least one task to be
- * moved
- */
-- if (*imbalance + SCHED_LOAD_SCALE_FUZZ < busiest_load_per_task) {
-+ if (*imbalance < busiest_load_per_task) {
- unsigned long tmp, pwr_now, pwr_move;
- unsigned int imbn;
-
-@@ -2563,10 +2563,8 @@ small_imbalance:
- pwr_move /= SCHED_LOAD_SCALE;
-
- /* Move if we gain throughput */
-- if (pwr_move <= pwr_now)
-- goto out_balanced;
--
-- *imbalance = busiest_load_per_task;
-+ if (pwr_move > pwr_now)
-+ *imbalance = busiest_load_per_task;
- }
-
- return busiest;