rpms/kernel/devel patch-2.6.23-rc7-git4.bz2.sign, NONE, 1.1 .cvsignore, 1.691, 1.692 kernel.spec, 1.178, 1.179 linux-2.6-highres-timers.patch, 1.10, 1.11 sources, 1.653, 1.654 upstream, 1.575, 1.576 linux-2.6-xfs-fix-filestreams-free-func-cast.patch, 1.1, NONE patch-2.6.23-rc7-git3.bz2.sign, 1.1, NONE

Dave Jones (davej) fedora-extras-commits at redhat.com
Mon Sep 24 02:26:53 UTC 2007


Author: davej

Update of /cvs/pkgs/rpms/kernel/devel
In directory cvs-int.fedora.redhat.com:/tmp/cvs-serv29852

Modified Files:
	.cvsignore kernel.spec linux-2.6-highres-timers.patch sources 
	upstream 
Added Files:
	patch-2.6.23-rc7-git4.bz2.sign 
Removed Files:
	linux-2.6-xfs-fix-filestreams-free-func-cast.patch 
	patch-2.6.23-rc7-git3.bz2.sign 
Log Message:
* Sun Sep 23 2007 Dave Jones <davej at redhat.com>
- 2.6.23-rc7-git4



--- NEW FILE patch-2.6.23-rc7-git4.bz2.sign ---
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.7 (GNU/Linux)
Comment: See http://www.kernel.org/signature.html for info

iD8DBQBG9g+pyGugalF9Dw4RAkPqAJ4lgVRIQytu/RlILy7STDLHnL/OtACdHmxD
Wmlvj0dlAuoOkQJbe2bOQmY=
=PWzh
-----END PGP SIGNATURE-----
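
For reference, a detached kernel.org signature of this era covers the
*uncompressed* patch, so the .bz2 has to be decompressed before verifying.
A minimal sketch of the usual check, assuming the then-current kernel.org
archive signing key (see the signature.html URL in the comment above):

    # fetch the kernel.org archive signing key (key ID assumed from the
    # era's kernel.org documentation)
    gpg --keyserver pgp.mit.edu --recv-keys 0x517D0F0E
    # decompress alongside the original, then verify the detached signature
    bunzip2 -k patch-2.6.23-rc7-git4.bz2
    gpg --verify patch-2.6.23-rc7-git4.bz2.sign patch-2.6.23-rc7-git4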


Index: .cvsignore
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/.cvsignore,v
retrieving revision 1.691
retrieving revision 1.692
diff -u -r1.691 -r1.692
--- .cvsignore	22 Sep 2007 11:45:40 -0000	1.691
+++ .cvsignore	24 Sep 2007 02:26:20 -0000	1.692
@@ -4,4 +4,4 @@
 kernel-2.6.22
 linux-2.6.22.tar.bz2
 patch-2.6.23-rc7.bz2
-patch-2.6.23-rc7-git3.bz2
+patch-2.6.23-rc7-git4.bz2


Index: kernel.spec
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/kernel.spec,v
retrieving revision 1.178
retrieving revision 1.179
diff -u -r1.178 -r1.179
--- kernel.spec	22 Sep 2007 11:45:40 -0000	1.178
+++ kernel.spec	24 Sep 2007 02:26:20 -0000	1.179
@@ -47,7 +47,7 @@
 # The rc snapshot level
 %define rcrev 7
 # The git snapshot level
-%define gitrev 3
+%define gitrev 4
 # Set rpm version accordingly
 %define rpmversion 2.6.%{upstream_sublevel}
 %endif
@@ -665,7 +665,6 @@
 Patch1503: linux-2.6-xfs-optimize-away-dmapi-tests.patch
 Patch1504: linux-2.6-xfs-optimize-away-realtime-tests.patch
 Patch1505: linux-2.6-xfs-refactor-xfs_mountfs.patch
-Patch1506: linux-2.6-xfs-fix-filestreams-free-func-cast.patch
 Patch1509: linux-2.6-xfs-setfattr-32bit-compat.patch
 Patch1512: linux-2.6-firewire-multi-lun.patch
 Patch1515: linux-2.6-lirc.patch
@@ -1177,7 +1176,6 @@
 ApplyPatch linux-2.6-xfs-optimize-away-dmapi-tests.patch
 ApplyPatch linux-2.6-xfs-optimize-away-realtime-tests.patch
 ApplyPatch linux-2.6-xfs-refactor-xfs_mountfs.patch
-ApplyPatch linux-2.6-xfs-fix-filestreams-free-func-cast.patch
 ApplyPatch linux-2.6-xfs-setfattr-32bit-compat.patch
 
 #
@@ -1804,6 +1802,9 @@
 
 
 %changelog
+* Sun Sep 23 2007 Dave Jones <davej at redhat.com>
+- 2.6.23-rc7-git4
+
 * Sat Sep 22 2007 Chuck Ebbert <cebbert at redhat.com>
 - 2.6.23-rc7-git3
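
The hunk near the top of this diff only bumps %{gitrev}; the spec then folds
the rc and git levels into the package's version/release strings. Roughly,
expressed as shell with illustrative values (a sketch of the macro logic,
not the literal spec):

    upstream_sublevel=23     # the 2.6.x sublevel being tracked
    rcrev=7                  # the -rc snapshot level (%{rcrev})
    gitrev=4                 # the git snapshot level bumped in this commit
    rpmversion="2.6.${upstream_sublevel}"
    # pre-release builds carry the snapshot levels in the release tag,
    # yielding names along the lines of (".<rel>" is a placeholder):
    echo "kernel-${rpmversion}-0.rc${rcrev}.git${gitrev}.<rel>"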
 

linux-2.6-highres-timers.patch:

Index: linux-2.6-highres-timers.patch
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/linux-2.6-highres-timers.patch,v
retrieving revision 1.10
retrieving revision 1.11
diff -u -r1.10 -r1.11
--- linux-2.6-highres-timers.patch	22 Sep 2007 11:45:40 -0000	1.10
+++ linux-2.6-highres-timers.patch	24 Sep 2007 02:26:20 -0000	1.11
@@ -587,411 +587,6 @@
  }
  
  #else
-@@ -245,404 +245,6 @@ static void acpi_state_timer_broadcast(s
- 
- #endif
- 
--static void acpi_processor_idle(void)
--{
--	struct acpi_processor *pr = NULL;
--	struct acpi_processor_cx *cx = NULL;
--	struct acpi_processor_cx *next_state = NULL;
--	int sleep_ticks = 0;
--	u32 t1, t2 = 0;
--
--	/*
--	 * Interrupts must be disabled during bus mastering calculations and
--	 * for C2/C3 transitions.
--	 */
--	local_irq_disable();
--
--	pr = processors[smp_processor_id()];
--	if (!pr) {
--		local_irq_enable();
--		return;
--	}
--
--	/*
--	 * Check whether we truly need to go idle, or should
--	 * reschedule:
--	 */
--	if (unlikely(need_resched())) {
--		local_irq_enable();
--		return;
--	}
--
--	cx = pr->power.state;
--	if (!cx) {
--		if (pm_idle_save)
--			pm_idle_save();
--		else
--			acpi_safe_halt();
--		return;
--	}
--
--	/*
--	 * Check BM Activity
--	 * -----------------
--	 * Check for bus mastering activity (if required), record, and check
--	 * for demotion.
--	 */
--	if (pr->flags.bm_check) {
--		u32 bm_status = 0;
--		unsigned long diff = jiffies - pr->power.bm_check_timestamp;
--
--		if (diff > 31)
--			diff = 31;
--
--		pr->power.bm_activity <<= diff;
--
--		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
--		if (bm_status) {
--			pr->power.bm_activity |= 0x1;
--			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
--		}
--		/*
--		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
--		 * the true state of bus mastering activity; forcing us to
--		 * manually check the BMIDEA bit of each IDE channel.
--		 */
--		else if (errata.piix4.bmisx) {
--			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
--			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
--				pr->power.bm_activity |= 0x1;
--		}
--
--		pr->power.bm_check_timestamp = jiffies;
--
--		/*
--		 * If bus mastering is or was active this jiffy, demote
--		 * to avoid a faulty transition.  Note that the processor
--		 * won't enter a low-power state during this call (to this
--		 * function) but should upon the next.
--		 *
--		 * TBD: A better policy might be to fallback to the demotion
--		 *      state (use it for this quantum only) istead of
--		 *      demoting -- and rely on duration as our sole demotion
--		 *      qualification.  This may, however, introduce DMA
--		 *      issues (e.g. floppy DMA transfer overrun/underrun).
--		 */
--		if ((pr->power.bm_activity & 0x1) &&
--		    cx->demotion.threshold.bm) {
--			local_irq_enable();
--			next_state = cx->demotion.state;
--			goto end;
--		}
--	}
--
--#ifdef CONFIG_HOTPLUG_CPU
--	/*
--	 * Check for P_LVL2_UP flag before entering C2 and above on
--	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
--	 * detection phase, to work cleanly with logical CPU hotplug.
--	 */
--	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && 
--	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
--		cx = &pr->power.states[ACPI_STATE_C1];
--#endif
--
--	/*
--	 * Sleep:
--	 * ------
--	 * Invoke the current Cx state to put the processor to sleep.
--	 */
--	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
--		current_thread_info()->status &= ~TS_POLLING;
--		/*
--		 * TS_POLLING-cleared state must be visible before we
--		 * test NEED_RESCHED:
--		 */
--		smp_mb();
--		if (need_resched()) {
--			current_thread_info()->status |= TS_POLLING;
--			local_irq_enable();
--			return;
--		}
--	}
--
--	switch (cx->type) {
--
--	case ACPI_STATE_C1:
--		/*
--		 * Invoke C1.
--		 * Use the appropriate idle routine, the one that would
--		 * be used without acpi C-states.
--		 */
--		if (pm_idle_save)
--			pm_idle_save();
--		else
--			acpi_safe_halt();
--
--		/*
--		 * TBD: Can't get time duration while in C1, as resumes
--		 *      go to an ISR rather than here.  Need to instrument
--		 *      base interrupt handler.
--		 *
--		 * Note: the TSC better not stop in C1, sched_clock() will
--		 *       skew otherwise.
--		 */
--		sleep_ticks = 0xFFFFFFFF;
--		break;
--
--	case ACPI_STATE_C2:
--		/* Get start time (ticks) */
--		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
--		/* Tell the scheduler that we are going deep-idle: */
--		sched_clock_idle_sleep_event();
--		/* Invoke C2 */
--		acpi_state_timer_broadcast(pr, cx, 1);
--		acpi_cstate_enter(cx);
--		/* Get end time (ticks) */
--		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
--
--#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
--		/* TSC halts in C2, so notify users */
--		mark_tsc_unstable("possible TSC halt in C2");
--#endif
--		/* Compute time (ticks) that we were actually asleep */
--		sleep_ticks = ticks_elapsed(t1, t2);
--
--		/* Tell the scheduler how much we idled: */
--		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
--
--		/* Re-enable interrupts */
--		local_irq_enable();
--		/* Do not account our idle-switching overhead: */
--		sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;
--
--		current_thread_info()->status |= TS_POLLING;
--		acpi_state_timer_broadcast(pr, cx, 0);
--		break;
--
--	case ACPI_STATE_C3:
--		/*
--		 * disable bus master
--		 * bm_check implies we need ARB_DIS
--		 * !bm_check implies we need cache flush
--		 * bm_control implies whether we can do ARB_DIS
--		 *
--		 * That leaves a case where bm_check is set and bm_control is
--		 * not set. In that case we cannot do much, we enter C3
--		 * without doing anything.
--		 */
--		if (pr->flags.bm_check && pr->flags.bm_control) {
--			if (atomic_inc_return(&c3_cpu_count) ==
--			    num_online_cpus()) {
--				/*
--				 * All CPUs are trying to go to C3
--				 * Disable bus master arbitration
--				 */
--				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
--			}
--		} else if (!pr->flags.bm_check) {
--			/* SMP with no shared cache... Invalidate cache  */
--			ACPI_FLUSH_CPU_CACHE();
--		}
--
--		/* Get start time (ticks) */
--		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
--		/* Invoke C3 */
--		acpi_state_timer_broadcast(pr, cx, 1);
--		/* Tell the scheduler that we are going deep-idle: */
--		sched_clock_idle_sleep_event();
--		acpi_cstate_enter(cx);
--		/* Get end time (ticks) */
--		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
--		if (pr->flags.bm_check && pr->flags.bm_control) {
--			/* Enable bus master arbitration */
--			atomic_dec(&c3_cpu_count);
--			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
--		}
--
--#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
--		/* TSC halts in C3, so notify users */
--		mark_tsc_unstable("TSC halts in C3");
--#endif
--		/* Compute time (ticks) that we were actually asleep */
--		sleep_ticks = ticks_elapsed(t1, t2);
--		/* Tell the scheduler how much we idled: */
--		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
--
--		/* Re-enable interrupts */
--		local_irq_enable();
--		/* Do not account our idle-switching overhead: */
--		sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;
--
--		current_thread_info()->status |= TS_POLLING;
--		acpi_state_timer_broadcast(pr, cx, 0);
--		break;
--
--	default:
--		local_irq_enable();
--		return;
--	}
--	cx->usage++;
--	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
--		cx->time += sleep_ticks;
--
--	next_state = pr->power.state;
--
--#ifdef CONFIG_HOTPLUG_CPU
--	/* Don't do promotion/demotion */
--	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
--	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
--		next_state = cx;
--		goto end;
--	}
--#endif
--
--	/*
--	 * Promotion?
--	 * ----------
--	 * Track the number of longs (time asleep is greater than threshold)
--	 * and promote when the count threshold is reached.  Note that bus
--	 * mastering activity may prevent promotions.
--	 * Do not promote above max_cstate.
--	 */
--	if (cx->promotion.state &&
--	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
--		if (sleep_ticks > cx->promotion.threshold.ticks &&
--		  cx->promotion.state->latency <= system_latency_constraint()) {
--			cx->promotion.count++;
--			cx->demotion.count = 0;
--			if (cx->promotion.count >=
--			    cx->promotion.threshold.count) {
--				if (pr->flags.bm_check) {
--					if (!
--					    (pr->power.bm_activity & cx->
--					     promotion.threshold.bm)) {
--						next_state =
--						    cx->promotion.state;
--						goto end;
--					}
--				} else {
--					next_state = cx->promotion.state;
--					goto end;
--				}
--			}
--		}
--	}
--
--	/*
--	 * Demotion?
--	 * ---------
--	 * Track the number of shorts (time asleep is less than time threshold)
--	 * and demote when the usage threshold is reached.
--	 */
--	if (cx->demotion.state) {
--		if (sleep_ticks < cx->demotion.threshold.ticks) {
--			cx->demotion.count++;
--			cx->promotion.count = 0;
--			if (cx->demotion.count >= cx->demotion.threshold.count) {
--				next_state = cx->demotion.state;
--				goto end;
--			}
--		}
--	}
--
--      end:
--	/*
--	 * Demote if current state exceeds max_cstate
--	 * or if the latency of the current state is unacceptable
--	 */
--	if ((pr->power.state - pr->power.states) > max_cstate ||
--		pr->power.state->latency > system_latency_constraint()) {
--		if (cx->demotion.state)
--			next_state = cx->demotion.state;
--	}
--
--	/*
--	 * New Cx State?
--	 * -------------
--	 * If we're going to start using a new Cx state we must clean up
--	 * from the previous and prepare to use the new.
--	 */
--	if (next_state != pr->power.state)
--		acpi_processor_power_activate(pr, next_state);
--}
--
--static int acpi_processor_set_power_policy(struct acpi_processor *pr)
--{
--	unsigned int i;
--	unsigned int state_is_set = 0;
--	struct acpi_processor_cx *lower = NULL;
--	struct acpi_processor_cx *higher = NULL;
--	struct acpi_processor_cx *cx;
--
--
--	if (!pr)
--		return -EINVAL;
--
--	/*
--	 * This function sets the default Cx state policy (OS idle handler).
--	 * Our scheme is to promote quickly to C2 but more conservatively
--	 * to C3.  We're favoring C2  for its characteristics of low latency
--	 * (quick response), good power savings, and ability to allow bus
--	 * mastering activity.  Note that the Cx state policy is completely
--	 * customizable and can be altered dynamically.
--	 */
--
--	/* startup state */
--	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
--		cx = &pr->power.states[i];
--		if (!cx->valid)
--			continue;
--
--		if (!state_is_set)
--			pr->power.state = cx;
--		state_is_set++;
--		break;
--	}
--
--	if (!state_is_set)
--		return -ENODEV;
--
--	/* demotion */
--	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
--		cx = &pr->power.states[i];
--		if (!cx->valid)
--			continue;
--
--		if (lower) {
--			cx->demotion.state = lower;
--			cx->demotion.threshold.ticks = cx->latency_ticks;
--			cx->demotion.threshold.count = 1;
--			if (cx->type == ACPI_STATE_C3)
--				cx->demotion.threshold.bm = bm_history;
--		}
--
--		lower = cx;
--	}
--
--	/* promotion */
--	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
--		cx = &pr->power.states[i];
--		if (!cx->valid)
--			continue;
--
--		if (higher) {
--			cx->promotion.state = higher;
--			cx->promotion.threshold.ticks = cx->latency_ticks;
--			if (cx->type >= ACPI_STATE_C2)
--				cx->promotion.threshold.count = 4;
--			else
--				cx->promotion.threshold.count = 10;
--			if (higher->type == ACPI_STATE_C3)
--				cx->promotion.threshold.bm = bm_history;
--		}
--
--		higher = cx;
--	}
--
--	return 0;
--}
--
- static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
- {
- 
 @@ -922,7 +457,7 @@ static void acpi_processor_power_verify_
  	 * Normalize the C2 latency to expidite policy
  	 */
@@ -1467,20 +1062,427 @@
 +		count++;
  	}
  
-+	if (!count)
-+		return -EINVAL;
-+
-+	dev->state_count = count;
- 	return 0;
- }
-+
-+struct cpuidle_driver acpi_idle_driver = {
-+	.name =		"acpi_idle",
-+	.init =		acpi_idle_init,
-+	.redetect =	acpi_idle_init,
-+	.bm_check =	acpi_idle_bm_check,
-+	.owner =	THIS_MODULE,
-+};
++	if (!count)
++		return -EINVAL;
++
++	dev->state_count = count;
+ 	return 0;
+ }
++
++struct cpuidle_driver acpi_idle_driver = {
++	.name =		"acpi_idle",
++	.init =		acpi_idle_init,
++	.redetect =	acpi_idle_init,
++	.bm_check =	acpi_idle_bm_check,
++	.owner =	THIS_MODULE,
++};
+--- linux-2.6.22.noarch/drivers/acpi/processor_idle.c~	2007-09-23 22:22:11.000000000 -0400
++++ linux-2.6.22.noarch/drivers/acpi/processor_idle.c	2007-09-23 22:22:52.000000000 -0400
+@@ -260,404 +260,6 @@ int acpi_processor_resume(struct acpi_de
+ 	return 0;
+ }
+ 
+-static void acpi_processor_idle(void)
+-{
+-	struct acpi_processor *pr = NULL;
+-	struct acpi_processor_cx *cx = NULL;
+-	struct acpi_processor_cx *next_state = NULL;
+-	int sleep_ticks = 0;
+-	u32 t1, t2 = 0;
+-
+-	/*
+-	 * Interrupts must be disabled during bus mastering calculations and
+-	 * for C2/C3 transitions.
+-	 */
+-	local_irq_disable();
+-
+-	pr = processors[smp_processor_id()];
+-	if (!pr) {
+-		local_irq_enable();
+-		return;
+-	}
+-
+-	/*
+-	 * Check whether we truly need to go idle, or should
+-	 * reschedule:
+-	 */
+-	if (unlikely(need_resched())) {
+-		local_irq_enable();
+-		return;
+-	}
+-
+-	cx = pr->power.state;
+-	if (!cx || acpi_idle_suspend) {
+-		if (pm_idle_save)
+-			pm_idle_save();
+-		else
+-			acpi_safe_halt();
+-		return;
+-	}
+-
+-	/*
+-	 * Check BM Activity
+-	 * -----------------
+-	 * Check for bus mastering activity (if required), record, and check
+-	 * for demotion.
+-	 */
+-	if (pr->flags.bm_check) {
+-		u32 bm_status = 0;
+-		unsigned long diff = jiffies - pr->power.bm_check_timestamp;
+-
+-		if (diff > 31)
+-			diff = 31;
+-
+-		pr->power.bm_activity <<= diff;
+-
+-		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
+-		if (bm_status) {
+-			pr->power.bm_activity |= 0x1;
+-			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
+-		}
+-		/*
+-		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
+-		 * the true state of bus mastering activity; forcing us to
+-		 * manually check the BMIDEA bit of each IDE channel.
+-		 */
+-		else if (errata.piix4.bmisx) {
+-			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
+-			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
+-				pr->power.bm_activity |= 0x1;
+-		}
+-
+-		pr->power.bm_check_timestamp = jiffies;
+-
+-		/*
+-		 * If bus mastering is or was active this jiffy, demote
+-		 * to avoid a faulty transition.  Note that the processor
+-		 * won't enter a low-power state during this call (to this
+-		 * function) but should upon the next.
+-		 *
+-		 * TBD: A better policy might be to fallback to the demotion
+-		 *      state (use it for this quantum only) istead of
+-		 *      demoting -- and rely on duration as our sole demotion
+-		 *      qualification.  This may, however, introduce DMA
+-		 *      issues (e.g. floppy DMA transfer overrun/underrun).
+-		 */
+-		if ((pr->power.bm_activity & 0x1) &&
+-		    cx->demotion.threshold.bm) {
+-			local_irq_enable();
+-			next_state = cx->demotion.state;
+-			goto end;
+-		}
+-	}
+-
+-#ifdef CONFIG_HOTPLUG_CPU
+-	/*
+-	 * Check for P_LVL2_UP flag before entering C2 and above on
+-	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
+-	 * detection phase, to work cleanly with logical CPU hotplug.
+-	 */
+-	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && 
+-	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
+-		cx = &pr->power.states[ACPI_STATE_C1];
+-#endif
+-
+-	/*
+-	 * Sleep:
+-	 * ------
+-	 * Invoke the current Cx state to put the processor to sleep.
+-	 */
+-	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
+-		current_thread_info()->status &= ~TS_POLLING;
+-		/*
+-		 * TS_POLLING-cleared state must be visible before we
+-		 * test NEED_RESCHED:
+-		 */
+-		smp_mb();
+-		if (need_resched()) {
+-			current_thread_info()->status |= TS_POLLING;
+-			local_irq_enable();
+-			return;
+-		}
+-	}
+-
+-	switch (cx->type) {
+-
+-	case ACPI_STATE_C1:
+-		/*
+-		 * Invoke C1.
+-		 * Use the appropriate idle routine, the one that would
+-		 * be used without acpi C-states.
+-		 */
+-		if (pm_idle_save)
+-			pm_idle_save();
+-		else
+-			acpi_safe_halt();
+-
+-		/*
+-		 * TBD: Can't get time duration while in C1, as resumes
+-		 *      go to an ISR rather than here.  Need to instrument
+-		 *      base interrupt handler.
+-		 *
+-		 * Note: the TSC better not stop in C1, sched_clock() will
+-		 *       skew otherwise.
+-		 */
+-		sleep_ticks = 0xFFFFFFFF;
+-		break;
+-
+-	case ACPI_STATE_C2:
+-		/* Get start time (ticks) */
+-		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+-		/* Tell the scheduler that we are going deep-idle: */
+-		sched_clock_idle_sleep_event();
+-		/* Invoke C2 */
+-		acpi_state_timer_broadcast(pr, cx, 1);
+-		acpi_cstate_enter(cx);
+-		/* Get end time (ticks) */
+-		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+-
+-#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
+-		/* TSC halts in C2, so notify users */
+-		mark_tsc_unstable("possible TSC halt in C2");
+-#endif
+-		/* Compute time (ticks) that we were actually asleep */
+-		sleep_ticks = ticks_elapsed(t1, t2);
+-
+-		/* Tell the scheduler how much we idled: */
+-		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
+-
+-		/* Re-enable interrupts */
+-		local_irq_enable();
+-		/* Do not account our idle-switching overhead: */
+-		sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;
+-
+-		current_thread_info()->status |= TS_POLLING;
+-		acpi_state_timer_broadcast(pr, cx, 0);
+-		break;
+-
+-	case ACPI_STATE_C3:
+-		/*
+-		 * disable bus master
+-		 * bm_check implies we need ARB_DIS
+-		 * !bm_check implies we need cache flush
+-		 * bm_control implies whether we can do ARB_DIS
+-		 *
+-		 * That leaves a case where bm_check is set and bm_control is
+-		 * not set. In that case we cannot do much, we enter C3
+-		 * without doing anything.
+-		 */
+-		if (pr->flags.bm_check && pr->flags.bm_control) {
+-			if (atomic_inc_return(&c3_cpu_count) ==
+-			    num_online_cpus()) {
+-				/*
+-				 * All CPUs are trying to go to C3
+-				 * Disable bus master arbitration
+-				 */
+-				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
+-			}
+-		} else if (!pr->flags.bm_check) {
+-			/* SMP with no shared cache... Invalidate cache  */
+-			ACPI_FLUSH_CPU_CACHE();
+-		}
+-
+-		/* Get start time (ticks) */
+-		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+-		/* Invoke C3 */
+-		acpi_state_timer_broadcast(pr, cx, 1);
+-		/* Tell the scheduler that we are going deep-idle: */
+-		sched_clock_idle_sleep_event();
+-		acpi_cstate_enter(cx);
+-		/* Get end time (ticks) */
+-		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+-		if (pr->flags.bm_check && pr->flags.bm_control) {
+-			/* Enable bus master arbitration */
+-			atomic_dec(&c3_cpu_count);
+-			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
+-		}
+-
+-#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
+-		/* TSC halts in C3, so notify users */
+-		mark_tsc_unstable("TSC halts in C3");
+-#endif
+-		/* Compute time (ticks) that we were actually asleep */
+-		sleep_ticks = ticks_elapsed(t1, t2);
+-		/* Tell the scheduler how much we idled: */
+-		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
+-
+-		/* Re-enable interrupts */
+-		local_irq_enable();
+-		/* Do not account our idle-switching overhead: */
+-		sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;
+-
+-		current_thread_info()->status |= TS_POLLING;
+-		acpi_state_timer_broadcast(pr, cx, 0);
+-		break;
+-
+-	default:
+-		local_irq_enable();
+-		return;
+-	}
+-	cx->usage++;
+-	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
+-		cx->time += sleep_ticks;
+-
+-	next_state = pr->power.state;
+-
+-#ifdef CONFIG_HOTPLUG_CPU
+-	/* Don't do promotion/demotion */
+-	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
+-	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
+-		next_state = cx;
+-		goto end;
+-	}
+-#endif
+-
+-	/*
+-	 * Promotion?
+-	 * ----------
+-	 * Track the number of longs (time asleep is greater than threshold)
+-	 * and promote when the count threshold is reached.  Note that bus
+-	 * mastering activity may prevent promotions.
+-	 * Do not promote above max_cstate.
+-	 */
+-	if (cx->promotion.state &&
+-	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
+-		if (sleep_ticks > cx->promotion.threshold.ticks &&
+-		  cx->promotion.state->latency <= system_latency_constraint()) {
+-			cx->promotion.count++;
+-			cx->demotion.count = 0;
+-			if (cx->promotion.count >=
+-			    cx->promotion.threshold.count) {
+-				if (pr->flags.bm_check) {
+-					if (!
+-					    (pr->power.bm_activity & cx->
+-					     promotion.threshold.bm)) {
+-						next_state =
+-						    cx->promotion.state;
+-						goto end;
+-					}
+-				} else {
+-					next_state = cx->promotion.state;
+-					goto end;
+-				}
+-			}
+-		}
+-	}
+-
+-	/*
+-	 * Demotion?
+-	 * ---------
+-	 * Track the number of shorts (time asleep is less than time threshold)
+-	 * and demote when the usage threshold is reached.
+-	 */
+-	if (cx->demotion.state) {
+-		if (sleep_ticks < cx->demotion.threshold.ticks) {
+-			cx->demotion.count++;
+-			cx->promotion.count = 0;
+-			if (cx->demotion.count >= cx->demotion.threshold.count) {
+-				next_state = cx->demotion.state;
+-				goto end;
+-			}
+-		}
+-	}
+-
+-      end:
+-	/*
+-	 * Demote if current state exceeds max_cstate
+-	 * or if the latency of the current state is unacceptable
+-	 */
+-	if ((pr->power.state - pr->power.states) > max_cstate ||
+-		pr->power.state->latency > system_latency_constraint()) {
+-		if (cx->demotion.state)
+-			next_state = cx->demotion.state;
+-	}
+-
+-	/*
+-	 * New Cx State?
+-	 * -------------
+-	 * If we're going to start using a new Cx state we must clean up
+-	 * from the previous and prepare to use the new.
+-	 */
+-	if (next_state != pr->power.state)
+-		acpi_processor_power_activate(pr, next_state);
+-}
+-
+-static int acpi_processor_set_power_policy(struct acpi_processor *pr)
+-{
+-	unsigned int i;
+-	unsigned int state_is_set = 0;
+-	struct acpi_processor_cx *lower = NULL;
+-	struct acpi_processor_cx *higher = NULL;
+-	struct acpi_processor_cx *cx;
+-
+-
+-	if (!pr)
+-		return -EINVAL;
+-
+-	/*
+-	 * This function sets the default Cx state policy (OS idle handler).
+-	 * Our scheme is to promote quickly to C2 but more conservatively
+-	 * to C3.  We're favoring C2  for its characteristics of low latency
+-	 * (quick response), good power savings, and ability to allow bus
+-	 * mastering activity.  Note that the Cx state policy is completely
+-	 * customizable and can be altered dynamically.
+-	 */
+-
+-	/* startup state */
+-	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
+-		cx = &pr->power.states[i];
+-		if (!cx->valid)
+-			continue;
+-
+-		if (!state_is_set)
+-			pr->power.state = cx;
+-		state_is_set++;
+-		break;
+-	}
+-
+-	if (!state_is_set)
+-		return -ENODEV;
+-
+-	/* demotion */
+-	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
+-		cx = &pr->power.states[i];
+-		if (!cx->valid)
+-			continue;
+-
+-		if (lower) {
+-			cx->demotion.state = lower;
+-			cx->demotion.threshold.ticks = cx->latency_ticks;
+-			cx->demotion.threshold.count = 1;
+-			if (cx->type == ACPI_STATE_C3)
+-				cx->demotion.threshold.bm = bm_history;
+-		}
+-
+-		lower = cx;
+-	}
+-
+-	/* promotion */
+-	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
+-		cx = &pr->power.states[i];
+-		if (!cx->valid)
+-			continue;
+-
+-		if (higher) {
+-			cx->promotion.state = higher;
+-			cx->promotion.threshold.ticks = cx->latency_ticks;
+-			if (cx->type >= ACPI_STATE_C2)
+-				cx->promotion.threshold.count = 4;
+-			else
+-				cx->promotion.threshold.count = 10;
+-			if (higher->type == ACPI_STATE_C3)
+-				cx->promotion.threshold.bm = bm_history;
+-		}
+-
+-		higher = cx;
+-	}
+-
+-	return 0;
+-}
+-
+ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
+ {
+ 
 Index: linux-2.6.23-rc3/drivers/acpi/tables/tbxface.c
 ===================================================================
 --- linux-2.6.23-rc3.orig/drivers/acpi/tables/tbxface.c	2007-08-13 16:49:30.000000000 +0200
@@ -3230,15 +3232,16 @@
  };
  
  struct acpi_processor {
-@@ -320,6 +321,8 @@ int acpi_processor_power_init(struct acp
- int acpi_processor_cst_has_changed(struct acpi_processor *pr);
- int acpi_processor_power_exit(struct acpi_processor *pr,
- 			      struct acpi_device *device);
+@@ -324,6 +324,9 @@ int acpi_processor_power_exit(struct acp
+ int acpi_processor_suspend(struct acpi_device * device, pm_message_t state);
+ int acpi_processor_resume(struct acpi_device * device);
+ 
 +extern struct cpuidle_driver acpi_idle_driver;
 +void acpi_max_cstate_changed(void);
- 
++
  /* in processor_thermal.c */
  int acpi_processor_get_limit_info(struct acpi_processor *pr);
+ extern struct file_operations acpi_processor_limit_fops;
 Index: linux-2.6.23-rc3/include/linux/acpi.h
 ===================================================================
 --- linux-2.6.23-rc3.orig/include/linux/acpi.h	2007-08-13 16:49:30.000000000 +0200


Index: sources
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/sources,v
retrieving revision 1.653
retrieving revision 1.654
diff -u -r1.653 -r1.654
--- sources	22 Sep 2007 11:45:40 -0000	1.653
+++ sources	24 Sep 2007 02:26:20 -0000	1.654
@@ -1,3 +1,3 @@
 2e230d005c002fb3d38a3ca07c0200d0  linux-2.6.22.tar.bz2
 828dd9bfc7271e2c9da6acaf31612983  patch-2.6.23-rc7.bz2
-1bf67697b6630e24ad5cc844290d8d64  patch-2.6.23-rc7-git3.bz2
+37ff766bfc5834bc33b84b5164debe79  patch-2.6.23-rc7-git4.bz2
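
The sources file uses md5sum's own "checksum  filename" layout, so a
downloaded snapshot can be checked against it directly (a sketch; the
package tooling of the time did the equivalent automatically):

    # check every file listed in 'sources' (missing ones are reported)
    md5sum -c sources
    # or check just the new snapshot patch
    grep patch-2.6.23-rc7-git4.bz2 sources | md5sum -c -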


Index: upstream
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/upstream,v
retrieving revision 1.575
retrieving revision 1.576
diff -u -r1.575 -r1.576
--- upstream	22 Sep 2007 11:45:40 -0000	1.575
+++ upstream	24 Sep 2007 02:26:20 -0000	1.576
@@ -1,3 +1,3 @@
 linux-2.6.22.tar.bz2
 patch-2.6.23-rc7.bz2
-patch-2.6.23-rc7-git3.bz2
+patch-2.6.23-rc7-git4.bz2


--- linux-2.6-xfs-fix-filestreams-free-func-cast.patch DELETED ---


--- patch-2.6.23-rc7-git3.bz2.sign DELETED ---



