rpms/kernel/devel kernel.spec, 1.1150, 1.1151 linux-2.6-execshield.patch, 1.97, 1.98 linux-2.6-xen-execshield-add-xen-specific-load_user_cs_desc.patch, 1.1, NONE linux-2.6-xen-execshield-only-define-load_user_cs_desc-on-32-bit.patch, 1.1, NONE

Kyle McMartin kyle at fedoraproject.org
Mon Dec 8 04:50:30 UTC 2008


Author: kyle

Update of /cvs/pkgs/rpms/kernel/devel
In directory cvs1.fedora.phx.redhat.com:/tmp/cvs-serv6665

Modified Files:
	kernel.spec linux-2.6-execshield.patch 
Removed Files:
	linux-2.6-xen-execshield-add-xen-specific-load_user_cs_desc.patch 
	linux-2.6-xen-execshield-only-define-load_user_cs_desc-on-32-bit.patch 
Log Message:
* Mon Dec 08 2008 Kyle McMartin <kyle at redhat.com>
- execshield re-merge. xen bits shoved into execshield patch
  (they belong there...)
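  The re-merge works because exec-shield reaches the per-mm user code
  segment descriptor through a pv_cpu_ops hook, so the native and Xen
  implementations can sit side by side in one patch. Condensed from the
  diff below (all of these names appear in the patch):

      /* 32-bit only: per-backend hook for reloading the user CS slot */
      static inline void load_user_cs_desc(unsigned int cpu, struct mm_struct *mm)
      {
              PVOP_VCALL2(pv_cpu_ops.load_user_cs_desc, cpu, mm);
      }

      /* native (arch/x86/kernel/paravirt.c): write the GDT slot directly */
      .load_user_cs_desc = native_load_user_cs_desc,

      /* Xen (arch/x86/xen/enlighten.c): guest GDT pages are not directly
         writable, so the update goes through HYPERVISOR_update_descriptor() */
      .load_user_cs_desc = xen_load_user_cs_desc,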



Index: kernel.spec
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/kernel.spec,v
retrieving revision 1.1150
retrieving revision 1.1151
diff -u -r1.1150 -r1.1151
--- kernel.spec	8 Dec 2008 03:24:58 -0000	1.1150
+++ kernel.spec	8 Dec 2008 04:49:57 -0000	1.1151
@@ -601,8 +601,6 @@
 Patch149: linux-2.6-efika-not-chrp.patch
 
 Patch160: linux-2.6-execshield.patch
-Patch161: linux-2.6-xen-execshield-add-xen-specific-load_user_cs_desc.patch
-Patch163: linux-2.6-xen-execshield-only-define-load_user_cs_desc-on-32-bit.patch
 Patch250: linux-2.6-debug-sizeof-structs.patch
 Patch260: linux-2.6-debug-nmi-timeout.patch
 Patch270: linux-2.6-debug-taint-vm.patch
@@ -1138,9 +1136,7 @@
 #
 # Exec shield
 #
-#ApplyPatch linux-2.6-execshield.patch
-#ApplyPatch linux-2.6-xen-execshield-add-xen-specific-load_user_cs_desc.patch
-#ApplyPatch linux-2.6-xen-execshield-only-define-load_user_cs_desc-on-32-bit.patch
+ApplyPatch linux-2.6-execshield.patch
 
 #
 # bugfixes to drivers and filesystems
@@ -1914,6 +1910,10 @@
 %kernel_variant_files -k vmlinux %{with_kdump} kdump
 
 %changelog
+* Mon Dec 08 2008 Kyle McMartin <kyle at redhat.com>
+- execshield re-merge. xen bits shoved into execshield patch
+  (they belong there...)
+
 * Mon Dec 08 2008 Dave Airlie <airlied at redhat.com>
 - modesetting: rebase radeon patch
 

linux-2.6-execshield.patch:

Index: linux-2.6-execshield.patch
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/linux-2.6-execshield.patch,v
retrieving revision 1.97
retrieving revision 1.98
diff -u -r1.97 -r1.98
--- linux-2.6-execshield.patch	10 Oct 2008 19:46:18 -0000	1.97
+++ linux-2.6-execshield.patch	8 Dec 2008 04:49:58 -0000	1.98
@@ -1,6 +1,120 @@
---- linux-2.6.25.noarch/arch/x86/kernel/cpu/common.c~	2008-05-16 13:41:08.000000000 -0400
-+++ linux-2.6.25.noarch/arch/x86/kernel/cpu/common.c	2008-05-16 13:42:11.000000000 -0400
-@@ -479,6 +479,21 @@ void __cpuinit identify_cpu(struct cpuin
+diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
+index e6b82b1..c314884 100644
+--- a/arch/x86/include/asm/desc.h
++++ b/arch/x86/include/asm/desc.h
+@@ -6,6 +6,7 @@
+ #include <asm/ldt.h>
+ #include <asm/mmu.h>
+ #include <linux/smp.h>
++#include <linux/mm_types.h>
+ 
+ static inline void fill_ldt(struct desc_struct *desc,
+ 			    const struct user_desc *info)
+@@ -95,6 +96,9 @@ static inline int desc_empty(const void *ptr)
+ 
+ #define load_TLS(t, cpu) native_load_tls(t, cpu)
+ #define set_ldt native_set_ldt
++#ifdef CONFIG_X86_32
++#define load_user_cs_desc native_load_user_cs_desc
++#endif /*CONFIG_X86_32*/
+ 
+ #define write_ldt_entry(dt, entry, desc)	\
+ 	native_write_ldt_entry(dt, entry, desc)
+@@ -381,6 +385,24 @@ static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
+ 	_set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
+ }
+ 
++#ifdef CONFIG_X86_32
++static inline void set_user_cs(struct desc_struct *desc, unsigned long limit)
++{
++	limit = (limit - 1) / PAGE_SIZE;
++	desc->a = limit & 0xffff;
++	desc->b = (limit & 0xf0000) | 0x00c0fb00;
++}
++
++static inline void native_load_user_cs_desc(int cpu, struct mm_struct *mm)
++{
++	get_cpu_gdt_table(cpu)[GDT_ENTRY_DEFAULT_USER_CS] = (mm)->context.user_cs;
++}
++
++extern void arch_add_exec_range(struct mm_struct *mm, unsigned long limit);
++extern void arch_remove_exec_range(struct mm_struct *mm, unsigned long limit);
++extern void arch_flush_exec_range(struct mm_struct *mm);
++#endif /* CONFIG_X86_32 */
++
+ #else
+ /*
+  * GET_DESC_BASE reads the descriptor base of the specified segment.
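 
 [set_user_cs() above packs a byte limit into the two words of an i386 code
 segment descriptor: the limit is first converted to 4 KiB pages, the low 16
 bits land in desc->a, and desc->b takes limit bits 16-19 plus the fixed
 attribute pattern 0x00c0fb00 (G=1 page granularity, D=1 32-bit, access byte
 0xfb: present, DPL 3, readable code). Worked example for a 16 MiB limit:
 
     limit = (0x1000000 - 1) / 0x1000;   /* 0x0fff 4 KiB pages            */
     desc->a = 0x0fff & 0xffff;          /* 0x00000fff: limit 15..0       */
     desc->b = (0x0fff & 0xf0000)        /* 0: limit 19..16               */
             | 0x00c0fb00;               /* flags + DPL-3 code type       */
 ]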
+diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
+index 80a1dee..8314c66 100644
+--- a/arch/x86/include/asm/mmu.h
++++ b/arch/x86/include/asm/mmu.h
+@@ -7,12 +7,19 @@
+ /*
+  * The x86 doesn't have a mmu context, but
+  * we put the segment information here.
++ *
++ * exec_limit is used to track the range PROT_EXEC
++ * mappings span.
+  */
+ typedef struct {
+ 	void *ldt;
+ 	int size;
+ 	struct mutex lock;
+ 	void *vdso;
++#ifdef CONFIG_X86_32
++	struct desc_struct user_cs;
++	unsigned long exec_limit;
++#endif
+ } mm_context_t;
+ 
+ #ifdef CONFIG_SMP
+diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
+index ba3e2ff..42a65f4 100644
+--- a/arch/x86/include/asm/paravirt.h
++++ b/arch/x86/include/asm/paravirt.h
+@@ -113,6 +113,9 @@ struct pv_cpu_ops {
+ 	void (*store_gdt)(struct desc_ptr *);
+ 	void (*store_idt)(struct desc_ptr *);
+ 	void (*set_ldt)(const void *desc, unsigned entries);
++#ifdef CONFIG_X86_32
++	void (*load_user_cs_desc)(int cpu, struct mm_struct *mm);
++#endif /*CONFIG_X86_32*/
+ 	unsigned long (*store_tr)(void);
+ 	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
+ #ifdef CONFIG_X86_64
+@@ -860,6 +863,12 @@ static inline void set_ldt(const void *addr, unsigned entries)
+ {
+ 	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
+ }
++#ifdef CONFIG_X86_32
++static inline void load_user_cs_desc(unsigned int cpu, struct mm_struct *mm)
++{
++	PVOP_VCALL2(pv_cpu_ops.load_user_cs_desc, cpu, mm);
++}
++#endif /*CONFIG_X86_32*/
+ static inline void store_gdt(struct desc_ptr *dtr)
+ {
+ 	PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index 5ca01e3..4f319b1 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -154,6 +154,9 @@ static inline int hlt_works(int cpu)
+ 
+ #define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
+ 
++#define __HAVE_ARCH_ALIGN_STACK
++extern unsigned long arch_align_stack(unsigned long sp);
++
+ extern void cpu_detect(struct cpuinfo_x86 *c);
+ 
+ extern struct pt_regs *idle_regs(struct pt_regs *);
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index b9c9ea0..666dd0e 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -687,6 +687,21 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
  	 * we do "generic changes."
  	 */
  
@@ -13,20 +127,45 @@
 +	 */
 +	if (exec_shield != 0) {
 +#ifdef CONFIG_X86_PAE
-+		if (!test_bit(X86_FEATURE_NX, c->x86_capability))
++		if (!test_cpu_cap(c, X86_FEATURE_NX))
 +#endif
-+			clear_bit(X86_FEATURE_SEP, c->x86_capability);
++			clear_cpu_cap(c, X86_FEATURE_SEP);
 +	}
 +
 +
  	/* If the model name is still unset, do table lookup. */
  	if (!c->x86_model_id[0]) {
  		char *p;
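 
 [The SEP clearing in the identify_cpu() hunk above is what keeps the
 segment-limit scheme sound when there is no NX bit: SYSEXIT loads CS with
 fixed flat 4 GiB attributes instead of reading the GDT, so a sysenter
 system call would return to userspace with the CS limit unenforced.
 Dropping X86_FEATURE_SEP forces the int $0x80/iret path, which does honor
 the descriptor. This is the rationale as implied by the patch; its own
 comment sits in the elided lines above. Annotated:
 
     if (exec_shield != 0) {
     #ifdef CONFIG_X86_PAE
             if (!test_cpu_cap(c, X86_FEATURE_NX))   /* no hardware NX...  */
     #endif
                     clear_cpu_cap(c, X86_FEATURE_SEP);  /* ...no sysenter */
     }
 ]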
+diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
+index e4c8fb6..30f7508 100644
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -326,6 +326,9 @@ struct pv_cpu_ops pv_cpu_ops = {
+ 	.read_tscp = native_read_tscp,
+ 	.load_tr_desc = native_load_tr_desc,
+ 	.set_ldt = native_set_ldt,
++#ifdef CONFIG_X86_32
++	.load_user_cs_desc = native_load_user_cs_desc,
++#endif /*CONFIG_X86_32*/
+ 	.load_gdt = native_load_gdt,
+ 	.load_idt = native_load_idt,
+ 	.store_gdt = native_store_gdt,
 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
-index a7d50a5..83f7b4e 100644
+index 0a1302f..ab96a10 100644
 --- a/arch/x86/kernel/process_32.c
 +++ b/arch/x86/kernel/process_32.c
-@@ -677,7 +677,8 @@ struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct
+@@ -354,6 +354,10 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
+ 	regs->cs		= __USER_CS;
+ 	regs->ip		= new_ip;
+ 	regs->sp		= new_sp;
++	preempt_disable();
++	load_user_cs_desc(smp_processor_id(), current->mm);
++	preempt_enable();
++
+ 	/*
+ 	 * Free the old FP and other extended state
+ 	 */
+@@ -558,7 +562,8 @@ struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct
  	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
  
  	__unlazy_fpu(prev_p);
@@ -36,11 +175,7 @@
  
  	/* we're going to use this soon, after a few expensive things */
  	if (next_p->fpu_counter > 5)
-diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
-index a7d50a5..86e35cb 100644
---- a/arch/x86/kernel/process_32.c
-+++ b/arch/x86/kernel/process_32.c
-@@ -847,3 +847,39 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
+@@ -731,3 +736,39 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
  	unsigned long range_end = mm->brk + 0x02000000;
  	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
  }
@@ -80,8 +215,10 @@
 +	mm->context.exec_limit = 0;
 +	set_user_cs(&mm->context.user_cs, 0);
 +}
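 
 [Most of the 36 lines added by this hunk are elided; the visible tail,
 resetting exec_limit and calling set_user_cs(..., 0), is the teardown
 path, presumably arch_flush_exec_range(). The growth path declared in
 desc.h above plausibly follows this shape (a sketch, not the verbatim
 patch):
 
     void arch_add_exec_range(struct mm_struct *mm, unsigned long limit)
     {
             if (limit > mm->context.exec_limit) {
                     mm->context.exec_limit = limit;
                     set_user_cs(&mm->context.user_cs, limit);
                     /* if this is the live mm, refresh this CPU's GDT now */
                     if (mm == current->mm) {
                             preempt_disable();
                             load_user_cs_desc(smp_processor_id(), mm);
                             preempt_enable();
                     }
             }
     }
 ]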
---- linux-2.6.25.noarch/arch/x86/kernel/tlb_32.c~	2008-05-16 13:30:44.000000000 -0400
-+++ linux-2.6.25.noarch/arch/x86/kernel/tlb_32.c	2008-05-16 13:31:14.000000000 -0400
+diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
+index f4049f3..e7a1b7c 100644
+--- a/arch/x86/kernel/tlb_32.c
++++ b/arch/x86/kernel/tlb_32.c
 @@ -2,6 +2,7 @@
  #include <linux/cpu.h>
  #include <linux/interrupt.h>
@@ -90,7 +227,7 @@
  #include <asm/tlbflush.h>
  
  DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate)
-@@ -92,6 +93,8 @@ void smp_invalidate_interrupt(struct pt_
+@@ -92,6 +93,8 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
  	unsigned long cpu;
  
  	cpu = get_cpu();
@@ -99,13 +236,14 @@
  
  	if (!cpu_isset(cpu, flush_cpumask))
  		goto out;
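 
 [The smp_invalidate_interrupt() hunk is unchanged apart from its header,
 so its additions sit outside this context window; judging from the rest
 of the patch they make each CPU refresh its private GDT copy of user_cs
 while handling the flush IPI, roughly:
 
     cpu = get_cpu();
     if (current->active_mm)
             load_user_cs_desc(cpu, current->active_mm);
 
 That is how a CS limit grown on one CPU reaches the others; the lazy #GP
 check in traps.c below covers any CPU that receives no IPI in between.]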
---- linux-2.6.26.noarch/arch/x86/kernel/traps_32.c~	2008-07-14 20:20:15.000000000 -0400
-+++ linux-2.6.26.noarch/arch/x86/kernel/traps_32.c	2008-07-14 20:26:18.000000000 -0400
-@@ -596,7 +596,91 @@ DO_ERROR(10, SIGSEGV, "invalid TSS", inv
- DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
- DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
- DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0, 0)
--DO_ERROR_INFO(32, SIGILL, "iret exception", iret_error, ILL_BADSTK, 0, 1)
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index 04d242a..bb52785 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -159,6 +159,63 @@ static int lazy_iobitmap_copy(void)
+ 
+ 	return 0;
+ }
 +
 +/*
 + * lazy-check for CS validity on exec-shield binaries:
@@ -157,94 +295,97 @@
 +				current->mm->context.exec_limit, desc1->a, desc1->b, desc2->a, desc2->b);
 +		}
 +		load_user_cs_desc(cpu, current->mm);
++
 +		return 1;
 +	}
 +
 +	return 0;
 +}
-+
-+/*
-+ * The fixup code for errors in iret jumps to here (iret_exc).  It loses
-+ * the original trap number and error code.  The bogus trap 32 and error
-+ * code 0 are what the vanilla kernel delivers via:
-+ * DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0, 1)
-+ *
-+ * NOTE: Because of the final "1" in the macro we need to enable interrupts.
-+ *
-+ * In case of a general protection fault in the iret instruction, we
-+ * need to check for a lazy CS update for exec-shield.
-+ */
-+void do_iret_error(struct pt_regs *regs, long error_code)
-+{
-+	int ok;
-+	local_irq_enable();
-+	ok = check_lazy_exec_limit(get_cpu(), regs, error_code);
-+	put_cpu();
-+	if (!ok && notify_die(DIE_TRAP, "iret exception", regs,
-+		error_code, 32, SIGSEGV) != NOTIFY_STOP) {
-+			siginfo_t info;
-+			info.si_signo = SIGSEGV;
-+			info.si_errno = 0;
-+			info.si_code = ILL_BADSTK;
-+			info.si_addr = 0;
-+			do_trap(32, SIGSEGV, "iret exception", 0, regs, error_code,
-+				&info);
-+	}
-+}
- 
- void __kprobes
- do_general_protection(struct pt_regs *regs, long error_code)
-@@ -605,6 +688,7 @@ do_general_protection(struct pt_regs *re
- 	struct thread_struct *thread;
- 	struct tss_struct *tss;
- 	int cpu;
-+	int ok;
+ #endif
  
- 	cpu = get_cpu();
- 	tss = &per_cpu(init_tss, cpu);
-@@ -645,6 +729,23 @@ do_general_protection(struct pt_regs *re
+ static void __kprobes
+@@ -320,6 +377,29 @@ do_general_protection(struct pt_regs *regs, long error_code)
  	if (!user_mode(regs))
  		goto gp_in_kernel;
  
-+	ok = check_lazy_exec_limit(cpu, regs, error_code);
++#ifdef CONFIG_X86_32
++{
++	int cpu;
++	int ok;
 +
++	cpu = get_cpu();
++	ok = check_lazy_exec_limit(cpu, regs, error_code);
 +	put_cpu();
 +
 +	if (ok)
 +		return;
 +
 +	if (print_fatal_signals) {
-+		printk(KERN_ERR "#GPF(%ld[seg:%lx]) at %08lx, CPU#%d.\n", error_code,
-+			error_code/8, regs->ip, smp_processor_id());
++		printk(KERN_ERR "#GPF(%ld[seg:%lx]) at %08lx, CPU#%d.\n",
++			error_code, error_code/8, regs->ip, smp_processor_id());
 +		printk(KERN_ERR "exec_limit: %08lx, user_cs: %08x/%08x.\n",
 +			current->mm->context.exec_limit,
 +			current->mm->context.user_cs.a,
 +			current->mm->context.user_cs.b);
 +	}
-+
++}
++#endif /*CONFIG_X86_32*/
 +
  	tsk->thread.error_code = error_code;
  	tsk->thread.trap_no = 13;
  
-@@ -662,11 +763,13 @@ do_general_protection(struct pt_regs *re
- 	return;
+@@ -931,19 +1011,37 @@ do_device_not_available(struct pt_regs *regs, long error)
+ }
  
- gp_in_vm86:
-+	put_cpu();
+ #ifdef CONFIG_X86_32
++/*
++ * The fixup code for errors in iret jumps to here (iret_exc). It loses
++ * the original trap number and error code. The bogus trap 32 and error
++ * code 0 are what the vanilla kernel delivers via:
++ * DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0, 1)
++ *
++ * NOTE: Because of the final "1" in the macro we need to enable interrupts.
++ *
++ * In case of a general protection fault in the iret instruction, we
++ * need to check for a lazy CS update for exec-shield.
++ */
+ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
+ {
+-	siginfo_t info;
++	int ok;
++	int cpu;
++
  	local_irq_enable();
- 	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
- 	return;
  
- gp_in_kernel:
+-	info.si_signo = SIGILL;
+-	info.si_errno = 0;
+-	info.si_code = ILL_BADSTK;
+-	info.si_addr = 0;
+-	if (notify_die(DIE_TRAP, "iret exception",
+-			regs, error_code, 32, SIGILL) == NOTIFY_STOP)
+-		return;
+-	do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
++	cpu = get_cpu();
++	ok = check_lazy_exec_limit(cpu, regs, error_code);
 +	put_cpu();
- 	if (fixup_exception(regs))
- 		return;
++
++	if (!ok && notify_die(DIE_TRAP, "iret exception", regs,
++		error_code, 32, SIGSEGV) != NOTIFY_STOP) {
++			siginfo_t info;
++			info.si_signo = SIGSEGV;
++			info.si_errno = 0;
++			info.si_code = ILL_BADSTK;
++			info.si_addr = 0;
++			do_trap(32, SIGSEGV, "iret exception", regs, error_code,
++				&info);
++	}
+ }
+ #endif
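 
 [Both consumers above, the #GP handler and do_iret_error(), lean on the
 same idea: the fault may only mean this CPU's GDT copy of user_cs is
 narrower than what the mm now allows, for instance after another CPU grew
 the limit, in which case reloading the descriptor and retrying the
 instruction is the whole fix. Condensed logic of check_lazy_exec_limit(),
 whose upper half is elided from the hunk (a sketch, not the verbatim
 function):
 
     static int check_lazy_exec_limit(int cpu, struct pt_regs *regs,
                                      long error_code)
     {
             struct desc_struct *desc1, *desc2;
 
             if (!current->mm)
                     return 0;
 
             desc1 = &current->mm->context.user_cs;
             desc2 = &get_cpu_gdt_table(cpu)[GDT_ENTRY_DEFAULT_USER_CS];
             if (desc1->a != desc2->a || desc1->b != desc2->b) {
                     /* stale per-CPU copy: reload it, restart the insn */
                     load_user_cs_desc(cpu, current->mm);
                     return 1;
             }
             return 0;       /* a genuine fault */
     }
 ]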
  
 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
-index 8106bba..23932be 100644
+index c483f42..8c3bbd9 100644
 --- a/arch/x86/mm/init_32.c
 +++ b/arch/x86/mm/init_32.c
-@@ -473,7 +473,7 @@ static int disable_nx __initdata;
+@@ -570,7 +570,7 @@ static int disable_nx __initdata;
   * Control non executable mappings.
   *
   * on      Enable
@@ -253,7 +394,7 @@
   */
  static int __init noexec_setup(char *str)
  {
-@@ -482,14 +482,12 @@ static int __init noexec_setup(char *str)
+@@ -579,14 +579,12 @@ static int __init noexec_setup(char *str)
  			__supported_pte_mask |= _PAGE_NX;
  			disable_nx = 0;
  		}
@@ -274,7 +415,8 @@
  
  	return 0;
  }
-@@ -782,6 +782,10 @@ unsigned long __init_refok init_memory_m
+@@ -845,7 +843,11 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
+ 	set_nx();
  	if (nx_enabled)
  		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
 +	else
@@ -307,10 +449,10 @@
  	}
  }
 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
-index 348f134..0bd5f44 100644
+index 513f330..2fb420a 100644
 --- a/arch/x86/vdso/vdso32-setup.c
 +++ b/arch/x86/vdso/vdso32-setup.c
-@@ -336,7 +336,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
+@@ -331,7 +331,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
  	if (compat)
  		addr = VDSO_HIGH_BASE;
  	else {
@@ -319,11 +461,50 @@
  		if (IS_ERR_VALUE(addr)) {
  			ret = addr;
  			goto up_fail;
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 5e4686d..b41f314 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -315,6 +315,24 @@ static void xen_set_ldt(const void *addr, unsigned entries)
+ 	xen_mc_issue(PARAVIRT_LAZY_CPU);
+ }
+ 
++#ifdef CONFIG_X86_32
++static void xen_load_user_cs_desc(int cpu, struct mm_struct *mm)
++{
++	void *gdt;
++	xmaddr_t mgdt;
++	u64 descriptor;
++	struct desc_struct user_cs;
++
++	gdt = &get_cpu_gdt_table(cpu)[GDT_ENTRY_DEFAULT_USER_CS];
++	mgdt = virt_to_machine(gdt);
++
++	user_cs = mm->context.user_cs;
++	descriptor = (u64) user_cs.a | ((u64) user_cs.b) << 32;
++
++	HYPERVISOR_update_descriptor(mgdt.maddr, descriptor);
++}
++#endif /*CONFIG_X86_32*/
++
+ static void xen_load_gdt(const struct desc_ptr *dtr)
+ {
+ 	unsigned long *frames;
+@@ -1231,6 +1249,9 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
+ 
+ 	.load_tr_desc = paravirt_nop,
+ 	.set_ldt = xen_set_ldt,
++#ifdef CONFIG_X86_32
++	.load_user_cs_desc = xen_load_user_cs_desc,
++#endif /*CONFIG_X86_32*/
+ 	.load_gdt = xen_load_gdt,
+ 	.load_idt = xen_load_idt,
+ 	.load_tls = xen_load_tls,
 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
-index 41a958a..22cbd06 100644
+index 8fcfa39..5e1e2d8 100644
 --- a/fs/binfmt_elf.c
 +++ b/fs/binfmt_elf.c
-@@ -81,7 +81,7 @@ static struct linux_binfmt elf_format = {
+@@ -80,7 +80,7 @@ static struct linux_binfmt elf_format = {
  		.hasvdso	= 1
  };
  
@@ -332,7 +513,7 @@
  
  static int set_brk(unsigned long start, unsigned long end)
  {
-@@ -709,6 +711,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -723,6 +723,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
  			break;
  		}
  
@@ -344,7 +525,7 @@
  	/* Some simple consistency checks for the interpreter */
  	if (elf_interpreter) {
  		retval = -ELIBBAD;
-@@ -721,6 +721,15 @@ static int load_elf_binary(struct linux_
+@@ -742,6 +747,15 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
  	if (retval)
  		goto out_free_dentry;
  
@@ -360,17 +541,17 @@
  	/* OK, This is the point of no return */
  	current->flags &= ~PF_FORKNOEXEC;
  	current->mm->def_flags = def_flags;
-@@ -741,7 +757,8 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -749,7 +763,8 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
  	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
  	   may depend on the personality.  */
- 	SET_PERSONALITY(loc->elf_ex, 0);
+ 	SET_PERSONALITY(loc->elf_ex);
 -	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
 +	if (!(exec_shield & 2) &&
 +			elf_read_implies_exec(loc->elf_ex, executable_stack))
  		current->personality |= READ_IMPLIES_EXEC;
  
  	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-@@ -906,7 +923,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -914,7 +929,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
  					    interpreter,
  					    &interp_map_addr,
  					    load_bias);
@@ -379,9 +560,11 @@
  			/*
  			 * load_elf_interp() returns relocation
  			 * adjustment
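 
 [Taken together, the binfmt_elf.c hunks show where the exec_shield sysctl
 is consumed at exec time: the elided addition near the program-header loop
 presumably feeds the executable-stack decision, and the visible test keeps
 legacy binaries from being granted READ_IMPLIES_EXEC when bit 1 is set.
 The bit semantics as inferred from the usages in this patch (not an
 authoritative table):
 
     /* exec_shield == 0: disabled (hardware NX, if present, still works)
      * exec_shield & 1:  enable the segment-limit approximation of NX
      * exec_shield & 2:  additionally never set READ_IMPLIES_EXEC, even
      *                   for binaries that lack a PT_GNU_STACK header   */
     if (!(exec_shield & 2) &&
                     elf_read_implies_exec(loc->elf_ex, executable_stack))
             current->personality |= READ_IMPLIES_EXEC;
 ]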
---- linux-2.6.25.noarch/fs/proc/array.c~	2008-05-16 13:51:04.000000000 -0400
-+++ linux-2.6.25.noarch/fs/proc/array.c	2008-05-16 13:53:24.000000000 -0400
-@@ -473,8 +473,13 @@ static int do_task_stat(struct seq_file 
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 6af7fba..bb98552 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -412,8 +412,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
  		unlock_task_sighand(task, &flags);
  	}
  
@@ -397,87 +580,11 @@
  	if (!whole) {
  		min_flt = task->min_flt;
  		maj_flt = task->maj_flt;
-diff --git a/include/asm-x86/desc.h b/include/asm-x86/desc.h
-index 5b6a05d..7ad80b9 100644
---- a/include/asm-x86/desc.h
-+++ b/include/asm-x86/desc.h
-@@ -353,6 +353,22 @@ static inline void set_system_gate_ist(int n, void *addr, unsigned ist)
- 	_set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
- }
- 
-+#ifdef CONFIG_X86_32
-+static inline void set_user_cs(struct desc_struct *desc, unsigned long limit)
-+{
-+	limit = (limit - 1) / PAGE_SIZE;
-+	desc->a = limit & 0xffff;
-+	desc->b = (limit & 0xf0000) | 0x00c0fb00;
-+}
-+
-+#define load_user_cs_desc(cpu, mm) \
-+	get_cpu_gdt_table(cpu)[GDT_ENTRY_DEFAULT_USER_CS] = (mm)->context.user_cs
-+
-+extern void arch_add_exec_range(struct mm_struct *mm, unsigned long limit);
-+extern void arch_remove_exec_range(struct mm_struct *mm, unsigned long limit);
-+extern void arch_flush_exec_range(struct mm_struct *mm);
-+#endif /* CONFIG_X86_32 */
-+
- #else
- /*
-  * GET_DESC_BASE reads the descriptor base of the specified segment.
-diff --git a/include/asm-x86/mmu.h b/include/asm-x86/mmu.h
-index efa962c..db9b109 100644
---- a/include/asm-x86/mmu.h
-+++ b/include/asm-x86/mmu.h
-@@ -9,6 +9,8 @@
-  * we put the segment information here.
-  *
-  * cpu_vm_mask is used to optimize ldt flushing.
-+ * exec_limit is used to track the range PROT_EXEC
-+ * mappings span.
-  */
- typedef struct {
- 	void *ldt;
-@@ -18,6 +20,10 @@ typedef struct {
- 	int size;
- 	struct mutex lock;
- 	void *vdso;
-+#ifdef CONFIG_X86_32
-+	struct desc_struct user_cs;
-+	unsigned long exec_limit;
-+#endif
- } mm_context_t;
- 
- #ifdef CONFIG_SMP
---- linux-2.6.25.noarch/include/asm-x86/processor.h~	2008-05-16 15:56:21.000000000 -0400
-+++ linux-2.6.25.noarch/include/asm-x86/processor.h	2008-05-16 16:02:44.000000000 -0400
-@@ -151,6 +151,9 @@ static inline int hlt_works(int cpu)
- 
- #define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
- 
-+#define __HAVE_ARCH_ALIGN_STACK
-+extern unsigned long arch_align_stack(unsigned long sp);
-+
- extern void cpu_detect(struct cpuinfo_x86 *c);
- 
- extern void identify_cpu(struct cpuinfo_x86 *);
---- linux-2.6.25.noarch/arch/x86/kernel/process_32.c~	2008-05-16 16:02:50.000000000 -0400
-+++ linux-2.6.25.noarch/arch/x86/kernel/process_32.c	2008-05-16 16:03:21.000000000 -0400
-@@ -411,6 +411,10 @@ start_thread(struct pt_regs *regs, unsig
- 	regs->cs		= __USER_CS;
- 	regs->ip		= new_ip;
- 	regs->sp		= new_sp;
-+	preempt_disable();
-+	load_user_cs_desc(smp_processor_id(), current->mm);
-+	preempt_enable();
-+
- 	/*
- 	 * Free the old FP and other extended state
- 	 */
 diff --git a/include/linux/mm.h b/include/linux/mm.h
-index e8abb38..1483fc7 100644
+index ffee2f7..c14817b 100644
 --- a/include/linux/mm.h
 +++ b/include/linux/mm.h
-@@ -1041,7 +1041,13 @@ extern int install_special_mapping(struct mm_struct *mm,
+@@ -1105,7 +1105,13 @@ extern int install_special_mapping(struct mm_struct *mm,
  				   unsigned long addr, unsigned long len,
  				   unsigned long flags, struct page **pages);
  
@@ -493,10 +600,10 @@
  extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
  	unsigned long len, unsigned long prot,
 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
-index bfee0bd..30ec32f 100644
+index fe82547..8007dbf 100644
 --- a/include/linux/mm_types.h
 +++ b/include/linux/mm_types.h
-@@ -166,6 +166,9 @@ struct mm_struct {
+@@ -177,6 +177,9 @@ struct mm_struct {
  	unsigned long (*get_unmapped_area) (struct file *filp,
  				unsigned long addr, unsigned long len,
  				unsigned long pgoff, unsigned long flags);
@@ -507,10 +614,10 @@
  	unsigned long mmap_base;		/* base of mmap area */
  	unsigned long task_size;		/* size of task vm space */
 diff --git a/include/linux/resource.h b/include/linux/resource.h
-index ae13db7..14757af 100644
+index 40fc7e6..68c2549 100644
 --- a/include/linux/resource.h
 +++ b/include/linux/resource.h
-@@ -54,8 +54,11 @@ struct rlimit {
+@@ -55,8 +55,11 @@ struct rlimit {
  /*
   * Limit the stack by to some sane default: root can always
   * increase this limit if needed..  8MB seems reasonable.
@@ -522,12 +629,12 @@
 +#define EXEC_STACK_BIAS	(2*1024*1024)
  
  /*
-  * GPG wants 32kB of mlocked memory, to make sure pass phrases
+  * GPG2 wants 64kB of mlocked memory, to make sure pass phrases
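 
 [EXEC_STACK_BIAS widens only the growth check, not the rlimit itself: with
 the default 8 MiB RLIMIT_STACK the stack may grow to 10 MiB, leaving slack
 for exec-shield's relocated stack placement. The stack-limit test replaced
 in the mm/mmap.c hunk further down presumably reduces to a helper of this
 shape (name and body hypothetical):
 
     static int over_stack_limit(unsigned long size)
     {
             unsigned long limit =
                     current->signal->rlim[RLIMIT_STACK].rlim_cur;
 
             if (size < EXEC_STACK_BIAS)
                     return 0;
             return (size - EXEC_STACK_BIAS) > limit;
     }
 ]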
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 00e1441..c1c8a7d 100644
+index 55e30d1..d400ab0 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -98,6 +98,9 @@ struct futex_pi_state;
+@@ -97,6 +97,9 @@ struct futex_pi_state;
  struct robust_list_head;
  struct bio;
  
@@ -537,7 +644,7 @@
  /*
   * List of flags we want to share for kernel threads,
   * if only because they are not used by them anyway.
-@@ -342,6 +345,10 @@ extern int sysctl_max_map_count;
+@@ -345,6 +348,10 @@ extern int sysctl_max_map_count;
  extern unsigned long
  arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
  		       unsigned long, unsigned long);
@@ -549,11 +656,11 @@
  arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
  			  unsigned long len, unsigned long pgoff,
 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index d41ef6b..5304704 100644
+index 3d56fe7..b512845 100644
 --- a/kernel/sysctl.c
 +++ b/kernel/sysctl.c
-@@ -83,6 +83,26 @@ extern int maps_protect;
- extern int sysctl_stat_interval;
+@@ -82,6 +82,26 @@ extern int percpu_pagelist_fraction;
+ extern int compat_log;
  extern int latencytop_enabled;
  extern int sysctl_nr_open_min, sysctl_nr_open_max;
 +
@@ -579,7 +686,7 @@
  #ifdef CONFIG_RCU_TORTURE_TEST
  extern int rcutorture_runnable;
  #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
-@@ -383,6 +403,14 @@ static struct ctl_table kern_table[] = {
+@@ -373,6 +393,14 @@ static struct ctl_table kern_table[] = {
  		.proc_handler	= &proc_dointvec,
  	},
  	{
@@ -595,7 +702,7 @@
  		.procname	= "core_uses_pid",
  		.data		= &core_uses_pid,
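 
 [The eight added lines this hunk reports are almost entirely elided;
 presumably they register the knob in the file's usual style, along these
 lines (a sketch):
 
     {
             .ctl_name       = CTL_UNNUMBERED,
             .procname       = "exec-shield",
             .data           = &exec_shield,
             .maxlen         = sizeof(int),
             .mode           = 0644,
             .proc_handler   = &proc_dointvec,
     },
 ]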
 diff --git a/mm/mmap.c b/mm/mmap.c
-index a32d28c..7634038 100644
+index d4855a6..0850042 100644
 --- a/mm/mmap.c
 +++ b/mm/mmap.c
 @@ -27,6 +27,7 @@
@@ -606,7 +713,7 @@
  
  #include <asm/uaccess.h>
  #include <asm/cacheflush.h>
-@@ -41,6 +41,18 @@
+@@ -43,6 +44,18 @@
  #define arch_rebalance_pgtables(addr, len)		(addr)
  #endif
  
@@ -625,7 +732,7 @@
  static void unmap_region(struct mm_struct *mm,
  		struct vm_area_struct *vma, struct vm_area_struct *prev,
  		unsigned long start, unsigned long end);
-@@ -377,6 +378,8 @@ static inline void
+@@ -391,6 +404,8 @@ static inline void
  __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
  		struct vm_area_struct *prev, struct rb_node *rb_parent)
  {
@@ -634,7 +741,7 @@
  	if (prev) {
  		vma->vm_next = prev->vm_next;
  		prev->vm_next = vma;
-@@ -480,6 +483,8 @@ __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -494,6 +509,8 @@ __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
  	rb_erase(&vma->vm_rb, &mm->mm_rb);
  	if (mm->mmap_cache == vma)
  		mm->mmap_cache = prev;
@@ -643,7 +750,7 @@
  }
  
  /*
-@@ -785,6 +790,8 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -800,6 +817,8 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
  		} else					/* cases 2, 5, 7 */
  			vma_adjust(prev, prev->vm_start,
  				end, prev->vm_pgoff, NULL);
@@ -652,7 +759,7 @@
  		return prev;
  	}
  
-@@ -940,7 +947,8 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
+@@ -955,7 +974,8 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
  	/* Obtain the address to map to. we verify (or select) it and ensure
  	 * that it represents a valid section of the address space.
  	 */
@@ -662,7 +769,7 @@
  	if (addr & ~PAGE_MASK)
  		return addr;
  
-@@ -1410,13 +1418,17 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
+@@ -1440,13 +1460,17 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
  }
  
  unsigned long
@@ -683,7 +790,7 @@
  	if (file && file->f_op && file->f_op->get_unmapped_area)
  		get_area = file->f_op->get_unmapped_area;
  	addr = get_area(file, addr, len, pgoff, flags);
-@@ -1430,8 +1442,74 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
+@@ -1460,8 +1484,74 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
  
  	return arch_rebalance_pgtables(addr, len);
  }
@@ -759,7 +866,7 @@
  
  /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
  struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
-@@ -1506,6 +1585,14 @@ out:
+@@ -1536,6 +1626,14 @@ out:
  	return prev ? prev->vm_next : vma;
  }
  
@@ -774,7 +881,7 @@
  /*
   * Verify that the stack growth is acceptable and
   * update accounting. This is shared with both the
-@@ -1522,7 +1609,7 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
+@@ -1552,7 +1650,7 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
  		return -ENOMEM;
  
  	/* Stack limit test */
@@ -783,7 +890,7 @@
  		return -ENOMEM;
  
  	/* mlock limit tests */
-@@ -1826,10 +1913,14 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1862,10 +1960,14 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  	if (new->vm_ops && new->vm_ops->open)
  		new->vm_ops->open(new);
  
@@ -800,7 +907,7 @@
  		vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
  
  	return 0;
-@@ -2173,6 +2173,7 @@ void exit_mmap(struct mm_struct *mm)
+@@ -2109,6 +2211,7 @@ void exit_mmap(struct mm_struct *mm)
  	vm_unacct_memory(nr_accounted);
  	free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
  	tlb_finish_mmu(tlb, 0, end);
@@ -809,11 +916,11 @@
  	/*
  	 * Walk the list again, actually closing and freeing it,
 diff --git a/mm/mprotect.c b/mm/mprotect.c
-index 4de5468..6d822ad 100644
+index fded06f..cdfcbd2 100644
 --- a/mm/mprotect.c
 +++ b/mm/mprotect.c
-@@ -23,8 +23,15 @@
- #include <linux/swapops.h>
+@@ -24,8 +24,15 @@
+ #include <linux/mmu_notifier.h>
  #include <asm/uaccess.h>
  #include <asm/pgtable.h>
 +#include <asm/pgalloc.h>
@@ -828,7 +935,7 @@
  
  #ifndef pgprot_modify
  static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
-@@ -134,7 +138,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
+@@ -140,7 +147,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
  	struct mm_struct *mm = vma->vm_mm;
  	unsigned long oldflags = vma->vm_flags;
  	long nrpages = (end - start) >> PAGE_SHIFT;
@@ -837,7 +944,7 @@
  	pgoff_t pgoff;
  	int error;
  	int dirty_accountable = 0;
-@@ -211,6 +211,9 @@ success:
+@@ -204,6 +211,9 @@ success:
  		dirty_accountable = 1;
  	}
  
@@ -848,10 +955,10 @@
  	if (is_vm_hugetlb_page(vma))
  		hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
 diff --git a/mm/mremap.c b/mm/mremap.c
-index 08e3c7f..101f885 100644
+index 58a2908..5bb50e6 100644
 --- a/mm/mremap.c
 +++ b/mm/mremap.c
-@@ -392,8 +392,8 @@ unsigned long do_mremap(unsigned long addr,
+@@ -400,8 +400,8 @@ unsigned long do_mremap(unsigned long addr,
  			if (vma->vm_flags & VM_MAYSHARE)
  				map_flags |= MAP_SHARED;
  


--- linux-2.6-xen-execshield-add-xen-specific-load_user_cs_desc.patch DELETED ---


--- linux-2.6-xen-execshield-only-define-load_user_cs_desc-on-32-bit.patch DELETED ---



