rpms/kernel/devel kernel.spec, 1.1501, 1.1502 linux-2.6-execshield.patch, 1.104, 1.105

Kyle McMartin kyle at fedoraproject.org
Mon Apr 6 17:36:05 UTC 2009


Author: kyle

Update of /cvs/pkgs/rpms/kernel/devel
In directory cvs1.fedora.phx.redhat.com:/tmp/cvs-serv21925

Modified Files:
	kernel.spec linux-2.6-execshield.patch 
Log Message:
* Mon Apr 06 2009 Kyle McMartin <kyle at redhat.com>
- linux-2.6-execshield.patch: rebase for 2.6.30



Index: kernel.spec
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/kernel.spec,v
retrieving revision 1.1501
retrieving revision 1.1502
diff -u -r1.1501 -r1.1502
--- kernel.spec	6 Apr 2009 15:44:28 -0000	1.1501
+++ kernel.spec	6 Apr 2009 17:35:34 -0000	1.1502
@@ -1101,7 +1101,7 @@
 #
 # Exec shield
 #
-#ApplyPatch linux-2.6-execshield.patch
+ApplyPatch linux-2.6-execshield.patch
 
 #
 # bugfixes to drivers and filesystems
@@ -1826,6 +1826,9 @@
 #	                ||     ||
 %changelog
 * Mon Apr 06 2009 Kyle McMartin <kyle at redhat.com>
+- linux-2.6-execshield.patch: rebase for 2.6.30
+
+* Mon Apr 06 2009 Kyle McMartin <kyle at redhat.com>
 - Linux 2.6.29-git13
 - drop patches merged upstream:
   - fix-ppc-debug_kmap_atomic.patch

linux-2.6-execshield.patch:

Index: linux-2.6-execshield.patch
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/linux-2.6-execshield.patch,v
retrieving revision 1.104
retrieving revision 1.105
diff -u -r1.104 -r1.105
--- linux-2.6-execshield.patch	11 Mar 2009 16:28:50 -0000	1.104
+++ linux-2.6-execshield.patch	6 Apr 2009 17:35:34 -0000	1.105
@@ -1,5 +1,5 @@
 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
-index dc27705..34ed3a2 100644
+index 5623c50..353a24e 100644
 --- a/arch/x86/include/asm/desc.h
 +++ b/arch/x86/include/asm/desc.h
 @@ -6,6 +6,7 @@
@@ -10,7 +10,7 @@
  
  static inline void fill_ldt(struct desc_struct *desc,
  			    const struct user_desc *info)
-@@ -95,6 +96,9 @@ static inline int desc_empty(const void *ptr)
+@@ -94,6 +95,9 @@ static inline int desc_empty(const void *ptr)
  
  #define load_TLS(t, cpu) native_load_tls(t, cpu)
  #define set_ldt native_set_ldt
@@ -20,7 +20,7 @@
  
  #define write_ldt_entry(dt, entry, desc)	\
  	native_write_ldt_entry(dt, entry, desc)
-@@ -379,6 +383,27 @@ static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
+@@ -380,6 +384,27 @@ static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
  	_set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
  }
  
@@ -73,10 +73,10 @@
  
  #ifdef CONFIG_SMP
 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
-index e299287..aaa8a35 100644
+index 7727aa8..5d6083c 100644
 --- a/arch/x86/include/asm/paravirt.h
 +++ b/arch/x86/include/asm/paravirt.h
-@@ -113,6 +113,9 @@ struct pv_cpu_ops {
+@@ -138,6 +138,9 @@ struct pv_cpu_ops {
  	void (*store_gdt)(struct desc_ptr *);
  	void (*store_idt)(struct desc_ptr *);
  	void (*set_ldt)(const void *desc, unsigned entries);
@@ -86,7 +86,7 @@
  	unsigned long (*store_tr)(void);
  	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
  #ifdef CONFIG_X86_64
-@@ -860,6 +863,12 @@ static inline void set_ldt(const void *addr, unsigned entries)
+@@ -953,6 +956,12 @@ static inline void set_ldt(const void *addr, unsigned entries)
  {
  	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
  }
@@ -100,10 +100,10 @@
  {
  	PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
-index 3bfd523..99c8119 100644
+index 34c5237..4fc080f 100644
 --- a/arch/x86/include/asm/processor.h
 +++ b/arch/x86/include/asm/processor.h
-@@ -158,6 +158,9 @@ static inline int hlt_works(int cpu)
+@@ -159,6 +159,9 @@ static inline int hlt_works(int cpu)
  
  #define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
  
@@ -114,12 +114,12 @@
  
  extern struct pt_regs *idle_regs(struct pt_regs *);
 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index 83492b1..a84c787 100644
+index c4f6678..0a680c0 100644
 --- a/arch/x86/kernel/cpu/common.c
 +++ b/arch/x86/kernel/cpu/common.c
-@@ -708,6 +708,21 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
- 	 * we do "generic changes."
- 	 */
+@@ -789,6 +789,20 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+ 	/* Filter out anything that depends on CPUID levels we don't have */
+ 	filter_cpuid_features(c, true);
  
 +	/*
 +	 *  emulation of NX with segment limits unfortunately means
@@ -135,15 +135,14 @@
 +			clear_cpu_cap(c, X86_FEATURE_SEP);
 +	}
 +
-+
  	/* If the model name is still unset, do table lookup. */
  	if (!c->x86_model_id[0]) {
- 		char *p;
+ 		const char *p;
 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
-index c6520a4..2066aa1 100644
+index 8e45f44..13c0535 100644
 --- a/arch/x86/kernel/paravirt.c
 +++ b/arch/x86/kernel/paravirt.c
-@@ -352,6 +352,9 @@ struct pv_cpu_ops pv_cpu_ops = {
+@@ -369,6 +369,9 @@ struct pv_cpu_ops pv_cpu_ops = {
  	.read_tscp = native_read_tscp,
  	.load_tr_desc = native_load_tr_desc,
  	.set_ldt = native_set_ldt,
@@ -154,19 +153,21 @@
  	.load_idt = native_load_idt,
  	.store_gdt = native_store_gdt,
 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
-index bd4da2a..60823d4 100644
+index 76f8f84..4118f17 100644
 --- a/arch/x86/kernel/process_32.c
 +++ b/arch/x86/kernel/process_32.c
-@@ -343,6 +343,8 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
+@@ -301,7 +301,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
  void
  start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
  {
 +	int cpu;
 +
- 	__asm__("movl %0, %%gs" : : "r"(0));
+ 	set_user_gs(regs, 0);
++
  	regs->fs		= 0;
  	set_fs(USER_DS);
-@@ -352,6 +354,11 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
+ 	regs->ds		= __USER_DS;
+@@ -310,6 +313,11 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
  	regs->cs		= __USER_CS;
  	regs->ip		= new_ip;
  	regs->sp		= new_sp;
@@ -178,7 +179,7 @@
  	/*
  	 * Free the old FP and other extended state
  	 */
-@@ -519,7 +526,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+@@ -356,7 +364,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
  	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
  
  	__unlazy_fpu(prev_p);
@@ -188,7 +189,7 @@
  
  	/* we're going to use this soon, after a few expensive things */
  	if (next_p->fpu_counter > 5)
-@@ -692,3 +700,41 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
+@@ -509,3 +518,41 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
  	unsigned long range_end = mm->brk + 0x02000000;
  	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
  }
@@ -230,34 +231,13 @@
 +	mm->context.exec_limit = 0;
 +	set_user_cs(&mm->context.user_cs, 0);
 +}
-diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
-index ce50546..bd6593a 100644
---- a/arch/x86/kernel/tlb_32.c
-+++ b/arch/x86/kernel/tlb_32.c
-@@ -2,6 +2,7 @@
- #include <linux/cpu.h>
- #include <linux/interrupt.h>
- 
-+#include <asm/desc.h>
- #include <asm/tlbflush.h>
- 
- DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate)
-@@ -91,6 +92,8 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
- 	unsigned long cpu;
- 
- 	cpu = get_cpu();
-+	if (current->active_mm)
-+		load_user_cs_desc(cpu, current->active_mm);
- 
- 	if (!cpu_isset(cpu, flush_cpumask))
- 		goto out;
 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
-index a9e7548..af0f8f0 100644
+index a1d2883..453b616 100644
 --- a/arch/x86/kernel/traps.c
 +++ b/arch/x86/kernel/traps.c
-@@ -160,6 +160,76 @@ static int lazy_iobitmap_copy(void)
- 
- 	return 0;
+@@ -118,6 +118,76 @@ die_if_kernel(const char *str, struct pt_regs *regs, long err)
+ 	if (!user_mode_vm(regs))
+ 		die(str, regs, err);
  }
 +
 +static inline int
@@ -332,7 +312,7 @@
  #endif
  
  static void __kprobes
-@@ -323,6 +393,29 @@ do_general_protection(struct pt_regs *regs, long error_code)
+@@ -276,6 +346,29 @@ do_general_protection(struct pt_regs *regs, long error_code)
  	if (!user_mode(regs))
  		goto gp_in_kernel;
  
@@ -362,7 +342,7 @@
  	tsk->thread.error_code = error_code;
  	tsk->thread.trap_no = 13;
  
-@@ -934,19 +1027,37 @@ dotraplinkage void __kprobes do_device_not_available(struct pt_regs regs)
+@@ -888,19 +981,37 @@ do_device_not_available(struct pt_regs *regs, long error_code)
  }
  
  #ifdef CONFIG_X86_32
@@ -388,7 +368,7 @@
 -	info.si_signo = SIGILL;
 -	info.si_errno = 0;
 -	info.si_code = ILL_BADSTK;
--	info.si_addr = 0;
+-	info.si_addr = NULL;
 -	if (notify_die(DIE_TRAP, "iret exception",
 -			regs, error_code, 32, SIGILL) == NOTIFY_STOP)
 -		return;
@@ -409,11 +389,27 @@
  }
  #endif
  
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index fd3da1d..ac54294 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -163,7 +163,11 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
+ 	set_nx();
+ 	if (nx_enabled)
+ 		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
++	else
+ #endif
++	if (exec_shield)
++		printk(KERN_INFO "Using x86 segment limits to approximate "
++			"NX protection\n");
+ 
+ 	/* Enable PSE if available */
+ 	if (cpu_has_pse)
 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
-index 2cef050..a18ae07 100644
+index 749559e..ad9943c 100644
 --- a/arch/x86/mm/init_32.c
 +++ b/arch/x86/mm/init_32.c
-@@ -617,7 +617,7 @@ static int disable_nx __initdata;
+@@ -602,7 +602,7 @@ static int disable_nx __initdata;
   * Control non executable mappings.
   *
   * on      Enable
@@ -422,7 +418,7 @@
   */
  static int __init noexec_setup(char *str)
  {
-@@ -626,14 +626,12 @@ static int __init noexec_setup(char *str)
+@@ -611,14 +611,12 @@ static int __init noexec_setup(char *str)
  			__supported_pte_mask |= _PAGE_NX;
  			disable_nx = 0;
  		}
@@ -443,20 +439,8 @@
  
  	return 0;
  }
-@@ -892,7 +890,11 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
- 	set_nx();
- 	if (nx_enabled)
- 		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
-+	else
- #endif
-+	if (exec_shield)
-+		printk(KERN_INFO "Using x86 segment limits to approximate "
-+			"NX protection\n");
- 
- 	/* Enable PSE if available */
- 	if (cpu_has_pse)
 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
-index 56fe712..30d2be7 100644
+index 1658296..72056cf 100644
 --- a/arch/x86/mm/mmap.c
 +++ b/arch/x86/mm/mmap.c
 @@ -111,13 +111,16 @@ static unsigned long mmap_legacy_base(void)
@@ -477,6 +461,28 @@
  		mm->unmap_area = arch_unmap_area_topdown;
  	}
  }
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index 821e970..cc106da 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -6,6 +6,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
+ 
++#include <asm/desc.h>
+ #include <asm/tlbflush.h>
+ #include <asm/mmu_context.h>
+ #include <asm/apic.h>
+@@ -129,6 +130,9 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
+ 	union smp_flush_state *f;
+ 
+ 	cpu = smp_processor_id();
++	if (current->active_mm)
++		load_user_cs_desc(cpu, current->active_mm);
++
+ 	/*
+ 	 * orig_rax contains the negated interrupt vector.
+ 	 * Use that to determine where the sender put the data.
 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
 index 1241f11..3f2c44c 100644
 --- a/arch/x86/vdso/vdso32-setup.c
@@ -491,10 +497,10 @@
  			ret = addr;
  			goto up_fail;
 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
-index b58e963..cdc83ce 100644
+index 82cd39a..702e3a0 100644
 --- a/arch/x86/xen/enlighten.c
 +++ b/arch/x86/xen/enlighten.c
-@@ -316,6 +316,24 @@ static void xen_set_ldt(const void *addr, unsigned entries)
+@@ -282,6 +282,24 @@ static void xen_set_ldt(const void *addr, unsigned entries)
  	xen_mc_issue(PARAVIRT_LAZY_CPU);
  }
  
@@ -519,7 +525,7 @@
  static void xen_load_gdt(const struct desc_ptr *dtr)
  {
  	unsigned long *frames;
-@@ -1232,6 +1250,9 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
+@@ -792,6 +810,9 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
  
  	.load_tr_desc = paravirt_nop,
  	.set_ldt = xen_set_ldt,
@@ -530,10 +536,10 @@
  	.load_idt = xen_load_idt,
  	.load_tls = xen_load_tls,
 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
-index 33b7235..ce1f044 100644
+index 40381df..f856fab 100644
 --- a/fs/binfmt_elf.c
 +++ b/fs/binfmt_elf.c
-@@ -80,7 +80,7 @@ static struct linux_binfmt elf_format = {
+@@ -73,7 +73,7 @@ static struct linux_binfmt elf_format = {
  		.hasvdso	= 1
  };
  
@@ -542,7 +548,7 @@
  
  static int set_brk(unsigned long start, unsigned long end)
  {
-@@ -735,6 +735,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -721,6 +721,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
  			break;
  		}
  
@@ -554,7 +560,7 @@
  	/* Some simple consistency checks for the interpreter */
  	if (elf_interpreter) {
  		retval = -ELIBBAD;
-@@ -754,6 +759,15 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -740,6 +745,15 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
  	if (retval)
  		goto out_free_dentry;
  
@@ -570,7 +576,7 @@
  	/* OK, This is the point of no return */
  	current->flags &= ~PF_FORKNOEXEC;
  	current->mm->def_flags = def_flags;
-@@ -761,7 +775,8 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -747,7 +761,8 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
  	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
  	   may depend on the personality.  */
  	SET_PERSONALITY(loc->elf_ex);
@@ -580,7 +586,7 @@
  		current->personality |= READ_IMPLIES_EXEC;
  
  	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-@@ -926,7 +941,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -912,7 +927,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
  					    interpreter,
  					    &interp_map_addr,
  					    load_bias);
@@ -589,34 +595,11 @@
  			/*
  			 * load_elf_interp() returns relocation
  			 * adjustment
-diff --git a/fs/proc/array.c b/fs/proc/array.c
-index 7e4877d..0c368c1 100644
---- a/fs/proc/array.c
-+++ b/fs/proc/array.c
-@@ -424,8 +424,16 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
- 		unlock_task_sighand(task, &flags);
- 	}
- 
--	if (!whole || num_threads < 2)
--		wchan = get_wchan(task);
-+	if (!whole || num_threads < 2) {
-+		const struct cred *cred;
-+
-+		wchan = 0;
-+		cred = __task_cred(task);
-+		if (current_uid() == cred->uid || current_euid() == cred->uid ||
-+			capable(CAP_SYS_NICE))
-+				wchan = get_wchan(task);
-+	}
-+
- 	if (!whole) {
- 		min_flt = task->min_flt;
- 		maj_flt = task->maj_flt;
 diff --git a/include/linux/mm.h b/include/linux/mm.h
-index 065cdf8..aa94aa9 100644
+index bff1f0d..88c5efa 100644
 --- a/include/linux/mm.h
 +++ b/include/linux/mm.h
-@@ -1135,7 +1135,13 @@ extern int install_special_mapping(struct mm_struct *mm,
+@@ -1138,7 +1138,13 @@ extern int install_special_mapping(struct mm_struct *mm,
  				   unsigned long addr, unsigned long len,
  				   unsigned long flags, struct page **pages);
  
@@ -632,10 +615,10 @@
  extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
  	unsigned long len, unsigned long prot,
 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
-index 92915e8..4bfd050 100644
+index 0e80e26..af904ea 100644
 --- a/include/linux/mm_types.h
 +++ b/include/linux/mm_types.h
-@@ -194,6 +194,9 @@ struct mm_struct {
+@@ -198,6 +198,9 @@ struct mm_struct {
  	unsigned long (*get_unmapped_area) (struct file *filp,
  				unsigned long addr, unsigned long len,
  				unsigned long pgoff, unsigned long flags);
@@ -663,12 +646,12 @@
  /*
   * GPG2 wants 64kB of mlocked memory, to make sure pass phrases
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 8c216e0..79eca33 100644
+index b94f354..aed6221 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -98,6 +98,9 @@ struct robust_list_head;
- struct bio;
+@@ -99,6 +99,9 @@ struct bio;
  struct bts_tracer;
+ struct fs_struct;
  
 +extern int exec_shield;
 +extern int print_fatal_signals;
@@ -676,7 +659,7 @@
  /*
   * List of flags we want to share for kernel threads,
   * if only because they are not used by them anyway.
-@@ -346,6 +349,10 @@ extern int sysctl_max_map_count;
+@@ -351,6 +354,10 @@ extern int sysctl_max_map_count;
  extern unsigned long
  arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
  		       unsigned long, unsigned long);
@@ -688,10 +671,10 @@
  arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
  			  unsigned long len, unsigned long pgoff,
 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index c5ef44f..f7abce4 100644
+index 82350f8..d89dd29 100644
 --- a/kernel/sysctl.c
 +++ b/kernel/sysctl.c
-@@ -85,6 +85,26 @@ extern int sysctl_nr_open_min, sysctl_nr_open_max;
+@@ -86,6 +86,26 @@ extern int sysctl_nr_open_min, sysctl_nr_open_max;
  #ifndef CONFIG_MMU
  extern int sysctl_nr_trim_pages;
  #endif
@@ -718,7 +701,7 @@
  #ifdef CONFIG_RCU_TORTURE_TEST
  extern int rcutorture_runnable;
  #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
-@@ -379,6 +399,14 @@ static struct ctl_table kern_table[] = {
+@@ -377,6 +397,14 @@ static struct ctl_table kern_table[] = {
  		.proc_handler	= &proc_dointvec,
  	},
  	{
@@ -734,10 +717,10 @@
  		.procname	= "core_uses_pid",
  		.data		= &core_uses_pid,
 diff --git a/mm/mmap.c b/mm/mmap.c
-index 00ced3e..931bc3b 100644
+index 4a38411..12ca810 100644
 --- a/mm/mmap.c
 +++ b/mm/mmap.c
-@@ -27,6 +27,7 @@
+@@ -28,6 +28,7 @@
  #include <linux/mempolicy.h>
  #include <linux/rmap.h>
  #include <linux/mmu_notifier.h>
@@ -745,7 +728,7 @@
  
  #include <asm/uaccess.h>
  #include <asm/cacheflush.h>
-@@ -43,6 +44,18 @@
+@@ -44,6 +45,18 @@
  #define arch_rebalance_pgtables(addr, len)		(addr)
  #endif
  
@@ -764,7 +747,7 @@
  static void unmap_region(struct mm_struct *mm,
  		struct vm_area_struct *vma, struct vm_area_struct *prev,
  		unsigned long start, unsigned long end);
-@@ -391,6 +404,8 @@ static inline void
+@@ -392,6 +405,8 @@ static inline void
  __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
  		struct vm_area_struct *prev, struct rb_node *rb_parent)
  {
@@ -773,7 +756,7 @@
  	if (prev) {
  		vma->vm_next = prev->vm_next;
  		prev->vm_next = vma;
-@@ -493,6 +508,8 @@ __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -494,6 +509,8 @@ __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
  	rb_erase(&vma->vm_rb, &mm->mm_rb);
  	if (mm->mmap_cache == vma)
  		mm->mmap_cache = prev;
@@ -782,7 +765,7 @@
  }
  
  /*
-@@ -802,6 +819,8 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -803,6 +820,8 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
  		} else					/* cases 2, 5, 7 */
  			vma_adjust(prev, prev->vm_start,
  				end, prev->vm_pgoff, NULL);
@@ -791,7 +774,7 @@
  		return prev;
  	}
  
-@@ -956,7 +975,8 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -957,7 +976,8 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
  	/* Obtain the address to map to. we verify (or select) it and ensure
  	 * that it represents a valid section of the address space.
  	 */
@@ -801,7 +784,7 @@
  	if (addr & ~PAGE_MASK)
  		return addr;
  
-@@ -1436,13 +1456,17 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
+@@ -1440,13 +1460,17 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
  }
  
  unsigned long
@@ -822,7 +805,7 @@
  	if (file && file->f_op && file->f_op->get_unmapped_area)
  		get_area = file->f_op->get_unmapped_area;
  	addr = get_area(file, addr, len, pgoff, flags);
-@@ -1456,8 +1480,76 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
+@@ -1460,8 +1484,76 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
  
  	return arch_rebalance_pgtables(addr, len);
  }
@@ -900,7 +883,7 @@
  
  /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
  struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
-@@ -1532,6 +1624,14 @@ out:
+@@ -1536,6 +1628,14 @@ out:
  	return prev ? prev->vm_next : vma;
  }
  
@@ -915,7 +898,7 @@
  /*
   * Verify that the stack growth is acceptable and
   * update accounting. This is shared with both the
-@@ -1548,7 +1648,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1552,7 +1652,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
  		return -ENOMEM;
  
  	/* Stack limit test */
@@ -924,7 +907,7 @@
  		return -ENOMEM;
  
  	/* mlock limit tests */
-@@ -1858,10 +1958,14 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1862,10 +1962,14 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  	if (new->vm_ops && new->vm_ops->open)
  		new->vm_ops->open(new);
  
@@ -941,7 +924,7 @@
  		vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
  
  	return 0;
-@@ -2110,6 +2214,7 @@ void exit_mmap(struct mm_struct *mm)
+@@ -2114,6 +2218,7 @@ void exit_mmap(struct mm_struct *mm)
  	vm_unacct_memory(nr_accounted);
  	free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
  	tlb_finish_mmu(tlb, 0, end);



