Attachment 'MC2_litmusrt_ipdps15.patch'

   1 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
   2 index 8dd7248..48128ed 100644
   3 --- a/Documentation/kernel-parameters.txt
   4 +++ b/Documentation/kernel-parameters.txt
   5 @@ -879,6 +879,7 @@ and is between 256 and 4096 characters. It is defined in the file
   6  			     controller
   7  	i8042.nopnp	[HW] Don't use ACPIPnP / PnPBIOS to discover KBD/AUX
   8  			     controllers
   9 +	i8042.notimeout	[HW] Ignore timeout condition signalled by conroller
  10  	i8042.reset	[HW] Reset the controller during init and cleanup
  11  	i8042.unlock	[HW] Unlock (ignore) the keylock
  12  
  13 @@ -1745,7 +1746,7 @@ and is between 256 and 4096 characters. It is defined in the file
  14  
  15  	nousb		[USB] Disable the USB subsystem
  16  
  17 -	nowatchdog	[KNL] Disable the lockup detector.
  18 +	nowatchdog	[KNL] Disable the lockup detector (NMI watchdog).
  19  
  20  	nowb		[ARM]
  21  
  22 diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
  23 index 55b859b..506d9d9 100644
  24 --- a/Documentation/power/runtime_pm.txt
  25 +++ b/Documentation/power/runtime_pm.txt
  26 @@ -336,8 +336,8 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
  27        zero)
  28  
  29    bool pm_runtime_suspended(struct device *dev);
  30 -    - return true if the device's runtime PM status is 'suspended', or false
  31 -      otherwise
  32 +    - return true if the device's runtime PM status is 'suspended' and its
  33 +      'power.disable_depth' field is equal to zero, or false otherwise
  34  
  35    void pm_runtime_allow(struct device *dev);
  36      - set the power.runtime_auto flag for the device and decrease its usage
  37 diff --git a/Makefile b/Makefile
  38 index 8e53f47..77a7eb5 100644
  39 --- a/Makefile
  40 +++ b/Makefile
  41 @@ -1,7 +1,7 @@
  42  VERSION = 2
  43  PATCHLEVEL = 6
  44  SUBLEVEL = 36
  45 -EXTRAVERSION =-litmus2010
  46 +EXTRAVERSION =.4-litmus2010
  47  NAME = Flesh-Eating Bats with Fangs
  48  
  49  # *DOCUMENTATION*
  50 diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
  51 index 6e8f05c..d757555 100644
  52 --- a/arch/arm/include/asm/assembler.h
  53 +++ b/arch/arm/include/asm/assembler.h
  54 @@ -215,7 +215,7 @@
  55  	@ Slightly optimised to avoid incrementing the pointer twice
  56  	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
  57  	.if	\rept == 2
  58 -	usraccoff \instr, \reg, \ptr, \inc, 4, \cond, \abort
  59 +	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
  60  	.endif
  61  
  62  	add\cond \ptr, #\rept * \inc
  63 diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
  64 index 0826599..48066ce 100644
  65 --- a/arch/arm/include/asm/kgdb.h
  66 +++ b/arch/arm/include/asm/kgdb.h
  67 @@ -70,7 +70,8 @@ extern int kgdb_fault_expected;
  68  #define _GP_REGS		16
  69  #define _FP_REGS		8
  70  #define _EXTRA_REGS		2
  71 -#define DBG_MAX_REG_NUM		(_GP_REGS + (_FP_REGS * 3) + _EXTRA_REGS)
  72 +#define GDB_MAX_REGS		(_GP_REGS + (_FP_REGS * 3) + _EXTRA_REGS)
  73 +#define DBG_MAX_REG_NUM		(_GP_REGS + _FP_REGS + _EXTRA_REGS)
  74  
  75  #define KGDB_MAX_NO_CPUS	1
  76  #define BUFMAX			400
  77 @@ -93,7 +94,7 @@ extern int kgdb_fault_expected;
  78  #define _SPT			13
  79  #define _LR			14
  80  #define _PC			15
  81 -#define _CPSR			(DBG_MAX_REG_NUM - 1)
  82 +#define _CPSR			(GDB_MAX_REGS - 1)
  83  
  84  /*
  85   * So that we can denote the end of a frame for tracing,
  86 diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
  87 index 584a683..b99087a 100644
  88 --- a/arch/arm/kernel/calls.S
  89 +++ b/arch/arm/kernel/calls.S
  90 @@ -384,14 +384,12 @@
  91  		CALL(sys_complete_job)
  92  		CALL(sys_od_open)
  93  		CALL(sys_od_close)
  94 -/* 375 */	CALL(sys_fmlp_down)
  95 -		CALL(sys_fmlp_up)
  96 -		CALL(sys_srp_down)
  97 -		CALL(sys_srp_up)
  98 +/* 375 */	CALL(sys_litmus_lock)
  99 +		CALL(sys_litmus_unlock)
 100  		CALL(sys_query_job_no)
 101 -/* 380 */	CALL(sys_wait_for_job_release)
 102 +		CALL(sys_wait_for_job_release)
 103  		CALL(sys_wait_for_ts_release)
 104 -		CALL(sys_release_ts)
 105 +/* 380 */	CALL(sys_release_ts)
 106  		CALL(sys_null_call)
 107  #ifndef syscalls_counted
 108  .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 109 diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
 110 index d6e8b4d..778c2f7 100644
 111 --- a/arch/arm/kernel/kgdb.c
 112 +++ b/arch/arm/kernel/kgdb.c
 113 @@ -79,7 +79,7 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
 114  		return;
 115  
 116  	/* Initialize to zero */
 117 -	for (regno = 0; regno < DBG_MAX_REG_NUM; regno++)
 118 +	for (regno = 0; regno < GDB_MAX_REGS; regno++)
 119  		gdb_regs[regno] = 0;
 120  
 121  	/* Otherwise, we have only some registers from switch_to() */
 122 diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S
 123 index 1e4cbd4..64f6bc1 100644
 124 --- a/arch/arm/lib/findbit.S
 125 +++ b/arch/arm/lib/findbit.S
 126 @@ -174,8 +174,8 @@ ENDPROC(_find_next_bit_be)
 127   */
 128  .L_found:
 129  #if __LINUX_ARM_ARCH__ >= 5
 130 -		rsb	r1, r3, #0
 131 -		and	r3, r3, r1
 132 +		rsb	r0, r3, #0
 133 +		and	r3, r3, r0
 134  		clz	r3, r3
 135  		rsb	r3, r3, #31
 136  		add	r0, r2, r3
 137 @@ -190,5 +190,7 @@ ENDPROC(_find_next_bit_be)
 138  		addeq	r2, r2, #1
 139  		mov	r0, r2
 140  #endif
 141 +		cmp	r1, r0			@ Clamp to maxbit
 142 +		movlo	r0, r1
 143  		mov	pc, lr
 144  
 145 diff --git a/arch/arm/mach-at91/include/mach/at91_mci.h b/arch/arm/mach-at91/include/mach/at91_mci.h
 146 index 57f8ee1..27ac6f5 100644
 147 --- a/arch/arm/mach-at91/include/mach/at91_mci.h
 148 +++ b/arch/arm/mach-at91/include/mach/at91_mci.h
 149 @@ -74,6 +74,8 @@
 150  #define			AT91_MCI_TRTYP_BLOCK	(0 << 19)
 151  #define			AT91_MCI_TRTYP_MULTIPLE	(1 << 19)
 152  #define			AT91_MCI_TRTYP_STREAM	(2 << 19)
 153 +#define			AT91_MCI_TRTYP_SDIO_BYTE	(4 << 19)
 154 +#define			AT91_MCI_TRTYP_SDIO_BLOCK	(5 << 19)
 155  
 156  #define AT91_MCI_BLKR		0x18		/* Block Register */
 157  #define		AT91_MCI_BLKR_BCNT(n)	((0xffff & (n)) << 0)	/* Block count */
 158 diff --git a/arch/arm/mach-cns3xxx/include/mach/debug-macro.S b/arch/arm/mach-cns3xxx/include/mach/debug-macro.S
 159 index d16ce7e..9b50442 100644
 160 --- a/arch/arm/mach-cns3xxx/include/mach/debug-macro.S
 161 +++ b/arch/arm/mach-cns3xxx/include/mach/debug-macro.S
 162 @@ -10,7 +10,7 @@
 163   * published by the Free Software Foundation.
 164   */
 165  
 166 -		.macro	addruart,rx
 167 +		.macro	addruart,rx,rtmp
 168  		mrc	p15, 0, \rx, c1, c0
 169  		tst	\rx, #1			@ MMU enabled?
 170  		moveq	\rx,      #0x10000000
 171 diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
 172 index 38088c3..78defd7 100644
 173 --- a/arch/arm/mach-cns3xxx/pcie.c
 174 +++ b/arch/arm/mach-cns3xxx/pcie.c
 175 @@ -369,7 +369,7 @@ static int __init cns3xxx_pcie_init(void)
 176  {
 177  	int i;
 178  
 179 -	hook_fault_code(16 + 6, cns3xxx_pcie_abort_handler, SIGBUS,
 180 +	hook_fault_code(16 + 6, cns3xxx_pcie_abort_handler, SIGBUS, 0,
 181  			"imprecise external abort");
 182  
 183  	for (i = 0; i < ARRAY_SIZE(cns3xxx_pcie); i++) {
 184 diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
 185 index 86aa689..47010d8 100644
 186 --- a/arch/arm/mm/cache-v6.S
 187 +++ b/arch/arm/mm/cache-v6.S
 188 @@ -196,6 +196,10 @@ ENTRY(v6_flush_kern_dcache_area)
 189   *	- end     - virtual end address of region
 190   */
 191  v6_dma_inv_range:
 192 +#ifdef CONFIG_DMA_CACHE_RWFO
 193 +	ldrb	r2, [r0]			@ read for ownership
 194 +	strb	r2, [r0]			@ write for ownership
 195 +#endif
 196  	tst	r0, #D_CACHE_LINE_SIZE - 1
 197  	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 198  #ifdef HARVARD_CACHE
 199 @@ -204,6 +208,10 @@ v6_dma_inv_range:
 200  	mcrne	p15, 0, r0, c7, c11, 1		@ clean unified line
 201  #endif
 202  	tst	r1, #D_CACHE_LINE_SIZE - 1
 203 +#ifdef CONFIG_DMA_CACHE_RWFO
 204 +	ldrneb	r2, [r1, #-1]			@ read for ownership
 205 +	strneb	r2, [r1, #-1]			@ write for ownership
 206 +#endif
 207  	bic	r1, r1, #D_CACHE_LINE_SIZE - 1
 208  #ifdef HARVARD_CACHE
 209  	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D line
 210 @@ -211,10 +219,6 @@ v6_dma_inv_range:
 211  	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line
 212  #endif
 213  1:
 214 -#ifdef CONFIG_DMA_CACHE_RWFO
 215 -	ldr	r2, [r0]			@ read for ownership
 216 -	str	r2, [r0]			@ write for ownership
 217 -#endif
 218  #ifdef HARVARD_CACHE
 219  	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D line
 220  #else
 221 @@ -222,6 +226,10 @@ v6_dma_inv_range:
 222  #endif
 223  	add	r0, r0, #D_CACHE_LINE_SIZE
 224  	cmp	r0, r1
 225 +#ifdef CONFIG_DMA_CACHE_RWFO
 226 +	ldrlo	r2, [r0]			@ read for ownership
 227 +	strlo	r2, [r0]			@ write for ownership
 228 +#endif
 229  	blo	1b
 230  	mov	r0, #0
 231  	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 232 @@ -256,12 +264,12 @@ v6_dma_clean_range:
 233   *	- end     - virtual end address of region
 234   */
 235  ENTRY(v6_dma_flush_range)
 236 -	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 237 -1:
 238  #ifdef CONFIG_DMA_CACHE_RWFO
 239 -	ldr	r2, [r0]			@ read for ownership
 240 -	str	r2, [r0]			@ write for ownership
 241 +	ldrb	r2, [r0]		@ read for ownership
 242 +	strb	r2, [r0]		@ write for ownership
 243  #endif
 244 +	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 245 +1:
 246  #ifdef HARVARD_CACHE
 247  	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
 248  #else
 249 @@ -269,6 +277,10 @@ ENTRY(v6_dma_flush_range)
 250  #endif
 251  	add	r0, r0, #D_CACHE_LINE_SIZE
 252  	cmp	r0, r1
 253 +#ifdef CONFIG_DMA_CACHE_RWFO
 254 +	ldrlob	r2, [r0]			@ read for ownership
 255 +	strlob	r2, [r0]			@ write for ownership
 256 +#endif
 257  	blo	1b
 258  	mov	r0, #0
 259  	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 260 diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
 261 index 9b906de..56036ff 100644
 262 --- a/arch/arm/mm/fault-armv.c
 263 +++ b/arch/arm/mm/fault-armv.c
 264 @@ -65,6 +65,30 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
 265  	return ret;
 266  }
 267  
 268 +#if USE_SPLIT_PTLOCKS
 269 +/*
 270 + * If we are using split PTE locks, then we need to take the page
 271 + * lock here.  Otherwise we are using shared mm->page_table_lock
 272 + * which is already locked, thus cannot take it.
 273 + */
 274 +static inline void do_pte_lock(spinlock_t *ptl)
 275 +{
 276 +	/*
 277 +	 * Use nested version here to indicate that we are already
 278 +	 * holding one similar spinlock.
 279 +	 */
 280 +	spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
 281 +}
 282 +
 283 +static inline void do_pte_unlock(spinlock_t *ptl)
 284 +{
 285 +	spin_unlock(ptl);
 286 +}
 287 +#else /* !USE_SPLIT_PTLOCKS */
 288 +static inline void do_pte_lock(spinlock_t *ptl) {}
 289 +static inline void do_pte_unlock(spinlock_t *ptl) {}
 290 +#endif /* USE_SPLIT_PTLOCKS */
 291 +
 292  static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 293  	unsigned long pfn)
 294  {
 295 @@ -89,11 +113,11 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 296  	 */
 297  	ptl = pte_lockptr(vma->vm_mm, pmd);
 298  	pte = pte_offset_map_nested(pmd, address);
 299 -	spin_lock(ptl);
 300 +	do_pte_lock(ptl);
 301  
 302  	ret = do_adjust_pte(vma, address, pfn, pte);
 303  
 304 -	spin_unlock(ptl);
 305 +	do_pte_unlock(ptl);
 306  	pte_unmap_nested(pte);
 307  
 308  	return ret;
 309 diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
 310 index 7185b00..4e2f620 100644
 311 --- a/arch/arm/mm/init.c
 312 +++ b/arch/arm/mm/init.c
 313 @@ -282,6 +282,12 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 314  	memblock_reserve(__pa(_stext), _end - _stext);
 315  #endif
 316  #ifdef CONFIG_BLK_DEV_INITRD
 317 +	if (phys_initrd_size &&
 318 +	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
 319 +		pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
 320 +		       phys_initrd_start, phys_initrd_size);
 321 +		phys_initrd_start = phys_initrd_size = 0;
 322 +	}
 323  	if (phys_initrd_size) {
 324  		memblock_reserve(phys_initrd_start, phys_initrd_size);
 325  
 326 diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
 327 index ec7eddf..f5c5b8d 100644
 328 --- a/arch/arm/plat-omap/dma.c
 329 +++ b/arch/arm/plat-omap/dma.c
 330 @@ -30,6 +30,7 @@
 331  #include <linux/irq.h>
 332  #include <linux/io.h>
 333  #include <linux/slab.h>
 334 +#include <linux/delay.h>
 335  
 336  #include <asm/system.h>
 337  #include <mach/hardware.h>
 338 @@ -996,11 +997,17 @@ void omap_start_dma(int lch)
 339  	l = dma_read(CCR(lch));
 340  
 341  	/*
 342 -	 * Errata: On ES2.0 BUFFERING disable must be set.
 343 -	 * This will always fail on ES1.0
 344 +	 * Errata: Inter Frame DMA buffering issue (All OMAP2420 and
 345 +	 * OMAP2430ES1.0): DMA will wrongly buffer elements if packing and
 346 +	 * bursting is enabled. This might result in data gets stalled in
 347 +	 * FIFO at the end of the block.
 348 +	 * Workaround: DMA channels must have BUFFERING_DISABLED bit set to
 349 +	 * guarantee no data will stay in the DMA FIFO in case inter frame
 350 +	 * buffering occurs.
 351  	 */
 352 -	if (cpu_is_omap24xx())
 353 -		l |= OMAP_DMA_CCR_EN;
 354 +	if (cpu_is_omap2420() ||
 355 +	    (cpu_is_omap2430() && (omap_type() == OMAP2430_REV_ES1_0)))
 356 +		l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
 357  
 358  	l |= OMAP_DMA_CCR_EN;
 359  	dma_write(l, CCR(lch));
 360 @@ -1018,8 +1025,39 @@ void omap_stop_dma(int lch)
 361  		dma_write(0, CICR(lch));
 362  
 363  	l = dma_read(CCR(lch));
 364 -	l &= ~OMAP_DMA_CCR_EN;
 365 -	dma_write(l, CCR(lch));
 366 +	/* OMAP3 Errata i541: sDMA FIFO draining does not finish */
 367 +	if (cpu_is_omap34xx() && (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
 368 +		int i = 0;
 369 +		u32 sys_cf;
 370 +
 371 +		/* Configure No-Standby */
 372 +		l = dma_read(OCP_SYSCONFIG);
 373 +		sys_cf = l;
 374 +		l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
 375 +		l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
 376 +		dma_write(l , OCP_SYSCONFIG);
 377 +
 378 +		l = dma_read(CCR(lch));
 379 +		l &= ~OMAP_DMA_CCR_EN;
 380 +		dma_write(l, CCR(lch));
 381 +
 382 +		/* Wait for sDMA FIFO drain */
 383 +		l = dma_read(CCR(lch));
 384 +		while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
 385 +					OMAP_DMA_CCR_WR_ACTIVE))) {
 386 +			udelay(5);
 387 +			i++;
 388 +			l = dma_read(CCR(lch));
 389 +		}
 390 +		if (i >= 100)
 391 +			printk(KERN_ERR "DMA drain did not complete on "
 392 +					"lch %d\n", lch);
 393 +		/* Restore OCP_SYSCONFIG */
 394 +		dma_write(sys_cf, OCP_SYSCONFIG);
 395 +	} else {
 396 +		l &= ~OMAP_DMA_CCR_EN;
 397 +		dma_write(l, CCR(lch));
 398 +	}
 399  
 400  	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
 401  		int next_lch, cur_lch = lch;
 402 diff --git a/arch/arm/plat-omap/include/plat/dma.h b/arch/arm/plat-omap/include/plat/dma.h
 403 index af3a039..cf66f85 100644
 404 --- a/arch/arm/plat-omap/include/plat/dma.h
 405 +++ b/arch/arm/plat-omap/include/plat/dma.h
 406 @@ -335,6 +335,10 @@
 407  #define OMAP2_DMA_MISALIGNED_ERR_IRQ	(1 << 11)
 408  
 409  #define OMAP_DMA_CCR_EN			(1 << 7)
 410 +#define OMAP_DMA_CCR_RD_ACTIVE		(1 << 9)
 411 +#define OMAP_DMA_CCR_WR_ACTIVE		(1 << 10)
 412 +#define OMAP_DMA_CCR_SEL_SRC_DST_SYNC	(1 << 24)
 413 +#define OMAP_DMA_CCR_BUFFERING_DISABLE	(1 << 25)
 414  
 415  #define OMAP_DMA_DATA_TYPE_S8		0x00
 416  #define OMAP_DMA_DATA_TYPE_S16		0x01
 417 diff --git a/arch/avr32/include/asm/syscalls.h b/arch/avr32/include/asm/syscalls.h
 418 index ab608b7..730a461 100644
 419 --- a/arch/avr32/include/asm/syscalls.h
 420 +++ b/arch/avr32/include/asm/syscalls.h
 421 @@ -16,18 +16,9 @@
 422  #include <linux/signal.h>
 423  
 424  /* kernel/process.c */
 425 -asmlinkage int sys_fork(struct pt_regs *);
 426  asmlinkage int sys_clone(unsigned long, unsigned long,
 427  			 unsigned long, unsigned long,
 428  			 struct pt_regs *);
 429 -asmlinkage int sys_vfork(struct pt_regs *);
 430 -asmlinkage int sys_execve(const char __user *, char __user *__user *,
 431 -			  char __user *__user *, struct pt_regs *);
 432 -
 433 -/* kernel/signal.c */
 434 -asmlinkage int sys_sigaltstack(const stack_t __user *, stack_t __user *,
 435 -			       struct pt_regs *);
 436 -asmlinkage int sys_rt_sigreturn(struct pt_regs *);
 437  
 438  /* mm/cache.c */
 439  asmlinkage int sys_cacheflush(int, void __user *, size_t);
 440 diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
 441 index 592c707..b07d990 100644
 442 --- a/arch/microblaze/Makefile
 443 +++ b/arch/microblaze/Makefile
 444 @@ -72,12 +72,16 @@ export MMU DTB
 445  
 446  all: linux.bin
 447  
 448 -BOOT_TARGETS = linux.bin linux.bin.gz simpleImage.%
 449 +# With make 3.82 we cannot mix normal and wildcard targets
 450 +BOOT_TARGETS1 = linux.bin linux.bin.gz
 451 +BOOT_TARGETS2 = simpleImage.%
 452  
 453  archclean:
 454  	$(Q)$(MAKE) $(clean)=$(boot)
 455  
 456 -$(BOOT_TARGETS): vmlinux
 457 +$(BOOT_TARGETS1): vmlinux
 458 +	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 459 +$(BOOT_TARGETS2): vmlinux
 460  	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 461  
 462  define archhelp
 463 diff --git a/arch/microblaze/boot/dts/system.dts b/arch/microblaze/boot/dts/system.dts
 464 deleted file mode 120000
 465 index 7cb6578..0000000
 466 --- a/arch/microblaze/boot/dts/system.dts
 467 +++ /dev/null
 468 @@ -1 +0,0 @@
 469 -../../platform/generic/system.dts
 470 \ No newline at end of file
 471 diff --git a/arch/microblaze/boot/dts/system.dts b/arch/microblaze/boot/dts/system.dts
 472 new file mode 100644
 473 index 0000000..2d5c417
 474 --- /dev/null
 475 +++ b/arch/microblaze/boot/dts/system.dts
 476 @@ -0,0 +1,364 @@
 477 +/*
 478 + * Device Tree Generator version: 1.1
 479 + *
 480 + * (C) Copyright 2007-2008 Xilinx, Inc.
 481 + * (C) Copyright 2007-2009 Michal Simek
 482 + *
 483 + * Michal SIMEK <monstr@monstr.eu>
 484 + *
 485 + * This program is free software; you can redistribute it and/or
 486 + * modify it under the terms of the GNU General Public License as
 487 + * published by the Free Software Foundation; either version 2 of
 488 + * the License, or (at your option) any later version.
 489 + *
 490 + * This program is distributed in the hope that it will be useful,
 491 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 492 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 493 + * GNU General Public License for more details.
 494 + *
 495 + * You should have received a copy of the GNU General Public License
 496 + * along with this program; if not, write to the Free Software
 497 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 498 + * MA 02111-1307 USA
 499 + *
 500 + * CAUTION: This file is automatically generated by libgen.
 501 + * Version: Xilinx EDK 10.1.03 EDK_K_SP3.6
 502 + *
 503 + * XPS project directory: Xilinx-ML505-ll_temac-sgdma-MMU-FDT-edk101
 504 + */
 505 +
 506 +/dts-v1/;
 507 +/ {
 508 +	#address-cells = <1>;
 509 +	#size-cells = <1>;
 510 +	compatible = "xlnx,microblaze";
 511 +	hard-reset-gpios = <&LEDs_8Bit 2 1>;
 512 +	model = "testing";
 513 +	DDR2_SDRAM: memory@90000000 {
 514 +		device_type = "memory";
 515 +		reg = < 0x90000000 0x10000000 >;
 516 +	} ;
 517 +	aliases {
 518 +		ethernet0 = &Hard_Ethernet_MAC;
 519 +		serial0 = &RS232_Uart_1;
 520 +	} ;
 521 +	chosen {
 522 +		bootargs = "console=ttyUL0,115200 highres=on";
 523 +		linux,stdout-path = "/plb@0/serial@84000000";
 524 +	} ;
 525 +	cpus {
 526 +		#address-cells = <1>;
 527 +		#cpus = <0x1>;
 528 +		#size-cells = <0>;
 529 +		microblaze_0: cpu@0 {
 530 +			clock-frequency = <125000000>;
 531 +			compatible = "xlnx,microblaze-7.10.d";
 532 +			d-cache-baseaddr = <0x90000000>;
 533 +			d-cache-highaddr = <0x9fffffff>;
 534 +			d-cache-line-size = <0x10>;
 535 +			d-cache-size = <0x2000>;
 536 +			device_type = "cpu";
 537 +			i-cache-baseaddr = <0x90000000>;
 538 +			i-cache-highaddr = <0x9fffffff>;
 539 +			i-cache-line-size = <0x10>;
 540 +			i-cache-size = <0x2000>;
 541 +			model = "microblaze,7.10.d";
 542 +			reg = <0>;
 543 +			timebase-frequency = <125000000>;
 544 +			xlnx,addr-tag-bits = <0xf>;
 545 +			xlnx,allow-dcache-wr = <0x1>;
 546 +			xlnx,allow-icache-wr = <0x1>;
 547 +			xlnx,area-optimized = <0x0>;
 548 +			xlnx,cache-byte-size = <0x2000>;
 549 +			xlnx,d-lmb = <0x1>;
 550 +			xlnx,d-opb = <0x0>;
 551 +			xlnx,d-plb = <0x1>;
 552 +			xlnx,data-size = <0x20>;
 553 +			xlnx,dcache-addr-tag = <0xf>;
 554 +			xlnx,dcache-always-used = <0x1>;
 555 +			xlnx,dcache-byte-size = <0x2000>;
 556 +			xlnx,dcache-line-len = <0x4>;
 557 +			xlnx,dcache-use-fsl = <0x1>;
 558 +			xlnx,debug-enabled = <0x1>;
 559 +			xlnx,div-zero-exception = <0x1>;
 560 +			xlnx,dopb-bus-exception = <0x0>;
 561 +			xlnx,dynamic-bus-sizing = <0x1>;
 562 +			xlnx,edge-is-positive = <0x1>;
 563 +			xlnx,family = "virtex5";
 564 +			xlnx,fpu-exception = <0x1>;
 565 +			xlnx,fsl-data-size = <0x20>;
 566 +			xlnx,fsl-exception = <0x0>;
 567 +			xlnx,fsl-links = <0x0>;
 568 +			xlnx,i-lmb = <0x1>;
 569 +			xlnx,i-opb = <0x0>;
 570 +			xlnx,i-plb = <0x1>;
 571 +			xlnx,icache-always-used = <0x1>;
 572 +			xlnx,icache-line-len = <0x4>;
 573 +			xlnx,icache-use-fsl = <0x1>;
 574 +			xlnx,ill-opcode-exception = <0x1>;
 575 +			xlnx,instance = "microblaze_0";
 576 +			xlnx,interconnect = <0x1>;
 577 +			xlnx,interrupt-is-edge = <0x0>;
 578 +			xlnx,iopb-bus-exception = <0x0>;
 579 +			xlnx,mmu-dtlb-size = <0x4>;
 580 +			xlnx,mmu-itlb-size = <0x2>;
 581 +			xlnx,mmu-tlb-access = <0x3>;
 582 +			xlnx,mmu-zones = <0x10>;
 583 +			xlnx,number-of-pc-brk = <0x1>;
 584 +			xlnx,number-of-rd-addr-brk = <0x0>;
 585 +			xlnx,number-of-wr-addr-brk = <0x0>;
 586 +			xlnx,opcode-0x0-illegal = <0x1>;
 587 +			xlnx,pvr = <0x2>;
 588 +			xlnx,pvr-user1 = <0x0>;
 589 +			xlnx,pvr-user2 = <0x0>;
 590 +			xlnx,reset-msr = <0x0>;
 591 +			xlnx,sco = <0x0>;
 592 +			xlnx,unaligned-exceptions = <0x1>;
 593 +			xlnx,use-barrel = <0x1>;
 594 +			xlnx,use-dcache = <0x1>;
 595 +			xlnx,use-div = <0x1>;
 596 +			xlnx,use-ext-brk = <0x1>;
 597 +			xlnx,use-ext-nm-brk = <0x1>;
 598 +			xlnx,use-extended-fsl-instr = <0x0>;
 599 +			xlnx,use-fpu = <0x2>;
 600 +			xlnx,use-hw-mul = <0x2>;
 601 +			xlnx,use-icache = <0x1>;
 602 +			xlnx,use-interrupt = <0x1>;
 603 +			xlnx,use-mmu = <0x3>;
 604 +			xlnx,use-msr-instr = <0x1>;
 605 +			xlnx,use-pcmp-instr = <0x1>;
 606 +		} ;
 607 +	} ;
 608 +	mb_plb: plb@0 {
 609 +		#address-cells = <1>;
 610 +		#size-cells = <1>;
 611 +		compatible = "xlnx,plb-v46-1.03.a", "xlnx,plb-v46-1.00.a", "simple-bus";
 612 +		ranges ;
 613 +		FLASH: flash@a0000000 {
 614 +			bank-width = <2>;
 615 +			compatible = "xlnx,xps-mch-emc-2.00.a", "cfi-flash";
 616 +			reg = < 0xa0000000 0x2000000 >;
 617 +			xlnx,family = "virtex5";
 618 +			xlnx,include-datawidth-matching-0 = <0x1>;
 619 +			xlnx,include-datawidth-matching-1 = <0x0>;
 620 +			xlnx,include-datawidth-matching-2 = <0x0>;
 621 +			xlnx,include-datawidth-matching-3 = <0x0>;
 622 +			xlnx,include-negedge-ioregs = <0x0>;
 623 +			xlnx,include-plb-ipif = <0x1>;
 624 +			xlnx,include-wrbuf = <0x1>;
 625 +			xlnx,max-mem-width = <0x10>;
 626 +			xlnx,mch-native-dwidth = <0x20>;
 627 +			xlnx,mch-plb-clk-period-ps = <0x1f40>;
 628 +			xlnx,mch-splb-awidth = <0x20>;
 629 +			xlnx,mch0-accessbuf-depth = <0x10>;
 630 +			xlnx,mch0-protocol = <0x0>;
 631 +			xlnx,mch0-rddatabuf-depth = <0x10>;
 632 +			xlnx,mch1-accessbuf-depth = <0x10>;
 633 +			xlnx,mch1-protocol = <0x0>;
 634 +			xlnx,mch1-rddatabuf-depth = <0x10>;
 635 +			xlnx,mch2-accessbuf-depth = <0x10>;
 636 +			xlnx,mch2-protocol = <0x0>;
 637 +			xlnx,mch2-rddatabuf-depth = <0x10>;
 638 +			xlnx,mch3-accessbuf-depth = <0x10>;
 639 +			xlnx,mch3-protocol = <0x0>;
 640 +			xlnx,mch3-rddatabuf-depth = <0x10>;
 641 +			xlnx,mem0-width = <0x10>;
 642 +			xlnx,mem1-width = <0x20>;
 643 +			xlnx,mem2-width = <0x20>;
 644 +			xlnx,mem3-width = <0x20>;
 645 +			xlnx,num-banks-mem = <0x1>;
 646 +			xlnx,num-channels = <0x0>;
 647 +			xlnx,priority-mode = <0x0>;
 648 +			xlnx,synch-mem-0 = <0x0>;
 649 +			xlnx,synch-mem-1 = <0x0>;
 650 +			xlnx,synch-mem-2 = <0x0>;
 651 +			xlnx,synch-mem-3 = <0x0>;
 652 +			xlnx,synch-pipedelay-0 = <0x2>;
 653 +			xlnx,synch-pipedelay-1 = <0x2>;
 654 +			xlnx,synch-pipedelay-2 = <0x2>;
 655 +			xlnx,synch-pipedelay-3 = <0x2>;
 656 +			xlnx,tavdv-ps-mem-0 = <0x1adb0>;
 657 +			xlnx,tavdv-ps-mem-1 = <0x3a98>;
 658 +			xlnx,tavdv-ps-mem-2 = <0x3a98>;
 659 +			xlnx,tavdv-ps-mem-3 = <0x3a98>;
 660 +			xlnx,tcedv-ps-mem-0 = <0x1adb0>;
 661 +			xlnx,tcedv-ps-mem-1 = <0x3a98>;
 662 +			xlnx,tcedv-ps-mem-2 = <0x3a98>;
 663 +			xlnx,tcedv-ps-mem-3 = <0x3a98>;
 664 +			xlnx,thzce-ps-mem-0 = <0x88b8>;
 665 +			xlnx,thzce-ps-mem-1 = <0x1b58>;
 666 +			xlnx,thzce-ps-mem-2 = <0x1b58>;
 667 +			xlnx,thzce-ps-mem-3 = <0x1b58>;
 668 +			xlnx,thzoe-ps-mem-0 = <0x1b58>;
 669 +			xlnx,thzoe-ps-mem-1 = <0x1b58>;
 670 +			xlnx,thzoe-ps-mem-2 = <0x1b58>;
 671 +			xlnx,thzoe-ps-mem-3 = <0x1b58>;
 672 +			xlnx,tlzwe-ps-mem-0 = <0x88b8>;
 673 +			xlnx,tlzwe-ps-mem-1 = <0x0>;
 674 +			xlnx,tlzwe-ps-mem-2 = <0x0>;
 675 +			xlnx,tlzwe-ps-mem-3 = <0x0>;
 676 +			xlnx,twc-ps-mem-0 = <0x2af8>;
 677 +			xlnx,twc-ps-mem-1 = <0x3a98>;
 678 +			xlnx,twc-ps-mem-2 = <0x3a98>;
 679 +			xlnx,twc-ps-mem-3 = <0x3a98>;
 680 +			xlnx,twp-ps-mem-0 = <0x11170>;
 681 +			xlnx,twp-ps-mem-1 = <0x2ee0>;
 682 +			xlnx,twp-ps-mem-2 = <0x2ee0>;
 683 +			xlnx,twp-ps-mem-3 = <0x2ee0>;
 684 +			xlnx,xcl0-linesize = <0x4>;
 685 +			xlnx,xcl0-writexfer = <0x1>;
 686 +			xlnx,xcl1-linesize = <0x4>;
 687 +			xlnx,xcl1-writexfer = <0x1>;
 688 +			xlnx,xcl2-linesize = <0x4>;
 689 +			xlnx,xcl2-writexfer = <0x1>;
 690 +			xlnx,xcl3-linesize = <0x4>;
 691 +			xlnx,xcl3-writexfer = <0x1>;
 692 +		} ;
 693 +		Hard_Ethernet_MAC: xps-ll-temac@81c00000 {
 694 +			#address-cells = <1>;
 695 +			#size-cells = <1>;
 696 +			compatible = "xlnx,compound";
 697 +			ethernet@81c00000 {
 698 +				compatible = "xlnx,xps-ll-temac-1.01.b", "xlnx,xps-ll-temac-1.00.a";
 699 +				device_type = "network";
 700 +				interrupt-parent = <&xps_intc_0>;
 701 +				interrupts = < 5 2 >;
 702 +				llink-connected = <&PIM3>;
 703 +				local-mac-address = [ 00 0a 35 00 00 00 ];
 704 +				reg = < 0x81c00000 0x40 >;
 705 +				xlnx,bus2core-clk-ratio = <0x1>;
 706 +				xlnx,phy-type = <0x1>;
 707 +				xlnx,phyaddr = <0x1>;
 708 +				xlnx,rxcsum = <0x0>;
 709 +				xlnx,rxfifo = <0x1000>;
 710 +				xlnx,temac-type = <0x0>;
 711 +				xlnx,txcsum = <0x0>;
 712 +				xlnx,txfifo = <0x1000>;
 713 +			} ;
 714 +		} ;
 715 +		IIC_EEPROM: i2c@81600000 {
 716 +			compatible = "xlnx,xps-iic-2.00.a";
 717 +			interrupt-parent = <&xps_intc_0>;
 718 +			interrupts = < 6 2 >;
 719 +			reg = < 0x81600000 0x10000 >;
 720 +			xlnx,clk-freq = <0x7735940>;
 721 +			xlnx,family = "virtex5";
 722 +			xlnx,gpo-width = <0x1>;
 723 +			xlnx,iic-freq = <0x186a0>;
 724 +			xlnx,scl-inertial-delay = <0x0>;
 725 +			xlnx,sda-inertial-delay = <0x0>;
 726 +			xlnx,ten-bit-adr = <0x0>;
 727 +		} ;
 728 +		LEDs_8Bit: gpio@81400000 {
 729 +			compatible = "xlnx,xps-gpio-1.00.a";
 730 +			interrupt-parent = <&xps_intc_0>;
 731 +			interrupts = < 7 2 >;
 732 +			reg = < 0x81400000 0x10000 >;
 733 +			xlnx,all-inputs = <0x0>;
 734 +			xlnx,all-inputs-2 = <0x0>;
 735 +			xlnx,dout-default = <0x0>;
 736 +			xlnx,dout-default-2 = <0x0>;
 737 +			xlnx,family = "virtex5";
 738 +			xlnx,gpio-width = <0x8>;
 739 +			xlnx,interrupt-present = <0x1>;
 740 +			xlnx,is-bidir = <0x1>;
 741 +			xlnx,is-bidir-2 = <0x1>;
 742 +			xlnx,is-dual = <0x0>;
 743 +			xlnx,tri-default = <0xffffffff>;
 744 +			xlnx,tri-default-2 = <0xffffffff>;
 745 +			#gpio-cells = <2>;
 746 +			gpio-controller;
 747 +		} ;
 748 +
 749 +		gpio-leds {
 750 +			compatible = "gpio-leds";
 751 +
 752 +			heartbeat {
 753 +				label = "Heartbeat";
 754 +				gpios = <&LEDs_8Bit 4 1>;
 755 +				linux,default-trigger = "heartbeat";
 756 +			};
 757 +
 758 +			yellow {
 759 +				label = "Yellow";
 760 +				gpios = <&LEDs_8Bit 5 1>;
 761 +			};
 762 +
 763 +			red {
 764 +				label = "Red";
 765 +				gpios = <&LEDs_8Bit 6 1>;
 766 +			};
 767 +
 768 +			green {
 769 +				label = "Green";
 770 +				gpios = <&LEDs_8Bit 7 1>;
 771 +			};
 772 +		} ;
 773 +		RS232_Uart_1: serial@84000000 {
 774 +			clock-frequency = <125000000>;
 775 +			compatible = "xlnx,xps-uartlite-1.00.a";
 776 +			current-speed = <115200>;
 777 +			device_type = "serial";
 778 +			interrupt-parent = <&xps_intc_0>;
 779 +			interrupts = < 8 0 >;
 780 +			port-number = <0>;
 781 +			reg = < 0x84000000 0x10000 >;
 782 +			xlnx,baudrate = <0x1c200>;
 783 +			xlnx,data-bits = <0x8>;
 784 +			xlnx,family = "virtex5";
 785 +			xlnx,odd-parity = <0x0>;
 786 +			xlnx,use-parity = <0x0>;
 787 +		} ;
 788 +		SysACE_CompactFlash: sysace@83600000 {
 789 +			compatible = "xlnx,xps-sysace-1.00.a";
 790 +			interrupt-parent = <&xps_intc_0>;
 791 +			interrupts = < 4 2 >;
 792 +			reg = < 0x83600000 0x10000 >;
 793 +			xlnx,family = "virtex5";
 794 +			xlnx,mem-width = <0x10>;
 795 +		} ;
 796 +		debug_module: debug@84400000 {
 797 +			compatible = "xlnx,mdm-1.00.d";
 798 +			reg = < 0x84400000 0x10000 >;
 799 +			xlnx,family = "virtex5";
 800 +			xlnx,interconnect = <0x1>;
 801 +			xlnx,jtag-chain = <0x2>;
 802 +			xlnx,mb-dbg-ports = <0x1>;
 803 +			xlnx,uart-width = <0x8>;
 804 +			xlnx,use-uart = <0x1>;
 805 +			xlnx,write-fsl-ports = <0x0>;
 806 +		} ;
 807 +		mpmc@90000000 {
 808 +			#address-cells = <1>;
 809 +			#size-cells = <1>;
 810 +			compatible = "xlnx,mpmc-4.02.a";
 811 +			PIM3: sdma@84600180 {
 812 +				compatible = "xlnx,ll-dma-1.00.a";
 813 +				interrupt-parent = <&xps_intc_0>;
 814 +				interrupts = < 2 2 1 2 >;
 815 +				reg = < 0x84600180 0x80 >;
 816 +			} ;
 817 +		} ;
 818 +		xps_intc_0: interrupt-controller@81800000 {
 819 +			#interrupt-cells = <0x2>;
 820 +			compatible = "xlnx,xps-intc-1.00.a";
 821 +			interrupt-controller ;
 822 +			reg = < 0x81800000 0x10000 >;
 823 +			xlnx,kind-of-intr = <0x100>;
 824 +			xlnx,num-intr-inputs = <0x9>;
 825 +		} ;
 826 +		xps_timer_1: timer@83c00000 {
 827 +			compatible = "xlnx,xps-timer-1.00.a";
 828 +			interrupt-parent = <&xps_intc_0>;
 829 +			interrupts = < 3 2 >;
 830 +			reg = < 0x83c00000 0x10000 >;
 831 +			xlnx,count-width = <0x20>;
 832 +			xlnx,family = "virtex5";
 833 +			xlnx,gen0-assert = <0x1>;
 834 +			xlnx,gen1-assert = <0x1>;
 835 +			xlnx,one-timer-only = <0x0>;
 836 +			xlnx,trig0-assert = <0x1>;
 837 +			xlnx,trig1-assert = <0x1>;
 838 +		} ;
 839 +	} ;
 840 +}  ;
 841 diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
 842 index 5742bb4..5c0a357 100644
 843 --- a/arch/mips/jz4740/board-qi_lb60.c
 844 +++ b/arch/mips/jz4740/board-qi_lb60.c
 845 @@ -5,7 +5,7 @@
 846   *
 847   * Copyright (c) 2009 Qi Hardware inc.,
 848   * Author: Xiangfu Liu <xiangfu@qi-hardware.com>
 849 - * Copyright 2010, Lars-Petrer Clausen <lars@metafoo.de>
 850 + * Copyright 2010, Lars-Peter Clausen <lars@metafoo.de>
 851   *
 852   * This program is free software; you can redistribute it and/or modify
 853   * it under the terms of the GNU General Public License version 2 or later
 854 @@ -235,7 +235,7 @@ static const unsigned int qi_lb60_keypad_rows[] = {
 855  	QI_LB60_GPIO_KEYIN(3),
 856  	QI_LB60_GPIO_KEYIN(4),
 857  	QI_LB60_GPIO_KEYIN(5),
 858 -	QI_LB60_GPIO_KEYIN(7),
 859 +	QI_LB60_GPIO_KEYIN(6),
 860  	QI_LB60_GPIO_KEYIN8,
 861  };
 862  
 863 diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
 864 index df971fa..4896ed0 100644
 865 --- a/arch/parisc/kernel/firmware.c
 866 +++ b/arch/parisc/kernel/firmware.c
 867 @@ -1126,15 +1126,13 @@ int pdc_iodc_print(const unsigned char *str, unsigned count)
 868  	unsigned int i;
 869  	unsigned long flags;
 870  
 871 -	for (i = 0; i < count && i < 79;) {
 872 +	for (i = 0; i < count;) {
 873  		switch(str[i]) {
 874  		case '\n':
 875  			iodc_dbuf[i+0] = '\r';
 876  			iodc_dbuf[i+1] = '\n';
 877  			i += 2;
 878  			goto print;
 879 -		case '\b':	/* BS */
 880 -			i--; /* overwrite last */
 881  		default:
 882  			iodc_dbuf[i] = str[i];
 883  			i++;
 884 @@ -1142,15 +1140,6 @@ int pdc_iodc_print(const unsigned char *str, unsigned count)
 885  		}
 886  	}
 887  
 888 -	/* if we're at the end of line, and not already inserting a newline,
 889 -	 * insert one anyway. iodc console doesn't claim to support >79 char
 890 -	 * lines. don't account for this in the return value.
 891 -	 */
 892 -	if (i == 79 && iodc_dbuf[i-1] != '\n') {
 893 -		iodc_dbuf[i+0] = '\r';
 894 -		iodc_dbuf[i+1] = '\n';
 895 -	}
 896 -
 897  print:
 898          spin_lock_irqsave(&pdc_lock, flags);
 899          real32_call(PAGE0->mem_cons.iodc_io,
 900 diff --git a/arch/powerpc/boot/dts/p1022ds.dts b/arch/powerpc/boot/dts/p1022ds.dts
 901 index 8bcb10b..d086e0f 100644
 902 --- a/arch/powerpc/boot/dts/p1022ds.dts
 903 +++ b/arch/powerpc/boot/dts/p1022ds.dts
 904 @@ -280,13 +280,13 @@
 905  			ranges = <0x0 0xc100 0x200>;
 906  			cell-index = <1>;
 907  			dma00: dma-channel@0 {
 908 -				compatible = "fsl,eloplus-dma-channel";
 909 +				compatible = "fsl,ssi-dma-channel";
 910  				reg = <0x0 0x80>;
 911  				cell-index = <0>;
 912  				interrupts = <76 2>;
 913  			};
 914  			dma01: dma-channel@80 {
 915 -				compatible = "fsl,eloplus-dma-channel";
 916 +				compatible = "fsl,ssi-dma-channel";
 917  				reg = <0x80 0x80>;
 918  				cell-index = <1>;
 919  				interrupts = <77 2>;
 920 diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
 921 index 55cba4a..f8cd9fb 100644
 922 --- a/arch/powerpc/kernel/cpu_setup_6xx.S
 923 +++ b/arch/powerpc/kernel/cpu_setup_6xx.S
 924 @@ -18,7 +18,7 @@
 925  #include <asm/mmu.h>
 926  
 927  _GLOBAL(__setup_cpu_603)
 928 -	mflr	r4
 929 +	mflr	r5
 930  BEGIN_MMU_FTR_SECTION
 931  	li	r10,0
 932  	mtspr	SPRN_SPRG_603_LRU,r10		/* init SW LRU tracking */
 933 @@ -27,60 +27,60 @@ BEGIN_FTR_SECTION
 934  	bl	__init_fpu_registers
 935  END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
 936  	bl	setup_common_caches
 937 -	mtlr	r4
 938 +	mtlr	r5
 939  	blr
 940  _GLOBAL(__setup_cpu_604)
 941 -	mflr	r4
 942 +	mflr	r5
 943  	bl	setup_common_caches
 944  	bl	setup_604_hid0
 945 -	mtlr	r4
 946 +	mtlr	r5
 947  	blr
 948  _GLOBAL(__setup_cpu_750)
 949 -	mflr	r4
 950 +	mflr	r5
 951  	bl	__init_fpu_registers
 952  	bl	setup_common_caches
 953  	bl	setup_750_7400_hid0
 954 -	mtlr	r4
 955 +	mtlr	r5
 956  	blr
 957  _GLOBAL(__setup_cpu_750cx)
 958 -	mflr	r4
 959 +	mflr	r5
 960  	bl	__init_fpu_registers
 961  	bl	setup_common_caches
 962  	bl	setup_750_7400_hid0
 963  	bl	setup_750cx
 964 -	mtlr	r4
 965 +	mtlr	r5
 966  	blr
 967  _GLOBAL(__setup_cpu_750fx)
 968 -	mflr	r4
 969 +	mflr	r5
 970  	bl	__init_fpu_registers
 971  	bl	setup_common_caches
 972  	bl	setup_750_7400_hid0
 973  	bl	setup_750fx
 974 -	mtlr	r4
 975 +	mtlr	r5
 976  	blr
 977  _GLOBAL(__setup_cpu_7400)
 978 -	mflr	r4
 979 +	mflr	r5
 980  	bl	__init_fpu_registers
 981  	bl	setup_7400_workarounds
 982  	bl	setup_common_caches
 983  	bl	setup_750_7400_hid0
 984 -	mtlr	r4
 985 +	mtlr	r5
 986  	blr
 987  _GLOBAL(__setup_cpu_7410)
 988 -	mflr	r4
 989 +	mflr	r5
 990  	bl	__init_fpu_registers
 991  	bl	setup_7410_workarounds
 992  	bl	setup_common_caches
 993  	bl	setup_750_7400_hid0
 994  	li	r3,0
 995  	mtspr	SPRN_L2CR2,r3
 996 -	mtlr	r4
 997 +	mtlr	r5
 998  	blr
 999  _GLOBAL(__setup_cpu_745x)
1000 -	mflr	r4
1001 +	mflr	r5
1002  	bl	setup_common_caches
1003  	bl	setup_745x_specifics
1004 -	mtlr	r4
1005 +	mtlr	r5
1006  	blr
1007  
1008  /* Enable caches for 603's, 604, 750 & 7400 */
1009 @@ -194,10 +194,10 @@ setup_750cx:
1010  	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
1011  	cror	4*cr0+eq,4*cr0+eq,4*cr2+eq
1012  	bnelr
1013 -	lwz	r6,CPU_SPEC_FEATURES(r5)
1014 +	lwz	r6,CPU_SPEC_FEATURES(r4)
1015  	li	r7,CPU_FTR_CAN_NAP
1016  	andc	r6,r6,r7
1017 -	stw	r6,CPU_SPEC_FEATURES(r5)
1018 +	stw	r6,CPU_SPEC_FEATURES(r4)
1019  	blr
1020  
1021  /* 750fx specific
1022 @@ -225,12 +225,12 @@ BEGIN_FTR_SECTION
1023  	andis.	r11,r11,L3CR_L3E@h
1024  	beq	1f
1025  END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
1026 -	lwz	r6,CPU_SPEC_FEATURES(r5)
1027 +	lwz	r6,CPU_SPEC_FEATURES(r4)
1028  	andi.	r0,r6,CPU_FTR_L3_DISABLE_NAP
1029  	beq	1f
1030  	li	r7,CPU_FTR_CAN_NAP
1031  	andc	r6,r6,r7
1032 -	stw	r6,CPU_SPEC_FEATURES(r5)
1033 +	stw	r6,CPU_SPEC_FEATURES(r4)
1034  1:
1035  	mfspr	r11,SPRN_HID0
1036  
1037 diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c
1038 index 8eff48e..3fee685 100644
1039 --- a/arch/powerpc/kernel/ppc970-pmu.c
1040 +++ b/arch/powerpc/kernel/ppc970-pmu.c
1041 @@ -169,9 +169,11 @@ static int p970_marked_instr_event(u64 event)
1042  	switch (unit) {
1043  	case PM_VPU:
1044  		mask = 0x4c;		/* byte 0 bits 2,3,6 */
1045 +		break;
1046  	case PM_LSU0:
1047  		/* byte 2 bits 0,2,3,4,6; all of byte 1 */
1048  		mask = 0x085dff00;
1049 +		break;
1050  	case PM_LSU1L:
1051  		mask = 0x50 << 24;	/* byte 3 bits 4,6 */
1052  		break;
1053 diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
1054 index 09dffe6..1eb64ba 100644
1055 --- a/arch/powerpc/mm/hash_utils_64.c
1056 +++ b/arch/powerpc/mm/hash_utils_64.c
1057 @@ -1122,7 +1122,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
1058  	else
1059  #endif /* CONFIG_PPC_HAS_HASH_64K */
1060  		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
1061 -				    subpage_protection(pgdir, ea));
1062 +				    subpage_protection(mm, ea));
1063  
1064  	/* Dump some info in case of hash insertion failure, they should
1065  	 * never happen so it is really useful to know if/when they do
1066 diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
1067 index 002878c..1537ab3 100644
1068 --- a/arch/powerpc/mm/numa.c
1069 +++ b/arch/powerpc/mm/numa.c
1070 @@ -181,7 +181,7 @@ static void unmap_cpu_from_node(unsigned long cpu)
1071  	dbg("removing cpu %lu from node %d\n", cpu, node);
1072  
1073  	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
1074 -		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
1075 +		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
1076  	} else {
1077  		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
1078  		       cpu, node);
1079 diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
1080 index cf79b46..568b503 100644
1081 --- a/arch/powerpc/platforms/pseries/lpar.c
1082 +++ b/arch/powerpc/platforms/pseries/lpar.c
1083 @@ -680,6 +680,13 @@ EXPORT_SYMBOL(arch_free_page);
1084  /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
1085  extern long hcall_tracepoint_refcount;
1086  
1087 +/*
1088 + * Since the tracing code might execute hcalls we need to guard against
1089 + * recursion. One example of this are spinlocks calling H_YIELD on
1090 + * shared processor partitions.
1091 + */
1092 +static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
1093 +
1094  void hcall_tracepoint_regfunc(void)
1095  {
1096  	hcall_tracepoint_refcount++;
1097 @@ -692,12 +699,42 @@ void hcall_tracepoint_unregfunc(void)
1098  
1099  void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
1100  {
1101 +	unsigned long flags;
1102 +	unsigned int *depth;
1103 +
1104 +	local_irq_save(flags);
1105 +
1106 +	depth = &__get_cpu_var(hcall_trace_depth);
1107 +
1108 +	if (*depth)
1109 +		goto out;
1110 +
1111 +	(*depth)++;
1112  	trace_hcall_entry(opcode, args);
1113 +	(*depth)--;
1114 +
1115 +out:
1116 +	local_irq_restore(flags);
1117  }
1118  
1119  void __trace_hcall_exit(long opcode, unsigned long retval,
1120  			unsigned long *retbuf)
1121  {
1122 +	unsigned long flags;
1123 +	unsigned int *depth;
1124 +
1125 +	local_irq_save(flags);
1126 +
1127 +	depth = &__get_cpu_var(hcall_trace_depth);
1128 +
1129 +	if (*depth)
1130 +		goto out;
1131 +
1132 +	(*depth)++;
1133  	trace_hcall_exit(opcode, retval, retbuf);
1134 +	(*depth)--;
1135 +
1136 +out:
1137 +	local_irq_restore(flags);
1138  }
1139  #endif
1140 diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
1141 index 3017532..b7f02a4 100644
1142 --- a/arch/powerpc/sysdev/fsl_rio.c
1143 +++ b/arch/powerpc/sysdev/fsl_rio.c
1144 @@ -954,7 +954,6 @@ fsl_rio_dbell_handler(int irq, void *dev_instance)
1145  	if (dsr & DOORBELL_DSR_QFI) {
1146  		pr_info("RIO: doorbell queue full\n");
1147  		out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI);
1148 -		goto out;
1149  	}
1150  
1151  	/* XXX Need to check/dispatch until queue empty */
1152 diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
1153 index ac15139..1995c17 100644
1154 --- a/arch/s390/kernel/nmi.c
1155 +++ b/arch/s390/kernel/nmi.c
1156 @@ -95,7 +95,6 @@ EXPORT_SYMBOL_GPL(s390_handle_mcck);
1157  static int notrace s390_revalidate_registers(struct mci *mci)
1158  {
1159  	int kill_task;
1160 -	u64 tmpclock;
1161  	u64 zero;
1162  	void *fpt_save_area, *fpt_creg_save_area;
1163  
1164 @@ -214,11 +213,10 @@ static int notrace s390_revalidate_registers(struct mci *mci)
1165  			: "0", "cc");
1166  #endif
1167  	/* Revalidate clock comparator register */
1168 -	asm volatile(
1169 -		"	stck	0(%1)\n"
1170 -		"	sckc	0(%1)"
1171 -		: "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory");
1172 -
1173 +	if (S390_lowcore.clock_comparator == -1)
1174 +		set_clock_comparator(S390_lowcore.mcck_clock);
1175 +	else
1176 +		set_clock_comparator(S390_lowcore.clock_comparator);
1177  	/* Check if old PSW is valid */
1178  	if (!mci->wp)
1179  		/*
1180 diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
1181 index 3479f1b..c1e326c 100644
1182 --- a/arch/s390/kernel/vtime.c
1183 +++ b/arch/s390/kernel/vtime.c
1184 @@ -19,6 +19,7 @@
1185  #include <linux/kernel_stat.h>
1186  #include <linux/rcupdate.h>
1187  #include <linux/posix-timers.h>
1188 +#include <linux/cpu.h>
1189  
1190  #include <asm/s390_ext.h>
1191  #include <asm/timer.h>
1192 @@ -565,6 +566,23 @@ void init_cpu_vtimer(void)
1193  	__ctl_set_bit(0,10);
1194  }
1195  
1196 +static int __cpuinit s390_nohz_notify(struct notifier_block *self,
1197 +				      unsigned long action, void *hcpu)
1198 +{
1199 +	struct s390_idle_data *idle;
1200 +	long cpu = (long) hcpu;
1201 +
1202 +	idle = &per_cpu(s390_idle, cpu);
1203 +	switch (action) {
1204 +	case CPU_DYING:
1205 +	case CPU_DYING_FROZEN:
1206 +		idle->nohz_delay = 0;
1207 +	default:
1208 +		break;
1209 +	}
1210 +	return NOTIFY_OK;
1211 +}
1212 +
1213  void __init vtime_init(void)
1214  {
1215  	/* request the cpu timer external interrupt */
1216 @@ -573,5 +591,6 @@ void __init vtime_init(void)
1217  
1218  	/* Enable cpu timer interrupts on the boot cpu. */
1219  	init_cpu_vtimer();
1220 +	cpu_notifier(s390_nohz_notify, 0);
1221  }
1222  
1223 diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
1224 index 752b362..7c37ec3 100644
1225 --- a/arch/s390/lib/delay.c
1226 +++ b/arch/s390/lib/delay.c
1227 @@ -29,17 +29,21 @@ static void __udelay_disabled(unsigned long long usecs)
1228  {
1229  	unsigned long mask, cr0, cr0_saved;
1230  	u64 clock_saved;
1231 +	u64 end;
1232  
1233 +	mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
1234 +	end = get_clock() + (usecs << 12);
1235  	clock_saved = local_tick_disable();
1236 -	set_clock_comparator(get_clock() + (usecs << 12));
1237  	__ctl_store(cr0_saved, 0, 0);
1238  	cr0 = (cr0_saved & 0xffff00e0) | 0x00000800;
1239  	__ctl_load(cr0 , 0, 0);
1240 -	mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
1241  	lockdep_off();
1242 -	trace_hardirqs_on();
1243 -	__load_psw_mask(mask);
1244 -	local_irq_disable();
1245 +	do {
1246 +		set_clock_comparator(end);
1247 +		trace_hardirqs_on();
1248 +		__load_psw_mask(mask);
1249 +		local_irq_disable();
1250 +	} while (get_clock() < end);
1251  	lockdep_on();
1252  	__ctl_load(cr0_saved, 0, 0);
1253  	local_tick_enable(clock_saved);
1254 diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
1255 index b237d52..34ba197 100644
1256 --- a/arch/sh/include/asm/io.h
1257 +++ b/arch/sh/include/asm/io.h
1258 @@ -322,7 +322,15 @@ __ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
1259  	 * mapping must be done by the PMB or by using page tables.
1260  	 */
1261  	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
1262 -		if (unlikely(pgprot_val(prot) & _PAGE_CACHABLE))
1263 +		u64 flags = pgprot_val(prot);
1264 +
1265 +		/*
1266 +		 * Anything using the legacy PTEA space attributes needs
1267 +		 * to be kicked down to page table mappings.
1268 +		 */
1269 +		if (unlikely(flags & _PAGE_PCC_MASK))
1270 +			return NULL;
1271 +		if (unlikely(flags & _PAGE_CACHABLE))
1272  			return (void __iomem *)P1SEGADDR(offset);
1273  
1274  		return (void __iomem *)P2SEGADDR(offset);
1275 diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
1276 index e172d69..45743bf 100644
1277 --- a/arch/sh/include/asm/pgtable_32.h
1278 +++ b/arch/sh/include/asm/pgtable_32.h
1279 @@ -76,6 +76,10 @@
1280  /* Wrapper for extended mode pgprot twiddling */
1281  #define _PAGE_EXT(x)		((unsigned long long)(x) << 32)
1282  
1283 +#ifdef CONFIG_X2TLB
1284 +#define _PAGE_PCC_MASK	0x00000000	/* No legacy PTEA support */
1285 +#else
1286 +
1287  /* software: moves to PTEA.TC (Timing Control) */
1288  #define _PAGE_PCC_AREA5	0x00000000	/* use BSC registers for area5 */
1289  #define _PAGE_PCC_AREA6	0x80000000	/* use BSC registers for area6 */
1290 @@ -89,7 +93,8 @@
1291  #define _PAGE_PCC_ATR8	0x60000000	/* Attribute Memory space, 8 bit bus */
1292  #define _PAGE_PCC_ATR16	0x60000001	/* Attribute Memory space, 6 bit bus */
1293  
1294 -#ifndef CONFIG_X2TLB
1295 +#define _PAGE_PCC_MASK	0xe0000001
1296 +
1297  /* copy the ptea attributes */
1298  static inline unsigned long copy_ptea_attributes(unsigned long x)
1299  {
1300 diff --git a/arch/sh/include/asm/syscalls_32.h b/arch/sh/include/asm/syscalls_32.h
1301 index be201fd..ae717e3 100644
1302 --- a/arch/sh/include/asm/syscalls_32.h
1303 +++ b/arch/sh/include/asm/syscalls_32.h
1304 @@ -19,9 +19,10 @@ asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
1305  asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
1306  			 unsigned long r6, unsigned long r7,
1307  			 struct pt_regs __regs);
1308 -asmlinkage int sys_execve(const char __user *ufilename, char __user * __user *uargv,
1309 -			  char __user * __user *uenvp, unsigned long r7,
1310 -			  struct pt_regs __regs);
1311 +asmlinkage int sys_execve(const char __user *ufilename,
1312 +			  const char __user *const __user *uargv,
1313 +			  const char __user *const __user *uenvp,
1314 +			  unsigned long r7, struct pt_regs __regs);
1315  asmlinkage int sys_sigsuspend(old_sigset_t mask, unsigned long r5,
1316  			      unsigned long r6, unsigned long r7,
1317  			      struct pt_regs __regs);
1318 diff --git a/arch/sparc/include/asm/openprom.h b/arch/sparc/include/asm/openprom.h
1319 index 963e1a4..f61a501 100644
1320 --- a/arch/sparc/include/asm/openprom.h
1321 +++ b/arch/sparc/include/asm/openprom.h
1322 @@ -37,7 +37,7 @@ struct linux_dev_v2_funcs {
1323  	int (*v2_dev_open)(char *devpath);
1324  	void (*v2_dev_close)(int d);
1325  	int (*v2_dev_read)(int d, char *buf, int nbytes);
1326 -	int (*v2_dev_write)(int d, char *buf, int nbytes);
1327 +	int (*v2_dev_write)(int d, const char *buf, int nbytes);
1328  	int (*v2_dev_seek)(int d, int hi, int lo);
1329  
1330  	/* Never issued (multistage load support) */
1331 diff --git a/arch/sparc/include/asm/oplib_32.h b/arch/sparc/include/asm/oplib_32.h
1332 index 33e31ce..618a5bd 100644
1333 --- a/arch/sparc/include/asm/oplib_32.h
1334 +++ b/arch/sparc/include/asm/oplib_32.h
1335 @@ -60,25 +60,6 @@ extern char *prom_getbootargs(void);
1336  extern char *prom_mapio(char *virt_hint, int io_space, unsigned int phys_addr, unsigned int num_bytes);
1337  extern void prom_unmapio(char *virt_addr, unsigned int num_bytes);
1338  
1339 -/* Device operations. */
1340 -
1341 -/* Open the device described by the passed string.  Note, that the format
1342 - * of the string is different on V0 vs. V2->higher proms.  The caller must
1343 - * know what he/she is doing!  Returns the device descriptor, an int.
1344 - */
1345 -extern int prom_devopen(char *device_string);
1346 -
1347 -/* Close a previously opened device described by the passed integer
1348 - * descriptor.
1349 - */
1350 -extern int prom_devclose(int device_handle);
1351 -
1352 -/* Do a seek operation on the device described by the passed integer
1353 - * descriptor.
1354 - */
1355 -extern void prom_seek(int device_handle, unsigned int seek_hival,
1356 -		      unsigned int seek_lowval);
1357 -
1358  /* Miscellaneous routines, don't really fit in any category per se. */
1359  
1360  /* Reboot the machine with the command line passed. */
1361 @@ -121,19 +102,8 @@ extern int prom_getrev(void);
1362  /* Get the prom firmware revision. */
1363  extern int prom_getprev(void);
1364  
1365 -/* Character operations to/from the console.... */
1366 -
1367 -/* Non-blocking get character from console. */
1368 -extern int prom_nbgetchar(void);
1369 -
1370 -/* Non-blocking put character to console. */
1371 -extern int prom_nbputchar(char character);
1372 -
1373 -/* Blocking get character from console. */
1374 -extern char prom_getchar(void);
1375 -
1376 -/* Blocking put character to console. */
1377 -extern void prom_putchar(char character);
1378 +/* Write a buffer of characters to the console. */
1379 +extern void prom_console_write_buf(const char *buf, int len);
1380  
1381  /* Prom's internal routines, don't use in kernel/boot code. */
1382  extern void prom_printf(const char *fmt, ...);
1383 @@ -238,7 +208,6 @@ extern int prom_node_has_property(int node, char *property);
1384  extern int prom_setprop(int node, const char *prop_name, char *prop_value,
1385  			int value_size);
1386  
1387 -extern int prom_pathtoinode(char *path);
1388  extern int prom_inst2pkg(int);
1389  
1390  /* Dorking with Bus ranges... */
1391 diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h
1392 index 3e0b2d6..209463d 100644
1393 --- a/arch/sparc/include/asm/oplib_64.h
1394 +++ b/arch/sparc/include/asm/oplib_64.h
1395 @@ -67,27 +67,6 @@ extern void prom_init(void *cif_handler, void *cif_stack);
1396  /* Boot argument acquisition, returns the boot command line string. */
1397  extern char *prom_getbootargs(void);
1398  
1399 -/* Device utilities. */
1400 -
1401 -/* Device operations. */
1402 -
1403 -/* Open the device described by the passed string.  Note, that the format
1404 - * of the string is different on V0 vs. V2->higher proms.  The caller must
1405 - * know what he/she is doing!  Returns the device descriptor, an int.
1406 - */
1407 -extern int prom_devopen(const char *device_string);
1408 -
1409 -/* Close a previously opened device described by the passed integer
1410 - * descriptor.
1411 - */
1412 -extern int prom_devclose(int device_handle);
1413 -
1414 -/* Do a seek operation on the device described by the passed integer
1415 - * descriptor.
1416 - */
1417 -extern void prom_seek(int device_handle, unsigned int seek_hival,
1418 -		      unsigned int seek_lowval);
1419 -
1420  /* Miscellaneous routines, don't really fit in any category per se. */
1421  
1422  /* Reboot the machine with the command line passed. */
1423 @@ -109,33 +88,14 @@ extern void prom_halt(void) __attribute__ ((noreturn));
1424  /* Halt and power-off the machine. */
1425  extern void prom_halt_power_off(void) __attribute__ ((noreturn));
1426  
1427 -/* Set the PROM 'sync' callback function to the passed function pointer.
1428 - * When the user gives the 'sync' command at the prom prompt while the
1429 - * kernel is still active, the prom will call this routine.
1430 - *
1431 - */
1432 -typedef int (*callback_func_t)(long *cmd);
1433 -extern void prom_setcallback(callback_func_t func_ptr);
1434 -
1435  /* Acquire the IDPROM of the root node in the prom device tree.  This
1436   * gets passed a buffer where you would like it stuffed.  The return value
1437   * is the format type of this idprom or 0xff on error.
1438   */
1439  extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
1440  
1441 -/* Character operations to/from the console.... */
1442 -
1443 -/* Non-blocking get character from console. */
1444 -extern int prom_nbgetchar(void);
1445 -
1446 -/* Non-blocking put character to console. */
1447 -extern int prom_nbputchar(char character);
1448 -
1449 -/* Blocking get character from console. */
1450 -extern char prom_getchar(void);
1451 -
1452 -/* Blocking put character to console. */
1453 -extern void prom_putchar(char character);
1454 +/* Write a buffer of characters to the console. */
1455 +extern void prom_console_write_buf(const char *buf, int len);
1456  
1457  /* Prom's internal routines, don't use in kernel/boot code. */
1458  extern void prom_printf(const char *fmt, ...);
1459 @@ -278,9 +238,7 @@ extern int prom_finddevice(const char *name);
1460  extern int prom_setprop(int node, const char *prop_name, char *prop_value,
1461  			int value_size);
1462  
1463 -extern int prom_pathtoinode(const char *path);
1464  extern int prom_inst2pkg(int);
1465 -extern int prom_service_exists(const char *service_name);
1466  extern void prom_sun4v_guest_soft_state(void);
1467  
1468  extern int prom_ihandle2path(int handle, char *buffer, int bufsize);
1469 diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
1470 index 6a7b4db..dcefd22 100644
1471 --- a/arch/sparc/kernel/leon_kernel.c
1472 +++ b/arch/sparc/kernel/leon_kernel.c
1473 @@ -114,7 +114,7 @@ void __init leon_init_timers(irq_handler_t counter_fn)
1474  	if (leon3_gptimer_regs && leon3_irqctrl_regs) {
1475  		LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].val, 0);
1476  		LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].rld,
1477 -				      (((1000000 / 100) - 1)));
1478 +				      (((1000000 / HZ) - 1)));
1479  		LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, 0);
1480  
1481  #ifdef CONFIG_SMP
1482 @@ -128,7 +128,7 @@ void __init leon_init_timers(irq_handler_t counter_fn)
1483  		}
1484  
1485  		LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].val, 0);
1486 -		LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].rld, (((1000000/100) - 1)));
1487 +		LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].rld, (((1000000/HZ) - 1)));
1488  		LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].ctrl, 0);
1489  # endif
1490  
1491 diff --git a/arch/sparc/prom/Makefile b/arch/sparc/prom/Makefile
1492 index 1b8c073..816c0fa 100644
1493 --- a/arch/sparc/prom/Makefile
1494 +++ b/arch/sparc/prom/Makefile
1495 @@ -6,7 +6,6 @@ ccflags := -Werror
1496  
1497  lib-y                 := bootstr_$(BITS).o
1498  lib-$(CONFIG_SPARC32) += devmap.o
1499 -lib-y                 += devops_$(BITS).o
1500  lib-y                 += init_$(BITS).o
1501  lib-$(CONFIG_SPARC32) += memory.o
1502  lib-y                 += misc_$(BITS).o
1503 diff --git a/arch/sparc/prom/console_32.c b/arch/sparc/prom/console_32.c
1504 index 5340264..4886310 100644
1505 --- a/arch/sparc/prom/console_32.c
1506 +++ b/arch/sparc/prom/console_32.c
1507 @@ -16,63 +16,26 @@
1508  
1509  extern void restore_current(void);
1510  
1511 -/* Non blocking get character from console input device, returns -1
1512 - * if no input was taken.  This can be used for polling.
1513 - */
1514 -int
1515 -prom_nbgetchar(void)
1516 -{
1517 -	static char inc;
1518 -	int i = -1;
1519 -	unsigned long flags;
1520 -
1521 -	spin_lock_irqsave(&prom_lock, flags);
1522 -	switch(prom_vers) {
1523 -	case PROM_V0:
1524 -		i = (*(romvec->pv_nbgetchar))();
1525 -		break;
1526 -	case PROM_V2:
1527 -	case PROM_V3:
1528 -		if( (*(romvec->pv_v2devops).v2_dev_read)(*romvec->pv_v2bootargs.fd_stdin , &inc, 0x1) == 1) {
1529 -			i = inc;
1530 -		} else {
1531 -			i = -1;
1532 -		}
1533 -		break;
1534 -	default:
1535 -		i = -1;
1536 -		break;
1537 -	};
1538 -	restore_current();
1539 -	spin_unlock_irqrestore(&prom_lock, flags);
1540 -	return i; /* Ugh, we could spin forever on unsupported proms ;( */
1541 -}
1542 -
1543  /* Non blocking put character to console device, returns -1 if
1544   * unsuccessful.
1545   */
1546 -int
1547 -prom_nbputchar(char c)
1548 +static int prom_nbputchar(const char *buf)
1549  {
1550 -	static char outc;
1551  	unsigned long flags;
1552  	int i = -1;
1553  
1554  	spin_lock_irqsave(&prom_lock, flags);
1555  	switch(prom_vers) {
1556  	case PROM_V0:
1557 -		i = (*(romvec->pv_nbputchar))(c);
1558 +		i = (*(romvec->pv_nbputchar))(*buf);
1559  		break;
1560  	case PROM_V2:
1561  	case PROM_V3:
1562 -		outc = c;
1563 -		if( (*(romvec->pv_v2devops).v2_dev_write)(*romvec->pv_v2bootargs.fd_stdout, &outc, 0x1) == 1)
1564 +		if ((*(romvec->pv_v2devops).v2_dev_write)(*romvec->pv_v2bootargs.fd_stdout,
1565 +							  buf, 0x1) == 1)
1566  			i = 0;
1567 -		else
1568 -			i = -1;
1569  		break;
1570  	default:
1571 -		i = -1;
1572  		break;
1573  	};
1574  	restore_current();
1575 @@ -80,18 +43,14 @@ prom_nbputchar(char c)
1576  	return i; /* Ugh, we could spin forever on unsupported proms ;( */
1577  }
1578  
1579 -/* Blocking version of get character routine above. */
1580 -char
1581 -prom_getchar(void)
1582 +void prom_console_write_buf(const char *buf, int len)
1583  {
1584 -	int character;
1585 -	while((character = prom_nbgetchar()) == -1) ;
1586 -	return (char) character;
1587 +	while (len) {
1588 +		int n = prom_nbputchar(buf);
1589 +		if (n)
1590 +			continue;
1591 +		len--;
1592 +		buf++;
1593 +	}
1594  }
1595  
1596 -/* Blocking version of put character routine above. */
1597 -void
1598 -prom_putchar(char c)
1599 -{
1600 -	while(prom_nbputchar(c) == -1) ;
1601 -}
1602 diff --git a/arch/sparc/prom/console_64.c b/arch/sparc/prom/console_64.c
1603 index 10322dc..ed39e75 100644
1604 --- a/arch/sparc/prom/console_64.c
1605 +++ b/arch/sparc/prom/console_64.c
1606 @@ -15,85 +15,34 @@
1607  
1608  extern int prom_stdin, prom_stdout;
1609  
1610 -/* Non blocking get character from console input device, returns -1
1611 - * if no input was taken.  This can be used for polling.
1612 - */
1613 -inline int
1614 -prom_nbgetchar(void)
1615 -{
1616 -	unsigned long args[7];
1617 -	char inc;
1618 -
1619 -	args[0] = (unsigned long) "read";
1620 -	args[1] = 3;
1621 -	args[2] = 1;
1622 -	args[3] = (unsigned int) prom_stdin;
1623 -	args[4] = (unsigned long) &inc;
1624 -	args[5] = 1;
1625 -	args[6] = (unsigned long) -1;
1626 -
1627 -	p1275_cmd_direct(args);
1628 -
1629 -	if (args[6] == 1)
1630 -		return inc;
1631 -	return -1;
1632 -}
1633 -
1634 -/* Non blocking put character to console device, returns -1 if
1635 - * unsuccessful.
1636 - */
1637 -inline int
1638 -prom_nbputchar(char c)
1639 +static int __prom_console_write_buf(const char *buf, int len)
1640  {
1641  	unsigned long args[7];
1642 -	char outc;
1643 -	
1644 -	outc = c;
1645 +	int ret;
1646  
1647  	args[0] = (unsigned long) "write";
1648  	args[1] = 3;
1649  	args[2] = 1;
1650  	args[3] = (unsigned int) prom_stdout;
1651 -	args[4] = (unsigned long) &outc;
1652 -	args[5] = 1;
1653 +	args[4] = (unsigned long) buf;
1654 +	args[5] = (unsigned int) len;
1655  	args[6] = (unsigned long) -1;
1656  
1657  	p1275_cmd_direct(args);
1658  
1659 -	if (args[6] == 1)
1660 -		return 0;
1661 -	else
1662 +	ret = (int) args[6];
1663 +	if (ret < 0)
1664  		return -1;
1665 +	return ret;
1666  }
1667  
1668 -/* Blocking version of get character routine above. */
1669 -char
1670 -prom_getchar(void)
1671 -{
1672 -	int character;
1673 -	while((character = prom_nbgetchar()) == -1) ;
1674 -	return (char) character;
1675 -}
1676 -
1677 -/* Blocking version of put character routine above. */
1678 -void
1679 -prom_putchar(char c)
1680 +void prom_console_write_buf(const char *buf, int len)
1681  {
1682 -	prom_nbputchar(c);
1683 -}
1684 -
1685 -void
1686 -prom_puts(const char *s, int len)
1687 -{
1688 -	unsigned long args[7];
1689 -
1690 -	args[0] = (unsigned long) "write";
1691 -	args[1] = 3;
1692 -	args[2] = 1;
1693 -	args[3] = (unsigned int) prom_stdout;
1694 -	args[4] = (unsigned long) s;
1695 -	args[5] = len;
1696 -	args[6] = (unsigned long) -1;
1697 -
1698 -	p1275_cmd_direct(args);
1699 +	while (len) {
1700 +		int n = __prom_console_write_buf(buf, len);
1701 +		if (n < 0)
1702 +			continue;
1703 +		len -= n;
1704 +		buf += len;
1705 +	}
1706  }
1707 diff --git a/arch/sparc/prom/misc_64.c b/arch/sparc/prom/misc_64.c
1708 index 6cb1581..2fdcebf 100644
1709 --- a/arch/sparc/prom/misc_64.c
1710 +++ b/arch/sparc/prom/misc_64.c
1711 @@ -18,7 +18,7 @@
1712  #include <asm/system.h>
1713  #include <asm/ldc.h>
1714  
1715 -int prom_service_exists(const char *service_name)
1716 +static int prom_service_exists(const char *service_name)
1717  {
1718  	unsigned long args[5];
1719  
1720 @@ -150,20 +150,6 @@ void prom_halt_power_off(void)
1721  	prom_halt();
1722  }
1723  
1724 -/* Set prom sync handler to call function 'funcp'. */
1725 -void prom_setcallback(callback_func_t funcp)
1726 -{
1727 -	unsigned long args[5];
1728 -	if (!funcp)
1729 -		return;
1730 -	args[0] = (unsigned long) "set-callback";
1731 -	args[1] = 1;
1732 -	args[2] = 1;
1733 -	args[3] = (unsigned long) funcp;
1734 -	args[4] = (unsigned long) -1;
1735 -	p1275_cmd_direct(args);
1736 -}
1737 -
1738  /* Get the idprom and stuff it into buffer 'idbuf'.  Returns the
1739   * format type.  'num_bytes' is the number of bytes that your idbuf
1740   * has space for.  Returns 0xff on error.
1741 diff --git a/arch/sparc/prom/printf.c b/arch/sparc/prom/printf.c
1742 index ca86926..d9682f0 100644
1743 --- a/arch/sparc/prom/printf.c
1744 +++ b/arch/sparc/prom/printf.c
1745 @@ -15,22 +15,45 @@
1746  
1747  #include <linux/kernel.h>
1748  #include <linux/compiler.h>
1749 +#include <linux/spinlock.h>
1750  
1751  #include <asm/openprom.h>
1752  #include <asm/oplib.h>
1753  
1754 +#define CONSOLE_WRITE_BUF_SIZE	1024
1755 +
1756  static char ppbuf[1024];
1757 +static char console_write_buf[CONSOLE_WRITE_BUF_SIZE];
1758 +static DEFINE_RAW_SPINLOCK(console_write_lock);
1759  
1760  void notrace prom_write(const char *buf, unsigned int n)
1761  {
1762 -	char ch;
1763 +	unsigned int dest_len;
1764 +	unsigned long flags;
1765 +	char *dest;
1766 +
1767 +	dest = console_write_buf;
1768 +	raw_spin_lock_irqsave(&console_write_lock, flags);
1769  
1770 -	while (n != 0) {
1771 -		--n;
1772 -		if ((ch = *buf++) == '\n')
1773 -			prom_putchar('\r');
1774 -		prom_putchar(ch);
1775 +	dest_len = 0;
1776 +	while (n-- != 0) {
1777 +		char ch = *buf++;
1778 +		if (ch == '\n') {
1779 +			*dest++ = '\r';
1780 +			dest_len++;
1781 +		}
1782 +		*dest++ = ch;
1783 +		dest_len++;
1784 +		if (dest_len >= CONSOLE_WRITE_BUF_SIZE - 1) {
1785 +			prom_console_write_buf(console_write_buf, dest_len);
1786 +			dest = console_write_buf;
1787 +			dest_len = 0;
1788 +		}
1789  	}
1790 +	if (dest_len)
1791 +		prom_console_write_buf(console_write_buf, dest_len);
1792 +
1793 +	raw_spin_unlock_irqrestore(&console_write_lock, flags);
1794  }
1795  
1796  void notrace prom_printf(const char *fmt, ...)
1797 diff --git a/arch/sparc/prom/tree_32.c b/arch/sparc/prom/tree_32.c
1798 index b21592f..71e7f08 100644
1799 --- a/arch/sparc/prom/tree_32.c
1800 +++ b/arch/sparc/prom/tree_32.c
1801 @@ -341,18 +341,3 @@ int prom_inst2pkg(int inst)
1802  	if (node == -1) return 0;
1803  	return node;
1804  }
1805 -
1806 -/* Return 'node' assigned to a particular prom 'path'
1807 - * FIXME: Should work for v0 as well
1808 - */
1809 -int prom_pathtoinode(char *path)
1810 -{
1811 -	int node, inst;
1812 -	
1813 -	inst = prom_devopen (path);
1814 -	if (inst == -1) return 0;
1815 -	node = prom_inst2pkg (inst);
1816 -	prom_devclose (inst);
1817 -	if (node == -1) return 0;
1818 -	return node;
1819 -}
1820 diff --git a/arch/sparc/prom/tree_64.c b/arch/sparc/prom/tree_64.c
1821 index 9d3f913..8327b1b 100644
1822 --- a/arch/sparc/prom/tree_64.c
1823 +++ b/arch/sparc/prom/tree_64.c
1824 @@ -374,24 +374,6 @@ inline int prom_inst2pkg(int inst)
1825  	return node;
1826  }
1827  
1828 -/* Return 'node' assigned to a particular prom 'path'
1829 - * FIXME: Should work for v0 as well
1830 - */
1831 -int
1832 -prom_pathtoinode(const char *path)
1833 -{
1834 -	int node, inst;
1835 -
1836 -	inst = prom_devopen (path);
1837 -	if (inst == 0)
1838 -		return 0;
1839 -	node = prom_inst2pkg(inst);
1840 -	prom_devclose(inst);
1841 -	if (node == -1)
1842 -		return 0;
1843 -	return node;
1844 -}
1845 -
1846  int prom_ihandle2path(int handle, char *buffer, int bufsize)
1847  {
1848  	unsigned long args[7];
1849 diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
1850 index 84c2911..aaf6282 100644
1851 --- a/arch/tile/kernel/process.c
1852 +++ b/arch/tile/kernel/process.c
1853 @@ -212,6 +212,13 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
1854  	childregs->sp = sp;  /* override with new user stack pointer */
1855  
1856  	/*
1857 +	 * If CLONE_SETTLS is set, set "tp" in the new task to "r4",
1858 +	 * which is passed in as arg #5 to sys_clone().
1859 +	 */
1860 +	if (clone_flags & CLONE_SETTLS)
1861 +		childregs->tp = regs->regs[4];
1862 +
1863 +	/*
1864  	 * Copy the callee-saved registers from the passed pt_regs struct
1865  	 * into the context-switch callee-saved registers area.
1866  	 * We have to restore the callee-saved registers since we may
1867 diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
1868 index 7f7338c..1664cce 100644
1869 --- a/arch/um/drivers/line.c
1870 +++ b/arch/um/drivers/line.c
1871 @@ -727,6 +727,9 @@ struct winch {
1872  
1873  static void free_winch(struct winch *winch, int free_irq_ok)
1874  {
1875 +	if (free_irq_ok)
1876 +		free_irq(WINCH_IRQ, winch);
1877 +
1878  	list_del(&winch->list);
1879  
1880  	if (winch->pid != -1)
1881 @@ -735,8 +738,6 @@ static void free_winch(struct winch *winch, int free_irq_ok)
1882  		os_close_file(winch->fd);
1883  	if (winch->stack != 0)
1884  		free_stack(winch->stack, 0);
1885 -	if (free_irq_ok)
1886 -		free_irq(WINCH_IRQ, winch);
1887  	kfree(winch);
1888  }
1889  
1890 diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
1891 index ec63785..9a873d7 100644
1892 --- a/arch/um/kernel/uml.lds.S
1893 +++ b/arch/um/kernel/uml.lds.S
1894 @@ -22,7 +22,7 @@ SECTIONS
1895    _text = .;
1896    _stext = .;
1897    __init_begin = .;
1898 -  INIT_TEXT_SECTION(PAGE_SIZE)
1899 +  INIT_TEXT_SECTION(0)
1900    . = ALIGN(PAGE_SIZE);
1901  
1902    .text      :
1903 diff --git a/arch/um/os-Linux/time.c b/arch/um/os-Linux/time.c
1904 index dec5678..6e3359d 100644
1905 --- a/arch/um/os-Linux/time.c
1906 +++ b/arch/um/os-Linux/time.c
1907 @@ -60,7 +60,7 @@ static inline long long timeval_to_ns(const struct timeval *tv)
1908  long long disable_timer(void)
1909  {
1910  	struct itimerval time = ((struct itimerval) { { 0, 0 }, { 0, 0 } });
1911 -	int remain, max = UM_NSEC_PER_SEC / UM_HZ;
1912 +	long long remain, max = UM_NSEC_PER_SEC / UM_HZ;
1913  
1914  	if (setitimer(ITIMER_VIRTUAL, &time, &time) < 0)
1915  		printk(UM_KERN_ERR "disable_timer - setitimer failed, "
1916 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
1917 index 3f76523..f857bd3 100644
1918 --- a/arch/x86/include/asm/cpufeature.h
1919 +++ b/arch/x86/include/asm/cpufeature.h
1920 @@ -152,7 +152,7 @@
1921  #define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
1922  #define X86_FEATURE_OSVW	(6*32+ 9) /* OS Visible Workaround */
1923  #define X86_FEATURE_IBS		(6*32+10) /* Instruction Based Sampling */
1924 -#define X86_FEATURE_SSE5	(6*32+11) /* SSE-5 */
1925 +#define X86_FEATURE_XOP		(6*32+11) /* extended AVX instructions */
1926  #define X86_FEATURE_SKINIT	(6*32+12) /* SKINIT/STGI instructions */
1927  #define X86_FEATURE_WDT		(6*32+13) /* Watchdog timer */
1928  #define X86_FEATURE_NODEID_MSR	(6*32+19) /* NodeId MSR */
1929 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
1930 index 30a3e97..6a45ec4 100644
1931 --- a/arch/x86/include/asm/io.h
1932 +++ b/arch/x86/include/asm/io.h
1933 @@ -206,6 +206,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
1934  
1935  extern void iounmap(volatile void __iomem *addr);
1936  
1937 +extern void set_iounmap_nonlazy(void);
1938  
1939  #ifdef __KERNEL__
1940  
1941 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
1942 index c52e2eb..6986312 100644
1943 --- a/arch/x86/include/asm/kvm_host.h
1944 +++ b/arch/x86/include/asm/kvm_host.h
1945 @@ -79,7 +79,7 @@
1946  #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
1947  #define KVM_MIN_FREE_MMU_PAGES 5
1948  #define KVM_REFILL_PAGES 25
1949 -#define KVM_MAX_CPUID_ENTRIES 40
1950 +#define KVM_MAX_CPUID_ENTRIES 80
1951  #define KVM_NR_FIXED_MTRR_REGION 88
1952  #define KVM_NR_VAR_MTRR 8
1953  
1954 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
1955 index 4a2d4e0..8b5393e 100644
1956 --- a/arch/x86/include/asm/mmu_context.h
1957 +++ b/arch/x86/include/asm/mmu_context.h
1958 @@ -36,8 +36,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
1959  	unsigned cpu = smp_processor_id();
1960  
1961  	if (likely(prev != next)) {
1962 -		/* stop flush ipis for the previous mm */
1963 -		cpumask_clear_cpu(cpu, mm_cpumask(prev));
1964  #ifdef CONFIG_SMP
1965  		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
1966  		percpu_write(cpu_tlbstate.active_mm, next);
1967 @@ -47,6 +45,9 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
1968  		/* Re-load page tables */
1969  		load_cr3(next->pgd);
1970  
1971 +		/* stop flush ipis for the previous mm */
1972 +		cpumask_clear_cpu(cpu, mm_cpumask(prev));
1973 +
1974  		/*
1975  		 * load the LDT, if the LDT is different:
1976  		 */
1977 diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h
1978 index 1635074..33fc296 100644
1979 --- a/arch/x86/include/asm/mrst.h
1980 +++ b/arch/x86/include/asm/mrst.h
1981 @@ -26,7 +26,7 @@ enum mrst_cpu_type {
1982  };
1983  
1984  extern enum mrst_cpu_type __mrst_cpu_chip;
1985 -static enum mrst_cpu_type mrst_identify_cpu(void)
1986 +static inline enum mrst_cpu_type mrst_identify_cpu(void)
1987  {
1988  	return __mrst_cpu_chip;
1989  }
1990 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
1991 index ebaa04a..37ea41c 100644
1992 --- a/arch/x86/include/asm/processor.h
1993 +++ b/arch/x86/include/asm/processor.h
1994 @@ -768,29 +768,6 @@ extern unsigned long		idle_halt;
1995  extern unsigned long		idle_nomwait;
1996  extern bool			c1e_detected;
1997  
1998 -/*
1999 - * on systems with caches, caches must be flashed as the absolute
2000 - * last instruction before going into a suspended halt.  Otherwise,
2001 - * dirty data can linger in the cache and become stale on resume,
2002 - * leading to strange errors.
2003 - *
2004 - * perform a variety of operations to guarantee that the compiler
2005 - * will not reorder instructions.  wbinvd itself is serializing
2006 - * so the processor will not reorder.
2007 - *
2008 - * Systems without cache can just go into halt.
2009 - */
2010 -static inline void wbinvd_halt(void)
2011 -{
2012 -	mb();
2013 -	/* check for clflush to determine if wbinvd is legal */
2014 -	if (cpu_has_clflush)
2015 -		asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory");
2016 -	else
2017 -		while (1)
2018 -			halt();
2019 -}
2020 -
2021  extern void enable_sep_cpu(void);
2022  extern int sysenter_setup(void);
2023  
2024 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
2025 index 4cfc908..4c2f63c 100644
2026 --- a/arch/x86/include/asm/smp.h
2027 +++ b/arch/x86/include/asm/smp.h
2028 @@ -50,7 +50,7 @@ struct smp_ops {
2029  	void (*smp_prepare_cpus)(unsigned max_cpus);
2030  	void (*smp_cpus_done)(unsigned max_cpus);
2031  
2032 -	void (*smp_send_stop)(void);
2033 +	void (*stop_other_cpus)(int wait);
2034  	void (*smp_send_reschedule)(int cpu);
2035  
2036  	int (*cpu_up)(unsigned cpu);
2037 @@ -73,7 +73,12 @@ extern struct smp_ops smp_ops;
2038  
2039  static inline void smp_send_stop(void)
2040  {
2041 -	smp_ops.smp_send_stop();
2042 +	smp_ops.stop_other_cpus(0);
2043 +}
2044 +
2045 +static inline void stop_other_cpus(void)
2046 +{
2047 +	smp_ops.stop_other_cpus(1);
2048  }
2049  
2050  static inline void smp_prepare_boot_cpu(void)
2051 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
2052 index e3b534c..e0f220e 100644
2053 --- a/arch/x86/kernel/apic/apic.c
2054 +++ b/arch/x86/kernel/apic/apic.c
2055 @@ -1340,6 +1340,14 @@ void __cpuinit end_local_APIC_setup(void)
2056  
2057  	setup_apic_nmi_watchdog(NULL);
2058  	apic_pm_activate();
2059 +
2060 +	/*
2061 +	 * Now that local APIC setup is completed for BP, configure the fault
2062 +	 * handling for interrupt remapping.
2063 +	 */
2064 +	if (!smp_processor_id() && intr_remapping_enabled)
2065 +		enable_drhd_fault_handling();
2066 +
2067  }
2068  
2069  #ifdef CONFIG_X86_X2APIC
2070 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
2071 index 5c5b8f3..4d90327 100644
2072 --- a/arch/x86/kernel/apic/io_apic.c
2073 +++ b/arch/x86/kernel/apic/io_apic.c
2074 @@ -1397,6 +1397,7 @@ int setup_ioapic_entry(int apic_id, int irq,
2075  		irte.dlvry_mode = apic->irq_delivery_mode;
2076  		irte.vector = vector;
2077  		irte.dest_id = IRTE_DEST(destination);
2078 +		irte.redir_hint = 1;
2079  
2080  		/* Set source-id of interrupt request */
2081  		set_ioapic_sid(&irte, apic_id);
2082 @@ -3348,6 +3349,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
2083  		irte.dlvry_mode = apic->irq_delivery_mode;
2084  		irte.vector = cfg->vector;
2085  		irte.dest_id = IRTE_DEST(dest);
2086 +		irte.redir_hint = 1;
2087  
2088  		/* Set source-id of interrupt request */
2089  		if (pdev)
2090 @@ -3624,6 +3626,7 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
2091  	msg.data |= MSI_DATA_VECTOR(cfg->vector);
2092  	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
2093  	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
2094 +	msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest);
2095  
2096  	dmar_msi_write(irq, &msg);
2097  
2098 diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
2099 index 83e9be4..fac49a8 100644
2100 --- a/arch/x86/kernel/apic/probe_64.c
2101 +++ b/arch/x86/kernel/apic/probe_64.c
2102 @@ -76,13 +76,6 @@ void __init default_setup_apic_routing(void)
2103  		/* need to update phys_pkg_id */
2104  		apic->phys_pkg_id = apicid_phys_pkg_id;
2105  	}
2106 -
2107 -	/*
2108 -	 * Now that apic routing model is selected, configure the
2109 -	 * fault handling for intr remapping.
2110 -	 */
2111 -	if (intr_remapping_enabled)
2112 -		enable_drhd_fault_handling();
2113  }
2114  
2115  /* Same for both flat and physical. */
2116 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
2117 index ba5f62f..81fa3cb 100644
2118 --- a/arch/x86/kernel/cpu/amd.c
2119 +++ b/arch/x86/kernel/cpu/amd.c
2120 @@ -305,8 +305,7 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
2121  	/* use socket ID also for last level cache */
2122  	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
2123  	/* fixup topology information on multi-node processors */
2124 -	if ((c->x86 == 0x10) && (c->x86_model == 9))
2125 -		amd_fixup_dcm(c);
2126 +	amd_fixup_dcm(c);
2127  #endif
2128  }
2129  
2130 diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
2131 index cd8da24..a2baafb 100644
2132 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
2133 +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
2134 @@ -701,6 +701,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
2135  		per_cpu(acfreq_data, policy->cpu) = NULL;
2136  		acpi_processor_unregister_performance(data->acpi_data,
2137  						      policy->cpu);
2138 +		kfree(data->freq_table);
2139  		kfree(data);
2140  	}
2141  
2142 diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
2143 index c5f59d0..ac140c7 100644
2144 --- a/arch/x86/kernel/cpu/mtrr/cleanup.c
2145 +++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
2146 @@ -827,7 +827,7 @@ int __init amd_special_default_mtrr(void)
2147  
2148  	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
2149  		return 0;
2150 -	if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
2151 +	if (boot_cpu_data.x86 < 0xf)
2152  		return 0;
2153  	/* In case some hypervisor doesn't pass SYSCFG through: */
2154  	if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
2155 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
2156 index 01c0f3e..bebabec 100644
2157 --- a/arch/x86/kernel/cpu/mtrr/main.c
2158 +++ b/arch/x86/kernel/cpu/mtrr/main.c
2159 @@ -793,13 +793,21 @@ void set_mtrr_aps_delayed_init(void)
2160  }
2161  
2162  /*
2163 - * MTRR initialization for all AP's
2164 + * Delayed MTRR initialization for all AP's
2165   */
2166  void mtrr_aps_init(void)
2167  {
2168  	if (!use_intel())
2169  		return;
2170  
2171 +	/*
2172 +	 * Check if someone has requested the delay of AP MTRR initialization,
2173 +	 * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
2174 +	 * then we are done.
2175 +	 */
2176 +	if (!mtrr_aps_delayed_init)
2177 +		return;
2178 +
2179  	set_mtrr(~0U, 0, 0, 0);
2180  	mtrr_aps_delayed_init = false;
2181  }
2182 diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
2183 index c2897b7..46d5844 100644
2184 --- a/arch/x86/kernel/cpu/perf_event_amd.c
2185 +++ b/arch/x86/kernel/cpu/perf_event_amd.c
2186 @@ -52,7 +52,7 @@ static __initconst const u64 amd_hw_cache_event_ids
2187   [ C(DTLB) ] = {
2188  	[ C(OP_READ) ] = {
2189  		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
2190 -		[ C(RESULT_MISS)   ] = 0x0046, /* L1 DTLB and L2 DLTB Miss   */
2191 +		[ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DLTB_MISS.ALL */
2192  	},
2193  	[ C(OP_WRITE) ] = {
2194  		[ C(RESULT_ACCESS) ] = 0,
2195 @@ -66,7 +66,7 @@ static __initconst const u64 amd_hw_cache_event_ids
2196   [ C(ITLB) ] = {
2197  	[ C(OP_READ) ] = {
2198  		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
2199 -		[ C(RESULT_MISS)   ] = 0x0085, /* Instr. fetch ITLB misses   */
2200 +		[ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
2201  	},
2202  	[ C(OP_WRITE) ] = {
2203  		[ C(RESULT_ACCESS) ] = -1,
2204 diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
2205 index 045b36c..9948288 100644
2206 --- a/arch/x86/kernel/crash_dump_64.c
2207 +++ b/arch/x86/kernel/crash_dump_64.c
2208 @@ -34,7 +34,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
2209  	if (!csize)
2210  		return 0;
2211  
2212 -	vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
2213 +	vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
2214  	if (!vaddr)
2215  		return -ENOMEM;
2216  
2217 @@ -46,6 +46,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
2218  	} else
2219  		memcpy(buf, vaddr + offset, csize);
2220  
2221 +	set_iounmap_nonlazy();
2222  	iounmap(vaddr);
2223  	return csize;
2224  }
2225 diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
2226 index ff15c9d..42c5942 100644
2227 --- a/arch/x86/kernel/hw_breakpoint.c
2228 +++ b/arch/x86/kernel/hw_breakpoint.c
2229 @@ -433,6 +433,10 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
2230  	dr6_p = (unsigned long *)ERR_PTR(args->err);
2231  	dr6 = *dr6_p;
2232  
2233 +	/* If it's a single step, TRAP bits are random */
2234 +	if (dr6 & DR_STEP)
2235 +		return NOTIFY_DONE;
2236 +
2237  	/* Do an early return if no trap bits are set in DR6 */
2238  	if ((dr6 & DR_TRAP_BITS) == 0)
2239  		return NOTIFY_DONE;
2240 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
2241 index 3561702..2573689 100644
2242 --- a/arch/x86/kernel/microcode_intel.c
2243 +++ b/arch/x86/kernel/microcode_intel.c
2244 @@ -364,8 +364,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
2245  
2246  		/* For performance reasons, reuse mc area when possible */
2247  		if (!mc || mc_size > curr_mc_size) {
2248 -			if (mc)
2249 -				vfree(mc);
2250 +			vfree(mc);
2251  			mc = vmalloc(mc_size);
2252  			if (!mc)
2253  				break;
2254 @@ -374,13 +373,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
2255  
2256  		if (get_ucode_data(mc, ucode_ptr, mc_size) ||
2257  		    microcode_sanity_check(mc) < 0) {
2258 -			vfree(mc);
2259  			break;
2260  		}
2261  
2262  		if (get_matching_microcode(&uci->cpu_sig, mc, new_rev)) {
2263 -			if (new_mc)
2264 -				vfree(new_mc);
2265 +			vfree(new_mc);
2266  			new_rev = mc_header.rev;
2267  			new_mc  = mc;
2268  			mc = NULL;	/* trigger new vmalloc */
2269 @@ -390,12 +387,10 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
2270  		leftover  -= mc_size;
2271  	}
2272  
2273 -	if (mc)
2274 -		vfree(mc);
2275 +	vfree(mc);
2276  
2277  	if (leftover) {
2278 -		if (new_mc)
2279 -			vfree(new_mc);
2280 +		vfree(new_mc);
2281  		state = UCODE_ERROR;
2282  		goto out;
2283  	}
2284 @@ -405,8 +400,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
2285  		goto out;
2286  	}
2287  
2288 -	if (uci->mc)
2289 -		vfree(uci->mc);
2290 +	vfree(uci->mc);
2291  	uci->mc = (struct microcode_intel *)new_mc;
2292  
2293  	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
2294 diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c
2295 index 0e0cdde..a2bd899 100644
2296 --- a/arch/x86/kernel/olpc.c
2297 +++ b/arch/x86/kernel/olpc.c
2298 @@ -114,6 +114,7 @@ int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen,
2299  	unsigned long flags;
2300  	int ret = -EIO;
2301  	int i;
2302 +	int restarts = 0;
2303  
2304  	spin_lock_irqsave(&ec_lock, flags);
2305  
2306 @@ -169,7 +170,9 @@ restart:
2307  			if (wait_on_obf(0x6c, 1)) {
2308  				printk(KERN_ERR "olpc-ec:  timeout waiting for"
2309  						" EC to provide data!\n");
2310 -				goto restart;
2311 +				if (restarts++ < 10)
2312 +					goto restart;
2313 +				goto err;
2314  			}
2315  			outbuf[i] = inb(0x68);
2316  			pr_devel("olpc-ec:  received 0x%x\n", outbuf[i]);
2317 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
2318 index e3af342..76a0d71 100644
2319 --- a/arch/x86/kernel/reboot.c
2320 +++ b/arch/x86/kernel/reboot.c
2321 @@ -641,7 +641,7 @@ void native_machine_shutdown(void)
2322  	/* O.K Now that I'm on the appropriate processor,
2323  	 * stop all of the others.
2324  	 */
2325 -	smp_send_stop();
2326 +	stop_other_cpus();
2327  #endif
2328  
2329  	lapic_shutdown();
2330 diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
2331 index 74cca60..96af3a8 100644
2332 --- a/arch/x86/kernel/smp.c
2333 +++ b/arch/x86/kernel/smp.c
2334 @@ -174,10 +174,10 @@ asmlinkage void smp_reboot_interrupt(void)
2335  	irq_exit();
2336  }
2337  
2338 -static void native_smp_send_stop(void)
2339 +static void native_stop_other_cpus(int wait)
2340  {
2341  	unsigned long flags;
2342 -	unsigned long wait;
2343 +	unsigned long timeout;
2344  
2345  	if (reboot_force)
2346  		return;
2347 @@ -194,9 +194,12 @@ static void native_smp_send_stop(void)
2348  	if (num_online_cpus() > 1) {
2349  		apic->send_IPI_allbutself(REBOOT_VECTOR);
2350  
2351 -		/* Don't wait longer than a second */
2352 -		wait = USEC_PER_SEC;
2353 -		while (num_online_cpus() > 1 && wait--)
2354 +		/*
2355 +		 * Don't wait longer than a second if the caller
2356 +		 * didn't ask us to wait.
2357 +		 */
2358 +		timeout = USEC_PER_SEC;
2359 +		while (num_online_cpus() > 1 && (wait || timeout--))
2360  			udelay(1);
2361  	}
2362  
2363 @@ -254,7 +257,7 @@ struct smp_ops smp_ops = {
2364  	.smp_prepare_cpus	= native_smp_prepare_cpus,
2365  	.smp_cpus_done		= native_smp_cpus_done,
2366  
2367 -	.smp_send_stop		= native_smp_send_stop,
2368 +	.stop_other_cpus	= native_stop_other_cpus,
2369  	.smp_send_reschedule	= native_smp_send_reschedule,
2370  
2371  	.cpu_up			= native_cpu_up,
2372 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
2373 index 8b3bfc4..016179e 100644
2374 --- a/arch/x86/kernel/smpboot.c
2375 +++ b/arch/x86/kernel/smpboot.c
2376 @@ -1383,11 +1383,94 @@ void play_dead_common(void)
2377  	local_irq_disable();
2378  }
2379  
2380 +#define MWAIT_SUBSTATE_MASK		0xf
2381 +#define MWAIT_SUBSTATE_SIZE		4
2382 +
2383 +#define CPUID_MWAIT_LEAF		5
2384 +#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
2385 +
2386 +/*
2387 + * We need to flush the caches before going to sleep, lest we have
2388 + * dirty data in our caches when we come back up.
2389 + */
2390 +static inline void mwait_play_dead(void)
2391 +{
2392 +	unsigned int eax, ebx, ecx, edx;
2393 +	unsigned int highest_cstate = 0;
2394 +	unsigned int highest_subcstate = 0;
2395 +	int i;
2396 +	void *mwait_ptr;
2397 +
2398 +	if (!cpu_has(&current_cpu_data, X86_FEATURE_MWAIT))
2399 +		return;
2400 +	if (!cpu_has(&current_cpu_data, X86_FEATURE_CLFLSH))
2401 +		return;
2402 +	if (current_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
2403 +		return;
2404 +
2405 +	eax = CPUID_MWAIT_LEAF;
2406 +	ecx = 0;
2407 +	native_cpuid(&eax, &ebx, &ecx, &edx);
2408 +
2409 +	/*
2410 +	 * eax will be 0 if EDX enumeration is not valid.
2411 +	 * Initialized below to cstate, sub_cstate value when EDX is valid.
2412 +	 */
2413 +	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
2414 +		eax = 0;
2415 +	} else {
2416 +		edx >>= MWAIT_SUBSTATE_SIZE;
2417 +		for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
2418 +			if (edx & MWAIT_SUBSTATE_MASK) {
2419 +				highest_cstate = i;
2420 +				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
2421 +			}
2422 +		}
2423 +		eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
2424 +			(highest_subcstate - 1);
2425 +	}
2426 +
2427 +	/*
2428 +	 * This should be a memory location in a cache line which is
2429 +	 * unlikely to be touched by other processors.  The actual
2430 +	 * content is immaterial as it is not actually modified in any way.
2431 +	 */
2432 +	mwait_ptr = &current_thread_info()->flags;
2433 +
2434 +	wbinvd();
2435 +
2436 +	while (1) {
2437 +		/*
2438 +		 * The CLFLUSH is a workaround for erratum AAI65 for
2439 +		 * the Xeon 7400 series.  It's not clear it is actually
2440 +		 * needed, but it should be harmless in either case.
2441 +		 * The WBINVD is insufficient due to the spurious-wakeup
2442 +		 * case where we return around the loop.
2443 +		 */
2444 +		clflush(mwait_ptr);
2445 +		__monitor(mwait_ptr, 0, 0);
2446 +		mb();
2447 +		__mwait(eax, 0);
2448 +	}
2449 +}
2450 +
2451 +static inline void hlt_play_dead(void)
2452 +{
2453 +	if (current_cpu_data.x86 >= 4)
2454 +		wbinvd();
2455 +
2456 +	while (1) {
2457 +		native_halt();
2458 +	}
2459 +}
2460 +
2461  void native_play_dead(void)
2462  {
2463  	play_dead_common();
2464  	tboot_shutdown(TB_SHUTDOWN_WFS);
2465 -	wbinvd_halt();
2466 +
2467 +	mwait_play_dead();	/* Only returns on failure */
2468 +	hlt_play_dead();
2469  }
2470  
2471  #else /* ... !CONFIG_HOTPLUG_CPU */
2472 diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
2473 index d78c5ed..e00d1af 100644
2474 --- a/arch/x86/kernel/syscall_table_32.S
2475 +++ b/arch/x86/kernel/syscall_table_32.S
2476 @@ -345,12 +345,14 @@ ENTRY(sys_call_table)
2477  	.long sys_complete_job
2478  	.long sys_od_open
2479  	.long sys_od_close
2480 -	.long sys_fmlp_down
2481 -	.long sys_fmlp_up
2482 -	.long sys_srp_down
2483 -	.long sys_srp_up
2484 +	.long sys_litmus_lock
2485 +	.long sys_litmus_unlock
2486  	.long sys_query_job_no
2487  	.long sys_wait_for_job_release
2488  	.long sys_wait_for_ts_release
2489  	.long sys_release_ts
2490  	.long sys_null_call
2491 +	.long sys_set_rt_task_mc_param
2492 +	.long sys_change_speed
2493 +	.long sys_register_pid
2494 +	.long sys_get_job_report
2495 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
2496 index 60788de..9f4edeb 100644
2497 --- a/arch/x86/kernel/traps.c
2498 +++ b/arch/x86/kernel/traps.c
2499 @@ -575,6 +575,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
2500  	if (regs->flags & X86_VM_MASK) {
2501  		handle_vm86_trap((struct kernel_vm86_regs *) regs,
2502  				error_code, 1);
2503 +		preempt_conditional_cli(regs);
2504  		return;
2505  	}
2506  
2507 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
2508 index 5ffb5622..61fb985 100644
2509 --- a/arch/x86/kernel/vm86_32.c
2510 +++ b/arch/x86/kernel/vm86_32.c
2511 @@ -551,8 +551,14 @@ cannot_handle:
2512  int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
2513  {
2514  	if (VMPI.is_vm86pus) {
2515 -		if ((trapno == 3) || (trapno == 1))
2516 -			return_to_32bit(regs, VM86_TRAP + (trapno << 8));
2517 +		if ((trapno == 3) || (trapno == 1)) {
2518 +			KVM86->regs32->ax = VM86_TRAP + (trapno << 8);
2519 +			/* setting this flag forces the code in entry_32.S to
2520 +			   call save_v86_state() and change the stack pointer
2521 +			   to KVM86->regs32 */
2522 +			set_thread_flag(TIF_IRET);
2523 +			return 0;
2524 +		}
2525  		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
2526  		return 0;
2527  	}
2528 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
2529 index 9c253bd..5471285 100644
2530 --- a/arch/x86/kernel/xsave.c
2531 +++ b/arch/x86/kernel/xsave.c
2532 @@ -394,7 +394,8 @@ static void __init setup_xstate_init(void)
2533  	 * Setup init_xstate_buf to represent the init state of
2534  	 * all the features managed by the xsave
2535  	 */
2536 -	init_xstate_buf = alloc_bootmem(xstate_size);
2537 +	init_xstate_buf = alloc_bootmem_align(xstate_size,
2538 +					      __alignof__(struct xsave_struct));
2539  	init_xstate_buf->i387.mxcsr = MXCSR_DEFAULT;
2540  
2541  	clts();
2542 diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
2543 index 4b7b73c..9f163e6 100644
2544 --- a/arch/x86/kvm/i8259.c
2545 +++ b/arch/x86/kvm/i8259.c
2546 @@ -570,6 +570,8 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm)
2547  	s->pics[1].elcr_mask = 0xde;
2548  	s->pics[0].pics_state = s;
2549  	s->pics[1].pics_state = s;
2550 +	s->pics[0].isr_ack = 0xff;
2551 +	s->pics[1].isr_ack = 0xff;
2552  
2553  	/*
2554  	 * Initialize PIO device
2555 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
2556 index 311f6da..7fed5b7 100644
2557 --- a/arch/x86/kvm/mmu.c
2558 +++ b/arch/x86/kvm/mmu.c
2559 @@ -2254,6 +2254,10 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
2560  		return 0;
2561  	}
2562  	direct = !is_paging(vcpu);
2563 +
2564 +	if (mmu_check_root(vcpu, root_gfn))
2565 +		return 1;
2566 +
2567  	for (i = 0; i < 4; ++i) {
2568  		hpa_t root = vcpu->arch.mmu.pae_root[i];
2569  
2570 @@ -2265,13 +2269,13 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
2571  				continue;
2572  			}
2573  			root_gfn = pdptr >> PAGE_SHIFT;
2574 +			if (mmu_check_root(vcpu, root_gfn))
2575 +				return 1;
2576  		} else if (vcpu->arch.mmu.root_level == 0)
2577  			root_gfn = 0;
2578 -		if (mmu_check_root(vcpu, root_gfn))
2579 -			return 1;
2580  		if (tdp_enabled) {
2581  			direct = 1;
2582 -			root_gfn = i << 30;
2583 +			root_gfn = i << (30 - PAGE_SHIFT);
2584  		}
2585  		spin_lock(&vcpu->kvm->mmu_lock);
2586  		kvm_mmu_free_some_pages(vcpu);
2587 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
2588 index 8a3f9f6..e7c3f3b 100644
2589 --- a/arch/x86/kvm/svm.c
2590 +++ b/arch/x86/kvm/svm.c
2591 @@ -88,6 +88,14 @@ struct nested_state {
2592  	/* A VMEXIT is required but not yet emulated */
2593  	bool exit_required;
2594  
2595 +	/*
2596 +	 * If we vmexit during an instruction emulation we need this to restore
2597 +	 * the l1 guest rip after the emulation
2598 +	 */
2599 +	unsigned long vmexit_rip;
2600 +	unsigned long vmexit_rsp;
2601 +	unsigned long vmexit_rax;
2602 +
2603  	/* cache for intercepts of the guest */
2604  	u16 intercept_cr_read;
2605  	u16 intercept_cr_write;
2606 @@ -1206,8 +1214,12 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
2607  		if (old == new) {
2608  			/* cr0 write with ts and mp unchanged */
2609  			svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
2610 -			if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE)
2611 +			if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE) {
2612 +				svm->nested.vmexit_rip = kvm_rip_read(vcpu);
2613 +				svm->nested.vmexit_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
2614 +				svm->nested.vmexit_rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
2615  				return;
2616 +			}
2617  		}
2618  	}
2619  
2620 @@ -2399,6 +2411,23 @@ static int emulate_on_interception(struct vcpu_svm *svm)
2621  	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
2622  }
2623  
2624 +static int cr0_write_interception(struct vcpu_svm *svm)
2625 +{
2626 +	struct kvm_vcpu *vcpu = &svm->vcpu;
2627 +	int r;
2628 +
2629 +	r = emulate_instruction(&svm->vcpu, 0, 0, 0);
2630 +
2631 +	if (svm->nested.vmexit_rip) {
2632 +		kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip);
2633 +		kvm_register_write(vcpu, VCPU_REGS_RSP, svm->nested.vmexit_rsp);
2634 +		kvm_register_write(vcpu, VCPU_REGS_RAX, svm->nested.vmexit_rax);
2635 +		svm->nested.vmexit_rip = 0;
2636 +	}
2637 +
2638 +	return r == EMULATE_DONE;
2639 +}
2640 +
2641  static int cr8_write_interception(struct vcpu_svm *svm)
2642  {
2643  	struct kvm_run *kvm_run = svm->vcpu.run;
2644 @@ -2672,7 +2701,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
2645  	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
2646  	[SVM_EXIT_READ_CR8]			= emulate_on_interception,
2647  	[SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception,
2648 -	[SVM_EXIT_WRITE_CR0]			= emulate_on_interception,
2649 +	[SVM_EXIT_WRITE_CR0]			= cr0_write_interception,
2650  	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
2651  	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
2652  	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
2653 @@ -3252,6 +3281,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
2654  	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
2655  
2656  	load_host_msrs(vcpu);
2657 +	kvm_load_ldt(ldt_selector);
2658  	loadsegment(fs, fs_selector);
2659  #ifdef CONFIG_X86_64
2660  	load_gs_index(gs_selector);
2661 @@ -3259,7 +3289,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
2662  #else
2663  	loadsegment(gs, gs_selector);
2664  #endif
2665 -	kvm_load_ldt(ldt_selector);
2666  
2667  	reload_tss(vcpu);
2668  
2669 @@ -3354,6 +3383,14 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
2670  static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
2671  {
2672  	switch (func) {
2673 +	case 0x00000001:
2674 +		/* Mask out xsave bit as long as it is not supported by SVM */
2675 +		entry->ecx &= ~(bit(X86_FEATURE_XSAVE));
2676 +		break;
2677 +	case 0x80000001:
2678 +		if (nested)
2679 +			entry->ecx |= (1 << 2); /* Set SVM bit */
2680 +		break;
2681  	case 0x8000000A:
2682  		entry->eax = 1; /* SVM revision 1 */
2683  		entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper
2684 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
2685 index 7bddfab..b3986fe 100644
2686 --- a/arch/x86/kvm/vmx.c
2687 +++ b/arch/x86/kvm/vmx.c
2688 @@ -828,10 +828,9 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
2689  #endif
2690  
2691  #ifdef CONFIG_X86_64
2692 -	if (is_long_mode(&vmx->vcpu)) {
2693 -		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2694 +	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2695 +	if (is_long_mode(&vmx->vcpu))
2696  		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2697 -	}
2698  #endif
2699  	for (i = 0; i < vmx->save_nmsrs; ++i)
2700  		kvm_set_shared_msr(vmx->guest_msrs[i].index,
2701 @@ -846,23 +845,23 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
2702  
2703  	++vmx->vcpu.stat.host_state_reload;
2704  	vmx->host_state.loaded = 0;
2705 -	if (vmx->host_state.fs_reload_needed)
2706 -		loadsegment(fs, vmx->host_state.fs_sel);
2707 +#ifdef CONFIG_X86_64
2708 +	if (is_long_mode(&vmx->vcpu))
2709 +		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2710 +#endif
2711  	if (vmx->host_state.gs_ldt_reload_needed) {
2712  		kvm_load_ldt(vmx->host_state.ldt_sel);
2713  #ifdef CONFIG_X86_64
2714  		load_gs_index(vmx->host_state.gs_sel);
2715 -		wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
2716  #else
2717  		loadsegment(gs, vmx->host_state.gs_sel);
2718  #endif
2719  	}
2720 +	if (vmx->host_state.fs_reload_needed)
2721 +		loadsegment(fs, vmx->host_state.fs_sel);
2722  	reload_tss();
2723  #ifdef CONFIG_X86_64
2724 -	if (is_long_mode(&vmx->vcpu)) {
2725 -		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2726 -		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2727 -	}
2728 +	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2729  #endif
2730  	if (current_thread_info()->status & TS_USEDFPU)
2731  		clts();
2732 @@ -4249,11 +4248,6 @@ static int vmx_get_lpage_level(void)
2733  		return PT_PDPE_LEVEL;
2734  }
2735  
2736 -static inline u32 bit(int bitno)
2737 -{
2738 -	return 1 << (bitno & 31);
2739 -}
2740 -
2741  static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
2742  {
2743  	struct kvm_cpuid_entry2 *best;
2744 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
2745 index 3a09c62..a5746de 100644
2746 --- a/arch/x86/kvm/x86.c
2747 +++ b/arch/x86/kvm/x86.c
2748 @@ -153,11 +153,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
2749  
2750  u64 __read_mostly host_xcr0;
2751  
2752 -static inline u32 bit(int bitno)
2753 -{
2754 -	return 1 << (bitno & 31);
2755 -}
2756 -
2757  static void kvm_on_user_return(struct user_return_notifier *urn)
2758  {
2759  	unsigned slot;
2760 @@ -1994,9 +1989,9 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
2761  		0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX);
2762  	/* cpuid 0x80000001.ecx */
2763  	const u32 kvm_supported_word6_x86_features =
2764 -		F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
2765 +		F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
2766  		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
2767 -		F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
2768 +		F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
2769  		0 /* SKINIT */ | 0 /* WDT */;
2770  
2771  	/* all calls to cpuid_count() should be made on the same cpu */
2772 @@ -2305,6 +2300,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
2773  		!kvm_exception_is_soft(vcpu->arch.exception.nr);
2774  	events->exception.nr = vcpu->arch.exception.nr;
2775  	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
2776 +	events->exception.pad = 0;
2777  	events->exception.error_code = vcpu->arch.exception.error_code;
2778  
2779  	events->interrupt.injected =
2780 @@ -2318,12 +2314,14 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
2781  	events->nmi.injected = vcpu->arch.nmi_injected;
2782  	events->nmi.pending = vcpu->arch.nmi_pending;
2783  	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
2784 +	events->nmi.pad = 0;
2785  
2786  	events->sipi_vector = vcpu->arch.sipi_vector;
2787  
2788  	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
2789  			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
2790  			 | KVM_VCPUEVENT_VALID_SHADOW);
2791 +	memset(&events->reserved, 0, sizeof(events->reserved));
2792  }
2793  
2794  static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
2795 @@ -2366,6 +2364,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
2796  	dbgregs->dr6 = vcpu->arch.dr6;
2797  	dbgregs->dr7 = vcpu->arch.dr7;
2798  	dbgregs->flags = 0;
2799 +	memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
2800  }
2801  
2802  static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
2803 @@ -2849,6 +2848,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
2804  		sizeof(ps->channels));
2805  	ps->flags = kvm->arch.vpit->pit_state.flags;
2806  	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2807 +	memset(&ps->reserved, 0, sizeof(ps->reserved));
2808  	return r;
2809  }
2810  
2811 @@ -2912,10 +2912,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2812  		struct kvm_memslots *slots, *old_slots;
2813  		unsigned long *dirty_bitmap;
2814  
2815 -		spin_lock(&kvm->mmu_lock);
2816 -		kvm_mmu_slot_remove_write_access(kvm, log->slot);
2817 -		spin_unlock(&kvm->mmu_lock);
2818 -
2819  		r = -ENOMEM;
2820  		dirty_bitmap = vmalloc(n);
2821  		if (!dirty_bitmap)
2822 @@ -2937,6 +2933,10 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2823  		dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
2824  		kfree(old_slots);
2825  
2826 +		spin_lock(&kvm->mmu_lock);
2827 +		kvm_mmu_slot_remove_write_access(kvm, log->slot);
2828 +		spin_unlock(&kvm->mmu_lock);
2829 +
2830  		r = -EFAULT;
2831  		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
2832  			vfree(dirty_bitmap);
2833 @@ -3229,6 +3229,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
2834  		now_ns = timespec_to_ns(&now);
2835  		user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
2836  		user_ns.flags = 0;
2837 +		memset(&user_ns.pad, 0, sizeof(user_ns.pad));
2838  
2839  		r = -EFAULT;
2840  		if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
2841 @@ -5111,6 +5112,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2842  
2843  	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
2844  	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
2845 +	if (sregs->cr4 & X86_CR4_OSXSAVE)
2846 +		update_cpuid(vcpu);
2847  	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
2848  		load_pdptrs(vcpu, vcpu->arch.cr3);
2849  		mmu_reset_needed = 1;
2850 diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
2851 index b7a4047..0bf3274 100644
2852 --- a/arch/x86/kvm/x86.h
2853 +++ b/arch/x86/kvm/x86.h
2854 @@ -65,6 +65,11 @@ static inline int is_paging(struct kvm_vcpu *vcpu)
2855  	return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
2856  }
2857  
2858 +static inline u32 bit(int bitno)
2859 +{
2860 +	return 1 << (bitno & 31);
2861 +}
2862 +
2863  void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
2864  void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
2865  
2866 diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
2867 index b67a6b5..4262331 100644
2868 --- a/arch/x86/oprofile/op_model_amd.c
2869 +++ b/arch/x86/oprofile/op_model_amd.c
2870 @@ -484,21 +484,29 @@ static int __init_ibs_nmi(void)
2871  	return 0;
2872  }
2873  
2874 -/* initialize the APIC for the IBS interrupts if available */
2875 +/*
2876 + * check and reserve APIC extended interrupt LVT offset for IBS if
2877 + * available
2878 + *
2879 + * init_ibs() performs implicitly cpu-local operations, so pin this
2880 + * thread to its current CPU
2881 + */
2882 +
2883  static void init_ibs(void)
2884  {
2885 -	ibs_caps = get_ibs_caps();
2886 +	preempt_disable();
2887  
2888 +	ibs_caps = get_ibs_caps();
2889  	if (!ibs_caps)
2890 -		return;
2891 +		goto out;
2892  
2893 -	if (__init_ibs_nmi()) {
2894 +	if (__init_ibs_nmi() < 0)
2895  		ibs_caps = 0;
2896 -		return;
2897 -	}
2898 +	else
2899 +		printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
2900  
2901 -	printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n",
2902 -	       (unsigned)ibs_caps);
2903 +out:
2904 +	preempt_enable();
2905  }
2906  
2907  static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
2908 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
2909 index 4a2afa1..b6552b1 100644
2910 --- a/arch/x86/vdso/Makefile
2911 +++ b/arch/x86/vdso/Makefile
2912 @@ -25,7 +25,7 @@ targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y)
2913  
2914  export CPPFLAGS_vdso.lds += -P -C
2915  
2916 -VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -Wl,-soname=linux-vdso.so.1 \
2917 +VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
2918  		      	-Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
2919  
2920  $(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
2921 @@ -69,7 +69,7 @@ vdso32.so-$(VDSO32-y)		+= sysenter
2922  vdso32-images			= $(vdso32.so-y:%=vdso32-%.so)
2923  
2924  CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
2925 -VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -Wl,-soname=linux-gate.so.1
2926 +VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-soname=linux-gate.so.1
2927  
2928  # This makes sure the $(obj) subdirectory exists even though vdso32/
2929  # is not a kbuild sub-make subdirectory.
2930 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
2931 index 7d46c84..0f6cd14 100644
2932 --- a/arch/x86/xen/enlighten.c
2933 +++ b/arch/x86/xen/enlighten.c
2934 @@ -1017,10 +1017,6 @@ static void xen_reboot(int reason)
2935  {
2936  	struct sched_shutdown r = { .reason = reason };
2937  
2938 -#ifdef CONFIG_SMP
2939 -	smp_send_stop();
2940 -#endif
2941 -
2942  	if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
2943  		BUG();
2944  }
2945 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
2946 index 25f232b..f4d0100 100644
2947 --- a/arch/x86/xen/smp.c
2948 +++ b/arch/x86/xen/smp.c
2949 @@ -400,9 +400,9 @@ static void stop_self(void *v)
2950  	BUG();
2951  }
2952  
2953 -static void xen_smp_send_stop(void)
2954 +static void xen_stop_other_cpus(int wait)
2955  {
2956 -	smp_call_function(stop_self, NULL, 0);
2957 +	smp_call_function(stop_self, NULL, wait);
2958  }
2959  
2960  static void xen_smp_send_reschedule(int cpu)
2961 @@ -470,7 +470,7 @@ static const struct smp_ops xen_smp_ops __initdata = {
2962  	.cpu_disable = xen_cpu_disable,
2963  	.play_dead = xen_play_dead,
2964  
2965 -	.smp_send_stop = xen_smp_send_stop,
2966 +	.stop_other_cpus = xen_stop_other_cpus,
2967  	.smp_send_reschedule = xen_smp_send_reschedule,
2968  
2969  	.send_call_func_ipi = xen_smp_send_call_function_ipi,
2970 diff --git a/block/blk-map.c b/block/blk-map.c
2971 index ade0a08..267a57b 100644
2972 --- a/block/blk-map.c
2973 +++ b/block/blk-map.c
2974 @@ -205,6 +205,8 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
2975  			unaligned = 1;
2976  			break;
2977  		}
2978 +		if (!iov[i].iov_len)
2979 +			return -EINVAL;
2980  	}
2981  
2982  	if (unaligned || (q->dma_pad_mask & len) || map_data)
2983 diff --git a/block/blk-merge.c b/block/blk-merge.c
2984 index eafc94f..c24bf43 100644
2985 --- a/block/blk-merge.c
2986 +++ b/block/blk-merge.c
2987 @@ -21,7 +21,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
2988  		return 0;
2989  
2990  	fbio = bio;
2991 -	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
2992 +	cluster = blk_queue_cluster(q);
2993  	seg_size = 0;
2994  	nr_phys_segs = 0;
2995  	for_each_bio(bio) {
2996 @@ -87,7 +87,7 @@ EXPORT_SYMBOL(blk_recount_segments);
2997  static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
2998  				   struct bio *nxt)
2999  {
3000 -	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
3001 +	if (!blk_queue_cluster(q))
3002  		return 0;
3003  
3004  	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
3005 @@ -123,7 +123,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
3006  	int nsegs, cluster;
3007  
3008  	nsegs = 0;
3009 -	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
3010 +	cluster = blk_queue_cluster(q);
3011  
3012  	/*
3013  	 * for each bio in rq
3014 diff --git a/block/blk-settings.c b/block/blk-settings.c
3015 index a234f4b..ea9430d 100644
3016 --- a/block/blk-settings.c
3017 +++ b/block/blk-settings.c
3018 @@ -125,7 +125,7 @@ void blk_set_default_limits(struct queue_limits *lim)
3019  	lim->alignment_offset = 0;
3020  	lim->io_opt = 0;
3021  	lim->misaligned = 0;
3022 -	lim->no_cluster = 0;
3023 +	lim->cluster = 1;
3024  }
3025  EXPORT_SYMBOL(blk_set_default_limits);
3026  
3027 @@ -343,7 +343,7 @@ EXPORT_SYMBOL(blk_queue_logical_block_size);
3028   *   hardware can operate on without reverting to read-modify-write
3029   *   operations.
3030   */
3031 -void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
3032 +void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
3033  {
3034  	q->limits.physical_block_size = size;
3035  
3036 @@ -468,15 +468,6 @@ EXPORT_SYMBOL(blk_queue_io_opt);
3037  void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
3038  {
3039  	blk_stack_limits(&t->limits, &b->limits, 0);
3040 -
3041 -	if (!t->queue_lock)
3042 -		WARN_ON_ONCE(1);
3043 -	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
3044 -		unsigned long flags;
3045 -		spin_lock_irqsave(t->queue_lock, flags);
3046 -		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
3047 -		spin_unlock_irqrestore(t->queue_lock, flags);
3048 -	}
3049  }
3050  EXPORT_SYMBOL(blk_queue_stack_limits);
3051  
3052 @@ -547,7 +538,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
3053  	t->io_min = max(t->io_min, b->io_min);
3054  	t->io_opt = lcm(t->io_opt, b->io_opt);
3055  
3056 -	t->no_cluster |= b->no_cluster;
3057 +	t->cluster &= b->cluster;
3058  	t->discard_zeroes_data &= b->discard_zeroes_data;
3059  
3060  	/* Physical block size a multiple of the logical block size? */
3061 @@ -643,7 +634,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
3062  		       sector_t offset)
3063  {
3064  	struct request_queue *t = disk->queue;
3065 -	struct request_queue *b = bdev_get_queue(bdev);
3066  
3067  	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
3068  		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
3069 @@ -654,17 +644,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
3070  		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
3071  		       top, bottom);
3072  	}
3073 -
3074 -	if (!t->queue_lock)
3075 -		WARN_ON_ONCE(1);
3076 -	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
3077 -		unsigned long flags;
3078 -
3079 -		spin_lock_irqsave(t->queue_lock, flags);
3080 -		if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
3081 -			queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
3082 -		spin_unlock_irqrestore(t->queue_lock, flags);
3083 -	}
3084  }
3085  EXPORT_SYMBOL(disk_stack_limits);
3086  
3087 diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
3088 index 0749b89..a26c930 100644
3089 --- a/block/blk-sysfs.c
3090 +++ b/block/blk-sysfs.c
3091 @@ -114,7 +114,7 @@ static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
3092  
3093  static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
3094  {
3095 -	if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
3096 +	if (blk_queue_cluster(q))
3097  		return queue_var_show(queue_max_segment_size(q), (page));
3098  
3099  	return queue_var_show(PAGE_CACHE_SIZE, (page));
3100 diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
3101 index 9eba291..909479c 100644
3102 --- a/block/cfq-iosched.c
3103 +++ b/block/cfq-iosched.c
3104 @@ -3402,6 +3402,10 @@ static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3105  {
3106  	struct cfq_io_context *cic = cfqd->active_cic;
3107  
3108 +	/* If the queue already has requests, don't wait */
3109 +	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3110 +		return false;
3111 +
3112  	/* If there are other queues in the group, don't wait */
3113  	if (cfqq->cfqg->nr_cfqq > 1)
3114  		return false;
3115 diff --git a/block/genhd.c b/block/genhd.c
3116 index 59a2db6..315afd2 100644
3117 --- a/block/genhd.c
3118 +++ b/block/genhd.c
3119 @@ -541,13 +541,15 @@ void add_disk(struct gendisk *disk)
3120  	disk->major = MAJOR(devt);
3121  	disk->first_minor = MINOR(devt);
3122  
3123 +	/* Register BDI before referencing it from bdev */
3124 +	bdi = &disk->queue->backing_dev_info;
3125 +	bdi_register_dev(bdi, disk_devt(disk));
3126 +
3127  	blk_register_region(disk_devt(disk), disk->minors, NULL,
3128  			    exact_match, exact_lock, disk);
3129  	register_disk(disk);
3130  	blk_register_queue(disk);
3131  
3132 -	bdi = &disk->queue->backing_dev_info;
3133 -	bdi_register_dev(bdi, disk_devt(disk));
3134  	retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj,
3135  				   "bdi");
3136  	WARN_ON(retval);
3137 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
3138 index a8b5a10..4f4230b 100644
3139 --- a/block/scsi_ioctl.c
3140 +++ b/block/scsi_ioctl.c
3141 @@ -321,33 +321,47 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
3142  	if (hdr->iovec_count) {
3143  		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
3144  		size_t iov_data_len;
3145 -		struct sg_iovec *iov;
3146 +		struct sg_iovec *sg_iov;
3147 +		struct iovec *iov;
3148 +		int i;
3149  
3150 -		iov = kmalloc(size, GFP_KERNEL);
3151 -		if (!iov) {
3152 +		sg_iov = kmalloc(size, GFP_KERNEL);
3153 +		if (!sg_iov) {
3154  			ret = -ENOMEM;
3155  			goto out;
3156  		}
3157  
3158 -		if (copy_from_user(iov, hdr->dxferp, size)) {
3159 -			kfree(iov);
3160 +		if (copy_from_user(sg_iov, hdr->dxferp, size)) {
316